ERD | ERD-main/projects/DiffusionDet/diffusiondet/head.py
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from https://github.com/ShoufaChen/DiffusionDet/blob/main/diffusiondet/detector.py # noqa
# Modified from https://github.com/ShoufaChen/DiffusionDet/blob/main/diffusiondet/head.py # noqa
# This work is licensed under the CC-BY-NC 4.0 License.
# Users should be careful about adopting these features in any commercial matters. # noqa
# For more details, please refer to https://github.com/ShoufaChen/DiffusionDet/blob/main/LICENSE # noqa
import copy
import math
import random
import warnings
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_activation_layer
from mmcv.ops import batched_nms
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures import SampleList
from mmdet.structures.bbox import (bbox2roi, bbox_cxcywh_to_xyxy,
bbox_xyxy_to_cxcywh, get_box_wh,
scale_boxes)
from mmdet.utils import InstanceList
_DEFAULT_SCALE_CLAMP = math.log(100000.0 / 16)
def cosine_beta_schedule(timesteps, s=0.008):
"""Cosine schedule as proposed in
https://openreview.net/forum?id=-NEXDKk8gZ."""
steps = timesteps + 1
x = torch.linspace(0, timesteps, steps, dtype=torch.float64)
alphas_cumprod = torch.cos(
((x / timesteps) + s) / (1 + s) * math.pi * 0.5)**2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
def extract(a, t, x_shape):
"""extract the appropriate t index for a batch of indices."""
batch_size = t.shape[0]
out = a.gather(-1, t)
return out.reshape(batch_size, *((1, ) * (len(x_shape) - 1)))
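# --- Added illustrative sketch (not part of the original file); the helper
# name below is hypothetical. `extract` gathers the per-timestep coefficient
# for each sample in a batch and reshapes it so it broadcasts against a tensor
# of shape `x_shape`:
def _demo_extract():
    import torch
    a = torch.linspace(0.1, 1.0, 10)   # e.g. a cumulative-product schedule
    t = torch.tensor([0, 4, 9])        # one timestep index per sample
    out = extract(a, t, (3, 500, 4))   # broadcastable against (3, 500, 4) boxes
    assert out.shape == (3, 1, 1)
    return out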
class SinusoidalPositionEmbeddings(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, time):
device = time.device
half_dim = self.dim // 2
embeddings = math.log(10000) / (half_dim - 1)
embeddings = torch.exp(
torch.arange(half_dim, device=device) * -embeddings)
embeddings = time[:, None] * embeddings[None, :]
embeddings = torch.cat((embeddings.sin(), embeddings.cos()), dim=-1)
return embeddings
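# --- Added illustrative sketch (not part of the original file); the helper
# name below is hypothetical. The embedding maps a batch of scalar timesteps
# (B,) to sinusoidal features (B, dim), half sine / half cosine, analogous to
# transformer positional encodings:
def _demo_sinusoidal_embeddings():
    import torch
    emb = SinusoidalPositionEmbeddings(256)
    t = torch.tensor([0., 10., 999.])
    out = emb(t)
    assert out.shape == (3, 256)
    return out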
@MODELS.register_module()
class DynamicDiffusionDetHead(nn.Module):
def __init__(self,
num_classes=80,
feat_channels=256,
num_proposals=500,
num_heads=6,
prior_prob=0.01,
snr_scale=2.0,
timesteps=1000,
sampling_timesteps=1,
self_condition=False,
box_renewal=True,
use_ensemble=True,
deep_supervision=True,
ddim_sampling_eta=1.0,
criterion=dict(
type='DiffusionDetCriterion',
num_classes=80,
assigner=dict(
type='DiffusionDetMatcher',
match_costs=[
dict(
type='FocalLossCost',
alpha=2.0,
gamma=0.25,
weight=2.0),
dict(
type='BBoxL1Cost',
weight=5.0,
box_format='xyxy'),
dict(type='IoUCost', iou_mode='giou', weight=2.0)
],
center_radius=2.5,
candidate_topk=5),
),
single_head=dict(
type='DiffusionDetHead',
num_cls_convs=1,
num_reg_convs=3,
dim_feedforward=2048,
num_heads=8,
dropout=0.0,
act_cfg=dict(type='ReLU'),
dynamic_conv=dict(dynamic_dim=64, dynamic_num=2)),
roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
test_cfg=None,
**kwargs) -> None:
super().__init__()
self.roi_extractor = MODELS.build(roi_extractor)
self.num_classes = num_classes
self.feat_channels = feat_channels
self.num_proposals = num_proposals
self.num_heads = num_heads
# Build Diffusion
assert isinstance(timesteps, int), 'The type of `timesteps` should ' \
f'be int but got {type(timesteps)}'
assert sampling_timesteps <= timesteps
self.timesteps = timesteps
self.sampling_timesteps = sampling_timesteps
self.snr_scale = snr_scale
self.ddim_sampling = self.sampling_timesteps < self.timesteps
self.ddim_sampling_eta = ddim_sampling_eta
self.self_condition = self_condition
self.box_renewal = box_renewal
self.use_ensemble = use_ensemble
self._build_diffusion()
# Build assigner
assert criterion.get('assigner', None) is not None
assigner = TASK_UTILS.build(criterion.get('assigner'))
# Init parameters.
self.use_focal_loss = assigner.use_focal_loss
self.use_fed_loss = assigner.use_fed_loss
# build criterion
criterion.update(deep_supervision=deep_supervision)
self.criterion = TASK_UTILS.build(criterion)
# Build Dynamic Head.
single_head_ = single_head.copy()
single_head_num_classes = single_head_.get('num_classes', None)
if single_head_num_classes is None:
single_head_.update(num_classes=num_classes)
else:
if single_head_num_classes != num_classes:
warnings.warn(
'The `num_classes` of `DynamicDiffusionDetHead` and '
'`SingleDiffusionDetHead` should be same, changing '
f'`single_head.num_classes` to {num_classes}')
single_head_.update(num_classes=num_classes)
single_head_feat_channels = single_head_.get('feat_channels', None)
if single_head_feat_channels is None:
single_head_.update(feat_channels=feat_channels)
else:
if single_head_feat_channels != feat_channels:
warnings.warn(
'The `feat_channels` of `DynamicDiffusionDetHead` and '
'`SingleDiffusionDetHead` should be same, changing '
f'`single_head.feat_channels` to {feat_channels}')
single_head_.update(feat_channels=feat_channels)
default_pooler_resolution = roi_extractor['roi_layer'].get(
'output_size')
assert default_pooler_resolution is not None
single_head_pooler_resolution = single_head_.get('pooler_resolution')
if single_head_pooler_resolution is None:
single_head_.update(pooler_resolution=default_pooler_resolution)
else:
if single_head_pooler_resolution != default_pooler_resolution:
warnings.warn(
'The `pooler_resolution` of `DynamicDiffusionDetHead` '
'and `SingleDiffusionDetHead` should be same, changing '
f'`single_head.pooler_resolution` to {default_pooler_resolution}')
single_head_.update(
pooler_resolution=default_pooler_resolution)
single_head_.update(
use_focal_loss=self.use_focal_loss, use_fed_loss=self.use_fed_loss)
single_head_module = MODELS.build(single_head_)
self.num_heads = num_heads
self.head_series = nn.ModuleList(
[copy.deepcopy(single_head_module) for _ in range(num_heads)])
self.deep_supervision = deep_supervision
# Gaussian random feature embedding layer for time
time_dim = feat_channels * 4
self.time_mlp = nn.Sequential(
SinusoidalPositionEmbeddings(feat_channels),
nn.Linear(feat_channels, time_dim), nn.GELU(),
nn.Linear(time_dim, time_dim))
self.prior_prob = prior_prob
self.test_cfg = test_cfg
self.use_nms = self.test_cfg.get('use_nms', True)
self._init_weights()
def _init_weights(self):
# init all parameters.
bias_value = -math.log((1 - self.prior_prob) / self.prior_prob)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
# initialize the bias for focal loss and fed loss.
if self.use_focal_loss or self.use_fed_loss:
if p.shape[-1] == self.num_classes or \
p.shape[-1] == self.num_classes + 1:
nn.init.constant_(p, bias_value)
def _build_diffusion(self):
betas = cosine_beta_schedule(self.timesteps)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.)
self.register_buffer('betas', betas)
self.register_buffer('alphas_cumprod', alphas_cumprod)
self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
self.register_buffer('sqrt_one_minus_alphas_cumprod',
torch.sqrt(1. - alphas_cumprod))
self.register_buffer('log_one_minus_alphas_cumprod',
torch.log(1. - alphas_cumprod))
self.register_buffer('sqrt_recip_alphas_cumprod',
torch.sqrt(1. / alphas_cumprod))
self.register_buffer('sqrt_recipm1_alphas_cumprod',
torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
# equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (
1. - alphas_cumprod)
self.register_buffer('posterior_variance', posterior_variance)
# log calculation clipped because the posterior variance is 0 at
# the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped',
torch.log(posterior_variance.clamp(min=1e-20)))
self.register_buffer(
'posterior_mean_coef1',
betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
self.register_buffer('posterior_mean_coef2',
(1. - alphas_cumprod_prev) * torch.sqrt(alphas) /
(1. - alphas_cumprod))
def forward(self, features, init_bboxes, init_t, init_features=None):
time = self.time_mlp(init_t, )
inter_class_logits = []
inter_pred_bboxes = []
bs = len(features[0])
bboxes = init_bboxes
if init_features is not None:
init_features = init_features[None].repeat(1, bs, 1)
proposal_features = init_features.clone()
else:
proposal_features = None
for head_idx, single_head in enumerate(self.head_series):
class_logits, pred_bboxes, proposal_features = single_head(
features, bboxes, proposal_features, self.roi_extractor, time)
if self.deep_supervision:
inter_class_logits.append(class_logits)
inter_pred_bboxes.append(pred_bboxes)
bboxes = pred_bboxes.detach()
if self.deep_supervision:
return torch.stack(inter_class_logits), torch.stack(
inter_pred_bboxes)
else:
return class_logits[None, ...], pred_bboxes[None, ...]
def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
head on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
prepare_outputs = self.prepare_training_targets(batch_data_samples)
(batch_gt_instances, batch_pred_instances, batch_gt_instances_ignore,
batch_img_metas) = prepare_outputs
batch_diff_bboxes = torch.stack([
pred_instances.diff_bboxes_abs
for pred_instances in batch_pred_instances
])
batch_time = torch.stack(
[pred_instances.time for pred_instances in batch_pred_instances])
pred_logits, pred_bboxes = self(x, batch_diff_bboxes, batch_time)
output = {
'pred_logits': pred_logits[-1],
'pred_boxes': pred_bboxes[-1]
}
if self.deep_supervision:
output['aux_outputs'] = [{
'pred_logits': a,
'pred_boxes': b
} for a, b in zip(pred_logits[:-1], pred_bboxes[:-1])]
losses = self.criterion(output, batch_gt_instances, batch_img_metas)
return losses
def prepare_training_targets(self, batch_data_samples):
# hard-setting seed to keep results same (if necessary)
# random.seed(0)
# torch.manual_seed(0)
# torch.cuda.manual_seed_all(0)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
batch_gt_instances = []
batch_pred_instances = []
batch_gt_instances_ignore = []
batch_img_metas = []
for data_sample in batch_data_samples:
img_meta = data_sample.metainfo
gt_instances = data_sample.gt_instances
gt_bboxes = gt_instances.bboxes
h, w = img_meta['img_shape']
image_size = gt_bboxes.new_tensor([w, h, w, h])
norm_gt_bboxes = gt_bboxes / image_size
norm_gt_bboxes_cxcywh = bbox_xyxy_to_cxcywh(norm_gt_bboxes)
pred_instances = self.prepare_diffusion(norm_gt_bboxes_cxcywh,
image_size)
gt_instances.set_metainfo(dict(image_size=image_size))
gt_instances.norm_bboxes_cxcywh = norm_gt_bboxes_cxcywh
batch_gt_instances.append(gt_instances)
batch_pred_instances.append(pred_instances)
batch_img_metas.append(data_sample.metainfo)
if 'ignored_instances' in data_sample:
batch_gt_instances_ignore.append(data_sample.ignored_instances)
else:
batch_gt_instances_ignore.append(None)
return (batch_gt_instances, batch_pred_instances,
batch_gt_instances_ignore, batch_img_metas)
def prepare_diffusion(self, gt_boxes, image_size):
device = gt_boxes.device
time = torch.randint(
0, self.timesteps, (1, ), dtype=torch.long, device=device)
noise = torch.randn(self.num_proposals, 4, device=device)
num_gt = gt_boxes.shape[0]
if num_gt < self.num_proposals:
# 3 * sigma = 1/2 --> sigma: 1/6
box_placeholder = torch.randn(
self.num_proposals - num_gt, 4, device=device) / 6. + 0.5
box_placeholder[:, 2:] = torch.clip(
box_placeholder[:, 2:], min=1e-4)
x_start = torch.cat((gt_boxes, box_placeholder), dim=0)
else:
select_mask = [True] * self.num_proposals + \
[False] * (num_gt - self.num_proposals)
random.shuffle(select_mask)
x_start = gt_boxes[select_mask]
x_start = (x_start * 2. - 1.) * self.snr_scale
# noise sample
x = self.q_sample(x_start=x_start, time=time, noise=noise)
x = torch.clamp(x, min=-1 * self.snr_scale, max=self.snr_scale)
x = ((x / self.snr_scale) + 1) / 2.
diff_bboxes = bbox_cxcywh_to_xyxy(x)
# convert to abs bboxes
diff_bboxes_abs = diff_bboxes * image_size
metainfo = dict(time=time.squeeze(-1))
pred_instances = InstanceData(metainfo=metainfo)
pred_instances.diff_bboxes = diff_bboxes
pred_instances.diff_bboxes_abs = diff_bboxes_abs
pred_instances.noise = noise
return pred_instances
# forward diffusion
def q_sample(self, x_start, time, noise=None):
if noise is None:
noise = torch.randn_like(x_start)
x_start_shape = x_start.shape
sqrt_alphas_cumprod_t = extract(self.sqrt_alphas_cumprod, time,
x_start_shape)
sqrt_one_minus_alphas_cumprod_t = extract(
self.sqrt_one_minus_alphas_cumprod, time, x_start_shape)
return sqrt_alphas_cumprod_t * x_start + \
sqrt_one_minus_alphas_cumprod_t * noise
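    # --- Added note (not part of the original file) ---
    # `q_sample` is the standard DDPM forward process applied to the scaled box
    # coordinates: with abar_t = alphas_cumprod[t],
    #     x_t = sqrt(abar_t) * x_start + sqrt(1 - abar_t) * noise,
    # so boxes are nearly unchanged at t = 0 and close to pure Gaussian noise
    # as t approaches self.timesteps - 1.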
def predict(self,
x: Tuple[Tensor],
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the detection head and predict
detection results on the features of the upstream network.
Args:
x (tuple[Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[obj:`InstanceData`]: Detection results of each image
after the post process.
"""
# hard-setting seed to keep results same (if necessary)
# seed = 0
# random.seed(seed)
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
device = x[-1].device
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
(time_pairs, batch_noise_bboxes, batch_noise_bboxes_raw,
batch_image_size) = self.prepare_testing_targets(
batch_img_metas, device)
predictions = self.predict_by_feat(
x,
time_pairs=time_pairs,
batch_noise_bboxes=batch_noise_bboxes,
batch_noise_bboxes_raw=batch_noise_bboxes_raw,
batch_image_size=batch_image_size,
device=device,
batch_img_metas=batch_img_metas)
return predictions
def predict_by_feat(self,
x,
time_pairs,
batch_noise_bboxes,
batch_noise_bboxes_raw,
batch_image_size,
device,
batch_img_metas=None,
cfg=None,
rescale=True):
batch_size = len(batch_img_metas)
cfg = self.test_cfg if cfg is None else cfg
cfg = copy.deepcopy(cfg)
ensemble_score, ensemble_label, ensemble_coord = [], [], []
for time, time_next in time_pairs:
batch_time = torch.full((batch_size, ),
time,
device=device,
dtype=torch.long)
# self_condition = x_start if self.self_condition else None
pred_logits, pred_bboxes = self(x, batch_noise_bboxes, batch_time)
x_start = pred_bboxes[-1]
x_start = x_start / batch_image_size[:, None, :]
x_start = bbox_xyxy_to_cxcywh(x_start)
x_start = (x_start * 2 - 1.) * self.snr_scale
x_start = torch.clamp(
x_start, min=-1 * self.snr_scale, max=self.snr_scale)
pred_noise = self.predict_noise_from_start(batch_noise_bboxes_raw,
batch_time, x_start)
pred_noise_list, x_start_list = [], []
noise_bboxes_list, num_remain_list = [], []
if self.box_renewal: # filter
score_thr = cfg.get('score_thr', 0)
for img_id in range(batch_size):
score_per_image = pred_logits[-1][img_id]
score_per_image = torch.sigmoid(score_per_image)
value, _ = torch.max(score_per_image, -1, keepdim=False)
keep_idx = value > score_thr
num_remain_list.append(torch.sum(keep_idx))
pred_noise_list.append(pred_noise[img_id, keep_idx, :])
x_start_list.append(x_start[img_id, keep_idx, :])
noise_bboxes_list.append(batch_noise_bboxes[img_id,
keep_idx, :])
if time_next < 0:
# Not same as original DiffusionDet
if self.use_ensemble and self.sampling_timesteps > 1:
box_pred_per_image, scores_per_image, labels_per_image = \
self.inference(
box_cls=pred_logits[-1],
box_pred=pred_bboxes[-1],
cfg=cfg,
device=device)
ensemble_score.append(scores_per_image)
ensemble_label.append(labels_per_image)
ensemble_coord.append(box_pred_per_image)
continue
alpha = self.alphas_cumprod[time]
alpha_next = self.alphas_cumprod[time_next]
sigma = self.ddim_sampling_eta * ((1 - alpha / alpha_next) *
(1 - alpha_next) /
(1 - alpha)).sqrt()
c = (1 - alpha_next - sigma**2).sqrt()
batch_noise_bboxes_list = []
batch_noise_bboxes_raw_list = []
for idx in range(batch_size):
pred_noise = pred_noise_list[idx]
x_start = x_start_list[idx]
noise_bboxes = noise_bboxes_list[idx]
num_remain = num_remain_list[idx]
noise = torch.randn_like(noise_bboxes)
noise_bboxes = x_start * alpha_next.sqrt() + \
c * pred_noise + sigma * noise
if self.box_renewal: # filter
# replenish with randn boxes
if num_remain < self.num_proposals:
noise_bboxes = torch.cat(
(noise_bboxes,
torch.randn(
self.num_proposals - num_remain,
4,
device=device)),
dim=0)
else:
select_mask = [True] * self.num_proposals + \
[False] * (num_remain -
self.num_proposals)
random.shuffle(select_mask)
noise_bboxes = noise_bboxes[select_mask]
# raw noise boxes
batch_noise_bboxes_raw_list.append(noise_bboxes)
# resize to xyxy
noise_bboxes = torch.clamp(
noise_bboxes,
min=-1 * self.snr_scale,
max=self.snr_scale)
noise_bboxes = ((noise_bboxes / self.snr_scale) + 1) / 2
noise_bboxes = bbox_cxcywh_to_xyxy(noise_bboxes)
noise_bboxes = noise_bboxes * batch_image_size[idx]
batch_noise_bboxes_list.append(noise_bboxes)
batch_noise_bboxes = torch.stack(batch_noise_bboxes_list)
batch_noise_bboxes_raw = torch.stack(batch_noise_bboxes_raw_list)
if self.use_ensemble and self.sampling_timesteps > 1:
box_pred_per_image, scores_per_image, labels_per_image = \
self.inference(
box_cls=pred_logits[-1],
box_pred=pred_bboxes[-1],
cfg=cfg,
device=device)
ensemble_score.append(scores_per_image)
ensemble_label.append(labels_per_image)
ensemble_coord.append(box_pred_per_image)
if self.use_ensemble and self.sampling_timesteps > 1:
steps = len(ensemble_score)
results_list = []
for idx in range(batch_size):
ensemble_score_per_img = [
ensemble_score[i][idx] for i in range(steps)
]
ensemble_label_per_img = [
ensemble_label[i][idx] for i in range(steps)
]
ensemble_coord_per_img = [
ensemble_coord[i][idx] for i in range(steps)
]
scores_per_image = torch.cat(ensemble_score_per_img, dim=0)
labels_per_image = torch.cat(ensemble_label_per_img, dim=0)
box_pred_per_image = torch.cat(ensemble_coord_per_img, dim=0)
if self.use_nms:
det_bboxes, keep_idxs = batched_nms(
box_pred_per_image, scores_per_image, labels_per_image,
cfg.nms)
box_pred_per_image = box_pred_per_image[keep_idxs]
labels_per_image = labels_per_image[keep_idxs]
scores_per_image = det_bboxes[:, -1]
results = InstanceData()
results.bboxes = box_pred_per_image
results.scores = scores_per_image
results.labels = labels_per_image
results_list.append(results)
else:
box_cls = pred_logits[-1]
box_pred = pred_bboxes[-1]
results_list = self.inference(box_cls, box_pred, cfg, device)
if rescale:
results_list = self.do_results_post_process(
results_list, cfg, batch_img_metas=batch_img_metas)
return results_list
@staticmethod
def do_results_post_process(results_list, cfg, batch_img_metas=None):
processed_results = []
for results, img_meta in zip(results_list, batch_img_metas):
assert img_meta.get('scale_factor') is not None
scale_factor = [1 / s for s in img_meta['scale_factor']]
results.bboxes = scale_boxes(results.bboxes, scale_factor)
# clip w, h
h, w = img_meta['ori_shape']
results.bboxes[:, 0::2] = results.bboxes[:, 0::2].clamp(
min=0, max=w)
results.bboxes[:, 1::2] = results.bboxes[:, 1::2].clamp(
min=0, max=h)
# filter small size bboxes
if cfg.get('min_bbox_size', 0) >= 0:
w, h = get_box_wh(results.bboxes)
valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)
if not valid_mask.all():
results = results[valid_mask]
processed_results.append(results)
return processed_results
def prepare_testing_targets(self, batch_img_metas, device):
# [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == timesteps
times = torch.linspace(
-1, self.timesteps - 1, steps=self.sampling_timesteps + 1)
times = list(reversed(times.int().tolist()))
# [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]
time_pairs = list(zip(times[:-1], times[1:]))
noise_bboxes_list = []
noise_bboxes_raw_list = []
image_size_list = []
for img_meta in batch_img_metas:
h, w = img_meta['img_shape']
image_size = torch.tensor([w, h, w, h],
dtype=torch.float32,
device=device)
noise_bboxes_raw = torch.randn((self.num_proposals, 4),
device=device)
noise_bboxes = torch.clamp(
noise_bboxes_raw, min=-1 * self.snr_scale, max=self.snr_scale)
noise_bboxes = ((noise_bboxes / self.snr_scale) + 1) / 2
noise_bboxes = bbox_cxcywh_to_xyxy(noise_bboxes)
noise_bboxes = noise_bboxes * image_size
noise_bboxes_raw_list.append(noise_bboxes_raw)
noise_bboxes_list.append(noise_bboxes)
image_size_list.append(image_size[None])
batch_noise_bboxes = torch.stack(noise_bboxes_list)
batch_image_size = torch.cat(image_size_list)
batch_noise_bboxes_raw = torch.stack(noise_bboxes_raw_list)
return (time_pairs, batch_noise_bboxes, batch_noise_bboxes_raw,
batch_image_size)
def predict_noise_from_start(self, x_t, t, x0):
results = (extract(
self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / \
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
return results
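    # --- Added note (not part of the original file) ---
    # This inverts `q_sample`: from
    #     x_t = sqrt(abar_t) * x0 + sqrt(1 - abar_t) * eps
    # it follows that
    #     eps = (sqrt(1 / abar_t) * x_t - x0) / sqrt(1 / abar_t - 1),
    # which is exactly the expression above written with the registered
    # `sqrt_recip_alphas_cumprod` and `sqrt_recipm1_alphas_cumprod` buffers.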
def inference(self, box_cls, box_pred, cfg, device):
"""
Args:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for
each proposal.
box_pred (Tensor): tensor of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
Returns:
results (List[Instances]): a list of #images elements.
"""
results = []
if self.use_focal_loss or self.use_fed_loss:
scores = torch.sigmoid(box_cls)
labels = torch.arange(
self.num_classes,
device=device).unsqueeze(0).repeat(self.num_proposals,
1).flatten(0, 1)
box_pred_list = []
scores_list = []
labels_list = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = labels[topk_indices]
box_pred_per_image = box_pred_per_image.view(-1, 1, 4).repeat(
1, self.num_classes, 1).view(-1, 4)
box_pred_per_image = box_pred_per_image[topk_indices]
if self.use_ensemble and self.sampling_timesteps > 1:
box_pred_list.append(box_pred_per_image)
scores_list.append(scores_per_image)
labels_list.append(labels_per_image)
continue
if self.use_nms:
det_bboxes, keep_idxs = batched_nms(
box_pred_per_image, scores_per_image, labels_per_image,
cfg.nms)
box_pred_per_image = box_pred_per_image[keep_idxs]
labels_per_image = labels_per_image[keep_idxs]
# some nms would reweight the score, such as softnms
scores_per_image = det_bboxes[:, -1]
result = InstanceData()
result.bboxes = box_pred_per_image
result.scores = scores_per_image
result.labels = labels_per_image
results.append(result)
else:
# For each box we assign the best class or the second
# best if the best one is `no_object`.
scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)
for i, (scores_per_image, labels_per_image,
box_pred_per_image) in enumerate(
zip(scores, labels, box_pred)):
if self.use_ensemble and self.sampling_timesteps > 1:
return box_pred_per_image, scores_per_image, \
labels_per_image
if self.use_nms:
det_bboxes, keep_idxs = batched_nms(
box_pred_per_image, scores_per_image, labels_per_image,
cfg.nms)
box_pred_per_image = box_pred_per_image[keep_idxs]
labels_per_image = labels_per_image[keep_idxs]
# some nms would reweight the score, such as softnms
scores_per_image = det_bboxes[:, -1]
result = InstanceData()
result.bboxes = box_pred_per_image
result.scores = scores_per_image
result.labels = labels_per_image
results.append(result)
if self.use_ensemble and self.sampling_timesteps > 1:
return box_pred_list, scores_list, labels_list
else:
return results
@MODELS.register_module()
class SingleDiffusionDetHead(nn.Module):
def __init__(
self,
num_classes=80,
feat_channels=256,
dim_feedforward=2048,
num_cls_convs=1,
num_reg_convs=3,
num_heads=8,
dropout=0.0,
pooler_resolution=7,
scale_clamp=_DEFAULT_SCALE_CLAMP,
bbox_weights=(2.0, 2.0, 1.0, 1.0),
use_focal_loss=True,
use_fed_loss=False,
act_cfg=dict(type='ReLU', inplace=True),
dynamic_conv=dict(dynamic_dim=64, dynamic_num=2)
) -> None:
super().__init__()
self.feat_channels = feat_channels
# Dynamic
self.self_attn = nn.MultiheadAttention(
feat_channels, num_heads, dropout=dropout)
self.inst_interact = DynamicConv(
feat_channels=feat_channels,
pooler_resolution=pooler_resolution,
dynamic_dim=dynamic_conv['dynamic_dim'],
dynamic_num=dynamic_conv['dynamic_num'])
self.linear1 = nn.Linear(feat_channels, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, feat_channels)
self.norm1 = nn.LayerNorm(feat_channels)
self.norm2 = nn.LayerNorm(feat_channels)
self.norm3 = nn.LayerNorm(feat_channels)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = build_activation_layer(act_cfg)
# block time mlp
self.block_time_mlp = nn.Sequential(
nn.SiLU(), nn.Linear(feat_channels * 4, feat_channels * 2))
# cls.
cls_module = list()
for _ in range(num_cls_convs):
cls_module.append(nn.Linear(feat_channels, feat_channels, False))
cls_module.append(nn.LayerNorm(feat_channels))
cls_module.append(nn.ReLU(inplace=True))
self.cls_module = nn.ModuleList(cls_module)
# reg.
reg_module = list()
for _ in range(num_reg_convs):
reg_module.append(nn.Linear(feat_channels, feat_channels, False))
reg_module.append(nn.LayerNorm(feat_channels))
reg_module.append(nn.ReLU(inplace=True))
self.reg_module = nn.ModuleList(reg_module)
# pred.
self.use_focal_loss = use_focal_loss
self.use_fed_loss = use_fed_loss
if self.use_focal_loss or self.use_fed_loss:
self.class_logits = nn.Linear(feat_channels, num_classes)
else:
self.class_logits = nn.Linear(feat_channels, num_classes + 1)
self.bboxes_delta = nn.Linear(feat_channels, 4)
self.scale_clamp = scale_clamp
self.bbox_weights = bbox_weights
def forward(self, features, bboxes, pro_features, pooler, time_emb):
"""
:param bboxes: (N, num_boxes, 4)
:param pro_features: (N, num_boxes, feat_channels)
"""
N, num_boxes = bboxes.shape[:2]
# roi_feature.
proposal_boxes = list()
for b in range(N):
proposal_boxes.append(bboxes[b])
rois = bbox2roi(proposal_boxes)
roi_features = pooler(features, rois)
if pro_features is None:
pro_features = roi_features.view(N, num_boxes, self.feat_channels,
-1).mean(-1)
roi_features = roi_features.view(N * num_boxes, self.feat_channels,
-1).permute(2, 0, 1)
# self_att.
pro_features = pro_features.view(N, num_boxes,
self.feat_channels).permute(1, 0, 2)
pro_features2 = self.self_attn(
pro_features, pro_features, value=pro_features)[0]
pro_features = pro_features + self.dropout1(pro_features2)
pro_features = self.norm1(pro_features)
# inst_interact.
pro_features = pro_features.view(
num_boxes, N,
self.feat_channels).permute(1, 0,
2).reshape(1, N * num_boxes,
self.feat_channels)
pro_features2 = self.inst_interact(pro_features, roi_features)
pro_features = pro_features + self.dropout2(pro_features2)
obj_features = self.norm2(pro_features)
# obj_feature.
obj_features2 = self.linear2(
self.dropout(self.activation(self.linear1(obj_features))))
obj_features = obj_features + self.dropout3(obj_features2)
obj_features = self.norm3(obj_features)
fc_feature = obj_features.transpose(0, 1).reshape(N * num_boxes, -1)
scale_shift = self.block_time_mlp(time_emb)
scale_shift = torch.repeat_interleave(scale_shift, num_boxes, dim=0)
scale, shift = scale_shift.chunk(2, dim=1)
fc_feature = fc_feature * (scale + 1) + shift
cls_feature = fc_feature.clone()
reg_feature = fc_feature.clone()
for cls_layer in self.cls_module:
cls_feature = cls_layer(cls_feature)
for reg_layer in self.reg_module:
reg_feature = reg_layer(reg_feature)
class_logits = self.class_logits(cls_feature)
bboxes_deltas = self.bboxes_delta(reg_feature)
pred_bboxes = self.apply_deltas(bboxes_deltas, bboxes.view(-1, 4))
return (class_logits.view(N, num_boxes,
-1), pred_bboxes.view(N, num_boxes,
-1), obj_features)
def apply_deltas(self, deltas, boxes):
"""Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.
Args:
deltas (Tensor): transformation deltas of shape (N, k*4),
where k >= 1. deltas[i] represents k potentially
different class-specific box transformations for
the single box boxes[i].
boxes (Tensor): boxes to transform, of shape (N, 4)
"""
boxes = boxes.to(deltas.dtype)
widths = boxes[:, 2] - boxes[:, 0]
heights = boxes[:, 3] - boxes[:, 1]
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = self.bbox_weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.scale_clamp)
dh = torch.clamp(dh, max=self.scale_clamp)
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = torch.zeros_like(deltas)
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2
return pred_boxes
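# --- Added illustrative sketch (not part of the original file); the helper
# name below is hypothetical. With all-zero deltas, `apply_deltas` must return
# the input boxes unchanged (round trip through the cxcywh parametrization),
# assuming the default hyper-parameters of SingleDiffusionDetHead:
def _demo_apply_deltas():
    import torch
    head = SingleDiffusionDetHead()    # default num_classes/feat_channels/etc.
    boxes = torch.tensor([[10., 20., 50., 80.]])
    deltas = torch.zeros(1, 4)
    out = head.apply_deltas(deltas, boxes)
    assert torch.allclose(out, boxes)
    return out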
class DynamicConv(nn.Module):
def __init__(self,
feat_channels: int,
dynamic_dim: int = 64,
dynamic_num: int = 2,
pooler_resolution: int = 7) -> None:
super().__init__()
self.feat_channels = feat_channels
self.dynamic_dim = dynamic_dim
self.dynamic_num = dynamic_num
self.num_params = self.feat_channels * self.dynamic_dim
self.dynamic_layer = nn.Linear(self.feat_channels,
self.dynamic_num * self.num_params)
self.norm1 = nn.LayerNorm(self.dynamic_dim)
self.norm2 = nn.LayerNorm(self.feat_channels)
self.activation = nn.ReLU(inplace=True)
num_output = self.feat_channels * pooler_resolution**2
self.out_layer = nn.Linear(num_output, self.feat_channels)
self.norm3 = nn.LayerNorm(self.feat_channels)
def forward(self, pro_features: Tensor, roi_features: Tensor) -> Tensor:
"""Forward function.
Args:
pro_features: (1, N * num_boxes, self.feat_channels)
roi_features: (49, N * num_boxes, self.feat_channels)
Returns:
"""
features = roi_features.permute(1, 0, 2)
parameters = self.dynamic_layer(pro_features).permute(1, 0, 2)
param1 = parameters[:, :, :self.num_params].view(
-1, self.feat_channels, self.dynamic_dim)
param2 = parameters[:, :,
self.num_params:].view(-1, self.dynamic_dim,
self.feat_channels)
features = torch.bmm(features, param1)
features = self.norm1(features)
features = self.activation(features)
features = torch.bmm(features, param2)
features = self.norm2(features)
features = self.activation(features)
features = features.flatten(1)
features = self.out_layer(features)
features = self.norm3(features)
features = self.activation(features)
return features
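# --- Added illustrative sketch (not part of the original file); the helper
# name below is hypothetical. DynamicConv turns each proposal feature into two
# per-proposal weight matrices (C -> dynamic_dim and dynamic_dim -> C), applies
# them to the flattened RoI features and projects back to feat_channels:
def _demo_dynamic_conv():
    import torch
    layer = DynamicConv(
        feat_channels=256, dynamic_dim=64, dynamic_num=2, pooler_resolution=7)
    num_boxes = 5
    pro_features = torch.randn(1, num_boxes, 256)   # (1, N * num_boxes, C)
    roi_features = torch.randn(49, num_boxes, 256)  # (7 * 7, N * num_boxes, C)
    out = layer(pro_features, roi_features)
    assert out.shape == (num_boxes, 256)
    return out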
ERD | ERD-main/projects/DiffusionDet/diffusiondet/diffusiondet.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models import SingleStageDetector
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
@MODELS.register_module()
class DiffusionDet(SingleStageDetector):
"""Implementation of `DiffusionDet <>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
ERD | ERD-main/projects/DiffusionDet/diffusiondet/__init__.py
from .diffusiondet import DiffusionDet
from .head import (DynamicConv, DynamicDiffusionDetHead,
SingleDiffusionDetHead, SinusoidalPositionEmbeddings)
from .loss import DiffusionDetCriterion, DiffusionDetMatcher
__all__ = [
'DiffusionDet', 'DynamicDiffusionDetHead', 'SingleDiffusionDetHead',
'SinusoidalPositionEmbeddings', 'DynamicConv', 'DiffusionDetCriterion',
'DiffusionDetMatcher'
]
ERD | ERD-main/projects/DiffusionDet/configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py
_base_ = [
'mmdet::_base_/datasets/coco_detection.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
custom_imports = dict(
imports=['projects.DiffusionDet.diffusiondet'], allow_failed_imports=False)
# model settings
model = dict(
type='DiffusionDet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=4),
bbox_head=dict(
type='DynamicDiffusionDetHead',
num_classes=80,
feat_channels=256,
num_proposals=500,
num_heads=6,
deep_supervision=True,
prior_prob=0.01,
snr_scale=2.0,
sampling_timesteps=1,
ddim_sampling_eta=1.0,
single_head=dict(
type='SingleDiffusionDetHead',
num_cls_convs=1,
num_reg_convs=3,
dim_feedforward=2048,
num_heads=8,
dropout=0.0,
act_cfg=dict(type='ReLU', inplace=True),
dynamic_conv=dict(dynamic_dim=64, dynamic_num=2)),
roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
# criterion
criterion=dict(
type='DiffusionDetCriterion',
num_classes=80,
assigner=dict(
type='DiffusionDetMatcher',
match_costs=[
dict(
type='FocalLossCost',
alpha=0.25,
gamma=2.0,
weight=2.0,
eps=1e-8),
dict(type='BBoxL1Cost', weight=5.0, box_format='xyxy'),
dict(type='IoUCost', iou_mode='giou', weight=2.0)
],
center_radius=2.5,
candidate_topk=5),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
alpha=0.25,
gamma=2.0,
reduction='sum',
loss_weight=2.0),
loss_bbox=dict(type='L1Loss', reduction='sum', loss_weight=5.0),
loss_giou=dict(type='GIoULoss', reduction='sum',
loss_weight=2.0))),
test_cfg=dict(
use_nms=True,
score_thr=0.5,
min_bbox_size=0,
nms=dict(type='nms', iou_threshold=0.5),
))
backend = 'pillow'
train_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=_base_.backend_args,
imdecode_backend=backend),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True,
backend=backend),
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True,
backend=backend),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True,
backend=backend)
]]),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=_base_.backend_args,
imdecode_backend=backend),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
sampler=dict(type='InfiniteSampler'),
dataset=dict(
filter_cfg=dict(filter_empty_gt=False, min_size=1e-5),
pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(
_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001),
clip_grad=dict(max_norm=1.0, norm_type=2))
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=450000,
val_interval=75000)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.01, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=450000,
by_epoch=False,
milestones=[350000, 420000],
gamma=0.1)
]
default_hooks = dict(
checkpoint=dict(by_epoch=False, interval=75000, max_keep_ckpts=3))
log_processor = dict(by_epoch=False)
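# --- Added note (not part of the original file); paths below are assumptions.
# A typical way to consume this config with the standard mmengine/mmdet APIs:
#   from mmengine.config import Config
#   from mmdet.registry import MODELS
#   cfg = Config.fromfile(
#       'projects/DiffusionDet/configs/'
#       'diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py')
#   model = MODELS.build(cfg.model)
# or train it with the usual entry point:
#   python tools/train.py <path to this config>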
ERD | ERD-main/projects/DiffusionDet/model_converters/diffusiondet_resnet_to_mmdet.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import numpy as np
import torch
from mmengine.fileio import load
def convert(src, dst):
if src.endswith('pth'):
src_model = torch.load(src)
else:
src_model = load(src)
dst_state_dict = OrderedDict()
for k, v in src_model['model'].items():
key_name_split = k.split('.')
if 'backbone.fpn_lateral' in k:
lateral_id = int(key_name_split[-2][-1])
name = f'neck.lateral_convs.{lateral_id - 2}.' \
f'conv.{key_name_split[-1]}'
elif 'backbone.fpn_output' in k:
lateral_id = int(key_name_split[-2][-1])
name = f'neck.fpn_convs.{lateral_id - 2}.conv.' \
f'{key_name_split[-1]}'
elif 'backbone.bottom_up.stem.conv1.norm.' in k:
name = f'backbone.bn1.{key_name_split[-1]}'
elif 'backbone.bottom_up.stem.conv1.' in k:
name = f'backbone.conv1.{key_name_split[-1]}'
elif 'backbone.bottom_up.res' in k:
# weight_type = key_name_split[-1]
res_id = int(key_name_split[2][-1]) - 1
# deal with short cut
if 'shortcut' in key_name_split[4]:
if 'shortcut' == key_name_split[-2]:
name = f'backbone.layer{res_id}.' \
f'{key_name_split[3]}.downsample.0.' \
f'{key_name_split[-1]}'
elif 'shortcut' == key_name_split[-3]:
name = f'backbone.layer{res_id}.' \
f'{key_name_split[3]}.downsample.1.' \
f'{key_name_split[-1]}'
else:
print(f'Invalid key {k}')
# deal with conv
elif 'conv' in key_name_split[-2]:
conv_id = int(key_name_split[-2][-1])
name = f'backbone.layer{res_id}.{key_name_split[3]}' \
f'.conv{conv_id}.{key_name_split[-1]}'
# deal with BN
elif key_name_split[-2] == 'norm':
conv_id = int(key_name_split[-3][-1])
name = f'backbone.layer{res_id}.{key_name_split[3]}.' \
f'bn{conv_id}.{key_name_split[-1]}'
else:
print(f'{k} is invalid')
elif key_name_split[0] == 'head':
# d2: head.xxx -> mmdet: bbox_head.xxx
name = f'bbox_{k}'
else:
# some base parameters such as beta will not convert
print(f'{k} is not converted!!')
continue
if not isinstance(v, np.ndarray) and not isinstance(v, torch.Tensor):
raise ValueError(
'Unsupported type found in checkpoint! {}: {}'.format(
k, type(v)))
if not isinstance(v, torch.Tensor):
dst_state_dict[name] = torch.from_numpy(v)
else:
dst_state_dict[name] = v
mmdet_model = dict(state_dict=dst_state_dict, meta=dict())
torch.save(mmdet_model, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src detectron model path')
parser.add_argument('dst', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
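# --- Added note (not part of the original file); file names are assumptions.
# Example invocation, converting a Detectron2-style DiffusionDet checkpoint to
# the MMDetection key layout:
#   python projects/DiffusionDet/model_converters/diffusiondet_resnet_to_mmdet.py \
#       diffdet_coco_res50.pth diffusiondet_r50_mmdet.pth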
ERD | ERD-main/projects/SparseInst/sparseinst/sparseinst.py
# Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved
from typing import List, Tuple, Union
import torch
import torch.nn.functional as F
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models import BaseDetector
from mmdet.models.utils import unpack_gt_instances
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList, SampleList
from mmdet.utils import ConfigType, OptConfigType
@torch.jit.script
def rescoring_mask(scores, mask_pred, masks):
mask_pred_ = mask_pred.float()
return scores * ((masks * mask_pred_).sum([1, 2]) /
(mask_pred_.sum([1, 2]) + 1e-6))
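# --- Added illustrative sketch (not part of the original file); the helper
# name below is hypothetical. `rescoring_mask` multiplies each score by the
# mean soft-mask value inside the binarized mask ("maskness"):
def _demo_rescoring_mask():
    import torch
    scores = torch.tensor([0.9, 0.8])
    soft_masks = torch.tensor([[[0.9, 0.1], [0.8, 0.2]],
                               [[0.6, 0.5], [0.4, 0.3]]])  # (N, H, W)
    binary_masks = soft_masks > 0.45
    return rescoring_mask(scores, binary_masks, soft_masks)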
@MODELS.register_module()
class SparseInst(BaseDetector):
"""Implementation of `SparseInst <https://arxiv.org/abs/1912.02424>`_
Args:
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
backbone (:obj:`ConfigDict` or dict): The backbone module.
encoder (:obj:`ConfigDict` or dict): The encoder module.
decoder (:obj:`ConfigDict` or dict): The decoder module.
criterion (:obj:`ConfigDict` or dict, optional): The training matcher
and losses. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of SparseInst. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
data_preprocessor: ConfigType,
backbone: ConfigType,
encoder: ConfigType,
decoder: ConfigType,
criterion: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptConfigType = None):
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
# backbone
self.backbone = MODELS.build(backbone)
# encoder & decoder
self.encoder = MODELS.build(encoder)
self.decoder = MODELS.build(decoder)
# matcher & loss (matcher is built in loss)
self.criterion = MODELS.build(criterion)
# inference
self.cls_threshold = test_cfg.score_thr
self.mask_threshold = test_cfg.mask_thr_binary
def _forward(
self,
batch_inputs: Tensor,
batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
Returns:
tuple[list]: A tuple of features from ``bbox_head`` forward.
"""
x = self.backbone(batch_inputs)
x = self.encoder(x)
results = self.decoder(x)
return results
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances'. And the ``pred_instances`` usually
contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
max_shape = batch_inputs.shape[-2:]
output = self._forward(batch_inputs)
pred_scores = output['pred_logits'].sigmoid()
pred_masks = output['pred_masks'].sigmoid()
pred_objectness = output['pred_scores'].sigmoid()
pred_scores = torch.sqrt(pred_scores * pred_objectness)
results_list = []
for batch_idx, (scores_per_image, mask_pred_per_image,
datasample) in enumerate(
zip(pred_scores, pred_masks, batch_data_samples)):
result = InstanceData()
# max/argmax
scores, labels = scores_per_image.max(dim=-1)
# cls threshold
keep = scores > self.cls_threshold
scores = scores[keep]
labels = labels[keep]
mask_pred_per_image = mask_pred_per_image[keep]
if scores.size(0) == 0:
result.scores = scores
result.labels = labels
results_list.append(result)
continue
img_meta = datasample.metainfo
# rescoring mask using maskness
scores = rescoring_mask(scores,
mask_pred_per_image > self.mask_threshold,
mask_pred_per_image)
h, w = img_meta['img_shape'][:2]
mask_pred_per_image = F.interpolate(
mask_pred_per_image.unsqueeze(1),
size=max_shape,
mode='bilinear',
align_corners=False)[:, :, :h, :w]
if rescale:
ori_h, ori_w = img_meta['ori_shape'][:2]
mask_pred_per_image = F.interpolate(
mask_pred_per_image,
size=(ori_h, ori_w),
mode='bilinear',
align_corners=False).squeeze(1)
mask_pred = mask_pred_per_image > self.mask_threshold
result.masks = mask_pred
result.scores = scores
result.labels = labels
# create an empty bbox in InstanceData to avoid bugs when
# calculating metrics.
result.bboxes = result.scores.new_zeros(len(scores), 4)
results_list.append(result)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Union[dict, list]:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
outs = self._forward(batch_inputs)
(batch_gt_instances, batch_gt_instances_ignore,
batch_img_metas) = unpack_gt_instances(batch_data_samples)
losses = self.criterion(outs, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
return losses
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
Returns:
tuple[Tensor]: Multi-level features that may have
different resolutions.
"""
x = self.backbone(batch_inputs)
x = self.encoder(x)
return x
ERD | ERD-main/projects/SparseInst/sparseinst/loss.py
# Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch.cuda.amp import autocast
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.utils import reduce_mean
def compute_mask_iou(inputs, targets):
inputs = inputs.sigmoid()
# thresholding
binarized_inputs = (inputs >= 0.4).float()
targets = (targets > 0.5).float()
intersection = (binarized_inputs * targets).sum(-1)
union = targets.sum(-1) + binarized_inputs.sum(-1) - intersection
score = intersection / (union + 1e-6)
return score
def dice_score(inputs, targets):
inputs = inputs.sigmoid()
numerator = 2 * torch.matmul(inputs, targets.t())
denominator = (inputs * inputs).sum(-1)[:,
None] + (targets * targets).sum(-1)
score = numerator / (denominator + 1e-4)
return score
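# --- Added illustrative sketch (not part of the original file); the helper
# name below is hypothetical. `dice_score` returns a pairwise
# (num_preds, num_gts) soft-Dice matrix between flattened mask logits and
# binary target masks, which the matcher combines with class probabilities:
def _demo_dice_score():
    import torch
    pred_logits = torch.randn(5, 64)               # 5 masks, 64 pixels each
    targets = (torch.rand(3, 64) > 0.5).float()    # 3 GT masks
    score = dice_score(pred_logits, targets)
    assert score.shape == (5, 3)
    return score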
@MODELS.register_module()
class SparseInstCriterion(nn.Module):
"""This part is partially derivated from:
https://github.com/facebookresearch/detr/blob/main/models/detr.py.
"""
def __init__(
self,
num_classes,
assigner,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
alpha=0.25,
gamma=2.0,
reduction='sum',
loss_weight=2.0),
loss_obj=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=1.0),
loss_mask=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=5.0),
loss_dice=dict(
type='DiceLoss',
use_sigmoid=True,
reduction='sum',
eps=5e-5,
loss_weight=2.0),
):
super().__init__()
self.matcher = TASK_UTILS.build(assigner)
self.num_classes = num_classes
self.loss_cls = MODELS.build(loss_cls)
self.loss_obj = MODELS.build(loss_obj)
self.loss_mask = MODELS.build(loss_mask)
self.loss_dice = MODELS.build(loss_dice)
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat(
[torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat(
[torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def loss_classification(self, outputs, batch_gt_instances, indices,
num_instances):
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat(
[gt.labels[J] for gt, (_, J) in zip(batch_gt_instances, indices)])
target_classes = torch.full(
src_logits.shape[:2],
self.num_classes,
dtype=torch.int64,
device=src_logits.device)
target_classes[idx] = target_classes_o
src_logits = src_logits.flatten(0, 1)
target_classes = target_classes.flatten(0, 1)
# comp focal loss.
class_loss = self.loss_cls(
src_logits,
target_classes,
) / num_instances
return class_loss
def loss_masks_with_iou_objectness(self, outputs, batch_gt_instances,
indices, num_instances):
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
# Bx100xHxW
assert 'pred_masks' in outputs
assert 'pred_scores' in outputs
src_iou_scores = outputs['pred_scores']
src_masks = outputs['pred_masks']
with torch.no_grad():
target_masks = torch.cat([
gt.masks.to_tensor(
dtype=src_masks.dtype, device=src_masks.device)
for gt in batch_gt_instances
])
num_masks = [len(gt.masks) for gt in batch_gt_instances]
target_masks = target_masks.to(src_masks)
if len(target_masks) == 0:
loss_dice = src_masks.sum() * 0.0
loss_mask = src_masks.sum() * 0.0
loss_objectness = src_iou_scores.sum() * 0.0
return loss_objectness, loss_dice, loss_mask
src_masks = src_masks[src_idx]
target_masks = F.interpolate(
target_masks[:, None],
size=src_masks.shape[-2:],
mode='bilinear',
align_corners=False).squeeze(1)
src_masks = src_masks.flatten(1)
# FIXME: tgt_idx
mix_tgt_idx = torch.zeros_like(tgt_idx[1])
cum_sum = 0
for num_mask in num_masks:
mix_tgt_idx[cum_sum:cum_sum + num_mask] = cum_sum
cum_sum += num_mask
mix_tgt_idx += tgt_idx[1]
target_masks = target_masks[mix_tgt_idx].flatten(1)
with torch.no_grad():
ious = compute_mask_iou(src_masks, target_masks)
tgt_iou_scores = ious
src_iou_scores = src_iou_scores[src_idx]
tgt_iou_scores = tgt_iou_scores.flatten(0)
src_iou_scores = src_iou_scores.flatten(0)
loss_objectness = self.loss_obj(src_iou_scores, tgt_iou_scores)
loss_dice = self.loss_dice(src_masks, target_masks) / num_instances
loss_mask = self.loss_mask(src_masks, target_masks)
return loss_objectness, loss_dice, loss_mask
def forward(self, outputs, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore):
# Retrieve the matching between the outputs of
# the last layer and the targets
indices = self.matcher(outputs, batch_gt_instances)
# Compute the average number of target boxes
# across all nodes, for normalization purposes
num_instances = sum(gt.labels.shape[0] for gt in batch_gt_instances)
num_instances = torch.as_tensor([num_instances],
dtype=torch.float,
device=next(iter(
outputs.values())).device)
num_instances = reduce_mean(num_instances).clamp_(min=1).item()
# Compute all the requested losses
loss_cls = self.loss_classification(outputs, batch_gt_instances,
indices, num_instances)
loss_obj, loss_dice, loss_mask = self.loss_masks_with_iou_objectness(
outputs, batch_gt_instances, indices, num_instances)
return dict(
loss_cls=loss_cls,
loss_obj=loss_obj,
loss_dice=loss_dice,
loss_mask=loss_mask)
@TASK_UTILS.register_module()
class SparseInstMatcher(nn.Module):
def __init__(self, alpha=0.8, beta=0.2):
super().__init__()
self.alpha = alpha
self.beta = beta
self.mask_score = dice_score
def forward(self, outputs, batch_gt_instances):
with torch.no_grad():
B, N, H, W = outputs['pred_masks'].shape
pred_masks = outputs['pred_masks']
pred_logits = outputs['pred_logits'].sigmoid()
device = pred_masks.device
tgt_ids = torch.cat([gt.labels for gt in batch_gt_instances])
if tgt_ids.shape[0] == 0:
return [(torch.as_tensor([]).to(pred_logits),
torch.as_tensor([]).to(pred_logits))] * B
tgt_masks = torch.cat([
gt.masks.to_tensor(dtype=pred_masks.dtype, device=device)
for gt in batch_gt_instances
])
tgt_masks = F.interpolate(
tgt_masks[:, None],
size=pred_masks.shape[-2:],
mode='bilinear',
align_corners=False).squeeze(1)
pred_masks = pred_masks.view(B * N, -1)
tgt_masks = tgt_masks.flatten(1)
with autocast(enabled=False):
pred_masks = pred_masks.float()
tgt_masks = tgt_masks.float()
pred_logits = pred_logits.float()
mask_score = self.mask_score(pred_masks, tgt_masks)
# Nx(Number of gts)
matching_prob = pred_logits.view(B * N, -1)[:, tgt_ids]
C = (mask_score**self.alpha) * (matching_prob**self.beta)
C = C.view(B, N, -1).cpu()
# hungarian matching
sizes = [len(gt.masks) for gt in batch_gt_instances]
indices = [
linear_sum_assignment(c[i], maximize=True)
for i, c in enumerate(C.split(sizes, -1))
]
indices = [(torch.as_tensor(i, dtype=torch.int64),
torch.as_tensor(j, dtype=torch.int64))
for i, j in indices]
return indices
| 9,212 | 35.852 | 79 |
py
|
ERD
|
ERD-main/projects/SparseInst/sparseinst/encoder.py
|
# Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model.weight_init import caffe2_xavier_init, kaiming_init
from mmdet.registry import MODELS
class PyramidPoolingModule(nn.Module):
def __init__(self,
in_channels,
channels=512,
sizes=(1, 2, 3, 6),
act_cfg=dict(type='ReLU')):
super().__init__()
self.stages = nn.ModuleList(
[self._make_stage(in_channels, channels, size) for size in sizes])
self.bottleneck = nn.Conv2d(in_channels + len(sizes) * channels,
in_channels, 1)
self.act = MODELS.build(act_cfg)
def _make_stage(self, features, out_features, size):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(features, out_features, 1)
return nn.Sequential(prior, conv)
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
priors = [
F.interpolate(
input=self.act(stage(feats)),
size=(h, w),
mode='bilinear',
align_corners=False) for stage in self.stages
] + [feats]
out = self.act(self.bottleneck(torch.cat(priors, 1)))
return out
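# --- Added illustrative sketch (not part of the original file); the helper
# name below is hypothetical. The PPM pools the input at several grid sizes,
# upsamples the pooled maps back and fuses them with the input through a 1x1
# bottleneck, keeping the spatial resolution unchanged:
def _demo_ppm():
    import torch
    ppm = PyramidPoolingModule(in_channels=256, channels=64)
    x = torch.randn(2, 256, 32, 32)
    out = ppm(x)
    assert out.shape == (2, 256, 32, 32)
    return out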
@MODELS.register_module()
class InstanceContextEncoder(nn.Module):
"""
Instance Context Encoder
1. construct feature pyramids from ResNet
2. enlarge receptive fields (ppm)
3. multi-scale fusion
"""
def __init__(self,
in_channels,
out_channels=256,
with_ppm=True,
act_cfg=dict(type='ReLU')):
super().__init__()
self.num_channels = out_channels
self.in_channels = in_channels
self.with_ppm = with_ppm
fpn_laterals = []
fpn_outputs = []
for in_channel in reversed(self.in_channels):
lateral_conv = nn.Conv2d(in_channel, self.num_channels, 1)
output_conv = nn.Conv2d(
self.num_channels, self.num_channels, 3, padding=1)
caffe2_xavier_init(lateral_conv)
caffe2_xavier_init(output_conv)
fpn_laterals.append(lateral_conv)
fpn_outputs.append(output_conv)
self.fpn_laterals = nn.ModuleList(fpn_laterals)
self.fpn_outputs = nn.ModuleList(fpn_outputs)
# ppm
if self.with_ppm:
self.ppm = PyramidPoolingModule(
self.num_channels, self.num_channels // 4, act_cfg=act_cfg)
# final fusion
self.fusion = nn.Conv2d(self.num_channels * 3, self.num_channels, 1)
kaiming_init(self.fusion)
def forward(self, features):
features = features[::-1]
prev_features = self.fpn_laterals[0](features[0])
if self.with_ppm:
prev_features = self.ppm(prev_features)
outputs = [self.fpn_outputs[0](prev_features)]
for feature, lat_conv, output_conv in zip(features[1:],
self.fpn_laterals[1:],
self.fpn_outputs[1:]):
lat_features = lat_conv(feature)
top_down_features = F.interpolate(
prev_features, scale_factor=2.0, mode='nearest')
prev_features = lat_features + top_down_features
outputs.insert(0, output_conv(prev_features))
size = outputs[0].shape[2:]
features = [outputs[0]] + [
F.interpolate(x, size, mode='bilinear', align_corners=False)
for x in outputs[1:]
]
features = self.fusion(torch.cat(features, dim=1))
return features
ERD | ERD-main/projects/SparseInst/sparseinst/decoder.py
# Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model.weight_init import caffe2_xavier_init, kaiming_init
from torch.nn import init
from mmdet.registry import MODELS
def _make_stack_3x3_convs(num_convs,
in_channels,
out_channels,
act_cfg=dict(type='ReLU', inplace=True)):
convs = []
for _ in range(num_convs):
convs.append(nn.Conv2d(in_channels, out_channels, 3, padding=1))
convs.append(MODELS.build(act_cfg))
in_channels = out_channels
return nn.Sequential(*convs)
class InstanceBranch(nn.Module):
def __init__(self,
in_channels,
dim=256,
num_convs=4,
num_masks=100,
num_classes=80,
kernel_dim=128,
act_cfg=dict(type='ReLU', inplace=True)):
super().__init__()
self.num_classes = num_classes
self.inst_convs = _make_stack_3x3_convs(num_convs, in_channels, dim,
act_cfg)
# iam prediction, a simple conv
self.iam_conv = nn.Conv2d(dim, num_masks, 3, padding=1)
# outputs
self.cls_score = nn.Linear(dim, self.num_classes)
self.mask_kernel = nn.Linear(dim, kernel_dim)
self.objectness = nn.Linear(dim, 1)
self.prior_prob = 0.01
self._init_weights()
def _init_weights(self):
for m in self.inst_convs.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
bias_value = -math.log((1 - self.prior_prob) / self.prior_prob)
for module in [self.iam_conv, self.cls_score]:
init.constant_(module.bias, bias_value)
init.normal_(self.iam_conv.weight, std=0.01)
init.normal_(self.cls_score.weight, std=0.01)
init.normal_(self.mask_kernel.weight, std=0.01)
init.constant_(self.mask_kernel.bias, 0.0)
def forward(self, features):
# instance features (x4 convs)
features = self.inst_convs(features)
# predict instance activation maps
iam = self.iam_conv(features)
iam_prob = iam.sigmoid()
B, N = iam_prob.shape[:2]
C = features.size(1)
# BxNxHxW -> BxNx(HW)
iam_prob = iam_prob.view(B, N, -1)
normalizer = iam_prob.sum(-1).clamp(min=1e-6)
iam_prob = iam_prob / normalizer[:, :, None]
# aggregate features: BxCxHxW -> Bx(HW)xC
inst_features = torch.bmm(iam_prob,
features.view(B, C, -1).permute(0, 2, 1))
# predict classification & segmentation kernel & objectness
pred_logits = self.cls_score(inst_features)
pred_kernel = self.mask_kernel(inst_features)
pred_scores = self.objectness(inst_features)
return pred_logits, pred_kernel, pred_scores, iam
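# --- Hedged illustration (not part of the original file) ---------------------
# The core of InstanceBranch is the IAM aggregation above: each of the N
# instance activation maps is normalized over the spatial positions and used
# as weights to pool one feature vector per instance. A standalone sketch with
# made-up sizes:
if __name__ == '__main__':
    B, N, C, H, W = 2, 100, 256, 32, 32
    feats = torch.randn(B, C, H, W)
    iam_prob = torch.rand(B, N, H, W).view(B, N, -1)
    iam_prob = iam_prob / iam_prob.sum(-1, keepdim=True).clamp(min=1e-6)
    inst_feats = torch.bmm(iam_prob, feats.view(B, C, -1).permute(0, 2, 1))
    assert inst_feats.shape == (B, N, C)  # one C-dim vector per instance query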
class MaskBranch(nn.Module):
def __init__(self,
in_channels,
dim=256,
num_convs=4,
kernel_dim=128,
act_cfg=dict(type='ReLU', inplace=True)):
super().__init__()
self.mask_convs = _make_stack_3x3_convs(num_convs, in_channels, dim,
act_cfg)
self.projection = nn.Conv2d(dim, kernel_dim, kernel_size=1)
self._init_weights()
def _init_weights(self):
for m in self.mask_convs.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
kaiming_init(self.projection)
def forward(self, features):
# mask features (x4 convs)
features = self.mask_convs(features)
return self.projection(features)
@MODELS.register_module()
class BaseIAMDecoder(nn.Module):
def __init__(self,
in_channels,
num_classes,
ins_dim=256,
ins_conv=4,
mask_dim=256,
mask_conv=4,
kernel_dim=128,
scale_factor=2.0,
output_iam=False,
num_masks=100,
act_cfg=dict(type='ReLU', inplace=True)):
super().__init__()
        # `in_channels` already includes the 2 coordinate channels that are
        # concatenated in forward() (i.e. encoder output channels + 2).
self.scale_factor = scale_factor
self.output_iam = output_iam
self.inst_branch = InstanceBranch(
in_channels,
dim=ins_dim,
num_convs=ins_conv,
num_masks=num_masks,
num_classes=num_classes,
kernel_dim=kernel_dim,
act_cfg=act_cfg)
self.mask_branch = MaskBranch(
in_channels,
dim=mask_dim,
num_convs=mask_conv,
kernel_dim=kernel_dim,
act_cfg=act_cfg)
@torch.no_grad()
def compute_coordinates_linspace(self, x):
# linspace is not supported in ONNX
h, w = x.size(2), x.size(3)
y_loc = torch.linspace(-1, 1, h, device=x.device)
x_loc = torch.linspace(-1, 1, w, device=x.device)
y_loc, x_loc = torch.meshgrid(y_loc, x_loc)
y_loc = y_loc.expand([x.shape[0], 1, -1, -1])
x_loc = x_loc.expand([x.shape[0], 1, -1, -1])
locations = torch.cat([x_loc, y_loc], 1)
return locations.to(x)
@torch.no_grad()
def compute_coordinates(self, x):
h, w = x.size(2), x.size(3)
y_loc = -1.0 + 2.0 * torch.arange(h, device=x.device) / (h - 1)
x_loc = -1.0 + 2.0 * torch.arange(w, device=x.device) / (w - 1)
y_loc, x_loc = torch.meshgrid(y_loc, x_loc)
y_loc = y_loc.expand([x.shape[0], 1, -1, -1])
x_loc = x_loc.expand([x.shape[0], 1, -1, -1])
locations = torch.cat([x_loc, y_loc], 1)
return locations.to(x)
def forward(self, features):
coord_features = self.compute_coordinates(features)
features = torch.cat([coord_features, features], dim=1)
pred_logits, pred_kernel, pred_scores, iam = self.inst_branch(features)
mask_features = self.mask_branch(features)
N = pred_kernel.shape[1]
# mask_features: BxCxHxW
B, C, H, W = mask_features.shape
pred_masks = torch.bmm(pred_kernel,
mask_features.view(B, C,
H * W)).view(B, N, H, W)
pred_masks = F.interpolate(
pred_masks,
scale_factor=self.scale_factor,
mode='bilinear',
align_corners=False)
output = {
'pred_logits': pred_logits,
'pred_masks': pred_masks,
'pred_scores': pred_scores,
}
if self.output_iam:
iam = F.interpolate(
iam,
scale_factor=self.scale_factor,
mode='bilinear',
align_corners=False)
output['pred_iam'] = iam
return output
class GroupInstanceBranch(nn.Module):
def __init__(self,
in_channels,
num_groups=4,
dim=256,
num_convs=4,
num_masks=100,
num_classes=80,
kernel_dim=128,
act_cfg=dict(type='ReLU', inplace=True)):
super().__init__()
self.num_groups = num_groups
self.num_classes = num_classes
self.inst_convs = _make_stack_3x3_convs(
num_convs, in_channels, dim, act_cfg=act_cfg)
# iam prediction, a group conv
expand_dim = dim * self.num_groups
self.iam_conv = nn.Conv2d(
dim,
num_masks * self.num_groups,
3,
padding=1,
groups=self.num_groups)
# outputs
self.fc = nn.Linear(expand_dim, expand_dim)
self.cls_score = nn.Linear(expand_dim, self.num_classes)
self.mask_kernel = nn.Linear(expand_dim, kernel_dim)
self.objectness = nn.Linear(expand_dim, 1)
self.prior_prob = 0.01
self._init_weights()
def _init_weights(self):
for m in self.inst_convs.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
bias_value = -math.log((1 - self.prior_prob) / self.prior_prob)
for module in [self.iam_conv, self.cls_score]:
init.constant_(module.bias, bias_value)
init.normal_(self.iam_conv.weight, std=0.01)
init.normal_(self.cls_score.weight, std=0.01)
init.normal_(self.mask_kernel.weight, std=0.01)
init.constant_(self.mask_kernel.bias, 0.0)
caffe2_xavier_init(self.fc)
def forward(self, features):
# instance features (x4 convs)
features = self.inst_convs(features)
# predict instance activation maps
iam = self.iam_conv(features)
iam_prob = iam.sigmoid()
B, N = iam_prob.shape[:2]
C = features.size(1)
# BxNxHxW -> BxNx(HW)
iam_prob = iam_prob.view(B, N, -1)
normalizer = iam_prob.sum(-1).clamp(min=1e-6)
iam_prob = iam_prob / normalizer[:, :, None]
# aggregate features: BxCxHxW -> Bx(HW)xC
inst_features = torch.bmm(iam_prob,
features.view(B, C, -1).permute(0, 2, 1))
        # regroup: (B, num_groups * num_masks, C) -> (B, num_masks, num_groups * C)
        inst_features = inst_features.reshape(B, self.num_groups,
                                              N // self.num_groups,
                                              -1).transpose(1, 2).reshape(
                                                  B, N // self.num_groups, -1)
inst_features = F.relu_(self.fc(inst_features))
# predict classification & segmentation kernel & objectness
pred_logits = self.cls_score(inst_features)
pred_kernel = self.mask_kernel(inst_features)
pred_scores = self.objectness(inst_features)
return pred_logits, pred_kernel, pred_scores, iam
@MODELS.register_module()
class GroupIAMDecoder(BaseIAMDecoder):
def __init__(self,
in_channels,
num_classes,
num_groups=4,
ins_dim=256,
ins_conv=4,
mask_dim=256,
mask_conv=4,
kernel_dim=128,
scale_factor=2.0,
output_iam=False,
num_masks=100,
act_cfg=dict(type='ReLU', inplace=True)):
super().__init__(
in_channels=in_channels,
num_classes=num_classes,
ins_dim=ins_dim,
ins_conv=ins_conv,
mask_dim=mask_dim,
mask_conv=mask_conv,
kernel_dim=kernel_dim,
scale_factor=scale_factor,
output_iam=output_iam,
num_masks=num_masks,
act_cfg=act_cfg)
self.inst_branch = GroupInstanceBranch(
in_channels,
num_groups=num_groups,
dim=ins_dim,
num_convs=ins_conv,
num_masks=num_masks,
num_classes=num_classes,
kernel_dim=kernel_dim,
act_cfg=act_cfg)
class GroupInstanceSoftBranch(GroupInstanceBranch):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.softmax_bias = nn.Parameter(torch.ones([
1,
]))
def forward(self, features):
# instance features (x4 convs)
features = self.inst_convs(features)
# predict instance activation maps
iam = self.iam_conv(features)
B, N = iam.shape[:2]
C = features.size(1)
# BxNxHxW -> BxNx(HW)
iam_prob = F.softmax(iam.view(B, N, -1) + self.softmax_bias, dim=-1)
# aggregate features: BxCxHxW -> Bx(HW)xC
inst_features = torch.bmm(iam_prob,
features.view(B, C, -1).permute(0, 2, 1))
inst_features = inst_features.reshape(B, self.num_groups,
N // self.num_groups,
-1).transpose(1, 2).reshape(
B, N // self.num_groups, -1)
inst_features = F.relu_(self.fc(inst_features))
# predict classification & segmentation kernel & objectness
pred_logits = self.cls_score(inst_features)
pred_kernel = self.mask_kernel(inst_features)
pred_scores = self.objectness(inst_features)
return pred_logits, pred_kernel, pred_scores, iam
@MODELS.register_module()
class GroupIAMSoftDecoder(BaseIAMDecoder):
def __init__(self,
in_channels,
num_classes,
num_groups=4,
ins_dim=256,
ins_conv=4,
mask_dim=256,
mask_conv=4,
kernel_dim=128,
scale_factor=2.0,
output_iam=False,
num_masks=100,
act_cfg=dict(type='ReLU', inplace=True)):
super().__init__(
in_channels=in_channels,
num_classes=num_classes,
ins_dim=ins_dim,
ins_conv=ins_conv,
mask_dim=mask_dim,
mask_conv=mask_conv,
kernel_dim=kernel_dim,
scale_factor=scale_factor,
output_iam=output_iam,
num_masks=num_masks,
act_cfg=act_cfg)
self.inst_branch = GroupInstanceSoftBranch(
in_channels,
num_groups=num_groups,
dim=ins_dim,
num_convs=ins_conv,
num_masks=num_masks,
num_classes=num_classes,
kernel_dim=kernel_dim,
act_cfg=act_cfg)
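# --- Hedged usage sketch (not part of the original file) ---------------------
# A shape check for BaseIAMDecoder, assuming 256-channel encoder features (the
# decoder itself appends the 2 coordinate channels in forward()); the 32x32
# spatial size and batch size of 1 are arbitrary illustration values.
if __name__ == '__main__':
    decoder = BaseIAMDecoder(in_channels=256 + 2, num_classes=80)
    out = decoder(torch.randn(1, 256, 32, 32))
    assert out['pred_logits'].shape == (1, 100, 80)
    assert out['pred_masks'].shape == (1, 100, 64, 64)  # 2x upsampled masks
    assert out['pred_scores'].shape == (1, 100, 1)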
| 13,792 | 33.396509 | 79 |
py
|
ERD
|
ERD-main/projects/SparseInst/sparseinst/__init__.py
|
from .decoder import BaseIAMDecoder, GroupIAMDecoder, GroupIAMSoftDecoder
from .encoder import InstanceContextEncoder, PyramidPoolingModule
from .loss import SparseInstCriterion, SparseInstMatcher
from .sparseinst import SparseInst
__all__ = [
    'BaseIAMDecoder', 'GroupIAMDecoder', 'GroupIAMSoftDecoder',
    'InstanceContextEncoder', 'PyramidPoolingModule', 'SparseInstCriterion',
    'SparseInstMatcher', 'SparseInst'
]
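# --- Hedged note (not part of the original file) ------------------------------
# Importing this package is what registers the classes above with mmdet's
# registries, which is why the configs declare
# `custom_imports = dict(imports=['projects.SparseInst.sparseinst'], ...)`.
# With the repository root on PYTHONPATH one can verify, for example:
#
#   from mmdet.registry import MODELS
#   import projects.SparseInst.sparseinst  # noqa: F401  (triggers registration)
#   assert MODELS.get('SparseInst') is not None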
| 376 | 33.272727 | 73 |
py
|
ERD
|
ERD-main/projects/SparseInst/configs/sparseinst_r50_iam_8xb8-ms-270k_coco.py
|
_base_ = [
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
custom_imports = dict(
imports=['projects.SparseInst.sparseinst'], allow_failed_imports=False)
model = dict(
type='SparseInst',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(1, 2, 3),
frozen_stages=0,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
encoder=dict(
type='InstanceContextEncoder',
in_channels=[512, 1024, 2048],
out_channels=256),
decoder=dict(
type='BaseIAMDecoder',
in_channels=256 + 2,
num_classes=80,
ins_dim=256,
ins_conv=4,
mask_dim=256,
mask_conv=4,
kernel_dim=128,
scale_factor=2.0,
output_iam=False,
num_masks=100),
criterion=dict(
type='SparseInstCriterion',
num_classes=80,
assigner=dict(type='SparseInstMatcher', alpha=0.8, beta=0.2),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
alpha=0.25,
gamma=2.0,
reduction='sum',
loss_weight=2.0),
loss_obj=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=1.0),
loss_mask=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=5.0),
loss_dice=dict(
type='DiceLoss',
use_sigmoid=True,
reduction='sum',
eps=5e-5,
loss_weight=2.0),
),
test_cfg=dict(score_thr=0.005, mask_thr_binary=0.45))
backend = 'pillow'
train_pipeline = [
dict(
type='LoadImageFromFile',
backend_args={{_base_.backend_args}},
imdecode_backend=backend),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(416, 853), (448, 853), (480, 853), (512, 853), (544, 853),
(576, 853), (608, 853), (640, 853)],
keep_ratio=True,
backend=backend),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args={{_base_.backend_args}},
imdecode_backend=backend),
dict(type='Resize', scale=(640, 853), keep_ratio=True, backend=backend),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=8,
sampler=dict(type='InfiniteSampler'),
dataset=dict(pipeline=train_pipeline))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
val_dataloader = test_dataloader
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(_delete_=True, type='AdamW', lr=0.00005, weight_decay=0.05))
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=270000,
val_interval=10000)
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=270000,
by_epoch=False,
milestones=[210000, 250000],
gamma=0.1)
]
default_hooks = dict(
checkpoint=dict(by_epoch=False, interval=10000, max_keep_ckpts=3))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64, enable=True)
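# --- Hedged usage sketch (kept as comments so the config stays loadable) -----
# One way to sanity-check this config from the repository root; the API calls
# below are assumptions about the surrounding mmengine/mmdet environment:
#
#   from mmengine.config import Config
#   from mmdet.registry import MODELS
#   from mmdet.utils import register_all_modules
#
#   register_all_modules()
#   cfg = Config.fromfile(
#       'projects/SparseInst/configs/sparseinst_r50_iam_8xb8-ms-270k_coco.py')
#   model = MODELS.build(cfg.model)  # relies on the custom_imports above
#
# Training would normally go through `python tools/train.py <this config>`.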
| 4,133 | 27.122449 | 79 |
py
|
ERD
|
ERD-main/projects/EfficientDet/convert_tf_to_pt.py
|
import argparse
import numpy as np
import torch
from tensorflow.python.training import py_checkpoint_reader
torch.set_printoptions(precision=20)
def tf2pth(v):
if v.ndim == 4:
return np.ascontiguousarray(v.transpose(3, 2, 0, 1))
elif v.ndim == 2:
return np.ascontiguousarray(v.transpose())
return v
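# --- Hedged illustration (kept as comments; not part of the original script) -
# tf2pth() only rearranges memory layouts:
#   conv kernels   (H, W, C_in, C_out) -> (C_out, C_in, H, W)  via transpose(3, 2, 0, 1)
#   dense kernels  (C_in, C_out)       -> (C_out, C_in)        via transpose()
# e.g. a 3x3 conv with 16 input and 32 output channels:
#   v = np.zeros((3, 3, 16, 32)); tf2pth(v).shape == (32, 16, 3, 3)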
def convert_key(model_name, bifpn_repeats, weights):
p6_w1 = [
torch.tensor([-1e4, -1e4], dtype=torch.float64)
for _ in range(bifpn_repeats)
]
p5_w1 = [
torch.tensor([-1e4, -1e4], dtype=torch.float64)
for _ in range(bifpn_repeats)
]
p4_w1 = [
torch.tensor([-1e4, -1e4], dtype=torch.float64)
for _ in range(bifpn_repeats)
]
p3_w1 = [
torch.tensor([-1e4, -1e4], dtype=torch.float64)
for _ in range(bifpn_repeats)
]
p4_w2 = [
torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)
for _ in range(bifpn_repeats)
]
p5_w2 = [
torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)
for _ in range(bifpn_repeats)
]
p6_w2 = [
torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)
for _ in range(bifpn_repeats)
]
p7_w2 = [
torch.tensor([-1e4, -1e4], dtype=torch.float64)
for _ in range(bifpn_repeats)
]
idx2key = {
0: '1.0',
1: '2.0',
2: '2.1',
3: '3.0',
4: '3.1',
5: '4.0',
6: '4.1',
7: '4.2',
8: '4.3',
9: '4.4',
10: '4.5',
11: '5.0',
12: '5.1',
13: '5.2',
14: '5.3',
15: '5.4'
}
m = dict()
for k, v in weights.items():
if 'Exponential' in k or 'global_step' in k:
continue
seg = k.split('/')
if len(seg) == 1:
continue
if seg[2] == 'depthwise_conv2d':
v = v.transpose(1, 0)
if seg[0] == model_name:
if seg[1] == 'stem':
prefix = 'backbone.layers.0'
mapping = {
'conv2d/kernel': 'conv.weight',
'tpu_batch_normalization/beta': 'bn.bias',
'tpu_batch_normalization/gamma': 'bn.weight',
'tpu_batch_normalization/moving_mean': 'bn.running_mean',
'tpu_batch_normalization/moving_variance':
'bn.running_var',
}
suffix = mapping['/'.join(seg[2:])]
m[prefix + '.' + suffix] = v
elif seg[1].startswith('blocks_'):
idx = int(seg[1][7:])
prefix = '.'.join(['backbone', 'layers', idx2key[idx]])
base_mapping = {
'depthwise_conv2d/depthwise_kernel':
'depthwise_conv.conv.weight',
'se/conv2d/kernel': 'se.conv1.conv.weight',
'se/conv2d/bias': 'se.conv1.conv.bias',
'se/conv2d_1/kernel': 'se.conv2.conv.weight',
'se/conv2d_1/bias': 'se.conv2.conv.bias'
}
if idx == 0:
mapping = {
'conv2d/kernel':
'linear_conv.conv.weight',
'tpu_batch_normalization/beta':
'depthwise_conv.bn.bias',
'tpu_batch_normalization/gamma':
'depthwise_conv.bn.weight',
'tpu_batch_normalization/moving_mean':
'depthwise_conv.bn.running_mean',
'tpu_batch_normalization/moving_variance':
'depthwise_conv.bn.running_var',
'tpu_batch_normalization_1/beta':
'linear_conv.bn.bias',
'tpu_batch_normalization_1/gamma':
'linear_conv.bn.weight',
'tpu_batch_normalization_1/moving_mean':
'linear_conv.bn.running_mean',
'tpu_batch_normalization_1/moving_variance':
'linear_conv.bn.running_var',
}
else:
mapping = {
'depthwise_conv2d/depthwise_kernel':
'depthwise_conv.conv.weight',
'conv2d/kernel':
'expand_conv.conv.weight',
'conv2d_1/kernel':
'linear_conv.conv.weight',
'tpu_batch_normalization/beta':
'expand_conv.bn.bias',
'tpu_batch_normalization/gamma':
'expand_conv.bn.weight',
'tpu_batch_normalization/moving_mean':
'expand_conv.bn.running_mean',
'tpu_batch_normalization/moving_variance':
'expand_conv.bn.running_var',
'tpu_batch_normalization_1/beta':
'depthwise_conv.bn.bias',
'tpu_batch_normalization_1/gamma':
'depthwise_conv.bn.weight',
'tpu_batch_normalization_1/moving_mean':
'depthwise_conv.bn.running_mean',
'tpu_batch_normalization_1/moving_variance':
'depthwise_conv.bn.running_var',
'tpu_batch_normalization_2/beta':
'linear_conv.bn.bias',
'tpu_batch_normalization_2/gamma':
'linear_conv.bn.weight',
'tpu_batch_normalization_2/moving_mean':
'linear_conv.bn.running_mean',
'tpu_batch_normalization_2/moving_variance':
'linear_conv.bn.running_var',
}
mapping.update(base_mapping)
suffix = mapping['/'.join(seg[2:])]
m[prefix + '.' + suffix] = v
elif seg[0] == 'resample_p6':
prefix = 'neck.bifpn.0.p5_to_p6.0'
mapping = {
'conv2d/kernel': 'down_conv.weight',
'conv2d/bias': 'down_conv.bias',
'bn/beta': 'bn.bias',
'bn/gamma': 'bn.weight',
'bn/moving_mean': 'bn.running_mean',
'bn/moving_variance': 'bn.running_var',
}
suffix = mapping['/'.join(seg[1:])]
m[prefix + '.' + suffix] = v
elif seg[0] == 'fpn_cells':
fpn_idx = int(seg[1][5:])
prefix = '.'.join(['neck', 'bifpn', str(fpn_idx)])
fnode_id = int(seg[2][5])
if fnode_id == 0:
mapping = {
'op_after_combine5/conv/depthwise_kernel':
'conv6_up.depthwise_conv.weight',
'op_after_combine5/conv/pointwise_kernel':
'conv6_up.pointwise_conv.weight',
'op_after_combine5/conv/bias':
'conv6_up.pointwise_conv.bias',
'op_after_combine5/bn/beta':
'conv6_up.bn.bias',
'op_after_combine5/bn/gamma':
'conv6_up.bn.weight',
'op_after_combine5/bn/moving_mean':
'conv6_up.bn.running_mean',
'op_after_combine5/bn/moving_variance':
'conv6_up.bn.running_var',
}
if seg[3] != 'WSM' and seg[3] != 'WSM_1':
suffix = mapping['/'.join(seg[3:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif seg[3] == 'WSM':
p6_w1[fpn_idx][0] = v
elif seg[3] == 'WSM_1':
p6_w1[fpn_idx][1] = v
if torch.min(p6_w1[fpn_idx]) > -1e4:
m[prefix + '.p6_w1'] = p6_w1[fpn_idx]
elif fnode_id == 1:
base_mapping = {
'op_after_combine6/conv/depthwise_kernel':
'conv5_up.depthwise_conv.weight',
'op_after_combine6/conv/pointwise_kernel':
'conv5_up.pointwise_conv.weight',
'op_after_combine6/conv/bias':
'conv5_up.pointwise_conv.bias',
'op_after_combine6/bn/beta':
'conv5_up.bn.bias',
'op_after_combine6/bn/gamma':
'conv5_up.bn.weight',
'op_after_combine6/bn/moving_mean':
'conv5_up.bn.running_mean',
'op_after_combine6/bn/moving_variance':
'conv5_up.bn.running_var',
}
if fpn_idx == 0:
mapping = {
'resample_0_2_6/conv2d/kernel':
'p5_down_channel.down_conv.weight',
'resample_0_2_6/conv2d/bias':
'p5_down_channel.down_conv.bias',
'resample_0_2_6/bn/beta':
'p5_down_channel.bn.bias',
'resample_0_2_6/bn/gamma':
'p5_down_channel.bn.weight',
'resample_0_2_6/bn/moving_mean':
'p5_down_channel.bn.running_mean',
'resample_0_2_6/bn/moving_variance':
'p5_down_channel.bn.running_var',
}
base_mapping.update(mapping)
if seg[3] != 'WSM' and seg[3] != 'WSM_1':
suffix = base_mapping['/'.join(seg[3:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif seg[3] == 'WSM':
p5_w1[fpn_idx][0] = v
elif seg[3] == 'WSM_1':
p5_w1[fpn_idx][1] = v
if torch.min(p5_w1[fpn_idx]) > -1e4:
m[prefix + '.p5_w1'] = p5_w1[fpn_idx]
elif fnode_id == 2:
base_mapping = {
'op_after_combine7/conv/depthwise_kernel':
'conv4_up.depthwise_conv.weight',
'op_after_combine7/conv/pointwise_kernel':
'conv4_up.pointwise_conv.weight',
'op_after_combine7/conv/bias':
'conv4_up.pointwise_conv.bias',
'op_after_combine7/bn/beta':
'conv4_up.bn.bias',
'op_after_combine7/bn/gamma':
'conv4_up.bn.weight',
'op_after_combine7/bn/moving_mean':
'conv4_up.bn.running_mean',
'op_after_combine7/bn/moving_variance':
'conv4_up.bn.running_var',
}
if fpn_idx == 0:
mapping = {
'resample_0_1_7/conv2d/kernel':
'p4_down_channel.down_conv.weight',
'resample_0_1_7/conv2d/bias':
'p4_down_channel.down_conv.bias',
'resample_0_1_7/bn/beta':
'p4_down_channel.bn.bias',
'resample_0_1_7/bn/gamma':
'p4_down_channel.bn.weight',
'resample_0_1_7/bn/moving_mean':
'p4_down_channel.bn.running_mean',
'resample_0_1_7/bn/moving_variance':
'p4_down_channel.bn.running_var',
}
base_mapping.update(mapping)
if seg[3] != 'WSM' and seg[3] != 'WSM_1':
suffix = base_mapping['/'.join(seg[3:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif seg[3] == 'WSM':
p4_w1[fpn_idx][0] = v
elif seg[3] == 'WSM_1':
p4_w1[fpn_idx][1] = v
if torch.min(p4_w1[fpn_idx]) > -1e4:
m[prefix + '.p4_w1'] = p4_w1[fpn_idx]
elif fnode_id == 3:
base_mapping = {
'op_after_combine8/conv/depthwise_kernel':
'conv3_up.depthwise_conv.weight',
'op_after_combine8/conv/pointwise_kernel':
'conv3_up.pointwise_conv.weight',
'op_after_combine8/conv/bias':
'conv3_up.pointwise_conv.bias',
'op_after_combine8/bn/beta':
'conv3_up.bn.bias',
'op_after_combine8/bn/gamma':
'conv3_up.bn.weight',
'op_after_combine8/bn/moving_mean':
'conv3_up.bn.running_mean',
'op_after_combine8/bn/moving_variance':
'conv3_up.bn.running_var',
}
if fpn_idx == 0:
mapping = {
'resample_0_0_8/conv2d/kernel':
'p3_down_channel.down_conv.weight',
'resample_0_0_8/conv2d/bias':
'p3_down_channel.down_conv.bias',
'resample_0_0_8/bn/beta':
'p3_down_channel.bn.bias',
'resample_0_0_8/bn/gamma':
'p3_down_channel.bn.weight',
'resample_0_0_8/bn/moving_mean':
'p3_down_channel.bn.running_mean',
'resample_0_0_8/bn/moving_variance':
'p3_down_channel.bn.running_var',
}
base_mapping.update(mapping)
if seg[3] != 'WSM' and seg[3] != 'WSM_1':
suffix = base_mapping['/'.join(seg[3:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif seg[3] == 'WSM':
p3_w1[fpn_idx][0] = v
elif seg[3] == 'WSM_1':
p3_w1[fpn_idx][1] = v
if torch.min(p3_w1[fpn_idx]) > -1e4:
m[prefix + '.p3_w1'] = p3_w1[fpn_idx]
elif fnode_id == 4:
base_mapping = {
'op_after_combine9/conv/depthwise_kernel':
'conv4_down.depthwise_conv.weight',
'op_after_combine9/conv/pointwise_kernel':
'conv4_down.pointwise_conv.weight',
'op_after_combine9/conv/bias':
'conv4_down.pointwise_conv.bias',
'op_after_combine9/bn/beta':
'conv4_down.bn.bias',
'op_after_combine9/bn/gamma':
'conv4_down.bn.weight',
'op_after_combine9/bn/moving_mean':
'conv4_down.bn.running_mean',
'op_after_combine9/bn/moving_variance':
'conv4_down.bn.running_var',
}
if fpn_idx == 0:
mapping = {
'resample_0_1_9/conv2d/kernel':
'p4_level_connection.down_conv.weight',
'resample_0_1_9/conv2d/bias':
'p4_level_connection.down_conv.bias',
'resample_0_1_9/bn/beta':
'p4_level_connection.bn.bias',
'resample_0_1_9/bn/gamma':
'p4_level_connection.bn.weight',
'resample_0_1_9/bn/moving_mean':
'p4_level_connection.bn.running_mean',
'resample_0_1_9/bn/moving_variance':
'p4_level_connection.bn.running_var',
}
base_mapping.update(mapping)
if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
suffix = base_mapping['/'.join(seg[3:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif seg[3] == 'WSM':
p4_w2[fpn_idx][0] = v
elif seg[3] == 'WSM_1':
p4_w2[fpn_idx][1] = v
elif seg[3] == 'WSM_2':
p4_w2[fpn_idx][2] = v
if torch.min(p4_w2[fpn_idx]) > -1e4:
m[prefix + '.p4_w2'] = p4_w2[fpn_idx]
elif fnode_id == 5:
base_mapping = {
'op_after_combine10/conv/depthwise_kernel':
'conv5_down.depthwise_conv.weight',
'op_after_combine10/conv/pointwise_kernel':
'conv5_down.pointwise_conv.weight',
'op_after_combine10/conv/bias':
'conv5_down.pointwise_conv.bias',
'op_after_combine10/bn/beta':
'conv5_down.bn.bias',
'op_after_combine10/bn/gamma':
'conv5_down.bn.weight',
'op_after_combine10/bn/moving_mean':
'conv5_down.bn.running_mean',
'op_after_combine10/bn/moving_variance':
'conv5_down.bn.running_var',
}
if fpn_idx == 0:
mapping = {
'resample_0_2_10/conv2d/kernel':
'p5_level_connection.down_conv.weight',
'resample_0_2_10/conv2d/bias':
'p5_level_connection.down_conv.bias',
'resample_0_2_10/bn/beta':
'p5_level_connection.bn.bias',
'resample_0_2_10/bn/gamma':
'p5_level_connection.bn.weight',
'resample_0_2_10/bn/moving_mean':
'p5_level_connection.bn.running_mean',
'resample_0_2_10/bn/moving_variance':
'p5_level_connection.bn.running_var',
}
base_mapping.update(mapping)
if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
suffix = base_mapping['/'.join(seg[3:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif seg[3] == 'WSM':
p5_w2[fpn_idx][0] = v
elif seg[3] == 'WSM_1':
p5_w2[fpn_idx][1] = v
elif seg[3] == 'WSM_2':
p5_w2[fpn_idx][2] = v
if torch.min(p5_w2[fpn_idx]) > -1e4:
m[prefix + '.p5_w2'] = p5_w2[fpn_idx]
elif fnode_id == 6:
base_mapping = {
'op_after_combine11/conv/depthwise_kernel':
'conv6_down.depthwise_conv.weight',
'op_after_combine11/conv/pointwise_kernel':
'conv6_down.pointwise_conv.weight',
'op_after_combine11/conv/bias':
'conv6_down.pointwise_conv.bias',
'op_after_combine11/bn/beta':
'conv6_down.bn.bias',
'op_after_combine11/bn/gamma':
'conv6_down.bn.weight',
'op_after_combine11/bn/moving_mean':
'conv6_down.bn.running_mean',
'op_after_combine11/bn/moving_variance':
'conv6_down.bn.running_var',
}
if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
suffix = base_mapping['/'.join(seg[3:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif seg[3] == 'WSM':
p6_w2[fpn_idx][0] = v
elif seg[3] == 'WSM_1':
p6_w2[fpn_idx][1] = v
elif seg[3] == 'WSM_2':
p6_w2[fpn_idx][2] = v
if torch.min(p6_w2[fpn_idx]) > -1e4:
m[prefix + '.p6_w2'] = p6_w2[fpn_idx]
elif fnode_id == 7:
base_mapping = {
'op_after_combine12/conv/depthwise_kernel':
'conv7_down.depthwise_conv.weight',
'op_after_combine12/conv/pointwise_kernel':
'conv7_down.pointwise_conv.weight',
'op_after_combine12/conv/bias':
'conv7_down.pointwise_conv.bias',
'op_after_combine12/bn/beta':
'conv7_down.bn.bias',
'op_after_combine12/bn/gamma':
'conv7_down.bn.weight',
'op_after_combine12/bn/moving_mean':
'conv7_down.bn.running_mean',
'op_after_combine12/bn/moving_variance':
'conv7_down.bn.running_var',
}
if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
suffix = base_mapping['/'.join(seg[3:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif seg[3] == 'WSM':
p7_w2[fpn_idx][0] = v
elif seg[3] == 'WSM_1':
p7_w2[fpn_idx][1] = v
if torch.min(p7_w2[fpn_idx]) > -1e4:
m[prefix + '.p7_w2'] = p7_w2[fpn_idx]
elif seg[0] == 'box_net':
if 'box-predict' in seg[1]:
prefix = '.'.join(['bbox_head', 'reg_header'])
base_mapping = {
'depthwise_kernel': 'depthwise_conv.weight',
'pointwise_kernel': 'pointwise_conv.weight',
'bias': 'pointwise_conv.bias'
}
suffix = base_mapping['/'.join(seg[2:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif 'bn' in seg[1]:
bbox_conv_idx = int(seg[1][4])
bbox_bn_idx = int(seg[1][9]) - 3
prefix = '.'.join([
'bbox_head', 'reg_bn_list',
str(bbox_conv_idx),
str(bbox_bn_idx)
])
base_mapping = {
'beta': 'bias',
'gamma': 'weight',
'moving_mean': 'running_mean',
'moving_variance': 'running_var'
}
suffix = base_mapping['/'.join(seg[2:])]
m[prefix + '.' + suffix] = v
else:
bbox_conv_idx = int(seg[1][4])
prefix = '.'.join(
['bbox_head', 'reg_conv_list',
str(bbox_conv_idx)])
base_mapping = {
'depthwise_kernel': 'depthwise_conv.weight',
'pointwise_kernel': 'pointwise_conv.weight',
'bias': 'pointwise_conv.bias'
}
suffix = base_mapping['/'.join(seg[2:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif seg[0] == 'class_net':
if 'class-predict' in seg[1]:
prefix = '.'.join(['bbox_head', 'cls_header'])
base_mapping = {
'depthwise_kernel': 'depthwise_conv.weight',
'pointwise_kernel': 'pointwise_conv.weight',
'bias': 'pointwise_conv.bias'
}
suffix = base_mapping['/'.join(seg[2:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
elif 'bn' in seg[1]:
cls_conv_idx = int(seg[1][6])
cls_bn_idx = int(seg[1][11]) - 3
prefix = '.'.join([
'bbox_head', 'cls_bn_list',
str(cls_conv_idx),
str(cls_bn_idx)
])
base_mapping = {
'beta': 'bias',
'gamma': 'weight',
'moving_mean': 'running_mean',
'moving_variance': 'running_var'
}
suffix = base_mapping['/'.join(seg[2:])]
m[prefix + '.' + suffix] = v
else:
cls_conv_idx = int(seg[1][6])
prefix = '.'.join(
['bbox_head', 'cls_conv_list',
str(cls_conv_idx)])
base_mapping = {
'depthwise_kernel': 'depthwise_conv.weight',
'pointwise_kernel': 'pointwise_conv.weight',
'bias': 'pointwise_conv.bias'
}
suffix = base_mapping['/'.join(seg[2:])]
if 'depthwise_conv' in suffix:
v = v.transpose(1, 0)
m[prefix + '.' + suffix] = v
return m
def parse_args():
parser = argparse.ArgumentParser(
description='convert efficientdet weight from tensorflow to pytorch')
parser.add_argument(
'--backbone',
type=str,
help='efficientnet model name, like efficientnet-b0')
parser.add_argument(
'--tensorflow_weight',
type=str,
help='efficientdet tensorflow weight name, like efficientdet-d0/model')
parser.add_argument(
'--out_weight',
type=str,
help='efficientdet pytorch weight name like demo.pth')
args = parser.parse_args()
return args
def main():
args = parse_args()
model_name = args.backbone
ori_weight_name = args.tensorflow_weight
out_name = args.out_weight
repeat_map = {
0: 3,
1: 4,
2: 5,
3: 6,
4: 7,
5: 7,
6: 8,
7: 8,
}
reader = py_checkpoint_reader.NewCheckpointReader(ori_weight_name)
weights = {
n: torch.as_tensor(tf2pth(reader.get_tensor(n)))
for (n, _) in reader.get_variable_to_shape_map().items()
}
bifpn_repeats = repeat_map[int(model_name[14])]
out = convert_key(model_name, bifpn_repeats, weights)
result = {'state_dict': out}
torch.save(result, out_name)
if __name__ == '__main__':
main()
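# --- Hedged usage note (not part of the original script) ---------------------
# Example invocation from the repository root; the checkpoint names below are
# placeholders matching the argparse help strings, not files shipped here:
#
#   python projects/EfficientDet/convert_tf_to_pt.py \
#       --backbone efficientnet-b0 \
#       --tensorflow_weight efficientdet-d0/model \
#       --out_weight efficientdet-d0.pth
#
# The output is saved as {'state_dict': ...}, so the converted keys can be
# inspected with `torch.load('efficientdet-d0.pth')['state_dict'].keys()`.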
| 26,971 | 42.017544 | 79 |
py
|
ERD
|
ERD-main/projects/EfficientDet/configs/efficientdet_effb3_bifpn_8xb16-crop896-300e_coco-90cls.py
|
_base_ = [
'mmdet::_base_/datasets/coco_detection.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
custom_imports = dict(
imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
image_size = 896
batch_augments = [
dict(type='BatchFixedSizePad', size=(image_size, image_size))
]
dataset_type = 'Coco90Dataset'
evalute_type = 'Coco90Metric'
norm_cfg = dict(type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k_20220119-53b41118.pth' # noqa
model = dict(
type='EfficientDet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=image_size,
batch_augments=batch_augments),
backbone=dict(
type='EfficientNet',
arch='b3',
drop_path_rate=0.3,
out_indices=(3, 4, 5),
frozen_stages=0,
conv_cfg=dict(type='Conv2dSamePadding'),
norm_cfg=norm_cfg,
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
type='BiFPN',
num_stages=6,
in_channels=[48, 136, 384],
out_channels=160,
start_level=0,
norm_cfg=norm_cfg),
bbox_head=dict(
type='EfficientDetSepBNHead',
num_classes=90,
num_ins=5,
in_channels=160,
feat_channels=160,
stacked_convs=4,
norm_cfg=norm_cfg,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[1.0, 0.5, 2.0],
strides=[8, 16, 32, 64, 128],
center_offset=0.5),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=1.5,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='HuberLoss', beta=0.1, loss_weight=50)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0,
ignore_iof_thr=-1),
sampler=dict(
type='PseudoSampler'), # Focal loss should use PseudoSampler
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(
type='soft_nms',
iou_threshold=0.3,
sigma=0.5,
min_score=1e-3,
method='gaussian'),
max_per_img=100))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(image_size, image_size),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(image_size, image_size)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=16,
num_workers=8,
dataset=dict(type=dataset_type, pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(type=dataset_type, pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(type=evalute_type)
test_evaluator = val_evaluator
optim_wrapper = dict(
optimizer=dict(lr=0.16, weight_decay=4e-5),
paramwise_cfg=dict(
norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True),
clip_grad=dict(max_norm=10, norm_type=2))
# learning policy
max_epochs = 300
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=917),
dict(
type='CosineAnnealingLR',
eta_min=0.0,
begin=1,
T_max=299,
end=300,
by_epoch=True,
convert_to_iter_based=True)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=1)
vis_backends = [
dict(type='LocalVisBackend'),
dict(type='TensorboardVisBackend')
]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=15))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49)
]
# cudnn_benchmark=True can accelerate fix-size training
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
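# --- Hedged usage note (kept as comments so the config stays loadable) -------
# This variant uses `Coco90Dataset`/`Coco90Metric` with num_classes=90, i.e.
# the raw 90-category COCO label space rather than the usual 80 classes,
# presumably to stay comparable with the official TensorFlow checkpoints (see
# the tensorflow/ config further down). A quick way to inspect the resolved
# settings (the API usage is an assumption about the environment):
#
#   from mmengine.config import Config
#   cfg = Config.fromfile(
#       'projects/EfficientDet/configs/'
#       'efficientdet_effb3_bifpn_8xb16-crop896-300e_coco-90cls.py')
#   print(cfg.model.bbox_head.num_classes, cfg.image_size)  # 90, 896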
| 5,356 | 30.145349 | 155 |
py
|
ERD
|
ERD-main/projects/EfficientDet/configs/efficientdet_effb3_bifpn_8xb16-crop896-300e_coco.py
|
_base_ = [
'mmdet::_base_/datasets/coco_detection.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
custom_imports = dict(
imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
image_size = 896
batch_augments = [
dict(type='BatchFixedSizePad', size=(image_size, image_size))
]
dataset_type = 'CocoDataset'
evalute_type = 'CocoMetric'
norm_cfg = dict(type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k_20220119-53b41118.pth' # noqa
model = dict(
type='EfficientDet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=image_size,
batch_augments=batch_augments),
backbone=dict(
type='EfficientNet',
arch='b3',
drop_path_rate=0.3,
out_indices=(3, 4, 5),
frozen_stages=0,
conv_cfg=dict(type='Conv2dSamePadding'),
norm_cfg=norm_cfg,
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
type='BiFPN',
num_stages=6,
in_channels=[48, 136, 384],
out_channels=160,
start_level=0,
norm_cfg=norm_cfg),
bbox_head=dict(
type='EfficientDetSepBNHead',
num_classes=80,
num_ins=5,
in_channels=160,
feat_channels=160,
stacked_convs=4,
norm_cfg=norm_cfg,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[1.0, 0.5, 2.0],
strides=[8, 16, 32, 64, 128],
center_offset=0.5),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=1.5,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='HuberLoss', beta=0.1, loss_weight=50)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0,
ignore_iof_thr=-1),
sampler=dict(
type='PseudoSampler'), # Focal loss should use PseudoSampler
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(
type='soft_nms',
iou_threshold=0.3,
sigma=0.5,
min_score=1e-3,
method='gaussian'),
max_per_img=100))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(image_size, image_size),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(image_size, image_size)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=16,
num_workers=8,
dataset=dict(type=dataset_type, pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(type=dataset_type, pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(type=evalute_type)
test_evaluator = val_evaluator
optim_wrapper = dict(
optimizer=dict(lr=0.16, weight_decay=4e-5),
paramwise_cfg=dict(
norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True),
clip_grad=dict(max_norm=10, norm_type=2))
# learning policy
max_epochs = 300
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=917),
dict(
type='CosineAnnealingLR',
eta_min=0.0,
begin=1,
T_max=299,
end=300,
by_epoch=True,
convert_to_iter_based=True)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=1)
vis_backends = [
dict(type='LocalVisBackend'),
dict(type='TensorboardVisBackend')
]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=15))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49)
]
# cudnn_benchmark=True can accelerate fix-size training
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
| 5,352 | 30.122093 | 155 |
py
|
ERD
|
ERD-main/projects/EfficientDet/configs/efficientdet_effb0_bifpn_8xb16-crop512-300e_coco.py
|
_base_ = [
'mmdet::_base_/datasets/coco_detection.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
custom_imports = dict(
imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
image_size = 512
batch_augments = [
dict(type='BatchFixedSizePad', size=(image_size, image_size))
]
dataset_type = 'CocoDataset'
evalute_type = 'CocoMetric'
norm_cfg = dict(type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth' # noqa
model = dict(
type='EfficientDet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=image_size,
batch_augments=batch_augments),
backbone=dict(
type='EfficientNet',
arch='b0',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
conv_cfg=dict(type='Conv2dSamePadding'),
norm_cfg=norm_cfg,
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
type='BiFPN',
num_stages=3,
in_channels=[40, 112, 320],
out_channels=64,
start_level=0,
norm_cfg=norm_cfg),
bbox_head=dict(
type='EfficientDetSepBNHead',
num_classes=80,
num_ins=5,
in_channels=64,
feat_channels=64,
stacked_convs=3,
norm_cfg=norm_cfg,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[1.0, 0.5, 2.0],
strides=[8, 16, 32, 64, 128],
center_offset=0.5),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=1.5,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='HuberLoss', beta=0.1, loss_weight=50)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0,
ignore_iof_thr=-1),
sampler=dict(
type='PseudoSampler'), # Focal loss should use PseudoSampler
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(
type='soft_nms',
iou_threshold=0.3,
sigma=0.5,
min_score=1e-3,
method='gaussian'),
max_per_img=100))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(image_size, image_size),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(image_size, image_size)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=16,
num_workers=8,
dataset=dict(type=dataset_type, pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(type=dataset_type, pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(type=evalute_type)
test_evaluator = val_evaluator
optim_wrapper = dict(
optimizer=dict(lr=0.16, weight_decay=4e-5),
paramwise_cfg=dict(
norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True),
clip_grad=dict(max_norm=10, norm_type=2))
# learning policy
max_epochs = 300
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=917),
dict(
type='CosineAnnealingLR',
eta_min=0.0,
begin=1,
T_max=299,
end=300,
by_epoch=True,
convert_to_iter_based=True)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=1)
vis_backends = [
dict(type='LocalVisBackend'),
dict(type='TensorboardVisBackend')
]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=15))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49)
]
# cudnn_benchmark=True can accelerate fix-size training
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
| 5,349 | 30.104651 | 155 |
py
|
ERD
|
ERD-main/projects/EfficientDet/configs/tensorflow/efficientdet_effb0_bifpn_8xb16-crop512-300e_coco_tf.py
|
_base_ = [
'mmdet::_base_/datasets/coco_detection.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
custom_imports = dict(
imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
image_size = 512
batch_augments = [
dict(type='BatchFixedSizePad', size=(image_size, image_size))
]
dataset_type = 'Coco90Dataset'
evalute_type = 'Coco90Metric'
norm_cfg = dict(type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth' # noqa
model = dict(
type='EfficientDet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=image_size,
batch_augments=batch_augments),
backbone=dict(
type='EfficientNet',
arch='b0',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
conv_cfg=dict(type='Conv2dSamePadding'),
norm_cfg=norm_cfg,
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
type='BiFPN',
num_stages=3,
in_channels=[40, 112, 320],
out_channels=64,
start_level=0,
norm_cfg=norm_cfg),
bbox_head=dict(
type='EfficientDetSepBNHead',
num_classes=90,
num_ins=5,
in_channels=64,
feat_channels=64,
stacked_convs=3,
norm_cfg=norm_cfg,
anchor_generator=dict(
type='YXYXAnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[1.0, 0.5, 2.0],
strides=[8, 16, 32, 64, 128],
center_offset=0.5),
bbox_coder=dict(
type='YXYXDeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=1.5,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='HuberLoss', beta=0.1, loss_weight=50)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='TransMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0,
ignore_iof_thr=-1),
sampler=dict(
type='PseudoSampler'), # Focal loss should use PseudoSampler
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(
type='soft_nms',
iou_threshold=0.3,
sigma=0.5,
min_score=1e-3,
method='gaussian'),
max_per_img=100))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(image_size, image_size),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(image_size, image_size)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=16,
num_workers=8,
dataset=dict(type=dataset_type, pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(type=dataset_type, pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(type=evalute_type)
test_evaluator = val_evaluator
optim_wrapper = dict(
optimizer=dict(lr=0.16, weight_decay=4e-5),
paramwise_cfg=dict(
norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True),
clip_grad=dict(max_norm=10, norm_type=2))
# learning policy
max_epochs = 300
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=917),
dict(
type='CosineAnnealingLR',
eta_min=0.0,
begin=1,
T_max=299,
end=300,
by_epoch=True,
convert_to_iter_based=True)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=1)
vis_backends = [
dict(type='LocalVisBackend'),
dict(type='TensorboardVisBackend')
]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=15))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49)
]
# cudnn_benchmark=True can accelerate fix-size training
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (16 samples per GPU)
auto_scale_lr = dict(base_batch_size=128)
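# --- Hedged note (kept as comments so the config stays loadable) -------------
# Compared with the configs above, this one swaps in YXYX-ordered anchor
# generation and box coding, matching the (ymin, xmin, ymax, xmax) convention
# of the official TensorFlow implementation, so it is presumably meant for
# evaluating checkpoints converted by projects/EfficientDet/convert_tf_to_pt.py,
# e.g.:
#
#   python tools/test.py \
#       projects/EfficientDet/configs/tensorflow/efficientdet_effb0_bifpn_8xb16-crop512-300e_coco_tf.py \
#       efficientdet-d0.pth
#
# (the checkpoint path is the placeholder from the converter example above).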
| 5,366 | 30.203488 | 155 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/efficientdet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.detectors.single_stage import SingleStageDetector
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
@MODELS.register_module()
class EfficientDet(SingleStageDetector):
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 896 | 33.5 | 67 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/bifpn.py
|
from typing import List
import torch
import torch.nn as nn
from mmcv.cnn.bricks import Swish
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptConfigType
from .utils import DepthWiseConvBlock, DownChannelBlock, MaxPool2dSamePadding
class BiFPNStage(nn.Module):
"""
    in_channels: List[int], input dim for P3, P4, P5
    out_channels: int, output dim for P3 - P7
    first_time: bool, whether this is the first BiFPN stage
    apply_bn_for_resampling: bool, whether to apply BN after resampling
    conv_bn_act_pattern: bool, whether to use the conv-bn-act pattern
    norm_cfg: (:obj:`ConfigDict` or dict, optional): Config dict for
        normalization layer.
    epsilon: float, small constant used when normalizing the fusion weights
"""
def __init__(self,
in_channels: List[int],
out_channels: int,
first_time: bool = False,
apply_bn_for_resampling: bool = True,
conv_bn_act_pattern: bool = False,
norm_cfg: OptConfigType = dict(
type='BN', momentum=1e-2, eps=1e-3),
epsilon: float = 1e-4) -> None:
super().__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.first_time = first_time
self.apply_bn_for_resampling = apply_bn_for_resampling
self.conv_bn_act_pattern = conv_bn_act_pattern
self.norm_cfg = norm_cfg
self.epsilon = epsilon
if self.first_time:
self.p5_down_channel = DownChannelBlock(
self.in_channels[-1],
self.out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.p4_down_channel = DownChannelBlock(
self.in_channels[-2],
self.out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.p3_down_channel = DownChannelBlock(
self.in_channels[-3],
self.out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.p5_to_p6 = nn.Sequential(
DownChannelBlock(
self.in_channels[-1],
self.out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg), MaxPool2dSamePadding(3, 2))
self.p6_to_p7 = MaxPool2dSamePadding(3, 2)
self.p4_level_connection = DownChannelBlock(
self.in_channels[-2],
self.out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.p5_level_connection = DownChannelBlock(
self.in_channels[-1],
self.out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.p6_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p5_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p4_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p3_upsample = nn.Upsample(scale_factor=2, mode='nearest')
# bottom to up: feature map down_sample module
self.p4_down_sample = MaxPool2dSamePadding(3, 2)
self.p5_down_sample = MaxPool2dSamePadding(3, 2)
self.p6_down_sample = MaxPool2dSamePadding(3, 2)
self.p7_down_sample = MaxPool2dSamePadding(3, 2)
# Fuse Conv Layers
self.conv6_up = DepthWiseConvBlock(
out_channels,
out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.conv5_up = DepthWiseConvBlock(
out_channels,
out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.conv4_up = DepthWiseConvBlock(
out_channels,
out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.conv3_up = DepthWiseConvBlock(
out_channels,
out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.conv4_down = DepthWiseConvBlock(
out_channels,
out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.conv5_down = DepthWiseConvBlock(
out_channels,
out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.conv6_down = DepthWiseConvBlock(
out_channels,
out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
self.conv7_down = DepthWiseConvBlock(
out_channels,
out_channels,
apply_norm=self.apply_bn_for_resampling,
conv_bn_act_pattern=self.conv_bn_act_pattern,
norm_cfg=norm_cfg)
# weights
self.p6_w1 = nn.Parameter(
torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p6_w1_relu = nn.ReLU()
self.p5_w1 = nn.Parameter(
torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p5_w1_relu = nn.ReLU()
self.p4_w1 = nn.Parameter(
torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p4_w1_relu = nn.ReLU()
self.p3_w1 = nn.Parameter(
torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p3_w1_relu = nn.ReLU()
self.p4_w2 = nn.Parameter(
torch.ones(3, dtype=torch.float32), requires_grad=True)
self.p4_w2_relu = nn.ReLU()
self.p5_w2 = nn.Parameter(
torch.ones(3, dtype=torch.float32), requires_grad=True)
self.p5_w2_relu = nn.ReLU()
self.p6_w2 = nn.Parameter(
torch.ones(3, dtype=torch.float32), requires_grad=True)
self.p6_w2_relu = nn.ReLU()
self.p7_w2 = nn.Parameter(
torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p7_w2_relu = nn.ReLU()
self.swish = Swish()
def combine(self, x):
if not self.conv_bn_act_pattern:
x = self.swish(x)
return x
def forward(self, x):
if self.first_time:
p3, p4, p5 = x
# build feature map P6
p6_in = self.p5_to_p6(p5)
# build feature map P7
p7_in = self.p6_to_p7(p6_in)
p3_in = self.p3_down_channel(p3)
p4_in = self.p4_down_channel(p4)
p5_in = self.p5_down_channel(p5)
else:
p3_in, p4_in, p5_in, p6_in, p7_in = x
# Weights for P6_0 and P7_0 to P6_1
p6_w1 = self.p6_w1_relu(self.p6_w1)
weight = p6_w1 / (torch.sum(p6_w1, dim=0) + self.epsilon)
# Connections for P6_0 and P7_0 to P6_1 respectively
p6_up = self.conv6_up(
self.combine(weight[0] * p6_in +
weight[1] * self.p6_upsample(p7_in)))
# Weights for P5_0 and P6_1 to P5_1
p5_w1 = self.p5_w1_relu(self.p5_w1)
weight = p5_w1 / (torch.sum(p5_w1, dim=0) + self.epsilon)
# Connections for P5_0 and P6_1 to P5_1 respectively
p5_up = self.conv5_up(
self.combine(weight[0] * p5_in +
weight[1] * self.p5_upsample(p6_up)))
# Weights for P4_0 and P5_1 to P4_1
p4_w1 = self.p4_w1_relu(self.p4_w1)
weight = p4_w1 / (torch.sum(p4_w1, dim=0) + self.epsilon)
# Connections for P4_0 and P5_1 to P4_1 respectively
p4_up = self.conv4_up(
self.combine(weight[0] * p4_in +
weight[1] * self.p4_upsample(p5_up)))
# Weights for P3_0 and P4_1 to P3_2
p3_w1 = self.p3_w1_relu(self.p3_w1)
weight = p3_w1 / (torch.sum(p3_w1, dim=0) + self.epsilon)
# Connections for P3_0 and P4_1 to P3_2 respectively
p3_out = self.conv3_up(
self.combine(weight[0] * p3_in +
weight[1] * self.p3_upsample(p4_up)))
if self.first_time:
p4_in = self.p4_level_connection(p4)
p5_in = self.p5_level_connection(p5)
# Weights for P4_0, P4_1 and P3_2 to P4_2
p4_w2 = self.p4_w2_relu(self.p4_w2)
weight = p4_w2 / (torch.sum(p4_w2, dim=0) + self.epsilon)
# Connections for P4_0, P4_1 and P3_2 to P4_2 respectively
p4_out = self.conv4_down(
self.combine(weight[0] * p4_in + weight[1] * p4_up +
weight[2] * self.p4_down_sample(p3_out)))
# Weights for P5_0, P5_1 and P4_2 to P5_2
p5_w2 = self.p5_w2_relu(self.p5_w2)
weight = p5_w2 / (torch.sum(p5_w2, dim=0) + self.epsilon)
# Connections for P5_0, P5_1 and P4_2 to P5_2 respectively
p5_out = self.conv5_down(
self.combine(weight[0] * p5_in + weight[1] * p5_up +
weight[2] * self.p5_down_sample(p4_out)))
# Weights for P6_0, P6_1 and P5_2 to P6_2
p6_w2 = self.p6_w2_relu(self.p6_w2)
weight = p6_w2 / (torch.sum(p6_w2, dim=0) + self.epsilon)
# Connections for P6_0, P6_1 and P5_2 to P6_2 respectively
p6_out = self.conv6_down(
self.combine(weight[0] * p6_in + weight[1] * p6_up +
weight[2] * self.p6_down_sample(p5_out)))
# Weights for P7_0 and P6_2 to P7_2
p7_w2 = self.p7_w2_relu(self.p7_w2)
weight = p7_w2 / (torch.sum(p7_w2, dim=0) + self.epsilon)
# Connections for P7_0 and P6_2 to P7_2
p7_out = self.conv7_down(
self.combine(weight[0] * p7_in +
weight[1] * self.p7_down_sample(p6_out)))
return p3_out, p4_out, p5_out, p6_out, p7_out
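# --- Illustrative sketch (not part of the original file) --------------------
# Every connection in ``BiFPNStage.forward`` uses the same "fast normalized
# fusion": learnable per-input weights pass through ReLU and are divided by
# their sum plus a small epsilon, so each fused map is a convex combination
# of its inputs. A minimal, self-contained version of that step could look
# like the hypothetical helper below (the name is made up for illustration).
def _fast_fusion_sketch(feats, weights, epsilon=1e-4):
    """Fuse same-shaped feature maps with ReLU-normalized weights."""
    import torch.nn.functional as F
    w = F.relu(weights)
    w = w / (w.sum(dim=0) + epsilon)
    return sum(wi * fi for wi, fi in zip(w, feats))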
@MODELS.register_module()
class BiFPN(BaseModule):
"""
num_stages: int, bifpn number of repeats
in_channels: List[int], input dim for P3, P4, P5
out_channels: int, output dim for P2 - P7
start_level: int, Index of input features in backbone
epsilon: float, hyperparameter in fusion features
apply_bn_for_resampling: bool, whether use bn after resampling
conv_bn_act_pattern: bool, whether use conv_bn_act_pattern
norm_cfg: (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer.
init_cfg: MultiConfig: init method
"""
def __init__(self,
num_stages: int,
in_channels: List[int],
out_channels: int,
start_level: int = 0,
epsilon: float = 1e-4,
apply_bn_for_resampling: bool = True,
conv_bn_act_pattern: bool = False,
norm_cfg: OptConfigType = dict(
type='BN', momentum=1e-2, eps=1e-3),
init_cfg: MultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.start_level = start_level
self.bifpn = nn.Sequential(*[
BiFPNStage(
in_channels=in_channels,
out_channels=out_channels,
first_time=True if _ == 0 else False,
apply_bn_for_resampling=apply_bn_for_resampling,
conv_bn_act_pattern=conv_bn_act_pattern,
norm_cfg=norm_cfg,
epsilon=epsilon) for _ in range(num_stages)
])
def forward(self, x):
x = x[self.start_level:]
x = self.bifpn(x)
return x
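# --- Usage sketch (not part of the original file) ---------------------------
# A hypothetical way to build the neck; the channel numbers below are made up
# (roughly EfficientDet-D0-like) and the real values live in the project
# configs. The first stage runs with ``first_time=True`` (it creates P6/P7
# and reduces the backbone channels); later stages consume the five P-levels.
def _build_bifpn_sketch():
    return BiFPN(
        num_stages=3,
        in_channels=[40, 112, 320],
        out_channels=64,
        start_level=0)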
| 12,443 | 39.534202 | 77 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/utils.py
|
import math
from typing import Tuple, Union
import torch
import torch.nn as nn
from mmcv.cnn.bricks import Swish, build_norm_layer
from torch.nn import functional as F
from torch.nn.init import _calculate_fan_in_and_fan_out, trunc_normal_
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType
def variance_scaling_trunc(tensor, gain=1.):
fan_in, _ = _calculate_fan_in_and_fan_out(tensor)
gain /= max(1.0, fan_in)
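    # 0.87962566103423978 is the standard deviation of a standard normal
    # distribution truncated to [-2, 2]; dividing by it keeps the effective
    # std produced by ``trunc_normal_`` at sqrt(gain).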
std = math.sqrt(gain) / .87962566103423978
return trunc_normal_(tensor, 0., std)
@MODELS.register_module()
class Conv2dSamePadding(nn.Conv2d):
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
bias: bool = True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
def forward(self, x: torch.Tensor) -> torch.Tensor:
img_h, img_w = x.size()[-2:]
kernel_h, kernel_w = self.weight.size()[-2:]
extra_w = (math.ceil(img_w / self.stride[1]) -
1) * self.stride[1] - img_w + kernel_w
extra_h = (math.ceil(img_h / self.stride[0]) -
1) * self.stride[0] - img_h + kernel_h
left = extra_w // 2
right = extra_w - left
top = extra_h // 2
bottom = extra_h - top
x = F.pad(x, [left, right, top, bottom])
return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
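# --- Worked example (not part of the original file) -------------------------
# The forward pass above reproduces TensorFlow "SAME" padding: the output
# spatial size is ceil(input / stride). For a 3x3 kernel with stride 2 on a
# 7x7 input, extra = (ceil(7 / 2) - 1) * 2 - 7 + 3 = 2, split as 1 px per
# side, giving a 4x4 output. A quick self-check (hypothetical helper):
def _same_padding_self_check():
    import torch
    conv = Conv2dSamePadding(1, 1, kernel_size=3, stride=2)
    out = conv(torch.zeros(1, 1, 7, 7))
    assert out.shape[-2:] == (4, 4)
    return out.shape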
class MaxPool2dSamePadding(nn.Module):
def __init__(self,
kernel_size: Union[int, Tuple[int, int]] = 3,
stride: Union[int, Tuple[int, int]] = 2,
**kwargs):
super().__init__()
self.pool = nn.MaxPool2d(kernel_size, stride, **kwargs)
self.stride = self.pool.stride
self.kernel_size = self.pool.kernel_size
if isinstance(self.stride, int):
self.stride = [self.stride] * 2
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size] * 2
def forward(self, x):
h, w = x.shape[-2:]
extra_h = (math.ceil(w / self.stride[1]) -
1) * self.stride[1] - w + self.kernel_size[1]
extra_v = (math.ceil(h / self.stride[0]) -
1) * self.stride[0] - h + self.kernel_size[0]
left = extra_h // 2
right = extra_h - left
top = extra_v // 2
bottom = extra_v - top
x = F.pad(x, [left, right, top, bottom])
x = self.pool(x)
return x
class DepthWiseConvBlock(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
apply_norm: bool = True,
conv_bn_act_pattern: bool = False,
norm_cfg: OptConfigType = dict(type='BN', momentum=1e-2, eps=1e-3)
) -> None:
super(DepthWiseConvBlock, self).__init__()
self.depthwise_conv = Conv2dSamePadding(
in_channels,
in_channels,
kernel_size=3,
stride=1,
groups=in_channels,
bias=False)
self.pointwise_conv = Conv2dSamePadding(
in_channels, out_channels, kernel_size=1, stride=1)
self.apply_norm = apply_norm
if self.apply_norm:
self.bn = build_norm_layer(norm_cfg, num_features=out_channels)[1]
self.apply_activation = conv_bn_act_pattern
if self.apply_activation:
self.swish = Swish()
def forward(self, x):
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
if self.apply_norm:
x = self.bn(x)
if self.apply_activation:
x = self.swish(x)
return x
class DownChannelBlock(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
apply_norm: bool = True,
conv_bn_act_pattern: bool = False,
norm_cfg: OptConfigType = dict(type='BN', momentum=1e-2, eps=1e-3)
) -> None:
super(DownChannelBlock, self).__init__()
self.down_conv = Conv2dSamePadding(in_channels, out_channels, 1)
self.apply_norm = apply_norm
if self.apply_norm:
self.bn = build_norm_layer(norm_cfg, num_features=out_channels)[1]
self.apply_activation = conv_bn_act_pattern
if self.apply_activation:
self.swish = Swish()
def forward(self, x):
x = self.down_conv(x)
if self.apply_norm:
x = self.bn(x)
if self.apply_activation:
x = self.swish(x)
return x
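# --- Illustrative sketch (not part of the original file) --------------------
# ``DepthWiseConvBlock`` factorizes a dense 3x3 convolution into a
# per-channel 3x3 depthwise convolution plus a 1x1 pointwise projection,
# which is where most of the parameter savings in the BiFPN and head come
# from. A rough count (bias terms ignored; hypothetical helper):
def _separable_param_count(in_ch, out_ch, k=3):
    dense = in_ch * out_ch * k * k
    separable = in_ch * k * k + in_ch * out_ch
    return dense, separable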
| 4,897 | 30.6 | 78 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/huber_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from mmdet.models.losses.utils import weighted_loss
from mmdet.registry import MODELS
@weighted_loss
def huber_loss(pred: Tensor, target: Tensor, beta: float = 1.0) -> Tensor:
"""Huber loss.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff,
beta * diff - 0.5 * beta * beta)
return loss
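# --- Worked example (not part of the original file) -------------------------
# With ``beta=1.0`` the loss above is quadratic for residuals below 1 and
# linear beyond: a residual of 0.5 gives 0.5 * 0.5 ** 2 = 0.125, while a
# residual of 2.0 gives 1.0 * 2.0 - 0.5 = 1.5. A quick check using only the
# function defined above (hypothetical helper):
def _huber_self_check():
    import torch
    pred = torch.tensor([0.5, 2.0])
    target = torch.zeros(2)
    # expected: tensor([0.1250, 1.5000])
    return huber_loss(pred, target, beta=1.0, reduction='none')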
@MODELS.register_module()
class HuberLoss(nn.Module):
"""Huber loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self,
beta: float = 1.0,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None,
**kwargs) -> Tensor:
"""Forward function.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
Tensor: Calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * huber_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
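# --- Usage sketch (not part of the original file) ---------------------------
# In a config this loss is referenced by its registered name, e.g.
# ``loss_bbox=dict(type='HuberLoss', beta=0.1, loss_weight=50.0)`` (the
# values here are only an example; see the project configs). Programmatically:
def _huber_module_sketch():
    import torch
    criterion = HuberLoss(beta=1.0, reduction='mean', loss_weight=1.0)
    pred = torch.tensor([[0.5, 2.0]])
    target = torch.zeros(1, 2)
    # mean of (0.125, 1.5) -> 0.8125
    return criterion(pred, target)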
| 2,888 | 30.402174 | 78 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/__init__.py
|
from .bifpn import BiFPN
from .efficientdet import EfficientDet
from .efficientdet_head import EfficientDetSepBNHead
from .huber_loss import HuberLoss
from .tensorflow.anchor_generator import YXYXAnchorGenerator
from .tensorflow.coco_90class import Coco90Dataset
from .tensorflow.coco_90metric import Coco90Metric
from .tensorflow.trans_max_iou_assigner import TransMaxIoUAssigner
from .tensorflow.yxyx_bbox_coder import YXYXDeltaXYWHBBoxCoder
from .utils import Conv2dSamePadding
__all__ = [
'EfficientDet', 'BiFPN', 'HuberLoss', 'EfficientDetSepBNHead',
'Conv2dSamePadding', 'Coco90Dataset', 'Coco90Metric',
'YXYXAnchorGenerator', 'TransMaxIoUAssigner', 'YXYXDeltaXYWHBBoxCoder'
]
| 696 | 40 | 74 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/efficientdet_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch
import torch.nn as nn
from mmcv.cnn.bricks import Swish, build_norm_layer
from mmengine.model import bias_init_with_prob
from torch import Tensor
from mmdet.models.dense_heads.anchor_head import AnchorHead
from mmdet.models.utils import images_to_levels, multi_apply
from mmdet.registry import MODELS
from mmdet.structures.bbox import cat_boxes, get_box_tensor
from mmdet.utils import (InstanceList, OptConfigType, OptInstanceList,
OptMultiConfig, reduce_mean)
from .utils import DepthWiseConvBlock
@MODELS.register_module()
class EfficientDetSepBNHead(AnchorHead):
"""EfficientDetHead with separate BN.
num_classes (int): Number of categories num_ins (int): Number of the input
feature map. in_channels (int): Number of channels in the input feature
map. feat_channels (int): Number of hidden channels. stacked_convs (int):
Number of repetitions of conv norm_cfg (dict): Config dict for
normalization layer. anchor_generator (dict): Config dict for anchor
generator bbox_coder (dict): Config of bounding box coder. loss_cls (dict):
Config of classification loss. loss_bbox (dict): Config of localization
loss. train_cfg (dict): Training config of anchor head. test_cfg (dict):
Testing config of anchor head. init_cfg (dict or list[dict], optional):
Initialization config dict.
"""
def __init__(self,
num_classes: int,
num_ins: int,
in_channels: int,
feat_channels: int,
stacked_convs: int = 3,
norm_cfg: OptConfigType = dict(
type='BN', momentum=1e-2, eps=1e-3),
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
self.num_ins = num_ins
self.stacked_convs = stacked_convs
self.norm_cfg = norm_cfg
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
feat_channels=feat_channels,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.reg_conv_list = nn.ModuleList()
self.cls_conv_list = nn.ModuleList()
for i in range(self.stacked_convs):
channels = self.in_channels if i == 0 else self.feat_channels
self.reg_conv_list.append(
DepthWiseConvBlock(
channels, self.feat_channels, apply_norm=False))
self.cls_conv_list.append(
DepthWiseConvBlock(
channels, self.feat_channels, apply_norm=False))
self.reg_bn_list = nn.ModuleList([
nn.ModuleList([
build_norm_layer(
self.norm_cfg, num_features=self.feat_channels)[1]
for j in range(self.num_ins)
]) for i in range(self.stacked_convs)
])
self.cls_bn_list = nn.ModuleList([
nn.ModuleList([
build_norm_layer(
self.norm_cfg, num_features=self.feat_channels)[1]
for j in range(self.num_ins)
]) for i in range(self.stacked_convs)
])
self.cls_header = DepthWiseConvBlock(
self.in_channels,
self.num_base_priors * self.cls_out_channels,
apply_norm=False)
self.reg_header = DepthWiseConvBlock(
self.in_channels, self.num_base_priors * 4, apply_norm=False)
self.swish = Swish()
def init_weights(self) -> None:
"""Initialize weights of the head."""
for m in self.reg_conv_list:
nn.init.constant_(m.pointwise_conv.bias, 0.0)
for m in self.cls_conv_list:
nn.init.constant_(m.pointwise_conv.bias, 0.0)
bias_cls = bias_init_with_prob(0.01)
nn.init.constant_(self.cls_header.pointwise_conv.bias, bias_cls)
nn.init.constant_(self.reg_header.pointwise_conv.bias, 0.0)
def forward_single_bbox(self, feat: Tensor, level_id: int,
i: int) -> Tensor:
conv_op = self.reg_conv_list[i]
bn = self.reg_bn_list[i][level_id]
feat = conv_op(feat)
feat = bn(feat)
feat = self.swish(feat)
return feat
def forward_single_cls(self, feat: Tensor, level_id: int,
i: int) -> Tensor:
conv_op = self.cls_conv_list[i]
bn = self.cls_bn_list[i][level_id]
feat = conv_op(feat)
feat = bn(feat)
feat = self.swish(feat)
return feat
def forward(self, feats: Tuple[Tensor]) -> tuple:
cls_scores = []
bbox_preds = []
for level_id in range(self.num_ins):
feat = feats[level_id]
for i in range(self.stacked_convs):
feat = self.forward_single_bbox(feat, level_id, i)
bbox_pred = self.reg_header(feat)
bbox_preds.append(bbox_pred)
for level_id in range(self.num_ins):
feat = feats[level_id]
for i in range(self.stacked_convs):
feat = self.forward_single_cls(feat, level_id, i)
cls_score = self.cls_header(feat)
cls_scores.append(cls_score)
return cls_scores, bbox_preds
def loss_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
"""Calculate the loss based on the features extracted by the detection
head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
has shape (N, num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.prior_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
avg_factor) = cls_reg_targets
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
concat_anchor_list = []
for i in range(len(anchor_list)):
concat_anchor_list.append(cat_boxes(anchor_list[i]))
all_anchor_list = images_to_levels(concat_anchor_list,
num_level_anchors)
avg_factor = reduce_mean(
torch.tensor(avg_factor, dtype=torch.float, device=device)).item()
avg_factor = max(avg_factor, 1.0)
losses_cls, losses_bbox = multi_apply(
self.loss_by_feat_single,
cls_scores,
bbox_preds,
all_anchor_list,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
avg_factor=avg_factor)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,
anchors: Tensor, labels: Tensor,
label_weights: Tensor, bbox_targets: Tensor,
bbox_weights: Tensor, avg_factor: int) -> tuple:
"""Calculate the loss of a single scale level based on the features
extracted by the detection head.
Args:
cls_score (Tensor): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W).
bbox_pred (Tensor): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
anchors (Tensor): Box reference for each scale level with shape
(N, num_total_anchors, 4).
labels (Tensor): Labels of each anchors with shape
(N, num_total_anchors).
label_weights (Tensor): Label weights of each anchor with shape
(N, num_total_anchors)
bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (N, num_total_anchors, 4).
bbox_weights (Tensor): BBox regression loss weights of each anchor
with shape (N, num_total_anchors, 4).
avg_factor (int): Average factor that is used to average the loss.
Returns:
tuple: loss components.
"""
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=avg_factor)
# regression loss
target_dim = bbox_targets.size(-1)
bbox_targets = bbox_targets.reshape(-1, target_dim)
bbox_weights = bbox_weights.reshape(-1, target_dim)
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(-1,
self.bbox_coder.encode_size)
if self.reg_decoded_bbox:
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
# is applied directly on the decoded bounding boxes, it
# decodes the already encoded coordinates to absolute format.
anchors = anchors.reshape(-1, anchors.size(-1))
bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
bbox_pred = get_box_tensor(bbox_pred)
loss_bbox = self.loss_bbox(
bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor * 4)
return loss_cls, loss_bbox
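# --- Illustrative sketch (not part of the original file) --------------------
# The head above shares its depthwise conv stacks across all pyramid levels
# but keeps one BatchNorm per (conv, level) pair, since feature statistics
# differ between P3 and P7. Stripped of the mmdet plumbing, the pattern looks
# roughly like this hypothetical, minimal module (it reuses the ``torch.nn``
# import at the top of this file):
class _SharedConvSeparateBNSketch(nn.Module):
    def __init__(self, channels=64, num_levels=5):
        super().__init__()
        self.conv = nn.Conv2d(channels, channels, 3, padding=1)
        self.bns = nn.ModuleList(
            [nn.BatchNorm2d(channels) for _ in range(num_levels)])
    def forward(self, feats):
        # one input tensor per pyramid level; the conv weights are shared
        return [self.bns[i](self.conv(f)) for i, f in enumerate(feats)]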
| 10,986 | 40.935115 | 79 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/tensorflow/anchor_generator.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple, Union
import torch
from torch import Tensor
from mmdet.models.task_modules.prior_generators.anchor_generator import \
AnchorGenerator
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes
DeviceType = Union[str, torch.device]
@TASK_UTILS.register_module()
class YXYXAnchorGenerator(AnchorGenerator):
def gen_single_level_base_anchors(self,
base_size: Union[int, float],
scales: Tensor,
ratios: Tensor,
center: Optional[Tuple[float]] = None) \
-> Tensor:
"""Generate base anchors of a single level.
Args:
base_size (int | float): Basic size of an anchor.
scales (torch.Tensor): Scales of the anchor.
ratios (torch.Tensor): The ratio between the height
and width of anchors in a single level.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
torch.Tensor: Anchors in a single-level feature maps.
"""
w = base_size
h = base_size
if center is None:
x_center = self.center_offset * w
y_center = self.center_offset * h
else:
x_center, y_center = center
h_ratios = torch.sqrt(ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
else:
ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchors = [
y_center - 0.5 * hs,
x_center - 0.5 * ws,
y_center + 0.5 * hs,
x_center + 0.5 * ws,
]
base_anchors = torch.stack(base_anchors, dim=-1)
return base_anchors
def single_level_grid_priors(self,
featmap_size: Tuple[int, int],
level_idx: int,
dtype: torch.dtype = torch.float32,
device: DeviceType = 'cuda') -> Tensor:
"""Generate grid anchors of a single level.
Note:
This function is usually called by method ``self.grid_priors``.
Args:
featmap_size (tuple[int, int]): Size of the feature maps.
level_idx (int): The index of corresponding feature map level.
dtype (obj:`torch.dtype`): Date type of points.Defaults to
``torch.float32``.
device (str | torch.device): The device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature maps.
"""
base_anchors = self.base_anchors[level_idx].to(device).to(dtype)
feat_h, feat_w = featmap_size
stride_w, stride_h = self.strides[level_idx]
# First create Range with the default dtype, than convert to
# target `dtype` for onnx exporting.
shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w
shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = torch.stack([shift_yy, shift_xx, shift_yy, shift_xx], dim=-1)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.view(-1, 4)
# first A rows correspond to A anchors of (0, 0) in feature map,
# then (0, 1), (0, 2), ...
if self.use_box_type:
all_anchors = HorizontalBoxes(all_anchors)
return all_anchors
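# --- Worked example (not part of the original file) -------------------------
# Unlike the default generator, base anchors here are stored as
# (y1, x1, y2, x2) to match the TensorFlow EfficientDet convention. A quick
# check with made-up settings (the real strides/ratios/scales come from the
# configs):
def _yxyx_base_anchor_sketch():
    gen = YXYXAnchorGenerator(
        strides=[8], ratios=[1.0], scales=[1.0], base_sizes=[8])
    # with ratio 1 and scale 1 the single base anchor is (-4., -4., 4., 4.);
    # a ratio > 1 enlarges the height terms (indices 0 and 2) by sqrt(ratio)
    # and shrinks the width terms by the same factor.
    return gen.base_anchors[0]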
| 4,261 | 37.745455 | 78 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/tensorflow/coco_90metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import itertools
import os.path as osp
import tempfile
from collections import OrderedDict
from typing import Dict, List, Optional, Sequence, Union
import numpy as np
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump, get_local_path, load
from mmengine.logging import MMLogger
from terminaltables import AsciiTable
from mmdet.evaluation.functional import eval_recalls
from mmdet.registry import METRICS
from mmdet.structures.mask import encode_mask_results
from .api_wrappers import COCO, COCOeval
@METRICS.register_module()
class Coco90Metric(BaseMetric):
"""COCO evaluation metric.
Evaluate AR, AP, and mAP for detection tasks including proposal/box
detection and instance segmentation. Please refer to
https://cocodataset.org/#detection-eval for more details.
Args:
ann_file (str, optional): Path to the coco format annotation file.
If not specified, ground truth annotations from the dataset will
be converted to coco format. Defaults to None.
metric (str | List[str]): Metrics to be evaluated. Valid metrics
include 'bbox', 'segm', 'proposal', and 'proposal_fast'.
Defaults to 'bbox'.
classwise (bool): Whether to evaluate the metric class-wise.
Defaults to False.
proposal_nums (Sequence[int]): Numbers of proposals to be evaluated.
Defaults to (100, 300, 1000).
iou_thrs (float | List[float], optional): IoU threshold to compute AP
and AR. If not specified, IoUs from 0.5 to 0.95 will be used.
Defaults to None.
metric_items (List[str], optional): Metric result names to be
recorded in the evaluation result. Defaults to None.
        format_only (bool): Format the output results without performing
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
Defaults to False.
outfile_prefix (str, optional): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'coco'
def __init__(self,
ann_file: Optional[str] = None,
metric: Union[str, List[str]] = 'bbox',
classwise: bool = False,
proposal_nums: Sequence[int] = (100, 300, 1000),
iou_thrs: Optional[Union[float, Sequence[float]]] = None,
metric_items: Optional[Sequence[str]] = None,
format_only: bool = False,
outfile_prefix: Optional[str] = None,
backend_args: dict = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
# coco evaluation metrics
self.metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in self.metrics:
if metric not in allowed_metrics:
raise KeyError(
"metric should be one of 'bbox', 'segm', 'proposal', "
f"'proposal_fast', but got {metric}.")
# do class wise evaluation, default False
self.classwise = classwise
# proposal_nums used to compute recall or precision.
self.proposal_nums = list(proposal_nums)
# iou_thrs used to compute recall or precision.
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.iou_thrs = iou_thrs
self.metric_items = metric_items
self.format_only = format_only
if self.format_only:
            assert outfile_prefix is not None, \
                'outfile_prefix must not be None when format_only is True, ' \
                'otherwise the result files will be saved to a temp ' \
                'directory which will be cleaned up at the end.'
self.outfile_prefix = outfile_prefix
self.backend_args = backend_args
# if ann_file is not specified,
# initialize coco api with the converted dataset
if ann_file is not None:
with get_local_path(
ann_file, backend_args=self.backend_args) as local_path:
self._coco_api = COCO(local_path)
else:
self._coco_api = None
# handle dataset lazy init
self.cat_ids = None
self.img_ids = None
def fast_eval_recall(self,
results: List[dict],
proposal_nums: Sequence[int],
iou_thrs: Sequence[float],
logger: Optional[MMLogger] = None) -> np.ndarray:
"""Evaluate proposal recall with COCO's fast_eval_recall.
Args:
results (List[dict]): Results of the dataset.
proposal_nums (Sequence[int]): Proposal numbers used for
evaluation.
iou_thrs (Sequence[float]): IoU thresholds used for evaluation.
logger (MMLogger, optional): Logger used for logging the recall
summary.
Returns:
np.ndarray: Averaged recall results.
"""
gt_bboxes = []
pred_bboxes = [result['bboxes'] for result in results]
for i in range(len(self.img_ids)):
ann_ids = self._coco_api.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self._coco_api.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, pred_bboxes, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def xyxy2xywh(self, bbox: np.ndarray) -> list:
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox: List = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def results2json(self, results: Sequence[dict],
outfile_prefix: str) -> dict:
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (Sequence[dict]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict: Possible keys are "bbox", "segm", "proposal", and
values are corresponding filenames.
"""
bbox_json_results = []
segm_json_results = [] if 'masks' in results[0] else None
for idx, result in enumerate(results):
image_id = result.get('img_id', idx)
labels = result['labels']
bboxes = result['bboxes']
scores = result['scores']
# bbox results
for i, label in enumerate(labels):
data = dict()
data['image_id'] = image_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(scores[i])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
if segm_json_results is None:
continue
# segm results
masks = result['masks']
mask_scores = result.get('mask_scores', scores)
for i, label in enumerate(labels):
data = dict()
data['image_id'] = image_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_scores[i])
data['category_id'] = self.cat_ids[label]
if isinstance(masks[i]['counts'], bytes):
masks[i]['counts'] = masks[i]['counts'].decode()
data['segmentation'] = masks[i]
segm_json_results.append(data)
result_files = dict()
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
dump(bbox_json_results, result_files['bbox'])
if segm_json_results is not None:
result_files['segm'] = f'{outfile_prefix}.segm.json'
dump(segm_json_results, result_files['segm'])
return result_files
def gt_to_coco_json(self, gt_dicts: Sequence[dict],
outfile_prefix: str) -> str:
"""Convert ground truth to coco format json file.
Args:
gt_dicts (Sequence[dict]): Ground truth of the dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json file will be named
"somepath/xxx.gt.json".
Returns:
str: The filename of the json file.
"""
categories = [
dict(id=id, name=name)
for id, name in enumerate(self.dataset_meta['classes'])
]
image_infos = []
annotations = []
for idx, gt_dict in enumerate(gt_dicts):
img_id = gt_dict.get('img_id', idx)
image_info = dict(
id=img_id,
width=gt_dict['width'],
height=gt_dict['height'],
file_name='')
image_infos.append(image_info)
for ann in gt_dict['anns']:
label = ann['bbox_label']
bbox = ann['bbox']
coco_bbox = [
bbox[0],
bbox[1],
bbox[2] - bbox[0],
bbox[3] - bbox[1],
]
annotation = dict(
id=len(annotations) +
1, # coco api requires id starts with 1
image_id=img_id,
bbox=coco_bbox,
iscrowd=ann.get('ignore_flag', 0),
category_id=int(label),
area=coco_bbox[2] * coco_bbox[3])
if ann.get('mask', None):
mask = ann['mask']
# area = mask_util.area(mask)
if isinstance(mask, dict) and isinstance(
mask['counts'], bytes):
mask['counts'] = mask['counts'].decode()
annotation['segmentation'] = mask
# annotation['area'] = float(area)
annotations.append(annotation)
info = dict(
date_created=str(datetime.datetime.now()),
description='Coco json file converted by mmdet CocoMetric.')
coco_json = dict(
info=info,
images=image_infos,
categories=categories,
licenses=None,
)
if len(annotations) > 0:
coco_json['annotations'] = annotations
converted_json_path = f'{outfile_prefix}.gt.json'
dump(coco_json, converted_json_path)
return converted_json_path
# TODO: data_batch is no longer needed, consider adjusting the
# parameter position
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
result = dict()
pred = data_sample['pred_instances']
result['img_id'] = data_sample['img_id']
result['bboxes'] = pred['bboxes'].cpu().numpy()
result['scores'] = pred['scores'].cpu().numpy()
result['labels'] = pred['labels'].cpu().numpy()
# encode mask to RLE
if 'masks' in pred:
result['masks'] = encode_mask_results(
pred['masks'].detach().cpu().numpy())
# some detectors use different scores for bbox and mask
if 'mask_scores' in pred:
result['mask_scores'] = pred['mask_scores'].cpu().numpy()
# parse gt
gt = dict()
gt['width'] = data_sample['ori_shape'][1]
gt['height'] = data_sample['ori_shape'][0]
gt['img_id'] = data_sample['img_id']
if self._coco_api is None:
# TODO: Need to refactor to support LoadAnnotations
assert 'instances' in data_sample, \
'ground truth is required for evaluation when ' \
'`ann_file` is not provided'
gt['anns'] = data_sample['instances']
# add converted result to the results list
self.results.append((gt, result))
def compute_metrics(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
Dict[str, float]: The computed metrics. The keys are the names of
the metrics, and the values are corresponding results.
"""
logger: MMLogger = MMLogger.get_current_instance()
# split gt and prediction list
gts, preds = zip(*results)
tmp_dir = None
if self.outfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
outfile_prefix = osp.join(tmp_dir.name, 'results')
else:
outfile_prefix = self.outfile_prefix
if self._coco_api is None:
# use converted gt json file to initialize coco api
logger.info('Converting ground truth to coco format...')
coco_json_path = self.gt_to_coco_json(
gt_dicts=gts, outfile_prefix=outfile_prefix)
self._coco_api = COCO(coco_json_path)
# handle lazy init
if self.cat_ids is None:
self.cat_ids = self._coco_api.get_cat_ids(
cat_names=self.dataset_meta['classes'])
if self.img_ids is None:
self.img_ids = self._coco_api.get_img_ids()
# convert predictions to coco format and dump to json file
result_files = self.results2json(preds, outfile_prefix)
eval_results = OrderedDict()
if self.format_only:
logger.info('results are saved in '
f'{osp.dirname(outfile_prefix)}')
return eval_results
for metric in self.metrics:
logger.info(f'Evaluating {metric}...')
# TODO: May refactor fast_eval_recall to an independent metric?
# fast eval recall
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
preds, self.proposal_nums, self.iou_thrs, logger=logger)
log_msg = []
for i, num in enumerate(self.proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
logger.info(log_msg)
continue
# evaluate proposal, bbox and segm
iou_type = 'bbox' if metric == 'proposal' else metric
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
predictions = load(result_files[metric])
if iou_type == 'segm':
# Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa
# When evaluating mask AP, if the results contain bbox,
# cocoapi will use the box area instead of the mask area
# for calculating the instance area. Though the overall AP
# is not affected, this leads to different
# small/medium/large mask AP results.
for x in predictions:
x.pop('bbox')
coco_dt = self._coco_api.loadRes(predictions)
except IndexError:
logger.error(
'The testing results of the whole dataset is empty.')
break
coco_eval = COCOeval(self._coco_api, coco_dt, iou_type)
coco_eval.params.catIds = self.cat_ids
coco_eval.params.imgIds = self.img_ids
coco_eval.params.maxDets = list(self.proposal_nums)
coco_eval.params.iouThrs = self.iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
metric_items = self.metric_items
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item "{metric_item}" is not supported')
if metric == 'proposal':
coco_eval.params.useCats = 0
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{coco_eval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
if self.classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = coco_eval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, cat_id in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self._coco_api.loadCats(cat_id)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{round(ap, 3)}'))
eval_results[f'{nm["name"]}_precision'] = round(ap, 3)
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
logger.info('\n' + table.table)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = coco_eval.stats[coco_metric_names[metric_item]]
eval_results[key] = float(f'{round(val, 3)}')
ap = coco_eval.stats[:6]
logger.info(f'{metric}_mAP_copypaste: {ap[0]:.3f} '
f'{ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
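# --- Usage sketch (not part of the original file) ---------------------------
# The metric is normally referenced from a config; the annotation path below
# is just the conventional COCO layout and only an example.
_example_val_evaluator = dict(
    type='Coco90Metric',
    ann_file='data/coco/annotations/instances_val2017.json',
    metric='bbox')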
| 22,717 | 40.992606 | 124 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/tensorflow/coco_90class.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
from typing import List, Union
from mmengine.fileio import get_local_path
from mmdet.datasets.base_det_dataset import BaseDetDataset
from mmdet.registry import DATASETS
from .api_wrappers import COCO
@DATASETS.register_module()
class Coco90Dataset(BaseDetDataset):
"""Dataset for COCO."""
METAINFO = {
'classes':
('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', None, 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', None, 'backpack',
'umbrella', None, None, 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', None, 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
'bed', None, 'dining table', None, None, 'toilet', None, 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', None, 'book', 'clock',
'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'),
# palette is a list of color tuples, which is used for visualization.
'palette':
[(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228),
(0, 60, 100), (0, 80, 100), (0, 0, 70), (0, 0, 192), (250, 170, 30),
(100, 170, 30), None, (220, 220, 0), (175, 116, 175), (250, 0, 30),
(165, 42, 42), (255, 77, 255), (0, 226, 252), (182, 182, 255),
(0, 82, 0), (120, 166, 157), (110, 76, 0), (174, 57, 255),
(199, 100, 0), (72, 0, 118), None,
(255, 179, 240), (0, 125, 92), None, None, (209, 0, 151),
(188, 208, 182), (0, 220, 176), (255, 99, 164), (92, 0, 73),
(133, 129, 255), (78, 180, 255), (0, 228, 0), (174, 255, 243),
(45, 89, 255), (134, 134, 103), (145, 148, 174), (255, 208, 186),
(197, 226, 255), None, (171, 134, 1), (109, 63, 54), (207, 138, 255),
(151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105),
(166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149),
(179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205),
(147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0),
(119, 0, 170), None, (0, 182, 199), None, None, (0, 165, 120), None,
(183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133),
(166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62),
(65, 70, 15), (127, 167, 115), (59, 105, 106), None, (142, 108, 45),
(196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1),
(246, 0, 122), (191, 162, 208)]
}
COCOAPI = COCO
# ann_id is unique in coco dataset.
ANN_ID_UNIQUE = True
def load_data_list(self) -> List[dict]:
"""Load annotations from an annotation file named as ``self.ann_file``
Returns:
List[dict]: A list of annotation.
""" # noqa: E501
with get_local_path(
self.ann_file, backend_args=self.backend_args) as local_path:
self.coco = self.COCOAPI(local_path)
# The order of returned `cat_ids` will not
# change with the order of the `classes`
self.cat_ids = self.coco.get_cat_ids(
cat_names=self.metainfo['classes'])
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.cat_img_map = copy.deepcopy(self.coco.cat_img_map)
img_ids = self.coco.get_img_ids()
data_list = []
total_ann_ids = []
for img_id in img_ids:
raw_img_info = self.coco.load_imgs([img_id])[0]
raw_img_info['img_id'] = img_id
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
raw_ann_info = self.coco.load_anns(ann_ids)
total_ann_ids.extend(ann_ids)
parsed_data_info = self.parse_data_info({
'raw_ann_info':
raw_ann_info,
'raw_img_info':
raw_img_info
})
data_list.append(parsed_data_info)
if self.ANN_ID_UNIQUE:
assert len(set(total_ann_ids)) == len(
total_ann_ids
), f"Annotation ids in '{self.ann_file}' are not unique!"
del self.coco
return data_list
def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]:
"""Parse raw annotation to target format.
Args:
raw_data_info (dict): Raw data information load from ``ann_file``
Returns:
Union[dict, List[dict]]: Parsed annotation.
"""
img_info = raw_data_info['raw_img_info']
ann_info = raw_data_info['raw_ann_info']
data_info = {}
# TODO: need to change data_prefix['img'] to data_prefix['img_path']
img_path = osp.join(self.data_prefix['img'], img_info['file_name'])
if self.data_prefix.get('seg', None):
seg_map_path = osp.join(
self.data_prefix['seg'],
img_info['file_name'].rsplit('.', 1)[0] + self.seg_map_suffix)
else:
seg_map_path = None
data_info['img_path'] = img_path
data_info['img_id'] = img_info['img_id']
data_info['seg_map_path'] = seg_map_path
data_info['height'] = img_info['height']
data_info['width'] = img_info['width']
instances = []
for i, ann in enumerate(ann_info):
instance = {}
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
instance['ignore_flag'] = 1
else:
instance['ignore_flag'] = 0
instance['bbox'] = bbox
instance['bbox_label'] = self.cat2label[ann['category_id']]
if ann.get('segmentation', None):
instance['mask'] = ann['segmentation']
instances.append(instance)
data_info['instances'] = instances
return data_info
def filter_data(self) -> List[dict]:
"""Filter annotations according to filter_cfg.
Returns:
List[dict]: Filtered results.
"""
if self.test_mode:
return self.data_list
if self.filter_cfg is None:
return self.data_list
filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)
min_size = self.filter_cfg.get('min_size', 0)
# obtain images that contain annotation
ids_with_ann = set(data_info['img_id'] for data_info in self.data_list)
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_data_infos = []
for i, data_info in enumerate(self.data_list):
img_id = data_info['img_id']
width = data_info['width']
height = data_info['height']
if filter_empty_gt and img_id not in ids_in_cat:
continue
if min(width, height) >= min_size:
valid_data_infos.append(data_info)
return valid_data_infos
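# --- Illustrative note (not part of the original file) ----------------------
# The ``classes`` tuple above keeps the original 90-id COCO label space, with
# ``None`` placeholders for ids that have no category, so labels line up with
# checkpoints converted from the TensorFlow implementation (which predict 90
# logits instead of mmdet's usual 80). For example (hypothetical helper):
def _label_map_sketch():
    classes = Coco90Dataset.METAINFO['classes']
    # 'stop sign' has COCO category id 13 but sits at 0-based index 12 here,
    # with index 11 left as ``None``.
    return classes.index('stop sign')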
| 8,204 | 40.231156 | 79 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/tensorflow/yxyx_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import torch
from mmdet.models.task_modules.coders.delta_xywh_bbox_coder import \
DeltaXYWHBBoxCoder
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes, get_box_tensor
@TASK_UTILS.register_module()
class YXYXDeltaXYWHBBoxCoder(DeltaXYWHBBoxCoder):
def encode(self, bboxes, gt_bboxes):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,
e.g., object proposals.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the
transformation, e.g., ground-truth boxes.
Returns:
torch.Tensor: Box transformation deltas
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = YXbbox2delta(bboxes, gt_bboxes, self.means, self.stds)
return encoded_bboxes
def decode(self,
bboxes,
pred_bboxes,
max_shape=None,
wh_ratio_clip=16 / 1000):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes. Shape
(B, N, 4) or (N, 4)
pred_bboxes (Tensor): Encoded offsets with respect to each roi.
Has shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
when rois is a grid of anchors.Offset encoding follows [1]_.
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
wh_ratio_clip (float, optional): The allowed ratio between
width and height.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
assert pred_bboxes.size(0) == bboxes.size(0)
if pred_bboxes.ndim == 3:
assert pred_bboxes.size(1) == bboxes.size(1)
if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export():
# single image decode
decoded_bboxes = YXdelta2bbox(bboxes, pred_bboxes, self.means,
self.stds, max_shape, wh_ratio_clip,
self.clip_border, self.add_ctr_clamp,
self.ctr_clamp)
else:
if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export():
warnings.warn(
'DeprecationWarning: onnx_delta2bbox is deprecated '
'in the case of batch decoding and non-ONNX, '
                    'please use "delta2bbox" instead. In order to improve '
'the decoding speed, the batch function will no '
'longer be supported. ')
decoded_bboxes = YXonnx_delta2bbox(bboxes, pred_bboxes, self.means,
self.stds, max_shape,
wh_ratio_clip, self.clip_border,
self.add_ctr_clamp,
self.ctr_clamp)
if self.use_box_type:
assert decoded_bboxes.size(-1) == 4, \
                ('Cannot wrap decoded boxes with box type when decoded boxes '
                 'have shape of (N, num_classes * 4)')
decoded_bboxes = HorizontalBoxes(decoded_bboxes)
return decoded_bboxes
def YXdelta2bbox(rois,
deltas,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.),
max_shape=None,
hw_ratio_clip=1000 / 16,
clip_border=True,
add_ctr_clamp=False,
ctr_clamp=32):
"""Apply deltas to shift/scale base boxes.
Typically the rois are anchor or proposed bounding boxes and the deltas are
network outputs used to shift/scale those boxes.
This is the inverse function of :func:`bbox2delta`.
Args:
rois (Tensor): Boxes to be transformed. Has shape (N, 4).
deltas (Tensor): Encoded offsets relative to each roi.
Has shape (N, num_classes * 4) or (N, 4). Note
N = num_base_anchors * W * H, when rois is a grid of
anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates.
Default (0., 0., 0., 0.).
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates. Default (1., 1., 1., 1.).
max_shape (tuple[int, int]): Maximum bounds for boxes, specifies
(H, W). Default None.
        hw_ratio_clip (float): Maximum allowed scale factor for the height
            and width deltas; ``dh``/``dw`` are clamped to
            ``abs(log(hw_ratio_clip))`` in magnitude. Default 1000 / 16.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Default True.
add_ctr_clamp (bool): Whether to add center clamp. When set to True,
the center of the prediction bounding box will be clamped to
avoid being too far away from the center of the anchor.
Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
Returns:
Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4
represent tl_x, tl_y, br_x, br_y.
References:
.. [1] https://arxiv.org/abs/1311.2524
Example:
>>> rois = torch.Tensor([[ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 5., 5., 5., 5.]])
>>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
>>> [ 1., 1., 1., 1.],
>>> [ 0., 0., 2., -1.],
>>> [ 0.7, -1.9, -0.5, 0.3]])
>>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))
tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
"""
num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4
if num_bboxes == 0:
return deltas
deltas = deltas.reshape(-1, 4)
means = deltas.new_tensor(means).view(1, -1)
stds = deltas.new_tensor(stds).view(1, -1)
denorm_deltas = deltas * stds + means
dyx = denorm_deltas[:, :2]
dhw = denorm_deltas[:, 2:]
# Compute width/height of each roi
rois_ = rois.repeat(1, num_classes).reshape(-1, 4)
pyx = ((rois_[:, :2] + rois_[:, 2:]) * 0.5)
phw = (rois_[:, 2:] - rois_[:, :2])
dyx_hw = phw * dyx
max_ratio = np.abs(np.log(hw_ratio_clip))
if add_ctr_clamp:
dyx_hw = torch.clamp(dyx_hw, max=ctr_clamp, min=-ctr_clamp)
dhw = torch.clamp(dhw, max=max_ratio)
else:
dhw = dhw.clamp(min=-max_ratio, max=max_ratio)
gyx = pyx + dyx_hw
ghw = phw * dhw.exp()
y1x1 = gyx - (ghw * 0.5)
y2x2 = gyx + (ghw * 0.5)
ymin, xmin = y1x1[:, 0].reshape(-1, 1), y1x1[:, 1].reshape(-1, 1)
ymax, xmax = y2x2[:, 0].reshape(-1, 1), y2x2[:, 1].reshape(-1, 1)
bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1)
if clip_border and max_shape is not None:
bboxes[..., 0::2].clamp_(min=0, max=max_shape[1])
bboxes[..., 1::2].clamp_(min=0, max=max_shape[0])
bboxes = bboxes.reshape(num_bboxes, -1)
return bboxes
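# --- Worked example (not part of the original file) -------------------------
# With all-zero deltas (and unit stds) the function above simply converts a
# yxyx anchor back into xyxy corners, which makes the coordinate convention
# easy to verify (hypothetical helper):
def _yxdelta2bbox_self_check():
    rois = torch.tensor([[10., 20., 30., 60.]])  # (y1, x1, y2, x2)
    deltas = torch.zeros(1, 4)
    # expected: tensor([[20., 10., 60., 30.]]) in (x1, y1, x2, y2) order
    return YXdelta2bbox(rois, deltas)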
def YXbbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
"""Compute deltas of proposals w.r.t. gt.
We usually compute the deltas of x, y, w, h of proposals w.r.t ground
truth bboxes to get regression target.
This is the inverse function of :func:`delta2bbox`.
Args:
proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
means (Sequence[float]): Denormalizing means for delta coordinates
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates
Returns:
Tensor: deltas with shape (N, 4), where columns represent dx, dy,
dw, dh.
"""
assert proposals.size() == gt.size()
proposals = proposals.float()
gt = gt.float()
py = (proposals[..., 0] + proposals[..., 2]) * 0.5
px = (proposals[..., 1] + proposals[..., 3]) * 0.5
ph = proposals[..., 2] - proposals[..., 0]
pw = proposals[..., 3] - proposals[..., 1]
gx = (gt[..., 0] + gt[..., 2]) * 0.5
gy = (gt[..., 1] + gt[..., 3]) * 0.5
gw = gt[..., 2] - gt[..., 0]
gh = gt[..., 3] - gt[..., 1]
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
deltas = torch.stack([dy, dx, dh, dw], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
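# --- Worked example (not part of the original file) -------------------------
# The function above appears to assume ``proposals`` in yxyx order (the
# anchors produced by YXYXAnchorGenerator) and ``gt`` in mmdet's usual xyxy
# order, which is consistent with ``YXdelta2bbox`` above and with how
# TransMaxIoUAssigner leaves the ground-truth boxes untouched. Encoding a box
# against itself therefore yields all-zero deltas (hypothetical helper):
def _yxbbox2delta_self_check():
    anchors_yxyx = torch.tensor([[10., 20., 30., 60.]])
    gt_xyxy = torch.tensor([[20., 10., 60., 30.]])  # the same box, xyxy order
    # expected: tensor([[0., 0., 0., 0.]])
    return YXbbox2delta(anchors_yxyx, gt_xyxy)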
def YXonnx_delta2bbox(rois,
deltas,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.),
max_shape=None,
wh_ratio_clip=16 / 1000,
clip_border=True,
add_ctr_clamp=False,
ctr_clamp=32):
"""Apply deltas to shift/scale base boxes.
Typically the rois are anchor or proposed bounding boxes and the deltas are
network outputs used to shift/scale those boxes.
This is the inverse function of :func:`bbox2delta`.
Args:
rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)
deltas (Tensor): Encoded offsets with respect to each roi.
Has shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
            when rois is a grid of anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates.
Default (0., 0., 0., 0.).
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates. Default (1., 1., 1., 1.).
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If rois shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B. Default None.
wh_ratio_clip (float): Maximum aspect ratio for boxes.
Default 16 / 1000.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Default True.
add_ctr_clamp (bool): Whether to add center clamp, when added, the
            predicted box is clamped if its center is too far away from
the original anchor's center. Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
Returns:
Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4), where 4 represent
tl_x, tl_y, br_x, br_y.
References:
.. [1] https://arxiv.org/abs/1311.2524
Example:
>>> rois = torch.Tensor([[ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 5., 5., 5., 5.]])
>>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
>>> [ 1., 1., 1., 1.],
>>> [ 0., 0., 2., -1.],
>>> [ 0.7, -1.9, -0.5, 0.3]])
>>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))
tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
"""
means = deltas.new_tensor(means).view(1,
-1).repeat(1,
deltas.size(-1) // 4)
stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)
denorm_deltas = deltas * stds + means
dy = denorm_deltas[..., 0::4]
dx = denorm_deltas[..., 1::4]
dh = denorm_deltas[..., 2::4]
dw = denorm_deltas[..., 3::4]
y1, x1 = rois[..., 0], rois[..., 1]
y2, x2 = rois[..., 2], rois[..., 3]
# Compute center of each roi
px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)
py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)
# Compute width/height of each roi
pw = (x2 - x1).unsqueeze(-1).expand_as(dw)
ph = (y2 - y1).unsqueeze(-1).expand_as(dh)
dx_width = pw * dx
dy_height = ph * dy
max_ratio = np.abs(np.log(wh_ratio_clip))
if add_ctr_clamp:
dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp)
dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp)
dw = torch.clamp(dw, max=max_ratio)
dh = torch.clamp(dh, max=max_ratio)
else:
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
# Use exp(network energy) to enlarge/shrink each roi
gw = pw * dw.exp()
gh = ph * dh.exp()
# Use network energy to shift the center of each roi
gx = px + dx_width
gy = py + dy_height
# Convert center-xy/width/height to top-left, bottom-right
x1 = gx - gw * 0.5
y1 = gy - gh * 0.5
x2 = gx + gw * 0.5
y2 = gy + gh * 0.5
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
if clip_border and max_shape is not None:
# clip bboxes with dynamic `min` and `max` for onnx
if torch.onnx.is_in_onnx_export():
from mmdet.core.export import dynamic_clip_for_onnx
x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
return bboxes
if not isinstance(max_shape, torch.Tensor):
max_shape = x1.new_tensor(max_shape)
max_shape = max_shape[..., :2].type_as(x1)
if max_shape.ndim == 2:
assert bboxes.ndim == 3
assert max_shape.size(0) == bboxes.size(0)
min_xy = x1.new_tensor(0)
max_xy = torch.cat(
[max_shape] * (deltas.size(-1) // 2),
dim=-1).flip(-1).unsqueeze(-2)
bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
return bboxes
| 15,367 | 40.535135 | 79 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/tensorflow/trans_max_iou_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners.assign_result import AssignResult
from mmdet.models.task_modules.assigners.max_iou_assigner import MaxIoUAssigner
from mmdet.registry import TASK_UTILS
@TASK_UTILS.register_module()
class TransMaxIoUAssigner(MaxIoUAssigner):
def assign(self,
pred_instances: InstanceData,
gt_instances: InstanceData,
gt_instances_ignore: Optional[InstanceData] = None,
**kwargs) -> AssignResult:
"""Assign gt to bboxes.
        This method assigns a gt bbox to every bbox (proposal/anchor). Each
        bbox will be assigned with -1 or a semi-positive number: -1 means a
        negative sample, and a semi-positive number is the (0-based) index of
        the assigned gt.
The assignment is done in following steps, the order matters.
1. assign every bbox to the background
2. assign proposals whose iou with all gts < neg_iou_thr to 0
3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that gt
4. for each gt bbox, assign its nearest proposals (may be more than
one) to itself
Args:
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors or points, or the bboxes predicted by the
previous stage, has shape (n, 4). The bboxes predicted by
the current model or stage will be named ``bboxes``,
``labels``, and ``scores``, the same as the ``InstanceData``
in other places.
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes``, with shape (k, 4),
and ``labels``, with shape (k, ).
gt_instances_ignore (:obj:`InstanceData`, optional): Instances
to be ignored during training. It includes ``bboxes``
attribute data that is ignored during training and testing.
Defaults to None.
Returns:
:obj:`AssignResult`: The assign result.
Example:
>>> from mmengine.structures import InstanceData
>>> self = MaxIoUAssigner(0.5, 0.5)
>>> pred_instances = InstanceData()
>>> pred_instances.priors = torch.Tensor([[0, 0, 10, 10],
... [10, 10, 20, 20]])
>>> gt_instances = InstanceData()
>>> gt_instances.bboxes = torch.Tensor([[0, 0, 10, 9]])
>>> gt_instances.labels = torch.Tensor([0])
>>> assign_result = self.assign(pred_instances, gt_instances)
>>> expected_gt_inds = torch.LongTensor([1, 0])
>>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
"""
gt_bboxes = gt_instances.bboxes
priors = pred_instances.priors
gt_labels = gt_instances.labels
if gt_instances_ignore is not None:
gt_bboxes_ignore = gt_instances_ignore.bboxes
else:
gt_bboxes_ignore = None
assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
gt_bboxes.shape[0] > self.gpu_assign_thr) else False
# compute overlap and assign gt on CPU when number of GT is large
if assign_on_cpu:
device = priors.device
priors = priors.cpu()
gt_bboxes = gt_bboxes.cpu()
gt_labels = gt_labels.cpu()
if gt_bboxes_ignore is not None:
gt_bboxes_ignore = gt_bboxes_ignore.cpu()
trans_priors = torch.cat([
priors[..., 1].view(-1, 1), priors[..., 0].view(-1, 1),
priors[..., 3].view(-1, 1), priors[..., 2].view(-1, 1)
],
dim=-1)
overlaps = self.iou_calculator(gt_bboxes, trans_priors)
if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
and gt_bboxes_ignore.numel() > 0 and trans_priors.numel() > 0):
if self.ignore_wrt_candidates:
ignore_overlaps = self.iou_calculator(
trans_priors, gt_bboxes_ignore, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
else:
ignore_overlaps = self.iou_calculator(
gt_bboxes_ignore, trans_priors, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
if assign_on_cpu:
assign_result.gt_inds = assign_result.gt_inds.to(device)
assign_result.max_overlaps = assign_result.max_overlaps.to(device)
if assign_result.labels is not None:
assign_result.labels = assign_result.labels.to(device)
return assign_result
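# --- Illustrative usage sketch (not part of the original assigner) -----------
# TransMaxIoUAssigner swaps the coordinate pairs of ``priors`` before computing
# IoU, so it appears to expect priors given as (y1, x1, y2, x2) while the gt
# boxes stay (x1, y1, x2, y2). A minimal example with assumed thresholds:
def _trans_assign_demo():
    assigner = TransMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    pred_instances = InstanceData()
    # same boxes as in the docstring example, but in (y1, x1, y2, x2) order
    pred_instances.priors = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
    gt_instances = InstanceData()
    gt_instances.bboxes = torch.Tensor([[0, 0, 10, 9]])
    gt_instances.labels = torch.LongTensor([0])
    return assigner.assign(pred_instances, gt_instances)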
| 5,094 | 44.900901 | 79 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/tensorflow/api_wrappers/coco_api.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# This file adds snake-case aliases for the coco api
import warnings
from collections import defaultdict
from typing import List, Optional, Union
import pycocotools
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval as _COCOeval
class COCO(_COCO):
"""This class is almost the same as official pycocotools package.
    It implements some snake-case function aliases so that the COCO class has
    the same interface as the LVIS class.
"""
def __init__(self, annotation_file=None):
if getattr(pycocotools, '__version__', '0') >= '12.0.2':
warnings.warn(
'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', # noqa: E501
UserWarning)
super().__init__(annotation_file=annotation_file)
self.img_ann_map = self.imgToAnns
self.cat_img_map = self.catToImgs
def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)
def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
cat_ids_coco = self.getCatIds(cat_names, sup_names, cat_ids)
if None in cat_names:
index = [i for i, v in enumerate(cat_names) if v is not None]
cat_ids = list(range(len(cat_names)))
for i in range(len(index)):
cat_ids[index[i]] = cat_ids_coco[i]
return cat_ids
else:
return cat_ids_coco
def get_img_ids(self, img_ids=[], cat_ids=[]):
return self.getImgIds(img_ids, cat_ids)
def load_anns(self, ids):
return self.loadAnns(ids)
def load_cats(self, ids):
return self.loadCats(ids)
def load_imgs(self, ids):
return self.loadImgs(ids)
# just for the ease of import
COCOeval = _COCOeval
class COCOPanoptic(COCO):
"""This wrapper is for loading the panoptic style annotation file.
The format is shown in the CocoPanopticDataset class.
Args:
annotation_file (str, optional): Path of annotation file.
Defaults to None.
"""
def __init__(self, annotation_file: Optional[str] = None) -> None:
super(COCOPanoptic, self).__init__(annotation_file)
def createIndex(self) -> None:
"""Create index."""
# create index
print('creating index...')
# anns stores 'segment_id -> annotation'
anns, cats, imgs = {}, {}, {}
img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
for seg_ann in ann['segments_info']:
# to match with instance.json
seg_ann['image_id'] = ann['image_id']
img_to_anns[ann['image_id']].append(seg_ann)
# segment_id is not unique in coco dataset orz...
# annotations from different images but
# may have same segment_id
if seg_ann['id'] in anns.keys():
anns[seg_ann['id']].append(seg_ann)
else:
anns[seg_ann['id']] = [seg_ann]
# filter out annotations from other images
img_to_anns_ = defaultdict(list)
for k, v in img_to_anns.items():
img_to_anns_[k] = [x for x in v if x['image_id'] == k]
img_to_anns = img_to_anns_
if 'images' in self.dataset:
for img_info in self.dataset['images']:
img_info['segm_file'] = img_info['file_name'].replace(
'jpg', 'png')
imgs[img_info['id']] = img_info
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
for seg_ann in ann['segments_info']:
cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])
print('index created!')
self.anns = anns
self.imgToAnns = img_to_anns
self.catToImgs = cat_to_imgs
self.imgs = imgs
self.cats = cats
def load_anns(self,
ids: Union[List[int], int] = []) -> Optional[List[dict]]:
"""Load anns with the specified ids.
``self.anns`` is a list of annotation lists instead of a
list of annotations.
Args:
ids (Union[List[int], int]): Integer ids specifying anns.
Returns:
anns (List[dict], optional): Loaded ann objects.
"""
anns = []
if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):
# self.anns is a list of annotation lists instead of
# a list of annotations
for id in ids:
anns += self.anns[id]
return anns
elif type(ids) == int:
return self.anns[ids]
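# --- Illustrative usage sketch (not part of the original wrapper) ------------
# The snake-case aliases defined above mirror the upstream camelCase API, so
# the wrapper can be used like the LVIS interface. The annotation path is
# hypothetical.
def _coco_alias_demo(ann_file='annotations/instances_val2017.json'):
    coco = COCO(ann_file)
    img_ids = coco.get_img_ids()
    ann_ids = coco.get_ann_ids(img_ids=img_ids[:1])
    return coco.load_anns(ann_ids)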
| 5,118 | 34.061644 | 126 |
py
|
ERD
|
ERD-main/projects/EfficientDet/efficientdet/tensorflow/api_wrappers/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval, COCOPanoptic
__all__ = ['COCO', 'COCOeval', 'COCOPanoptic']
| 147 | 28.6 | 50 |
py
|
ERD
|
ERD-main/projects/example_project/dummy/dummy_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.backbones import ResNet
from mmdet.registry import MODELS
@MODELS.register_module()
class DummyResNet(ResNet):
"""Implements a dummy ResNet wrapper for demonstration purpose.
Args:
**kwargs: All the arguments are passed to the parent class.
"""
def __init__(self, **kwargs) -> None:
print('Hello world!')
super().__init__(**kwargs)
| 441 | 26.625 | 67 |
py
|
ERD
|
ERD-main/projects/example_project/dummy/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dummy_resnet import DummyResNet
__all__ = ['DummyResNet']
| 113 | 21.8 | 47 |
py
|
ERD
|
ERD-main/projects/example_project/configs/faster-rcnn_dummy-resnet_fpn_1x_coco.py
|
_base_ = ['../../../configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py']
custom_imports = dict(imports=['projects.example_project.dummy'])
_base_.model.backbone.type = 'DummyResNet'
| 184 | 29.833333 | 72 |
py
|
ERD
|
ERD-main/projects/LabelStudio/backend_template/mmdetection.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import io
import json
import logging
import os
from urllib.parse import urlparse
import boto3
from botocore.exceptions import ClientError
from label_studio_ml.model import LabelStudioMLBase
from label_studio_ml.utils import (DATA_UNDEFINED_NAME, get_image_size,
get_single_tag_keys)
from label_studio_tools.core.utils.io import get_data_dir
from mmdet.apis import inference_detector, init_detector
logger = logging.getLogger(__name__)
class MMDetection(LabelStudioMLBase):
"""Object detector based on https://github.com/open-mmlab/mmdetection."""
def __init__(self,
config_file=None,
checkpoint_file=None,
image_dir=None,
labels_file=None,
score_threshold=0.5,
device='cpu',
**kwargs):
super(MMDetection, self).__init__(**kwargs)
config_file = config_file or os.environ['config_file']
checkpoint_file = checkpoint_file or os.environ['checkpoint_file']
self.config_file = config_file
self.checkpoint_file = checkpoint_file
self.labels_file = labels_file
# default Label Studio image upload folder
upload_dir = os.path.join(get_data_dir(), 'media', 'upload')
self.image_dir = image_dir or upload_dir
logger.debug(
f'{self.__class__.__name__} reads images from {self.image_dir}')
if self.labels_file and os.path.exists(self.labels_file):
self.label_map = json_load(self.labels_file)
else:
self.label_map = {}
self.from_name, self.to_name, self.value, self.labels_in_config = get_single_tag_keys( # noqa E501
self.parsed_label_config, 'RectangleLabels', 'Image')
schema = list(self.parsed_label_config.values())[0]
self.labels_in_config = set(self.labels_in_config)
# Collect label maps from `predicted_values="airplane,car"` attribute in <Label> tag # noqa E501
self.labels_attrs = schema.get('labels_attrs')
if self.labels_attrs:
for label_name, label_attrs in self.labels_attrs.items():
for predicted_value in label_attrs.get('predicted_values',
'').split(','):
self.label_map[predicted_value] = label_name
print('Load new model from: ', config_file, checkpoint_file)
self.model = init_detector(config_file, checkpoint_file, device=device)
self.score_thresh = score_threshold
def _get_image_url(self, task):
image_url = task['data'].get(
self.value) or task['data'].get(DATA_UNDEFINED_NAME)
if image_url.startswith('s3://'):
# presign s3 url
r = urlparse(image_url, allow_fragments=False)
bucket_name = r.netloc
key = r.path.lstrip('/')
client = boto3.client('s3')
try:
image_url = client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': bucket_name,
'Key': key
})
except ClientError as exc:
logger.warning(
f'Can\'t generate presigned URL for {image_url}. Reason: {exc}' # noqa E501
)
return image_url
def predict(self, tasks, **kwargs):
assert len(tasks) == 1
task = tasks[0]
image_url = self._get_image_url(task)
image_path = self.get_local_path(image_url)
model_results = inference_detector(self.model,
image_path).pred_instances
results = []
all_scores = []
img_width, img_height = get_image_size(image_path)
print(f'>>> model_results: {model_results}')
print(f'>>> label_map {self.label_map}')
print(f'>>> self.model.dataset_meta: {self.model.dataset_meta}')
classes = self.model.dataset_meta.get('classes')
print(f'Classes >>> {classes}')
for item in model_results:
print(f'item >>>>> {item}')
bboxes, label, scores = item['bboxes'], item['labels'], item[
'scores']
score = float(scores[-1])
if score < self.score_thresh:
continue
print(f'bboxes >>>>> {bboxes}')
print(f'label >>>>> {label}')
output_label = classes[list(self.label_map.get(label, label))[0]]
print(f'>>> output_label: {output_label}')
if output_label not in self.labels_in_config:
print(output_label + ' label not found in project config.')
continue
for bbox in bboxes:
bbox = list(bbox)
if not bbox:
continue
x, y, xmax, ymax = bbox[:4]
results.append({
'from_name': self.from_name,
'to_name': self.to_name,
'type': 'rectanglelabels',
'value': {
'rectanglelabels': [output_label],
'x': float(x) / img_width * 100,
'y': float(y) / img_height * 100,
'width': (float(xmax) - float(x)) / img_width * 100,
'height': (float(ymax) - float(y)) / img_height * 100
},
'score': score
})
all_scores.append(score)
avg_score = sum(all_scores) / max(len(all_scores), 1)
print(f'>>> RESULTS: {results}')
return [{'result': results, 'score': avg_score}]
def json_load(file, int_keys=False):
with io.open(file, encoding='utf8') as f:
data = json.load(f)
if int_keys:
return {int(k): v for k, v in data.items()}
else:
return data
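# --- Illustrative sketch (not part of the original backend) ------------------
# predict() converts absolute pixel boxes into the percentage-based
# ``rectanglelabels`` value expected by Label Studio. The helper below isolates
# that conversion; the names and values are hypothetical.
def _to_rectanglelabels(bbox, img_width, img_height, label):
    x, y, xmax, ymax = bbox[:4]
    return {
        'rectanglelabels': [label],
        'x': float(x) / img_width * 100,
        'y': float(y) / img_height * 100,
        'width': (float(xmax) - float(x)) / img_width * 100,
        'height': (float(ymax) - float(y)) / img_height * 100,
    }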
| 6,028 | 39.463087 | 107 |
py
|
ERD
|
ERD-main/projects/LabelStudio/backend_template/_wsgi.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
import logging
import logging.config
import os
logging.config.dictConfig({
'version': 1,
'formatters': {
'standard': {
'format':
'[%(asctime)s] [%(levelname)s] [%(name)s::%(funcName)s::%(lineno)d] %(message)s' # noqa E501
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'stream': 'ext://sys.stdout',
'formatter': 'standard'
}
},
'root': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': True
}
})
_DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'config.json')
def get_kwargs_from_config(config_path=_DEFAULT_CONFIG_PATH):
if not os.path.exists(config_path):
return dict()
with open(config_path) as f:
config = json.load(f)
assert isinstance(config, dict)
return config
if __name__ == '__main__':
from label_studio_ml.api import init_app
from projects.LabelStudio.backend_template.mmdetection import MMDetection
parser = argparse.ArgumentParser(description='Label studio')
parser.add_argument(
'-p',
'--port',
dest='port',
type=int,
default=9090,
help='Server port')
parser.add_argument(
'--host', dest='host', type=str, default='0.0.0.0', help='Server host')
parser.add_argument(
'--kwargs',
'--with',
dest='kwargs',
metavar='KEY=VAL',
nargs='+',
type=lambda kv: kv.split('='),
help='Additional LabelStudioMLBase model initialization kwargs')
parser.add_argument(
'-d',
'--debug',
dest='debug',
action='store_true',
help='Switch debug mode')
parser.add_argument(
'--log-level',
dest='log_level',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
default=None,
help='Logging level')
parser.add_argument(
'--model-dir',
dest='model_dir',
default=os.path.dirname(__file__),
        help='Directory where models are stored',
)
parser.add_argument(
'--check',
dest='check',
action='store_true',
help='Validate model instance before launching server')
args = parser.parse_args()
# setup logging level
if args.log_level:
logging.root.setLevel(args.log_level)
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def parse_kwargs():
param = dict()
for k, v in args.kwargs:
if v.isdigit():
param[k] = int(v)
elif v == 'True' or v == 'true':
param[k] = True
            elif v == 'False' or v == 'false':
param[k] = False
elif isfloat(v):
param[k] = float(v)
else:
param[k] = v
return param
kwargs = get_kwargs_from_config()
if args.kwargs:
kwargs.update(parse_kwargs())
if args.check:
print('Check "' + MMDetection.__name__ + '" instance creation..')
model = MMDetection(**kwargs)
app = init_app(
model_class=MMDetection,
model_dir=os.environ.get('MODEL_DIR', args.model_dir),
redis_queue=os.environ.get('RQ_QUEUE_NAME', 'default'),
redis_host=os.environ.get('REDIS_HOST', 'localhost'),
redis_port=os.environ.get('REDIS_PORT', 6379),
**kwargs)
app.run(host=args.host, port=args.port, debug=args.debug)
else:
# for uWSGI use
app = init_app(
model_class=MMDetection,
model_dir=os.environ.get('MODEL_DIR', os.path.dirname(__file__)),
redis_queue=os.environ.get('RQ_QUEUE_NAME', 'default'),
redis_host=os.environ.get('REDIS_HOST', 'localhost'),
redis_port=os.environ.get('REDIS_PORT', 6379))
| 4,005 | 26.438356 | 105 |
py
|
ERD
|
ERD-main/projects/ConvNeXt-V2/configs/mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco.py
|
_base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmclassification dev-1.x branch
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth' # noqa
image_size = (1024, 1024)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='base',
out_indices=[0, 1, 2, 3],
# TODO: verify stochastic depth rate {0.1, 0.2, 0.3, 0.4}
drop_path_rate=0.4,
layer_scale_init_value=0., # disable layer scale when using GRN
gap_before_final_norm=False,
use_grn=True, # V2 uses GRN
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[128, 256, 512, 1024]),
test_cfg=dict(
rpn=dict(nms=dict(type='nms')), # TODO: does RPN use soft_nms?
rcnn=dict(nms=dict(type='soft_nms'))))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(
batch_size=4, # total_batch_size 32 = 8 GPUS x 4 images
num_workers=8,
dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise', # TODO: sweep layer-wise lr decay?
'num_layers': 12
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1))
| 2,852 | 30.01087 | 149 |
py
|
ERD
|
ERD-main/scripts/select_categories.py
|
# coding=utf-8
import os
import time
import json
def sel_cat(anno_file, sel_num):
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(anno_file, 'r'))
    assert type(dataset) == dict, \
        'annotation file format {} not supported'.format(type(dataset))
    print('Done (t={:0.2f}s)'.format(time.time() - tic))
# sort by cat_ids
dataset['categories'] = sorted(dataset['categories'], key=lambda k: k['id'])
    # select the first `sel_num` categories
sel_cats = dataset['categories'][:sel_num]
# selected annotations
sel_cats_ids = [cat['id'] for cat in sel_cats]
sel_anno = []
sel_image_ids = []
for anno in dataset['annotations']:
if anno['category_id'] in sel_cats_ids:
sel_anno.append(anno)
sel_image_ids.append(anno['image_id'])
# selected images
sel_images = []
for image in dataset['images']:
if image['id'] in sel_image_ids:
sel_images.append(image)
# selected dataset
sel_dataset = dict()
sel_dataset['categories'] = sel_cats
sel_dataset['annotations'] = sel_anno
sel_dataset['images'] = sel_images
# writing results
# fp = open(os.path.splitext(anno_file)[0] + '_sel_first_40_cats.json', 'w')
# json.dump(sel_dataset, fp)
    # select the remaining categories (after the first `sel_num`)
sel_cats = dataset['categories'][sel_num:]
# selected annotations
sel_cats_ids = [cat['id'] for cat in sel_cats]
sel_anno = []
sel_image_ids = []
for anno in dataset['annotations']:
if anno['category_id'] in sel_cats_ids:
sel_anno.append(anno)
sel_image_ids.append(anno['image_id'])
# selected images
sel_images = []
for image in dataset['images']:
if image['id'] in sel_image_ids:
sel_images.append(image)
# selected dataset
sel_dataset = dict()
sel_dataset['categories'] = sel_cats
sel_dataset['annotations'] = sel_anno
sel_dataset['images'] = sel_images
# writing results
fp = open(os.path.splitext(anno_file)[0] + '_sel_last_40_cats.json', 'w')
json.dump(sel_dataset, fp)
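# --- Illustrative sanity check (sketch; not part of the original script) -----
# After running `sel_cat(anno_file, 40)`, the written split (e.g.
# `*_sel_last_40_cats.json`) can be inspected like this:
def check_split(split_file):
    with open(split_file, 'r') as f:
        split = json.load(f)
    print('{} categories, {} images, {} annotations'.format(
        len(split['categories']), len(split['images']),
        len(split['annotations'])))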
if __name__ == "__main__":
anno_file = '/data-nas/sy/coco/annotations/instances_val2017.json'
sel_num = 40
sel_cat(anno_file, sel_num)
| 2,262 | 30.873239 | 95 |
py
|
ERD
|
ERD-main/.dev_scripts/download_checkpoints.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
from multiprocessing import Pool
import torch
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
def download(url, out_file, min_bytes=math.pow(1024, 2), progress=True):
    # math.pow(1024, 2) means 1 MB
assert_msg = f"Downloaded url '{url}' does not exist " \
f'or size is < min_bytes={min_bytes}'
try:
print(f'Downloading {url} to {out_file}...')
torch.hub.download_url_to_file(url, str(out_file), progress=progress)
assert osp.exists(
out_file) and osp.getsize(out_file) > min_bytes, assert_msg
except Exception as e:
if osp.exists(out_file):
os.remove(out_file)
print(f'ERROR: {e}\nRe-attempting {url} to {out_file} ...')
os.system(f"curl -L '{url}' -o '{out_file}' --retry 3 -C -"
) # curl download, retry and resume on fail
finally:
if osp.exists(out_file) and osp.getsize(out_file) < min_bytes:
os.remove(out_file) # remove partial downloads
if not osp.exists(out_file):
print(f'ERROR: {assert_msg}\n')
print('=========================================\n')
def parse_args():
parser = argparse.ArgumentParser(description='Download checkpoints')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'out', type=str, help='output dir of checkpoints to be stored')
parser.add_argument(
'--nproc', type=int, default=16, help='num of Processes')
parser.add_argument(
'--intranet',
action='store_true',
help='switch to internal network url')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
mkdir_or_exist(args.out)
cfg = Config.fromfile(args.config)
checkpoint_url_list = []
checkpoint_out_list = []
for model in cfg:
model_infos = cfg[model]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
checkpoint = model_info['checkpoint']
out_file = osp.join(args.out, checkpoint)
if not osp.exists(out_file):
url = model_info['url']
if args.intranet is True:
url = url.replace('.com', '.sensetime.com')
url = url.replace('https', 'http')
checkpoint_url_list.append(url)
checkpoint_out_list.append(out_file)
if len(checkpoint_url_list) > 0:
pool = Pool(min(os.cpu_count(), args.nproc))
pool.starmap(download, zip(checkpoint_url_list, checkpoint_out_list))
else:
print('No files to download!')
| 2,822 | 32.607143 | 77 |
py
|
ERD
|
ERD-main/.dev_scripts/convert_train_benchmark_script.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model json to script')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def determine_gpus(cfg_name):
gpus = 8
gpus_pre_node = 8
if cfg_name.find('16x') >= 0:
gpus = 16
elif cfg_name.find('4xb4') >= 0:
gpus = 4
gpus_pre_node = 4
elif 'lad' in cfg_name:
gpus = 2
gpus_pre_node = 2
return gpus, gpus_pre_node
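# --- Illustrative quick check (sketch; not part of the original script) ------
# determine_gpus() keys off substrings of the config file name; the names below
# are hypothetical.
def _determine_gpus_demo():
    assert determine_gpus('retinanet_r50_fpn_16x2_1x_coco.py') == (16, 8)
    assert determine_gpus('yolov3_d53_4xb4-320-273e_coco.py') == (4, 4)
    assert determine_gpus('lad_r50_paa_r101_fpn_2x_coco.py') == (2, 2)
    assert determine_gpus('faster-rcnn_r50_fpn_1x_coco.py') == (8, 8)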
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
f'Expected out file path suffix is .sh, but get .{out_suffix}'
assert args.out or args.run, \
('Please specify at least one operation (save/run/ the '
'script) with the argument "--out" or "--run"')
root_name = './tools'
train_script_name = osp.join(root_name, 'slurm_train.sh')
commands = []
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
work_dir = 'WORK_DIR=$2 '
commands.append(work_dir)
commands.append('\n')
cpus_pre_task = 'CPUS_PER_TASK=${3:-4} '
commands.append(cpus_pre_task)
commands.append('\n')
commands.append('\n')
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
for i, cfg in enumerate(model_cfgs):
cfg = cfg.strip()
if len(cfg) == 0:
continue
# print cfg name
echo_info = f'echo \'{cfg}\' &'
commands.append(echo_info)
commands.append('\n')
fname, _ = osp.splitext(osp.basename(cfg))
out_fname = '$WORK_DIR/' + fname
gpus, gpus_pre_node = determine_gpus(cfg)
command_info = f'GPUS={gpus} GPUS_PER_NODE={gpus_pre_node} ' \
                           f'CPUS_PER_TASK=$CPUS_PER_TASK {train_script_name} '
command_info += '$PARTITION '
command_info += f'{fname} '
command_info += f'{cfg} '
command_info += f'{out_fname} '
command_info += '--cfg-options default_hooks.checkpoint.' \
'max_keep_ckpts=1 '
command_info += '&'
commands.append(command_info)
if i < len(model_cfgs):
commands.append('\n')
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
| 2,993 | 27.514286 | 79 |
py
|
ERD
|
ERD-main/.dev_scripts/gather_test_benchmark_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
from mmengine.config import Config
from mmengine.fileio import dump, load
from mmengine.utils import mkdir_or_exist
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'--out', type=str, help='output path of gathered metrics to be stored')
parser.add_argument(
'--not-show', action='store_true', help='not show metrics')
parser.add_argument(
'--show-all', action='store_true', help='show all model metrics')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
root_path = args.root
metrics_out = args.out
result_dict = {}
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
metric_json_dir = osp.join(root_path, fname)
if osp.exists(metric_json_dir):
json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
if len(json_list) > 0:
log_json_path = list(sorted(json_list))[-1]
metric = load(log_json_path)
if config in metric.get('config', {}):
new_metrics = dict()
for record_metric_key in record_metrics:
record_metric_key_bk = record_metric_key
old_metric = record_metrics[record_metric_key]
if record_metric_key == 'AR_1000':
record_metric_key = 'AR@1000'
if record_metric_key not in metric['metric']:
raise KeyError(
'record_metric_key not exist, please '
'check your config')
new_metric = round(
metric['metric'][record_metric_key] * 100, 1)
new_metrics[record_metric_key_bk] = new_metric
if args.show_all:
result_dict[config] = dict(
before=record_metrics, after=new_metrics)
else:
for record_metric_key in record_metrics:
old_metric = record_metrics[record_metric_key]
new_metric = new_metrics[record_metric_key]
if old_metric != new_metric:
result_dict[config] = dict(
before=record_metrics,
after=new_metrics)
break
else:
print(f'{config} not included in: {log_json_path}')
else:
print(f'{config} not exist file: {metric_json_dir}')
else:
print(f'{config} not exist dir: {metric_json_dir}')
if metrics_out:
mkdir_or_exist(metrics_out)
dump(result_dict, osp.join(metrics_out, 'batch_test_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
| 3,963 | 39.865979 | 79 |
py
|
ERD
|
ERD-main/.dev_scripts/benchmark_filter.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
parser = argparse.ArgumentParser(description='Filter configs to train')
parser.add_argument(
'--basic-arch',
action='store_true',
help='to train models in basic arch')
parser.add_argument(
'--datasets', action='store_true', help='to train models in dataset')
parser.add_argument(
'--data-pipeline',
action='store_true',
help='to train models related to data pipeline, e.g. augmentations')
parser.add_argument(
'--nn-module',
action='store_true',
help='to train models related to neural network modules')
parser.add_argument(
'--model-options',
nargs='+',
help='custom options to special model benchmark')
parser.add_argument(
'--out',
type=str,
default='batch_train_list.txt',
help='output path of gathered metrics to be stored')
args = parser.parse_args()
return args
basic_arch_root = [
'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
]
datasets_root = [
'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
]
data_pipeline_root = ['albu_example', 'instaboost']
nn_module_root = [
'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
]
benchmark_pool = [
'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
'configs/atss/atss_r50_fpn_1x_coco.py',
'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
'configs/centripetalnet/'
'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
'configs/cornernet/'
'cornernet_hourglass104_mstest_8x6_210e_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
'configs/detectors/detectors_htc_r50_1x_coco.py',
'configs/detr/detr_r50_8x2_150e_coco.py',
'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py', # noqa
'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py',
'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py',
'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
'configs/gfl/gfl_r50_fpn_1x_coco.py',
'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
'configs/htc/htc_r50_fpn_1x_coco.py',
'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
'configs/paa/paa_r50_fpn_1x_coco.py',
'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
'configs/resnest/'
'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
'configs/rpn/rpn_r50_fpn_1x_coco.py',
'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
'configs/ssd/ssd300_coco.py',
'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
'configs/yolact/yolact_r50_1x8_coco.py',
'configs/yolo/yolov3_d53_320_273e_coco.py',
'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
'configs/scnet/scnet_r50_fpn_1x_coco.py',
'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
]
def main():
args = parse_args()
benchmark_type = []
if args.basic_arch:
benchmark_type += basic_arch_root
if args.datasets:
benchmark_type += datasets_root
if args.data_pipeline:
benchmark_type += data_pipeline_root
if args.nn_module:
benchmark_type += nn_module_root
special_model = args.model_options
if special_model is not None:
benchmark_type += special_model
config_dpath = 'configs/'
benchmark_configs = []
for cfg_root in benchmark_type:
cfg_dir = osp.join(config_dpath, cfg_root)
configs = os.scandir(cfg_dir)
for cfg in configs:
config_path = osp.join(cfg_dir, cfg.name)
if (config_path in benchmark_pool
and config_path not in benchmark_configs):
benchmark_configs.append(config_path)
print(f'Totally found {len(benchmark_configs)} configs to benchmark')
with open(args.out, 'w') as f:
for config in benchmark_configs:
f.write(config + '\n')
if __name__ == '__main__':
main()
| 7,106 | 41.303571 | 92 |
py
|
ERD
|
ERD-main/.dev_scripts/benchmark_options.py
|
# Copyright (c) OpenMMLab. All rights reserved.
third_part_libs = [
'pip install -r ../requirements/albu.txt',
'pip install instaboostfast',
'pip install git+https://github.com/cocodataset/panopticapi.git',
'pip install timm',
'pip install mmcls>=1.0.0rc0',
'pip install git+https://github.com/lvis-dataset/lvis-api.git',
]
default_floating_range = 0.5
model_floating_ranges = {'atss/atss_r50_fpn_1x_coco.py': 0.3}
| 441 | 30.571429 | 69 |
py
|
ERD
|
ERD-main/.dev_scripts/gather_models.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import json
import os.path as osp
import shutil
import subprocess
from collections import OrderedDict
import torch
import yaml
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist, scandir
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, stream, OrderedDumper, **kwds)
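# --- Illustrative usage (sketch; not part of the original script) ------------
# ordered_yaml_dump() registers a representer for OrderedDict so the mapping
# can be serialised with its insertion order preserved; keys/values below are
# hypothetical.
def _ordered_yaml_dump_demo():
    info = OrderedDict(Name='demo_model', Config='configs/demo.py')
    return ordered_yaml_dump(info, default_flow_style=False)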
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# remove ema state_dict
for key in list(checkpoint['state_dict']):
if key.startswith('ema_'):
checkpoint['state_dict'].pop(key)
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8])
subprocess.Popen(['mv', out_file, final_file])
return final_file
def is_by_epoch(config):
cfg = Config.fromfile('./configs/' + config)
return cfg.runner.type == 'EpochBasedRunner'
def get_final_epoch_or_iter(config):
cfg = Config.fromfile('./configs/' + config)
if cfg.runner.type == 'EpochBasedRunner':
return cfg.runner.max_epochs
else:
return cfg.runner.max_iters
def get_best_epoch_or_iter(exp_dir):
best_epoch_iter_full_path = list(
sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1]
best_epoch_or_iter_model_path = best_epoch_iter_full_path.split('/')[-1]
best_epoch_or_iter = best_epoch_or_iter_model_path.\
split('_')[-1].split('.')[0]
return best_epoch_or_iter_model_path, int(best_epoch_or_iter)
def get_real_epoch_or_iter(config):
cfg = Config.fromfile('./configs/' + config)
if cfg.runner.type == 'EpochBasedRunner':
epoch = cfg.runner.max_epochs
if cfg.data.train.type == 'RepeatDataset':
epoch *= cfg.data.train.times
return epoch
else:
return cfg.runner.max_iters
def get_final_results(log_json_path,
epoch_or_iter,
results_lut,
by_epoch=True):
result_dict = dict()
last_val_line = None
last_train_line = None
last_val_line_idx = -1
last_train_line_idx = -1
with open(log_json_path, 'r') as f:
for i, line in enumerate(f.readlines()):
log_line = json.loads(line)
if 'mode' not in log_line.keys():
continue
if by_epoch:
if (log_line['mode'] == 'train'
and log_line['epoch'] == epoch_or_iter):
result_dict['memory'] = log_line['memory']
if (log_line['mode'] == 'val'
and log_line['epoch'] == epoch_or_iter):
result_dict.update({
key: log_line[key]
for key in results_lut if key in log_line
})
return result_dict
else:
if log_line['mode'] == 'train':
last_train_line_idx = i
last_train_line = log_line
if log_line and log_line['mode'] == 'val':
last_val_line_idx = i
last_val_line = log_line
# bug: max_iters = 768, last_train_line['iter'] = 750
assert last_val_line_idx == last_train_line_idx + 1, \
'Log file is incomplete'
result_dict['memory'] = last_train_line['memory']
result_dict.update({
key: last_val_line[key]
for key in results_lut if key in last_val_line
})
return result_dict
def get_dataset_name(config):
# If there are more dataset, add here.
name_map = dict(
CityscapesDataset='Cityscapes',
CocoDataset='COCO',
CocoPanopticDataset='COCO',
DeepFashionDataset='Deep Fashion',
LVISV05Dataset='LVIS v0.5',
LVISV1Dataset='LVIS v1',
VOCDataset='Pascal VOC',
WIDERFaceDataset='WIDER Face',
OpenImagesDataset='OpenImagesDataset',
OpenImagesChallengeDataset='OpenImagesChallengeDataset',
Objects365V1Dataset='Objects365 v1',
Objects365V2Dataset='Objects365 v2')
cfg = Config.fromfile('./configs/' + config)
return name_map[cfg.dataset_type]
def convert_model_info_to_pwc(model_infos):
pwc_files = {}
for model in model_infos:
cfg_folder_name = osp.split(model['config'])[-2]
pwc_model_info = OrderedDict()
pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
pwc_model_info['In Collection'] = 'Please fill in Collection name'
pwc_model_info['Config'] = osp.join('configs', model['config'])
# get metadata
memory = round(model['results']['memory'] / 1024, 1)
meta_data = OrderedDict()
meta_data['Training Memory (GB)'] = memory
if 'epochs' in model:
meta_data['Epochs'] = get_real_epoch_or_iter(model['config'])
else:
meta_data['Iterations'] = get_real_epoch_or_iter(model['config'])
pwc_model_info['Metadata'] = meta_data
# get dataset name
dataset_name = get_dataset_name(model['config'])
# get results
results = []
# if there are more metrics, add here.
if 'bbox_mAP' in model['results']:
metric = round(model['results']['bbox_mAP'] * 100, 1)
results.append(
OrderedDict(
Task='Object Detection',
Dataset=dataset_name,
Metrics={'box AP': metric}))
if 'segm_mAP' in model['results']:
metric = round(model['results']['segm_mAP'] * 100, 1)
results.append(
OrderedDict(
Task='Instance Segmentation',
Dataset=dataset_name,
Metrics={'mask AP': metric}))
if 'PQ' in model['results']:
metric = round(model['results']['PQ'], 1)
results.append(
OrderedDict(
Task='Panoptic Segmentation',
Dataset=dataset_name,
Metrics={'PQ': metric}))
pwc_model_info['Results'] = results
link_string = 'https://download.openmmlab.com/mmdetection/v2.0/'
link_string += '{}/{}'.format(model['config'].rstrip('.py'),
osp.split(model['model_path'])[-1])
pwc_model_info['Weights'] = link_string
if cfg_folder_name in pwc_files:
pwc_files[cfg_folder_name].append(pwc_model_info)
else:
pwc_files[cfg_folder_name] = [pwc_model_info]
return pwc_files
def parse_args():
parser = argparse.ArgumentParser(description='Gather benchmarked models')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'out', type=str, help='output path of gathered models to be stored')
parser.add_argument(
'--best',
action='store_true',
help='whether to gather the best model.')
args = parser.parse_args()
return args
def main():
args = parse_args()
models_root = args.root
models_out = args.out
mkdir_or_exist(models_out)
# find all models in the root directory to be gathered
raw_configs = list(scandir('./configs', '.py', recursive=True))
# filter configs that is not trained in the experiments dir
used_configs = []
for raw_config in raw_configs:
if osp.exists(osp.join(models_root, raw_config)):
used_configs.append(raw_config)
print(f'Find {len(used_configs)} models to be gathered')
# find final_ckpt and log file for trained each config
# and parse the best performance
model_infos = []
for used_config in used_configs:
exp_dir = osp.join(models_root, used_config)
by_epoch = is_by_epoch(used_config)
# check whether the exps is finished
if args.best is True:
final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir)
else:
final_epoch_or_iter = get_final_epoch_or_iter(used_config)
final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter',
final_epoch_or_iter)
model_path = osp.join(exp_dir, final_model)
# skip if the model is still training
if not osp.exists(model_path):
continue
# get the latest logs
log_json_path = list(
sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
cfg = Config.fromfile('./configs/' + used_config)
results_lut = cfg.evaluation.metric
if not isinstance(results_lut, list):
results_lut = [results_lut]
# case when using VOC, the evaluation key is only 'mAP'
# when using Panoptic Dataset, the evaluation key is 'PQ'.
for i, key in enumerate(results_lut):
if 'mAP' not in key and 'PQ' not in key:
results_lut[i] = key + '_mAP'
model_performance = get_final_results(log_json_path,
final_epoch_or_iter, results_lut,
by_epoch)
if model_performance is None:
continue
model_time = osp.split(log_txt_path)[-1].split('.')[0]
model_info = dict(
config=used_config,
results=model_performance,
model_time=model_time,
final_model=final_model,
log_json_path=osp.split(log_json_path)[-1])
model_info['epochs' if by_epoch else 'iterations'] =\
final_epoch_or_iter
model_infos.append(model_info)
# publish model for each checkpoint
publish_model_infos = []
for model in model_infos:
model_publish_dir = osp.join(models_out, model['config'].rstrip('.py'))
mkdir_or_exist(model_publish_dir)
model_name = osp.split(model['config'])[-1].split('.')[0]
model_name += '_' + model['model_time']
publish_model_path = osp.join(model_publish_dir, model_name)
trained_model_path = osp.join(models_root, model['config'],
model['final_model'])
# convert model
final_model_path = process_checkpoint(trained_model_path,
publish_model_path)
# copy log
shutil.copy(
osp.join(models_root, model['config'], model['log_json_path']),
osp.join(model_publish_dir, f'{model_name}.log.json'))
shutil.copy(
osp.join(models_root, model['config'],
model['log_json_path'].rstrip('.json')),
osp.join(model_publish_dir, f'{model_name}.log'))
# copy config to guarantee reproducibility
config_path = model['config']
config_path = osp.join(
'configs',
config_path) if 'configs' not in config_path else config_path
target_config_path = osp.split(config_path)[-1]
shutil.copy(config_path, osp.join(model_publish_dir,
target_config_path))
model['model_path'] = final_model_path
publish_model_infos.append(model)
models = dict(models=publish_model_infos)
print(f'Totally gathered {len(publish_model_infos)} models')
dump(models, osp.join(models_out, 'model_info.json'))
pwc_files = convert_model_info_to_pwc(publish_model_infos)
for name in pwc_files:
with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')
if __name__ == '__main__':
main()
| 12,549 | 35.376812 | 79 |
py
|
ERD
|
ERD-main/.dev_scripts/test_init_backbone.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Check out backbone whether successfully load pretrained checkpoint."""
import copy
import os
from os.path import dirname, exists, join
import pytest
from mmengine.config import Config
from mmengine.runner import CheckpointLoader
from mmengine.utils import ProgressBar
from mmdet.registry import MODELS
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(__file__))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
def _traversed_config_file():
"""We traversed all potential config files under the `config` file. If you
need to print details or debug code, you can use this function.
If the `backbone.init_cfg` is None (do not use `Pretrained` init way), you
need add the folder name in `ignores_folder` (if the config files in this
folder all set backbone.init_cfg is None) or add config name in
`ignores_file` (if the config file set backbone.init_cfg is None)
"""
config_path = _get_config_directory()
check_cfg_names = []
# `base`, `legacy_1.x` and `common` ignored by default.
ignores_folder = ['_base_', 'legacy_1.x', 'common']
    # 'ld' needs to load a teacher model; if you want to check 'ld',
    # please check the teacher_config path first.
ignores_folder += ['ld']
    # `selfsup_pretrain` needs a converted model; if you want to check this
    # model, convert it first.
ignores_folder += ['selfsup_pretrain']
# the `init_cfg` in 'centripetalnet', 'cornernet', 'cityscapes',
# 'scratch' is None.
# the `init_cfg` in ssdlite(`ssdlite_mobilenetv2_scratch_600e_coco.py`)
# is None
    # Please confirm `backbone.init_cfg` is None first.
ignores_folder += ['centripetalnet', 'cornernet', 'cityscapes', 'scratch']
ignores_file = ['ssdlite_mobilenetv2_scratch_600e_coco.py']
for config_file_name in os.listdir(config_path):
if config_file_name not in ignores_folder:
config_file = join(config_path, config_file_name)
if os.path.isdir(config_file):
for config_sub_file in os.listdir(config_file):
if config_sub_file.endswith('py') and \
config_sub_file not in ignores_file:
name = join(config_file, config_sub_file)
check_cfg_names.append(name)
return check_cfg_names
def _check_backbone(config, print_cfg=True):
"""Check out backbone whether successfully load pretrained model, by using
`backbone.init_cfg`.
First, using `CheckpointLoader.load_checkpoint` to load the checkpoint
without loading models.
Then, using `MODELS.build` to build models, and using
`model.init_weights()` to initialize the parameters.
Finally, assert weights and bias of each layer loaded from pretrained
checkpoint are equal to the weights and bias of original checkpoint.
For the convenience of comparison, we sum up weights and bias of
each loaded layer separately.
Args:
config (str): Config file path.
print_cfg (bool): Whether print logger and return the result.
Returns:
results (str or None): If backbone successfully load pretrained
checkpoint, return None; else, return config file path.
"""
if print_cfg:
print('-' * 15 + 'loading ', config)
cfg = Config.fromfile(config)
init_cfg = None
try:
init_cfg = cfg.model.backbone.init_cfg
init_flag = True
except AttributeError:
init_flag = False
if init_cfg is None or init_cfg.get('type') != 'Pretrained':
init_flag = False
if init_flag:
checkpoint = CheckpointLoader.load_checkpoint(init_cfg.checkpoint)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
model = MODELS.build(cfg.model)
model.init_weights()
checkpoint_layers = state_dict.keys()
for name, value in model.backbone.state_dict().items():
if name in checkpoint_layers:
assert value.equal(state_dict[name])
if print_cfg:
print('-' * 10 + 'Successfully load checkpoint' + '-' * 10 +
'\n', )
return None
else:
if print_cfg:
print(config + '\n' + '-' * 10 +
'config file do not have init_cfg' + '-' * 10 + '\n')
return config
@pytest.mark.parametrize('config', _traversed_config_file())
def test_load_pretrained(config):
"""Check out backbone whether successfully load pretrained model by using
`backbone.init_cfg`.
Details please refer to `_check_backbone`
"""
_check_backbone(config, print_cfg=False)
def _test_load_pretrained():
"""We traversed all potential config files under the `config` file. If you
need to print details or debug code, you can use this function.
Returns:
check_cfg_names (list[str]): Config files that backbone initialized
from pretrained checkpoint might be problematic. Need to recheck
the config file. The output including the config files that the
backbone.init_cfg is None
"""
check_cfg_names = _traversed_config_file()
need_check_cfg = []
prog_bar = ProgressBar(len(check_cfg_names))
for config in check_cfg_names:
init_cfg_name = _check_backbone(config)
if init_cfg_name is not None:
need_check_cfg.append(init_cfg_name)
prog_bar.update()
print('These config files need to be checked again')
print(need_check_cfg)
| 6,556 | 35.631285 | 78 |
py
|
ERD
|
ERD-main/.dev_scripts/benchmark_inference_fps.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.dist import init_dist
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist
from terminaltables import GithubFlavoredMarkdownTable
from tools.analysis_tools.benchmark import repeat_measure_inference_speed
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet benchmark a model of FPS')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument(
'--round-num',
type=int,
default=1,
help='round a number to a given precision in decimal digits')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
help='number of repeat times of measurement for averaging the results')
parser.add_argument(
'--out', type=str, help='output path of gathered fps to be stored')
parser.add_argument(
'--max-iter', type=int, default=2000, help='num of max iter')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase'
'the inference speed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def results2markdown(result_dict):
table_data = []
is_multiple_results = False
for cfg_name, value in result_dict.items():
name = cfg_name.replace('configs/', '')
fps = value['fps']
ms_times_pre_image = value['ms_times_pre_image']
if isinstance(fps, list):
is_multiple_results = True
mean_fps = value['mean_fps']
mean_times_pre_image = value['mean_times_pre_image']
fps_str = ','.join([str(s) for s in fps])
ms_times_pre_image_str = ','.join(
[str(s) for s in ms_times_pre_image])
table_data.append([
name, fps_str, mean_fps, ms_times_pre_image_str,
mean_times_pre_image
])
else:
table_data.append([name, fps, ms_times_pre_image])
if is_multiple_results:
table_data.insert(0, [
'model', 'fps', 'mean_fps', 'times_pre_image(ms)',
'mean_times_pre_image(ms)'
])
else:
table_data.insert(0, ['model', 'fps', 'times_pre_image(ms)'])
table = GithubFlavoredMarkdownTable(table_data)
print(table.table, flush=True)
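# --- Illustrative input (sketch; not part of the original script) ------------
# results2markdown() expects a mapping from config path to measured speeds;
# the numbers below are hypothetical.
_EXAMPLE_RESULT_DICT = {
    'configs/retinanet/retinanet_r50_fpn_1x_coco.py':
    dict(fps=21.3, ms_times_pre_image=46.9),
}
# results2markdown(_EXAMPLE_RESULT_DICT) prints a three-column markdown table.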
if __name__ == '__main__':
args = parse_args()
assert args.round_num >= 0
assert args.repeat_num >= 1
config = Config.fromfile(args.config)
if args.launcher == 'none':
raise NotImplementedError('Only supports distributed mode')
else:
init_dist(args.launcher)
result_dict = {}
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
cfg_path = model_info['config'].strip()
cfg = Config.fromfile(cfg_path)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
fps = repeat_measure_inference_speed(cfg, checkpoint,
args.max_iter,
args.log_interval,
args.fuse_conv_bn,
args.repeat_num)
if args.repeat_num > 1:
fps_list = [round(fps_, args.round_num) for fps_ in fps]
times_pre_image_list = [
round(1000 / fps_, args.round_num) for fps_ in fps
]
mean_fps = round(
sum(fps_list) / len(fps_list), args.round_num)
mean_times_pre_image = round(
sum(times_pre_image_list) / len(times_pre_image_list),
args.round_num)
print(
f'{cfg_path} '
f'Overall fps: {fps_list}[{mean_fps}] img / s, '
f'times per image: '
f'{times_pre_image_list}[{mean_times_pre_image}] '
f'ms / img',
flush=True)
result_dict[cfg_path] = dict(
fps=fps_list,
mean_fps=mean_fps,
ms_times_pre_image=times_pre_image_list,
mean_times_pre_image=mean_times_pre_image)
else:
print(
f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
f'times per image: {1000 / fps:.{args.round_num}f} '
f'ms / img',
flush=True)
result_dict[cfg_path] = dict(
fps=round(fps, args.round_num),
ms_times_pre_image=round(1000 / fps, args.round_num))
except Exception as e:
print(f'{cfg_path} error: {repr(e)}')
if args.repeat_num > 1:
result_dict[cfg_path] = dict(
fps=[0],
mean_fps=0,
ms_times_pre_image=[0],
mean_times_pre_image=0)
else:
result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0)
if args.out:
mkdir_or_exist(args.out)
dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
results2markdown(result_dict)
| 6,830 | 38.715116 | 79 |
py
|
ERD
|
ERD-main/.dev_scripts/gather_train_benchmark_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
from gather_models import get_final_results
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist
try:
import xlrd
except ImportError:
xlrd = None
try:
import xlutils
from xlutils.copy import copy
except ImportError:
xlutils = None
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument(
'--out', type=str, help='output path of gathered metrics to be stored')
parser.add_argument(
'--not-show', action='store_true', help='not show metrics')
parser.add_argument(
'--excel', type=str, help='input path of excel to be recorded')
parser.add_argument(
'--ncol', type=int, help='Number of column to be modified or appended')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if args.excel:
assert args.ncol, 'Please specify "--excel" and "--ncol" ' \
'at the same time'
if xlrd is None:
raise RuntimeError(
                'xlrd is not installed, '
                'please use "pip install xlrd==1.2.0" to install')
if xlutils is None:
raise RuntimeError(
                'xlutils is not installed, '
                'please use "pip install xlutils==2.0.0" to install')
readbook = xlrd.open_workbook(args.excel)
sheet = readbook.sheet_by_name('Sheet1')
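        # Map each config path (first column, from row 3 onwards) to its row
        # index so that existing records in the sheet can be updated in place.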
sheet_info = {}
total_nrows = sheet.nrows
for i in range(3, sheet.nrows):
sheet_info[sheet.row_values(i)[0]] = i
xlrw = copy(readbook)
table = xlrw.get_sheet(0)
root_path = args.root
metrics_out = args.out
result_dict = {}
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
for i, config in enumerate(model_cfgs):
config = config.strip()
if len(config) == 0:
continue
config_name = osp.split(config)[-1]
config_name = osp.splitext(config_name)[0]
result_path = osp.join(root_path, config_name)
if osp.exists(result_path):
# 1 read config
cfg = Config.fromfile(config)
total_epochs = cfg.runner.max_epochs
final_results = cfg.evaluation.metric
if not isinstance(final_results, list):
final_results = [final_results]
final_results_out = []
for key in final_results:
if 'proposal_fast' in key:
final_results_out.append('AR@1000') # RPN
elif 'mAP' not in key:
final_results_out.append(key + '_mAP')
# 2 determine whether total_epochs ckpt exists
ckpt_path = f'epoch_{total_epochs}.pth'
if osp.exists(osp.join(result_path, ckpt_path)):
log_json_path = list(
sorted(glob.glob(osp.join(result_path,
'*.log.json'))))[-1]
# 3 read metric
model_performance = get_final_results(
log_json_path, total_epochs, final_results_out)
if model_performance is None:
print(f'log file error: {log_json_path}')
continue
for performance in model_performance:
if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']:
metric = round(
model_performance[performance] * 100, 1)
model_performance[performance] = metric
result_dict[config] = model_performance
# update and append excel content
if args.excel:
if 'AR@1000' in model_performance:
metrics = f'{model_performance["AR@1000"]}' \
f'(AR@1000)'
elif 'segm_mAP' in model_performance:
metrics = f'{model_performance["bbox_mAP"]}/' \
f'{model_performance["segm_mAP"]}'
else:
metrics = f'{model_performance["bbox_mAP"]}'
row_num = sheet_info.get(config, None)
if row_num:
table.write(row_num, args.ncol, metrics)
else:
table.write(total_nrows, 0, config)
table.write(total_nrows, args.ncol, metrics)
total_nrows += 1
else:
print(f'{config} not exist: {ckpt_path}')
else:
print(f'not exist: {config}')
# 4 save or print results
if metrics_out:
mkdir_or_exist(metrics_out)
dump(result_dict, osp.join(metrics_out, 'model_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
if args.excel:
        filename, suffix = osp.splitext(args.excel)
        xlrw.save(f'{filename}_o{suffix}')
        print(f'>>> Output {filename}_o{suffix}')
| 5,904 | 37.848684 | 79 |
py
|
ERD
|
ERD-main/.dev_scripts/check_links.py
|
# Modified from:
# https://github.com/allenai/allennlp/blob/main/scripts/check_links.py
import argparse
import logging
import os
import pathlib
import re
import sys
from multiprocessing.dummy import Pool
from typing import NamedTuple, Optional, Tuple
import requests
from mmengine.logging import MMLogger
def parse_args():
parser = argparse.ArgumentParser(
description='Goes through all the inline-links '
'in markdown files and reports the breakages')
parser.add_argument(
'--num-threads',
type=int,
default=100,
help='Number of processes to confirm the link')
parser.add_argument('--https-proxy', type=str, help='https proxy')
parser.add_argument(
'--out',
type=str,
default='link_reports.txt',
help='output path of reports')
args = parser.parse_args()
return args
OK_STATUS_CODES = (
200,
401, # the resource exists but may require some sort of login.
403, # ^ same
405, # HEAD method not allowed.
# the resource exists, but our default 'Accept-' header may not
# match what the server can provide.
406,
)
class MatchTuple(NamedTuple):
source: str
name: str
link: str
def check_link(
match_tuple: MatchTuple,
http_session: requests.Session,
        logger: Optional[logging.Logger] = None
) -> Tuple[MatchTuple, bool, Optional[str]]:
reason: Optional[str] = None
if match_tuple.link.startswith('http'):
result_ok, reason = check_url(match_tuple, http_session)
else:
result_ok = check_path(match_tuple)
if logger is None:
print(f" {'✓' if result_ok else '✗'} {match_tuple.link}")
else:
logger.info(f" {'✓' if result_ok else '✗'} {match_tuple.link}")
return match_tuple, result_ok, reason
def check_url(match_tuple: MatchTuple,
http_session: requests.Session) -> Tuple[bool, str]:
"""Check if a URL is reachable."""
try:
result = http_session.head(
match_tuple.link, timeout=5, allow_redirects=True)
return (
result.ok or result.status_code in OK_STATUS_CODES,
f'status code = {result.status_code}',
)
except (requests.ConnectionError, requests.Timeout):
return False, 'connection error'
def check_path(match_tuple: MatchTuple) -> bool:
"""Check if a file in this repository exists."""
relative_path = match_tuple.link.split('#')[0]
full_path = os.path.join(
os.path.dirname(str(match_tuple.source)), relative_path)
return os.path.exists(full_path)
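# Example invocation (illustrative; arguments depend on your setup):
#   python .dev_scripts/check_links.py --num-threads 16 --out link_reports.txt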
def main():
args = parse_args()
# setup logger
logger = MMLogger.get_instance(name='mmdet', log_file=args.out)
# setup https_proxy
if args.https_proxy:
os.environ['https_proxy'] = args.https_proxy
# setup http_session
http_session = requests.Session()
for resource_prefix in ('http://', 'https://'):
http_session.mount(
resource_prefix,
requests.adapters.HTTPAdapter(
max_retries=5,
pool_connections=20,
pool_maxsize=args.num_threads),
)
logger.info('Finding all markdown files in the current directory...')
project_root = (pathlib.Path(__file__).parent / '..').resolve()
markdown_files = project_root.glob('**/*.md')
all_matches = set()
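    # Collect every inline markdown link of the form [name](link); links that
    # point to localhost are skipped.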
url_regex = re.compile(r'\[([^!][^\]]+)\]\(([^)(]+)\)')
for markdown_file in markdown_files:
with open(markdown_file) as handle:
for line in handle.readlines():
matches = url_regex.findall(line)
for name, link in matches:
if 'localhost' not in link:
all_matches.add(
MatchTuple(
source=str(markdown_file),
name=name,
link=link))
    logger.info(f' {len(all_matches)} links found')
logger.info('Checking to make sure we can retrieve each link...')
with Pool(processes=args.num_threads) as pool:
results = pool.starmap(check_link, [(match, http_session, logger)
for match in list(all_matches)])
# collect unreachable results
unreachable_results = [(match_tuple, reason)
for match_tuple, success, reason in results
if not success]
if unreachable_results:
logger.info('================================================')
logger.info(f'Unreachable links ({len(unreachable_results)}):')
for match_tuple, reason in unreachable_results:
logger.info(' > Source: ' + match_tuple.source)
logger.info(' Name: ' + match_tuple.name)
logger.info(' Link: ' + match_tuple.link)
if reason is not None:
logger.info(' Reason: ' + reason)
sys.exit(1)
logger.info('No Unreachable link found.')
if __name__ == '__main__':
main()
| 5,064 | 31.056962 | 76 |
py
|
ERD
|
ERD-main/.dev_scripts/batch_test_list.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# missing wider_face/timm_example/strong_baselines/simple_copy_paste/
# selfsup_pretrain/seesaw_loss/pascal_voc/openimages/lvis/ld/lad/cityscapes/deepfashion
# yapf: disable
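# Each entry below maps a model name to its config path, checkpoint filename,
# download url, evaluation metric type(s) and the expected metric values used
# when regression-testing the corresponding pretrained model.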
atss = dict(
config='configs/atss/atss_r50_fpn_1x_coco.py',
checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.4),
)
autoassign = dict(
config='configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py',
checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
carafe = dict(
config='configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.6),
)
cascade_rcnn = [
dict(
config='configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth',
eval='bbox',
url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth', # noqa
metric=dict(bbox_mAP=40.3),
),
dict(
config='configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=41.2, segm_mAP=35.9),
),
]
cascade_rpn = dict(
config='configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py', # noqa
checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
centernet = dict(
config='configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py',
checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=29.5),
)
centripetalnet = dict(
config='configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py', # noqa
checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=44.7),
)
convnext = dict(
config='configs/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py', # noqa
checkpoint='cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=51.8, segm_mAP=44.8),
)
cornernet = dict(
config='configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py',
checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=41.2),
)
dcn = dict(
config='configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=41.3),
)
dcnv2 = dict(
config='configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
ddod = dict(
config='configs/ddod/ddod_r50_fpn_1x_coco.py',
checkpoint='ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=41.7),
)
deformable_detr = dict(
config='configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py',
checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=44.5),
)
detectors = dict(
config='configs/detectors/detectors_htc-r50_1x_coco.py',
checkpoint='detectors_htc_r50_1x_coco-329b1453.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=49.1, segm_mAP=42.6),
)
detr = dict(
config='configs/detr/detr_r50_8xb2-150e_coco.py',
checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.1),
)
double_heads = dict(
config='configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py',
checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.0),
)
dyhead = dict(
config='configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py',
checkpoint='atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=43.3),
)
dynamic_rcnn = dict(
config='configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py',
checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.9),
)
efficientnet = dict(
config='configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py',
checkpoint='retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.5),
)
empirical_attention = dict(
config='configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py', # noqa
checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.0),
)
faster_rcnn = dict(
config='configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
fcos = dict(
config='configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py', # noqa
checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
foveabox = dict(
config='configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py',
checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
fpg = dict(
config='configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py',
checkpoint='mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=43.0, segm_mAP=38.1),
)
free_anchor = dict(
config='configs/free_anchor/freeanchor_r50_fpn_1x_coco.py',
checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
fsaf = dict(
config='configs/fsaf/fsaf_r50_fpn_1x_coco.py',
checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
gcnet = dict(
config='configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py', # noqa
checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.2),
)
gfl = dict(
config='configs/gfl/gfl_r50_fpn_1x_coco.py',
checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.2),
)
ghm = dict(
config='configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py',
checkpoint='retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.0),
)
gn = dict(
config='configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py',
checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.1, segm_mAP=36.4),
)
gn_ws = dict(
config='configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.7),
)
grid_rcnn = dict(
config='configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py',
checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
groie = dict(
config='configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
guided_anchoring = dict(
config='configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py', # noqa
checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=36.9),
)
hrnet = dict(
config='configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py',
checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=36.9),
)
htc = dict(
config='configs/htc/htc_r50_fpn_1x_coco.py',
checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=42.3, segm_mAP=37.4),
)
instaboost = dict(
config='configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py',
checkpoint='mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.6, segm_mAP=36.6),
)
libra_rcnn = dict(
config='configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py',
checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
mask2former = dict(
config='configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py',
checkpoint='mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth', # noqa
eval=['bbox', 'segm', 'PQ'],
metric=dict(PQ=51.9, bbox_mAP=44.8, segm_mAP=41.9),
)
mask_rcnn = dict(
config='configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=34.7),
)
maskformer = dict(
config='configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py',
checkpoint='maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth', # noqa
eval='PQ',
metric=dict(PQ=46.9),
)
ms_rcnn = dict(
config='configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py',
checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=36.0),
)
nas_fcos = dict(
config='configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py', # noqa
checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.4),
)
nas_fpn = dict(
config='configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py',
checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.5),
)
paa = dict(
config='configs/paa/paa_r50_fpn_1x_coco.py',
checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
pafpn = dict(
config='configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py',
checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
panoptic_fpn = dict(
config='configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py',
checkpoint='panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth', # noqa
eval='PQ',
metric=dict(PQ=40.2),
)
pisa = dict(
config='configs/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py',
checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.4),
)
point_rend = dict(
config='configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py',
checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.4, segm_mAP=36.3),
)
pvt = dict(
config='configs/pvt/retinanet_pvt-s_fpn_1x_coco.py',
checkpoint='retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
queryinst = dict(
config='configs/queryinst/queryinst_r50_fpn_1x_coco.py',
checkpoint='queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=42.0, segm_mAP=37.5),
)
regnet = dict(
config='configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py',
checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.7),
)
reppoints = dict(
config='configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py',
checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.0),
)
res2net = dict(
config='configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py',
checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=43.0),
)
resnest = dict(
config='configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py', # noqa
checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=42.0),
)
resnet_strikes_back = dict(
config='configs/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py', # noqa
checkpoint='mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=41.2, segm_mAP=38.2),
)
retinanet = dict(
config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=36.5),
)
rpn = dict(
config='configs/rpn/rpn_r50_fpn_1x_coco.py',
checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth', # noqa
eval='proposal_fast',
metric=dict(AR_1000=58.2),
)
sabl = [
dict(
config='configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py',
checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.7),
),
dict(
config='configs/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py',
checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.9),
),
]
scnet = dict(
config='configs/scnet/scnet_r50_fpn_1x_coco.py',
checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=43.5),
)
scratch = dict(
config='configs/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py',
checkpoint='scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=41.2, segm_mAP=37.4),
)
solo = dict(
config='configs/solo/decoupled-solo_r50_fpn_1x_coco.py',
checkpoint='decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth', # noqa
eval='segm',
metric=dict(segm_mAP=33.9),
)
solov2 = dict(
config='configs/solov2/solov2_r50_fpn_1x_coco.py',
checkpoint='solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth', # noqa
eval='segm',
metric=dict(segm_mAP=34.8),
)
sparse_rcnn = dict(
config='configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
ssd = [
dict(
config='configs/ssd/ssd300_coco.py',
checkpoint='ssd300_coco_20210803_015428-d231a06e.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=25.5),
),
dict(
config='configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py',
checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=21.3),
),
]
swin = dict(
config='configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py',
checkpoint='mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=42.7, segm_mAP=39.3),
)
tood = dict(
config='configs/tood/tood_r50_fpn_1x_coco.py',
checkpoint='tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=42.4),
)
tridentnet = dict(
config='configs/tridentnet/tridentnet_r50-caffe_1x_coco.py',
checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.6),
)
vfnet = dict(
config='configs/vfnet/vfnet_r50_fpn_1x_coco.py',
checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=41.6),
)
yolact = dict(
config='configs/yolact/yolact_r50_1xb8-55e_coco.py',
checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=31.2, segm_mAP=29.0),
)
yolo = dict(
config='configs/yolo/yolov3_d53_8xb8-320-273e_coco.py',
checkpoint='yolov3_d53_320_273e_coco-421362b6.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=27.9),
)
yolof = dict(
config='configs/yolof/yolof_r50-c5_8xb8-1x_coco.py',
checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
yolox = dict(
config='configs/yolox/yolox_tiny_8xb8-300e_coco.py',
checkpoint='yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=31.8),
)
# yapf: enable
| 29,576 | 53.17033 | 249 |
py
|
ERD
|
ERD-main/.dev_scripts/benchmark_train.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
import os.path as osp
from argparse import ArgumentParser
from mmengine.config import Config, DictAction
from mmengine.logging import MMLogger, print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.testing import replace_to_ceph
from mmdet.utils import register_all_modules, replace_cfg_vals
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--ceph', action='store_true')
parser.add_argument('--save-ckpt', action='store_true')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
action='store_true',
help='resume from the latest checkpoint in the work_dir automatically')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
args = parser.parse_args()
return args
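# Example invocation (illustrative; arguments depend on your setup):
#   python .dev_scripts/benchmark_train.py <benchmark_config> \
#       --work-dir work_dirs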
# TODO: Need to refactor train.py so that it can be reused.
def fast_train_model(config_name, args, logger=None):
cfg = Config.fromfile(config_name)
cfg = replace_cfg_vals(cfg)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = osp.join(args.work_dir,
osp.splitext(osp.basename(config_name))[0])
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(config_name))[0])
ckpt_hook = cfg.default_hooks.checkpoint
by_epoch = ckpt_hook.get('by_epoch', True)
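    # Register a FastStopTrainingHook so that each benchmarked config only
    # trains for a few iterations/epochs: this verifies that the training
    # pipeline runs, not the final accuracy.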
fast_stop_hook = dict(type='FastStopTrainingHook')
fast_stop_hook['by_epoch'] = by_epoch
if args.save_ckpt:
if by_epoch:
interval = 1
stop_iter_or_epoch = 2
else:
interval = 4
stop_iter_or_epoch = 10
fast_stop_hook['stop_iter_or_epoch'] = stop_iter_or_epoch
fast_stop_hook['save_ckpt'] = True
ckpt_hook.interval = interval
if 'custom_hooks' in cfg:
cfg.custom_hooks.append(fast_stop_hook)
else:
custom_hooks = [fast_stop_hook]
cfg.custom_hooks = custom_hooks
# TODO: temporary plan
if 'visualizer' in cfg:
if 'name' in cfg.visualizer:
del cfg.visualizer.name
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
if args.ceph:
replace_to_ceph(cfg)
cfg.resume = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
runner.train()
# Sample test whether the train code is correct
def main(args):
# register all modules in mmdet into the registries
register_all_modules(init_default_scope=False)
config = Config.fromfile(args.config)
# test all model
logger = MMLogger.get_instance(
name='MMLogger',
log_file='benchmark_train.log',
log_level=logging.ERROR)
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
try:
fast_train_model(config_name, args, logger)
except RuntimeError as e:
# quick exit is the normal exit message
if 'quick exit' not in repr(e):
                    logger.error(f'{config_name}: {repr(e)}')
except Exception as e:
                logger.error(f'{config_name}: {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
| 6,412 | 34.826816 | 79 |
py
|
ERD
|
ERD-main/.dev_scripts/benchmark_valid_flops.py
|
import logging
import re
import tempfile
from argparse import ArgumentParser
from collections import OrderedDict
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from mmengine import Config, DictAction
from mmengine.analysis import get_model_complexity_info
from mmengine.analysis.print_helper import _format_size
from mmengine.fileio import FileClient
from mmengine.logging import MMLogger
from mmengine.model import revert_sync_batchnorm
from mmengine.runner import Runner
from modelindex.load_model_index import load
from rich.console import Console
from rich.table import Table
from rich.text import Text
from tqdm import tqdm
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules
console = Console()
MMDET_ROOT = Path(__file__).absolute().parents[1]
def parse_args():
parser = ArgumentParser(description='Valid all models in model-index.yml')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--checkpoint_root',
help='Checkpoint file root path. If set, load checkpoint before test.')
parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
parser.add_argument('--models', nargs='+', help='models name to inference')
parser.add_argument(
'--batch-size',
type=int,
default=1,
help='The batch size during the inference.')
parser.add_argument(
'--flops', action='store_true', help='Get Flops and Params of models')
parser.add_argument(
'--flops-str',
action='store_true',
help='Output FLOPs and params counts in a string form.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--size_divisor',
type=int,
default=32,
        help='Pad the input image to the minimum size that is divisible '
        'by size_divisor; -1 means do not pad the image.')
args = parser.parse_args()
return args
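# Example invocation (illustrative; arguments depend on your setup):
#   python .dev_scripts/benchmark_valid_flops.py --flops --flops-str \
#       --models <model-name-pattern>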
def inference(config_file, checkpoint, work_dir, args, exp_name):
logger = MMLogger.get_instance(name='MMLogger')
    logger.warning('If you want to test FLOPs, please make sure torch>=1.12')
cfg = Config.fromfile(config_file)
cfg.work_dir = work_dir
cfg.load_from = checkpoint
cfg.log_level = 'WARN'
cfg.experiment_name = exp_name
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# forward the model
result = {'model': config_file.stem}
if args.flops:
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
divisor = args.size_divisor
if divisor > 0:
h = int(np.ceil(h / divisor)) * divisor
w = int(np.ceil(w / divisor)) * divisor
input_shape = (3, h, w)
result['resolution'] = input_shape
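        # First try to compute the complexity directly from a random input
        # tensor; if that fails (e.g. the model needs real data samples),
        # fall back to building a dataloader and using a real batch below.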
try:
cfg = Config.fromfile(config_file)
if hasattr(cfg, 'head_norm_cfg'):
cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)
cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(
type='SyncBN', requires_grad=True)
cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(
type='SyncBN', requires_grad=True)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
model = MODELS.build(cfg.model)
input = torch.rand(1, *input_shape)
if torch.cuda.is_available():
model.cuda()
input = input.cuda()
model = revert_sync_batchnorm(model)
inputs = (input, )
model.eval()
outputs = get_model_complexity_info(
model, input_shape, inputs, show_table=False, show_arch=False)
flops = outputs['flops']
params = outputs['params']
activations = outputs['activations']
result['Get Types'] = 'direct'
        except Exception:
logger = MMLogger.get_instance(name='MMLogger')
logger.warning(
'Direct get flops failed, try to get flops with data')
cfg = Config.fromfile(config_file)
if hasattr(cfg, 'head_norm_cfg'):
cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)
cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(
type='SyncBN', requires_grad=True)
cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(
type='SyncBN', requires_grad=True)
data_loader = Runner.build_dataloader(cfg.val_dataloader)
data_batch = next(iter(data_loader))
model = MODELS.build(cfg.model)
if torch.cuda.is_available():
model = model.cuda()
model = revert_sync_batchnorm(model)
model.eval()
_forward = model.forward
data = model.data_preprocessor(data_batch)
del data_loader
model.forward = partial(
_forward, data_samples=data['data_samples'])
outputs = get_model_complexity_info(
model,
input_shape,
data['inputs'],
show_table=False,
show_arch=False)
flops = outputs['flops']
params = outputs['params']
activations = outputs['activations']
result['Get Types'] = 'dataloader'
if args.flops_str:
flops = _format_size(flops)
params = _format_size(params)
activations = _format_size(activations)
result['flops'] = flops
result['params'] = params
return result
def show_summary(summary_data, args):
table = Table(title='Validation Benchmark Regression Summary')
table.add_column('Model')
table.add_column('Validation')
table.add_column('Resolution (c, h, w)')
if args.flops:
table.add_column('Flops', justify='right', width=11)
table.add_column('Params', justify='right')
for model_name, summary in summary_data.items():
row = [model_name]
valid = summary['valid']
color = 'green' if valid == 'PASS' else 'red'
row.append(f'[{color}]{valid}[/{color}]')
if valid == 'PASS':
row.append(str(summary['resolution']))
if args.flops:
row.append(str(summary['flops']))
row.append(str(summary['params']))
table.add_row(*row)
console.print(table)
table_data = {
x.header: [Text.from_markup(y).plain for y in x.cells]
for x in table.columns
}
table_pd = pd.DataFrame(table_data)
table_pd.to_csv('./mmdetection_flops.csv')
# Sample test whether the inference code is correct
def main(args):
register_all_modules()
model_index_file = MMDET_ROOT / 'model-index.yml'
model_index = load(str(model_index_file))
model_index.build_models_with_collections()
models = OrderedDict({model.name: model for model in model_index.models})
logger = MMLogger(
'validation',
logger_name='validation',
log_file='benchmark_test_image.log',
log_level=logging.INFO)
if args.models:
patterns = [
re.compile(pattern.replace('+', '_')) for pattern in args.models
]
filter_models = {}
for k, v in models.items():
k = k.replace('+', '_')
if any([re.match(pattern, k) for pattern in patterns]):
filter_models[k] = v
if len(filter_models) == 0:
print('No model found, please specify models in:')
print('\n'.join(models.keys()))
return
models = filter_models
summary_data = {}
tmpdir = tempfile.TemporaryDirectory()
for model_name, model_info in tqdm(models.items()):
if model_info.config is None:
continue
model_info.config = model_info.config.replace('%2B', '+')
config = Path(model_info.config)
        if not (MMDET_ROOT / config).exists():
            logger.error(f'{model_name}: {config} not found.')
            continue
logger.info(f'Processing: {model_name}')
http_prefix = 'https://download.openmmlab.com/mmdetection/'
if args.checkpoint_root is not None:
root = args.checkpoint_root
if 's3://' in args.checkpoint_root:
from petrel_client.common.exception import AccessDeniedError
file_client = FileClient.infer_client(uri=root)
checkpoint = file_client.join_path(
root, model_info.weights[len(http_prefix):])
try:
exists = file_client.exists(checkpoint)
except AccessDeniedError:
exists = False
else:
checkpoint = Path(root) / model_info.weights[len(http_prefix):]
exists = checkpoint.exists()
if exists:
checkpoint = str(checkpoint)
else:
print(f'WARNING: {model_name}: {checkpoint} not found.')
checkpoint = None
else:
checkpoint = None
try:
# build the model from a config file and a checkpoint file
result = inference(MMDET_ROOT / config, checkpoint, tmpdir.name,
args, model_name)
result['valid'] = 'PASS'
        except Exception:
import traceback
logger.error(f'"{config}" :\n{traceback.format_exc()}')
result = {'valid': 'FAIL'}
summary_data[model_name] = result
tmpdir.cleanup()
show_summary(summary_data, args)
if __name__ == '__main__':
args = parse_args()
main(args)
| 10,539 | 34.608108 | 79 |
py
|
ERD
|
ERD-main/.dev_scripts/benchmark_test_image.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
from argparse import ArgumentParser
import mmcv
from mmengine.config import Config
from mmengine.logging import MMLogger
from mmengine.utils import mkdir_or_exist
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
parser.add_argument('--aug', action='store_true', help='aug test')
parser.add_argument('--model-name', help='model name to inference')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--out-dir', default=None, help='Dir to output file')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='the interval of show (s), 0 is block')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
return args
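# Example invocation (illustrative; arguments depend on your setup):
#   python .dev_scripts/benchmark_test_image.py <benchmark_config> \
#       <checkpoint_root> --out-dir work_dirs --score-thr 0.3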
def inference_model(config_name, checkpoint, visualizer, args, logger=None):
cfg = Config.fromfile(config_name)
if args.aug:
raise NotImplementedError()
model = init_detector(
cfg, checkpoint, palette=args.palette, device=args.device)
visualizer.dataset_meta = model.dataset_meta
# test a single image
result = inference_detector(model, args.img)
# show the results
if args.show or args.out_dir is not None:
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
out_file = None
if args.out_dir is not None:
out_dir = args.out_dir
mkdir_or_exist(out_dir)
out_file = osp.join(
out_dir,
                config_name.split('/')[-1].replace('.py', '.jpg'))
visualizer.add_datasample(
'result',
img,
data_sample=result,
draw_gt=False,
show=args.show,
wait_time=args.wait_time,
out_file=out_file,
pred_score_thr=args.score_thr)
return result
# Sample test whether the inference code is correct
def main(args):
# register all modules in mmdet into the registries
register_all_modules()
config = Config.fromfile(args.config)
# init visualizer
visualizer_cfg = dict(type='DetLocalVisualizer', name='visualizer')
visualizer = VISUALIZERS.build(visualizer_cfg)
# test single model
if args.model_name:
if args.model_name in config:
model_infos = config[args.model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
model_info = model_infos[0]
config_name = model_info['config'].strip()
print(f'processing: {config_name}', flush=True)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, visualizer, args)
return
else:
raise RuntimeError('model name input error.')
# test all model
logger = MMLogger.get_instance(
name='MMLogger',
log_file='benchmark_test_image.log',
log_level=logging.ERROR)
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, visualizer, args,
logger)
except Exception as e:
                logger.error(f'{config_name}: {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
| 4,670 | 33.6 | 77 |
py
|
ERD
|
ERD-main/.dev_scripts/convert_test_benchmark_script.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model list to script')
parser.add_argument('config', help='test config file path')
parser.add_argument('--port', type=int, default=29666, help='dist port')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
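# Example invocation (illustrative; arguments depend on your setup):
#   python .dev_scripts/convert_test_benchmark_script.py <benchmark_config> \
#       --out benchmark_test.sh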
def process_model_info(model_info, work_dir):
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
job_name = fname
work_dir = '$WORK_DIR/' + fname
checkpoint = model_info['checkpoint'].strip()
return dict(
config=config,
job_name=job_name,
work_dir=work_dir,
checkpoint=checkpoint)
def create_test_bash_info(commands, model_test_dict, port, script_name,
partition):
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
echo_info = f' \necho \'{config}\' &'
commands.append(echo_info)
commands.append('\n')
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
                   f'CPUS_PER_TASK=$CPUS_PER_TASK {script_name} '
command_info += f'{partition} '
command_info += f'{job_name} '
command_info += f'{config} '
command_info += f'$CHECKPOINT_DIR/{checkpoint} '
command_info += f'--work-dir {work_dir} '
    command_info += f'--cfg-options env_cfg.dist_cfg.port={port} '
command_info += ' &'
commands.append(command_info)
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
f'Expected out file path suffix is .sh, but get .{out_suffix}'
assert args.out or args.run, \
        ('Please specify at least one operation (save or run the '
         'script) with the argument "--out" or "--run"')
commands = []
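    # The generated bash script expects four positional arguments:
    #   $1 partition, $2 checkpoint root, $3 work dir,
    #   $4 (optional) CPUs per task.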
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
checkpoint_root = 'CHECKPOINT_DIR=$2 '
commands.append(checkpoint_root)
commands.append('\n')
work_dir = 'WORK_DIR=$3 '
commands.append(work_dir)
commands.append('\n')
    cpus_per_task = 'CPUS_PER_TASK=${4:-2} '
    commands.append(cpus_per_task)
commands.append('\n')
script_name = osp.join('tools', 'slurm_test.sh')
port = args.port
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'])
model_test_dict = process_model_info(model_info, work_dir)
create_test_bash_info(commands, model_test_dict, port, script_name,
'$PARTITION')
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
| 3,414 | 28.695652 | 79 |
py
|
ERD
|
ERD-main/.dev_scripts/benchmark_test.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
import os.path as osp
from argparse import ArgumentParser
from mmengine.config import Config, DictAction
from mmengine.logging import MMLogger
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.testing import replace_to_ceph
from mmdet.utils import register_all_modules, replace_cfg_vals
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument('--work-dir', help='the dir to save logs')
parser.add_argument('--ceph', action='store_true')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
# TODO: Need to refactor test.py so that it can be reused.
def fast_test_model(config_name, checkpoint, args, logger=None):
cfg = Config.fromfile(config_name)
cfg = replace_cfg_vals(cfg)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = osp.join(args.work_dir,
osp.splitext(osp.basename(config_name))[0])
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(config_name))[0])
if args.ceph:
replace_to_ceph(cfg)
cfg.load_from = checkpoint
# TODO: temporary plan
if 'visualizer' in cfg:
if 'name' in cfg.visualizer:
del cfg.visualizer.name
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
runner.test()
# Sample test whether the inference code is correct
def main(args):
# register all modules in mmdet into the registries
register_all_modules(init_default_scope=False)
config = Config.fromfile(args.config)
# test all model
logger = MMLogger.get_instance(
name='MMLogger',
log_file='benchmark_test.log',
log_level=logging.ERROR)
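    # Run a fast test for every model entry in the benchmark config; a failing
    # model is logged to benchmark_test.log instead of aborting the whole run.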
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
fast_test_model(config_name, checkpoint, args, logger)
except Exception as e:
                logger.error(f'{config_name}: {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
| 4,055 | 33.965517 | 79 |
py
|
ERD
|
ERD-main/tests/test_engine/__init__.py
| 0 | 0 | 0 |
py
|
|
ERD
|
ERD-main/tests/test_engine/test_schedulers/test_quadratic_warmup.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
import torch.nn.functional as F
import torch.optim as optim
from mmengine.optim.scheduler import _ParamScheduler
from mmengine.testing import assert_allclose
from mmdet.engine.schedulers import (QuadraticWarmupLR,
QuadraticWarmupMomentum,
QuadraticWarmupParamScheduler)
class ToyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
return self.conv2(F.relu(self.conv1(x)))
class TestQuadraticWarmupScheduler(TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
self.model = ToyModel()
self.optimizer = optim.SGD(
self.model.parameters(), lr=0.05, momentum=0.01, weight_decay=5e-4)
def _test_scheduler_value(self,
schedulers,
targets,
epochs=10,
param_name='lr'):
if isinstance(schedulers, _ParamScheduler):
schedulers = [schedulers]
for epoch in range(epochs):
for param_group, target in zip(self.optimizer.param_groups,
targets):
print(param_group[param_name])
assert_allclose(
target[epoch],
param_group[param_name],
msg='{} is wrong in epoch {}: expected {}, got {}'.format(
param_name, epoch, target[epoch],
param_group[param_name]),
atol=1e-5,
rtol=0)
[scheduler.step() for scheduler in schedulers]
def test_quadratic_warmup_scheduler(self):
with self.assertRaises(ValueError):
QuadraticWarmupParamScheduler(self.optimizer, param_name='lr')
epochs = 10
iters = 5
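        # Quadratic warmup: the value ramps up with ((i + 1) / iters) ** 2
        # during the first `iters` steps and then stays at the base value.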
warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]
single_targets = [x * 0.05 for x in warmup_factor] + [0.05] * (
epochs - iters)
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = QuadraticWarmupParamScheduler(
self.optimizer, param_name='lr', end=iters)
self._test_scheduler_value(scheduler, targets, epochs)
def test_quadratic_warmup_scheduler_convert_iterbased(self):
epochs = 10
end = 5
epoch_length = 11
iters = end * epoch_length
warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]
single_targets = [x * 0.05 for x in warmup_factor] + [0.05] * (
epochs * epoch_length - iters)
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = QuadraticWarmupParamScheduler.build_iter_from_epoch(
self.optimizer,
param_name='lr',
end=end,
epoch_length=epoch_length)
self._test_scheduler_value(scheduler, targets, epochs * epoch_length)
def test_quadratic_warmup_lr(self):
epochs = 10
iters = 5
warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]
single_targets = [x * 0.05 for x in warmup_factor] + [0.05] * (
epochs - iters)
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = QuadraticWarmupLR(self.optimizer, end=iters)
self._test_scheduler_value(scheduler, targets, epochs)
def test_quadratic_warmup_momentum(self):
epochs = 10
iters = 5
warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]
single_targets = [x * 0.01 for x in warmup_factor] + [0.01] * (
epochs - iters)
targets = [single_targets, [x * epochs for x in single_targets]]
scheduler = QuadraticWarmupMomentum(self.optimizer, end=iters)
self._test_scheduler_value(
scheduler, targets, epochs, param_name='momentum')
| 4,323 | 38.669725 | 79 |
py
|
ERD
|
ERD-main/tests/test_engine/test_hooks/test_num_class_check_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from unittest import TestCase
from unittest.mock import Mock
from mmcv.cnn import VGG
from mmengine.dataset import BaseDataset
from torch import nn
from mmdet.engine.hooks import NumClassCheckHook
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
class TestNumClassCheckHook(TestCase):
def setUp(self):
# Setup NumClassCheckHook
hook = NumClassCheckHook()
self.hook = hook
# Setup runner mock
runner = Mock()
runner.model = Mock()
runner.logger = Mock()
runner.logger.warning = Mock()
runner.train_dataloader = Mock()
runner.val_dataloader = Mock()
self.runner = runner
# Setup dataset
metainfo = dict(classes=None)
self.none_classmeta_dataset = BaseDataset(
metainfo=metainfo, lazy_init=True)
metainfo = dict(classes='class_name')
self.str_classmeta_dataset = BaseDataset(
metainfo=metainfo, lazy_init=True)
metainfo = dict(classes=('bus', 'car'))
self.normal_classmeta_dataset = BaseDataset(
metainfo=metainfo, lazy_init=True)
# Setup valid model
valid_model = nn.Module()
valid_model.add_module('backbone', VGG(depth=11))
fused_semantic_head = FusedSemanticHead(
num_ins=1,
fusion_level=0,
num_convs=1,
in_channels=1,
conv_out_channels=1)
valid_model.add_module('semantic_head', fused_semantic_head)
rpn_head = nn.Module()
rpn_head.num_classes = 1
valid_model.add_module('rpn_head', rpn_head)
bbox_head = nn.Module()
bbox_head.num_classes = 2
valid_model.add_module('bbox_head', bbox_head)
self.valid_model = valid_model
# Setup invalid model
invalid_model = nn.Module()
bbox_head = nn.Module()
bbox_head.num_classes = 4
invalid_model.add_module('bbox_head', bbox_head)
self.invalid_model = invalid_model
    def test_before_train_epoch(self):
runner = deepcopy(self.runner)
# Test when dataset.metainfo['classes'] is None
runner.train_dataloader.dataset = self.none_classmeta_dataset
self.hook.before_train_epoch(runner)
runner.logger.warning.assert_called_once()
# Test when dataset.metainfo['classes'] is a str
runner.train_dataloader.dataset = self.str_classmeta_dataset
with self.assertRaises(AssertionError):
self.hook.before_train_epoch(runner)
runner.train_dataloader.dataset = self.normal_classmeta_dataset
# Test `num_classes` of model is compatible with dataset
runner.model = self.valid_model
self.hook.before_train_epoch(runner)
# Test `num_classes` of model is not compatible with dataset
runner.model = self.invalid_model
with self.assertRaises(AssertionError):
self.hook.before_train_epoch(runner)
def test_before_val_epoch(self):
runner = deepcopy(self.runner)
# Test when dataset.metainfo['classes'] is None
runner.val_dataloader.dataset = self.none_classmeta_dataset
self.hook.before_val_epoch(runner)
runner.logger.warning.assert_called_once()
# Test when dataset.metainfo['classes'] is a str
runner.val_dataloader.dataset = self.str_classmeta_dataset
with self.assertRaises(AssertionError):
self.hook.before_val_epoch(runner)
runner.val_dataloader.dataset = self.normal_classmeta_dataset
# Test `num_classes` of model is compatible with dataset
runner.model = self.valid_model
self.hook.before_val_epoch(runner)
# Test `num_classes` of model is not compatible with dataset
runner.model = self.invalid_model
with self.assertRaises(AssertionError):
self.hook.before_val_epoch(runner)
| 3,988 | 36.280374 | 71 |
py
|
ERD
|
ERD-main/tests/test_engine/test_hooks/test_memory_profiler_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
from mmdet.engine.hooks import MemoryProfilerHook
class TestMemoryProfilerHook(TestCase):
def test_after_train_iter(self):
hook = MemoryProfilerHook(2)
runner = Mock()
runner.logger = Mock()
runner.logger.info = Mock()
hook.after_train_iter(runner, 0)
runner.logger.info.assert_not_called()
hook.after_train_iter(runner, 1)
runner.logger.info.assert_called_once()
def test_after_val_iter(self):
hook = MemoryProfilerHook(2)
runner = Mock()
runner.logger = Mock()
runner.logger.info = Mock()
hook.after_val_iter(runner, 0)
runner.logger.info.assert_not_called()
hook.after_val_iter(runner, 1)
runner.logger.info.assert_called_once()
def test_after_test_iter(self):
hook = MemoryProfilerHook(2)
runner = Mock()
runner.logger = Mock()
runner.logger.info = Mock()
hook.after_test_iter(runner, 0)
runner.logger.info.assert_not_called()
hook.after_test_iter(runner, 1)
runner.logger.info.assert_called_once()
| 1,222 | 30.358974 | 49 |
py
|
ERD
|
ERD-main/tests/test_engine/test_hooks/test_sync_norm_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
import torch.nn as nn
from mmdet.engine.hooks import SyncNormHook
class TestSyncNormHook(TestCase):
@patch(
'mmdet.engine.hooks.sync_norm_hook.get_dist_info', return_value=(0, 1))
def test_before_val_epoch_non_dist(self, mock):
model = nn.Sequential(
nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),
nn.Linear(5, 10))
runner = Mock()
runner.model = model
hook = SyncNormHook()
hook.before_val_epoch(runner)
@patch(
'mmdet.engine.hooks.sync_norm_hook.get_dist_info', return_value=(0, 2))
def test_before_val_epoch_dist(self, mock):
model = nn.Sequential(
nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),
nn.Linear(5, 10))
runner = Mock()
runner.model = model
hook = SyncNormHook()
hook.before_val_epoch(runner)
@patch(
'mmdet.engine.hooks.sync_norm_hook.get_dist_info', return_value=(0, 2))
def test_before_val_epoch_dist_no_norm(self, mock):
model = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10))
runner = Mock()
runner.model = model
hook = SyncNormHook()
hook.before_val_epoch(runner)
| 1,374 | 31.738095 | 79 |
py
|
ERD
|
ERD-main/tests/test_engine/test_hooks/test_mean_teacher_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.evaluator import BaseMetric
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODEL_WRAPPERS
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, inputs, data_samples, mode='tensor'):
labels = torch.stack(data_samples)
inputs = torch.stack(inputs)
outputs = self.linear(inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
@DATASETS.register_module(force=True)
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_samples=self.label[index])
class ToyMetric1(BaseMetric):
def __init__(self, collect_device='cpu', dummy_metrics=None):
super().__init__(collect_device=collect_device)
self.dummy_metrics = dummy_metrics
def process(self, data_batch, predictions):
result = {'acc': 1}
self.results.append(result)
def compute_metrics(self, results):
return dict(acc=1)
class TestMeanTeacherHook(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_mean_teacher_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
runner = Runner(
model=model,
train_dataloader=dict(
dataset=DummyDataset(),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=DummyDataset(),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=[ToyMetric1()],
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='MeanTeacherHook')],
experiment_name='test1')
runner.train()
self.assertTrue(
osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
# checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
        # load the checkpoint and run testing
runner = Runner(
model=model,
test_dataloader=dict(
dataset=DummyDataset(),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=[ToyMetric1()],
test_cfg=dict(),
work_dir=self.temp_dir.name,
default_scope='mmdet',
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='MeanTeacherHook')],
experiment_name='test2')
runner.test()
@MODEL_WRAPPERS.register_module()
class DummyWrapper(BaseModel):
def __init__(self, model):
super().__init__()
self.module = model
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
# with model wrapper
runner = Runner(
model=DummyWrapper(ToyModel2()),
test_dataloader=dict(
dataset=DummyDataset(),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=[ToyMetric1()],
test_cfg=dict(),
work_dir=self.temp_dir.name,
default_scope='mmdet',
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='MeanTeacherHook')],
experiment_name='test3')
runner.test()
| 5,299 | 29.113636 | 78 |
py
|
ERD
|
ERD-main/tests/test_engine/test_hooks/test_yolox_mode_switch_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
from mmdet.engine.hooks import YOLOXModeSwitchHook
class TestYOLOXModeSwitchHook(TestCase):
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_is_model_wrapper_and_persistent_workers_on(
self, mock_is_model_wrapper):
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
def test_not_model_wrapper_and_persistent_workers_off(self):
runner = Mock()
runner.model = Mock()
runner.model.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = False
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.model.bbox_head.use_l1)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
| 2,083 | 37.592593 | 74 |
py
|
ERD
|
ERD-main/tests/test_engine/test_hooks/test_visualization_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import shutil
import time
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.structures import InstanceData
from mmdet.engine.hooks import DetVisualizationHook
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
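    # Sample random (cx, cy, w, h) boxes and convert them to (x1, y1, x2, y2)
    # corners clamped to an image of size h x w.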
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T
return bboxes
class TestVisualizationHook(TestCase):
def setUp(self) -> None:
DetLocalVisualizer.get_instance('current_visualizer')
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(5, 10, 12)
pred_instances.labels = torch.randint(0, 2, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.set_metainfo({
'img_path':
osp.join(osp.dirname(__file__), '../../data/color.jpg')
})
pred_det_data_sample.pred_instances = pred_instances
self.outputs = [pred_det_data_sample] * 2
def test_after_val_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook()
hook.after_val_iter(runner, 1, {}, self.outputs)
def test_after_test_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook(draw=True)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertEqual(hook._test_index, 2)
        # test saving results when test_out_dir is specified
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
test_out_dir = timestamp + '1'
runner.work_dir = timestamp
runner.timestamp = '1'
hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}'))
hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}'))
shutil.rmtree(f'{timestamp}')
| 2,420 | 33.098592 | 74 |
py
|
ERD
|
ERD-main/tests/test_engine/test_hooks/test_checkloss_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmdet.engine.hooks import CheckInvalidLossHook
class TestCheckInvalidLossHook(TestCase):
def test_after_train_iter(self):
n = 50
hook = CheckInvalidLossHook(n)
runner = Mock()
runner.logger = Mock()
runner.logger.info = Mock()
# Test `after_train_iter` function within the n iteration.
runner.iter = 10
outputs = dict(loss=torch.LongTensor([2]))
hook.after_train_iter(runner, 10, outputs=outputs)
outputs = dict(loss=torch.tensor(float('nan')))
hook.after_train_iter(runner, 10, outputs=outputs)
outputs = dict(loss=torch.tensor(float('inf')))
hook.after_train_iter(runner, 10, outputs=outputs)
# Test `after_train_iter` at the n iteration.
runner.iter = n - 1
outputs = dict(loss=torch.LongTensor([2]))
hook.after_train_iter(runner, n - 1, outputs=outputs)
outputs = dict(loss=torch.tensor(float('nan')))
with self.assertRaises(AssertionError):
hook.after_train_iter(runner, n - 1, outputs=outputs)
outputs = dict(loss=torch.tensor(float('inf')))
with self.assertRaises(AssertionError):
hook.after_train_iter(runner, n - 1, outputs=outputs)
| 1,372 | 35.131579 | 66 |
py
|
ERD
|
ERD-main/tests/test_engine/test_runner/test_loops.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.evaluator import Evaluator
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, inputs, data_samples, mode='tensor'):
labels = torch.stack(data_samples)
inputs = torch.stack(inputs)
outputs = self.linear(inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
self.semi_test_cfg = dict(predict_on='teacher')
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
@DATASETS.register_module(force=True)
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_samples=self.label[index])
class TestTeacherStudentValLoop(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_teacher_student_val_loop(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
evaluator.__class__ = Evaluator
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(type='TeacherStudentValLoop'),
default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),
experiment_name='test1')
runner.train()
| 3,296 | 27.921053 | 76 |
py
|
ERD
|
ERD-main/tests/test_engine/test_optimizers/__init__.py
| 0 | 0 | 0 |
py
|
|
ERD
|
ERD-main/tests/test_engine/test_optimizers/test_layer_decay_optimizer_constructor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.engine import LearningRateDecayOptimizerConstructor
base_lr = 1
decay_rate = 2
base_wd = 0.05
weight_decay = 0.05
expected_stage_wise_lr_wd_convnext = [{
'weight_decay': 0.0,
'lr_scale': 128
}, {
'weight_decay': 0.0,
'lr_scale': 1
}, {
'weight_decay': 0.05,
'lr_scale': 64
}, {
'weight_decay': 0.0,
'lr_scale': 64
}, {
'weight_decay': 0.05,
'lr_scale': 32
}, {
'weight_decay': 0.0,
'lr_scale': 32
}, {
'weight_decay': 0.05,
'lr_scale': 16
}, {
'weight_decay': 0.0,
'lr_scale': 16
}, {
'weight_decay': 0.05,
'lr_scale': 8
}, {
'weight_decay': 0.0,
'lr_scale': 8
}, {
'weight_decay': 0.05,
'lr_scale': 128
}, {
'weight_decay': 0.05,
'lr_scale': 1
}]
expected_layer_wise_lr_wd_convnext = [{
'weight_decay': 0.0,
'lr_scale': 128
}, {
'weight_decay': 0.0,
'lr_scale': 1
}, {
'weight_decay': 0.05,
'lr_scale': 64
}, {
'weight_decay': 0.0,
'lr_scale': 64
}, {
'weight_decay': 0.05,
'lr_scale': 32
}, {
'weight_decay': 0.0,
'lr_scale': 32
}, {
'weight_decay': 0.05,
'lr_scale': 16
}, {
'weight_decay': 0.0,
'lr_scale': 16
}, {
'weight_decay': 0.05,
'lr_scale': 2
}, {
'weight_decay': 0.0,
'lr_scale': 2
}, {
'weight_decay': 0.05,
'lr_scale': 128
}, {
'weight_decay': 0.05,
'lr_scale': 1
}]
class ToyConvNeXt(nn.Module):
def __init__(self):
super().__init__()
self.stages = nn.ModuleList()
for i in range(4):
stage = nn.Sequential(ConvModule(3, 4, kernel_size=1, bias=True))
self.stages.append(stage)
self.norm0 = nn.BatchNorm2d(2)
        # add some variables to meet unit test coverage rate
self.cls_token = nn.Parameter(torch.ones(1))
self.mask_token = nn.Parameter(torch.ones(1))
self.pos_embed = nn.Parameter(torch.ones(1))
self.stem_norm = nn.Parameter(torch.ones(1))
self.downsample_norm0 = nn.BatchNorm2d(2)
self.downsample_norm1 = nn.BatchNorm2d(2)
self.downsample_norm2 = nn.BatchNorm2d(2)
self.lin = nn.Parameter(torch.ones(1))
self.lin.requires_grad = False
self.downsample_layers = nn.ModuleList()
for _ in range(4):
stage = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=True))
self.downsample_layers.append(stage)
class ToyDetector(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = backbone
self.head = nn.Conv2d(2, 2, kernel_size=1, groups=2)
class PseudoDataParallel(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
def check_optimizer_lr_wd(optimizer, gt_lr_wd):
assert isinstance(optimizer, torch.optim.AdamW)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['weight_decay'] == base_wd
param_groups = optimizer.param_groups
print(param_groups)
assert len(param_groups) == len(gt_lr_wd)
for i, param_dict in enumerate(param_groups):
assert param_dict['weight_decay'] == gt_lr_wd[i]['weight_decay']
assert param_dict['lr_scale'] == gt_lr_wd[i]['lr_scale']
assert param_dict['lr_scale'] == param_dict['lr']
def test_learning_rate_decay_optimizer_constructor():
# Test lr wd for ConvNeXT
backbone = ToyConvNeXt()
model = PseudoDataParallel(ToyDetector(backbone))
optim_wrapper_cfg = dict(
type='OptimWrapper',
optimizer=dict(
type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05))
# stagewise decay
stagewise_paramwise_cfg = dict(
decay_rate=decay_rate, decay_type='stage_wise', num_layers=6)
optim_constructor = LearningRateDecayOptimizerConstructor(
optim_wrapper_cfg, stagewise_paramwise_cfg)
optim_wrapper = optim_constructor(model)
check_optimizer_lr_wd(optim_wrapper.optimizer,
expected_stage_wise_lr_wd_convnext)
# layerwise decay
layerwise_paramwise_cfg = dict(
decay_rate=decay_rate, decay_type='layer_wise', num_layers=6)
optim_constructor = LearningRateDecayOptimizerConstructor(
optim_wrapper_cfg, layerwise_paramwise_cfg)
optim_wrapper = optim_constructor(model)
check_optimizer_lr_wd(optim_wrapper.optimizer,
expected_layer_wise_lr_wd_convnext)
| 4,532 | 25.822485 | 77 |
py
|
ERD
|
ERD-main/tests/test_structures/test_det_data_sample.py
|
from unittest import TestCase
import numpy as np
import pytest
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.structures import DetDataSample
def _equal(a, b):
if isinstance(a, (torch.Tensor, np.ndarray)):
return (a == b).all()
else:
return a == b
class TestDetDataSample(TestCase):
def test_init(self):
meta_info = dict(
img_size=[256, 256],
scale_factor=np.array([1.5, 1.5]),
img_shape=torch.rand(4))
det_data_sample = DetDataSample(metainfo=meta_info)
assert 'img_size' in det_data_sample
assert det_data_sample.img_size == [256, 256]
assert det_data_sample.get('img_size') == [256, 256]
def test_setter(self):
det_data_sample = DetDataSample()
# test gt_instances
gt_instances_data = dict(
bboxes=torch.rand(4, 4),
labels=torch.rand(4),
masks=np.random.rand(4, 2, 2))
gt_instances = InstanceData(**gt_instances_data)
det_data_sample.gt_instances = gt_instances
assert 'gt_instances' in det_data_sample
assert _equal(det_data_sample.gt_instances.bboxes,
gt_instances_data['bboxes'])
assert _equal(det_data_sample.gt_instances.labels,
gt_instances_data['labels'])
assert _equal(det_data_sample.gt_instances.masks,
gt_instances_data['masks'])
# test pred_instances
pred_instances_data = dict(
bboxes=torch.rand(2, 4),
labels=torch.rand(2),
masks=np.random.rand(2, 2, 2))
pred_instances = InstanceData(**pred_instances_data)
det_data_sample.pred_instances = pred_instances
assert 'pred_instances' in det_data_sample
assert _equal(det_data_sample.pred_instances.bboxes,
pred_instances_data['bboxes'])
assert _equal(det_data_sample.pred_instances.labels,
pred_instances_data['labels'])
assert _equal(det_data_sample.pred_instances.masks,
pred_instances_data['masks'])
# test proposals
proposals_data = dict(bboxes=torch.rand(4, 4), labels=torch.rand(4))
proposals = InstanceData(**proposals_data)
det_data_sample.proposals = proposals
assert 'proposals' in det_data_sample
assert _equal(det_data_sample.proposals.bboxes,
proposals_data['bboxes'])
assert _equal(det_data_sample.proposals.labels,
proposals_data['labels'])
# test ignored_instances
ignored_instances_data = dict(
bboxes=torch.rand(4, 4), labels=torch.rand(4))
ignored_instances = InstanceData(**ignored_instances_data)
det_data_sample.ignored_instances = ignored_instances
assert 'ignored_instances' in det_data_sample
assert _equal(det_data_sample.ignored_instances.bboxes,
ignored_instances_data['bboxes'])
assert _equal(det_data_sample.ignored_instances.labels,
ignored_instances_data['labels'])
# test gt_panoptic_seg
gt_panoptic_seg_data = dict(panoptic_seg=torch.rand(5, 4))
gt_panoptic_seg = PixelData(**gt_panoptic_seg_data)
det_data_sample.gt_panoptic_seg = gt_panoptic_seg
assert 'gt_panoptic_seg' in det_data_sample
assert _equal(det_data_sample.gt_panoptic_seg.panoptic_seg,
gt_panoptic_seg_data['panoptic_seg'])
# test pred_panoptic_seg
pred_panoptic_seg_data = dict(panoptic_seg=torch.rand(5, 4))
pred_panoptic_seg = PixelData(**pred_panoptic_seg_data)
det_data_sample.pred_panoptic_seg = pred_panoptic_seg
assert 'pred_panoptic_seg' in det_data_sample
assert _equal(det_data_sample.pred_panoptic_seg.panoptic_seg,
pred_panoptic_seg_data['panoptic_seg'])
        # test gt_segm_seg
gt_segm_seg_data = dict(segm_seg=torch.rand(5, 4, 2))
gt_segm_seg = PixelData(**gt_segm_seg_data)
det_data_sample.gt_segm_seg = gt_segm_seg
assert 'gt_segm_seg' in det_data_sample
assert _equal(det_data_sample.gt_segm_seg.segm_seg,
gt_segm_seg_data['segm_seg'])
# test pred_segm_seg
pred_segm_seg_data = dict(segm_seg=torch.rand(5, 4, 2))
pred_segm_seg = PixelData(**pred_segm_seg_data)
det_data_sample.pred_segm_seg = pred_segm_seg
assert 'pred_segm_seg' in det_data_sample
assert _equal(det_data_sample.pred_segm_seg.segm_seg,
pred_segm_seg_data['segm_seg'])
# test type error
with pytest.raises(AssertionError):
det_data_sample.pred_instances = torch.rand(2, 4)
with pytest.raises(AssertionError):
det_data_sample.pred_panoptic_seg = torch.rand(2, 4)
with pytest.raises(AssertionError):
det_data_sample.pred_sem_seg = torch.rand(2, 4)
def test_deleter(self):
gt_instances_data = dict(
bboxes=torch.rand(4, 4),
labels=torch.rand(4),
masks=np.random.rand(4, 2, 2))
det_data_sample = DetDataSample()
gt_instances = InstanceData(data=gt_instances_data)
det_data_sample.gt_instances = gt_instances
assert 'gt_instances' in det_data_sample
del det_data_sample.gt_instances
assert 'gt_instances' not in det_data_sample
pred_panoptic_seg_data = torch.rand(5, 4)
pred_panoptic_seg = PixelData(data=pred_panoptic_seg_data)
det_data_sample.pred_panoptic_seg = pred_panoptic_seg
assert 'pred_panoptic_seg' in det_data_sample
del det_data_sample.pred_panoptic_seg
assert 'pred_panoptic_seg' not in det_data_sample
pred_segm_seg_data = dict(segm_seg=torch.rand(5, 4, 2))
pred_segm_seg = PixelData(**pred_segm_seg_data)
det_data_sample.pred_segm_seg = pred_segm_seg
assert 'pred_segm_seg' in det_data_sample
del det_data_sample.pred_segm_seg
assert 'pred_segm_seg' not in det_data_sample
| 6,181 | 39.671053 | 76 |
py
|
ERD
|
ERD-main/tests/test_structures/__init__.py
| 0 | 0 | 0 |
py
|
|
ERD
|
ERD-main/tests/test_structures/test_bbox/test_horizontal_boxes.py
|
import random
from math import sqrt
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.testing import assert_allclose
from mmdet.structures.bbox import HorizontalBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks
class TestHorizontalBoxes(TestCase):
def test_init(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
th_boxes_cxcywh = torch.Tensor([15, 15, 10, 10]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
assert_allclose(boxes.tensor, th_boxes)
boxes = HorizontalBoxes(th_boxes, in_mode='xyxy')
assert_allclose(boxes.tensor, th_boxes)
boxes = HorizontalBoxes(th_boxes_cxcywh, in_mode='cxcywh')
assert_allclose(boxes.tensor, th_boxes)
with self.assertRaises(ValueError):
boxes = HorizontalBoxes(th_boxes, in_mode='invalid')
def test_cxcywh(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
th_boxes_cxcywh = torch.Tensor([15, 15, 10, 10]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
assert_allclose(
HorizontalBoxes.xyxy_to_cxcywh(th_boxes), th_boxes_cxcywh)
assert_allclose(th_boxes,
HorizontalBoxes.cxcywh_to_xyxy(th_boxes_cxcywh))
assert_allclose(boxes.cxcywh, th_boxes_cxcywh)
    def test_property(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
# Centers
centers = torch.Tensor([15, 15]).reshape(1, 1, 2)
assert_allclose(boxes.centers, centers)
# Areas
areas = torch.Tensor([100]).reshape(1, 1)
assert_allclose(boxes.areas, areas)
# widths
widths = torch.Tensor([10]).reshape(1, 1)
assert_allclose(boxes.widths, widths)
# heights
heights = torch.Tensor([10]).reshape(1, 1)
assert_allclose(boxes.heights, heights)
def test_flip(self):
img_shape = [50, 85]
# horizontal flip
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
flipped_boxes_th = torch.Tensor([65, 10, 75, 20]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.flip_(img_shape, direction='horizontal')
assert_allclose(boxes.tensor, flipped_boxes_th)
# vertical flip
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
flipped_boxes_th = torch.Tensor([10, 30, 20, 40]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.flip_(img_shape, direction='vertical')
assert_allclose(boxes.tensor, flipped_boxes_th)
# diagonal flip
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
flipped_boxes_th = torch.Tensor([65, 30, 75, 40]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.flip_(img_shape, direction='diagonal')
assert_allclose(boxes.tensor, flipped_boxes_th)
def test_translate(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.translate_([23, 46])
translated_boxes_th = torch.Tensor([33, 56, 43, 66]).reshape(1, 1, 4)
assert_allclose(boxes.tensor, translated_boxes_th)
def test_clip(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
img_shape = [13, 14]
boxes = HorizontalBoxes(th_boxes)
boxes.clip_(img_shape)
cliped_boxes_th = torch.Tensor([10, 10, 14, 13]).reshape(1, 1, 4)
assert_allclose(boxes.tensor, cliped_boxes_th)
def test_rotate(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
center = (15, 15)
angle = -45
boxes = HorizontalBoxes(th_boxes)
boxes.rotate_(center, angle)
rotated_boxes_th = torch.Tensor([
15 - 5 * sqrt(2), 15 - 5 * sqrt(2), 15 + 5 * sqrt(2),
15 + 5 * sqrt(2)
]).reshape(1, 1, 4)
assert_allclose(boxes.tensor, rotated_boxes_th)
def test_project(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
boxes1 = HorizontalBoxes(th_boxes)
boxes2 = boxes1.clone()
matrix = np.zeros((3, 3), dtype=np.float32)
center = [random.random() * 80, random.random() * 80]
angle = random.random() * 180
matrix[:2, :3] = cv2.getRotationMatrix2D(center, angle, 1)
x_translate = random.random() * 40
y_translate = random.random() * 40
matrix[0, 2] = matrix[0, 2] + x_translate
matrix[1, 2] = matrix[1, 2] + y_translate
scale_factor = random.random() * 2
matrix[2, 2] = 1 / scale_factor
boxes1.project_(matrix)
boxes2.rotate_(center, -angle)
boxes2.translate_([x_translate, y_translate])
boxes2.rescale_([scale_factor, scale_factor])
assert_allclose(boxes1.tensor, boxes2.tensor)
# test empty boxes
empty_boxes = HorizontalBoxes(torch.zeros((0, 4)))
empty_boxes.project_(matrix)
def test_rescale(self):
scale_factor = [0.4, 0.8]
# rescale
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.rescale_(scale_factor)
rescaled_boxes_th = torch.Tensor([4, 8, 8, 16]).reshape(1, 1, 4)
assert_allclose(boxes.tensor, rescaled_boxes_th)
def test_resize(self):
scale_factor = [0.4, 0.8]
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.resize_(scale_factor)
resized_boxes_th = torch.Tensor([13, 11, 17, 19]).reshape(1, 1, 4)
assert_allclose(boxes.tensor, resized_boxes_th)
def test_is_inside(self):
th_boxes = torch.Tensor([[10, 10, 20, 20], [-5, -5, 15, 15],
[45, 45, 55, 55]]).reshape(1, 3, 4)
img_shape = [30, 30]
boxes = HorizontalBoxes(th_boxes)
index = boxes.is_inside(img_shape)
index_th = torch.BoolTensor([True, True, False]).reshape(1, 3)
assert_allclose(index, index_th)
def test_find_inside_points(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 4)
boxes = HorizontalBoxes(th_boxes)
points = torch.Tensor([[0, 0], [0, 15], [15, 0], [15, 15]])
index = boxes.find_inside_points(points)
index_th = torch.BoolTensor([False, False, False, True]).reshape(4, 1)
assert_allclose(index, index_th)
# is_aligned
boxes = boxes.expand(4, 4)
index = boxes.find_inside_points(points, is_aligned=True)
index_th = torch.BoolTensor([False, False, False, True])
assert_allclose(index, index_th)
def test_from_instance_masks(self):
bitmap_masks = BitmapMasks.random()
boxes = HorizontalBoxes.from_instance_masks(bitmap_masks)
self.assertIsInstance(boxes, HorizontalBoxes)
self.assertEqual(len(boxes), len(bitmap_masks))
polygon_masks = PolygonMasks.random()
boxes = HorizontalBoxes.from_instance_masks(polygon_masks)
self.assertIsInstance(boxes, HorizontalBoxes)
        self.assertEqual(len(boxes), len(polygon_masks))
# zero length masks
bitmap_masks = BitmapMasks.random(num_masks=0)
boxes = HorizontalBoxes.from_instance_masks(bitmap_masks)
self.assertIsInstance(boxes, HorizontalBoxes)
self.assertEqual(len(boxes), 0)
polygon_masks = PolygonMasks.random(num_masks=0)
boxes = HorizontalBoxes.from_instance_masks(polygon_masks)
self.assertIsInstance(boxes, HorizontalBoxes)
self.assertEqual(len(boxes), 0)
| 7,719 | 40.06383 | 78 |
py
|
ERD
|
ERD-main/tests/test_structures/test_bbox/utils.py
|
from mmdet.structures.bbox import BaseBoxes
class ToyBaseBoxes(BaseBoxes):
box_dim = 4
@property
def centers(self):
pass
@property
def areas(self):
pass
@property
def widths(self):
pass
@property
def heights(self):
pass
def flip_(self, img_shape, direction='horizontal'):
pass
def translate_(self, distances):
pass
def clip_(self, img_shape):
pass
def rotate_(self, center, angle):
pass
def project_(self, homography_matrix):
pass
def rescale_(self, scale_factor):
pass
def resize_(self, scale_factor):
pass
def is_inside(self, img_shape):
pass
def find_inside_points(self, points, is_aligned=False):
pass
def overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
pass
def from_instance_masks(masks):
pass
| 938 | 15.767857 | 75 |
py
|
ERD
|
ERD-main/tests/test_structures/test_bbox/test_base_boxes.py
|
from unittest import TestCase
import numpy as np
import torch
from mmengine.testing import assert_allclose
from .utils import ToyBaseBoxes
class TestBaseBoxes(TestCase):
def test_init(self):
box_tensor = torch.rand((3, 4, 4))
boxes = ToyBaseBoxes(box_tensor)
boxes = ToyBaseBoxes(box_tensor, dtype=torch.float64)
self.assertEqual(boxes.tensor.dtype, torch.float64)
if torch.cuda.is_available():
boxes = ToyBaseBoxes(box_tensor, device='cuda')
self.assertTrue(boxes.tensor.is_cuda)
with self.assertRaises(AssertionError):
box_tensor = torch.rand((4, ))
boxes = ToyBaseBoxes(box_tensor)
with self.assertRaises(AssertionError):
box_tensor = torch.rand((3, 4, 3))
boxes = ToyBaseBoxes(box_tensor)
def test_getitem(self):
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
# test single dimension index
# int
new_boxes = boxes[0]
self.assertIsInstance(new_boxes, ToyBaseBoxes)
self.assertEqual(new_boxes.tensor.shape, (4, 4))
# list
new_boxes = boxes[[0, 2]]
self.assertIsInstance(new_boxes, ToyBaseBoxes)
self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))
# slice
new_boxes = boxes[0:2]
self.assertIsInstance(new_boxes, ToyBaseBoxes)
self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))
# torch.LongTensor
new_boxes = boxes[torch.LongTensor([0, 1])]
self.assertIsInstance(new_boxes, ToyBaseBoxes)
self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))
# torch.BoolTensor
new_boxes = boxes[torch.BoolTensor([True, False, True])]
self.assertIsInstance(new_boxes, ToyBaseBoxes)
self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))
with self.assertRaises(AssertionError):
index = torch.rand((2, 4, 4)) > 0
new_boxes = boxes[index]
# test multiple dimension index
# select single box
new_boxes = boxes[1, 2]
self.assertIsInstance(new_boxes, ToyBaseBoxes)
self.assertEqual(new_boxes.tensor.shape, (1, 4))
# select the last dimension
with self.assertRaises(AssertionError):
new_boxes = boxes[1, 2, 1]
# has Ellipsis
new_boxes = boxes[None, ...]
self.assertIsInstance(new_boxes, ToyBaseBoxes)
self.assertEqual(new_boxes.tensor.shape, (1, 3, 4, 4))
with self.assertRaises(AssertionError):
new_boxes = boxes[..., None]
def test_setitem(self):
values = ToyBaseBoxes(torch.rand(3, 4, 4))
tensor = torch.rand(3, 4, 4)
# only support BaseBoxes type
with self.assertRaises(AssertionError):
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
boxes[0:2] = tensor[0:2]
# test single dimension index
# int
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
boxes[1] = values[1]
assert_allclose(boxes.tensor[1], values.tensor[1])
# list
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
boxes[[1, 2]] = values[[1, 2]]
assert_allclose(boxes.tensor[[1, 2]], values.tensor[[1, 2]])
# slice
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
boxes[0:2] = values[0:2]
assert_allclose(boxes.tensor[0:2], values.tensor[0:2])
# torch.BoolTensor
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
index = torch.rand(3, 4) > 0.5
boxes[index] = values[index]
assert_allclose(boxes.tensor[index], values.tensor[index])
# multiple dimension index
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
boxes[0:2, 0:2] = values[0:2, 0:2]
assert_allclose(boxes.tensor[0:2, 0:2], values.tensor[0:2, 0:2])
# select single box
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
boxes[1, 1] = values[1, 1]
assert_allclose(boxes.tensor[1, 1], values.tensor[1, 1])
# select the last dimension
with self.assertRaises(AssertionError):
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
boxes[1, 1, 1] = values[1, 1, 1]
# has Ellipsis
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
boxes[0:2, ...] = values[0:2, ...]
assert_allclose(boxes.tensor[0:2, ...], values.tensor[0:2, ...])
def test_tensor_like_functions(self):
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
# new_tensor
boxes.new_tensor([1, 2, 3])
# new_full
boxes.new_full((3, 4), 0)
# new_empty
boxes.new_empty((3, 4))
# new_ones
boxes.new_ones((3, 4))
# new_zeros
boxes.new_zeros((3, 4))
# size
self.assertEqual(boxes.size(0), 3)
self.assertEqual(tuple(boxes.size()), (3, 4, 4))
# dim
self.assertEqual(boxes.dim(), 3)
# device
self.assertIsInstance(boxes.device, torch.device)
# dtype
self.assertIsInstance(boxes.dtype, torch.dtype)
# numpy
np_boxes = boxes.numpy()
self.assertIsInstance(np_boxes, np.ndarray)
self.assertTrue((np_boxes == np_boxes).all())
# to
new_boxes = boxes.to(torch.uint8)
self.assertEqual(new_boxes.tensor.dtype, torch.uint8)
if torch.cuda.is_available():
new_boxes = boxes.to(device='cuda')
self.assertTrue(new_boxes.tensor.is_cuda)
# cpu
if torch.cuda.is_available():
new_boxes = boxes.to(device='cuda')
new_boxes = new_boxes.cpu()
self.assertFalse(new_boxes.tensor.is_cuda)
# cuda
if torch.cuda.is_available():
new_boxes = boxes.cuda()
self.assertTrue(new_boxes.tensor.is_cuda)
# clone
boxes.clone()
# detach
boxes.detach()
# view
new_boxes = boxes.view(12, 4)
self.assertEqual(tuple(new_boxes.size()), (12, 4))
new_boxes = boxes.view(-1, 4)
self.assertEqual(tuple(new_boxes.size()), (12, 4))
with self.assertRaises(AssertionError):
new_boxes = boxes.view(-1)
# reshape
new_boxes = boxes.reshape(12, 4)
self.assertEqual(tuple(new_boxes.size()), (12, 4))
new_boxes = boxes.reshape(-1, 4)
self.assertEqual(tuple(new_boxes.size()), (12, 4))
with self.assertRaises(AssertionError):
new_boxes = boxes.reshape(-1)
# expand
new_boxes = boxes[None, ...].expand(4, -1, -1, -1)
self.assertEqual(tuple(new_boxes.size()), (4, 3, 4, 4))
# repeat
new_boxes = boxes.repeat(2, 2, 1)
self.assertEqual(tuple(new_boxes.size()), (6, 8, 4))
with self.assertRaises(AssertionError):
new_boxes = boxes.repeat(2, 2, 2)
# transpose
new_boxes = boxes.transpose(0, 1)
self.assertEqual(tuple(new_boxes.size()), (4, 3, 4))
with self.assertRaises(AssertionError):
new_boxes = boxes.transpose(1, 2)
# permute
new_boxes = boxes.permute(1, 0, 2)
self.assertEqual(tuple(new_boxes.size()), (4, 3, 4))
with self.assertRaises(AssertionError):
new_boxes = boxes.permute(2, 1, 0)
# split
boxes_list = boxes.split(1, dim=0)
for box in boxes_list:
self.assertIsInstance(box, ToyBaseBoxes)
self.assertEqual(tuple(box.size()), (1, 4, 4))
boxes_list = boxes.split([1, 2], dim=0)
with self.assertRaises(AssertionError):
boxes_list = boxes.split(1, dim=2)
# chunk
        boxes_list = boxes.chunk(3, dim=1)
self.assertEqual(len(boxes_list), 2)
for box in boxes_list:
self.assertIsInstance(box, ToyBaseBoxes)
with self.assertRaises(AssertionError):
            boxes_list = boxes.chunk(3, dim=2)
# unbind
boxes_list = boxes.unbind(dim=1)
self.assertEqual(len(boxes_list), 4)
for box in boxes_list:
self.assertIsInstance(box, ToyBaseBoxes)
self.assertEqual(tuple(box.size()), (3, 4))
with self.assertRaises(AssertionError):
boxes_list = boxes.unbind(dim=2)
# flatten
new_boxes = boxes.flatten()
self.assertEqual(tuple(new_boxes.size()), (12, 4))
with self.assertRaises(AssertionError):
new_boxes = boxes.flatten(end_dim=2)
# squeeze
boxes = ToyBaseBoxes(torch.rand(1, 3, 1, 4, 4))
new_boxes = boxes.squeeze()
self.assertEqual(tuple(new_boxes.size()), (3, 4, 4))
new_boxes = boxes.squeeze(dim=2)
self.assertEqual(tuple(new_boxes.size()), (1, 3, 4, 4))
# unsqueeze
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
new_boxes = boxes.unsqueeze(0)
self.assertEqual(tuple(new_boxes.size()), (1, 3, 4, 4))
with self.assertRaises(AssertionError):
new_boxes = boxes.unsqueeze(3)
# cat
with self.assertRaises(ValueError):
ToyBaseBoxes.cat([])
box_list = []
box_list.append(ToyBaseBoxes(torch.rand(3, 4, 4)))
box_list.append(ToyBaseBoxes(torch.rand(1, 4, 4)))
with self.assertRaises(AssertionError):
ToyBaseBoxes.cat(box_list, dim=2)
cat_boxes = ToyBaseBoxes.cat(box_list, dim=0)
self.assertIsInstance(cat_boxes, ToyBaseBoxes)
        self.assertEqual(tuple(cat_boxes.size()), (4, 4, 4))
# stack
with self.assertRaises(ValueError):
ToyBaseBoxes.stack([])
box_list = []
box_list.append(ToyBaseBoxes(torch.rand(3, 4, 4)))
box_list.append(ToyBaseBoxes(torch.rand(3, 4, 4)))
with self.assertRaises(AssertionError):
ToyBaseBoxes.stack(box_list, dim=3)
stack_boxes = ToyBaseBoxes.stack(box_list, dim=1)
self.assertIsInstance(stack_boxes, ToyBaseBoxes)
        self.assertEqual(tuple(stack_boxes.size()), (3, 2, 4, 4))
def test_misc(self):
boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
# __len__
self.assertEqual(len(boxes), 3)
# __repr__
repr(boxes)
# fake_boxes
new_boxes = boxes.fake_boxes((3, 4, 4), 1)
self.assertEqual(tuple(new_boxes.size()), (3, 4, 4))
self.assertEqual(boxes.dtype, new_boxes.dtype)
self.assertEqual(boxes.device, new_boxes.device)
self.assertTrue((new_boxes.tensor == 1).all())
with self.assertRaises(AssertionError):
new_boxes = boxes.fake_boxes((3, 4, 1))
new_boxes = boxes.fake_boxes((3, 4, 4), dtype=torch.uint8)
self.assertEqual(new_boxes.dtype, torch.uint8)
if torch.cuda.is_available():
new_boxes = boxes.fake_boxes((3, 4, 4), device='cuda')
self.assertTrue(new_boxes.tensor.is_cuda)
| 10,821 | 38.068592 | 72 |
py
|
ERD
|
ERD-main/tests/test_structures/test_bbox/__init__.py
| 0 | 0 | 0 |
py
|
|
ERD
|
ERD-main/tests/test_structures/test_bbox/test_box_type.py
|
from unittest import TestCase
from unittest.mock import MagicMock
import torch
from mmdet.structures.bbox.box_type import (_box_type_to_name, box_converters,
box_types, convert_box_type,
get_box_type, register_box,
register_box_converter)
from .utils import ToyBaseBoxes
class TestBoxType(TestCase):
def setUp(self):
self.box_types = box_types.copy()
self.box_converters = box_converters.copy()
self._box_type_to_name = _box_type_to_name.copy()
def tearDown(self):
# Clear registered items
box_types.clear()
box_converters.clear()
_box_type_to_name.clear()
# Restore original items
box_types.update(self.box_types)
box_converters.update(self.box_converters)
_box_type_to_name.update(self._box_type_to_name)
def test_register_box(self):
# test usage of decorator
@register_box('A')
class A(ToyBaseBoxes):
pass
# test usage of normal function
class B(ToyBaseBoxes):
pass
register_box('B', B)
# register class without inheriting from BaseBoxes
with self.assertRaises(AssertionError):
@register_box('C')
class C:
pass
# test register registered class
with self.assertRaises(KeyError):
@register_box('A')
class AA(ToyBaseBoxes):
pass
with self.assertRaises(KeyError):
register_box('BB', B)
@register_box('A', force=True)
class AAA(ToyBaseBoxes):
pass
self.assertIs(box_types['a'], AAA)
self.assertEqual(_box_type_to_name[AAA], 'a')
register_box('BB', B, force=True)
self.assertIs(box_types['bb'], B)
self.assertEqual(_box_type_to_name[B], 'bb')
self.assertEqual(len(box_types), len(_box_type_to_name))
def test_register_box_converter(self):
@register_box('A')
class A(ToyBaseBoxes):
pass
@register_box('B')
class B(ToyBaseBoxes):
pass
@register_box('C')
class C(ToyBaseBoxes):
pass
# test usage of decorator
@register_box_converter('A', 'B')
def converter_A(bboxes):
return bboxes
# test usage of normal function
def converter_B(bboxes):
return bboxes
        register_box_converter('B', 'A', converter_B)
# register uncallable object
with self.assertRaises(AssertionError):
register_box_converter('A', 'C', 'uncallable str')
# test register unregistered bbox mode
with self.assertRaises(AssertionError):
@register_box_converter('A', 'D')
def converter_C(bboxes):
return bboxes
# test register registered converter
with self.assertRaises(KeyError):
@register_box_converter('A', 'B')
def converter_D(bboxes):
return bboxes
@register_box_converter('A', 'B', force=True)
def converter_E(bboxes):
return bboxes
self.assertIs(box_converters['a2b'], converter_E)
def test_get_box_type(self):
@register_box('A')
class A(ToyBaseBoxes):
pass
mode_name, mode_cls = get_box_type('A')
self.assertEqual(mode_name, 'a')
self.assertIs(mode_cls, A)
mode_name, mode_cls = get_box_type(A)
self.assertEqual(mode_name, 'a')
self.assertIs(mode_cls, A)
# get unregistered mode
class B(ToyBaseBoxes):
pass
with self.assertRaises(AssertionError):
mode_name, mode_cls = get_box_type('B')
with self.assertRaises(AssertionError):
mode_name, mode_cls = get_box_type(B)
def test_convert_box_type(self):
@register_box('A')
class A(ToyBaseBoxes):
pass
@register_box('B')
class B(ToyBaseBoxes):
pass
@register_box('C')
class C(ToyBaseBoxes):
pass
converter = MagicMock()
converter.return_value = torch.rand(3, 4, 4)
register_box_converter('A', 'B', converter)
bboxes_a = A(torch.rand(3, 4, 4))
th_bboxes_a = bboxes_a.tensor
np_bboxes_a = th_bboxes_a.numpy()
# test convert to mode
convert_box_type(bboxes_a, dst_type='B')
self.assertTrue(converter.called)
converted_bboxes = convert_box_type(bboxes_a, dst_type='A')
self.assertIs(converted_bboxes, bboxes_a)
# test convert to unregistered mode
with self.assertRaises(AssertionError):
convert_box_type(bboxes_a, dst_type='C')
# test convert tensor and ndarray
# without specific src_type
with self.assertRaises(AssertionError):
convert_box_type(th_bboxes_a, dst_type='B')
with self.assertRaises(AssertionError):
convert_box_type(np_bboxes_a, dst_type='B')
# test np.ndarray
convert_box_type(np_bboxes_a, src_type='A', dst_type='B')
converted_bboxes = convert_box_type(
np_bboxes_a, src_type='A', dst_type='A')
self.assertIs(converted_bboxes, np_bboxes_a)
# test tensor
convert_box_type(th_bboxes_a, src_type='A', dst_type='B')
converted_bboxes = convert_box_type(
th_bboxes_a, src_type='A', dst_type='A')
self.assertIs(converted_bboxes, th_bboxes_a)
# test other type
with self.assertRaises(TypeError):
convert_box_type([[1, 2, 3, 4]], src_type='A', dst_type='B')
| 5,813 | 29.28125 | 78 |
py
|
ERD
|
ERD-main/tests/test_structures/test_mask/test_mask_structures.py
|
from unittest import TestCase
import numpy as np
from mmengine.testing import assert_allclose
from mmdet.structures.mask import BitmapMasks, PolygonMasks
class TestMaskStructures(TestCase):
def test_bitmap_translate_same_size(self):
mask_array = np.zeros((5, 10, 10), dtype=np.uint8)
mask_array[:, 0:5, 0:5] = 1
mask_target = np.zeros((5, 10, 10), dtype=np.uint8)
mask_target[:, 0:5, 5:10] = 1
mask = BitmapMasks(mask_array, 10, 10)
mask = mask.translate((10, 10), 5)
assert mask.masks.shape == (5, 10, 10)
assert_allclose(mask_target, mask.masks)
def test_bitmap_translate_diff_size(self):
# test out shape larger
mask_array = np.zeros((5, 10, 10), dtype=np.uint8)
mask_array[:, 0:5, 0:5] = 1
mask_target = np.zeros((5, 20, 20), dtype=np.uint8)
mask_target[:, 0:5, 5:10] = 1
mask = BitmapMasks(mask_array, 10, 10)
mask = mask.translate((20, 20), 5)
assert mask.masks.shape == (5, 20, 20)
assert_allclose(mask_target, mask.masks)
# test out shape smaller
mask_array = np.zeros((5, 10, 10), dtype=np.uint8)
mask_array[:, 0:5, 0:5] = 1
mask_target = np.zeros((5, 20, 8), dtype=np.uint8)
mask_target[:, 0:5, 5:] = 1
mask = BitmapMasks(mask_array, 10, 10)
mask = mask.translate((20, 8), 5)
assert mask.masks.shape == (5, 20, 8)
assert_allclose(mask_target, mask.masks)
def test_bitmap_cat(self):
# test invalid inputs
with self.assertRaises(AssertionError):
BitmapMasks.cat(BitmapMasks.random(4))
with self.assertRaises(ValueError):
BitmapMasks.cat([])
with self.assertRaises(AssertionError):
BitmapMasks.cat([BitmapMasks.random(2), PolygonMasks.random(3)])
masks = [BitmapMasks.random(num_masks=3) for _ in range(5)]
cat_mask = BitmapMasks.cat(masks)
assert len(cat_mask) == 3 * 5
for i, m in enumerate(masks):
assert_allclose(m.masks, cat_mask.masks[i * 3:(i + 1) * 3])
def test_polygon_cat(self):
# test invalid inputs
with self.assertRaises(AssertionError):
PolygonMasks.cat(PolygonMasks.random(4))
with self.assertRaises(ValueError):
PolygonMasks.cat([])
with self.assertRaises(AssertionError):
PolygonMasks.cat([BitmapMasks.random(2), PolygonMasks.random(3)])
masks = [PolygonMasks.random(num_masks=3) for _ in range(5)]
cat_mask = PolygonMasks.cat(masks)
assert len(cat_mask) == 3 * 5
for i, m in enumerate(masks):
assert_allclose(m.masks, cat_mask.masks[i * 3:(i + 1) * 3])
| 2,740 | 36.040541 | 77 |
py
|
ERD
|
ERD-main/tests/test_evaluation/test_metrics/test_coco_panoptic_metric.py
|
import os
import os.path as osp
import tempfile
import unittest
from copy import deepcopy
import mmcv
import numpy as np
import torch
from mmengine.fileio import dump
from mmdet.evaluation import INSTANCE_OFFSET, CocoPanopticMetric
try:
import panopticapi
except ImportError:
panopticapi = None
class TestCocoPanopticMetric(unittest.TestCase):
def _create_panoptic_gt_annotations(self, ann_file, seg_map_dir):
categories = [{
'id': 0,
'name': 'person',
'supercategory': 'person',
'isthing': 1
}, {
'id': 1,
'name': 'cat',
'supercategory': 'cat',
'isthing': 1
}, {
'id': 2,
'name': 'dog',
'supercategory': 'dog',
'isthing': 1
}, {
'id': 3,
'name': 'wall',
'supercategory': 'wall',
'isthing': 0
}]
images = [{
'id': 0,
'width': 80,
'height': 60,
'file_name': 'fake_name1.jpg',
}]
annotations = [{
'segments_info': [{
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [10, 10, 10, 40],
'iscrowd': 0
}, {
'id': 2,
'category_id': 0,
'area': 400,
'bbox': [30, 10, 10, 40],
'iscrowd': 0
}, {
'id': 3,
'category_id': 2,
'iscrowd': 0,
'bbox': [50, 10, 10, 5],
'area': 50
}, {
'id': 4,
'category_id': 3,
'iscrowd': 0,
'bbox': [0, 0, 80, 60],
'area': 3950
}],
'file_name':
'fake_name1.png',
'image_id':
0
}]
gt_json = {
'images': images,
'annotations': annotations,
'categories': categories
}
# 4 is the id of the background class annotation.
gt = np.zeros((60, 80), dtype=np.int64) + 4
gt_bboxes = np.array(
[[10, 10, 10, 40], [30, 10, 10, 40], [50, 10, 10, 5]],
dtype=np.int64)
for i in range(3):
x, y, w, h = gt_bboxes[i]
gt[y:y + h, x:x + w] = i + 1 # id starts from 1
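        # Encode segment ids into an RGB seg map following the panopticapi
        # id2rgb convention (R = id % 256, G = id // 256 % 256,
        # B = id // 256 ** 2); the channels are reversed before writing
        # because mmcv.imwrite (cv2) expects BGR order.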
rgb_gt_seg_map = np.zeros(gt.shape + (3, ), dtype=np.uint8)
rgb_gt_seg_map[:, :, 2] = gt // (256 * 256)
rgb_gt_seg_map[:, :, 1] = gt % (256 * 256) // 256
rgb_gt_seg_map[:, :, 0] = gt % 256
img_path = osp.join(seg_map_dir, 'fake_name1.png')
mmcv.imwrite(rgb_gt_seg_map[:, :, ::-1], img_path)
dump(gt_json, ann_file)
return gt_json
def _create_panoptic_data_samples(self):
# predictions
# TP for background class, IoU=3576/4324=0.827
        # 2 is the category id of the background class
pred = np.zeros((60, 80), dtype=np.int64) + 2
pred_bboxes = np.array(
[
[11, 11, 10, 40], # TP IoU=351/449=0.78
[38, 10, 10, 40], # FP
[51, 10, 10, 5] # TP IoU=45/55=0.818
],
dtype=np.int64)
pred_labels = np.array([0, 0, 1], dtype=np.int64)
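        # each pixel encodes instance_id * INSTANCE_OFFSET + category label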
for i in range(3):
x, y, w, h = pred_bboxes[i]
pred[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + pred_labels[i]
data_samples = [{
'img_id':
0,
'ori_shape': (60, 80),
'img_path':
'xxx/fake_name1.jpg',
'segments_info': [{
'id': 1,
'category': 0,
'is_thing': 1
}, {
'id': 2,
'category': 0,
'is_thing': 1
}, {
'id': 3,
'category': 1,
'is_thing': 1
}, {
'id': 4,
'category': 2,
'is_thing': 0
}],
'seg_map_path':
osp.join(self.gt_seg_dir, 'fake_name1.png'),
'pred_panoptic_seg': {
'sem_seg': torch.from_numpy(pred).unsqueeze(0)
},
}]
return data_samples
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
self.gt_json_path = osp.join(self.tmp_dir.name, 'gt.json')
self.gt_seg_dir = osp.join(self.tmp_dir.name, 'gt_seg')
os.mkdir(self.gt_seg_dir)
self._create_panoptic_gt_annotations(self.gt_json_path,
self.gt_seg_dir)
self.dataset_meta = {
'classes': ('person', 'dog', 'wall'),
'thing_classes': ('person', 'dog'),
'stuff_classes': ('wall', )
}
self.target = {
'coco_panoptic/PQ': 67.86874803219071,
'coco_panoptic/SQ': 80.89770126158936,
'coco_panoptic/RQ': 83.33333333333334,
'coco_panoptic/PQ_th': 60.45252075318891,
'coco_panoptic/SQ_th': 79.9959505972869,
'coco_panoptic/RQ_th': 75.0,
'coco_panoptic/PQ_st': 82.70120259019427,
'coco_panoptic/SQ_st': 82.70120259019427,
'coco_panoptic/RQ_st': 100.0
}
self.data_samples = self._create_panoptic_data_samples()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(panopticapi is not None, 'panopticapi is installed')
def test_init(self):
with self.assertRaises(RuntimeError):
CocoPanopticMetric()
@unittest.skipIf(panopticapi is None, 'panopticapi is not installed')
def test_evaluate_without_json(self):
# with tmpfile, without json
metric = CocoPanopticMetric(
ann_file=None,
seg_prefix=self.gt_seg_dir,
classwise=False,
nproc=1,
outfile_prefix=None)
metric.dataset_meta = self.dataset_meta
metric.process({}, deepcopy(self.data_samples))
eval_results = metric.evaluate(size=1)
self.assertDictEqual(eval_results, self.target)
# without tmpfile and json
outfile_prefix = f'{self.tmp_dir.name}/test'
metric = CocoPanopticMetric(
ann_file=None,
seg_prefix=self.gt_seg_dir,
classwise=False,
nproc=1,
outfile_prefix=outfile_prefix)
metric.dataset_meta = self.dataset_meta
metric.process({}, deepcopy(self.data_samples))
eval_results = metric.evaluate(size=1)
self.assertDictEqual(eval_results, self.target)
@unittest.skipIf(panopticapi is None, 'panopticapi is not installed')
def test_evaluate_with_json(self):
# with tmpfile and json
metric = CocoPanopticMetric(
ann_file=self.gt_json_path,
seg_prefix=self.gt_seg_dir,
classwise=False,
nproc=1,
outfile_prefix=None)
metric.dataset_meta = self.dataset_meta
metric.process({}, deepcopy(self.data_samples))
eval_results = metric.evaluate(size=1)
self.assertDictEqual(eval_results, self.target)
# classwise
metric = CocoPanopticMetric(
ann_file=self.gt_json_path,
seg_prefix=self.gt_seg_dir,
classwise=True,
nproc=1,
outfile_prefix=None)
metric.dataset_meta = self.dataset_meta
metric.process({}, deepcopy(self.data_samples))
eval_results = metric.evaluate(size=1)
self.assertDictEqual(eval_results, self.target)
# without tmpfile, with json
outfile_prefix = f'{self.tmp_dir.name}/test1'
metric = CocoPanopticMetric(
ann_file=self.gt_json_path,
seg_prefix=self.gt_seg_dir,
classwise=False,
nproc=1,
outfile_prefix=outfile_prefix)
metric.dataset_meta = self.dataset_meta
metric.process({}, deepcopy(self.data_samples))
eval_results = metric.evaluate(size=1)
self.assertDictEqual(eval_results, self.target)
@unittest.skipIf(panopticapi is None, 'panopticapi is not installed')
def test_format_only(self):
with self.assertRaises(AssertionError):
metric = CocoPanopticMetric(
ann_file=self.gt_json_path,
seg_prefix=self.gt_seg_dir,
classwise=False,
nproc=1,
format_only=True,
outfile_prefix=None)
outfile_prefix = f'{self.tmp_dir.name}/test'
metric = CocoPanopticMetric(
ann_file=self.gt_json_path,
seg_prefix=self.gt_seg_dir,
classwise=False,
nproc=1,
format_only=True,
outfile_prefix=outfile_prefix)
metric.dataset_meta = self.dataset_meta
metric.process({}, deepcopy(self.data_samples))
eval_results = metric.evaluate(size=1)
self.assertDictEqual(eval_results, dict())
self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.panoptic'))
self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.panoptic.json'))
| 9,287 | 31.475524 | 79 |
py
|
ERD
|
ERD-main/tests/test_evaluation/test_metrics/test_crowdhuman_metric.py
|
import os.path as osp
import tempfile
from unittest import TestCase
import numpy as np
import torch
from mmdet.evaluation import CrowdHumanMetric
class TestCrowdHumanMetric(TestCase):
def _create_dummy_results(self):
bboxes = np.array([[1330, 317, 418, 1338], [792, 24, 723, 2017],
[693, 291, 307, 894], [522, 290, 285, 826],
[728, 336, 175, 602], [92, 337, 267, 681]])
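        # convert [x, y, w, h] to [x1, y1, x2, y2]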
bboxes[:, 2:4] += bboxes[:, 0:2]
scores = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
return dict(
bboxes=torch.from_numpy(bboxes), scores=torch.from_numpy(scores))
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
        self.ann_file_path = \
            'tests/data/crowdhuman_dataset/test_annotation_train.odgt'
def tearDown(self):
self.tmp_dir.cleanup()
def test_init(self):
with self.assertRaisesRegex(KeyError, 'metric should be one of'):
            CrowdHumanMetric(ann_file=self.ann_file_path, metric='unknown')
def test_evaluate(self):
# create dummy data
dummy_pred = self._create_dummy_results()
crowdhuman_metric = CrowdHumanMetric(
            ann_file=self.ann_file_path,
outfile_prefix=f'{self.tmp_dir.name}/test')
crowdhuman_metric.process({}, [
dict(
pred_instances=dummy_pred,
img_id='283554,35288000868e92d4',
ori_shape=(1640, 1640))
])
eval_results = crowdhuman_metric.evaluate(size=1)
target = {
'crowd_human/mAP': 0.8333,
'crowd_human/mMR': 0.0,
'crowd_human/JI': 1.0
}
self.assertDictEqual(eval_results, target)
self.assertTrue(osp.isfile(osp.join(self.tmp_dir.name, 'test.json')))
| 1,834 | 32.363636 | 78 |
py
|
ERD
|
ERD-main/tests/test_evaluation/test_metrics/test_coco_metric.py
|
import os.path as osp
import tempfile
from unittest import TestCase
import numpy as np
import pycocotools.mask as mask_util
import torch
from mmengine.fileio import dump
from mmdet.evaluation import CocoMetric
class TestCocoMetric(TestCase):
def _create_dummy_coco_json(self, json_name):
dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)
dummy_mask[:5, :5] = 1
rle_mask = mask_util.encode(dummy_mask)
rle_mask['counts'] = rle_mask['counts'].decode('utf-8')
image = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name.jpg',
}
annotation_1 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0,
'segmentation': rle_mask,
}
annotation_2 = {
'id': 2,
'image_id': 0,
'category_id': 0,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0,
'segmentation': rle_mask,
}
annotation_3 = {
'id': 3,
'image_id': 0,
'category_id': 1,
'area': 1600,
'bbox': [150, 160, 40, 40],
'iscrowd': 0,
'segmentation': rle_mask,
}
annotation_4 = {
'id': 4,
'image_id': 0,
'category_id': 0,
'area': 10000,
'bbox': [250, 260, 100, 100],
'iscrowd': 0,
'segmentation': rle_mask,
}
categories = [
{
'id': 0,
'name': 'car',
'supercategory': 'car',
},
{
'id': 1,
'name': 'bicycle',
'supercategory': 'bicycle',
},
]
fake_json = {
'images': [image],
'annotations':
[annotation_1, annotation_2, annotation_3, annotation_4],
'categories': categories
}
dump(fake_json, json_name)
def _create_dummy_results(self):
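        # the predicted boxes exactly match the gt boxes converted to xyxy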
bboxes = np.array([[50, 60, 70, 80], [100, 120, 130, 150],
[150, 160, 190, 200], [250, 260, 350, 360]])
scores = np.array([1.0, 0.98, 0.96, 0.95])
labels = np.array([0, 0, 1, 0])
dummy_mask = np.zeros((4, 10, 10), dtype=np.uint8)
dummy_mask[:, :5, :5] = 1
return dict(
bboxes=torch.from_numpy(bboxes),
scores=torch.from_numpy(scores),
labels=torch.from_numpy(labels),
masks=torch.from_numpy(dummy_mask))
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
def test_init(self):
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
with self.assertRaisesRegex(KeyError, 'metric should be one of'):
CocoMetric(ann_file=fake_json_file, metric='unknown')
def test_evaluate(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()
# test single coco dataset evaluation
coco_metric = CocoMetric(
ann_file=fake_json_file,
classwise=False,
outfile_prefix=f'{self.tmp_dir.name}/test')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
target = {
'coco/bbox_mAP': 1.0,
'coco/bbox_mAP_50': 1.0,
'coco/bbox_mAP_75': 1.0,
'coco/bbox_mAP_s': 1.0,
'coco/bbox_mAP_m': 1.0,
'coco/bbox_mAP_l': 1.0,
}
self.assertDictEqual(eval_results, target)
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))
# test box and segm coco dataset evaluation
coco_metric = CocoMetric(
ann_file=fake_json_file,
metric=['bbox', 'segm'],
classwise=False,
outfile_prefix=f'{self.tmp_dir.name}/test')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
target = {
'coco/bbox_mAP': 1.0,
'coco/bbox_mAP_50': 1.0,
'coco/bbox_mAP_75': 1.0,
'coco/bbox_mAP_s': 1.0,
'coco/bbox_mAP_m': 1.0,
'coco/bbox_mAP_l': 1.0,
'coco/segm_mAP': 1.0,
'coco/segm_mAP_50': 1.0,
'coco/segm_mAP_75': 1.0,
'coco/segm_mAP_s': 1.0,
'coco/segm_mAP_m': 1.0,
'coco/segm_mAP_l': 1.0,
}
self.assertDictEqual(eval_results, target)
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.segm.json')))
# test invalid custom metric_items
with self.assertRaisesRegex(KeyError,
'metric item "invalid" is not supported'):
coco_metric = CocoMetric(
ann_file=fake_json_file, metric_items=['invalid'])
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process({}, [
dict(
pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))
])
coco_metric.evaluate(size=1)
# test custom metric_items
coco_metric = CocoMetric(
ann_file=fake_json_file, metric_items=['mAP_m'])
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
target = {
'coco/bbox_mAP_m': 1.0,
}
self.assertDictEqual(eval_results, target)
def test_classwise_evaluate(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()
# test single coco dataset evaluation
coco_metric = CocoMetric(
ann_file=fake_json_file, metric='bbox', classwise=True)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
target = {
'coco/bbox_mAP': 1.0,
'coco/bbox_mAP_50': 1.0,
'coco/bbox_mAP_75': 1.0,
'coco/bbox_mAP_s': 1.0,
'coco/bbox_mAP_m': 1.0,
'coco/bbox_mAP_l': 1.0,
'coco/car_precision': 1.0,
'coco/bicycle_precision': 1.0,
}
self.assertDictEqual(eval_results, target)
def test_manually_set_iou_thrs(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
# test single coco dataset evaluation
coco_metric = CocoMetric(
ann_file=fake_json_file, metric='bbox', iou_thrs=[0.3, 0.6])
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
self.assertEqual(coco_metric.iou_thrs, [0.3, 0.6])
def test_fast_eval_recall(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()
# test default proposal nums
coco_metric = CocoMetric(
ann_file=fake_json_file, metric='proposal_fast')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
target = {'coco/AR@100': 1.0, 'coco/AR@300': 1.0, 'coco/AR@1000': 1.0}
self.assertDictEqual(eval_results, target)
# test manually set proposal nums
coco_metric = CocoMetric(
ann_file=fake_json_file,
metric='proposal_fast',
proposal_nums=(2, 4))
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
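        # only 2 of the 4 gt boxes can be recalled when keeping 2 proposals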
target = {'coco/AR@2': 0.5, 'coco/AR@4': 1.0}
self.assertDictEqual(eval_results, target)
def test_evaluate_proposal(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()
coco_metric = CocoMetric(ann_file=fake_json_file, metric='proposal')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
print(eval_results)
target = {
'coco/AR@100': 1,
'coco/AR@300': 1.0,
'coco/AR@1000': 1.0,
'coco/AR_s@1000': 1.0,
'coco/AR_m@1000': 1.0,
'coco/AR_l@1000': 1.0
}
self.assertDictEqual(eval_results, target)
def test_empty_results(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
coco_metric = CocoMetric(ann_file=fake_json_file, metric='bbox')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
bboxes = np.zeros((0, 4))
labels = np.array([])
scores = np.array([])
dummy_mask = np.zeros((0, 10, 10), dtype=np.uint8)
empty_pred = dict(
bboxes=torch.from_numpy(bboxes),
scores=torch.from_numpy(scores),
labels=torch.from_numpy(labels),
masks=torch.from_numpy(dummy_mask))
coco_metric.process(
{},
[dict(pred_instances=empty_pred, img_id=0, ori_shape=(640, 640))])
# coco api Index error will be caught
coco_metric.evaluate(size=1)
def test_evaluate_without_json(self):
dummy_pred = self._create_dummy_results()
dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)
dummy_mask[:5, :5] = 1
rle_mask = mask_util.encode(dummy_mask)
rle_mask['counts'] = rle_mask['counts'].decode('utf-8')
instances = [{
'bbox_label': 0,
'bbox': [50, 60, 70, 80],
'ignore_flag': 0,
'mask': rle_mask,
}, {
'bbox_label': 0,
'bbox': [100, 120, 130, 150],
'ignore_flag': 0,
'mask': rle_mask,
}, {
'bbox_label': 1,
'bbox': [150, 160, 190, 200],
'ignore_flag': 0,
'mask': rle_mask,
}, {
'bbox_label': 0,
'bbox': [250, 260, 350, 360],
'ignore_flag': 0,
'mask': rle_mask,
}]
coco_metric = CocoMetric(
ann_file=None,
metric=['bbox', 'segm'],
classwise=False,
outfile_prefix=f'{self.tmp_dir.name}/test')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process({}, [
dict(
pred_instances=dummy_pred,
img_id=0,
ori_shape=(640, 640),
instances=instances)
])
eval_results = coco_metric.evaluate(size=1)
print(eval_results)
target = {
'coco/bbox_mAP': 1.0,
'coco/bbox_mAP_50': 1.0,
'coco/bbox_mAP_75': 1.0,
'coco/bbox_mAP_s': 1.0,
'coco/bbox_mAP_m': 1.0,
'coco/bbox_mAP_l': 1.0,
'coco/segm_mAP': 1.0,
'coco/segm_mAP_50': 1.0,
'coco/segm_mAP_75': 1.0,
'coco/segm_mAP_s': 1.0,
'coco/segm_mAP_m': 1.0,
'coco/segm_mAP_l': 1.0,
}
self.assertDictEqual(eval_results, target)
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.segm.json')))
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.gt.json')))
def test_format_only(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()
with self.assertRaises(AssertionError):
CocoMetric(
ann_file=fake_json_file,
classwise=False,
format_only=True,
outfile_prefix=None)
coco_metric = CocoMetric(
ann_file=fake_json_file,
metric='bbox',
classwise=False,
format_only=True,
outfile_prefix=f'{self.tmp_dir.name}/test')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
self.assertDictEqual(eval_results, dict())
self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.bbox.json'))
| 14,349 | 34.696517 | 78 |
py
|
ERD
|
ERD-main/tests/test_evaluation/test_metrics/test_cityscapes_metric.py
|
import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_init(self):
# test with outfile_prefix = None
with self.assertRaises(AssertionError):
CityScapesMetric(outfile_prefix=None)
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_evaluate(self):
dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask1[:, :10, :10] = 1
dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask2[:, :10, :10] = 1
self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')
self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')
city = 'lindau'
sequenceNb = '000000'
frameNb = '000019'
img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path1 = osp.join(self.seg_prefix, city, img_name1)
frameNb = '000020'
img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path2 = osp.join(self.seg_prefix, city, img_name2)
os.makedirs(osp.join(self.seg_prefix, city))
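        # Cityscapes stores instance ids as label_id * 1000 + instance index;
        # 24 is the label id of the 'person' class.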
masks1 = np.zeros((20, 20), dtype=np.int32)
masks1[:10, :10] = 24 * 1000
Image.fromarray(masks1).save(img_path1)
masks2 = np.zeros((20, 20), dtype=np.int32)
masks2[:10, :10] = 24 * 1000 + 1
Image.fromarray(masks2).save(img_path2)
data_samples = [{
'img_path': img_path1,
'pred_instances': {
'scores': torch.from_numpy(np.array([1.0])),
'labels': torch.from_numpy(np.array([0])),
'masks': torch.from_numpy(dummy_mask1)
}
}, {
'img_path': img_path2,
'pred_instances': {
'scores': torch.from_numpy(np.array([0.98])),
'labels': torch.from_numpy(np.array([1])),
'masks': torch.from_numpy(dummy_mask2)
}
}]
target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=False,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, target)
del metric
        self.assertTrue(not osp.exists(f'{self.outfile_prefix}.results'))
# test format_only
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=True,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, dict())
| 3,528 | 33.262136 | 78 |
py
|
ERD
|
ERD-main/tests/test_evaluation/test_metrics/test_dump_det_results.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
import torch
from mmengine.fileio import load
from torch import Tensor
from mmdet.evaluation import DumpDetResults
from mmdet.structures.mask import encode_mask_results
class TestDumpResults(TestCase):
def test_init(self):
with self.assertRaisesRegex(ValueError,
'The output file must be a pkl file.'):
DumpDetResults(out_file_path='./results.json')
def test_process(self):
metric = DumpDetResults(out_file_path='./results.pkl')
data_samples = [dict(data=(Tensor([1, 2, 3]), Tensor([4, 5, 6])))]
metric.process(None, data_samples)
self.assertEqual(len(metric.results), 1)
self.assertEqual(metric.results[0]['data'][0].device,
torch.device('cpu'))
metric = DumpDetResults(out_file_path='./results.pkl')
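        # masks in pred_instances should be RLE-encoded and gt_instances
        # dropped before the results are dumped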
masks = torch.zeros(10, 10, 4)
data_samples = [
dict(pred_instances=dict(masks=masks), gt_instances=[])
]
metric.process(None, data_samples)
self.assertEqual(len(metric.results), 1)
self.assertEqual(metric.results[0]['pred_instances']['masks'],
encode_mask_results(masks.numpy()))
self.assertNotIn('gt_instances', metric.results[0])
def test_compute_metrics(self):
temp_dir = tempfile.TemporaryDirectory()
path = osp.join(temp_dir.name, 'results.pkl')
metric = DumpDetResults(out_file_path=path)
data_samples = [dict(data=(Tensor([1, 2, 3]), Tensor([4, 5, 6])))]
metric.process(None, data_samples)
metric.compute_metrics(metric.results)
self.assertTrue(osp.isfile(path))
results = load(path)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['data'][0].device, torch.device('cpu'))
temp_dir.cleanup()
| 1,963 | 35.37037 | 75 |
py
|
ERD
|
ERD-main/tests/test_evaluation/test_metrics/test_openimages_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import numpy as np
import torch
from mmdet.datasets import OpenImagesDataset
from mmdet.evaluation import OpenImagesMetric
from mmdet.utils import register_all_modules
class TestOpenImagesMetric(unittest.TestCase):
def _create_dummy_results(self):
bboxes = np.array([[23.2172, 31.7541, 987.3413, 357.8443],
[100, 120, 130, 150], [150, 160, 190, 200],
[250, 260, 350, 360]])
scores = np.array([1.0, 0.98, 0.96, 0.95])
labels = np.array([0, 0, 0, 0])
return dict(
bboxes=torch.from_numpy(bboxes),
scores=torch.from_numpy(scores),
labels=torch.from_numpy(labels))
def test_init(self):
# test invalid iou_thrs
with self.assertRaises(AssertionError):
OpenImagesMetric(iou_thrs={'a', 0.5}, ioa_thrs={'b', 0.5})
# test ioa and iou_thrs length not equal
with self.assertRaises(AssertionError):
OpenImagesMetric(iou_thrs=[0.5, 0.75], ioa_thrs=[0.5])
metric = OpenImagesMetric(iou_thrs=0.6)
self.assertEqual(metric.iou_thrs, [0.6])
def test_eval(self):
register_all_modules()
dataset = OpenImagesDataset(
data_root='tests/data/OpenImages/',
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/image-metas.pkl',
pipeline=[
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'instances'))
])
dataset.full_init()
data_sample = dataset[0]['data_samples'].to_dict()
data_sample['pred_instances'] = self._create_dummy_results()
metric = OpenImagesMetric()
metric.dataset_meta = dataset.metainfo
metric.process({}, [data_sample])
results = metric.evaluate(size=len(dataset))
targets = {'openimages/AP50': 1.0, 'openimages/mAP': 1.0}
self.assertDictEqual(results, targets)
# test multi-threshold
metric = OpenImagesMetric(iou_thrs=[0.1, 0.5], ioa_thrs=[0.1, 0.5])
metric.dataset_meta = dataset.metainfo
metric.process({}, [data_sample])
results = metric.evaluate(size=len(dataset))
targets = {
'openimages/AP10': 1.0,
'openimages/AP50': 1.0,
'openimages/mAP': 1.0
}
self.assertDictEqual(results, targets)
| 2,768 | 36.931507 | 75 |
py
|
ERD
|
ERD-main/tests/test_evaluation/test_metrics/__init__.py
| 0 | 0 | 0 |
py
|
|
ERD
|
ERD-main/tests/test_evaluation/test_metrics/test_lvis_metric.py
|
import os.path as osp
import tempfile
import unittest
import numpy as np
import pycocotools.mask as mask_util
import torch
from mmdet.evaluation.metrics import LVISMetric
try:
import lvis
except ImportError:
lvis = None
from mmengine.fileio import dump
class TestLVISMetric(unittest.TestCase):
def _create_dummy_lvis_json(self, json_name):
dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)
dummy_mask[:5, :5] = 1
rle_mask = mask_util.encode(dummy_mask)
rle_mask['counts'] = rle_mask['counts'].decode('utf-8')
image = {
'id': 0,
'width': 640,
'height': 640,
'neg_category_ids': [],
'not_exhaustive_category_ids': [],
'coco_url': 'http://images.cocodataset.org/val2017/0.jpg',
}
annotation_1 = {
'id': 1,
'image_id': 0,
'category_id': 1,
'area': 400,
'bbox': [50, 60, 20, 20],
'segmentation': rle_mask,
}
annotation_2 = {
'id': 2,
'image_id': 0,
'category_id': 1,
'area': 900,
'bbox': [100, 120, 30, 30],
'segmentation': rle_mask,
}
annotation_3 = {
'id': 3,
'image_id': 0,
'category_id': 2,
'area': 1600,
'bbox': [150, 160, 40, 40],
'segmentation': rle_mask,
}
annotation_4 = {
'id': 4,
'image_id': 0,
'category_id': 1,
'area': 10000,
'bbox': [250, 260, 100, 100],
'segmentation': rle_mask,
}
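        # LVIS category frequencies: 'f' = frequent, 'c' = common, 'r' = rare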
categories = [
{
'id': 1,
'name': 'aerosol_can',
'frequency': 'c',
'image_count': 64
},
{
'id': 2,
'name': 'air_conditioner',
'frequency': 'f',
'image_count': 364
},
]
fake_json = {
'images': [image],
'annotations':
[annotation_1, annotation_2, annotation_3, annotation_4],
'categories': categories
}
dump(fake_json, json_name)
def _create_dummy_results(self):
bboxes = np.array([[50, 60, 70, 80], [100, 120, 130, 150],
[150, 160, 190, 200], [250, 260, 350, 360]])
scores = np.array([1.0, 0.98, 0.96, 0.95])
labels = np.array([0, 0, 1, 0])
dummy_mask = np.zeros((4, 10, 10), dtype=np.uint8)
dummy_mask[:, :5, :5] = 1
return dict(
bboxes=torch.from_numpy(bboxes),
scores=torch.from_numpy(scores),
labels=torch.from_numpy(labels),
masks=torch.from_numpy(dummy_mask))
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(lvis is None, 'lvis is not installed.')
def test_init(self):
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_lvis_json(fake_json_file)
with self.assertRaisesRegex(KeyError, 'metric should be one of'):
LVISMetric(ann_file=fake_json_file, metric='unknown')
@unittest.skipIf(lvis is None, 'lvis is not installed.')
def test_evaluate(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_lvis_json(fake_json_file)
dummy_pred = self._create_dummy_results()
# test single lvis dataset evaluation
lvis_metric = LVISMetric(
ann_file=fake_json_file,
classwise=False,
outfile_prefix=f'{self.tmp_dir.name}/test')
lvis_metric.dataset_meta = dict(
classes=['aerosol_can', 'air_conditioner'])
lvis_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = lvis_metric.evaluate(size=1)
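        # APr is -1 since the dummy annotations contain no rare ('r') category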
target = {
'lvis/bbox_AP': 1.0,
'lvis/bbox_AP50': 1.0,
'lvis/bbox_AP75': 1.0,
'lvis/bbox_APs': 1.0,
'lvis/bbox_APm': 1.0,
'lvis/bbox_APl': 1.0,
'lvis/bbox_APr': -1.0,
'lvis/bbox_APc': 1.0,
'lvis/bbox_APf': 1.0
}
self.assertDictEqual(eval_results, target)
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))
# test box and segm lvis dataset evaluation
lvis_metric = LVISMetric(
ann_file=fake_json_file,
metric=['bbox', 'segm'],
classwise=False,
outfile_prefix=f'{self.tmp_dir.name}/test')
lvis_metric.dataset_meta = dict(
classes=['aerosol_can', 'air_conditioner'])
lvis_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = lvis_metric.evaluate(size=1)
target = {
'lvis/bbox_AP': 1.0,
'lvis/bbox_AP50': 1.0,
'lvis/bbox_AP75': 1.0,
'lvis/bbox_APs': 1.0,
'lvis/bbox_APm': 1.0,
'lvis/bbox_APl': 1.0,
'lvis/bbox_APr': -1.0,
'lvis/bbox_APc': 1.0,
'lvis/bbox_APf': 1.0,
'lvis/segm_AP': 1.0,
'lvis/segm_AP50': 1.0,
'lvis/segm_AP75': 1.0,
'lvis/segm_APs': 1.0,
'lvis/segm_APm': 1.0,
'lvis/segm_APl': 1.0,
'lvis/segm_APr': -1.0,
'lvis/segm_APc': 1.0,
'lvis/segm_APf': 1.0
}
self.assertDictEqual(eval_results, target)
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.segm.json')))
        # test invalid metric
with self.assertRaisesRegex(
KeyError,
"metric should be one of 'bbox', 'segm', 'proposal', "
"'proposal_fast', but got invalid."):
lvis_metric = LVISMetric(
ann_file=fake_json_file, metric=['invalid'])
lvis_metric.evaluate(size=1)
# test custom metric_items
lvis_metric = LVISMetric(ann_file=fake_json_file, metric_items=['APm'])
lvis_metric.dataset_meta = dict(
classes=['aerosol_can', 'air_conditioner'])
lvis_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = lvis_metric.evaluate(size=1)
target = {
'lvis/bbox_APm': 1.0,
}
self.assertDictEqual(eval_results, target)
@unittest.skipIf(lvis is None, 'lvis is not installed.')
def test_classwise_evaluate(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_lvis_json(fake_json_file)
dummy_pred = self._create_dummy_results()
# test single lvis dataset evaluation
lvis_metric = LVISMetric(
ann_file=fake_json_file, metric='bbox', classwise=True)
lvis_metric.dataset_meta = dict(
classes=['aerosol_can', 'air_conditioner'])
lvis_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = lvis_metric.evaluate(size=1)
target = {
'lvis/bbox_AP': 1.0,
'lvis/bbox_AP50': 1.0,
'lvis/bbox_AP75': 1.0,
'lvis/bbox_APs': 1.0,
'lvis/bbox_APm': 1.0,
'lvis/bbox_APl': 1.0,
'lvis/bbox_APr': -1.0,
'lvis/bbox_APc': 1.0,
'lvis/bbox_APf': 1.0,
'lvis/aerosol_can_precision': 1.0,
'lvis/air_conditioner_precision': 1.0,
}
self.assertDictEqual(eval_results, target)
@unittest.skipIf(lvis is None, 'lvis is not installed.')
def test_manually_set_iou_thrs(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_lvis_json(fake_json_file)
# test single lvis dataset evaluation
lvis_metric = LVISMetric(
ann_file=fake_json_file, metric='bbox', iou_thrs=[0.3, 0.6])
lvis_metric.dataset_meta = dict(
classes=['aerosol_can', 'air_conditioner'])
self.assertEqual(lvis_metric.iou_thrs, [0.3, 0.6])
@unittest.skipIf(lvis is None, 'lvis is not installed.')
def test_fast_eval_recall(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_lvis_json(fake_json_file)
dummy_pred = self._create_dummy_results()
# test default proposal nums
lvis_metric = LVISMetric(
ann_file=fake_json_file, metric='proposal_fast')
lvis_metric.dataset_meta = dict(
classes=['aerosol_can', 'air_conditioner'])
lvis_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = lvis_metric.evaluate(size=1)
target = {'lvis/AR@100': 1.0, 'lvis/AR@300': 1.0, 'lvis/AR@1000': 1.0}
self.assertDictEqual(eval_results, target)
# test manually set proposal nums
lvis_metric = LVISMetric(
ann_file=fake_json_file,
metric='proposal_fast',
proposal_nums=(2, 4))
lvis_metric.dataset_meta = dict(
classes=['aerosol_can', 'air_conditioner'])
lvis_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = lvis_metric.evaluate(size=1)
target = {'lvis/AR@2': 0.5, 'lvis/AR@4': 1.0}
self.assertDictEqual(eval_results, target)
@unittest.skipIf(lvis is None, 'lvis is not installed.')
def test_evaluate_proposal(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_lvis_json(fake_json_file)
dummy_pred = self._create_dummy_results()
lvis_metric = LVISMetric(ann_file=fake_json_file, metric='proposal')
lvis_metric.dataset_meta = dict(
classes=['aerosol_can', 'air_conditioner'])
lvis_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = lvis_metric.evaluate(size=1)
target = {
'lvis/AR@300': 1.0,
'lvis/ARs@300': 1.0,
'lvis/ARm@300': 1.0,
'lvis/ARl@300': 1.0
}
self.assertDictEqual(eval_results, target)
@unittest.skipIf(lvis is None, 'lvis is not installed.')
def test_empty_results(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_lvis_json(fake_json_file)
lvis_metric = LVISMetric(ann_file=fake_json_file, metric='bbox')
lvis_metric.dataset_meta = dict(
classes=['aerosol_can', 'air_conditioner'])
bboxes = np.zeros((0, 4))
labels = np.array([])
scores = np.array([])
dummy_mask = np.zeros((0, 10, 10), dtype=np.uint8)
empty_pred = dict(
bboxes=torch.from_numpy(bboxes),
scores=torch.from_numpy(scores),
labels=torch.from_numpy(labels),
masks=torch.from_numpy(dummy_mask))
lvis_metric.process(
{},
[dict(pred_instances=empty_pred, img_id=0, ori_shape=(640, 640))])
# lvis api Index error will be caught
lvis_metric.evaluate(size=1)
@unittest.skipIf(lvis is None, 'lvis is not installed.')
def test_format_only(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_lvis_json(fake_json_file)
dummy_pred = self._create_dummy_results()
with self.assertRaises(AssertionError):
LVISMetric(
ann_file=fake_json_file,
classwise=False,
format_only=True,
outfile_prefix=None)
lvis_metric = LVISMetric(
ann_file=fake_json_file,
metric='bbox',
classwise=False,
format_only=True,
outfile_prefix=f'{self.tmp_dir.name}/test')
lvis_metric.dataset_meta = dict(
classes=['aerosol_can', 'air_conditioner'])
lvis_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = lvis_metric.evaluate(size=1)
self.assertDictEqual(eval_results, dict())
self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.bbox.json'))
| 13,025 | 34.785714 | 79 |
py
|
ERD
|
ERD-main/tests/test_evaluation/test_metrics/test_coco_occluded_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from tempfile import TemporaryDirectory
import mmengine
import numpy as np
from mmdet.datasets import CocoDataset
from mmdet.evaluation import CocoOccludedSeparatedMetric
def test_coco_occluded_separated_metric():
ann = [[
'fake1.jpg', 'person', 8, [219.9, 176.12, 11.14, 34.23], {
'size': [480, 640],
'counts': b'nYW31n>2N2FNbA48Kf=?XBDe=m0OM3M4YOPB8_>L4JXao5'
}
]] * 3
dummy_mask = np.zeros((10, 10), dtype=np.uint8)
dummy_mask[:5, :5] = 1
rle = {
'size': [480, 640],
'counts': b'nYW31n>2N2FNbA48Kf=?XBDe=m0OM3M4YOPB8_>L4JXao5'
}
res = [(None,
dict(
img_id=0,
bboxes=np.array([[50, 60, 70, 80]] * 2),
masks=[rle] * 2,
labels=np.array([0, 1], dtype=np.int64),
scores=np.array([0.77, 0.77])))] * 3
tempdir = TemporaryDirectory()
ann_path = osp.join(tempdir.name, 'coco_occluded.pkl')
mmengine.dump(ann, ann_path)
metric = CocoOccludedSeparatedMetric(
ann_file='tests/data/coco_sample.json',
occluded_ann=ann_path,
separated_ann=ann_path,
metric=[])
metric.dataset_meta = CocoDataset.METAINFO
eval_res = metric.compute_metrics(res)
assert isinstance(eval_res, dict)
assert eval_res['occluded_recall'] == 100
assert eval_res['separated_recall'] == 100
| 1,470 | 30.297872 | 71 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_pvt.py
|
import pytest
import torch
from mmdet.models.backbones.pvt import (PVTEncoderLayer,
PyramidVisionTransformer,
PyramidVisionTransformerV2)
def test_pvt_block():
# test PVT structure and forward
block = PVTEncoderLayer(
embed_dims=64, num_heads=4, feedforward_channels=256)
assert block.ffn.embed_dims == 64
assert block.attn.num_heads == 4
assert block.ffn.feedforward_channels == 256
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
def test_pvt():
"""Test PVT backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
PyramidVisionTransformer(pretrained=123)
# test pretrained image size
with pytest.raises(AssertionError):
PyramidVisionTransformer(pretrain_img_size=(224, 224, 224))
# Test absolute position embedding
temp = torch.randn((1, 3, 224, 224))
model = PyramidVisionTransformer(
pretrain_img_size=224, use_abs_pos_embed=True)
model.init_weights()
model(temp)
# Test normal inference
temp = torch.randn((1, 3, 32, 32))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 33, 33))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 28, 34)
assert outs[1].shape == (1, 128, 14, 17)
assert outs[2].shape == (1, 320, 7, 8)
assert outs[3].shape == (1, 512, 3, 4)
def test_pvtv2():
"""Test PVTv2 backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
PyramidVisionTransformerV2(pretrained=123)
# test pretrained image size
with pytest.raises(AssertionError):
PyramidVisionTransformerV2(pretrain_img_size=(224, 224, 224))
# Test normal inference
temp = torch.randn((1, 3, 32, 32))
model = PyramidVisionTransformerV2()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 31, 31))
model = PyramidVisionTransformerV2()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = PyramidVisionTransformerV2()
outs = model(temp)
assert outs[0].shape == (1, 64, 28, 35)
assert outs[1].shape == (1, 128, 14, 18)
assert outs[2].shape == (1, 320, 7, 9)
assert outs[3].shape == (1, 512, 4, 5)
| 3,332 | 31.048077 | 69 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_hourglass.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
with pytest.raises(AssertionError):
        # HourglassNet's num_stacks should be larger than 0
HourglassNet(num_stacks=0)
with pytest.raises(AssertionError):
# len(stage_channels) should equal len(stage_blocks)
HourglassNet(
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2, 4])
with pytest.raises(AssertionError):
        # len(stage_channels) should be larger than downsample_times
HourglassNet(
downsample_times=5,
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2])
# Test HourglassNet-52
model = HourglassNet(
num_stacks=1,
stage_channels=(64, 64, 96, 96, 96, 128),
feat_channel=64)
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([1, 64, 64, 64])
# Test HourglassNet-104
model = HourglassNet(
num_stacks=2,
stage_channels=(64, 64, 96, 96, 96, 128),
feat_channel=64)
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 2
assert feat[0].shape == torch.Size([1, 64, 64, 64])
assert feat[1].shape == torch.Size([1, 64, 64, 64])
| 1,464 | 28.3 | 65 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_res2net.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import Res2Net
from mmdet.models.backbones.res2net import Bottle2neck
from .utils import is_block
def test_res2net_bottle2neck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')
with pytest.raises(AssertionError):
# Scale must be larger than 1
Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')
# Test Res2Net Bottle2neck structure
block = Bottle2neck(
64, 64, base_width=26, stride=2, scales=4, style='pytorch')
assert block.scales == 4
# Test Res2Net Bottle2neck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
Bottle2neck(
64,
64,
base_width=26,
scales=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
Bottle2neck(64, 64, dcn=dcn)
# Test Res2Net Bottle2neck forward
block = Bottle2neck(64, 16, base_width=26, scales=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_res2net_backbone():
with pytest.raises(KeyError):
# Res2Net depth should be in [50, 101, 152]
Res2Net(depth=18)
# Test Res2Net with scales 4, base_width 26
model = Res2Net(depth=50, scales=4, base_width=26)
for m in model.modules():
if is_block(m):
assert m.scales == 4
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 8, 8])
assert feat[1].shape == torch.Size([1, 512, 4, 4])
assert feat[2].shape == torch.Size([1, 1024, 2, 2])
assert feat[3].shape == torch.Size([1, 2048, 1, 1])
| 1,976 | 30.380952 | 72 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_swin.py
|
import pytest
import torch
from mmdet.models.backbones.swin import SwinBlock, SwinTransformer
def test_swin_block():
# test SwinBlock structure and forward
block = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256)
assert block.ffn.embed_dims == 64
assert block.attn.w_msa.num_heads == 4
assert block.ffn.feedforward_channels == 256
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
# Test BasicBlock with checkpoint forward
block = SwinBlock(
embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)
assert block.with_cp
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
def test_swin_transformer():
"""Test Swin Transformer backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
SwinTransformer(pretrained=123)
with pytest.raises(AssertionError):
        # Swin uses non-overlapping patch embed, so the stride of the patch
        # embed must be equal to the patch size.
SwinTransformer(strides=(2, 2, 2, 2), patch_size=4)
# test pretrained image size
with pytest.raises(AssertionError):
SwinTransformer(pretrain_img_size=(224, 224, 224))
# Test absolute position embedding
temp = torch.randn((1, 3, 224, 224))
model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True)
model.init_weights()
model(temp)
# Test patch norm
model = SwinTransformer(patch_norm=False)
model(temp)
# Test normal inference
temp = torch.randn((1, 3, 32, 32))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 8, 8)
assert outs[1].shape == (1, 192, 4, 4)
assert outs[2].shape == (1, 384, 2, 2)
assert outs[3].shape == (1, 768, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 31, 31))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 8, 8)
assert outs[1].shape == (1, 192, 4, 4)
assert outs[2].shape == (1, 384, 2, 2)
assert outs[3].shape == (1, 768, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 28, 35)
assert outs[1].shape == (1, 192, 14, 18)
assert outs[2].shape == (1, 384, 7, 9)
assert outs[3].shape == (1, 768, 4, 5)
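    # Test frozen_stages: all parameters should be frozen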
model = SwinTransformer(frozen_stages=4)
model.train()
for p in model.parameters():
assert not p.requires_grad
| 2,648 | 30.915663 | 79 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_efficientnet.py
|
import pytest
import torch
from mmdet.models.backbones import EfficientNet
def test_efficientnet_backbone():
"""Test EfficientNet backbone."""
with pytest.raises(AssertionError):
# EfficientNet arch should be a key in EfficientNet.arch_settings
EfficientNet(arch='c3')
model = EfficientNet(arch='b0', out_indices=(0, 1, 2, 3, 4, 5, 6))
model.train()
imgs = torch.randn(2, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size([2, 32, 16, 16])
assert feat[1].shape == torch.Size([2, 16, 16, 16])
assert feat[2].shape == torch.Size([2, 24, 8, 8])
assert feat[3].shape == torch.Size([2, 40, 4, 4])
assert feat[4].shape == torch.Size([2, 112, 2, 2])
assert feat[5].shape == torch.Size([2, 320, 1, 1])
assert feat[6].shape == torch.Size([2, 1280, 1, 1])
| 859 | 32.076923 | 73 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.ops import DeformConv2dPack
from torch.nn.modules import AvgPool2d, GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones import ResNet, ResNetV1d
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.layers import ResLayer, SimplifiedBasicBlock
from .utils import check_norm_state, is_block, is_norm
def assert_params_all_zeros(module) -> bool:
"""Check if the parameters of the module is all zeros.
Args:
module (nn.Module): The module to be checked.
Returns:
        bool: Whether the parameters of the module are all zeros.
"""
weight_data = module.weight.data
is_weight_zero = weight_data.allclose(
weight_data.new_zeros(weight_data.size()))
if hasattr(module, 'bias') and module.bias is not None:
bias_data = module.bias.data
is_bias_zero = bias_data.allclose(
bias_data.new_zeros(bias_data.size()))
else:
is_bias_zero = True
return is_weight_zero and is_bias_zero
def test_resnet_basic_block():
with pytest.raises(AssertionError):
# Not implemented yet.
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
BasicBlock(64, 64, dcn=dcn)
with pytest.raises(AssertionError):
# Not implemented yet.
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
BasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
BasicBlock(64, 64, plugins=plugins)
# test BasicBlock structure and forward
block = BasicBlock(64, 64)
assert block.conv1.in_channels == 64
assert block.conv1.out_channels == 64
assert block.conv1.kernel_size == (3, 3)
assert block.conv2.in_channels == 64
assert block.conv2.out_channels == 64
assert block.conv2.kernel_size == (3, 3)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test BasicBlock with checkpoint forward
block = BasicBlock(64, 64, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
Bottleneck(64, 64, style='tensorflow')
with pytest.raises(AssertionError):
# Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv4')
]
Bottleneck(64, 16, plugins=plugins)
with pytest.raises(AssertionError):
# Need to specify different postfix to avoid duplicate plugin name
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
Bottleneck(64, 16, plugins=plugins)
with pytest.raises(KeyError):
# Plugin type is not supported
plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
Bottleneck(64, 16, plugins=plugins)
# Test Bottleneck with checkpoint forward
block = Bottleneck(64, 16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck style
block = Bottleneck(64, 64, stride=2, style='pytorch')
assert block.conv1.stride == (1, 1)
assert block.conv2.stride == (2, 2)
block = Bottleneck(64, 64, stride=2, style='caffe')
assert block.conv1.stride == (2, 2)
assert block.conv2.stride == (1, 1)
# Test Bottleneck DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
block = Bottleneck(64, 64, dcn=dcn)
assert isinstance(block.conv2, DeformConv2dPack)
# Test Bottleneck forward
block = Bottleneck(64, 16)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
# after conv2, 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
assert block.nonlocal_block.in_channels == 16
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
# conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.context_block1.in_channels == 16
assert block.context_block2.in_channels == 64
assert block.context_block3.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_simplied_basic_block():
with pytest.raises(AssertionError):
# Not implemented yet.
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
SimplifiedBasicBlock(64, 64, dcn=dcn)
with pytest.raises(AssertionError):
# Not implemented yet.
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
SimplifiedBasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
SimplifiedBasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
SimplifiedBasicBlock(64, 64, with_cp=True)
# test SimplifiedBasicBlock structure and forward
block = SimplifiedBasicBlock(64, 64)
assert block.conv1.in_channels == 64
assert block.conv1.out_channels == 64
assert block.conv1.kernel_size == (3, 3)
assert block.conv2.in_channels == 64
assert block.conv2.out_channels == 64
assert block.conv2.kernel_size == (3, 3)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# test SimplifiedBasicBlock without norm
block = SimplifiedBasicBlock(64, 64, norm_cfg=None)
assert block.norm1 is None
assert block.norm2 is None
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_res_layer():
    # Test ResLayer of 3 Bottleneck w/o downsample
layer = ResLayer(Bottleneck, 64, 16, 3)
assert len(layer) == 3
assert layer[0].conv1.in_channels == 64
assert layer[0].conv1.out_channels == 16
for i in range(1, len(layer)):
assert layer[i].conv1.in_channels == 64
assert layer[i].conv1.out_channels == 16
for i in range(len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResLayer of 3 Bottleneck with downsample
layer = ResLayer(Bottleneck, 64, 64, 3)
assert layer[0].downsample[0].out_channels == 256
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 56, 56])
# Test ResLayer of 3 Bottleneck with stride=2
layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)
assert layer[0].downsample[0].out_channels == 256
assert layer[0].downsample[0].stride == (2, 2)
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 28, 28])
# Test ResLayer of 3 Bottleneck with stride=2 and average downsample
layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)
assert isinstance(layer[0].downsample[0], AvgPool2d)
assert layer[0].downsample[1].out_channels == 256
assert layer[0].downsample[1].stride == (1, 1)
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 28, 28])
# Test ResLayer of 3 BasicBlock with stride=2 and downsample_first=False
layer = ResLayer(BasicBlock, 64, 64, 3, stride=2, downsample_first=False)
assert layer[2].downsample[0].out_channels == 64
assert layer[2].downsample[0].stride == (2, 2)
for i in range(len(layer) - 1):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 64, 28, 28])
def test_resnest_stem():
# Test default stem_channels
model = ResNet(50)
assert model.stem_channels == 64
assert model.conv1.out_channels == 64
assert model.norm1.num_features == 64
# Test default stem_channels, with base_channels=3
model = ResNet(50, base_channels=3)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test stem_channels=3
model = ResNet(50, stem_channels=3)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test stem_channels=3, with base_channels=2
model = ResNet(50, stem_channels=3, base_channels=2)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test V1d stem_channels
model = ResNetV1d(depth=50, stem_channels=6)
model.train()
assert model.stem[0].out_channels == 3
assert model.stem[1].num_features == 3
assert model.stem[3].out_channels == 3
assert model.stem[4].num_features == 3
assert model.stem[6].out_channels == 6
assert model.stem[7].num_features == 6
assert model.layer1[0].conv1.in_channels == 6
def test_resnet_backbone():
"""Test resnet backbone."""
with pytest.raises(KeyError):
# ResNet depth should be in [18, 34, 50, 101, 152]
ResNet(20)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=0)
with pytest.raises(AssertionError):
# len(stage_with_dcn) == num_stages
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
ResNet(50, dcn=dcn, stage_with_dcn=(True, ))
with pytest.raises(AssertionError):
# len(stage_with_plugin) == num_stages
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True),
position='after_conv3')
]
ResNet(50, plugins=plugins)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=5)
with pytest.raises(AssertionError):
# len(strides) == len(dilations) == num_stages
ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)
with pytest.raises(TypeError):
# pretrained must be a string path
model = ResNet(50, pretrained=0)
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
ResNet(50, style='tensorflow')
# Test ResNet50 norm_eval=True
model = ResNet(50, norm_eval=True, base_channels=1)
model.train()
assert check_norm_state(model.modules(), False)
# Test ResNet50 with torchvision pretrained weight
model = ResNet(
depth=50, norm_eval=True, pretrained='torchvision://resnet50')
model.train()
assert check_norm_state(model.modules(), False)
# Test ResNet50 with first stage frozen
frozen_stages = 1
model = ResNet(50, frozen_stages=frozen_stages, base_channels=1)
model.train()
assert model.norm1.training is False
for layer in [model.conv1, model.norm1]:
for param in layer.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test ResNet50V1d with first stage frozen
model = ResNetV1d(depth=50, frozen_stages=frozen_stages, base_channels=2)
assert len(model.stem) == 9
model.train()
assert check_norm_state(model.stem, False)
for param in model.stem.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test ResNet18 forward
model = ResNet(18)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 64, 8, 8])
assert feat[1].shape == torch.Size([1, 128, 4, 4])
assert feat[2].shape == torch.Size([1, 256, 2, 2])
assert feat[3].shape == torch.Size([1, 512, 1, 1])
# Test ResNet18 with checkpoint forward
model = ResNet(18, with_cp=True)
for m in model.modules():
if is_block(m):
assert m.with_cp
# Test ResNet50 with BatchNorm forward
model = ResNet(50, base_channels=1)
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
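# Channel arithmetic behind the shapes above (assuming stage widths scale as
# base_channels * block.expansion * 2**stage_index for Bottleneck ResNets):
#
#     [1 * 4 * 2**i for i in range(4)]  ->  [4, 8, 16, 32]
#
# which matches feat[0..3] for base_channels=1.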
# Test ResNet50 with layers 1, 2, 3 out forward
model = ResNet(50, out_indices=(0, 1, 2), base_channels=1)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
# Test ResNet50 with checkpoint forward
model = ResNet(50, with_cp=True, base_channels=1)
for m in model.modules():
if is_block(m):
assert m.with_cp
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNet50 with GroupNorm forward
model = ResNet(
50,
base_channels=4,
norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 16, 8, 8])
assert feat[1].shape == torch.Size([1, 32, 4, 4])
assert feat[2].shape == torch.Size([1, 64, 2, 2])
assert feat[3].shape == torch.Size([1, 128, 1, 1])
# Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D
# after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, True, True, True),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, False),
position='after_conv3')
]
model = ResNet(50, plugins=plugins, base_channels=8)
for m in model.layer1.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'gen_attention_block')
assert m.nonlocal_block.in_channels == 8
for m in model.layer2.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 16
assert m.gen_attention_block.in_channels == 16
assert m.context_block.in_channels == 64
for m in model.layer3.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 32
assert m.gen_attention_block.in_channels == 32
assert m.context_block.in_channels == 128
for m in model.layer4.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 64
assert m.gen_attention_block.in_channels == 64
assert not hasattr(m, 'context_block')
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 32, 8, 8])
assert feat[1].shape == torch.Size([1, 64, 4, 4])
assert feat[2].shape == torch.Size([1, 128, 2, 2])
assert feat[3].shape == torch.Size([1, 256, 1, 1])
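# Plugin width convention assumed by the asserts above: plugins attached
# after_conv2 see the bottleneck middle width (planes), while plugins attached
# after_conv3 see the expanded width (planes * expansion). With base_channels=8
# layer2 uses planes=16, hence in_channels 16 for the attention/non-local
# blocks and 16 * 4 == 64 for the context block.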
# Test ResNet50 with 2 ContextBlocks after conv3 in layers 2 and 3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
stages=(False, True, True, False),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
stages=(False, True, True, False),
position='after_conv3')
]
model = ResNet(50, plugins=plugins, base_channels=8)
for m in model.layer1.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'context_block1')
assert not hasattr(m, 'context_block2')
for m in model.layer2.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert m.context_block1.in_channels == 64
assert m.context_block2.in_channels == 64
for m in model.layer3.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert m.context_block1.in_channels == 128
assert m.context_block2.in_channels == 128
for m in model.layer4.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'context_block1')
assert not hasattr(m, 'context_block2')
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 32, 8, 8])
assert feat[1].shape == torch.Size([1, 64, 4, 4])
assert feat[2].shape == torch.Size([1, 128, 2, 2])
assert feat[3].shape == torch.Size([1, 256, 1, 1])
# Test ResNet50 zero initialization of residual
model = ResNet(50, zero_init_residual=True, base_channels=1)
model.init_weights()
for m in model.modules():
if isinstance(m, Bottleneck):
assert assert_params_all_zeros(m.norm3)
elif isinstance(m, BasicBlock):
assert assert_params_all_zeros(m.norm2)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNetV1d forward
model = ResNetV1d(depth=50, base_channels=2)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 8, 8, 8])
assert feat[1].shape == torch.Size([1, 16, 4, 4])
assert feat[2].shape == torch.Size([1, 32, 2, 2])
assert feat[3].shape == torch.Size([1, 64, 1, 1])
| 23,003 | 34.120611 | 78 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.layers import SimplifiedBasicBlock
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
SimplifiedBasicBlock)):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
| 1,027 | 30.151515 | 77 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_mobilenet_v2.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.mobilenet_v2 import MobileNetV2
from .utils import check_norm_state, is_block, is_norm
def test_mobilenetv2_backbone():
with pytest.raises(ValueError):
# frozen_stages must be in range(-1, 8)
MobileNetV2(frozen_stages=8)
with pytest.raises(ValueError):
# out_indices must be in range(-1, 8)
MobileNetV2(out_indices=[8])
# Test MobileNetV2 with first stage frozen
frozen_stages = 1
model = MobileNetV2(frozen_stages=frozen_stages)
model.train()
for mod in model.conv1.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test MobileNetV2 with norm_eval=True
model = MobileNetV2(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
# Test MobileNetV2 forward with widen_factor=1.0
model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8))
model.train()
assert check_norm_state(model.modules(), True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 8
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
assert feat[7].shape == torch.Size((1, 1280, 7, 7))
# Test MobileNetV2 forward with widen_factor=0.5
model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 8, 112, 112))
assert feat[1].shape == torch.Size((1, 16, 56, 56))
assert feat[2].shape == torch.Size((1, 16, 28, 28))
assert feat[3].shape == torch.Size((1, 32, 14, 14))
assert feat[4].shape == torch.Size((1, 48, 14, 14))
assert feat[5].shape == torch.Size((1, 80, 7, 7))
assert feat[6].shape == torch.Size((1, 160, 7, 7))
# Test MobileNetV2 forward with widen_factor=2.0
model = MobileNetV2(widen_factor=2.0, out_indices=range(0, 8))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 48, 56, 56))
assert feat[2].shape == torch.Size((1, 64, 28, 28))
assert feat[3].shape == torch.Size((1, 128, 14, 14))
assert feat[4].shape == torch.Size((1, 192, 14, 14))
assert feat[5].shape == torch.Size((1, 320, 7, 7))
assert feat[6].shape == torch.Size((1, 640, 7, 7))
assert feat[7].shape == torch.Size((1, 2560, 7, 7))
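# The shapes above suggest the widen_factor behaviour: every stage width is
# scaled roughly linearly and rounded to a channel divisor (presumably
# make_divisible with divisor 8, hence 24 * 0.5 -> 16 in the 0.5 run), and the
# final 1280-channel conv appears to be widened only when widen_factor > 1
# (hence 2560 for widen_factor=2.0).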
# Test MobileNetV2 forward with dict(type='ReLU')
model = MobileNetV2(
widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
# Test MobileNetV2 with BatchNorm forward
model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
# Test MobileNetV2 with GroupNorm forward
model = MobileNetV2(
widen_factor=1.0,
norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),
out_indices=range(0, 7))
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
# Test MobileNetV2 with layers 1, 3, 5 out forward
model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 32, 28, 28))
assert feat[2].shape == torch.Size((1, 96, 14, 14))
# Test MobileNetV2 with checkpoint forward
model = MobileNetV2(
widen_factor=1.0, with_cp=True, out_indices=range(0, 7))
for m in model.modules():
if is_block(m):
assert m.with_cp
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
| 6,546 | 36.626437 | 77 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_hrnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hrnet import HRModule, HRNet
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
@pytest.mark.parametrize('block', [BasicBlock, Bottleneck])
def test_hrmodule(block):
# Test multiscale forward
num_channels = (32, 64)
in_channels = [c * block.expansion for c in num_channels]
hrmodule = HRModule(
num_branches=2,
blocks=block,
in_channels=in_channels,
num_blocks=(4, 4),
num_channels=num_channels,
)
feats = [
torch.randn(1, in_channels[0], 64, 64),
torch.randn(1, in_channels[1], 32, 32)
]
feats = hrmodule(feats)
assert len(feats) == 2
assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
assert feats[1].shape == torch.Size([1, in_channels[1], 32, 32])
# Test single scale forward
num_channels = (32, 64)
in_channels = [c * block.expansion for c in num_channels]
hrmodule = HRModule(
num_branches=2,
blocks=block,
in_channels=in_channels,
num_blocks=(4, 4),
num_channels=num_channels,
multiscale_output=False,
)
feats = [
torch.randn(1, in_channels[0], 64, 64),
torch.randn(1, in_channels[1], 32, 32)
]
feats = hrmodule(feats)
assert len(feats) == 1
assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
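# Reading of the two cases above: HRModule keeps one branch per resolution and
# exchanges information between branches; with multiscale_output=False only
# the highest-resolution branch is expected to be returned, which is why the
# single output keeps the 64x64 resolution and in_channels[0] width.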
def test_hrnet_backbone():
# only have 3 stages
extra = dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)))
with pytest.raises(AssertionError):
# HRNet currently only supports 4 stages
HRNet(extra=extra)
extra['stage4'] = dict(
num_modules=3,
num_branches=3, # should be 4
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))
with pytest.raises(AssertionError):
# len(num_blocks) should equal num_branches
HRNet(extra=extra)
extra['stage4']['num_branches'] = 4
# Test hrnetv2p_w32
model = HRNet(extra=extra)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feats = model(imgs)
assert len(feats) == 4
assert feats[0].shape == torch.Size([1, 32, 64, 64])
assert feats[3].shape == torch.Size([1, 256, 8, 8])
# Test single scale output
model = HRNet(extra=extra, multiscale_output=False)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feats = model(imgs)
assert len(feats) == 1
assert feats[0].shape == torch.Size([1, 32, 64, 64])
| 3,089 | 26.589286 | 68 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_csp_darknet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
# frozen_stages must be in range(-1, len(arch_setting) + 1)
CSPDarknet(frozen_stages=6)
with pytest.raises(AssertionError):
# out_indices must be in range(len(arch_setting) + 1)
CSPDarknet(out_indices=[6])
# Test CSPDarknet with first stage frozen
frozen_stages = 1
model = CSPDarknet(frozen_stages=frozen_stages)
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test CSPDarknet with norm_eval=True
model = CSPDarknet(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
# Test CSPDarknet-P5 forward with widen_factor=0.25
model = CSPDarknet(arch='P5', widen_factor=0.25, out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 16, 32, 32))
assert feat[1].shape == torch.Size((1, 32, 16, 16))
assert feat[2].shape == torch.Size((1, 64, 8, 8))
assert feat[3].shape == torch.Size((1, 128, 4, 4))
assert feat[4].shape == torch.Size((1, 256, 2, 2))
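# The P5 widths asserted above are consistent with base stage widths
# (64, 128, 256, 512, 1024) multiplied by widen_factor, e.g. 0.25 gives
# (16, 32, 64, 128, 256); the P6 variant below adds an extra 768-wide stage
# before the last one (192 after scaling by 0.25).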
# Test CSPDarknet-P6 forward with widen_factor=0.25
model = CSPDarknet(
arch='P6',
widen_factor=0.25,
out_indices=range(0, 6),
spp_kernal_sizes=(3, 5, 7))
model.train()
imgs = torch.randn(1, 3, 128, 128)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 16, 64, 64))
assert feat[1].shape == torch.Size((1, 32, 32, 32))
assert feat[2].shape == torch.Size((1, 64, 16, 16))
assert feat[3].shape == torch.Size((1, 128, 8, 8))
assert feat[4].shape == torch.Size((1, 192, 4, 4))
assert feat[5].shape == torch.Size((1, 256, 2, 2))
# Test CSPDarknet forward with dict(type='ReLU')
model = CSPDarknet(
widen_factor=0.125, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with BatchNorm forward
model = CSPDarknet(widen_factor=0.125, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with custom arch forward
arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
[224, 512, 1, True, False]]
model = CSPDarknet(
arch_ovewrite=arch_ovewrite,
widen_factor=0.25,
out_indices=(0, 1, 2, 3))
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 8, 16, 16))
assert feat[1].shape == torch.Size((1, 14, 8, 8))
assert feat[2].shape == torch.Size((1, 56, 4, 4))
assert feat[3].shape == torch.Size((1, 128, 2, 2))
| 4,117 | 34.196581 | 79 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_renext.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')
# Test ResNeXt Bottleneck structure
block = BottleneckX(
64, 64, groups=32, base_width=4, stride=2, style='pytorch')
assert block.conv2.stride == (2, 2)
assert block.conv2.groups == 32
assert block.conv2.out_channels == 128
# Test ResNeXt Bottleneck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
BottleneckX(
64,
64,
groups=32,
base_width=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
BottleneckX(64, 64, dcn=dcn)
# Test ResNeXt Bottleneck forward
block = BottleneckX(64, 16, groups=32, base_width=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResNeXt Bottleneck forward with plugins
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]
block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnext_backbone():
with pytest.raises(KeyError):
# ResNeXt depth should be in [50, 101, 152]
ResNeXt(depth=18)
# Test ResNeXt with group 32, base_width 4
model = ResNeXt(depth=50, groups=32, base_width=4)
for m in model.modules():
if is_block(m):
assert m.conv2.groups == 32
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 8, 8])
assert feat[1].shape == torch.Size([1, 512, 4, 4])
assert feat[2].shape == torch.Size([1, 1024, 2, 2])
assert feat[3].shape == torch.Size([1, 2048, 1, 1])
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
| 3,528 | 32.292453 | 73 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_trident_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import TridentResNet
from mmdet.models.backbones.trident_resnet import TridentBottleneck
def test_trident_resnet_bottleneck():
trident_dilations = (1, 2, 3)
test_branch_idx = 1
concat_output = True
trident_build_config = (trident_dilations, test_branch_idx, concat_output)
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
TridentBottleneck(
*trident_build_config, inplanes=64, planes=64, style='tensorflow')
with pytest.raises(AssertionError):
# Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv4')
]
TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
with pytest.raises(AssertionError):
# Need to specify different postfix to avoid duplicate plugin name
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
with pytest.raises(KeyError):
# Plugin type is not supported
plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
# Test Bottleneck with checkpoint forward
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck style
block = TridentBottleneck(
*trident_build_config,
inplanes=64,
planes=64,
stride=2,
style='pytorch')
assert block.conv1.stride == (1, 1)
assert block.conv2.stride == (2, 2)
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=64, stride=2, style='caffe')
assert block.conv1.stride == (2, 2)
assert block.conv2.stride == (1, 1)
# Test Bottleneck forward
block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
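# The num_branch batch dimension in the asserts above reflects the trident
# design: the same weights are applied with several dilations (1, 2, 3 here)
# and the branch outputs are stacked along the batch axis during training, so
# a single input image yields block.num_branch feature maps.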
# Test Bottleneck with 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
# after conv2, 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
assert block.nonlocal_block.in_channels == 16
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
# conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
position='after_conv3')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.context_block1.in_channels == 16
assert block.context_block2.in_channels == 64
assert block.context_block3.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
def test_trident_resnet_backbone():
"""Test TridentResNet backbone."""
tridentresnet_config = dict(
num_branch=3,
test_branch_idx=1,
strides=(1, 2, 2),
dilations=(1, 1, 1),
trident_dilations=(1, 2, 3),
out_indices=(2, ),
)
"""Test tridentresnet backbone."""
with pytest.raises(AssertionError):
# TridentResNet depth should be in [50, 101, 152]
TridentResNet(18, **tridentresnet_config)
with pytest.raises(AssertionError):
# In TridentResNet: num_stages == 3
TridentResNet(50, num_stages=4, **tridentresnet_config)
model = TridentResNet(50, num_stages=3, **tridentresnet_config)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([3, 1024, 2, 2])
| 6,372 | 34.209945 | 79 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_resnest.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeSt
from mmdet.models.backbones.resnest import Bottleneck as BottleneckS
def test_resnest_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')
# Test ResNeSt Bottleneck structure
block = BottleneckS(
2, 4, radix=2, reduction_factor=4, stride=2, style='pytorch')
assert block.avd_layer.stride == 2
assert block.conv2.channels == 4
# Test ResNeSt Bottleneck forward
block = BottleneckS(16, 4, radix=2, reduction_factor=4)
x = torch.randn(2, 16, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([2, 16, 56, 56])
def test_resnest_backbone():
with pytest.raises(KeyError):
# ResNeSt depth should be in [50, 101, 152, 200]
ResNeSt(depth=18)
# Test ResNeSt with radix 2, reduction_factor 4
model = ResNeSt(
depth=50,
base_channels=4,
radix=2,
reduction_factor=4,
out_indices=(0, 1, 2, 3))
model.train()
imgs = torch.randn(2, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([2, 16, 8, 8])
assert feat[1].shape == torch.Size([2, 32, 4, 4])
assert feat[2].shape == torch.Size([2, 64, 2, 2])
assert feat[3].shape == torch.Size([2, 128, 1, 1])
| 1,473 | 29.708333 | 76 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_regnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import RegNet
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
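# For reference, the (w0, wa, wm, depth) entries above are assumed to follow
# the RegNet design-space rule: per-block widths start from a linear ramp
# u_j = w0 + wa * j (j = 0..depth-1), are quantized to approximate powers of
# wm, and are then rounded to be compatible with the group width group_w; the
# four numbers in each expected-channel list are the resulting per-stage
# output widths.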
@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)
def test_regnet_backbone(arch_name, arch, out_channels):
with pytest.raises(AssertionError):
# arch_name must be a predefined RegNet architecture
RegNet(arch_name + '233')
# Test RegNet with arch_name
model = RegNet(arch_name)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
# Test RegNet with arch
model = RegNet(arch)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
| 2,177 | 35.915254 | 73 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import check_norm_state, is_block, is_norm
__all__ = ['is_block', 'is_norm', 'check_norm_state']
| 158 | 30.8 | 54 |
py
|
ERD
|
ERD-main/tests/test_models/test_backbones/test_detectors_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.models.backbones import DetectoRS_ResNet
def test_detectorrs_resnet_backbone():
detectorrs_cfg = dict(
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True)
"""Test init_weights config"""
with pytest.raises(AssertionError):
# pretrained and init_cfg cannot be specified at the same time
DetectoRS_ResNet(
**detectorrs_cfg, pretrained='Pretrained', init_cfg='Pretrained')
with pytest.raises(AssertionError):
# init_cfg must be a dict
DetectoRS_ResNet(
**detectorrs_cfg, pretrained=None, init_cfg=['Pretrained'])
with pytest.raises(KeyError):
# init_cfg must contain the key `type`
DetectoRS_ResNet(
**detectorrs_cfg,
pretrained=None,
init_cfg=dict(checkpoint='Pretrained'))
with pytest.raises(AssertionError):
# init_cfg only support initialize pretrained model way
DetectoRS_ResNet(
**detectorrs_cfg, pretrained=None, init_cfg=dict(type='Trained'))
with pytest.raises(TypeError):
# pretrained must be a str or None
model = DetectoRS_ResNet(
**detectorrs_cfg, pretrained=['Pretrained'], init_cfg=None)
model.init_weights()
| 1,611 | 32.583333 | 77 |
py
|
ERD
|
ERD-main/tests/test_models/test_layers/test_position_encoding.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.layers import (LearnedPositionalEncoding,
SinePositionalEncoding)
def test_sine_positional_encoding(num_feats=16, batch_size=2):
# test invalid type of scale
with pytest.raises(AssertionError):
module = SinePositionalEncoding(
num_feats, scale=(3., ), normalize=True)
module = SinePositionalEncoding(num_feats)
h, w = 10, 6
mask = (torch.rand(batch_size, h, w) > 0.5).to(torch.int)
assert not module.normalize
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
# set normalize
module = SinePositionalEncoding(num_feats, normalize=True)
assert module.normalize
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
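# Shape rationale for the asserts above: the sine encoding is assumed to emit
# num_feats sin/cos values for the y position and num_feats for the x position
# of every pixel, giving 2 * num_feats channels; normalize=True additionally
# rescales the cumulative row/column positions before applying sin/cos.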
def test_learned_positional_encoding(num_feats=16,
row_num_embed=10,
col_num_embed=10,
batch_size=2):
module = LearnedPositionalEncoding(num_feats, row_num_embed, col_num_embed)
assert module.row_embed.weight.shape == (row_num_embed, num_feats)
assert module.col_embed.weight.shape == (col_num_embed, num_feats)
h, w = 10, 6
mask = torch.rand(batch_size, h, w) > 0.5
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
| 1,439 | 35 | 79 |
py
|
ERD
|
ERD-main/tests/test_models/test_layers/test_ema.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
import math
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.testing import assert_allclose
from mmdet.models.layers import ExpMomentumEMA
class TestEMA(TestCase):
def test_exp_momentum_ema(self):
model = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10))
# Test invalid gamma
with self.assertRaisesRegex(AssertionError,
'gamma must be greater than 0'):
ExpMomentumEMA(model, gamma=-1)
# Test EMA
model = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10))
momentum = 0.1
gamma = 4
ema_model = ExpMomentumEMA(model, momentum=momentum, gamma=gamma)
averaged_params = [
torch.zeros_like(param) for param in model.parameters()
]
n_updates = 10
for i in range(n_updates):
updated_averaged_params = []
for p, p_avg in zip(model.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
if i == 0:
updated_averaged_params.append(p.clone())
else:
m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum
updated_averaged_params.append(
(p_avg * (1 - m) + p * m).clone())
ema_model.update_parameters(model)
averaged_params = updated_averaged_params
for p_target, p_ema in zip(averaged_params, ema_model.parameters()):
assert_allclose(p_target, p_ema)
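# Closed form mirrored by the reference loop above: at update step i the
# annealed momentum is
#     m_i = (1 - momentum) * exp(-(i + 1) / gamma) + momentum
# and the average is updated as p_avg <- (1 - m_i) * p_avg + m_i * p, so early
# updates track the live parameters closely while later updates smooth more.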
def test_exp_momentum_ema_update_buffer(self):
model = nn.Sequential(
nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),
nn.Linear(5, 10))
# Test invalid gamma
with self.assertRaisesRegex(AssertionError,
'gamma must be greater than 0'):
ExpMomentumEMA(model, gamma=-1)
# Test EMA with momentum annealing.
momentum = 0.1
gamma = 4
ema_model = ExpMomentumEMA(
model, gamma=gamma, momentum=momentum, update_buffers=True)
averaged_params = [
torch.zeros_like(param)
for param in itertools.chain(model.parameters(), model.buffers())
if param.size() != torch.Size([])
]
n_updates = 10
for i in range(n_updates):
updated_averaged_params = []
params = [
param for param in itertools.chain(model.parameters(),
model.buffers())
if param.size() != torch.Size([])
]
for p, p_avg in zip(params, averaged_params):
p.detach().add_(torch.randn_like(p))
if i == 0:
updated_averaged_params.append(p.clone())
else:
m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum
updated_averaged_params.append(
(p_avg * (1 - m) + p * m).clone())
ema_model.update_parameters(model)
averaged_params = updated_averaged_params
ema_params = [
param for param in itertools.chain(ema_model.module.parameters(),
ema_model.module.buffers())
if param.size() != torch.Size([])
]
for p_target, p_ema in zip(averaged_params, ema_params):
assert_allclose(p_target, p_ema)
| 3,633 | 37.252632 | 79 |
py
|
ERD
|
ERD-main/tests/test_models/test_layers/test_se_layer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmengine.model import constant_init
from mmdet.models.layers import DyReLU, SELayer
def test_se_layer():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal to 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
def test_dyrelu():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal to 2
DyReLU(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test DyReLU forward
layer = DyReLU(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
# DyReLU should act as standard (static) ReLU
# when eliminating the effect of SE-like module
layer = DyReLU(channels=32)
constant_init(layer.conv2.conv, 0)
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
relu_out = F.relu(x)
assert torch.equal(x_out, relu_out)
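# Why the equality above is expected to hold: zero-initialising the second
# conv of the SE-like branch collapses the dynamic coefficients to their
# default offsets (presumably slopes 1 and 0 with zero intercepts), so the
# piecewise output max(1 * x + 0, 0 * x + 0) reduces to the plain ReLU
# max(x, 0).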
| 1,623 | 28.527273 | 76 |
py
|