repo (string, lengths 2-99) | file (string, lengths 13-225) | code (string, lengths 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
ENCAS | ENCAS-main/subset_selectors/referencebased_subset_selector.py |
import numpy as np
from pymoo.algorithms.nsga3 import ReferenceDirectionSurvival, get_extreme_points_c, get_nadir_point, \
associate_to_niches, calc_niche_count, niching
from pymoo.factory import get_reference_directions
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from subset_selectors.base_subset_selector import BaseSubsetSelector
from matplotlib import pyplot as plt
class ReferenceBasedSubsetSelector(BaseSubsetSelector):
    '''
    Same subset selection as in NSGA-III (reference-direction-based survival).
    '''
def __init__(self, n_select, **kwargs):
super().__init__()
self.n_select = n_select
ref_dirs = get_reference_directions("riesz", kwargs['n_objs'], 100)
self.selector = ReferenceDirectionSurvivalMy(ref_dirs)
def select(self, archive, objs):
        # objs shape is (n_archs, n_objs)
objs = np.copy(objs)
n_total = objs.shape[0]
if n_total > self.n_select:
indices_selected = self.selector._do(None, objs, self.n_select)
print(f'rbf_ensemble: Selected {np.sum(indices_selected)} indices properly')
else:
indices_selected = [True] * n_total
print(f'rbf_ensemble: Selected {n_total} indices by default')
return indices_selected
class ReferenceDirectionSurvivalMy(ReferenceDirectionSurvival):
    '''
    Modified to work with np archives instead of the "population" structures.
    Behaviourally the same as ReferenceDirectionSurvival.
    '''
def __init__(self, ref_dirs):
super().__init__(ref_dirs)
self.ref_dirs = ref_dirs
self.extreme_points = None
self.intercepts = None
self.nadir_point = None
self.opt = None
self.ideal_point = np.full(ref_dirs.shape[1], np.inf)
self.worst_point = np.full(ref_dirs.shape[1], -np.inf)
def _do(self, problem, objs, n_survive, D=None, **kwargs):
n_total = objs.shape[0]
indices_selected = np.array([False] * n_total)
        # find or update the ideal and worst points (from feasible solutions)
self.ideal_point = np.min(np.vstack((self.ideal_point, objs)), axis=0)
self.worst_point = np.max(np.vstack((self.worst_point, objs)), axis=0)
# calculate the fronts of the population
fronts, rank = NonDominatedSorting().do(objs, return_rank=True, n_stop_if_ranked=n_survive)
non_dominated, last_front = fronts[0], fronts[-1]
# find the extreme points for normalization
self.extreme_points = get_extreme_points_c(objs[non_dominated, :], self.ideal_point,
extreme_points=self.extreme_points)
# find the intercepts for normalization and do backup if gaussian elimination fails
worst_of_population = np.max(objs, axis=0)
worst_of_front = np.max(objs[non_dominated, :], axis=0)
self.nadir_point = get_nadir_point(self.extreme_points, self.ideal_point, self.worst_point,
worst_of_population, worst_of_front)
# consider only the population until we come to the splitting front
I = np.concatenate(fronts)
indices_selected[I] = True
# update the front indices for the current population
new_idx_to_old_idx = {}
counter = 0
for i in range(len(fronts)):
for j in range(len(fronts[i])):
new_idx_to_old_idx[counter] = fronts[i][j]
fronts[i][j] = counter
counter += 1
last_front = fronts[-1]
# associate individuals to niches
niche_of_individuals, dist_to_niche, dist_matrix = \
associate_to_niches(objs[indices_selected], self.ref_dirs, self.ideal_point, self.nadir_point)
# if we need to select individuals to survive
if len(objs[indices_selected]) > n_survive:
# if there is only one front
if len(fronts) == 1:
n_remaining = n_survive
until_last_front = np.array([], dtype=int)
niche_count = np.zeros(len(self.ref_dirs), dtype=int)
# if some individuals already survived
else:
until_last_front = np.concatenate(fronts[:-1])
niche_count = calc_niche_count(len(self.ref_dirs), niche_of_individuals[until_last_front])
n_remaining = n_survive - len(until_last_front)
S = niching(objs[indices_selected][last_front], n_remaining, niche_count, niche_of_individuals[last_front],
dist_to_niche[last_front])
survivors = np.concatenate((until_last_front, last_front[S].tolist()))
# only survivors need to remain active
indices_selected[:] = False
indices_selected[[new_idx_to_old_idx[s] for s in survivors]] = True
return indices_selected
if __name__ == '__main__':
gss = ReferenceBasedSubsetSelector(5, n_objs=2)
objs_cur = np.array([[0, 0], [0.2, 1.5], [0.9, 0],
[1, 2], [2, 4], [3, 3],
[-0.5, -0.5], [0.5, 0.7], [0.7, 0.5]])
plt.scatter(objs_cur[:, 0], objs_cur[:, 1])
idx = gss.select(None, objs_cur)
print(objs_cur[idx])
plt.scatter(objs_cur[idx, 0], objs_cur[idx, 1])
plt.show()
| 5,298 | 40.724409 | 119 | py |
ENCAS | ENCAS-main/subset_selectors/__init__.py |
from .referencebased_subset_selector import ReferenceBasedSubsetSelector
# Since only 1 subset selector is used in the paper, this level of indirection is not needed.
# But if I (or someone else) want to add another one later, this would be convenient.
selector_name_to_class = {
'reference': ReferenceBasedSubsetSelector
}
def create_subset_selector(name, n_to_select, **kwargs):
clazz = selector_name_to_class[name]
return clazz(n_to_select, n_objs=2, **kwargs)
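# A minimal usage sketch (hedged: `objs` is an illustrative (n_archs, 2) numpy
# array of objective values, not something defined in this module):
#
#   selector = create_subset_selector('reference', 5)
#   mask = selector.select(None, objs)   # boolean mask over the archive rows
#   selected = objs[mask]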
| 477 | 42.454545 | 93 | py |
ENCAS | ENCAS-main/data_utils/viz_during_run.py |
'''
These are just some useful commands for debugging.
They assume that `images` and `labels` (torch tensors) exist in the current scope.
'''
from matplotlib import pyplot as plt

im = images.cpu().detach().numpy()
def stats(x):
print(f'{x.mean()=:.3f} | {x.std()=:.3f} | {x.min()=:.3f} | {x.max()=:.3f}')
lb = labels.cpu().detach().numpy().transpose((0, 2, 3, 1))
def s(xs):
fig, axs = plt.subplots(1, len(xs))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
for ax, x in zip(axs, xs):
ax.imshow(x)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
for i in range(8):
s([im[i, 0], im[i, 1], lb[i] * 255])
| 576 | 20.37037 | 80 | py |
ENCAS | ENCAS-main/data_utils/split_imagenet_train.py |
'''
Separate a part of the train set (used for setting batch norm statistics). If you have the full
ImageNet train set on your machine, this is not necessary.
'''
import numpy as np
import os
from os.path import join
import random
import shutil
from pathlib import Path
random.seed(3179)
np.random.seed(3179)
n_select = 10
in_path = join('', 'train')
out_path1 = join('', 'train_part')
try:
Path(out_path1).mkdir()
except FileExistsError:
shutil.rmtree(out_path1)
Path(out_path1).mkdir()
for path_class in os.scandir(in_path):
if not path_class.is_dir():
continue
name_cur = path_class.name
out_dir1_cur = join(out_path1, name_cur)
for p in [out_dir1_cur]:
Path(p).mkdir(exist_ok=True)
file_names = np.array([f for f in os.scandir(path_class) if not f.is_dir()])
permutation = np.arange(len(file_names))
np.random.shuffle(permutation)
idx1 = permutation[:n_select]
print(name_cur, idx1)
for f in file_names[idx1]:
shutil.copy(f.path, join(out_dir1_cur, f.name))
| 1,026 | 22.340909 | 115 | py |
ENCAS | ENCAS-main/data_utils/store_labels.py |
import numpy as np
import torch
import yaml
from run_manager import get_run_config
from utils import NAT_PATH, NAT_DATA_PATH
import os
'''
Store labels of the val & test sets for all the datasets.
To avoid specifying all the data-related information again, simply use any NAT config that uses
the appropriate dataset.
'''
def get_val_and_test_labels(nat_config_name):
nat_config = yaml.safe_load(open(os.path.join(NAT_PATH, 'configs_nat', nat_config_name), 'r'))
run_config = get_run_config(dataset=nat_config['dataset'], data_path=nat_config['data'],
train_batch_size=nat_config['trn_batch_size'], total_epochs=0,
test_batch_size=nat_config['vld_batch_size'], n_worker=4,
cutout_size=nat_config['cutout_size'], image_size=32,
valid_size=nat_config['vld_size'], dataset_name=nat_config['dataset'])
run_config.valid_loader.collate_fn.set_resolutions([32]) # images are not used, but need to set some size.
lbls_val = [b[1] for b in run_config.valid_loader]
lbls_val = torch.cat(lbls_val).detach().numpy()
lbls_test = [b[1] for b in run_config.test_loader]
lbls_test = torch.cat(lbls_test).detach().numpy()
return lbls_val, lbls_test
if __name__ == '__main__':
lbls_val, lbls_test = get_val_and_test_labels('cifar10_r0_ofa10_sep.yml')
np.save(os.path.join(NAT_DATA_PATH, 'labels_cifar10_val10000'), lbls_val)
np.save(os.path.join(NAT_DATA_PATH, 'labels_cifar10_test'), lbls_test)
lbls_val, lbls_test = get_val_and_test_labels('cifar100_r0_ofa10_sep.yml')
np.save(os.path.join(NAT_DATA_PATH, 'labels_cifar100_val10000'), lbls_val)
np.save(os.path.join(NAT_DATA_PATH, 'labels_cifar100_test'), lbls_test)
lbls_val, lbls_test = get_val_and_test_labels('imagenet_r0_ofa10_sep.yml')
np.save(os.path.join(NAT_DATA_PATH, 'labels_imagenet_val20683'), lbls_val)
np.save(os.path.join(NAT_DATA_PATH, 'labels_imagenet_test'), lbls_test)
| 2,011 | 48.073171 | 110 | py |
ENCAS | ENCAS-main/data_providers/auto_augment_tf.py |
# taken verbatim from https://github.com/facebookresearch/AttentiveNAS
""" Auto Augment
Implementation adapted from timm: https://github.com/rwightman/pytorch-image-models
"""
import random
import math
from PIL import Image, ImageOps, ImageEnhance
import PIL
_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]])
_FILL = (128, 128, 128)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
_HPARAMS_DEFAULT = dict(
translate_const=250,
img_mean=_FILL,
)
_RANDOM_INTERPOLATION = (Image.NEAREST, Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
interpolation = kwargs.pop('resample', Image.NEAREST)
if isinstance(interpolation, (list, tuple)):
return random.choice(interpolation)
else:
return interpolation
def _check_args_tf(kwargs):
if 'fillcolor' in kwargs and _PIL_VER < (5, 0):
kwargs.pop('fillcolor')
kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs)
def shear_y(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs)
def translate_x_rel(img, pct, **kwargs):
pixels = pct * img.size[0]
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_rel(img, pct, **kwargs):
pixels = pct * img.size[1]
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def translate_x_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def rotate(img, degrees, **kwargs):
_check_args_tf(kwargs)
if _PIL_VER >= (5, 2):
return img.rotate(degrees, **kwargs)
elif _PIL_VER >= (5, 0):
w, h = img.size
post_trans = (0, 0)
rotn_center = (w / 2.0, h / 2.0)
angle = -math.radians(degrees)
matrix = [
round(math.cos(angle), 15),
round(math.sin(angle), 15),
0.0,
round(-math.sin(angle), 15),
round(math.cos(angle), 15),
0.0,
]
def transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a * x + b * y + c, d * x + e * y + f
matrix[2], matrix[5] = transform(
-rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
)
matrix[2] += rotn_center[0]
matrix[5] += rotn_center[1]
return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
else:
return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **__):
return ImageOps.autocontrast(img)
def invert(img, **__):
return ImageOps.invert(img)
def equalize(img, **__):
return ImageOps.equalize(img)
def solarize(img, thresh, **__):
return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
lut = []
for i in range(256):
if i < thresh:
lut.append(min(255, i + add))
else:
lut.append(i)
if img.mode in ("L", "RGB"):
if img.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return img.point(lut)
else:
return img
def posterize(img, bits_to_keep, **__):
if bits_to_keep >= 8:
return img
bits_to_keep = max(1, bits_to_keep) # prevent all 0 images
return ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
return ImageEnhance.Sharpness(img).enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level):
# range [-30, 30]
level = (level / _MAX_LEVEL) * 30.
level = _randomly_negate(level)
return (level,)
def _enhance_level_to_arg(level):
# range [0.1, 1.9]
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
# range [-0.3, 0.3]
level = (level / _MAX_LEVEL) * 0.3
level = _randomly_negate(level)
return (level,)
def _translate_abs_level_to_arg(level, translate_const):
level = (level / _MAX_LEVEL) * float(translate_const)
level = _randomly_negate(level)
return (level,)
def _translate_abs_level_to_arg2(level):
level = (level / _MAX_LEVEL) * float(_HPARAMS_DEFAULT['translate_const'])
level = _randomly_negate(level)
return (level,)
def _translate_rel_level_to_arg(level):
# range [-0.45, 0.45]
level = (level / _MAX_LEVEL) * 0.45
level = _randomly_negate(level)
return (level,)
# def level_to_arg(hparams):
# return {
# 'AutoContrast': lambda level: (),
# 'Equalize': lambda level: (),
# 'Invert': lambda level: (),
# 'Rotate': _rotate_level_to_arg,
# # FIXME these are both different from original impl as I believe there is a bug,
# # not sure what is the correct alternative, hence 2 options that look better
# 'Posterize': lambda level: (int((level / _MAX_LEVEL) * 4) + 4,), # range [4, 8]
# 'Posterize2': lambda level: (4 - int((level / _MAX_LEVEL) * 4),), # range [4, 0]
# 'Solarize': lambda level: (int((level / _MAX_LEVEL) * 256),), # range [0, 256]
# 'SolarizeAdd': lambda level: (int((level / _MAX_LEVEL) * 110),), # range [0, 110]
# 'Color': _enhance_level_to_arg,
# 'Contrast': _enhance_level_to_arg,
# 'Brightness': _enhance_level_to_arg,
# 'Sharpness': _enhance_level_to_arg,
# 'ShearX': _shear_level_to_arg,
# 'ShearY': _shear_level_to_arg,
# 'TranslateX': lambda level: _translate_abs_level_to_arg(level, hparams['translate_const']),
# 'TranslateY': lambda level: _translate_abs_level_to_arg(level, hparams['translate_const']),
# 'TranslateXRel': lambda level: _translate_rel_level_to_arg(level),
# 'TranslateYRel': lambda level: _translate_rel_level_to_arg(level),
# }
NAME_TO_OP = {
'AutoContrast': auto_contrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'Posterize2': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x_abs,
'TranslateY': translate_y_abs,
'TranslateXRel': translate_x_rel,
'TranslateYRel': translate_y_rel,
}
def pass_fn(input):
return ()
def _conversion0(input):
return (int((input / _MAX_LEVEL) * 4) + 4,)
def _conversion1(input):
return (4 - int((input / _MAX_LEVEL) * 4),)
def _conversion2(input):
return (int((input / _MAX_LEVEL) * 256),)
def _conversion3(input):
return (int((input / _MAX_LEVEL) * 110),)
class AutoAugmentOp:
def __init__(self, name, prob, magnitude, hparams={}):
self.aug_fn = NAME_TO_OP[name]
# self.level_fn = level_to_arg(hparams)[name]
if name == 'AutoContrast' or name == 'Equalize' or name == 'Invert':
self.level_fn = pass_fn
elif name == 'Rotate':
self.level_fn = _rotate_level_to_arg
elif name == 'Posterize':
self.level_fn = _conversion0
elif name == 'Posterize2':
self.level_fn = _conversion1
elif name == 'Solarize':
self.level_fn = _conversion2
elif name == 'SolarizeAdd':
self.level_fn = _conversion3
elif name == 'Color' or name == 'Contrast' or name == 'Brightness' or name == 'Sharpness':
self.level_fn = _enhance_level_to_arg
elif name == 'ShearX' or name == 'ShearY':
self.level_fn = _shear_level_to_arg
elif name == 'TranslateX' or name == 'TranslateY':
self.level_fn = _translate_abs_level_to_arg2
elif name == 'TranslateXRel' or name == 'TranslateYRel':
self.level_fn = _translate_rel_level_to_arg
else:
print("{} not recognized".format({}))
self.prob = prob
self.magnitude = magnitude
# If std deviation of magnitude is > 0, we introduce some randomness
# in the usually fixed policy and sample magnitude from normal dist
# with mean magnitude and std-dev of magnitude_std.
# NOTE This is being tested as it's not in paper or reference impl.
self.magnitude_std = 0.5 # FIXME add arg/hparam
self.kwargs = {
'fillcolor': hparams['img_mean'] if 'img_mean' in hparams else _FILL,
'resample': hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION
}
def __call__(self, img):
if self.prob < random.random():
return img
magnitude = self.magnitude
if self.magnitude_std and self.magnitude_std > 0:
magnitude = random.gauss(magnitude, self.magnitude_std)
magnitude = min(_MAX_LEVEL, max(0, magnitude))
level_args = self.level_fn(magnitude)
return self.aug_fn(img, *level_args, **self.kwargs)
def auto_augment_policy_v0(hparams=_HPARAMS_DEFAULT):
# ImageNet policy from TPU EfficientNet impl, cannot find
# a paper reference.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
pc = [[AutoAugmentOp(*a, hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy_original(hparams=_HPARAMS_DEFAULT):
# ImageNet policy from https://arxiv.org/abs/1805.09501
policy = [
[('Posterize', 0.4, 8), ('Rotate', 0.6, 9)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
[('Posterize', 0.6, 7), ('Posterize', 0.6, 6)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
[('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
[('Posterize', 0.8, 5), ('Equalize', 1.0, 2)],
[('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
[('Equalize', 0.6, 8), ('Posterize', 0.4, 6)],
[('Rotate', 0.8, 8), ('Color', 0.4, 0)],
[('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
[('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Rotate', 0.8, 8), ('Color', 1.0, 2)],
[('Color', 0.8, 8), ('Solarize', 0.8, 7)],
[('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
[('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
[('Color', 0.4, 0), ('Equalize', 0.6, 3)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
]
pc = [[AutoAugmentOp(*a, hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy(name='v0', hparams=_HPARAMS_DEFAULT):
if name == 'original':
return auto_augment_policy_original(hparams)
elif name == 'v0':
return auto_augment_policy_v0(hparams)
else:
print("Unknown auto_augmentation policy {}".format(name))
raise AssertionError()
class AutoAugment:
def __init__(self, policy):
self.policy = policy
def __call__(self, img):
sub_policy = random.choice(self.policy)
for op in sub_policy:
img = op(img)
return img
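# A minimal usage sketch (hedged: `img` is an illustrative PIL image):
#
#   policy = auto_augment_policy('v0')   # 25 sub-policies of (op, prob, magnitude) pairs
#   augment = AutoAugment(policy)
#   img_aug = augment(img)               # applies one randomly chosen sub-policy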
| 13,406 | 32.185644 | 105 | py |
ENCAS | ENCAS-main/data_providers/cifar.py |
import functools
import math
import numpy as np
import torchvision
import torch.utils.data
import torchvision.transforms as transforms
from ofa.imagenet_classification.data_providers.imagenet import DataProvider
from timm.data import rand_augment_transform
import utils_train
from utils import _pil_interp
from dynamic_resolution_collator import DynamicResolutionCollator
import utils
from utils_train import Cutout
class CIFARBaseDataProvider(DataProvider):
def __init__(self, save_path=None, train_batch_size=96, test_batch_size=256, valid_size=None,
n_worker=2, resize_scale=0.08, distort_color=None, image_size=224, num_replicas=None, rank=None,
total_size=None, **kwargs):
self._save_path = save_path
self.image_size = image_size
self.distort_color = distort_color
self.resize_scale = resize_scale
self.cutout_size = kwargs['cutout_size']
self.auto_augment = kwargs.get('auto_augment', 'rand-m9-mstd0.5')
self.if_flip = kwargs.get('if_flip', True)
self.if_center_crop = kwargs.get('if_center_crop', True)
self.if_cutmix = kwargs.get('if_cutmix', False)
self._valid_transform_dict = {}
self._train_transform_dict = {}
self.active_img_size = self.image_size
valid_transforms = self.build_valid_transform()
train_transforms = self.build_train_transform()
self.train_dataset_actual = self.train_dataset(train_transforms)
n_datapoints = len(self.train_dataset_actual.data)
self.cutmix_kwargs = None
if self.if_cutmix:
self.cutmix_kwargs = {'beta': 1.0, 'prob': 0.5, 'n_classes': self.n_classes}
        # depending on the combination of flags, may need even more than 1000:
self.collator_train = DynamicResolutionCollator(1000, if_cutmix=self.if_cutmix, cutmix_kwargs=self.cutmix_kwargs)
self.collator_val = DynamicResolutionCollator(1)
self.collator_subtrain = DynamicResolutionCollator(1, if_return_target_idx=False, if_cutmix=self.if_cutmix,
cutmix_kwargs=self.cutmix_kwargs)
assert valid_size is not None
if total_size is not None:
n_datapoints = total_size
if not isinstance(valid_size, int):
assert isinstance(valid_size, float) and 0 < valid_size < 1
valid_size = int(n_datapoints * valid_size)
self.valid_dataset_actual = self.train_dataset(valid_transforms)
train_indexes, valid_indexes = self.random_sample_valid_set(n_datapoints, valid_size)
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indexes)
# for validation use Subset instead of SubsetRandomSampler to keep the ordering the same
self.valid_dataset_actual = torch.utils.data.Subset(self.valid_dataset_actual, valid_indexes)
self.train = torch.utils.data.DataLoader(
self.train_dataset_actual, batch_size=train_batch_size, sampler=train_sampler,
num_workers=n_worker, pin_memory=True, collate_fn=self.collator_train,
worker_init_fn=utils_train.init_dataloader_worker_state, persistent_workers=True
)
self.valid = torch.utils.data.DataLoader(
self.valid_dataset_actual, batch_size=test_batch_size, num_workers=n_worker, pin_memory=True,
prefetch_factor=1, persistent_workers=True, collate_fn=self.collator_val
)
test_dataset = self.test_dataset(valid_transforms)
self.test = torch.utils.data.DataLoader(
test_dataset, batch_size=test_batch_size, shuffle=False, num_workers=n_worker, pin_memory=True,
collate_fn=self.collator_val, prefetch_factor=1
)
self.collator_train.set_info_for_transforms(self.resize_class_lambda_train,
self.train_transforms_after_resize)
self.collator_val.set_info_for_transforms(self.resize_class_lambda_val,
self.val_transforms_after_resize)
self.collator_subtrain.set_info_for_transforms(self.resize_class_lambda_train,
self.train_transforms_after_resize)
def set_collator_train_resolutions(self, resolutions):
self.collator_train.set_resolutions(resolutions)
@staticmethod
def name():
raise NotImplementedError
@property
def n_classes(self):
raise NotImplementedError
def train_dataset(self, _transforms):
raise NotImplementedError
def test_dataset(self, _transforms):
raise NotImplementedError
@property
def data_shape(self):
return 3, self.active_img_size, self.active_img_size # C, H, W
@property
def save_path(self):
return self._save_path
@property
def data_url(self):
raise ValueError('unable to download %s' % self.name())
@property
def train_path(self):
return self.save_path
@property
def valid_path(self):
return self.save_path
@property
def normalize(self):
return transforms.Normalize(
mean=[0.49139968, 0.48215827, 0.44653124], std=[0.24703233, 0.24348505, 0.26158768])
def build_train_transform(self, image_size=None, print_log=True):
self.active_img_size = image_size
if image_size is None:
image_size = self.image_size
if print_log:
print('Color jitter: %s, resize_scale: %s, img_size: %s' %
(self.distort_color, self.resize_scale, image_size))
if self.active_img_size in self._train_transform_dict:
return self._train_transform_dict[self.active_img_size]
if self.distort_color == 'torch':
color_transform = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
elif self.distort_color == 'tf':
color_transform = transforms.ColorJitter(brightness=32. / 255., saturation=0.5)
else:
color_transform = None
if self.resize_scale:
resize_scale = self.resize_scale
self.resize_class_lambda_train = functools.partial(utils_train.create_resize_class_lambda_train,
transforms.RandomResizedCrop, scale=[resize_scale, 1.0],
interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
else:
self.resize_class_lambda_train = functools.partial(utils_train.create_resize_class_lambda_train,
transforms.Resize,
interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
train_transforms = []
if self.if_flip:
train_transforms.append(transforms.RandomHorizontalFlip())
if color_transform is not None:
train_transforms.append(color_transform)
if self.auto_augment:
aa_params = dict(
translate_const=int(image_size * 0.45),
img_mean=tuple([min(255, round(255 * x)) for x in [0.49139968, 0.48215827, 0.44653124]]),
)
aa_params['interpolation'] = _pil_interp('bicubic')
train_transforms += [rand_augment_transform(self.auto_augment, aa_params)]
train_transforms += [
transforms.ToTensor(),
self.normalize,
Cutout(length=self.cutout_size)
]
self.train_transforms_after_resize = train_transforms
        train_transforms = []
        # the transforms below are irrelevant (the actual transforms will be applied in the collator)
        train_transforms = transforms.Compose(train_transforms)
self._train_transform_dict[self.active_img_size] = train_transforms
return train_transforms
@staticmethod
def resize_class_lambda_val(if_center_crop, image_size):
if if_center_crop:
return transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875)),
interpolation=torchvision.transforms.InterpolationMode.BICUBIC),
transforms.CenterCrop(image_size)])
return transforms.Resize(image_size, interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
def build_valid_transform(self, image_size=None):
        self.resize_class_lambda_val = functools.partial(CIFARBaseDataProvider.resize_class_lambda_val, self.if_center_crop)
val_transforms = [transforms.ToTensor(),
self.normalize]
self.val_transforms_after_resize = val_transforms
val_transforms = []
return transforms.Compose(val_transforms)
def assign_active_img_size(self, new_img_size):
self.active_img_size = new_img_size
if self.active_img_size not in self._valid_transform_dict:
self._valid_transform_dict[self.active_img_size] = self.build_valid_transform()
self.valid.dataset.transform = self._valid_transform_dict[self.active_img_size]
self.test.dataset.transform = self._valid_transform_dict[self.active_img_size]
def build_sub_train_loader(self, n_images, batch_size, img_size, num_worker=None, num_replicas=None, rank=None):
# used for resetting running statistics of BN
if not hasattr(self, 'sub_data_loader'):
if num_worker is None:
num_worker = self.train.num_workers
new_train_dataset = self.train_dataset(self.build_train_transform(image_size=img_size, print_log=False))
g = torch.Generator()
g.manual_seed(DataProvider.SUB_SEED)
indices_train = self.train.sampler.indices
n_indices = len(indices_train)
rand_permutation = torch.randperm(n_indices, generator=g).numpy()
indices_train = np.array(indices_train)[rand_permutation]
chosen_indexes = indices_train[:n_images].tolist()
sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
self.collator_subtrain.set_resolutions([img_size])
self.sub_data_loader = torch.utils.data.DataLoader(
new_train_dataset, batch_size=batch_size, sampler=sub_sampler,
num_workers=num_worker, pin_memory=False, collate_fn=self.collator_subtrain, persistent_workers=True
)
else:
self.collator_subtrain.set_resolutions([img_size])
return self.sub_data_loader
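    # A sketch of how such a sub-train loader is typically consumed to reset BN
    # running statistics (hedged: `provider` and `model` are illustrative names,
    # not part of this class):
    #
    #   loader = provider.build_sub_train_loader(n_images=2000, batch_size=200, img_size=32)
    #   model.train()
    #   with torch.no_grad():
    #       for images, labels, *_ in loader:
    #           model(images)   # forward passes in train mode update the BN running stats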
class CIFAR10DataProvider(CIFARBaseDataProvider):
@staticmethod
def name():
return 'cifar10'
@property
def n_classes(self):
return 10
def train_dataset(self, _transforms):
dataset = torchvision.datasets.CIFAR10(root=self.train_path, train=True,
download=True, transform=_transforms)
return dataset
def test_dataset(self, _transforms):
dataset = torchvision.datasets.CIFAR10(root=self.valid_path, train=False,
download=True, transform=_transforms)
return dataset
class CIFAR100DataProvider(CIFARBaseDataProvider):
@staticmethod
def name():
return 'cifar100'
@property
def n_classes(self):
return 100
def train_dataset(self, _transforms):
dataset = torchvision.datasets.CIFAR100(root=self.train_path, train=True,
download=True, transform=_transforms)
return dataset
def test_dataset(self, _transforms):
dataset = torchvision.datasets.CIFAR100(root=self.valid_path, train=False,
download=True, transform=_transforms)
return dataset
| 11,902 | 40.618881 | 126 | py |
ENCAS | ENCAS-main/data_providers/imagenet.py |
import functools
import warnings
import os
import math
import numpy as np
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from ofa.imagenet_classification.data_providers.imagenet import DataProvider
import utils
import utils_train
from dynamic_resolution_collator import DynamicResolutionCollator
from .auto_augment_tf import auto_augment_policy, AutoAugment
class ImagenetDataProvider(DataProvider):
def __init__(self, save_path=None, train_batch_size=256, test_batch_size=512, valid_size=None, n_worker=32,
resize_scale=0.08, distort_color=None, image_size=224,
num_replicas=None, rank=None, total_size=None, **kwargs):
warnings.filterwarnings('ignore')
self._save_path = save_path
self.image_size = image_size
self.distort_color = distort_color
self.resize_scale = resize_scale
self.if_flip = kwargs.get('if_flip', True)
self.auto_augment = kwargs.get('auto_augment', 'v0')
self.crop_pct = kwargs.get('crop_pct', None)
self.if_timm = self.crop_pct is not None
self.preproc_alphanet = kwargs.get('preproc_alphanet', False)
self._train_transform_dict = {}
self._valid_transform_dict = {}
self.active_img_size = self.image_size
valid_transforms = self.build_valid_transform()
train_loader_class = torch.utils.data.DataLoader
train_transforms = self.build_train_transform()
self.train_dataset_actual = self.train_dataset(train_transforms)
        self.if_segmentation = False
self.collator_train = DynamicResolutionCollator(1000)
self.collator_val = DynamicResolutionCollator(1)
self.collator_subtrain = DynamicResolutionCollator(1, if_return_target_idx=False)
self.valid_dataset_actual = self.val_dataset(valid_transforms)
self.train = train_loader_class(
self.train_dataset_actual, batch_size=train_batch_size, shuffle=True,
num_workers=n_worker, pin_memory=True, collate_fn=self.collator_train,
worker_init_fn=utils_train.init_dataloader_worker_state, persistent_workers=True
)
        self.valid = torch.utils.data.DataLoader(
            self.valid_dataset_actual, batch_size=test_batch_size,
            num_workers=n_worker, pin_memory=True, collate_fn=self.collator_val,
            persistent_workers=True
        )
test_dataset = self.test_dataset(valid_transforms)
self.test = torch.utils.data.DataLoader(
test_dataset, batch_size=test_batch_size, shuffle=False, num_workers=n_worker*2, pin_memory=True,
collate_fn=self.collator_val, persistent_workers=True
)
self.collator_train.set_info_for_transforms(self.resize_class_lambda_train,
self.train_transforms_after_resize,
self.train_transforms_pre_resize)
self.collator_val.set_info_for_transforms(self.resize_class_lambda_val,
self.val_transforms_after_resize, self.val_transforms_pre_resize)
self.collator_subtrain.set_info_for_transforms(self.resize_class_lambda_train,
self.train_transforms_after_resize,
self.train_transforms_pre_resize)
def set_collator_train_resolutions(self, resolutions):
self.collator_train.set_resolutions(resolutions)
@staticmethod
def name():
return 'imagenet'
@property
def data_shape(self):
return 3, self.active_img_size, self.active_img_size # C, H, W
@property
def n_classes(self):
return 1000
@property
def save_path(self):
return self._save_path
@property
def data_url(self):
raise ValueError('unable to download %s' % self.name())
def train_dataset(self, _transforms):
dataset = datasets.ImageFolder(os.path.join(self.save_path, 'train_part'), _transforms)
return dataset
def val_dataset(self, _transforms):
dataset = datasets.ImageFolder(os.path.join(self.save_path, 'imagenetv2_all'), _transforms)
return dataset
def test_dataset(self, _transforms):
dataset = datasets.ImageFolder(os.path.join(self.save_path, 'val'), _transforms)
return dataset
@property
def normalize(self):
return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
@staticmethod
def create_resize_class_lambda_train(resize_transform_class, image_size, **kwargs): # pickle can't handle lambdas
return resize_transform_class((image_size, image_size), **kwargs)
def build_train_transform(self, image_size=None, print_log=True):
self.active_img_size = image_size
default_image_size = 224
if print_log:
print('Color jitter: %s, resize_scale: %s, img_size: %s' %
(self.distort_color, self.resize_scale, default_image_size))
if self.active_img_size in self._train_transform_dict:
return self._train_transform_dict[self.active_img_size]
if self.distort_color == 'torch':
color_transform = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
elif self.distort_color == 'tf':
color_transform = transforms.ColorJitter(brightness=32. / 255., saturation=0.5)
else:
color_transform = None
        # OFA and AlphaNet preprocess ImageNet differently, which leads to significant performance changes
        # => I match the preprocessing to the supernetwork.
#
# In Alphanet, there are 2 resizes: when loading, images are always resized to 224*224;
# and in the call to forward they are resized to the proper resolution of the current subnetwork.
# I want to resize in advance, but still need to do the 2 resizes to match the preprocessing & performance
# => for Alphanet, I add the first resize to 224 to the train_transforms, and I run all the transforms before
# the final resize.
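        # Schematically (a hedged sketch of the two pipelines; r = the subnetwork's resolution):
        #   OFA:      load -> [collator] RandomResizedCrop(r x r) -> flip/color/auto-augment -> ToTensor + Normalize
        #   AlphaNet: load -> RandomResizedCrop(224 x 224) -> flip/color/auto-augment -> ToTensor + Normalize -> [collator] Resize(r x r)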
if not self.preproc_alphanet:
if self.resize_scale:
resize_scale = self.resize_scale
self.resize_class_lambda_train = functools.partial(ImagenetDataProvider.create_resize_class_lambda_train,
transforms.RandomResizedCrop, scale=[resize_scale, 1.0],
interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
else:
self.resize_class_lambda_train = functools.partial(utils_train.create_resize_class_lambda_train,
transforms.Resize,
interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
train_transforms = []
else:
resize_scale = self.resize_scale
train_transforms = [transforms.RandomResizedCrop((default_image_size, default_image_size), scale=[resize_scale, 1.0],
interpolation=torchvision.transforms.InterpolationMode.BICUBIC)]
self.resize_class_lambda_train = functools.partial(ImagenetDataProvider.resize_class_lambda_val, self.preproc_alphanet)
if self.if_flip:
train_transforms.append(transforms.RandomHorizontalFlip())
if color_transform is not None:
train_transforms.append(color_transform)
if self.auto_augment:
IMAGENET_PIXEL_MEAN = [123.675, 116.280, 103.530]
aa_params = {
"translate_const": int(default_image_size * 0.45),
"img_mean": tuple(round(x) for x in IMAGENET_PIXEL_MEAN),
}
aa_policy = AutoAugment(auto_augment_policy(self.auto_augment, aa_params))
train_transforms.append(aa_policy)
train_transforms += [
transforms.ToTensor(),
self.normalize,
]
if not self.preproc_alphanet:
self.train_transforms_pre_resize = []
self.train_transforms_after_resize = train_transforms
else:
self.train_transforms_pre_resize = train_transforms
self.train_transforms_after_resize = []
train_transforms = []
# the transforms below are irrelevant (actual transforms will be in the collator)
train_transforms = transforms.Compose(train_transforms)
self._train_transform_dict[self.active_img_size] = train_transforms
return train_transforms
@staticmethod
def resize_class_lambda_val(preproc_alphanet, image_size):
if preproc_alphanet:
return transforms.Resize((image_size, image_size),
interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
return transforms.Compose([transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size)])
def build_valid_transform(self, image_size=None):
self.resize_class_lambda_val = functools.partial(ImagenetDataProvider.resize_class_lambda_val, self.preproc_alphanet)
if not self.preproc_alphanet: # see the comment about "self.preproc_alphanet" in build_train_transform
val_transforms = [
transforms.ToTensor(),
self.normalize
]
self.val_transforms_pre_resize = []
self.val_transforms_after_resize = val_transforms
else:
val_transforms = [
transforms.Resize(256, interpolation=torchvision.transforms.InterpolationMode.BICUBIC),
transforms.CenterCrop(224),
transforms.ToTensor(),
self.normalize
]
self.val_transforms_pre_resize = val_transforms
self.val_transforms_after_resize = []
val_transforms = []
return transforms.Compose(val_transforms)
def assign_active_img_size(self, new_img_size):
self.active_img_size = new_img_size
if self.active_img_size not in self._valid_transform_dict:
self._valid_transform_dict[self.active_img_size] = self.build_valid_transform()
# change the transform of the valid and test set
self.valid.dataset.transform = self._valid_transform_dict[self.active_img_size]
self.test.dataset.transform = self._valid_transform_dict[self.active_img_size]
# Presumably, this also leads to OOM, but I haven't verified it on ImageNet, just assuming it's the same as with CIFARs.
# def build_sub_train_loader(self, n_images, batch_size, img_size, num_worker=None, num_replicas=None, rank=None):
# # used for resetting running statistics
# if self.__dict__.get('sub_train_%d' % img_size, None) is None:
# if not hasattr(self, 'sub_data_loader'):
# if num_worker is None:
# num_worker = self.train.num_workers
#
# new_train_dataset = self.train_dataset(
# self.build_train_transform(image_size=img_size, print_log=False))
#
# g = torch.Generator()
# g.manual_seed(DataProvider.SUB_SEED)
#
# # don't need to change sampling here (unlike in cifars) because val is not part of train
# n_samples = len(self.train.dataset.samples)
# rand_indexes = torch.randperm(n_samples, generator=g).tolist()
# chosen_indexes = rand_indexes[:n_images]
#
# if num_replicas is not None:
# sub_sampler = MyDistributedSampler(new_train_dataset, num_replicas, rank, np.array(chosen_indexes))
# else:
# sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
# self.collator_subtrain.set_resolutions([img_size])
# self.sub_data_loader = torch.utils.data.DataLoader(
# new_train_dataset, batch_size=batch_size, sampler=sub_sampler,
# num_workers=0, pin_memory=False, collate_fn=self.collator_subtrain
# )
#
# self.collator_subtrain.set_resolutions([img_size])
# self.__dict__['sub_train_%d' % img_size] = []
# for images, labels, *_ in self.sub_data_loader:
# self.__dict__['sub_train_%d' % img_size].append((images, labels))
# return self.__dict__['sub_train_%d' % img_size]
def build_sub_train_loader(self, n_images, batch_size, img_size, num_worker=None, num_replicas=None, rank=None):
# used for resetting running statistics of BN
if not hasattr(self, 'sub_data_loader'):
if num_worker is None:
num_worker = self.train.num_workers
new_train_dataset = self.train_dataset(self.build_train_transform(image_size=img_size, print_log=False))
g = torch.Generator()
g.manual_seed(DataProvider.SUB_SEED)
# don't need to change sampling here (unlike in cifars) because val is not part of train
n_samples = len(self.train.dataset.samples)
rand_indexes = torch.randperm(n_samples, generator=g).tolist()
chosen_indexes = rand_indexes[:n_images]
sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
self.collator_subtrain.set_resolutions([img_size])
self.sub_data_loader = torch.utils.data.DataLoader(
new_train_dataset, batch_size=batch_size, sampler=sub_sampler,
num_workers=num_worker, pin_memory=False, collate_fn=self.collator_subtrain, persistent_workers=True
)
else:
self.collator_subtrain.set_resolutions([img_size])
return self.sub_data_loader
| 14,256 | 46.052805 | 131 | py |
LATTE | LATTE-master/tools/DLtab/DLtab.py |
#!/usr/bin/env python
'''
Convert DFTB parameters in skf format into LATTE's table format
Usage
-----
```bash
DLtab.py <skf folder> <LATTE parameter folder>
```
Three files will be generated for LATTE: electrons.dat, bondints.table and ppots.dftb.
[notes on the electrons.dat file]
* The spin parameters are not included in the skf file, thus they will be
set to zero; if spin-polarized DFTB is needed, don't forget to modify
the electrons.dat file.
* The masses defined in the skf file are different from the ones defined in
the changecoord code. Thus, if you need to run the LATTE-LAMMPS code, don't
forget to modify the masses.
* The script uses the Hubbard U of the s shell as the U parameter for the
element, which is the same as the default of the DFTB+ code.
'''
__license__ = "GPL V2"
__author__ = "Chang Liu"
__email__ = "[email protected]"
__version__ = 1.01
import os
import sys
import glob
import numpy as np
# Constants for atomic unit <-> ev, angstroms
bohr2angstroms = 0.529177249
hatree2ev = 27.21138602
# strconv
def index2str(indexint):
    # convert index of matrix in dftb parameter to interaction string such as "sss"
return{
0: 'dds',
1: 'ddp',
2: 'ddd',
3: 'pds',
4: 'pdp',
5: 'pps',
6: 'ppp',
7: 'sds',
8: 'sps',
9: 'sss',
}[indexint]
def str2index(strint):
# convert interaction string such as "sss" to index of matrix in dftb parameter
return{
'dds': 0,
'ddp': 1,
'ddd': 2,
'pds': 3,
'pdp': 4,
'pps': 5,
'ppp': 6,
'sds': 7,
'sps': 8,
'sss': 9,
}[strint]
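# e.g. index2str(9) == 'sss' and str2index('sss') == 9; the ten keys cover the
# H/S integral ordering of the skf table (dds, ddp, ddd, pds, pdp, pps, ppp, sds, sps, sss)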
# define the base for H and s
baseH = 0
baseS = 10
def not_printed_bi(currType, printedPair):
# decide bi printed or not
items = currType.split()
atom1 = items[0]
atom2 = items[1]
bitype = items[2]
for currPair in printedPair:
items = currPair.split()
tmpatom1 = items[0]
tmpatom2 = items[1]
tmpbitype = items[2]
        # only when atom1 and atom2 are swapped (and the orbital pair is symmetric) can the entry be redundant
if (atom1 == tmpatom2) and (atom2 == tmpatom1) \
and (bitype == tmpbitype) and (bitype[0] == bitype[1]):
return False
return True
def DLconv_ee(atom,line1,line2):
    # get element information
Element = atom
Mass = float(line2.replace(',',' ').split()[0])
    # not included in the skf file, so set to zero (see the note in the module docstring)
Wss = 0
Wpp = 0
Wdd = 0
Wff = 0
# other are from line1
Ef = 0 # till now, no f elements
(Ed,Ep,Es,SPE,Ud,Up,Us,fd,fp,fs) = map(float,line1.replace(',',' ').split())
Ed = Ed * hatree2ev
Ep = Ep * hatree2ev
Es = Es * hatree2ev
Numel = fd + fp + fs
# to reproduce the DFTB+ method, HubbardU will be the value of s shell
HubbardU = Us * hatree2ev
# spin coefficient are also pre-computed
if fd != 0:
basis = 'spd'
elif fp != 0:
basis = 'sp'
else:
basis = 's'
# unpacked Done
currEE = '%s %s %.1f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' \
% (Element,basis,Numel,Es,Ep,Ed,Ef,Mass,HubbardU,Wss,Wpp,Wdd,Wff)
return currEE
def DLconv_HS_tab(strDHS,gridDistB,nPts,atom1, atom2):
# convert to LATTE format
# alloc the matrix
MdHS = np.zeros((20,nPts))
# convert gird distant to angstroms and generate grid points array
gridDistA = gridDistB * bohr2angstroms
gridPts = np.linspace(gridDistA, gridDistA*nPts, num=nPts)
# filling the matrix
iPts = 0
for line in strDHS:
iOrbs = 0
for item in line.split():
tmpList = item.split('*')
if len(tmpList) == 1:
MdHS[iOrbs][iPts] = float(tmpList[0].split(',')[0])
iOrbs += 1
else:
for _ in range(int(tmpList[0])):
MdHS[iOrbs][iPts] = float(tmpList[1].split(',')[0])
iOrbs += 1
iPts += 1
# generate the cell for print
BIlist = [gridPts] # only 1 distance info
for iOrbs in range(10):
        # strip the leading 1s (typical in most skf files)
for istart in range(len(gridPts)):
if (MdHS[baseS+iOrbs][istart] != 1.) and (MdHS[baseH+iOrbs][istart] != 1.):
break
# get prepared for information needed
intStr = index2str(iOrbs)
tmpH = MdHS[baseH+iOrbs] * hatree2ev
tmpS = MdHS[baseS+iOrbs]
# determine whether parameter exist or not
if (np.linalg.norm(tmpH[istart:]) * np.linalg.norm(tmpS[istart:]) == 0):
continue
else:
bitypeStr = "%s %s %s" % (atom1, atom2, intStr)
BIlist.append([bitypeStr,tmpS,tmpH, istart])
return BIlist
def compute_rep_preexp(expStr, endR, dR):
"""compute the exponential part of repulsive potential"""
(a1, a2, a3) = map(float, expStr.split())
npt = int(np.floor(endR/dR) + 1)
preRptsB = np.zeros(npt)
preEptsH = np.zeros(npt)
for i in range(npt):
preRptsB[i] = (i+1) * dR
preEptsH[i] = np.exp(-a1*preRptsB[i] + a2) + a3
return (preRptsB,preEptsH)
def DLconv_repp(atom1, atom2, strDrp,latteFD):
# convert ppots to latte format
# read informations from dftb parameters
strDrp.pop(0) # skip the header "Spline"
tmplist = strDrp.pop(0).split()
nDPts = int(tmplist[0]) # number of spline domain in dftb parameters
RcutB = float(tmplist[1]) # cutoff radii in bohr
RptsB = np.zeros(nDPts+1) # points radii in Bohr
EptsH = np.zeros(nDPts+1) # list to save energy in Hartree
iPts = 0
    shortEXP = strDrp.pop(0) # the short-distance exponential coefficients
    # directly use the grid points from the dftb pp list
for tmpline in strDrp:
if len(tmpline.split()) != 6:
break
(r0, _, c0, _, _, _) = map(float,tmpline.split())
EptsH[iPts] = c0
RptsB[iPts] = r0
iPts += 1
# the last part
(r0, _, c0, _, _, _, _, _) = map(float,tmpline.split())
EptsH[iPts] = c0
RptsB[iPts] = r0
iPts += 1
EptsH[iPts] = 0
RptsB[iPts] = RcutB
# compute the beginning exp part
(preRptsB, preEptsH) = compute_rep_preexp(shortEXP, RptsB[0], RptsB[1]-RptsB[0])
# convert atomic unit to ev and angstroms
RptsA = RptsB * bohr2angstroms
EptsEv = EptsH * hatree2ev
preRptsA = preRptsB * bohr2angstroms
preEptsEv = preEptsH * hatree2ev
# return the final string
strLATTE = "%s %s \n %d\n" % (atom1, atom2, nDPts+1+len(preRptsB)) # header for LATTE block
for i in range(len(preRptsB)):
strLATTE += "%.15E %.15E \n" % (preRptsA[i], preEptsEv[i])
for i in range(nDPts+1):
strLATTE += "%.15E %.15E \n" % (RptsA[i], EptsEv[i])
return strLATTE
def doDLconvert(skfFile,latteFD):
# get atom names
atomPair = skfFile.replace('/','.').split('.')[-2]
#print atomPair
(atom1,atom2) = atomPair.split('-')
# read the file
fp = open(skfFile)
strList = fp.read().splitlines()
    fp.close()
# get HS info and atomic info(if atom1 == atom2)
header = strList.pop(0).replace(',',' ').split()
gridDistB = float(header[0])
    # nPts = int(header[1]) in principle; however, this value is often wrong
if atom1 == atom2:
line1 = strList.pop(0)
line2 = strList.pop(0)
currEE = DLconv_ee(atom1,line1,line2)
else:
        Drepfun = strList.pop(0) # save it just in case there is no spline
currEE = ''
strLHS = []
while strList:
line = strList.pop(0)
if 'Spline' in line:
            # this is where the two sections split; nPts might be wrong
break
strLHS.append(line)
nPts = len(strLHS)
BItable = DLconv_HS_tab(strLHS,gridDistB,nPts,atom1,atom2)
# get PP info
strDrp = [line]
while strList:
line = strList.pop(0)
if ('Documentation' in line) or (len(line.strip())==0):
break
strDrp.append(line)
currPP = DLconv_repp(atom1, atom2, strDrp,latteFD)
# Done!
return (currEE,BItable,currPP)
def printPP(ppots):
fp = open('ppots.dftb','w')
header = '%d \n' % len(ppots)
fp.write(header)
for line in ppots:
fp.write(line)
fp.close()
def printEE(electrons):
fp = open('electrons.dat','w')
header1 = 'Noelem= %d \n' % len(set(filter(None, electrons)))
fp.write(header1)
header2 = 'Element basis Numel Es Ep Ed Ef Mass HubbardU Wss Wpp Wdd Wff \n'
fp.write(header2)
for line in electrons:
if line:
fp.write(line+' \n')
fp.close()
def printBItab(biTab,nbiTab):
BIstr = []
printedPair = []
for biList in biTab:
distList = biList[0]
nline = len(distList)
for currtypeBI in biList[1:]:
if not_printed_bi(currtypeBI[0], printedPair):
istart = currtypeBI[-1]
BIstr.append('%s\n' % currtypeBI[0])
BIstr.append('%d\n' % (nline-istart))
for i in range(nline-istart):
BIstr.append('%12.5f %15e %15e \n' % \
(distList[i+istart], currtypeBI[1][i+istart], currtypeBI[2][i+istart]))
printedPair.append(currtypeBI[0])
else:
nbiTab -= 1
# done, write the file
fp = open('bondints.table','w')
fp.write('Noints= %d\n' % nbiTab)
for line in BIstr:
fp.write(line)
fp.close()
if __name__=='__main__':
# record dir and get path
# CWD dftbFD latteFD
if len(sys.argv) < 3:
sys.exit(__doc__)
CWD = os.getcwd() # record curr dir
dftbFD = sys.argv[1]
    if not(os.path.exists(dftbFD) and os.path.isdir(dftbFD)):
        sys.exit("The dftb folder does not exist!")
    elif not(glob.glob(dftbFD+'/*.skf')):
        sys.exit("No skf file in the dftb folder!")
latteFD = sys.argv[2]
if not(os.path.exists(latteFD)):
os.mkdir(latteFD)
    elif not(os.path.isdir(latteFD)):
sys.exit("Cannot create LATTE folder!")
# get skf list and loop over it to generate the string lists
skfList = glob.glob(dftbFD+'/*.skf')
bondint = []
biTab = []
nbiTab = 0
ppots = []
electrons = []
existPP = []
existBI = []
for skfFile in skfList:
print("reading %s" % skfFile)
(atom1,atom2) = (skfFile.replace('/','.').split('.')[-2]).split('-')
(currEE,currBItab,currPP) = doDLconvert(skfFile,latteFD)
biTab.append(currBItab)
nbiTab += (len(currBItab) - 1)
if not ([atom2,atom1] in existPP):
ppots.append(currPP)
existPP.append([atom1,atom2])
electrons.append(currEE)
# do the output
os.chdir(latteFD)
printPP(ppots)
printEE(electrons)
printBItab(biTab,nbiTab)
# Done
os.chdir(CWD)
exit(0)
| 10,808 | 30.97929 | 95 | py |
LATTE | LATTE-master/tests/test-optim.py |
#!/usr/bin/env python
def compare_coordinates(reference, current, reltol):
"""Compare coordinates.
Given a reference output and the current output, compare the
coordinates to within the relative tolerance given by reltol.
"""
import re
    position = re.compile(r"^\s*([a-zA-Z]+)\s+([0-9eE.+-]+)\s+([0-9eE.+-]+)\s+([0-9eE.+-]+)")
fd = open(reference)
lines = fd.readlines()
fd.close()
reference_positions = []
i = 0
while i < len(lines):
N = int(lines[i])
i += 2
reference_positions.append([])
for j in range(i, i+N):
result = position.search(lines[j])
reference_positions[-1].append({"name": result.group(1),
"position": [float(result.group(2)),
float(result.group(3)),
float(result.group(4))]})
i += N
fd = open(current)
lines = fd.readlines()
fd.close()
current_positions = []
i = 0
while i < len(lines):
N = int(lines[i])
i += 2
current_positions.append([])
for j in range(i, i+N):
result = position.search(lines[j])
current_positions[-1].append({"name": result.group(1),
"position": [float(result.group(2)),
float(result.group(3)),
float(result.group(4))]})
i += N
if len(reference_positions) != len(current_positions):
raise Exception("[error] different number of optimization steps\n"
+ (" reference ran for %4d steps\n" % (len(reference_positions)))
+ (" current ran for %4d steps\n" % (len(current_positions)))
+ " can not compare")
import math
result = True
for i in range(len(reference_positions)):
rmsd = 0
for j in range(len(reference_positions[i])):
ref = reference_positions[i][j]["position"]
cur = current_positions[i][j]["position"]
rmsd += (ref[0] - cur[0])**2 + (ref[1] - cur[1])**2 + (ref[2] - cur[2])**2
rmsd = math.sqrt(rmsd/len(reference_positions[i]))
if rmsd > reltol:
print("failure in optimization step %d" % (i+1))
print("rmsd = %e" % (rmsd))
result = False
if not result:
raise Exception(("[error] when comparing '%s' with '%s'" % (reference, current))
+ "structures do not agree")
print("PASSED")
def main():
"""The main function.
"""
import argparse, os, sys
parser = argparse.ArgumentParser(description="""Script to get compare two optimization results""")
parser.add_argument("--reference",
help="The reference output")
parser.add_argument("--current",
help="The current output")
parser.add_argument("--reltol",
help="Relative tolerance when comparing, default is %(default)s",
type=float,
default=1e-10)
options = parser.parse_args()
compare_coordinates(options.reference, options.current, options.reltol)
if __name__ == "__main__":
main()
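# Example invocation (file names are illustrative; the flags are defined by the argparse above):
#   ./test-optim.py --reference reference.xyz --current current.xyz --reltol 1e-8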
| 3,418 | 34.614583 | 102 | py |
LATTE | LATTE-master/tests/test-energy.py |
#!/usr/bin/env python
def compare_MD(reference, current, reltol):
"""Compare MD energies.
Given a reference output and the current output, compare the MD
energies to within the relative tolerance given by reltol.
"""
import sys
#energy = re.compile("MD_data\s+([0-9eE.+-]+)\s+([0-9eE.+-]+)")
fd = open(reference)
reference_energies = []
for line in fd:
result = line.split()
reference_energies.append(float(result[0]))
fd.close()
fd = open(current)
current_energies = []
for line in fd:
result = line.split()
current_energies.append(float(result[0]))
fd.close()
if len(reference_energies) != len(current_energies):
raise Exception("[error] different number of MD steps\n"
+ (" reference ran for %4d steps\n" % (len(reference_energies)))
+ (" current ran for %4d steps\n" % (len(current_energies)))
+ " can not compare")
result = True
for i in range(len(reference_energies)):
diff = abs(reference_energies[i] - current_energies[i])
if reference_energies[i] != 0:
diff = abs(diff/reference_energies[i])
if diff > reltol:
print("failure in MD step %d" % (i+1))
result = False
if not result:
raise Exception(("[error] when comparing '%s' with '%s'" % (reference, current))
+ "energies do not agree")
print("PASSED")
def main():
"""The main function.
"""
import argparse, os, sys
parser = argparse.ArgumentParser(description="""Script to compare MD results by using the total energy""")
parser.add_argument("--reference",
help="The reference output")
parser.add_argument("--current",
help="The current output")
parser.add_argument("--reltol",
help="Relative tolerance when comparing, default is %(default)s",
type=float,
default=1e-10)
options = parser.parse_args()
compare_MD(options.reference, options.current, options.reltol)
if __name__ == "__main__":
main()
| 2,228 | 30.842857 | 110 | py |
Likelihood-free_OICA | Likelihood-free_OICA-master/LFOICA.py |
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
import torch.optim as optim
import torch.nn.functional as F
from libs.distance_measure.mmd import mix_rbf_mmd2
from libs.common_utils import cos_act, normalize_mixing_matrix
from scipy.stats import semicircular
from scipy.stats import hypsecant
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import argparse
from libs.pytorch_pgm import PGM, prox_soft, prox_plus
# standard pytorch dataset
class dataset_simul(Dataset):
def __init__(self, data_path):
data_arrs = np.load(data_path)
self.mixtures = Tensor(data_arrs['arr_0'])
self.components = data_arrs['arr_1']
self.A = Tensor(data_arrs['arr_2'])
self.data_size = self.mixtures.shape[0]
def __len__(self):
return self.data_size
def __getitem__(self, idx):
mixtures_sample = self.mixtures[idx, :]
components_sample = self.components[idx, :]
return mixtures_sample, components_sample
def get_real_A(self):
return self.A
def get_real_components(self, batch_size):
assert batch_size <= self.data_size
np.random.shuffle(self.components)
return self.components[0:batch_size, :]
# transform random noise into components
class Gaussian_transformer(nn.Module):
def __init__(self, num_components):
super().__init__()
self.num_components = num_components
self.m = 1 # number of gaussian components for each channel in our non-gaussian noise generation model
self.random_feature_mapping = nn.ModuleList()
self.D = 8
self.models = nn.ModuleList()
for i in range(num_components):
random_feature_mapping = nn.Linear(self.m, self.D)
torch.nn.init.normal_(random_feature_mapping.weight, mean=0, std=1)
torch.nn.init.uniform_(random_feature_mapping.bias, a=0, b=2 * math.pi)
random_feature_mapping.weight.requires_grad = False
random_feature_mapping.bias.requires_grad = False
self.random_feature_mapping.append(random_feature_mapping)
        for i in range(num_components):  # different channels have different networks to guarantee independence
model = nn.Sequential(
nn.Linear(self.D, 2 * self.D),
nn.ELU(),
# nn.Linear(2*self.D, 4*self.D),
# nn.ELU(),
# nn.Linear(4 * self.D, 2*self.D),
# nn.ELU(),
nn.Linear(2 * self.D, 1)
)
self.models.append(model)
def forward(self, batch_size):
# gaussianNoise = Tensor(np.random.normal(0, 1, [batch_size, num_components, self.m])).to(device)
gaussianNoise = Tensor(np.random.uniform(-1, 1, [batch_size, self.num_components, self.m])).to(device)
        output = Tensor(np.zeros([batch_size, self.num_components])).to(device)  # [batch_size, num_components]
        cos_act_func = cos_act()
        for i in range(self.num_components):  # fill the output channel by channel
tmp = self.random_feature_mapping[i](gaussianNoise[:, i, :])
tmp = cos_act_func(tmp)
output[:, i] = self.models[i](tmp).squeeze()
return output
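# Shape sketch (illustrative): with the defaults above,
# Gaussian_transformer(k).forward(B) draws B * k uniform scalars, lifts each
# through a fixed random feature map of width D=8 followed by a cosine
# activation (random-Fourier-feature style), and returns a [B, k] tensor of
# mutually independent non-gaussian components.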
# the generative process mimics the mixing procedure from components to mixtures
class Generative_net(nn.Module):
def __init__(self, num_mixtures, num_components, A):
super().__init__()
        # for the simulation experiments, we initialize A with its true value plus some large noise
        # to avoid a local optimum; all methods are compared under the same initialization
self.A = nn.Parameter(A + torch.Tensor(np.random.uniform(-0.2, 0.2, [num_mixtures, num_components])))
def forward(self, components):
batch_size = components.shape[0]
result = torch.mm(components, self.A.t())
return result
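# Shape note: components is [batch, num_components] and A is
# [num_mixtures, num_components], so torch.mm(components, A.t()) yields
# mixtures of shape [batch, num_mixtures].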
parser = argparse.ArgumentParser()
parser.add_argument('--num_mixtures', type=int, default=5)
parser.add_argument('--num_components', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=2000)
parser.add_argument('--num_epochs', type=int, default=1000)
parser.add_argument('--lr_T', type=float, default=0.01)
parser.add_argument('--lr_G', type=float, default=0.001)
parser.add_argument('--reg_lambda', type=float, default=0)
parser.add_argument('--print_int', type=int, default=50)
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--plot', action='store_true')
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()
sigmaList = [0.001, 0.01]
if args.cuda:
assert torch.cuda.is_available()
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
dataset = dataset_simul(
'data/OICA_data/OICA_data_{}mixtures_{}components.npz'.format(args.num_mixtures, args.num_components))
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)
transformer = Gaussian_transformer(args.num_components).to(device)
generator = Generative_net(args.num_mixtures, args.num_components, dataset.get_real_A()).to(device)
transformer_optimizer = optim.Adam(transformer.parameters(), lr=args.lr_T)
generator_optimizer = optim.Adam(generator.parameters(), lr=args.lr_G)
for epoch in range(args.num_epochs):
for step, (real_mixtures, real_components) in enumerate(dataloader):
generator.zero_grad()
transformer.zero_grad()
real_mixtures = real_mixtures.to(device)
batch_size_i = real_mixtures.shape[0]
fake_components = transformer.forward(batch_size_i)
fake_mixtures = generator.forward(fake_components)
MMD = torch.sqrt(F.relu(mix_rbf_mmd2(fake_mixtures, real_mixtures, sigmaList)))
MMD.backward()
transformer_optimizer.step()
generator_optimizer.step()
if epoch % args.print_int == 0 and epoch != 0:
print('##########epoch{}##########'.format(epoch))
MSE_func = nn.MSELoss()
real_A = torch.abs(dataset.get_real_A())
fake_A = torch.abs(list(generator.parameters())[0]).detach().cpu()
real_A, fake_A = normalize_mixing_matrix(real_A, fake_A)
# for i in range(num_components):
# fake_A[:, i]/=normalize_factor[i]
print('estimated A', fake_A)
print('real A', real_A)
MSE = MSE_func(real_A, fake_A)
        print('MSE: {}, MMD: {}'.format(MSE.item(), MMD.item()))
| 6,446 | 41.695364 | 118 |
py
|
Likelihood-free_OICA
|
Likelihood-free_OICA-master/libs/pytorch_pgm.py
|
from torch.optim.sgd import SGD
import torch
from torch.optim.optimizer import required
class PGM(SGD):
def __init__(self, params, proxs, lr=required, reg_lambda=0, momentum=0, dampening=0,
nesterov=False):
kwargs = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=0, nesterov=nesterov)
super().__init__(params, **kwargs)
self.lr = lr
self.reg_lambda = reg_lambda
if len(proxs) != len(self.param_groups):
raise ValueError("Invalid length of argument proxs: {} instead of {}".format(len(proxs), len(self.param_groups)))
for group, prox in zip(self.param_groups, list(proxs)):
group.setdefault('prox', prox)
def step(self, closure=None):
# this performs a gradient step
# optionally with momentum or nesterov acceleration
super().step(closure=closure)
for group in self.param_groups:
prox = group['prox']
# here we apply the proximal operator to each parameter in a group
for p in group['params']:
p.data = prox(p.data, self.lr, self.reg_lambda)
def prox_soft(X, step, thresh=0):
"""Soft thresholding proximal operator
"""
thresh_ = step_gamma(step, thresh)
return torch.sign(X)*prox_plus(torch.abs(X) - thresh_)
def prox_plus(X):
"""Projection onto non-negative numbers
"""
below = X < 0
X[below] = 0
return X
def step_gamma(step, gamma):
"""Update gamma parameter for use inside of continuous proximal operator.
Every proximal operator for a function with a continuous parameter,
e.g. gamma ||x||_1, needs to update that parameter to account for the
stepsize of the algorithm.
Returns:
gamma * step
"""
return gamma * step
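if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original training
    # code): one parameter group with L1 shrinkage applied via prox_soft
    # after each proximal gradient step.
    A = torch.nn.Parameter(torch.randn(5, 10))
    opt = PGM([A], proxs=[prox_soft], lr=0.01, reg_lambda=0.1)
    loss = (A ** 2).sum()
    loss.backward()
    opt.step()  # SGD update followed by soft thresholding
    print('nonzero entries after prox step:', int((A != 0).sum()))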
| 1,798 | 34.98 | 125 |
py
|
Likelihood-free_OICA
|
Likelihood-free_OICA-master/libs/common_utils.py
|
import torch
import numpy as np
def gumbel_softmax(p, device, sample_num=1, tau=0.2):
g = np.random.uniform(0.0001, 0.9999, [sample_num, p.shape[0]])
g = -torch.log(-torch.log(torch.Tensor(g)))
numerator = torch.exp((torch.log(p).repeat(sample_num, 1) + g.to(device)) / tau)
denominator = torch.sum(numerator, dim=1)
denominator_repeat = denominator.repeat(p.shape[0], 1).t()
return numerator / denominator_repeat
class cos_act(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
D = x.shape[1]
x = (2.0 / D) ** 0.5 * torch.cos(x)
return x
def normalize_components(components, fake_components):
num_components = components.shape[1]
normalize_factor = torch.zeros([num_components])
components = torch.Tensor(components.float())
for i in range(num_components):
normalize_factor[i] = torch.sqrt(torch.var(fake_components[i, :]) / torch.var(components[i, :]))
return normalize_factor
def normalize_mixing_matrix(real_A, estimated_A):
real_A = torch.abs(real_A)
estimated_A = torch.abs(estimated_A)
factor_real_A = 1/torch.norm(real_A[:, 0])
factor_estimated_A = 1/torch.norm(estimated_A[:, 0])
real_A_normalized = real_A * factor_real_A
estimated_A_normalized = estimated_A * factor_estimated_A
return real_A_normalized, estimated_A_normalized
if __name__ == '__main__':
    # quick sanity check: 100 Gumbel-softmax samples at temperature 0.05
    print(gumbel_softmax(torch.Tensor([0.1, 0.9]), torch.device('cpu'), sample_num=100, tau=0.05))
| 1,497 | 33.045455 | 104 |
py
|
Likelihood-free_OICA
|
Likelihood-free_OICA-master/libs/__init__.py
| 0 | 0 | 0 |
py
|
|
Likelihood-free_OICA
|
Likelihood-free_OICA-master/libs/distance_measure/kernel_density.py
|
import numpy as np
import torch
def kernel_density_loss(X, Y, sigma_list):
assert (X.size(0) == Y.size(0))
m = X.size(0)
n = X.size(1)
expand_X = X.unsqueeze(2).expand(-1, -1, m).permute(0, 2, 1)
expand_Y = Y.unsqueeze(2).expand(-1, -1, m).permute(2, 0, 1)
X_Y = expand_X - expand_Y
X_Y_norm = torch.zeros((m, m))
for i in range(n):
X_Y_norm = X_Y_norm + X_Y[:, :, i]**2
X_Y_norm_rbf = torch.zeros((m, m))
for sigma in sigma_list:
X_Y_norm_rbf = X_Y_norm_rbf + torch.exp(-1 * X_Y_norm / (2 * (sigma ** 2))) / np.sqrt(2*np.pi*(sigma**2))
print("X_Y", X_Y)
print("X_Y_norm_rbf", X_Y_norm_rbf)
loss = -1 * torch.sum(torch.log(torch.sum(X_Y_norm_rbf, dim=0) / m)) / m
return loss
| 729 | 37.421053 | 113 |
py
|
Likelihood-free_OICA
|
Likelihood-free_OICA-master/libs/distance_measure/mmd.py
|
#!/usr/bin/env python
# encoding: utf-8
import torch
min_var_est = 1e-8
# Consider linear time MMD with a linear kernel:
# K(f(x), f(y)) = f(x)^Tf(y)
# h(z_i, z_j) = k(x_i, x_j) + k(y_i, y_j) - k(x_i, y_j) - k(x_j, y_i)
# = [f(x_i) - f(y_i)]^T[f(x_j) - f(y_j)]
#
# f_of_X: batch_size * k
# f_of_Y: batch_size * k
def linear_mmd2(f_of_X, f_of_Y):
loss = 0.0
delta = f_of_X - f_of_Y
loss = torch.mean((delta[:-1] * delta[1:]).sum(1))
return loss
# Consider linear time MMD with a polynomial kernel:
# K(f(x), f(y)) = (alpha*f(x)^Tf(y) + c)^d
# f_of_X: batch_size * k
# f_of_Y: batch_size * k
def poly_mmd2(f_of_X, f_of_Y, d=2, alpha=1.0, c=2.0):
K_XX = (alpha * (f_of_X[:-1] * f_of_X[1:]).sum(1) + c)
K_XX_mean = torch.mean(K_XX.pow(d))
K_YY = (alpha * (f_of_Y[:-1] * f_of_Y[1:]).sum(1) + c)
K_YY_mean = torch.mean(K_YY.pow(d))
K_XY = (alpha * (f_of_X[:-1] * f_of_Y[1:]).sum(1) + c)
K_XY_mean = torch.mean(K_XY.pow(d))
K_YX = (alpha * (f_of_Y[:-1] * f_of_X[1:]).sum(1) + c)
K_YX_mean = torch.mean(K_YX.pow(d))
return K_XX_mean + K_YY_mean - K_XY_mean - K_YX_mean
def _mix_rbf_kernel(X, Y, sigma_list):
assert(X.size(0) == Y.size(0))
m = X.size(0)
Z = torch.cat((X, Y), 0)
ZZT = torch.mm(Z, Z.t())
diag_ZZT = torch.diag(ZZT).unsqueeze(1)
Z_norm_sqr = diag_ZZT.expand_as(ZZT)
exponent = Z_norm_sqr - 2 * ZZT + Z_norm_sqr.t()
K = 0.0
for sigma in sigma_list:
# gamma = 1.0 / (2 * sigma**2)
# K += torch.exp(-gamma * exponent)
K += torch.pow(1+exponent/(2*sigma), -sigma) # rational quadratic kernel
return K[:m, :m], K[:m, m:], K[m:, m:], len(sigma_list)
def mix_rbf_mmd2(X, Y, sigma_list, biased=True):
K_XX, K_XY, K_YY, d = _mix_rbf_kernel(X, Y, sigma_list)
# return _mmd2(K_XX, K_XY, K_YY, const_diagonal=d, biased=biased)
return _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=biased)
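# Minimal usage sketch (illustrative; assumes two equally sized batches):
#   X = torch.randn(100, 2)
#   Y = torch.randn(100, 2) + 1.0
#   mmd = mix_rbf_mmd2(X, Y, sigma_list=[0.5, 1.0])  # 0-dim tensor; grows as
#   # the two sample distributions become more dissimilar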
def mix_rbf_mmd2_and_ratio(X, Y, sigma_list, biased=True):
K_XX, K_XY, K_YY, d = _mix_rbf_kernel(X, Y, sigma_list)
# return _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=d, biased=biased)
return _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=False, biased=biased)
################################################################################
# Helper functions to compute variances based on kernel matrices
################################################################################
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    m = K_XX.size(0)    # assume X, Y are same shape
    if biased:
        mmd2 = K_XX + K_YY - K_XY - K_XY.transpose(0, 1)
        mmd2 = mmd2.sum()
        mmd2 = mmd2/(m*m)
    else:
        diag_X = torch.diag(K_XX)                       # (m,)
        diag_Y = torch.diag(K_YY)                       # (m,)
        sum_diag_X = torch.sum(diag_X)/(m*(m-1))
        sum_diag_Y = torch.sum(diag_Y)/(m*(m-1))
        # drop the diagonal terms of K_XX / K_YY for the unbiased estimate
        mmd2 = K_XX/(m*(m-1)) + K_YY/(m*(m-1)) - (K_XY + K_XY.transpose(0, 1))/(m*m)
        mmd2 = mmd2.sum()
        mmd2 = mmd2 - sum_diag_X - sum_diag_Y
    return mmd2
# def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
# m = K_XX.size(0) # assume X, Y are same shape
# # Get the various sums of kernels that we'll use
# # Kts drop the diagonal, but we don't need to compute them explicitly
# if const_diagonal is not False:
# diag_X = diag_Y = const_diagonal
# sum_diag_X = sum_diag_Y = m * const_diagonal
# else:
# diag_X = torch.diag(K_XX) # (m,)
# diag_Y = torch.diag(K_YY) # (m,)
# sum_diag_X = torch.sum(diag_X)
# sum_diag_Y = torch.sum(diag_Y)
# Kt_XX_sums = K_XX.sum(dim=1) - diag_X # \tilde{K}_XX * e = K_XX * e - diag_X
# Kt_YY_sums = K_YY.sum(dim=1) - diag_Y # \tilde{K}_YY * e = K_YY * e - diag_Y
# K_XY_sums_0 = K_XY.sum(dim=0) # K_{XY}^T * e
# Kt_XX_sum = Kt_XX_sums.sum() # e^T * \tilde{K}_XX * e
# Kt_YY_sum = Kt_YY_sums.sum() # e^T * \tilde{K}_YY * e
# K_XY_sum = K_XY_sums_0.sum() # e^T * K_{XY} * e
# if biased:
# mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
# + (Kt_YY_sum + sum_diag_Y) / (m * m)
# - 2.0 * K_XY_sum / (m * m))
# else:
# mmd2 = (Kt_XX_sum / (m * (m - 1))
# + Kt_YY_sum / (m * (m - 1))
# - 2.0 * K_XY_sum / (m * m))
# return mmd2
def _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
mmd2, var_est = _mmd2_and_variance(K_XX, K_XY, K_YY, const_diagonal=const_diagonal, biased=biased)
loss = mmd2 / torch.sqrt(torch.clamp(var_est, min=min_var_est))
return loss, mmd2, var_est
def _mmd2_and_variance(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
m = K_XX.size(0) # assume X, Y are same shape
# Get the various sums of kernels that we'll use
# Kts drop the diagonal, but we don't need to compute them explicitly
if const_diagonal is not False:
diag_X = diag_Y = const_diagonal
sum_diag_X = sum_diag_Y = m * const_diagonal
sum_diag2_X = sum_diag2_Y = m * const_diagonal**2
else:
diag_X = torch.diag(K_XX) # (m,)
diag_Y = torch.diag(K_YY) # (m,)
sum_diag_X = torch.sum(diag_X)
sum_diag_Y = torch.sum(diag_Y)
sum_diag2_X = diag_X.dot(diag_X)
sum_diag2_Y = diag_Y.dot(diag_Y)
Kt_XX_sums = K_XX.sum(dim=1) - diag_X # \tilde{K}_XX * e = K_XX * e - diag_X
Kt_YY_sums = K_YY.sum(dim=1) - diag_Y # \tilde{K}_YY * e = K_YY * e - diag_Y
K_XY_sums_0 = K_XY.sum(dim=0) # K_{XY}^T * e
K_XY_sums_1 = K_XY.sum(dim=1) # K_{XY} * e
Kt_XX_sum = Kt_XX_sums.sum() # e^T * \tilde{K}_XX * e
Kt_YY_sum = Kt_YY_sums.sum() # e^T * \tilde{K}_YY * e
K_XY_sum = K_XY_sums_0.sum() # e^T * K_{XY} * e
Kt_XX_2_sum = (K_XX ** 2).sum() - sum_diag2_X # \| \tilde{K}_XX \|_F^2
Kt_YY_2_sum = (K_YY ** 2).sum() - sum_diag2_Y # \| \tilde{K}_YY \|_F^2
K_XY_2_sum = (K_XY ** 2).sum() # \| K_{XY} \|_F^2
if biased:
mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
+ (Kt_YY_sum + sum_diag_Y) / (m * m)
- 2.0 * K_XY_sum / (m * m))
else:
mmd2 = (Kt_XX_sum / (m * (m - 1))
+ Kt_YY_sum / (m * (m - 1))
- 2.0 * K_XY_sum / (m * m))
var_est = (
2.0 / (m**2 * (m - 1.0)**2) * (2 * Kt_XX_sums.dot(Kt_XX_sums) - Kt_XX_2_sum + 2 * Kt_YY_sums.dot(Kt_YY_sums) - Kt_YY_2_sum)
- (4.0*m - 6.0) / (m**3 * (m - 1.0)**3) * (Kt_XX_sum**2 + Kt_YY_sum**2)
+ 4.0*(m - 2.0) / (m**3 * (m - 1.0)**2) * (K_XY_sums_1.dot(K_XY_sums_1) + K_XY_sums_0.dot(K_XY_sums_0))
- 4.0*(m - 3.0) / (m**3 * (m - 1.0)**2) * (K_XY_2_sum) - (8 * m - 12) / (m**5 * (m - 1)) * K_XY_sum**2
+ 8.0 / (m**3 * (m - 1.0)) * (
1.0 / m * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
- Kt_XX_sums.dot(K_XY_sums_1)
- Kt_YY_sums.dot(K_XY_sums_0))
)
return mmd2, var_est
| 7,282 | 37.739362 | 131 |
py
|
Likelihood-free_OICA
|
Likelihood-free_OICA-master/libs/distance_measure/__init__.py
| 0 | 0 | 0 |
py
|
|
DSLA-DSLA
|
DSLA-DSLA/setup.py
|
#!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=[]):
define_macros = []
extra_compile_args = {'cxx': []}
if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('WITH_CUDA', None)]
extension = CUDAExtension
extra_compile_args['nvcc'] = [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
sources += sources_cuda
else:
print(f'Compiling {name} without CUDA')
extension = CppExtension
return extension(
name=f'{module}.{name}',
sources=[os.path.join(*module.split('.'), p) for p in sources],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import sys
from os.path import exists
import re
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
def add_mim_extension():
"""Add extra files that are required to support MIM into the package.
These files will be added by creating a symlink to the originals if the
package is installed in `editable` mode (e.g. pip install -e .), or by
copying from the originals otherwise.
"""
# parse installment mode
if 'develop' in sys.argv:
# installed by `pip install -e .`
if platform.system() == 'Windows':
# set `copy` mode here since symlink fails on Windows.
mode = 'copy'
else:
mode = 'symlink'
elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
# installed by `pip install .`
# or create source distribution by `python setup.py sdist`
mode = 'copy'
else:
return
filenames = ['tools', 'configs', 'demo', 'model-index.yml']
repo_path = osp.dirname(__file__)
mim_path = osp.join(repo_path, 'mmdet', '.mim')
os.makedirs(mim_path, exist_ok=True)
for filename in filenames:
if osp.exists(filename):
src_path = osp.join(repo_path, filename)
tar_path = osp.join(mim_path, filename)
if osp.isfile(tar_path) or osp.islink(tar_path):
os.remove(tar_path)
elif osp.isdir(tar_path):
shutil.rmtree(tar_path)
if mode == 'symlink':
src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
os.symlink(src_relpath, tar_path)
elif mode == 'copy':
if osp.isfile(src_path):
shutil.copyfile(src_path, tar_path)
elif osp.isdir(src_path):
shutil.copytree(src_path, tar_path)
else:
warnings.warn(f'Cannot copy file {src_path}.')
else:
raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
add_mim_extension()
setup(
name='mmdet',
version=get_version(),
description='OpenMMLab Detection Toolbox and Benchmark',
long_description=readme(),
long_description_content_type='text/markdown',
author='MMDetection Contributors',
author_email='[email protected]',
keywords='computer vision, object detection',
url='https://github.com/open-mmlab/mmdetection',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
license='Apache License 2.0',
setup_requires=parse_requirements('requirements/build.txt'),
tests_require=parse_requirements('requirements/tests.txt'),
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'optional': parse_requirements('requirements/optional.txt'),
},
ext_modules=[],
cmdclass={'build_ext': BuildExtension},
zip_safe=False)
| 8,008 | 34.914798 | 125 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/test.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
'the inference speed')
parser.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed testing)')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
if len(cfg.gpu_ids) > 1:
warnings.warn(
f'We treat {cfg.gpu_ids} as gpu-ids, and reset to '
f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in '
'non-distribute testing time.')
cfg.gpu_ids = cfg.gpu_ids[0:1]
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
rank, _ = get_dist_info()
# allows not to create
if args.work_dir is not None and rank == 0:
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=cfg.gpu_ids)
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
args.show_score_thr)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule', 'dynamic_intervals'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
metric = dataset.evaluate(outputs, **eval_kwargs)
print(metric)
metric_dict = dict(config=args.config, metric=metric)
if args.work_dir is not None and rank == 0:
mmcv.dump(metric_dict, json_file)
if __name__ == '__main__':
main()
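# Example invocation (hypothetical config/checkpoint paths):
#   python tools/test.py configs/dsla/dsla_r50_fpn_1x_coco.py \
#       work_dirs/dsla/latest.pth --eval bbox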
| 9,774 | 37.789683 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/train.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--auto-resume',
action='store_true',
help='resume from the latest checkpoint automatically')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir + '_' + time.strftime('%Y%m%d_%H%M00', time.localtime())  # append a timestamp to the work dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.auto_resume = args.auto_resume
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
if len(cfg.gpu_ids) > 1:
warnings.warn(
f'We treat {cfg.gpu_ids} as gpu-ids, and reset to '
f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in '
'non-distribute training time.')
cfg.gpu_ids = cfg.gpu_ids[0:1]
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
seed = init_random_seed(args.seed)
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
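# Example invocation (hypothetical config path):
#   python tools/train.py configs/dsla/dsla_r50_fpn_1x_coco.py \
#       --work-dir work_dirs/dsla --gpu-ids 0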
| 7,317 | 36.147208 | 115 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/deployment/test_torchserver.py
|
from argparse import ArgumentParser
import numpy as np
import requests
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.core import bbox2result
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
def parse_result(input, model_class):
bbox = []
label = []
score = []
for anchor in input:
bbox.append(anchor['bbox'])
label.append(model_class.index(anchor['class_name']))
score.append([anchor['score']])
bboxes = np.append(bbox, score, axis=1)
labels = np.array(label)
result = bbox2result(bboxes, labels, len(model_class))
return result
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
model_result = inference_detector(model, args.img)
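    # filter local results at 0.5, presumably to match the TorchServe
    # handler's default threshold so the assert below compares like with like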
for i, anchor_set in enumerate(model_result):
anchor_set = anchor_set[anchor_set[:, 4] >= 0.5]
model_result[i] = anchor_set
# show the results
show_result_pyplot(
model,
args.img,
model_result,
score_thr=args.score_thr,
title='pytorch_result')
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.img, 'rb') as image:
response = requests.post(url, image)
server_result = parse_result(response.json(), model.CLASSES)
show_result_pyplot(
model,
args.img,
server_result,
score_thr=args.score_thr,
title='server_result')
for i in range(len(model.CLASSES)):
assert np.allclose(model_result[i], server_result[i])
if __name__ == '__main__':
args = parse_args()
main(args)
| 2,357 | 30.44 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/deployment/mmdet2torchserve.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMDetection config format.
The contents vary for each task repository.
checkpoint_file:
In MMDetection checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mmcv.mkdir_or_exist(output_folder)
config = mmcv.Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmdet_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert MMDetection models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
        help='If not None, used for naming the `{model_name}.mar` '
        'file that will be created under `output_folder`. '
        'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
        raise ImportError('`torch-model-archiver` is required. '
                          'Try: pip install torch-model-archiver')
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
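# Example invocation (hypothetical paths):
#   python tools/deployment/mmdet2torchserve.py config.py checkpoint.pth \
#       --output-folder model_store --model-name dsla --force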
| 3,693 | 32.279279 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/deployment/test.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel
from mmdet.apis import single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) an ONNX model using ONNXRuntime')
parser.add_argument('config', help='test config file path')
parser.add_argument('model', help='Input model file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--backend',
required=True,
choices=['onnxruntime', 'tensorrt'],
help='Backend for input model to run. ')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=False,
shuffle=False)
if args.backend == 'onnxruntime':
from mmdet.core.export.model_wrappers import ONNXRuntimeDetector
model = ONNXRuntimeDetector(
args.model, class_names=dataset.CLASSES, device_id=0)
elif args.backend == 'tensorrt':
from mmdet.core.export.model_wrappers import TensorRTDetector
model = TensorRTDetector(
args.model, class_names=dataset.CLASSES, device_id=0)
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
args.show_score_thr)
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
| 5,481 | 37.069444 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/deployment/onnx2tensorrt.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import warnings
import numpy as np
import onnx
import torch
from mmcv import Config
from mmcv.tensorrt import is_tensorrt_plugin_loaded, onnx2trt, save_trt_engine
from mmdet.core.export import preprocess_example_input
from mmdet.core.export.model_wrappers import (ONNXRuntimeDetector,
TensorRTDetector)
from mmdet.datasets import DATASETS
def get_GiB(x: int):
"""return x GiB."""
return x * (1 << 30)
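# e.g. get_GiB(1) == 1 << 30 == 1073741824 (bytes in one GiB)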
def onnx2tensorrt(onnx_file,
trt_file,
input_config,
verify=False,
show=False,
workspace_size=1,
verbose=False):
import tensorrt as trt
onnx_model = onnx.load(onnx_file)
max_shape = input_config['max_shape']
min_shape = input_config['min_shape']
opt_shape = input_config['opt_shape']
fp16_mode = False
# create trt engine and wrapper
opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]}
max_workspace_size = get_GiB(workspace_size)
trt_engine = onnx2trt(
onnx_model,
opt_shape_dict,
log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
fp16_mode=fp16_mode,
max_workspace_size=max_workspace_size)
save_dir, _ = osp.split(trt_file)
if save_dir:
os.makedirs(save_dir, exist_ok=True)
save_trt_engine(trt_engine, trt_file)
print(f'Successfully created TensorRT engine: {trt_file}')
if verify:
# prepare input
one_img, one_meta = preprocess_example_input(input_config)
img_list, img_meta_list = [one_img], [[one_meta]]
img_list = [_.cuda().contiguous() for _ in img_list]
# wrap ONNX and TensorRT model
onnx_model = ONNXRuntimeDetector(onnx_file, CLASSES, device_id=0)
trt_model = TensorRTDetector(trt_file, CLASSES, device_id=0)
# inference with wrapped model
with torch.no_grad():
onnx_results = onnx_model(
img_list, img_metas=img_meta_list, return_loss=False)[0]
trt_results = trt_model(
img_list, img_metas=img_meta_list, return_loss=False)[0]
if show:
out_file_ort, out_file_trt = None, None
else:
out_file_ort, out_file_trt = 'show-ort.png', 'show-trt.png'
show_img = one_meta['show_img']
score_thr = 0.3
onnx_model.show_result(
show_img,
onnx_results,
score_thr=score_thr,
show=True,
win_name='ONNXRuntime',
out_file=out_file_ort)
trt_model.show_result(
show_img,
trt_results,
score_thr=score_thr,
show=True,
win_name='TensorRT',
out_file=out_file_trt)
with_mask = trt_model.with_masks
# compare a part of result
if with_mask:
compare_pairs = list(zip(onnx_results, trt_results))
else:
compare_pairs = [(onnx_results, trt_results)]
        err_msg = 'The numerical values are different between ONNXRuntime' + \
            ' and TensorRT, but it does not necessarily mean the' + \
            ' exported TensorRT engine is problematic.'
        # check the numerical value
        for ort_res, trt_res in compare_pairs:
            for o_res, t_res in zip(ort_res, trt_res):
                np.testing.assert_allclose(
                    o_res, t_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
        print('The numerical values are the same between ONNXRuntime and '
              'TensorRT')
def parse_normalize_cfg(test_pipeline):
transforms = None
for pipeline in test_pipeline:
if 'transforms' in pipeline:
transforms = pipeline['transforms']
break
assert transforms is not None, 'Failed to find `transforms`'
norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize']
assert len(norm_config_li) == 1, '`norm_config` should only have one'
norm_config = norm_config_li[0]
return norm_config
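# The returned dict typically looks like (values mirror the deprecated CLI
# defaults of this script):
#   dict(type='Normalize', mean=[123.675, 116.28, 103.53],
#        std=[58.395, 57.12, 57.375], to_rgb=True)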
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMDetection models from ONNX to TensorRT')
parser.add_argument('config', help='test config file path')
parser.add_argument('model', help='Filename of input ONNX model')
parser.add_argument(
'--trt-file',
type=str,
default='tmp.trt',
help='Filename of output TensorRT engine')
parser.add_argument(
'--input-img', type=str, default='', help='Image for test')
parser.add_argument(
'--show', action='store_true', help='Whether to show output results')
parser.add_argument(
'--dataset',
type=str,
default='coco',
help='Dataset name. This argument is deprecated and will be \
removed in future releases.')
parser.add_argument(
'--verify',
action='store_true',
help='Verify the outputs of ONNXRuntime and TensorRT')
parser.add_argument(
'--verbose',
action='store_true',
help='Whether to verbose logging messages while creating \
TensorRT engine. Defaults to False.')
parser.add_argument(
'--to-rgb',
action='store_false',
help='Feed model with RGB or BGR image. Default is RGB. This \
argument is deprecated and will be removed in future releases.')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[400, 600],
help='Input size of the model')
parser.add_argument(
'--mean',
type=float,
nargs='+',
default=[123.675, 116.28, 103.53],
help='Mean value used for preprocess input data. This argument \
is deprecated and will be removed in future releases.')
parser.add_argument(
'--std',
type=float,
nargs='+',
default=[58.395, 57.12, 57.375],
help='Variance value used for preprocess input data. \
This argument is deprecated and will be removed in future releases.')
parser.add_argument(
'--min-shape',
type=int,
nargs='+',
default=None,
help='Minimum input size of the model in TensorRT')
parser.add_argument(
'--max-shape',
type=int,
nargs='+',
default=None,
help='Maximum input size of the model in TensorRT')
parser.add_argument(
'--workspace-size',
type=int,
default=1,
help='Max workspace size in GiB')
args = parser.parse_args()
return args
if __name__ == '__main__':
assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
args = parse_args()
    warnings.warn(
        'Arguments like `--to-rgb`, `--mean`, `--std`, `--dataset` would be '
        'parsed directly from config file and are deprecated and will be '
        'removed in future releases.')
if not args.input_img:
args.input_img = osp.join(osp.dirname(__file__), '../demo/demo.jpg')
cfg = Config.fromfile(args.config)
def parse_shape(shape):
if len(shape) == 1:
shape = (1, 3, shape[0], shape[0])
        elif len(shape) == 2:
shape = (1, 3) + tuple(shape)
else:
raise ValueError('invalid input shape')
return shape
if args.shape:
input_shape = parse_shape(args.shape)
else:
img_scale = cfg.test_pipeline[1]['img_scale']
input_shape = (1, 3, img_scale[1], img_scale[0])
if not args.max_shape:
max_shape = input_shape
else:
max_shape = parse_shape(args.max_shape)
if not args.min_shape:
min_shape = input_shape
else:
min_shape = parse_shape(args.min_shape)
dataset = DATASETS.get(cfg.data.test['type'])
assert (dataset is not None)
CLASSES = dataset.CLASSES
normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
input_config = {
'min_shape': min_shape,
'opt_shape': input_shape,
'max_shape': max_shape,
'input_shape': input_shape,
'input_path': args.input_img,
'normalize_cfg': normalize_cfg
}
# Create TensorRT engine
onnx2tensorrt(
args.model,
args.trt_file,
input_config,
verify=args.verify,
show=args.show,
workspace_size=args.workspace_size,
verbose=args.verbose)
| 8,516 | 32.4 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/deployment/mmdet_handler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
threshold = 0.5
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        if torch.cuda.is_available():
            self.device = torch.device(
                self.map_location + ':' + str(properties.get('gpu_id')))
        else:
            self.device = torch.device(self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_detector(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = inference_detector(self.model, data)
return results
def postprocess(self, data):
# Format output following the example ObjectDetectionHandler format
output = []
for image_index, image_result in enumerate(data):
output.append([])
if isinstance(image_result, tuple):
bbox_result, segm_result = image_result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
else:
bbox_result, segm_result = image_result, None
for class_index, class_result in enumerate(bbox_result):
class_name = self.model.CLASSES[class_index]
for bbox in class_result:
bbox_coords = bbox[:-1].tolist()
score = float(bbox[-1])
if score >= self.threshold:
output[image_index].append({
'class_name': class_name,
'bbox': bbox_coords,
'score': score
})
return output
| 2,560 | 34.569444 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/deployment/pytorch2onnx.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import warnings
from functools import partial
import numpy as np
import onnx
import torch
from mmcv import Config, DictAction
from mmdet.core.export import build_model_from_cfg, preprocess_example_input
from mmdet.core.export.model_wrappers import ONNXRuntimeDetector
def pytorch2onnx(model,
input_img,
input_shape,
normalize_cfg,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False,
test_img=None,
do_simplify=False,
dynamic_export=None,
skip_postprocess=False):
input_config = {
'input_shape': input_shape,
'input_path': input_img,
'normalize_cfg': normalize_cfg
}
# prepare input
one_img, one_meta = preprocess_example_input(input_config)
img_list, img_meta_list = [one_img], [[one_meta]]
if skip_postprocess:
        warnings.warn('Not all models support exporting to ONNX without post '
                      'processing, especially two-stage detectors!')
model.forward = model.forward_dummy
torch.onnx.export(
model,
one_img,
output_file,
input_names=['input'],
export_params=True,
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=show,
opset_version=opset_version)
print(f'Successfully exported ONNX model without '
f'post process: {output_file}')
return
# replace original forward function
origin_forward = model.forward
model.forward = partial(
model.forward,
img_metas=img_meta_list,
return_loss=False,
rescale=False)
output_names = ['dets', 'labels']
if model.with_mask:
output_names.append('masks')
input_name = 'input'
dynamic_axes = None
if dynamic_export:
dynamic_axes = {
input_name: {
0: 'batch',
2: 'height',
3: 'width'
},
'dets': {
0: 'batch',
1: 'num_dets',
},
'labels': {
0: 'batch',
1: 'num_dets',
},
}
if model.with_mask:
dynamic_axes['masks'] = {0: 'batch', 1: 'num_dets'}
torch.onnx.export(
model,
img_list,
output_file,
input_names=[input_name],
output_names=output_names,
export_params=True,
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=show,
opset_version=opset_version,
dynamic_axes=dynamic_axes)
model.forward = origin_forward
# get the custom op path
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
        warnings.warn('If input model has custom op from mmcv, '
                      'you may have to build mmcv with ONNXRuntime from source.')
if do_simplify:
import onnxsim
from mmdet import digit_version
min_required_version = '0.3.0'
        assert digit_version(onnxsim.__version__) >= digit_version(
            min_required_version
        ), f'Requires onnx-simplifier>={min_required_version}'
input_dic = {'input': img_list[0].detach().cpu().numpy()}
model_opt, check_ok = onnxsim.simplify(
output_file,
input_data=input_dic,
custom_lib=ort_custom_op_path,
dynamic_input_shape=dynamic_export)
if check_ok:
onnx.save(model_opt, output_file)
print(f'Successfully simplified ONNX model: {output_file}')
else:
warnings.warn('Failed to simplify ONNX model.')
print(f'Successfully exported ONNX model: {output_file}')
if verify:
# check by onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
# wrap onnx model
onnx_model = ONNXRuntimeDetector(output_file, model.CLASSES, 0)
if dynamic_export:
# scale up to test dynamic shape
h, w = [int((_ * 1.5) // 32 * 32) for _ in input_shape[2:]]
h, w = min(1344, h), min(1344, w)
input_config['input_shape'] = (1, 3, h, w)
if test_img is None:
input_config['input_path'] = input_img
# prepare input once again
one_img, one_meta = preprocess_example_input(input_config)
img_list, img_meta_list = [one_img], [[one_meta]]
# get pytorch output
with torch.no_grad():
pytorch_results = model(
img_list,
img_metas=img_meta_list,
return_loss=False,
rescale=True)[0]
img_list = [_.cuda().contiguous() for _ in img_list]
if dynamic_export:
img_list = img_list + [_.flip(-1).contiguous() for _ in img_list]
img_meta_list = img_meta_list * 2
# get onnx output
onnx_results = onnx_model(
img_list, img_metas=img_meta_list, return_loss=False)[0]
# visualize predictions
score_thr = 0.3
if show:
out_file_ort, out_file_pt = None, None
else:
out_file_ort, out_file_pt = 'show-ort.png', 'show-pt.png'
show_img = one_meta['show_img']
model.show_result(
show_img,
pytorch_results,
score_thr=score_thr,
show=True,
win_name='PyTorch',
out_file=out_file_pt)
onnx_model.show_result(
show_img,
onnx_results,
score_thr=score_thr,
show=True,
win_name='ONNXRuntime',
out_file=out_file_ort)
# compare a part of result
if model.with_mask:
compare_pairs = list(zip(onnx_results, pytorch_results))
else:
compare_pairs = [(onnx_results, pytorch_results)]
        err_msg = 'The numerical values are different between PyTorch' + \
                  ' and ONNX, but it does not necessarily mean the' + \
                  ' exported ONNX model is problematic.'
# check the numerical value
for onnx_res, pytorch_res in compare_pairs:
for o_res, p_res in zip(onnx_res, pytorch_res):
np.testing.assert_allclose(
o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
    print('The numerical values are the same between PyTorch and ONNX')
def parse_normalize_cfg(test_pipeline):
transforms = None
for pipeline in test_pipeline:
if 'transforms' in pipeline:
transforms = pipeline['transforms']
break
assert transforms is not None, 'Failed to find `transforms`'
norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize']
    assert len(norm_config_li) == 1, 'The test pipeline should contain ' \
        'exactly one `Normalize` transform'
norm_config = norm_config_li[0]
return norm_config
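# A minimal sketch of the pipeline entry this helper looks for; the values
# are illustrative (they match the deprecated --mean/--std defaults below),
# and only the `type` key has to be 'Normalize':
#   dict(type='Normalize',
#        mean=[123.675, 116.28, 103.53],
#        std=[58.395, 57.12, 57.375],
#        to_rgb=True)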
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMDetection models to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--input-img', type=str, help='Images for input')
parser.add_argument(
'--show',
action='store_true',
help='Show onnx graph and detection outputs')
parser.add_argument('--output-file', type=str, default='tmp.onnx')
parser.add_argument('--opset-version', type=int, default=11)
parser.add_argument(
'--test-img', type=str, default=None, help='Images for test')
parser.add_argument(
'--dataset',
type=str,
default='coco',
help='Dataset name. This argument is deprecated and will be removed \
in future releases.')
parser.add_argument(
'--verify',
action='store_true',
help='verify the onnx model output against pytorch output')
parser.add_argument(
'--simplify',
action='store_true',
help='Whether to simplify onnx model.')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[800, 1216],
help='input image size')
parser.add_argument(
'--mean',
type=float,
nargs='+',
default=[123.675, 116.28, 103.53],
        help='mean value used to preprocess input data. This argument '
        'is deprecated and will be removed in future releases.')
parser.add_argument(
'--std',
type=float,
nargs='+',
default=[58.395, 57.12, 57.375],
        help='standard deviation value used to preprocess input data. '
'This argument is deprecated and will be removed in future releases.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='Override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--dynamic-export',
action='store_true',
help='Whether to export onnx with dynamic axis.')
parser.add_argument(
'--skip-postprocess',
action='store_true',
help='Whether to export model without post process. Experimental '
'option. We do not guarantee the correctness of the exported '
'model.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
    warnings.warn('Arguments like `--mean`, `--std`, `--dataset` are now '
                  'parsed directly from the config file; they are deprecated '
                  'and will be removed in future releases.')
    assert args.opset_version == 11, 'MMDet only supports opset 11 now'
try:
from mmcv.onnx.symbolic import register_extra_symbolics
except ModuleNotFoundError:
raise NotImplementedError('please update mmcv to version>=v1.0.4')
register_extra_symbolics(args.opset_version)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
if args.shape is None:
img_scale = cfg.test_pipeline[1]['img_scale']
input_shape = (1, 3, img_scale[1], img_scale[0])
elif len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (1, 3) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
# build the model and load checkpoint
model = build_model_from_cfg(args.config, args.checkpoint,
args.cfg_options)
if not args.input_img:
args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg')
normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
# convert model to onnx file
pytorch2onnx(
model,
args.input_img,
input_shape,
normalize_cfg,
opset_version=args.opset_version,
show=args.show,
output_file=args.output_file,
verify=args.verify,
test_img=args.test_img,
do_simplify=args.simplify,
dynamic_export=args.dynamic_export,
skip_postprocess=args.skip_postprocess)
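# Example invocation (config/checkpoint paths are placeholders):
#   python tools/deployment/pytorch2onnx.py \
#       configs/some_config.py some_checkpoint.pth \
#       --output-file tmp.onnx --shape 800 1216 --verify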
| 11,781 | 33.052023 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/misc/print_config.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
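# Example invocation (the config path and override are placeholders):
#   python tools/misc/print_config.py configs/some_config.py \
#       --cfg-options model.backbone.depth=101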
| 1,697 | 31.653846 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/misc/browse_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections.abc import Sequence
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
        help='skip some useless pipeline steps')
parser.add_argument(
'--output-dir',
default=None,
type=str,
        help='directory to save images when there is no display interface')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
imshow_det_bboxes(
item['img'],
item['gt_bboxes'],
item['gt_labels'],
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=(255, 102, 61),
text_color=(255, 102, 61))
progress_bar.update()
if __name__ == '__main__':
main()
| 3,261 | 29.773585 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/model_converters/selfsup2mmdet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def moco_convert(src, dst):
    """Convert keys in MoCo pretrained models to mmdet style."""
    # load the MoCo checkpoint
moco_model = torch.load(src)
blobs = moco_model['state_dict']
# convert to pytorch style
state_dict = OrderedDict()
for k, v in blobs.items():
if not k.startswith('module.encoder_q.'):
continue
old_k = k
k = k.replace('module.encoder_q.', '')
state_dict[k] = v
print(old_k, '->', k)
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
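# Illustrative effect of the loop above: the query-encoder prefix is
# stripped and every other key is dropped, e.g.
#   'module.encoder_q.conv1.weight' -> 'conv1.weight'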
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src selfsup model path')
parser.add_argument('dst', help='save path')
parser.add_argument(
        '--selfsup',
        type=str,
        choices=['moco', 'swav'],
        help='self-supervised method of the source checkpoint')
args = parser.parse_args()
if args.selfsup == 'moco':
moco_convert(args.src, args.dst)
elif args.selfsup == 'swav':
print('SWAV does not need to convert the keys')
if __name__ == '__main__':
main()
| 1,243 | 27.930233 | 74 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/model_converters/publish_model.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
from mmdet import digit_version
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
    # compare parsed versions; a plain string comparison misorders
    # versions such as '1.10' vs '1.6'
    if digit_version(torch.__version__) >= digit_version('1.6'):
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
final_file = out_file_name + f'-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
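# Example invocation (filenames are placeholders); the published checkpoint
# gets the first 8 characters of its sha256 appended:
#   python tools/model_converters/publish_model.py latest.pth faster_rcnn.pth
#   -> faster_rcnn-{sha256[:8]}.pth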
| 1,301 | 28.590909 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/model_converters/regnet2mmdet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('stem.conv', 'conv1')
new_key = new_key.replace('stem.bn', 'bn1')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('head.fc', 'fc')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
split_keys = model_key.split('.')
layer, block, module = split_keys[:3]
block_id = int(block[1:])
layer_name = f'layer{int(layer[1:])}'
block_name = f'{block_id - 1}'
if block_id == 1 and module == 'bn':
new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
elif block_id == 1 and module == 'proj':
new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
elif module == 'f':
if split_keys[3] == 'a_bn':
module_name = 'bn1'
elif split_keys[3] == 'b_bn':
module_name = 'bn2'
elif split_keys[3] == 'c_bn':
module_name = 'bn3'
elif split_keys[3] == 'a':
module_name = 'conv1'
elif split_keys[3] == 'b':
module_name = 'conv2'
        elif split_keys[3] == 'c':
            module_name = 'conv3'
        else:
            raise ValueError(f'Unsupported conversion of key {model_key}')
        new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
print(f'Convert {model_key} to {new_key}')
state_dict[new_key] = model_weight
converted_names.add(model_key)
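# Illustrative mappings produced by convert_reslayer (made-up pycls keys):
#   's1.b1.proj.weight' -> 'layer1.0.downsample.0.weight'
#   's2.b3.f.a_bn.bias' -> 'layer2.2.bn1.bias'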
def convert(src, dst):
"""Convert keys in pycls pretrained RegNet models to mmdet style."""
# load caffe model
regnet_model = torch.load(src)
blobs = regnet_model['model_state']
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
for key, weight in blobs.items():
if 'stem' in key:
convert_stem(key, weight, state_dict, converted_names)
elif 'head' in key:
convert_head(key, weight, state_dict, converted_names)
elif key.startswith('s'):
convert_reslayer(key, weight, state_dict, converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
print(f'not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src detectron model path')
parser.add_argument('dst', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
| 3,063 | 32.67033 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/model_converters/upgrade_model_version.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import re
import tempfile
from collections import OrderedDict
import torch
from mmcv import Config
def is_head(key):
valid_head_list = [
'bbox_head', 'mask_head', 'semantic_head', 'grid_head', 'mask_iou_head'
]
return any(key.startswith(h) for h in valid_head_list)
def parse_config(config_strings):
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
is_two_stage = True
is_ssd = False
is_retina = False
reg_cls_agnostic = False
if 'rpn_head' not in config.model:
is_two_stage = False
# check whether it is SSD
if config.model.bbox_head.type == 'SSDHead':
is_ssd = True
elif config.model.bbox_head.type == 'RetinaHead':
is_retina = True
elif isinstance(config.model['bbox_head'], list):
reg_cls_agnostic = True
elif 'reg_class_agnostic' in config.model.bbox_head:
reg_cls_agnostic = config.model.bbox_head \
.reg_class_agnostic
temp_file.close()
return is_two_stage, is_ssd, is_retina, reg_cls_agnostic
def reorder_cls_channel(val, num_classes=81):
# bias
if val.dim() == 1:
new_val = torch.cat((val[1:], val[:1]), dim=0)
# weight
else:
out_channels, in_channels = val.shape[:2]
# conv_cls for softmax output
if out_channels != num_classes and out_channels % num_classes == 0:
new_val = val.reshape(-1, num_classes, in_channels, *val.shape[2:])
new_val = torch.cat((new_val[:, 1:], new_val[:, :1]), dim=1)
new_val = new_val.reshape(val.size())
# fc_cls
elif out_channels == num_classes:
new_val = torch.cat((val[1:], val[:1]), dim=0)
# agnostic | retina_cls | rpn_cls
else:
new_val = val
return new_val
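# Illustrative effect on a fc_cls bias with num_classes=81: the background
# channel moves from the front (old convention) to the back (new convention):
#   [bg, cls_1, ..., cls_80] -> [cls_1, ..., cls_80, bg]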
def truncate_cls_channel(val, num_classes=81):
# bias
if val.dim() == 1:
if val.size(0) % num_classes == 0:
new_val = val[:num_classes - 1]
else:
new_val = val
# weight
else:
out_channels, in_channels = val.shape[:2]
# conv_logits
if out_channels % num_classes == 0:
new_val = val.reshape(num_classes, in_channels, *val.shape[2:])[1:]
new_val = new_val.reshape(-1, *val.shape[1:])
# agnostic
else:
new_val = val
return new_val
def truncate_reg_channel(val, num_classes=81):
# bias
if val.dim() == 1:
# fc_reg | rpn_reg
if val.size(0) % num_classes == 0:
new_val = val.reshape(num_classes, -1)[:num_classes - 1]
new_val = new_val.reshape(-1)
# agnostic
else:
new_val = val
# weight
else:
out_channels, in_channels = val.shape[:2]
# fc_reg | rpn_reg
if out_channels % num_classes == 0:
new_val = val.reshape(num_classes, -1, in_channels,
*val.shape[2:])[1:]
new_val = new_val.reshape(-1, *val.shape[1:])
# agnostic
else:
new_val = val
return new_val
def convert(in_file, out_file, num_classes):
"""Convert keys in checkpoints.
There can be some breaking changes during the development of mmdetection,
and this tool is used for upgrading checkpoints trained with old versions
to the latest one.
"""
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
meta_info = checkpoint['meta']
is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config(
'#' + meta_info['config'])
if meta_info['mmdet_version'] <= '0.5.3' and is_retina:
upgrade_retina = True
else:
upgrade_retina = False
# MMDetection v2.5.0 unifies the class order in RPN
# if the model is trained in version<v2.5.0
# The RPN model should be upgraded to be used in version>=2.5.0
if meta_info['mmdet_version'] < '2.5.0':
upgrade_rpn = True
else:
upgrade_rpn = False
for key, val in in_state_dict.items():
new_key = key
new_val = val
if is_two_stage and is_head(key):
new_key = 'roi_head.{}'.format(key)
# classification
if upgrade_rpn:
m = re.search(
r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|'
r'fovea_cls).(weight|bias)', new_key)
else:
m = re.search(
r'(conv_cls|retina_cls|fc_cls|fcos_cls|'
r'fovea_cls).(weight|bias)', new_key)
if m is not None:
print(f'reorder cls channels of {new_key}')
new_val = reorder_cls_channel(val, num_classes)
# regression
if upgrade_rpn:
m = re.search(r'(fc_reg).(weight|bias)', new_key)
else:
m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key)
if m is not None and not reg_cls_agnostic:
print(f'truncate regression channels of {new_key}')
new_val = truncate_reg_channel(val, num_classes)
# mask head
m = re.search(r'(conv_logits).(weight|bias)', new_key)
if m is not None:
print(f'truncate mask prediction channels of {new_key}')
new_val = truncate_cls_channel(val, num_classes)
m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
# Legacy issues in RetinaNet since V1.x
# Use ConvModule instead of nn.Conv2d in RetinaNet
# cls_convs.0.weight -> cls_convs.0.conv.weight
if m is not None and upgrade_retina:
param = m.groups()[1]
new_key = key.replace(param, f'conv.{param}')
out_state_dict[new_key] = val
print(f'rename the name of {key} to {new_key}')
continue
m = re.search(r'(cls_convs).\d.(weight|bias)', key)
if m is not None and is_ssd:
print(f'reorder cls channels of {new_key}')
new_val = reorder_cls_channel(val, num_classes)
out_state_dict[new_key] = new_val
checkpoint['state_dict'] = out_state_dict
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade model version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
parser.add_argument(
'--num-classes',
type=int,
default=81,
help='number of classes of the original model')
args = parser.parse_args()
convert(args.in_file, args.out_file, args.num_classes)
if __name__ == '__main__':
main()
| 6,848 | 31.459716 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/model_converters/upgrade_ssd_version.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmdet import digit_version
from mmcv import Config
def parse_config(config_strings):
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
# check whether it is SSD
if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not an SSD model.')
def convert(in_file, out_file):
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
meta_info = checkpoint['meta']
parse_config('#' + meta_info['config'])
for key, value in in_state_dict.items():
if 'extra' in key:
layer_idx = int(key.split('.')[2])
new_key = 'neck.extra_layers.{}.{}.conv.'.format(
layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
elif 'l2_norm' in key:
new_key = 'neck.l2_norm.weight'
elif 'bbox_head' in key:
new_key = key[:21] + '.0' + key[21:]
else:
new_key = key
out_state_dict[new_key] = value
checkpoint['state_dict'] = out_state_dict
    # compare parsed versions; a plain string comparison misorders
    # versions such as '1.10' vs '1.6'
    if digit_version(torch.__version__) >= digit_version('1.6'):
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade SSD version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
args = parser.parse_args()
convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| 1,789 | 29.338983 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/model_converters/detectron2pytorch.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import mmcv
import torch
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
# detectron replace bn with affine channel layer
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_s'])
bn_size = state_dict[torch_name + '.weight'].size()
state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size)
state_dict[torch_name + '.running_var'] = torch.ones(bn_size)
converted_names.add(caffe_name + '_b')
converted_names.add(caffe_name + '_s')
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
converted_names):
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_w'])
converted_names.add(caffe_name + '_w')
if caffe_name + '_b' in blobs:
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
converted_names.add(caffe_name + '_b')
def convert(src, dst, depth):
"""Convert keys in detectron pretrained ResNet models to pytorch style."""
# load arch_settings
if depth not in arch_settings:
        raise ValueError('Only ResNet-50 and ResNet-101 are supported currently')
block_nums = arch_settings[depth]
# load caffe model
caffe_model = mmcv.load(src, encoding='latin1')
blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
for i in range(1, len(block_nums) + 1):
for j in range(block_nums[i - 1]):
if j == 0:
convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',
f'layer{i}.{j}.downsample.0', converted_names)
convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',
f'layer{i}.{j}.downsample.1', converted_names)
for k, letter in enumerate(['a', 'b', 'c']):
convert_conv_fc(blobs, state_dict,
f'res{i + 1}_{j}_branch2{letter}',
f'layer{i}.{j}.conv{k+1}', converted_names)
convert_bn(blobs, state_dict,
f'res{i + 1}_{j}_branch2{letter}_bn',
f'layer{i}.{j}.bn{k + 1}', converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
            print(f'Not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src detectron model path')
parser.add_argument('dst', help='save path')
parser.add_argument('depth', type=int, help='ResNet model depth')
args = parser.parse_args()
convert(args.src, args.dst, args.depth)
if __name__ == '__main__':
main()
| 3,578 | 41.607143 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/dataset_converters/images2coco.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import mmcv
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser(
description='Convert images to coco format without annotations')
parser.add_argument('img_path', help='The root path of images')
parser.add_argument(
        'classes', type=str, help='The text file that stores the class list')
parser.add_argument(
'out',
type=str,
        help='The output annotation json file name. The save dir is in the '
'same directory as img_path')
parser.add_argument(
'-e',
'--exclude-extensions',
type=str,
nargs='+',
help='The suffix of images to be excluded, such as "png" and "bmp"')
args = parser.parse_args()
return args
def collect_image_infos(path, exclude_extensions=None):
    img_infos = []
    if exclude_extensions is not None:
        # str.endswith needs a str or tuple; argparse supplies a list
        exclude_extensions = tuple(exclude_extensions)
    images_generator = mmcv.scandir(path, recursive=True)
    for image_path in mmcv.track_iter_progress(list(images_generator)):
        if exclude_extensions is None or not image_path.lower().endswith(
                exclude_extensions):
image_path = os.path.join(path, image_path)
img_pillow = Image.open(image_path)
img_info = {
'filename': image_path,
'width': img_pillow.width,
'height': img_pillow.height,
}
img_infos.append(img_info)
return img_infos
def cvt_to_coco_json(img_infos, classes):
image_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
for category_id, name in enumerate(classes):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for img_dict in img_infos:
file_name = img_dict['filename']
assert file_name not in image_set
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(img_dict['height'])
image_item['width'] = int(img_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
image_id += 1
return coco
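# Sketch of the dict returned above (field values are illustrative; the
# 'annotations' list stays empty because the images carry no labels):
#   {'images': [{'id': 0, 'file_name': 'a.jpg', 'height': 480, 'width': 640}],
#    'type': 'instance',
#    'categories': [{'supercategory': 'none', 'id': 0, 'name': 'person'}],
#    'annotations': []}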
def main():
args = parse_args()
assert args.out.endswith(
        'json'), 'The output file name must have a json suffix'
# 1 load image list info
img_infos = collect_image_infos(args.img_path, args.exclude_extensions)
# 2 convert to coco format data
classes = mmcv.list_from_file(args.classes)
coco_info = cvt_to_coco_json(img_infos, classes)
# 3 dump
save_dir = os.path.join(args.img_path, '..', 'annotations')
mmcv.mkdir_or_exist(save_dir)
save_path = os.path.join(save_dir, args.out)
mmcv.dump(coco_info, save_path)
print(f'save json file: {save_path}')
if __name__ == '__main__':
main()
| 3,109 | 29.490196 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/dataset_converters/cityscapes.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import cityscapesscripts.helpers.labels as CSLabels
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
def collect_files(img_dir, gt_dir):
suffix = 'leftImg8bit.png'
files = []
for img_file in glob.glob(osp.join(img_dir, '**/*.png')):
assert img_file.endswith(suffix), img_file
inst_file = gt_dir + img_file[
len(img_dir):-len(suffix)] + 'gtFine_instanceIds.png'
# Note that labelIds are not converted to trainId for seg map
segm_file = gt_dir + img_file[
len(img_dir):-len(suffix)] + 'gtFine_labelIds.png'
files.append((img_file, inst_file, segm_file))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files
def collect_annotations(files, nproc=1):
print('Loading annotation images')
if nproc > 1:
images = mmcv.track_parallel_progress(
load_img_info, files, nproc=nproc)
else:
images = mmcv.track_progress(load_img_info, files)
return images
def load_img_info(files):
img_file, inst_file, segm_file = files
inst_img = mmcv.imread(inst_file, 'unchanged')
# ids < 24 are stuff labels (filtering them first is about 5% faster)
unique_inst_ids = np.unique(inst_img[inst_img >= 24])
anno_info = []
for inst_id in unique_inst_ids:
# For non-crowd annotations, inst_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = inst_id // 1000 if inst_id >= 1000 else inst_id
label = CSLabels.id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
category_id = label.id
iscrowd = int(inst_id < 1000)
mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F')
mask_rle = maskUtils.encode(mask[:, :, None])[0]
area = maskUtils.area(mask_rle)
# convert to COCO style XYWH format
bbox = maskUtils.toBbox(mask_rle)
# for json encoding
mask_rle['counts'] = mask_rle['counts'].decode()
anno = dict(
iscrowd=iscrowd,
category_id=category_id,
bbox=bbox.tolist(),
area=area.tolist(),
segmentation=mask_rle)
anno_info.append(anno)
video_name = osp.basename(osp.dirname(img_file))
img_info = dict(
# remove img_prefix for filename
file_name=osp.join(video_name, osp.basename(img_file)),
height=inst_img.shape[0],
width=inst_img.shape[1],
anno_info=anno_info,
segm_file=osp.join(video_name, osp.basename(segm_file)))
return img_info
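# Illustrative decoding of gtFine_instanceIds.png values by the loop above
# (26 is the Cityscapes label id for 'car'):
#   pixel value 26001 -> label_id 26, one distinct instance, iscrowd=0
#   pixel value 26    -> label_id 26, a crowd region, iscrowd=1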
def cvt_annotations(image_infos, out_json_name):
out_json = dict()
img_id = 0
ann_id = 0
out_json['images'] = []
out_json['categories'] = []
out_json['annotations'] = []
for image_info in image_infos:
image_info['id'] = img_id
anno_infos = image_info.pop('anno_info')
out_json['images'].append(image_info)
for anno_info in anno_infos:
anno_info['image_id'] = img_id
anno_info['id'] = ann_id
out_json['annotations'].append(anno_info)
ann_id += 1
img_id += 1
for label in CSLabels.labels:
if label.hasInstances and not label.ignoreInEval:
cat = dict(id=label.id, name=label.name)
out_json['categories'].append(cat)
if len(out_json['annotations']) == 0:
out_json.pop('annotations')
mmcv.dump(out_json, out_json_name)
return out_json
def parse_args():
parser = argparse.ArgumentParser(
description='Convert Cityscapes annotations to COCO format')
parser.add_argument('cityscapes_path', help='cityscapes data path')
parser.add_argument('--img-dir', default='leftImg8bit', type=str)
parser.add_argument('--gt-dir', default='gtFine', type=str)
parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument(
        '--nproc', default=1, type=int, help='number of processes')
args = parser.parse_args()
return args
def main():
args = parse_args()
cityscapes_path = args.cityscapes_path
out_dir = args.out_dir if args.out_dir else cityscapes_path
mmcv.mkdir_or_exist(out_dir)
img_dir = osp.join(cityscapes_path, args.img_dir)
gt_dir = osp.join(cityscapes_path, args.gt_dir)
set_name = dict(
train='instancesonly_filtered_gtFine_train.json',
val='instancesonly_filtered_gtFine_val.json',
test='instancesonly_filtered_gtFine_test.json')
for split, json_name in set_name.items():
print(f'Converting {split} into {json_name}')
with mmcv.Timer(
print_tmpl='It took {}s to convert Cityscapes annotation'):
files = collect_files(
osp.join(img_dir, split), osp.join(gt_dir, split))
image_infos = collect_annotations(files, nproc=args.nproc)
cvt_annotations(image_infos, osp.join(out_dir, json_name))
if __name__ == '__main__':
main()
| 5,172 | 32.810458 | 75 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/dataset_converters/pascal_voc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
import numpy as np
from mmdet.core import voc_classes
label_ids = {name: i for i, name in enumerate(voc_classes())}
def parse_xml(args):
xml_path, img_path = args
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
bboxes = []
labels = []
bboxes_ignore = []
labels_ignore = []
for obj in root.findall('object'):
name = obj.find('name').text
label = label_ids[name]
difficult = int(obj.find('difficult').text)
bnd_box = obj.find('bndbox')
bbox = [
int(bnd_box.find('xmin').text),
int(bnd_box.find('ymin').text),
int(bnd_box.find('xmax').text),
int(bnd_box.find('ymax').text)
]
if difficult:
bboxes_ignore.append(bbox)
labels_ignore.append(label)
else:
bboxes.append(bbox)
labels.append(label)
if not bboxes:
bboxes = np.zeros((0, 4))
labels = np.zeros((0, ))
else:
bboxes = np.array(bboxes, ndmin=2) - 1
labels = np.array(labels)
if not bboxes_ignore:
bboxes_ignore = np.zeros((0, 4))
labels_ignore = np.zeros((0, ))
else:
bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
labels_ignore = np.array(labels_ignore)
annotation = {
'filename': img_path,
'width': w,
'height': h,
'ann': {
'bboxes': bboxes.astype(np.float32),
'labels': labels.astype(np.int64),
'bboxes_ignore': bboxes_ignore.astype(np.float32),
'labels_ignore': labels_ignore.astype(np.int64)
}
}
return annotation
def cvt_annotations(devkit_path, years, split, out_file):
if not isinstance(years, list):
years = [years]
annotations = []
for year in years:
filelist = osp.join(devkit_path,
f'VOC{year}/ImageSets/Main/{split}.txt')
if not osp.isfile(filelist):
print(f'filelist does not exist: {filelist}, '
f'skip voc{year} {split}')
return
img_names = mmcv.list_from_file(filelist)
xml_paths = [
osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml')
for img_name in img_names
]
img_paths = [
f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names
]
part_annotations = mmcv.track_progress(parse_xml,
list(zip(xml_paths, img_paths)))
annotations.extend(part_annotations)
if out_file.endswith('json'):
annotations = cvt_to_coco_json(annotations)
mmcv.dump(annotations, out_file)
return annotations
def cvt_to_coco_json(annotations):
image_id = 0
annotation_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
def addAnnItem(annotation_id, image_id, category_id, bbox, difficult_flag):
annotation_item = dict()
annotation_item['segmentation'] = []
seg = []
# bbox[] is x1,y1,x2,y2
# left_top
seg.append(int(bbox[0]))
seg.append(int(bbox[1]))
# left_bottom
seg.append(int(bbox[0]))
seg.append(int(bbox[3]))
# right_bottom
seg.append(int(bbox[2]))
seg.append(int(bbox[3]))
# right_top
seg.append(int(bbox[2]))
seg.append(int(bbox[1]))
annotation_item['segmentation'].append(seg)
xywh = np.array(
[bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]])
annotation_item['area'] = int(xywh[2] * xywh[3])
if difficult_flag == 1:
annotation_item['ignore'] = 0
annotation_item['iscrowd'] = 1
else:
annotation_item['ignore'] = 0
annotation_item['iscrowd'] = 0
annotation_item['image_id'] = int(image_id)
annotation_item['bbox'] = xywh.astype(int).tolist()
annotation_item['category_id'] = int(category_id)
annotation_item['id'] = int(annotation_id)
coco['annotations'].append(annotation_item)
return annotation_id + 1
for category_id, name in enumerate(voc_classes()):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for ann_dict in annotations:
file_name = ann_dict['filename']
ann = ann_dict['ann']
assert file_name not in image_set
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(ann_dict['height'])
image_item['width'] = int(ann_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
bboxes = ann['bboxes'][:, :4]
labels = ann['labels']
for bbox_id in range(len(bboxes)):
bbox = bboxes[bbox_id]
label = labels[bbox_id]
annotation_id = addAnnItem(
annotation_id, image_id, label, bbox, difficult_flag=0)
bboxes_ignore = ann['bboxes_ignore'][:, :4]
labels_ignore = ann['labels_ignore']
for bbox_id in range(len(bboxes_ignore)):
bbox = bboxes_ignore[bbox_id]
label = labels_ignore[bbox_id]
annotation_id = addAnnItem(
annotation_id, image_id, label, bbox, difficult_flag=1)
image_id += 1
return coco
def parse_args():
parser = argparse.ArgumentParser(
description='Convert PASCAL VOC annotations to mmdetection format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument(
'--out-format',
default='pkl',
choices=('pkl', 'coco'),
help='output format, "coco" indicates coco annotation format')
args = parser.parse_args()
return args
def main():
args = parse_args()
devkit_path = args.devkit_path
out_dir = args.out_dir if args.out_dir else devkit_path
mmcv.mkdir_or_exist(out_dir)
years = []
if osp.isdir(osp.join(devkit_path, 'VOC2007')):
years.append('2007')
if osp.isdir(osp.join(devkit_path, 'VOC2012')):
years.append('2012')
if '2007' in years and '2012' in years:
years.append(['2007', '2012'])
if not years:
raise IOError(f'The devkit path {devkit_path} contains neither '
'"VOC2007" nor "VOC2012" subfolder')
out_fmt = f'.{args.out_format}'
if args.out_format == 'coco':
out_fmt = '.json'
for year in years:
if year == '2007':
prefix = 'voc07'
elif year == '2012':
prefix = 'voc12'
elif year == ['2007', '2012']:
prefix = 'voc0712'
for split in ['train', 'val', 'trainval']:
dataset_name = prefix + '_' + split
print(f'processing {dataset_name} ...')
cvt_annotations(devkit_path, year, split,
osp.join(out_dir, dataset_name + out_fmt))
if not isinstance(year, list):
dataset_name = prefix + '_test'
print(f'processing {dataset_name} ...')
cvt_annotations(devkit_path, year, 'test',
osp.join(out_dir, dataset_name + out_fmt))
print('Done!')
if __name__ == '__main__':
main()
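# Example invocation (the devkit path is a placeholder):
#   python tools/dataset_converters/pascal_voc.py data/VOCdevkit \
#       -o data/voc_annotations --out-format coco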
| 7,841 | 31.94958 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/analysis_tools/analyze_results.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.evaluation import eval_map
from mmdet.core.visualization import imshow_gt_det_bboxes
from mmdet.datasets import build_dataset, get_loading_pipeline
def bbox_map_eval(det_result, annotation):
"""Evaluate mAP of single image det result.
Args:
det_result (list[list]): [[cls1_det, cls2_det, ...], ...].
The outer list indicates images, and the inner list indicates
per-class detected bboxes.
annotation (dict): Ground truth annotations where keys of
annotations are:
- bboxes: numpy array of shape (n, 4)
- labels: numpy array of shape (n, )
- bboxes_ignore (optional): numpy array of shape (k, 4)
- labels_ignore (optional): numpy array of shape (k, )
Returns:
float: mAP
"""
# use only bbox det result
if isinstance(det_result, tuple):
bbox_det_result = [det_result[0]]
else:
bbox_det_result = [det_result]
# mAP
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
mean_aps = []
for thr in iou_thrs:
mean_ap, _ = eval_map(
bbox_det_result, [annotation], iou_thr=thr, logger='silent')
mean_aps.append(mean_ap)
return sum(mean_aps) / len(mean_aps)
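# For reference, the linspace above yields the ten COCO-style IoU thresholds
#   [0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95]
# so the returned value is mAP averaged over IoU 0.5:0.95.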
class ResultVisualizer:
"""Display and save evaluation results.
Args:
        show (bool): Whether to show the image. Default: False
wait_time (float): Value of waitKey param. Default: 0.
score_thr (float): Minimum score of bboxes to be shown.
Default: 0
"""
def __init__(self, show=False, wait_time=0, score_thr=0):
self.show = show
self.wait_time = wait_time
self.score_thr = score_thr
def _save_image_gts_results(self, dataset, results, mAPs, out_dir=None):
mmcv.mkdir_or_exist(out_dir)
for mAP_info in mAPs:
index, mAP = mAP_info
data_info = dataset.prepare_train_img(index)
# calc save file path
filename = data_info['filename']
            if data_info['img_prefix'] is not None:
                filename = osp.join(data_info['img_prefix'], filename)
fname, name = osp.splitext(osp.basename(filename))
save_filename = fname + '_' + str(round(mAP, 3)) + name
out_file = osp.join(out_dir, save_filename)
imshow_gt_det_bboxes(
data_info['img'],
data_info,
results[index],
dataset.CLASSES,
show=self.show,
score_thr=self.score_thr,
wait_time=self.wait_time,
out_file=out_file)
def evaluate_and_show(self,
dataset,
results,
topk=20,
show_dir='work_dir',
eval_fn=None):
"""Evaluate and show results.
Args:
dataset (Dataset): A PyTorch dataset.
results (list): Det results from test results pkl file
            topk (int): Number of images with the highest and lowest
                mAP to save after sorting. Default: 20
show_dir (str, optional): The filename to write the image.
Default: 'work_dir'
eval_fn (callable, optional): Eval function, Default: None
"""
assert topk > 0
if (topk * 2) > len(dataset):
topk = len(dataset) // 2
if eval_fn is None:
eval_fn = bbox_map_eval
else:
assert callable(eval_fn)
prog_bar = mmcv.ProgressBar(len(results))
_mAPs = {}
        for i, result in enumerate(results):
            # dataset[i] should not be called directly
            # because there is a risk of index mismatch
data_info = dataset.prepare_train_img(i)
mAP = eval_fn(result, data_info['ann_info'])
_mAPs[i] = mAP
prog_bar.update()
        # sort ascending by mAP: the first topk entries are the worst,
        # the last topk the best
_mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1]))
good_mAPs = _mAPs[-topk:]
bad_mAPs = _mAPs[:topk]
good_dir = osp.abspath(osp.join(show_dir, 'good'))
bad_dir = osp.abspath(osp.join(show_dir, 'bad'))
self._save_image_gts_results(dataset, results, good_mAPs, good_dir)
self._save_image_gts_results(dataset, results, bad_mAPs, bad_dir)
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet eval image prediction result for each')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'prediction_path', help='prediction path where test pkl result')
parser.add_argument(
'show_dir', help='directory where painted images will be saved')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--wait-time',
type=float,
default=0,
        help='the interval of show (s); 0 means blocking')
parser.add_argument(
'--topk',
default=20,
type=int,
        help='number of images with the highest and lowest mAP '
        'to save after sorting')
parser.add_argument(
'--show-score-thr',
type=float,
default=0,
help='score threshold (default: 0.)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
mmcv.check_file_exist(args.prediction_path)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
cfg.data.test.pop('samples_per_gpu', 0)
cfg.data.test.pipeline = get_loading_pipeline(cfg.data.train.pipeline)
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.prediction_path)
result_visualizer = ResultVisualizer(args.show, args.wait_time,
args.show_score_thr)
result_visualizer.evaluate_and_show(
dataset, outputs, topk=args.topk, show_dir=args.show_dir)
if __name__ == '__main__':
main()
| 6,897 | 33.49 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/analysis_tools/eval_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
from mmcv import Config, DictAction
from mmdet.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='Evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
assert args.eval or args.format_only, (
'Please specify at least one operation (eval/format the results) with '
'the argument "--eval", "--format-only"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.pkl_results)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
| 2,919 | 35.049383 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/analysis_tools/benchmark.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import time
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import init_dist, load_checkpoint, wrap_fp16_model
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
parser = argparse.ArgumentParser(description='MMDet benchmark a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
help='number of repeat times of measurement for averaging the results')
parser.add_argument(
'--max-iter', type=int, default=2000, help='num of max iter')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn; this will slightly increase '
'the inference speed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def measure_inference_speed(cfg, checkpoint, max_iter, log_interval,
is_fuse_conv_bn):
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# build the dataloader
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
# Because multiple processes will occupy additional CPU resources,
# FPS statistics will be more unstable when workers_per_gpu is not 0.
# It is reasonable to set workers_per_gpu to 0.
workers_per_gpu=0,
dist=True,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, checkpoint, map_location='cpu')
if is_fuse_conv_bn:
model = fuse_conv_bn(model)
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
model.eval()
# the first several iterations may be very slow so skip them
num_warmup = 5
pure_inf_time = 0
fps = 0
    # benchmark with up to max_iter (default 2000) images and take the average
for i, data in enumerate(data_loader):
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
model(return_loss=False, rescale=True, **data)
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
if i >= num_warmup:
pure_inf_time += elapsed
if (i + 1) % log_interval == 0:
fps = (i + 1 - num_warmup) / pure_inf_time
print(
f'Done image [{i + 1:<3}/ {max_iter}], '
f'fps: {fps:.1f} img / s, '
f'times per image: {1000 / fps:.1f} ms / img',
flush=True)
if (i + 1) == max_iter:
fps = (i + 1 - num_warmup) / pure_inf_time
print(
f'Overall fps: {fps:.1f} img / s, '
f'times per image: {1000 / fps:.1f} ms / img',
flush=True)
break
return fps
def repeat_measure_inference_speed(cfg,
checkpoint,
max_iter,
log_interval,
is_fuse_conv_bn,
repeat_num=1):
assert repeat_num >= 1
fps_list = []
for _ in range(repeat_num):
        # use a fresh config copy per run since the measurement mutates cfg
cp_cfg = copy.deepcopy(cfg)
fps_list.append(
measure_inference_speed(cp_cfg, checkpoint, max_iter, log_interval,
is_fuse_conv_bn))
if repeat_num > 1:
fps_list_ = [round(fps, 1) for fps in fps_list]
        times_per_image_list_ = [round(1000 / fps, 1) for fps in fps_list]
        mean_fps_ = sum(fps_list_) / len(fps_list_)
        mean_times_per_image_ = sum(times_per_image_list_) / len(
            times_per_image_list_)
        print(
            f'Overall fps: {fps_list_}[{mean_fps_:.1f}] img / s, '
            f'times per image: '
            f'{times_per_image_list_}[{mean_times_per_image_:.1f}] ms / img',
flush=True)
return fps_list
return fps_list[0]
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
if args.launcher == 'none':
raise NotImplementedError('Only supports distributed mode')
else:
init_dist(args.launcher, **cfg.dist_params)
repeat_measure_inference_speed(cfg, args.checkpoint, args.max_iter,
args.log_interval, args.fuse_conv_bn,
args.repeat_num)
if __name__ == '__main__':
main()
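# The script only runs in distributed mode (see main above); a single-GPU
# launch could look like this (config/checkpoint paths are placeholders):
#   python -m torch.distributed.launch --nproc_per_node=1 \
#       tools/analysis_tools/benchmark.py some_config.py some_ckpt.pth \
#       --launcher pytorch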
| 6,416 | 33.132979 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/analysis_tools/optimize_anchors.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Optimize anchor settings on a specific dataset.
This script provides two methods to optimize YOLO anchors: k-means anchor
clustering and differential evolution. You can use ``--algorithm k-means``
or ``--algorithm differential_evolution`` to switch between them.
Example:
Use k-means anchor cluster::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
Use differential evolution to optimize anchors::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm differential_evolution \
--input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
"""
import argparse
import os.path as osp
import mmcv
import numpy as np
import torch
from mmcv import Config
from scipy.optimize import differential_evolution
from mmdet.core import bbox_cxcywh_to_xyxy, bbox_overlaps, bbox_xyxy_to_cxcywh
from mmdet.datasets import build_dataset
from mmdet.utils import get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Optimize anchor parameters.')
parser.add_argument('config', help='Train config file path.')
parser.add_argument(
'--device', default='cuda:0', help='Device used for calculating.')
parser.add_argument(
'--input-shape',
type=int,
nargs='+',
default=[608, 608],
help='input image size')
parser.add_argument(
'--algorithm',
default='differential_evolution',
        help='Algorithm used for anchor optimizing. '
        'Supports k-means and differential_evolution for YOLO.')
parser.add_argument(
'--iters',
default=1000,
type=int,
help='Maximum iterations for optimizer.')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='Path to save anchor optimize result.')
args = parser.parse_args()
return args
class BaseAnchorOptimizer:
"""Base class for anchor optimizer.
Args:
dataset (obj:`Dataset`): Dataset object.
input_shape (list[int]): Input image shape of the model.
Format in [width, height].
logger (obj:`logging.Logger`): The logger for logging.
device (str, optional): Device used for calculating.
Default: 'cuda:0'
out_dir (str, optional): Path to save anchor optimize result.
Default: None
"""
def __init__(self,
dataset,
input_shape,
logger,
device='cuda:0',
out_dir=None):
self.dataset = dataset
self.input_shape = input_shape
self.logger = logger
self.device = device
self.out_dir = out_dir
bbox_whs, img_shapes = self.get_whs_and_shapes()
ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape])
# resize to input shape
self.bbox_whs = bbox_whs / ratios
def get_whs_and_shapes(self):
"""Get widths and heights of bboxes and shapes of images.
Returns:
tuple[np.ndarray]: Array of bbox shapes and array of image
shapes with shape (num_bboxes, 2) in [width, height] format.
"""
self.logger.info('Collecting bboxes from annotation...')
bbox_whs = []
img_shapes = []
prog_bar = mmcv.ProgressBar(len(self.dataset))
for idx in range(len(self.dataset)):
ann = self.dataset.get_ann_info(idx)
data_info = self.dataset.data_infos[idx]
img_shape = np.array([data_info['width'], data_info['height']])
gt_bboxes = ann['bboxes']
for bbox in gt_bboxes:
wh = bbox[2:4] - bbox[0:2]
img_shapes.append(img_shape)
bbox_whs.append(wh)
prog_bar.update()
print('\n')
bbox_whs = np.array(bbox_whs)
img_shapes = np.array(img_shapes)
self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.')
return bbox_whs, img_shapes
def get_zero_center_bbox_tensor(self):
"""Get a tensor of bboxes centered at (0, 0).
Returns:
Tensor: Tensor of bboxes with shape (num_bboxes, 4)
in [xmin, ymin, xmax, ymax] format.
"""
whs = torch.from_numpy(self.bbox_whs).to(
self.device, dtype=torch.float32)
bboxes = bbox_cxcywh_to_xyxy(
torch.cat([torch.zeros_like(whs), whs], dim=1))
return bboxes
def optimize(self):
raise NotImplementedError
def save_result(self, anchors, path=None):
anchor_results = []
for w, h in anchors:
anchor_results.append([round(w), round(h)])
        self.logger.info(f'Anchor optimize result: {anchor_results}')
if path:
json_path = osp.join(path, 'anchor_optimize_result.json')
mmcv.dump(anchor_results, json_path)
self.logger.info(f'Result saved in {json_path}')
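# The dumped JSON is a plain list of [width, height] pairs; a 9-anchor result
# might look like this (values are illustrative, taken from the well-known
# YOLOv3 defaults):
#   [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
#    [59, 119], [116, 90], [156, 198], [373, 326]]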
class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer):
    r"""YOLO anchor optimizer using k-means. Code refers to `AlexeyAB/darknet
    <https://github.com/AlexeyAB/darknet/blob/master/src/detector.c>`_.
Args:
num_anchors (int) : Number of anchors.
iters (int): Maximum iterations for k-means.
"""
def __init__(self, num_anchors, iters, **kwargs):
super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs)
self.num_anchors = num_anchors
self.iters = iters
def optimize(self):
anchors = self.kmeans_anchors()
self.save_result(anchors, self.out_dir)
def kmeans_anchors(self):
self.logger.info(
            f'Start clustering {self.num_anchors} YOLO anchors with K-means...')
bboxes = self.get_zero_center_bbox_tensor()
cluster_center_idx = torch.randint(
0, bboxes.shape[0], (self.num_anchors, )).to(self.device)
assignments = torch.zeros((bboxes.shape[0], )).to(self.device)
cluster_centers = bboxes[cluster_center_idx]
if self.num_anchors == 1:
cluster_centers = self.kmeans_maximization(bboxes, assignments,
cluster_centers)
anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
return anchors
prog_bar = mmcv.ProgressBar(self.iters)
for i in range(self.iters):
converged, assignments = self.kmeans_expectation(
bboxes, assignments, cluster_centers)
if converged:
self.logger.info(f'K-means process has converged at iter {i}.')
break
cluster_centers = self.kmeans_maximization(bboxes, assignments,
cluster_centers)
prog_bar.update()
print('\n')
avg_iou = bbox_overlaps(bboxes,
cluster_centers).max(1)[0].mean().item()
anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
        self.logger.info(
            f'Anchor clustering finished. Average IoU: {avg_iou}')
return anchors
    def kmeans_maximization(self, bboxes, assignments, centers):
        """Maximization part of EM algorithm (Expectation-Maximization)."""
new_centers = torch.zeros_like(centers)
for i in range(centers.shape[0]):
mask = (assignments == i)
if mask.sum():
new_centers[i, :] = bboxes[mask].mean(0)
return new_centers
    def kmeans_expectation(self, bboxes, assignments, centers):
        """Expectation part of EM algorithm (Expectation-Maximization)."""
ious = bbox_overlaps(bboxes, centers)
closest = ious.argmax(1)
converged = (closest == assignments).all()
return converged, closest
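# --- Illustrative sketch (not part of the original tool) ---
# A minimal, self-contained rehearsal of the EM loop above on toy
# width/height data. It replaces mmdet's bbox_overlaps with a plain-torch
# IoU that is only valid for boxes sharing the same center; all names and
# numbers below are hypothetical.
def _toy_kmeans_anchor_demo(num_points=256, num_anchors=9, iters=100):
    import torch

    def _toy_iou(whs, centers):
        # For zero-centered boxes, the intersection is min(w) * min(h).
        inter = torch.min(whs[:, None, 0], centers[None, :, 0]) * \
            torch.min(whs[:, None, 1], centers[None, :, 1])
        union = (whs[:, 0] * whs[:, 1])[:, None] + \
            (centers[:, 0] * centers[:, 1])[None, :] - inter
        return inter / union

    whs = torch.rand(num_points, 2) * 100 + 1  # fake bbox widths/heights
    centers = whs[torch.randperm(num_points)[:num_anchors]].clone()
    assignments = torch.zeros(num_points, dtype=torch.long)
    for _ in range(iters):
        closest = _toy_iou(whs, centers).argmax(1)  # expectation step
        if bool((closest == assignments).all()):
            break  # no bbox changed its closest anchor: converged
        assignments = closest
        for i in range(num_anchors):  # maximization step
            mask = assignments == i
            if mask.sum():
                centers[i] = whs[mask].mean(0)
    return centers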
class YOLODEAnchorOptimizer(BaseAnchorOptimizer):
"""YOLO anchor optimizer using differential evolution algorithm.
Args:
num_anchors (int) : Number of anchors.
        iters (int): Maximum iterations for differential evolution.
strategy (str): The differential evolution strategy to use.
Should be one of:
- 'best1bin'
- 'best1exp'
- 'rand1exp'
- 'randtobest1exp'
- 'currenttobest1exp'
- 'best2exp'
- 'rand2exp'
- 'randtobest1bin'
- 'currenttobest1bin'
- 'best2bin'
- 'rand2bin'
- 'rand1bin'
Default: 'best1bin'.
population_size (int): Total population size of evolution algorithm.
Default: 15.
        convergence_thr (float): Tolerance for convergence: the
            optimization stops when ``np.std(pop) <= abs(convergence_thr)
            + convergence_thr * np.abs(np.mean(population_energies))``.
            Default: 0.0001.
        mutation (tuple[float]): Dithering range for the mutation
            constant; a new value is randomly drawn from this range each
            generation. Default: (0.5, 1).
        recombination (float): Recombination constant, also known as the
            crossover probability. Default: 0.7.
"""
def __init__(self,
num_anchors,
iters,
strategy='best1bin',
population_size=15,
convergence_thr=0.0001,
mutation=(0.5, 1),
recombination=0.7,
**kwargs):
super(YOLODEAnchorOptimizer, self).__init__(**kwargs)
self.num_anchors = num_anchors
self.iters = iters
self.strategy = strategy
self.population_size = population_size
self.convergence_thr = convergence_thr
self.mutation = mutation
self.recombination = recombination
def optimize(self):
anchors = self.differential_evolution()
self.save_result(anchors, self.out_dir)
def differential_evolution(self):
bboxes = self.get_zero_center_bbox_tensor()
bounds = []
for i in range(self.num_anchors):
bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])])
result = differential_evolution(
func=self.avg_iou_cost,
bounds=bounds,
args=(bboxes, ),
strategy=self.strategy,
maxiter=self.iters,
popsize=self.population_size,
tol=self.convergence_thr,
mutation=self.mutation,
recombination=self.recombination,
updating='immediate',
disp=True)
        self.logger.info(
            f'Anchor evolution finished. Average IoU: {1 - result.fun}')
anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])]
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
return anchors
@staticmethod
def avg_iou_cost(anchor_params, bboxes):
assert len(anchor_params) % 2 == 0
anchor_whs = torch.tensor(
[[w, h]
for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to(
bboxes.device, dtype=bboxes.dtype)
anchor_boxes = bbox_cxcywh_to_xyxy(
torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1))
ious = bbox_overlaps(bboxes, anchor_boxes)
max_ious, _ = ious.max(1)
cost = 1 - max_ious.mean().item()
return cost
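# Hedged worked example for avg_iou_cost (toy numbers, not from any run):
# with gt whs {10x10, 20x20} and anchors {10x10, 20x20}, every bbox's best
# IoU is 1.0, so cost = 1 - mean([1.0, 1.0]) = 0. Against a single 20x20
# anchor instead, the 10x10 bbox gives IoU = 100 / 400 = 0.25 and the
# 20x20 bbox gives 1.0, so cost = 1 - mean([0.25, 1.0]) = 0.375.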
def main():
logger = get_root_logger()
args = parse_args()
cfg = args.config
cfg = Config.fromfile(cfg)
input_shape = args.input_shape
assert len(input_shape) == 2
anchor_type = cfg.model.bbox_head.anchor_generator.type
    assert anchor_type == 'YOLOAnchorGenerator', \
        'Only YOLOAnchorGenerator anchors are supported, ' \
        f'but got {anchor_type}.'
base_sizes = cfg.model.bbox_head.anchor_generator.base_sizes
num_anchors = sum([len(sizes) for sizes in base_sizes])
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg:
train_data_cfg = train_data_cfg['dataset']
dataset = build_dataset(train_data_cfg)
if args.algorithm == 'k-means':
optimizer = YOLOKMeansAnchorOptimizer(
dataset=dataset,
input_shape=input_shape,
device=args.device,
num_anchors=num_anchors,
iters=args.iters,
logger=logger,
out_dir=args.output_dir)
elif args.algorithm == 'differential_evolution':
optimizer = YOLODEAnchorOptimizer(
dataset=dataset,
input_shape=input_shape,
device=args.device,
num_anchors=num_anchors,
iters=args.iters,
logger=logger,
out_dir=args.output_dir)
else:
raise NotImplementedError(
            'Only k-means and differential_evolution are supported, '
            f'but got {args.algorithm}')
optimizer.optimize()
if __name__ == '__main__':
main()
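# Hypothetical invocation (paths and flag spellings are illustrative):
#   python tools/analysis_tools/optimize_anchors.py \
#       configs/yolo/yolov3_d53_320_273e_coco.py \
#       --algorithm differential_evolution --input-shape 608 608 \
#       --iters 1000 --output-dir work_dirs/anchors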
| 13,161 | 34.477089 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/analysis_tools/get_flops.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--size-divisor',
type=int,
default=32,
        help='Pad the input image to the minimum size that is divisible '
        'by size_divisor. -1 means do not pad the image.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
orig_shape = (3, h, w)
divisor = args.size_divisor
if divisor > 0:
h = int(np.ceil(h / divisor)) * divisor
w = int(np.ceil(w / divisor)) * divisor
input_shape = (3, h, w)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
if divisor > 0 and \
input_shape != orig_shape:
        print(f'{split_line}\nThe size divisor padded the input shape '
              f'from {orig_shape} to {input_shape}\n')
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
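# Hypothetical invocation (config path is a placeholder):
#   python tools/analysis_tools/get_flops.py \
#       configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py \
#       --shape 1280 800
# With the default --size-divisor of 32, 1280x800 is already divisible,
# so the padded shape equals the original and no padding note is printed.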
| 2,995 | 29.571429 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/analysis_tools/analyze_logs.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def cal_train_time(log_dicts, args):
for i, log_dict in enumerate(log_dicts):
print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
all_times = []
for epoch in log_dict.keys():
if args.include_outliers:
all_times.append(log_dict[epoch]['time'])
else:
all_times.append(log_dict[epoch]['time'][1:])
all_times = np.array(all_times)
epoch_ave_time = all_times.mean(-1)
slowest_epoch = epoch_ave_time.argmax()
fastest_epoch = epoch_ave_time.argmin()
std_over_epoch = epoch_ave_time.std()
print(f'slowest epoch {slowest_epoch + 1}, '
f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
print(f'fastest epoch {fastest_epoch + 1}, '
f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
print(f'time std over epochs is {std_over_epoch:.4f}')
print(f'average iter time: {np.mean(all_times):.4f} s/iter')
print()
def plot_curve(log_dicts, args):
if args.backend is not None:
plt.switch_backend(args.backend)
sns.set_style(args.style)
# if legend is None, use {filename}_{key} as legend
legend = args.legend
if legend is None:
legend = []
for json_log in args.json_logs:
for metric in args.keys:
legend.append(f'{json_log}_{metric}')
assert len(legend) == (len(args.json_logs) * len(args.keys))
metrics = args.keys
num_metrics = len(metrics)
for i, log_dict in enumerate(log_dicts):
epochs = list(log_dict.keys())
for j, metric in enumerate(metrics):
print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
if metric not in log_dict[epochs[0]]:
raise KeyError(
f'{args.json_logs[i]} does not contain metric {metric}')
if 'mAP' in metric:
xs = np.arange(1, max(epochs) + 1)
ys = []
for epoch in epochs:
ys += log_dict[epoch][metric]
ax = plt.gca()
ax.set_xticks(xs)
plt.xlabel('epoch')
plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
else:
xs = []
ys = []
num_iters_per_epoch = log_dict[epochs[0]]['iter'][-2]
for epoch in epochs:
iters = log_dict[epoch]['iter']
if log_dict[epoch]['mode'][-1] == 'val':
iters = iters[:-1]
xs.append(
np.array(iters) + (epoch - 1) * num_iters_per_epoch)
ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
xs = np.concatenate(xs)
ys = np.concatenate(ys)
plt.xlabel('iter')
plt.plot(
xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
plt.legend()
if args.title is not None:
plt.title(args.title)
if args.out is None:
plt.show()
else:
print(f'save curve to: {args.out}')
plt.savefig(args.out)
plt.cla()
def add_plot_parser(subparsers):
parser_plt = subparsers.add_parser(
'plot_curve', help='parser for plotting curves')
parser_plt.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_plt.add_argument(
'--keys',
type=str,
nargs='+',
default=['bbox_mAP'],
help='the metric that you want to plot')
parser_plt.add_argument('--title', type=str, help='title of figure')
parser_plt.add_argument(
'--legend',
type=str,
nargs='+',
default=None,
help='legend of each plot')
parser_plt.add_argument(
'--backend', type=str, default=None, help='backend of plt')
parser_plt.add_argument(
'--style', type=str, default='dark', help='style of plt')
parser_plt.add_argument('--out', type=str, default=None)
def add_time_parser(subparsers):
parser_time = subparsers.add_parser(
'cal_train_time',
help='parser for computing the average time per training iteration')
parser_time.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_time.add_argument(
'--include-outliers',
action='store_true',
help='include the first value of every epoch when computing '
'the average time')
def parse_args():
parser = argparse.ArgumentParser(description='Analyze Json Log')
# currently only support plot curve and calculate average train time
subparsers = parser.add_subparsers(dest='task', help='task parser')
add_plot_parser(subparsers)
add_time_parser(subparsers)
args = parser.parse_args()
return args
def load_json_logs(json_logs):
# load and convert json_logs to log_dict, key is epoch, value is a sub dict
# keys of sub dict is different metrics, e.g. memory, bbox_mAP
# value of sub dict is a list of corresponding values of all iterations
log_dicts = [dict() for _ in json_logs]
for json_log, log_dict in zip(json_logs, log_dicts):
with open(json_log, 'r') as log_file:
for line in log_file:
log = json.loads(line.strip())
# skip lines without `epoch` field
if 'epoch' not in log:
continue
epoch = log.pop('epoch')
if epoch not in log_dict:
log_dict[epoch] = defaultdict(list)
for k, v in log.items():
log_dict[epoch][k].append(v)
return log_dicts
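# --- Illustrative sketch (not part of the original tool) ---
# A hedged, self-contained demo of the structure built above: one fake
# json line becomes {epoch: {metric: [values, ...]}}. The temporary file
# and its values are hypothetical.
def _toy_load_json_logs_demo():
    import os
    import tempfile
    line = json.dumps(dict(mode='train', epoch=1, iter=50, loss=0.9))
    with tempfile.NamedTemporaryFile(
            'w', suffix='.json', delete=False) as f:
        f.write(line + '\n')
        path = f.name
    try:
        log_dict = load_json_logs([path])[0]
        assert log_dict[1]['loss'] == [0.9]
        assert log_dict[1]['iter'] == [50]
    finally:
        os.remove(path)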
def main():
args = parse_args()
json_logs = args.json_logs
for json_log in json_logs:
assert json_log.endswith('.json')
log_dicts = load_json_logs(json_logs)
eval(args.task)(log_dicts, args)
if __name__ == '__main__':
main()
| 6,300 | 33.812155 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/analysis_tools/confusion_matrix.py
|
import argparse
import os
import matplotlib.pyplot as plt
import mmcv
import numpy as np
from matplotlib.ticker import MultipleLocator
from mmcv import Config, DictAction
from mmcv.ops import nms
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmdet.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(
description='Generate confusion matrix from detection results')
parser.add_argument('config', help='test config file path')
parser.add_argument(
        'prediction_path', help='path of the .pkl prediction result file')
parser.add_argument(
'save_dir', help='directory where confusion matrix will be saved')
parser.add_argument(
'--show', action='store_true', help='show confusion matrix')
parser.add_argument(
'--color-theme',
default='plasma',
help='theme of the matrix color map')
parser.add_argument(
'--score-thr',
type=float,
default=0.3,
help='score threshold to filter detection bboxes')
parser.add_argument(
'--tp-iou-thr',
type=float,
default=0.5,
help='IoU threshold to be considered as matched')
parser.add_argument(
'--nms-iou-thr',
type=float,
default=None,
        help='nms IoU threshold, only applied when users want to re-run '
        'nms with a different IoU threshold.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def calculate_confusion_matrix(dataset,
results,
score_thr=0,
nms_iou_thr=None,
tp_iou_thr=0.5):
"""Calculate the confusion matrix.
Args:
dataset (Dataset): Test or val dataset.
results (list[ndarray]): A list of detection results in each image.
score_thr (float|optional): Score threshold to filter bboxes.
Default: 0.
        nms_iou_thr (float|optional): nms IoU threshold; the detection
            results have already been processed by nms in the detector,
            so this is only applied to re-run nms with a different IoU
            threshold. Default: None.
tp_iou_thr (float|optional): IoU threshold to be considered as matched.
Default: 0.5.
"""
num_classes = len(dataset.CLASSES)
confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1])
assert len(dataset) == len(results)
prog_bar = mmcv.ProgressBar(len(results))
for idx, per_img_res in enumerate(results):
if isinstance(per_img_res, tuple):
res_bboxes, _ = per_img_res
else:
res_bboxes = per_img_res
ann = dataset.get_ann_info(idx)
gt_bboxes = ann['bboxes']
labels = ann['labels']
analyze_per_img_dets(confusion_matrix, gt_bboxes, labels, res_bboxes,
score_thr, tp_iou_thr, nms_iou_thr)
prog_bar.update()
return confusion_matrix
def analyze_per_img_dets(confusion_matrix,
gt_bboxes,
gt_labels,
result,
score_thr=0,
tp_iou_thr=0.5,
nms_iou_thr=None):
"""Analyze detection results on each image.
Args:
confusion_matrix (ndarray): The confusion matrix,
has shape (num_classes + 1, num_classes + 1).
gt_bboxes (ndarray): Ground truth bboxes, has shape (num_gt, 4).
gt_labels (ndarray): Ground truth labels, has shape (num_gt).
result (ndarray): Detection results, has shape
(num_classes, num_bboxes, 5).
score_thr (float): Score threshold to filter bboxes.
Default: 0.
tp_iou_thr (float): IoU threshold to be considered as matched.
Default: 0.5.
        nms_iou_thr (float|optional): nms IoU threshold; the detection
            results have already been processed by nms in the detector,
            so this is only applied to re-run nms with a different IoU
            threshold. Default: None.
"""
true_positives = np.zeros_like(gt_labels)
for det_label, det_bboxes in enumerate(result):
if nms_iou_thr:
det_bboxes, _ = nms(
det_bboxes[:, :4],
det_bboxes[:, -1],
nms_iou_thr,
score_threshold=score_thr)
ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes)
for i, det_bbox in enumerate(det_bboxes):
score = det_bbox[4]
det_match = 0
if score >= score_thr:
for j, gt_label in enumerate(gt_labels):
if ious[i, j] >= tp_iou_thr:
det_match += 1
if gt_label == det_label:
true_positives[j] += 1 # TP
confusion_matrix[gt_label, det_label] += 1
if det_match == 0: # BG FP
confusion_matrix[-1, det_label] += 1
for num_tp, gt_label in zip(true_positives, gt_labels):
if num_tp == 0: # FN
confusion_matrix[gt_label, -1] += 1
def plot_confusion_matrix(confusion_matrix,
labels,
save_dir=None,
show=True,
title='Normalized Confusion Matrix',
color_theme='plasma'):
"""Draw confusion matrix with matplotlib.
Args:
confusion_matrix (ndarray): The confusion matrix.
labels (list[str]): List of class names.
save_dir (str|optional): If set, save the confusion matrix plot to the
given path. Default: None.
show (bool): Whether to show the plot. Default: True.
title (str): Title of the plot. Default: `Normalized Confusion Matrix`.
color_theme (str): Theme of the matrix color map. Default: `plasma`.
"""
# normalize the confusion matrix
per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis]
confusion_matrix = \
confusion_matrix.astype(np.float32) / per_label_sums * 100
num_classes = len(labels)
fig, ax = plt.subplots(
figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180)
cmap = plt.get_cmap(color_theme)
im = ax.imshow(confusion_matrix, cmap=cmap)
plt.colorbar(mappable=im, ax=ax)
title_font = {'weight': 'bold', 'size': 12}
ax.set_title(title, fontdict=title_font)
label_font = {'size': 10}
plt.ylabel('Ground Truth Label', fontdict=label_font)
plt.xlabel('Prediction Label', fontdict=label_font)
# draw locator
xmajor_locator = MultipleLocator(1)
xminor_locator = MultipleLocator(0.5)
ax.xaxis.set_major_locator(xmajor_locator)
ax.xaxis.set_minor_locator(xminor_locator)
ymajor_locator = MultipleLocator(1)
yminor_locator = MultipleLocator(0.5)
ax.yaxis.set_major_locator(ymajor_locator)
ax.yaxis.set_minor_locator(yminor_locator)
# draw grid
ax.grid(True, which='minor', linestyle='-')
# draw label
ax.set_xticks(np.arange(num_classes))
ax.set_yticks(np.arange(num_classes))
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
ax.tick_params(
axis='x', bottom=False, top=True, labelbottom=False, labeltop=True)
plt.setp(
ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor')
    # draw confusion matrix values
for i in range(num_classes):
for j in range(num_classes):
ax.text(
j,
i,
'{}%'.format(int(confusion_matrix[i, j])),
ha='center',
va='center',
color='w',
size=7)
ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1
fig.tight_layout()
if save_dir is not None:
plt.savefig(
os.path.join(save_dir, 'confusion_matrix.png'), format='png')
if show:
plt.show()
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
results = mmcv.load(args.prediction_path)
assert isinstance(results, list)
if isinstance(results[0], list):
pass
elif isinstance(results[0], tuple):
results = [result[0] for result in results]
else:
raise TypeError('invalid type of prediction results')
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
dataset = build_dataset(cfg.data.test)
confusion_matrix = calculate_confusion_matrix(dataset, results,
args.score_thr,
args.nms_iou_thr,
args.tp_iou_thr)
plot_confusion_matrix(
confusion_matrix,
dataset.CLASSES + ('background', ),
save_dir=args.save_dir,
show=args.show)
if __name__ == '__main__':
main()
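# Hypothetical invocation (paths are placeholders):
#   python tools/analysis_tools/confusion_matrix.py \
#       configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py \
#       work_dirs/result.pkl work_dirs/confusion --score-thr 0.3 --show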
| 9,603 | 35.656489 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/analysis_tools/test_robustness.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import mmcv
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tools.analysis_tools.robustness_eval import get_results
from mmdet import datasets
from mmdet.apis import multi_gpu_test, set_random_seed, single_gpu_test
from mmdet.core import eval_map
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def coco_eval_with_return(result_files,
result_types,
coco,
max_dets=(100, 300, 1000)):
for res_type in result_types:
assert res_type in ['proposal', 'bbox', 'segm', 'keypoints']
if mmcv.is_str(coco):
coco = COCO(coco)
assert isinstance(coco, COCO)
eval_results = {}
for res_type in result_types:
result_file = result_files[res_type]
assert result_file.endswith('.json')
coco_dets = coco.loadRes(result_file)
img_ids = coco.getImgIds()
iou_type = 'bbox' if res_type == 'proposal' else res_type
cocoEval = COCOeval(coco, coco_dets, iou_type)
cocoEval.params.imgIds = img_ids
if res_type == 'proposal':
cocoEval.params.useCats = 0
cocoEval.params.maxDets = list(max_dets)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if res_type == 'segm' or res_type == 'bbox':
metric_names = [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
'AR100', 'ARs', 'ARm', 'ARl'
]
eval_results[res_type] = {
metric_names[i]: cocoEval.stats[i]
for i in range(len(metric_names))
}
else:
eval_results[res_type] = cocoEval.stats
return eval_results
def voc_eval_with_return(result_file,
dataset,
iou_thr=0.5,
logger='print',
only_ap=True):
det_results = mmcv.load(result_file)
annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
if hasattr(dataset, 'year') and dataset.year == 2007:
dataset_name = 'voc07'
else:
dataset_name = dataset.CLASSES
mean_ap, eval_results = eval_map(
det_results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=dataset_name,
logger=logger)
if only_ap:
eval_results = [{
'ap': eval_results[i]['ap']
} for i in range(len(eval_results))]
return mean_ap, eval_results
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
parser.add_argument(
'--corruptions',
type=str,
nargs='+',
default='benchmark',
choices=[
'all', 'benchmark', 'noise', 'blur', 'weather', 'digital',
'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',
'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',
'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',
'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',
'spatter', 'saturate'
],
help='corruptions')
parser.add_argument(
'--severities',
type=int,
nargs='+',
default=[0, 1, 2, 3, 4, 5],
help='corruption severity levels')
parser.add_argument(
'--eval',
type=str,
nargs='+',
choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
help='eval types')
parser.add_argument(
'--iou-thr',
type=float,
default=0.5,
help='IoU threshold for pascal voc evaluation')
parser.add_argument(
'--summaries',
type=bool,
default=False,
help='Print summaries for every corruption and severity')
parser.add_argument(
'--workers', type=int, default=32, help='workers per gpu')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--final-prints',
type=str,
nargs='+',
choices=['P', 'mPC', 'rPC'],
default='mPC',
help='corruption benchmark metric to print at the end')
parser.add_argument(
'--final-prints-aggregate',
type=str,
choices=['all', 'benchmark'],
default='benchmark',
help='aggregate all results or only those for benchmark corruptions')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
assert args.out or args.show or args.show_dir, \
('Please specify at least one operation (save or show the results) '
         'with the argument "--out", "--show" or "--show-dir"')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
if args.workers == 0:
args.workers = cfg.data.workers_per_gpu
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# set random seeds
if args.seed is not None:
set_random_seed(args.seed)
if 'all' in args.corruptions:
corruptions = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
'saturate'
]
elif 'benchmark' in args.corruptions:
corruptions = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
elif 'noise' in args.corruptions:
corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
elif 'blur' in args.corruptions:
corruptions = [
'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
]
elif 'weather' in args.corruptions:
corruptions = ['snow', 'frost', 'fog', 'brightness']
elif 'digital' in args.corruptions:
corruptions = [
'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
]
elif 'holdout' in args.corruptions:
corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
elif 'None' in args.corruptions:
corruptions = ['None']
args.severities = [0]
else:
corruptions = args.corruptions
rank, _ = get_dist_info()
aggregated_results = {}
for corr_i, corruption in enumerate(corruptions):
aggregated_results[corruption] = {}
for sev_i, corruption_severity in enumerate(args.severities):
# evaluate severity 0 (= no corruption) only once
if corr_i > 0 and corruption_severity == 0:
aggregated_results[corruption][0] = \
aggregated_results[corruptions[0]][0]
continue
test_data_cfg = copy.deepcopy(cfg.data.test)
# assign corruption and severity
if corruption_severity > 0:
corruption_trans = dict(
type='Corrupt',
corruption=corruption,
severity=corruption_severity)
# TODO: hard coded "1", we assume that the first step is
# loading images, which needs to be fixed in the future
test_data_cfg['pipeline'].insert(1, corruption_trans)
# print info
print(f'\nTesting {corruption} at severity {corruption_severity}')
# build the dataloader
# TODO: support multiple images per gpu
# (only minor changes are needed)
dataset = build_dataset(test_data_cfg)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=args.workers,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(
model, args.checkpoint, map_location='cpu')
# old versions did not save class info in checkpoints,
            # this workaround is for backward compatibility
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
show_dir = args.show_dir
if show_dir is not None:
show_dir = osp.join(show_dir, corruption)
show_dir = osp.join(show_dir, str(corruption_severity))
if not osp.exists(show_dir):
                        os.makedirs(show_dir)
outputs = single_gpu_test(model, data_loader, args.show,
show_dir, args.show_score_thr)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir)
if args.out and rank == 0:
eval_results_filename = (
osp.splitext(args.out)[0] + '_results' +
osp.splitext(args.out)[1])
mmcv.dump(outputs, args.out)
eval_types = args.eval
if cfg.dataset_type == 'VOCDataset':
if eval_types:
for eval_type in eval_types:
if eval_type == 'bbox':
test_dataset = mmcv.runner.obj_from_dict(
cfg.data.test, datasets)
logger = 'print' if args.summaries else None
mean_ap, eval_results = \
voc_eval_with_return(
args.out, test_dataset,
args.iou_thr, logger)
aggregated_results[corruption][
corruption_severity] = eval_results
else:
                                print('\nOnly "bbox" evaluation '
                                      'is supported for pascal voc')
else:
if eval_types:
                        print(f'Start evaluating {" and ".join(eval_types)}')
if eval_types == ['proposal_fast']:
result_file = args.out
else:
if not isinstance(outputs[0], dict):
result_files = dataset.results2json(
outputs, args.out)
else:
for name in outputs[0]:
print(f'\nEvaluating {name}')
outputs_ = [out[name] for out in outputs]
                                    result_file = args.out + f'.{name}'
result_files = dataset.results2json(
outputs_, result_file)
eval_results = coco_eval_with_return(
result_files, eval_types, dataset.coco)
aggregated_results[corruption][
corruption_severity] = eval_results
else:
print('\nNo task was selected for evaluation;'
'\nUse --eval to select a task')
# save results after each evaluation
mmcv.dump(aggregated_results, eval_results_filename)
if rank == 0:
# print final results
print('\nAggregated results:')
prints = args.final_prints
aggregate = args.final_prints_aggregate
if cfg.dataset_type == 'VOCDataset':
get_results(
eval_results_filename,
dataset='voc',
prints=prints,
aggregate=aggregate)
else:
get_results(
eval_results_filename,
dataset='coco',
prints=prints,
aggregate=aggregate)
if __name__ == '__main__':
main()
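# Hypothetical invocation (checkpoint path is a placeholder):
#   python tools/analysis_tools/test_robustness.py \
#       configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#       work_dirs/faster_rcnn/latest.pth --out robustness.pkl \
#       --corruptions benchmark --eval bbox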
| 15,222 | 38.234536 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/analysis_tools/coco_error_analysis.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
from argparse import ArgumentParser
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def makeplot(rs, ps, outDir, class_name, iou_type):
cs = np.vstack([
np.ones((2, 3)),
np.array([0.31, 0.51, 0.74]),
np.array([0.75, 0.31, 0.30]),
np.array([0.36, 0.90, 0.38]),
np.array([0.50, 0.39, 0.64]),
np.array([1, 0.6, 0]),
])
areaNames = ['allarea', 'small', 'medium', 'large']
types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']
for i in range(len(areaNames)):
area_ps = ps[..., i, 0]
figure_title = iou_type + '-' + class_name + '-' + areaNames[i]
aps = [ps_.mean() for ps_ in area_ps]
ps_curve = [
ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps
]
ps_curve.insert(0, np.zeros(ps_curve[0].shape))
fig = plt.figure()
ax = plt.subplot(111)
for k in range(len(types)):
ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5)
ax.fill_between(
rs,
ps_curve[k],
ps_curve[k + 1],
color=cs[k],
label=str(f'[{aps[k]:.3f}]' + types[k]),
)
plt.xlabel('recall')
plt.ylabel('precision')
plt.xlim(0, 1.0)
plt.ylim(0, 1.0)
plt.title(figure_title)
plt.legend()
# plt.show()
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def autolabel(ax, rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
if height > 0 and height <= 1: # for percent values
text_label = '{:2.0f}'.format(height * 100)
else:
text_label = '{:2.0f}'.format(height)
ax.annotate(
text_label,
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords='offset points',
ha='center',
va='bottom',
fontsize='x-small',
)
def makebarplot(rs, ps, outDir, class_name, iou_type):
areaNames = ['allarea', 'small', 'medium', 'large']
types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']
fig, ax = plt.subplots()
x = np.arange(len(areaNames)) # the areaNames locations
width = 0.60 # the width of the bars
rects_list = []
figure_title = iou_type + '-' + class_name + '-' + 'ap bar plot'
for i in range(len(types) - 1):
type_ps = ps[i, ..., 0]
aps = [ps_.mean() for ps_ in type_ps.T]
rects_list.append(
ax.bar(
x - width / 2 + (i + 1) * width / len(types),
aps,
width / len(types),
label=types[i],
))
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Mean Average Precision (mAP)')
ax.set_title(figure_title)
ax.set_xticks(x)
ax.set_xticklabels(areaNames)
ax.legend()
# Add score texts over bars
for rects in rects_list:
autolabel(ax, rects)
# Save plot
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def get_gt_area_group_numbers(cocoEval):
areaRng = cocoEval.params.areaRng
areaRngStr = [str(aRng) for aRng in areaRng]
areaRngLbl = cocoEval.params.areaRngLbl
areaRngStr2areaRngLbl = dict(zip(areaRngStr, areaRngLbl))
areaRngLbl2Number = dict.fromkeys(areaRngLbl, 0)
for evalImg in cocoEval.evalImgs:
if evalImg:
for gtIgnore in evalImg['gtIgnore']:
if not gtIgnore:
aRngLbl = areaRngStr2areaRngLbl[str(evalImg['aRng'])]
areaRngLbl2Number[aRngLbl] += 1
return areaRngLbl2Number
def make_gt_area_group_numbers_plot(cocoEval, outDir, verbose=True):
areaRngLbl2Number = get_gt_area_group_numbers(cocoEval)
areaRngLbl = areaRngLbl2Number.keys()
if verbose:
print('number of annotations per area group:', areaRngLbl2Number)
# Init figure
fig, ax = plt.subplots()
x = np.arange(len(areaRngLbl)) # the areaNames locations
width = 0.60 # the width of the bars
figure_title = 'number of annotations per area group'
rects = ax.bar(x, areaRngLbl2Number.values(), width)
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Number of annotations')
ax.set_title(figure_title)
ax.set_xticks(x)
ax.set_xticklabels(areaRngLbl)
# Add score texts over bars
autolabel(ax, rects)
# Save plot
fig.tight_layout()
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def make_gt_area_histogram_plot(cocoEval, outDir):
n_bins = 100
areas = [ann['area'] for ann in cocoEval.cocoGt.anns.values()]
# init figure
figure_title = 'gt annotation areas histogram plot'
fig, ax = plt.subplots()
# Set the number of bins
ax.hist(np.sqrt(areas), bins=n_bins)
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_xlabel('Squareroot Area')
ax.set_ylabel('Number of annotations')
ax.set_title(figure_title)
# Save plot
fig.tight_layout()
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def analyze_individual_category(k,
cocoDt,
cocoGt,
catId,
iou_type,
areas=None):
nm = cocoGt.loadCats(catId)[0]
print(f'--------------analyzing {k + 1}-{nm["name"]}---------------')
ps_ = {}
dt = copy.deepcopy(cocoDt)
nm = cocoGt.loadCats(catId)[0]
imgIds = cocoGt.getImgIds()
dt_anns = dt.dataset['annotations']
select_dt_anns = []
for ann in dt_anns:
if ann['category_id'] == catId:
select_dt_anns.append(ann)
dt.dataset['annotations'] = select_dt_anns
dt.createIndex()
# compute precision but ignore superclass confusion
gt = copy.deepcopy(cocoGt)
child_catIds = gt.getCatIds(supNms=[nm['supercategory']])
for idx, ann in enumerate(gt.dataset['annotations']):
if ann['category_id'] in child_catIds and ann['category_id'] != catId:
gt.dataset['annotations'][idx]['ignore'] = 1
gt.dataset['annotations'][idx]['iscrowd'] = 1
gt.dataset['annotations'][idx]['category_id'] = catId
cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.maxDets = [100]
cocoEval.params.iouThrs = [0.1]
cocoEval.params.useCats = 1
if areas:
cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
[areas[0], areas[1]], [areas[1], areas[2]]]
cocoEval.evaluate()
cocoEval.accumulate()
ps_supercategory = cocoEval.eval['precision'][0, :, k, :, :]
ps_['ps_supercategory'] = ps_supercategory
# compute precision but ignore any class confusion
gt = copy.deepcopy(cocoGt)
for idx, ann in enumerate(gt.dataset['annotations']):
if ann['category_id'] != catId:
gt.dataset['annotations'][idx]['ignore'] = 1
gt.dataset['annotations'][idx]['iscrowd'] = 1
gt.dataset['annotations'][idx]['category_id'] = catId
cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.maxDets = [100]
cocoEval.params.iouThrs = [0.1]
cocoEval.params.useCats = 1
if areas:
cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
[areas[0], areas[1]], [areas[1], areas[2]]]
cocoEval.evaluate()
cocoEval.accumulate()
ps_allcategory = cocoEval.eval['precision'][0, :, k, :, :]
ps_['ps_allcategory'] = ps_allcategory
return k, ps_
def analyze_results(res_file,
ann_file,
res_types,
out_dir,
extraplots=None,
areas=None):
for res_type in res_types:
assert res_type in ['bbox', 'segm']
if areas:
        assert len(areas) == 3, ('3 integers should be specified as areas, '
                                 'representing 3 area regions')
directory = os.path.dirname(out_dir + '/')
if not os.path.exists(directory):
print(f'-------------create {out_dir}-----------------')
os.makedirs(directory)
cocoGt = COCO(ann_file)
cocoDt = cocoGt.loadRes(res_file)
imgIds = cocoGt.getImgIds()
for res_type in res_types:
res_out_dir = out_dir + '/' + res_type + '/'
res_directory = os.path.dirname(res_out_dir)
if not os.path.exists(res_directory):
print(f'-------------create {res_out_dir}-----------------')
os.makedirs(res_directory)
iou_type = res_type
cocoEval = COCOeval(
copy.deepcopy(cocoGt), copy.deepcopy(cocoDt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.iouThrs = [0.75, 0.5, 0.1]
cocoEval.params.maxDets = [100]
if areas:
cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
[areas[0], areas[1]],
[areas[1], areas[2]]]
cocoEval.evaluate()
cocoEval.accumulate()
ps = cocoEval.eval['precision']
ps = np.vstack([ps, np.zeros((4, *ps.shape[1:]))])
catIds = cocoGt.getCatIds()
recThrs = cocoEval.params.recThrs
with Pool(processes=48) as pool:
args = [(k, cocoDt, cocoGt, catId, iou_type, areas)
for k, catId in enumerate(catIds)]
analyze_results = pool.starmap(analyze_individual_category, args)
for k, catId in enumerate(catIds):
nm = cocoGt.loadCats(catId)[0]
print(f'--------------saving {k + 1}-{nm["name"]}---------------')
analyze_result = analyze_results[k]
assert k == analyze_result[0]
ps_supercategory = analyze_result[1]['ps_supercategory']
ps_allcategory = analyze_result[1]['ps_allcategory']
# compute precision but ignore superclass confusion
ps[3, :, k, :, :] = ps_supercategory
# compute precision but ignore any class confusion
ps[4, :, k, :, :] = ps_allcategory
# fill in background and false negative errors and plot
ps[ps == -1] = 0
ps[5, :, k, :, :] = ps[4, :, k, :, :] > 0
ps[6, :, k, :, :] = 1.0
makeplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], iou_type)
if extraplots:
makebarplot(recThrs, ps[:, :, k], res_out_dir, nm['name'],
iou_type)
makeplot(recThrs, ps, res_out_dir, 'allclass', iou_type)
if extraplots:
makebarplot(recThrs, ps, res_out_dir, 'allclass', iou_type)
make_gt_area_group_numbers_plot(
cocoEval=cocoEval, outDir=res_out_dir, verbose=True)
make_gt_area_histogram_plot(cocoEval=cocoEval, outDir=res_out_dir)
def main():
parser = ArgumentParser(description='COCO Error Analysis Tool')
parser.add_argument('result', help='result file (json format) path')
parser.add_argument('out_dir', help='dir to save analyze result images')
parser.add_argument(
'--ann',
default='data/coco/annotations/instances_val2017.json',
help='annotation file path')
parser.add_argument(
'--types', type=str, nargs='+', default=['bbox'], help='result types')
parser.add_argument(
'--extraplots',
action='store_true',
help='export extra bar/stat plots')
parser.add_argument(
'--areas',
type=int,
nargs='+',
default=[1024, 9216, 10000000000],
help='area regions')
args = parser.parse_args()
analyze_results(
args.result,
args.ann,
args.types,
out_dir=args.out_dir,
extraplots=args.extraplots,
areas=args.areas)
if __name__ == '__main__':
main()
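# Hypothetical invocation (result file is a placeholder):
#   python tools/analysis_tools/coco_error_analysis.py \
#       work_dirs/result.bbox.json work_dirs/coco_error \
#       --ann data/coco/annotations/instances_val2017.json --types bbox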
| 12,389 | 35.441176 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tools/analysis_tools/robustness_eval.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from argparse import ArgumentParser
import mmcv
import numpy as np
def print_coco_results(results):
def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100):
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap == 1 else '(AR)'
iouStr = '0.50:0.95' \
if iouThr is None else f'{iouThr:0.2f}'
iStr = f' {titleStr:<18} {typeStr} @[ IoU={iouStr:<9} | '
iStr += f'area={areaRng:>6s} | maxDets={maxDets:>3d} ] = {result:0.3f}'
print(iStr)
stats = np.zeros((12, ))
stats[0] = _print(results[0], 1)
stats[1] = _print(results[1], 1, iouThr=.5)
stats[2] = _print(results[2], 1, iouThr=.75)
stats[3] = _print(results[3], 1, areaRng='small')
stats[4] = _print(results[4], 1, areaRng='medium')
stats[5] = _print(results[5], 1, areaRng='large')
stats[6] = _print(results[6], 0, maxDets=1)
stats[7] = _print(results[7], 0, maxDets=10)
stats[8] = _print(results[8], 0)
stats[9] = _print(results[9], 0, areaRng='small')
stats[10] = _print(results[10], 0, areaRng='medium')
stats[11] = _print(results[11], 0, areaRng='large')
def get_coco_style_results(filename,
task='bbox',
metric=None,
prints='mPC',
aggregate='benchmark'):
assert aggregate in ['benchmark', 'all']
if prints == 'all':
prints = ['P', 'mPC', 'rPC']
elif isinstance(prints, str):
prints = [prints]
for p in prints:
assert p in ['P', 'mPC', 'rPC']
if metric is None:
metrics = [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100',
'ARs', 'ARm', 'ARl'
]
elif isinstance(metric, list):
metrics = metric
else:
metrics = [metric]
for metric_name in metrics:
assert metric_name in [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100',
'ARs', 'ARm', 'ARl'
]
eval_output = mmcv.load(filename)
num_distortions = len(list(eval_output.keys()))
results = np.zeros((num_distortions, 6, len(metrics)), dtype='float32')
for corr_i, distortion in enumerate(eval_output):
for severity in eval_output[distortion]:
for metric_j, metric_name in enumerate(metrics):
mAP = eval_output[distortion][severity][task][metric_name]
results[corr_i, severity, metric_j] = mAP
P = results[0, 0, :]
if aggregate == 'benchmark':
mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
else:
mPC = np.mean(results[:, 1:, :], axis=(0, 1))
rPC = mPC / P
print(f'\nmodel: {osp.basename(filename)}')
if metric is None:
if 'P' in prints:
print(f'Performance on Clean Data [P] ({task})')
print_coco_results(P)
if 'mPC' in prints:
print(f'Mean Performance under Corruption [mPC] ({task})')
print_coco_results(mPC)
if 'rPC' in prints:
print(f'Relative Performance under Corruption [rPC] ({task})')
print_coco_results(rPC)
else:
if 'P' in prints:
print(f'Performance on Clean Data [P] ({task})')
for metric_i, metric_name in enumerate(metrics):
print(f'{metric_name:5} = {P[metric_i]:0.3f}')
if 'mPC' in prints:
print(f'Mean Performance under Corruption [mPC] ({task})')
for metric_i, metric_name in enumerate(metrics):
print(f'{metric_name:5} = {mPC[metric_i]:0.3f}')
if 'rPC' in prints:
print(f'Relative Performance under Corruption [rPC] ({task})')
for metric_i, metric_name in enumerate(metrics):
print(f'{metric_name:5} => {rPC[metric_i] * 100:0.1f} %')
return results
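# Hedged numeric example for the quantities above (toy values only): with
# clean-data AP P = 0.40 and mean corrupted AP mPC = 0.25, the relative
# performance under corruption is rPC = mPC / P = 0.625, i.e. the model
# retains 62.5% of its clean performance under corruption.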
def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'):
assert aggregate in ['benchmark', 'all']
if prints == 'all':
prints = ['P', 'mPC', 'rPC']
elif isinstance(prints, str):
prints = [prints]
for p in prints:
assert p in ['P', 'mPC', 'rPC']
eval_output = mmcv.load(filename)
num_distortions = len(list(eval_output.keys()))
results = np.zeros((num_distortions, 6, 20), dtype='float32')
for i, distortion in enumerate(eval_output):
for severity in eval_output[distortion]:
mAP = [
eval_output[distortion][severity][j]['ap']
for j in range(len(eval_output[distortion][severity]))
]
results[i, severity, :] = mAP
P = results[0, 0, :]
if aggregate == 'benchmark':
mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
else:
mPC = np.mean(results[:, 1:, :], axis=(0, 1))
rPC = mPC / P
print(f'\nmodel: {osp.basename(filename)}')
if 'P' in prints:
print(f'Performance on Clean Data [P] in AP50 = {np.mean(P):0.3f}')
if 'mPC' in prints:
print('Mean Performance under Corruption [mPC] in AP50 = '
f'{np.mean(mPC):0.3f}')
if 'rPC' in prints:
print('Relative Performance under Corruption [rPC] in % = '
f'{np.mean(rPC) * 100:0.1f}')
return np.mean(results, axis=2, keepdims=True)
def get_results(filename,
dataset='coco',
task='bbox',
metric=None,
prints='mPC',
aggregate='benchmark'):
assert dataset in ['coco', 'voc', 'cityscapes']
if dataset in ['coco', 'cityscapes']:
results = get_coco_style_results(
filename,
task=task,
metric=metric,
prints=prints,
aggregate=aggregate)
elif dataset == 'voc':
if task != 'bbox':
print('Only bbox analysis is supported for Pascal VOC')
print('Will report bbox results\n')
if metric not in [None, ['AP'], ['AP50']]:
print('Only the AP50 metric is supported for Pascal VOC')
print('Will report AP50 metric\n')
results = get_voc_style_results(
filename, prints=prints, aggregate=aggregate)
return results
def get_distortions_from_file(filename):
eval_output = mmcv.load(filename)
return get_distortions_from_results(eval_output)
def get_distortions_from_results(eval_output):
distortions = []
for i, distortion in enumerate(eval_output):
distortions.append(distortion.replace('_', ' '))
return distortions
def main():
parser = ArgumentParser(description='Corruption Result Analysis')
parser.add_argument('filename', help='result file path')
parser.add_argument(
'--dataset',
type=str,
choices=['coco', 'voc', 'cityscapes'],
default='coco',
help='dataset type')
parser.add_argument(
'--task',
type=str,
nargs='+',
choices=['bbox', 'segm'],
default=['bbox'],
help='task to report')
parser.add_argument(
'--metric',
nargs='+',
choices=[
None, 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
'AR100', 'ARs', 'ARm', 'ARl'
],
default=None,
help='metric to report')
parser.add_argument(
'--prints',
type=str,
nargs='+',
choices=['P', 'mPC', 'rPC'],
default='mPC',
help='corruption benchmark metric to print')
parser.add_argument(
'--aggregate',
type=str,
choices=['all', 'benchmark'],
default='benchmark',
        help='aggregate all results or only those '
        'for benchmark corruptions')
args = parser.parse_args()
for task in args.task:
get_results(
args.filename,
dataset=args.dataset,
task=task,
metric=args.metric,
prints=args.prints,
aggregate=args.aggregate)
if __name__ == '__main__':
main()
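# Hypothetical invocation, consuming the dump written by
# tools/analysis_tools/test_robustness.py (file name is a placeholder):
#   python tools/analysis_tools/robustness_eval.py \
#       robustness_results.pkl --dataset coco --task bbox --prints P mPC rPC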
| 8,112 | 31.194444 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/.dev_scripts/convert_train_benchmark_script.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model json to script')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument(
'--partition',
type=str,
default='openmmlab',
help='slurm partition name')
parser.add_argument(
'--max-keep-ckpts',
type=int,
default=1,
help='The maximum checkpoints to keep')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
assert args.out or args.run, \
('Please specify at least one operation (save/run/ the '
'script) with the argument "--out" or "--run"')
partition = args.partition # cluster name
root_name = './tools'
train_script_name = osp.join(root_name, 'slurm_train.sh')
    # redirect stdout to /dev/null (no console output)
stdout_cfg = '>/dev/null'
max_keep_ckpts = args.max_keep_ckpts
commands = []
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
for i, cfg in enumerate(model_cfgs):
cfg = cfg.strip()
if len(cfg) == 0:
continue
# print cfg name
echo_info = f'echo \'{cfg}\' &'
commands.append(echo_info)
commands.append('\n')
fname, _ = osp.splitext(osp.basename(cfg))
out_fname = osp.join(root_name, 'work_dir', fname)
# default setting
if cfg.find('16x') >= 0:
command_info = f'GPUS=16 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
elif cfg.find('gn-head_4x4_1x_coco.py') >= 0 or \
cfg.find('gn-head_4x4_2x_coco.py') >= 0:
command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
else:
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
command_info += f'{partition} '
command_info += f'{fname} '
command_info += f'{cfg} '
command_info += f'{out_fname} '
if max_keep_ckpts:
command_info += f'--cfg-options ' \
f'checkpoint_config.max_keep_ckpts=' \
f'{max_keep_ckpts}' + ' '
command_info += f'{stdout_cfg} &'
commands.append(command_info)
if i < len(model_cfgs):
commands.append('\n')
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
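# Hypothetical invocation, chained after benchmark_filter.py below:
#   python .dev_scripts/convert_train_benchmark_script.py \
#       batch_train_list.txt --partition openmmlab \
#       --max-keep-ckpts 1 --out train_benchmark.sh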
| 3,307 | 32.08 | 74 |
py
|
DSLA-DSLA
|
DSLA-DSLA/.dev_scripts/gather_test_benchmark_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import mmcv
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'--out', type=str, help='output path of gathered metrics to be stored')
parser.add_argument(
'--not-show', action='store_true', help='not show metrics')
parser.add_argument(
'--show-all', action='store_true', help='show all model metrics')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
root_path = args.root
metrics_out = args.out
result_dict = {}
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
metric_json_dir = osp.join(root_path, fname)
if osp.exists(metric_json_dir):
json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
if len(json_list) > 0:
log_json_path = list(sorted(json_list))[-1]
metric = mmcv.load(log_json_path)
if config in metric.get('config', {}):
new_metrics = dict()
for record_metric_key in record_metrics:
record_metric_key_bk = record_metric_key
old_metric = record_metrics[record_metric_key]
if record_metric_key == 'AR_1000':
record_metric_key = 'AR@1000'
if record_metric_key not in metric['metric']:
raise KeyError(
                                'record_metric_key does not exist, '
                                'please check your config')
new_metric = round(
metric['metric'][record_metric_key] * 100, 1)
new_metrics[record_metric_key_bk] = new_metric
if args.show_all:
result_dict[config] = dict(
before=record_metrics, after=new_metrics)
else:
for record_metric_key in record_metrics:
old_metric = record_metrics[record_metric_key]
new_metric = new_metrics[record_metric_key]
if old_metric != new_metric:
result_dict[config] = dict(
before=record_metrics,
after=new_metrics)
break
else:
print(f'{config} not included in: {log_json_path}')
else:
                    print(f'{config}: no json log found in {metric_json_dir}')
else:
                print(f'{config}: dir {metric_json_dir} does not exist')
if metrics_out:
mmcv.mkdir_or_exist(metrics_out)
mmcv.dump(result_dict,
osp.join(metrics_out, 'batch_test_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
| 3,916 | 39.381443 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/.dev_scripts/benchmark_filter.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
parser = argparse.ArgumentParser(description='Filter configs to train')
parser.add_argument(
'--basic-arch',
action='store_true',
help='to train models in basic arch')
parser.add_argument(
'--datasets', action='store_true', help='to train models in dataset')
parser.add_argument(
'--data-pipeline',
action='store_true',
help='to train models related to data pipeline, e.g. augmentations')
parser.add_argument(
'--nn-module',
action='store_true',
help='to train models related to neural network modules')
parser.add_argument(
'--model-options',
nargs='+',
        help='extra config directories to add to the benchmark')
parser.add_argument(
'--out',
type=str,
default='batch_train_list.txt',
        help='output path of the filtered config list')
args = parser.parse_args()
return args
basic_arch_root = [
'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
]
datasets_root = [
'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
]
data_pipeline_root = ['albu_example', 'instaboost']
nn_module_root = [
'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
]
benchmark_pool = [
'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
'configs/atss/atss_r50_fpn_1x_coco.py',
'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
'configs/centripetalnet/'
'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
'configs/cornernet/'
'cornernet_hourglass104_mstest_8x6_210e_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
'configs/detectors/detectors_htc_r50_1x_coco.py',
'configs/detr/detr_r50_8x2_150e_coco.py',
'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py', # noqa
'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
'configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py',
'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py',
'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
'configs/gfl/gfl_r50_fpn_1x_coco.py',
'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
'configs/htc/htc_r50_fpn_1x_coco.py',
'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
'configs/paa/paa_r50_fpn_1x_coco.py',
'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
'configs/resnest/'
'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
'configs/rpn/rpn_r50_fpn_1x_coco.py',
'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
'configs/ssd/ssd300_coco.py',
'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
'configs/yolact/yolact_r50_1x8_coco.py',
'configs/yolo/yolov3_d53_320_273e_coco.py',
'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
'configs/scnet/scnet_r50_fpn_1x_coco.py',
'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
]
def main():
args = parse_args()
benchmark_type = []
if args.basic_arch:
benchmark_type += basic_arch_root
if args.datasets:
benchmark_type += datasets_root
if args.data_pipeline:
benchmark_type += data_pipeline_root
if args.nn_module:
benchmark_type += nn_module_root
special_model = args.model_options
if special_model is not None:
benchmark_type += special_model
config_dpath = 'configs/'
benchmark_configs = []
for cfg_root in benchmark_type:
cfg_dir = osp.join(config_dpath, cfg_root)
configs = os.scandir(cfg_dir)
for cfg in configs:
config_path = osp.join(cfg_dir, cfg.name)
if (config_path in benchmark_pool
and config_path not in benchmark_configs):
benchmark_configs.append(config_path)
    print(f'Found {len(benchmark_configs)} configs to benchmark in total')
with open(args.out, 'w') as f:
for config in benchmark_configs:
f.write(config + '\n')
if __name__ == '__main__':
main()
| 7,101 | 41.27381 | 92 |
py
|
DSLA-DSLA
|
DSLA-DSLA/.dev_scripts/gather_models.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import json
import os.path as osp
import shutil
import subprocess
from collections import OrderedDict
import mmcv
import torch
import yaml
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, stream, OrderedDumper, **kwds)
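# Illustrative use (assumed values): plain yaml.dump cannot represent an
# OrderedDict and sorts keys by default, so this helper registers a
# representer that preserves insertion order, e.g.
#   ordered_yaml_dump(OrderedDict([('Name', 'x'), ('Epochs', 12)]))
#   -> 'Name: x\nEpochs: 12\n'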
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# remove ema state_dict
for key in list(checkpoint['state_dict']):
if key.startswith('ema_'):
checkpoint['state_dict'].pop(key)
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # avoid str.rstrip('.pth'), which strips characters rather than a suffix
    final_file = osp.splitext(out_file)[0] + '-{}.pth'.format(sha[:8])
    # block until the move finishes so the returned path is valid
    subprocess.run(['mv', out_file, final_file], check=True)
return final_file
def get_final_epoch(config):
cfg = mmcv.Config.fromfile('./configs/' + config)
return cfg.runner.max_epochs
def get_best_epoch(exp_dir):
best_epoch_full_path = list(
sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1]
best_epoch_model_path = best_epoch_full_path.split('/')[-1]
best_epoch = best_epoch_model_path.split('_')[-1].split('.')[0]
return best_epoch_model_path, int(best_epoch)
def get_real_epoch(config):
cfg = mmcv.Config.fromfile('./configs/' + config)
epoch = cfg.runner.max_epochs
if cfg.data.train.type == 'RepeatDataset':
epoch *= cfg.data.train.times
return epoch
def get_final_results(log_json_path, epoch, results_lut):
result_dict = dict()
with open(log_json_path, 'r') as f:
for line in f.readlines():
log_line = json.loads(line)
if 'mode' not in log_line.keys():
continue
if log_line['mode'] == 'train' and log_line['epoch'] == epoch:
result_dict['memory'] = log_line['memory']
if log_line['mode'] == 'val' and log_line['epoch'] == epoch:
result_dict.update({
key: log_line[key]
for key in results_lut if key in log_line
})
return result_dict
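# The json log is expected to contain one dict per line, e.g. (illustrative):
#   {"mode": "train", "epoch": 12, "iter": 7330, "memory": 3857, ...}
#   {"mode": "val", "epoch": 12, "bbox_mAP": 0.374, "segm_mAP": 0.347, ...}
# so the parser above takes 'memory' from the last train line of the epoch
# and the keys listed in `results_lut` from the matching val line.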
def get_dataset_name(config):
# If there are more dataset, add here.
name_map = dict(
CityscapesDataset='Cityscapes',
CocoDataset='COCO',
CocoPanopticDataset='COCO',
DeepFashionDataset='Deep Fashion',
LVISV05Dataset='LVIS v0.5',
LVISV1Dataset='LVIS v1',
VOCDataset='Pascal VOC',
WIDERFaceDataset='WIDER Face')
cfg = mmcv.Config.fromfile('./configs/' + config)
return name_map[cfg.dataset_type]
def convert_model_info_to_pwc(model_infos):
pwc_files = {}
for model in model_infos:
cfg_folder_name = osp.split(model['config'])[-2]
pwc_model_info = OrderedDict()
pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
pwc_model_info['In Collection'] = 'Please fill in Collection name'
pwc_model_info['Config'] = osp.join('configs', model['config'])
# get metadata
memory = round(model['results']['memory'] / 1024, 1)
epochs = get_real_epoch(model['config'])
meta_data = OrderedDict()
meta_data['Training Memory (GB)'] = memory
meta_data['Epochs'] = epochs
pwc_model_info['Metadata'] = meta_data
# get dataset name
dataset_name = get_dataset_name(model['config'])
# get results
results = []
# if there are more metrics, add here.
if 'bbox_mAP' in model['results']:
metric = round(model['results']['bbox_mAP'] * 100, 1)
results.append(
OrderedDict(
Task='Object Detection',
Dataset=dataset_name,
Metrics={'box AP': metric}))
if 'segm_mAP' in model['results']:
metric = round(model['results']['segm_mAP'] * 100, 1)
results.append(
OrderedDict(
Task='Instance Segmentation',
Dataset=dataset_name,
Metrics={'mask AP': metric}))
if 'PQ' in model['results']:
metric = round(model['results']['PQ'], 1)
results.append(
OrderedDict(
Task='Panoptic Segmentation',
Dataset=dataset_name,
Metrics={'PQ': metric}))
pwc_model_info['Results'] = results
link_string = 'https://download.openmmlab.com/mmdetection/v2.0/'
        link_string += '{}/{}'.format(
            osp.splitext(model['config'])[0],
            osp.split(model['model_path'])[-1])
pwc_model_info['Weights'] = link_string
if cfg_folder_name in pwc_files:
pwc_files[cfg_folder_name].append(pwc_model_info)
else:
pwc_files[cfg_folder_name] = [pwc_model_info]
return pwc_files
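# Illustrative shape of one generated metafile entry (values hypothetical):
#   - Name: faster_rcnn_r50_fpn_1x_coco
#     In Collection: Please fill in Collection name
#     Config: configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py
#     Metadata: {Training Memory (GB): 4.0, Epochs: 12}
#     Results: [{Task: Object Detection, Dataset: COCO,
#                Metrics: {box AP: 37.4}}]
#     Weights: https://download.openmmlab.com/mmdetection/v2.0/...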
def parse_args():
parser = argparse.ArgumentParser(description='Gather benchmarked models')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'out', type=str, help='output path of gathered models to be stored')
parser.add_argument(
'--best',
action='store_true',
help='whether to gather the best model.')
args = parser.parse_args()
return args
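# Example invocation (hypothetical paths), gathering the best checkpoints:
#   python .dev_scripts/gather_models.py work_dirs/ gathered_models/ --best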
def main():
args = parse_args()
models_root = args.root
models_out = args.out
mmcv.mkdir_or_exist(models_out)
# find all models in the root directory to be gathered
raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))
    # filter out configs that were not trained in the experiments dir
used_configs = []
for raw_config in raw_configs:
if osp.exists(osp.join(models_root, raw_config)):
used_configs.append(raw_config)
    print(f'Found {len(used_configs)} models to be gathered')
    # find the final checkpoint and log file for each trained config
# and parse the best performance
model_infos = []
for used_config in used_configs:
exp_dir = osp.join(models_root, used_config)
        # check whether the experiment has finished
if args.best is True:
final_model, final_epoch = get_best_epoch(exp_dir)
else:
final_epoch = get_final_epoch(used_config)
final_model = 'epoch_{}.pth'.format(final_epoch)
model_path = osp.join(exp_dir, final_model)
# skip if the model is still training
if not osp.exists(model_path):
continue
# get the latest logs
log_json_path = list(
sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
cfg = mmcv.Config.fromfile('./configs/' + used_config)
results_lut = cfg.evaluation.metric
if not isinstance(results_lut, list):
results_lut = [results_lut]
# case when using VOC, the evaluation key is only 'mAP'
# when using Panoptic Dataset, the evaluation key is 'PQ'.
for i, key in enumerate(results_lut):
if 'mAP' not in key and 'PQ' not in key:
                results_lut[i] = key + '_mAP'
model_performance = get_final_results(log_json_path, final_epoch,
results_lut)
if model_performance is None:
continue
model_time = osp.split(log_txt_path)[-1].split('.')[0]
model_infos.append(
dict(
config=used_config,
results=model_performance,
epochs=final_epoch,
model_time=model_time,
final_model=final_model,
log_json_path=osp.split(log_json_path)[-1]))
# publish model for each checkpoint
publish_model_infos = []
for model in model_infos:
        model_publish_dir = osp.join(models_out,
                                     osp.splitext(model['config'])[0])
mmcv.mkdir_or_exist(model_publish_dir)
model_name = osp.split(model['config'])[-1].split('.')[0]
model_name += '_' + model['model_time']
publish_model_path = osp.join(model_publish_dir, model_name)
trained_model_path = osp.join(models_root, model['config'],
model['final_model'])
# convert model
final_model_path = process_checkpoint(trained_model_path,
publish_model_path)
# copy log
shutil.copy(
osp.join(models_root, model['config'], model['log_json_path']),
osp.join(model_publish_dir, f'{model_name}.log.json'))
        shutil.copy(
            osp.join(models_root, model['config'],
                     osp.splitext(model['log_json_path'])[0]),
            osp.join(model_publish_dir, f'{model_name}.log'))
# copy config to guarantee reproducibility
config_path = model['config']
config_path = osp.join(
'configs',
config_path) if 'configs' not in config_path else config_path
target_config_path = osp.split(config_path)[-1]
shutil.copy(config_path, osp.join(model_publish_dir,
target_config_path))
model['model_path'] = final_model_path
publish_model_infos.append(model)
models = dict(models=publish_model_infos)
    print(f'Gathered {len(publish_model_infos)} models in total')
mmcv.dump(models, osp.join(models_out, 'model_info.json'))
pwc_files = convert_model_info_to_pwc(publish_model_infos)
for name in pwc_files:
with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')
if __name__ == '__main__':
main()
| 10,489 | 34.924658 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/.dev_scripts/test_init_backbone.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Check out backbone whether successfully load pretrained checkpoint."""
import copy
import os
from os.path import dirname, exists, join
import pytest
from mmcv import Config, ProgressBar
from mmcv.runner import _load_checkpoint
from mmdet.models import build_detector
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(__file__))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
def _traversed_config_file():
"""We traversed all potential config files under the `config` file. If you
need to print details or debug code, you can use this function.
If the `backbone.init_cfg` is None (do not use `Pretrained` init way), you
need add the folder name in `ignores_folder` (if the config files in this
folder all set backbone.init_cfg is None) or add config name in
`ignores_file` (if the config file set backbone.init_cfg is None)
"""
config_path = _get_config_directory()
check_cfg_names = []
# `base`, `legacy_1.x` and `common` ignored by default.
ignores_folder = ['_base_', 'legacy_1.x', 'common']
# 'ld' need load teacher model, if want to check 'ld',
# please check teacher_config path first.
ignores_folder += ['ld']
# `selfsup_pretrain` need convert model, if want to check this model,
# need to convert the model first.
ignores_folder += ['selfsup_pretrain']
# the `init_cfg` in 'centripetalnet', 'cornernet', 'cityscapes',
# 'scratch' is None.
# the `init_cfg` in ssdlite(`ssdlite_mobilenetv2_scratch_600e_coco.py`)
# is None
    # Please confirm `backbone.init_cfg` is None first.
ignores_folder += ['centripetalnet', 'cornernet', 'cityscapes', 'scratch']
ignores_file = ['ssdlite_mobilenetv2_scratch_600e_coco.py']
for config_file_name in os.listdir(config_path):
if config_file_name not in ignores_folder:
config_file = join(config_path, config_file_name)
if os.path.isdir(config_file):
for config_sub_file in os.listdir(config_file):
if config_sub_file.endswith('py') and \
config_sub_file not in ignores_file:
name = join(config_file, config_sub_file)
check_cfg_names.append(name)
return check_cfg_names
def _check_backbone(config, print_cfg=True):
"""Check out backbone whether successfully load pretrained model, by using
`backbone.init_cfg`.
First, using `mmcv._load_checkpoint` to load the checkpoint without
loading models.
Then, using `build_detector` to build models, and using
`model.init_weights()` to initialize the parameters.
Finally, assert weights and bias of each layer loaded from pretrained
checkpoint are equal to the weights and bias of original checkpoint.
For the convenience of comparison, we sum up weights and bias of
each loaded layer separately.
Args:
config (str): Config file path.
print_cfg (bool): Whether print logger and return the result.
Returns:
results (str or None): If backbone successfully load pretrained
checkpoint, return None; else, return config file path.
"""
if print_cfg:
print('-' * 15 + 'loading ', config)
cfg = Config.fromfile(config)
init_cfg = None
try:
init_cfg = cfg.model.backbone.init_cfg
init_flag = True
except AttributeError:
init_flag = False
if init_cfg is None or init_cfg.get('type') != 'Pretrained':
init_flag = False
if init_flag:
checkpoint = _load_checkpoint(init_cfg.checkpoint)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
checkpoint_layers = state_dict.keys()
for name, value in model.backbone.state_dict().items():
if name in checkpoint_layers:
assert value.equal(state_dict[name])
if print_cfg:
print('-' * 10 + 'Successfully load checkpoint' + '-' * 10 +
'\n', )
return None
else:
if print_cfg:
print(config + '\n' + '-' * 10 +
                  'config file does not have init_cfg' + '-' * 10 + '\n')
return config
@pytest.mark.parametrize('config', _traversed_config_file())
def test_load_pretrained(config):
"""Check out backbone whether successfully load pretrained model by using
`backbone.init_cfg`.
Details please refer to `_check_backbone`
"""
_check_backbone(config, print_cfg=False)
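# Example invocation (hypothetical filter), checking only retinanet configs:
#   pytest .dev_scripts/test_init_backbone.py -k retinanet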
def _test_load_pretrained():
"""We traversed all potential config files under the `config` file. If you
need to print details or debug code, you can use this function.
Returns:
check_cfg_names (list[str]): Config files that backbone initialized
from pretrained checkpoint might be problematic. Need to recheck
the config file. The output including the config files that the
backbone.init_cfg is None
"""
check_cfg_names = _traversed_config_file()
need_check_cfg = []
prog_bar = ProgressBar(len(check_cfg_names))
for config in check_cfg_names:
init_cfg_name = _check_backbone(config)
if init_cfg_name is not None:
need_check_cfg.append(init_cfg_name)
prog_bar.update()
print('These config files need to be checked again')
print(need_check_cfg)
| 6,625 | 35.406593 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/.dev_scripts/benchmark_inference_fps.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import mmcv
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from terminaltables import GithubFlavoredMarkdownTable
from tools.analysis_tools.benchmark import repeat_measure_inference_speed
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet benchmark a model of FPS')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument(
'--round-num',
type=int,
default=1,
        help='number of decimal digits to which results are rounded')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
help='number of repeat times of measurement for averaging the results')
parser.add_argument(
'--out', type=str, help='output path of gathered fps to be stored')
parser.add_argument(
'--max-iter', type=int, default=2000, help='num of max iter')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
        'the inference speed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
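# Example invocation (hypothetical paths); the script only supports
# distributed mode, so a launcher must be specified:
#   python -m torch.distributed.launch --nproc_per_node=1 \
#       .dev_scripts/benchmark_inference_fps.py \
#       .dev_scripts/batch_test_list.py /path/to/checkpoints \
#       --launcher pytorch --out fps_out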
def results2markdown(result_dict):
table_data = []
is_multiple_results = False
for cfg_name, value in result_dict.items():
name = cfg_name.replace('configs/', '')
fps = value['fps']
ms_times_pre_image = value['ms_times_pre_image']
if isinstance(fps, list):
is_multiple_results = True
mean_fps = value['mean_fps']
mean_times_pre_image = value['mean_times_pre_image']
fps_str = ','.join([str(s) for s in fps])
ms_times_pre_image_str = ','.join(
[str(s) for s in ms_times_pre_image])
table_data.append([
name, fps_str, mean_fps, ms_times_pre_image_str,
mean_times_pre_image
])
else:
table_data.append([name, fps, ms_times_pre_image])
if is_multiple_results:
table_data.insert(0, [
            'model', 'fps', 'mean_fps', 'times_per_image(ms)',
            'mean_times_per_image(ms)'
])
else:
        table_data.insert(0, ['model', 'fps', 'times_per_image(ms)'])
table = GithubFlavoredMarkdownTable(table_data)
print(table.table, flush=True)
if __name__ == '__main__':
args = parse_args()
assert args.round_num >= 0
assert args.repeat_num >= 1
config = Config.fromfile(args.config)
if args.launcher == 'none':
raise NotImplementedError('Only supports distributed mode')
else:
init_dist(args.launcher)
result_dict = {}
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
cfg_path = model_info['config'].strip()
cfg = Config.fromfile(cfg_path)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
fps = repeat_measure_inference_speed(cfg, checkpoint,
args.max_iter,
args.log_interval,
args.fuse_conv_bn,
args.repeat_num)
if args.repeat_num > 1:
fps_list = [round(fps_, args.round_num) for fps_ in fps]
times_pre_image_list = [
round(1000 / fps_, args.round_num) for fps_ in fps
]
mean_fps = round(
sum(fps_list) / len(fps_list), args.round_num)
mean_times_pre_image = round(
sum(times_pre_image_list) / len(times_pre_image_list),
args.round_num)
print(
f'{cfg_path} '
f'Overall fps: {fps_list}[{mean_fps}] img / s, '
f'times per image: '
f'{times_pre_image_list}[{mean_times_pre_image}] '
f'ms / img',
flush=True)
result_dict[cfg_path] = dict(
fps=fps_list,
mean_fps=mean_fps,
ms_times_pre_image=times_pre_image_list,
mean_times_pre_image=mean_times_pre_image)
else:
print(
f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
f'times per image: {1000 / fps:.{args.round_num}f} '
f'ms / img',
flush=True)
result_dict[cfg_path] = dict(
fps=round(fps, args.round_num),
ms_times_pre_image=round(1000 / fps, args.round_num))
except Exception as e:
print(f'{cfg_path} error: {repr(e)}')
if args.repeat_num > 1:
result_dict[cfg_path] = dict(
fps=[0],
mean_fps=0,
ms_times_pre_image=[0],
mean_times_pre_image=0)
else:
result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0)
if args.out:
mmcv.mkdir_or_exist(args.out)
mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
results2markdown(result_dict)
| 6,763 | 38.788235 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/.dev_scripts/gather_train_benchmark_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import mmcv
from gather_models import get_final_results
try:
import xlrd
except ImportError:
xlrd = None
try:
import xlutils
from xlutils.copy import copy
except ImportError:
xlutils = None
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument(
'--out', type=str, help='output path of gathered metrics to be stored')
parser.add_argument(
'--not-show', action='store_true', help='not show metrics')
parser.add_argument(
'--excel', type=str, help='input path of excel to be recorded')
parser.add_argument(
        '--ncol',
        type=int,
        help='number of the excel column to be modified or appended')
args = parser.parse_args()
return args
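# Example invocation (hypothetical paths), writing metrics to excel column 5:
#   python .dev_scripts/gather_train_benchmark_metric.py work_dirs \
#       batch_train_list.txt --out metrics --excel summary.xls --ncol 5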
if __name__ == '__main__':
args = parse_args()
if args.excel:
assert args.ncol, 'Please specify "--excel" and "--ncol" ' \
'at the same time'
if xlrd is None:
raise RuntimeError(
'xlrd is not installed,'
'Please use “pip install xlrd==1.2.0” to install')
if xlutils is None:
raise RuntimeError(
'xlutils is not installed,'
'Please use “pip install xlutils==2.0.0” to install')
readbook = xlrd.open_workbook(args.excel)
sheet = readbook.sheet_by_name('Sheet1')
sheet_info = {}
total_nrows = sheet.nrows
for i in range(3, sheet.nrows):
sheet_info[sheet.row_values(i)[0]] = i
xlrw = copy(readbook)
table = xlrw.get_sheet(0)
root_path = args.root
metrics_out = args.out
result_dict = {}
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
for i, config in enumerate(model_cfgs):
config = config.strip()
if len(config) == 0:
continue
config_name = osp.split(config)[-1]
config_name = osp.splitext(config_name)[0]
result_path = osp.join(root_path, config_name)
if osp.exists(result_path):
# 1 read config
cfg = mmcv.Config.fromfile(config)
total_epochs = cfg.runner.max_epochs
final_results = cfg.evaluation.metric
if not isinstance(final_results, list):
final_results = [final_results]
final_results_out = []
for key in final_results:
if 'proposal_fast' in key:
final_results_out.append('AR@1000') # RPN
elif 'mAP' not in key:
final_results_out.append(key + '_mAP')
# 2 determine whether total_epochs ckpt exists
ckpt_path = f'epoch_{total_epochs}.pth'
if osp.exists(osp.join(result_path, ckpt_path)):
log_json_path = list(
sorted(glob.glob(osp.join(result_path,
'*.log.json'))))[-1]
# 3 read metric
model_performance = get_final_results(
log_json_path, total_epochs, final_results_out)
if model_performance is None:
print(f'log file error: {log_json_path}')
continue
for performance in model_performance:
if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']:
metric = round(
model_performance[performance] * 100, 1)
model_performance[performance] = metric
result_dict[config] = model_performance
# update and append excel content
if args.excel:
if 'AR@1000' in model_performance:
metrics = f'{model_performance["AR@1000"]}' \
f'(AR@1000)'
elif 'segm_mAP' in model_performance:
metrics = f'{model_performance["bbox_mAP"]}/' \
f'{model_performance["segm_mAP"]}'
else:
metrics = f'{model_performance["bbox_mAP"]}'
row_num = sheet_info.get(config, None)
if row_num:
table.write(row_num, args.ncol, metrics)
else:
table.write(total_nrows, 0, config)
table.write(total_nrows, args.ncol, metrics)
total_nrows += 1
else:
                print(f'{config} checkpoint does not exist: {ckpt_path}')
else:
            print(f'does not exist: {config}')
# 4 save or print results
if metrics_out:
mmcv.mkdir_or_exist(metrics_out)
mmcv.dump(result_dict,
osp.join(metrics_out, 'model_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
if args.excel:
        filename, suffix = osp.splitext(args.excel)
        xlrw.save(f'{filename}_o{suffix}')
        print(f'>>> Output {filename}_o{suffix}')
| 5,843 | 37.701987 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/.dev_scripts/batch_test_list.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# yapf: disable
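# Each entry below maps a model name to its config path, checkpoint filename,
# evaluation metric name(s) and the expected result; the batch-test scripts
# (e.g. benchmark_test_image.py, convert_test_benchmark_script.py) iterate
# over these dicts, and a model with several variants is a list of dicts.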
atss = dict(
config='configs/atss/atss_r50_fpn_1x_coco.py',
checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth',
eval='bbox',
metric=dict(bbox_mAP=39.4),
)
autoassign = dict(
config='configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
carafe = dict(
config='configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.6),
)
cascade_rcnn = [
dict(
config='configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py',
checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth',
eval='bbox',
metric=dict(bbox_mAP=40.3),
),
dict(
config='configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=41.2, segm_mAP=35.9),
),
]
cascade_rpn = dict(
config='configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
centripetalnet = dict(
config='configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py', # noqa
checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=44.7),
)
cornernet = dict(
config='configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py',
checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=41.2),
)
dcn = dict(
config='configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth',
eval='bbox',
metric=dict(bbox_mAP=41.3),
)
deformable_detr = dict(
config='configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=44.5),
)
detectors = dict(
config='configs/detectors/detectors_htc_r50_1x_coco.py',
checkpoint='detectors_htc_r50_1x_coco-329b1453.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=49.1, segm_mAP=42.6),
)
detr = dict(
config='configs/detr/detr_r50_8x2_150e_coco.py',
checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth',
eval='bbox',
metric=dict(bbox_mAP=40.1),
)
double_heads = dict(
config='configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth',
eval='bbox',
metric=dict(bbox_mAP=40.0),
)
dynamic_rcnn = dict(
config='configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth',
eval='bbox',
metric=dict(bbox_mAP=38.9),
)
empirical_attention = dict(
config='configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py', # noqa
checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.0),
)
faster_rcnn = dict(
config='configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
fcos = dict(
config='configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py', # noqa
checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
foveabox = dict(
config='configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth',
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
free_anchor = dict(
config='configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth',
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
fsaf = dict(
config='configs/fsaf/fsaf_r50_fpn_1x_coco.py',
checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth',
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
gcnet = dict(
config='configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py', # noqa
checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.2),
)
gfl = dict(
config='configs/gfl/gfl_r50_fpn_1x_coco.py',
checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth',
eval='bbox',
metric=dict(bbox_mAP=40.2),
)
gn = dict(
config='configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.1, segm_mAP=36.4),
)
gn_ws = dict(
config='configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth',
eval='bbox',
metric=dict(bbox_mAP=39.7),
)
grid_rcnn = dict(
config='configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
groie = dict(
config='configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
guided_anchoring = [
dict(
config='configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py', # noqa
checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth',
eval='bbox',
metric=dict(bbox_mAP=36.9),
),
dict(
config='configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
checkpoint='ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.6),
),
]
hrnet = dict(
config='configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py',
checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth',
eval='bbox',
metric=dict(bbox_mAP=36.9),
)
htc = dict(
config='configs/htc/htc_r50_fpn_1x_coco.py',
checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=42.3, segm_mAP=37.4),
)
libra_rcnn = dict(
config='configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth',
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
mask_rcnn = dict(
config='configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=34.7),
)
ms_rcnn = dict(
config='configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=36.0),
)
nas_fcos = dict(
config='configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py', # noqa
checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.4),
)
nas_fpn = dict(
config='configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth',
eval='bbox',
metric=dict(bbox_mAP=40.5),
)
paa = dict(
config='configs/paa/paa_r50_fpn_1x_coco.py',
checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
pafpn = dict(
config='configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
pisa = dict(
config='configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth',
eval='bbox',
metric=dict(bbox_mAP=38.4),
)
point_rend = dict(
config='configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.4, segm_mAP=36.3),
)
regnet = dict(
config='configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.7),
)
reppoints = dict(
config='configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py',
checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth',
eval='bbox',
metric=dict(bbox_mAP=37.0),
)
res2net = dict(
config='configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth',
eval='bbox',
metric=dict(bbox_mAP=43.0),
)
resnest = dict(
config='configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py', # noqa
checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=42.0),
)
retinanet = dict(
config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth',
eval='bbox',
metric=dict(bbox_mAP=36.5),
)
rpn = dict(
config='configs/rpn/rpn_r50_fpn_1x_coco.py',
checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth',
eval='proposal_fast',
metric=dict(AR_1000=58.2),
)
sabl = [
dict(
config='configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth',
eval='bbox',
metric=dict(bbox_mAP=37.7),
),
dict(
config='configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth',
eval='bbox',
metric=dict(bbox_mAP=39.9),
),
]
scnet = dict(
config='configs/scnet/scnet_r50_fpn_1x_coco.py',
checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth',
eval='bbox',
metric=dict(bbox_mAP=43.5),
)
sparse_rcnn = dict(
config='configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth',
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
ssd = [
dict(
config='configs/ssd/ssd300_coco.py',
checkpoint='ssd300_coco_20210803_015428-d231a06e.pth',
eval='bbox',
metric=dict(bbox_mAP=25.5),
),
dict(
config='configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py',
        checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth',  # noqa
eval='bbox',
metric=dict(bbox_mAP=21.3),
),
]
tridentnet = dict(
config='configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth',
eval='bbox',
metric=dict(bbox_mAP=37.6),
)
vfnet = dict(
config='configs/vfnet/vfnet_r50_fpn_1x_coco.py',
checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth',
eval='bbox',
metric=dict(bbox_mAP=41.6),
)
yolact = dict(
config='configs/yolact/yolact_r50_1x8_coco.py',
checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=31.2, segm_mAP=29.0),
)
yolo = dict(
config='configs/yolo/yolov3_d53_320_273e_coco.py',
checkpoint='yolov3_d53_320_273e_coco-421362b6.pth',
eval='bbox',
metric=dict(bbox_mAP=27.9),
)
yolof = dict(
config='configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth',
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
centernet = dict(
config='configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=29.5),
)
yolox = dict(
config='configs/yolox/yolox_tiny_8x8_300e_coco.py',
checkpoint='yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=31.5),
)
# yapf: enable
| 12,707 | 34.3 | 117 |
py
|
DSLA-DSLA
|
DSLA-DSLA/.dev_scripts/benchmark_test_image.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
from argparse import ArgumentParser
from mmcv import Config
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.utils import get_root_logger
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
parser.add_argument('--aug', action='store_true', help='aug test')
parser.add_argument('--model-name', help='model name to inference')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='the interval of show (s), 0 is block')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
return args
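# Example invocation (hypothetical paths), testing a single model and showing
# the detection result:
#   python .dev_scripts/benchmark_test_image.py \
#       .dev_scripts/batch_test_list.py /path/to/checkpoints \
#       --model-name atss --show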
def inference_model(config_name, checkpoint, args, logger=None):
cfg = Config.fromfile(config_name)
if args.aug:
if 'flip' in cfg.data.test.pipeline[1]:
cfg.data.test.pipeline[1].flip = True
else:
if logger is not None:
logger.error(f'{config_name}: unable to start aug test')
else:
print(f'{config_name}: unable to start aug test', flush=True)
model = init_detector(cfg, checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
if args.show:
show_result_pyplot(
model,
args.img,
result,
score_thr=args.score_thr,
wait_time=args.wait_time)
return result
# Sanity-check whether the inference code is correct on a sample image
def main(args):
config = Config.fromfile(args.config)
# test single model
if args.model_name:
if args.model_name in config:
model_infos = config[args.model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
model_info = model_infos[0]
config_name = model_info['config'].strip()
print(f'processing: {config_name}', flush=True)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, args)
return
        else:
            raise RuntimeError(
                f'model name {args.model_name} not found in config.')
    # test all models
logger = get_root_logger(
log_file='benchmark_test_image.log', log_level=logging.ERROR)
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, args, logger)
except Exception as e:
logger.error(f'{config_name} " : {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
| 3,674 | 34.679612 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/.dev_scripts/convert_test_benchmark_script.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model list to script')
parser.add_argument('config', help='test config file path')
parser.add_argument('--port', type=int, default=29666, help='dist port')
parser.add_argument(
'--work-dir',
default='tools/batch_test',
help='the dir to save metric')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
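# Example invocation (hypothetical paths), saving the generated script:
#   python .dev_scripts/convert_test_benchmark_script.py \
#       .dev_scripts/batch_test_list.py --out batch_test.sh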
def process_model_info(model_info, work_dir):
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
job_name = fname
work_dir = osp.join(work_dir, fname)
checkpoint = model_info['checkpoint'].strip()
if not isinstance(model_info['eval'], list):
evals = [model_info['eval']]
else:
evals = model_info['eval']
eval = ' '.join(evals)
return dict(
config=config,
job_name=job_name,
work_dir=work_dir,
checkpoint=checkpoint,
eval=eval)
def create_test_bash_info(commands, model_test_dict, port, script_name,
partition):
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
eval = model_test_dict['eval']
echo_info = f' \necho \'{config}\' &'
commands.append(echo_info)
commands.append('\n')
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {script_name} '
command_info += f'{partition} '
command_info += f'{job_name} '
command_info += f'{config} '
command_info += f'$CHECKPOINT_DIR/{checkpoint} '
command_info += f'--work-dir {work_dir} '
command_info += f'--eval {eval} '
command_info += f'--cfg-option dist_params.port={port} '
command_info += ' &'
commands.append(command_info)
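# Illustrative line produced for one model (paths and port hypothetical):
#   GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION \
#       atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py \
#       $CHECKPOINT_DIR/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth \
#       --work-dir tools/batch_test/atss_r50_fpn_1x_coco --eval bbox \
#       --cfg-option dist_params.port=29666 &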
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
f'Expected out file path suffix is .sh, but get .{out_suffix}'
assert args.out or args.run, \
        ('Please specify at least one operation (save or run the '
         'script) with the argument "--out" or "--run"')
commands = []
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
checkpoint_root = 'CHECKPOINT_DIR=$2 '
commands.append(checkpoint_root)
commands.append('\n')
script_name = osp.join('tools', 'slurm_test.sh')
port = args.port
work_dir = args.work_dir
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'])
model_test_dict = process_model_info(model_info, work_dir)
create_test_bash_info(commands, model_test_dict, port, script_name,
'$PARTITION')
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
| 3,604 | 29.041667 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_runtime/async_benchmark.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
import os
import shutil
import urllib
import mmcv
import torch
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.utils.contextmanagers import concurrent
from mmdet.utils.profiling import profile_time
async def main():
"""Benchmark between async and synchronous inference interfaces.
Sample runs for 20 demo images on K80 GPU, model - mask_rcnn_r50_fpn_1x:
async sync
7981.79 ms 9660.82 ms
8074.52 ms 9660.94 ms
7976.44 ms 9406.83 ms
Async variant takes about 0.83-0.85 of the time of the synchronous
interface.
"""
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(
project_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
checkpoint_file = os.path.join(
project_dir,
'checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth')
if not os.path.exists(checkpoint_file):
url = ('https://download.openmmlab.com/mmdetection/v2.0'
'/mask_rcnn/mask_rcnn_r50_fpn_1x_coco'
'/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth')
print(f'Downloading {url} ...')
local_filename, _ = urllib.request.urlretrieve(url)
os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True)
shutil.move(local_filename, checkpoint_file)
print(f'Saved as {checkpoint_file}')
else:
print(f'Using existing checkpoint {checkpoint_file}')
device = 'cuda:0'
model = init_detector(
config_file, checkpoint=checkpoint_file, device=device)
# queue is used for concurrent inference of multiple images
streamqueue = asyncio.Queue()
# queue size defines concurrency level
streamqueue_size = 4
for _ in range(streamqueue_size):
streamqueue.put_nowait(torch.cuda.Stream(device=device))
# test a single image and show the results
img = mmcv.imread(os.path.join(project_dir, 'demo/demo.jpg'))
# warmup
await async_inference_detector(model, img)
async def detect(img):
async with concurrent(streamqueue):
return await async_inference_detector(model, img)
num_of_images = 20
with profile_time('benchmark', 'async'):
tasks = [
asyncio.create_task(detect(img)) for _ in range(num_of_images)
]
async_results = await asyncio.gather(*tasks)
with torch.cuda.stream(torch.cuda.default_stream()):
with profile_time('benchmark', 'sync'):
sync_results = [
inference_detector(model, img) for _ in range(num_of_images)
]
result_dir = os.path.join(project_dir, 'demo')
model.show_result(
img,
async_results[0],
score_thr=0.5,
show=False,
out_file=os.path.join(result_dir, 'result_async.jpg'))
model.show_result(
img,
sync_results[0],
score_thr=0.5,
show=False,
out_file=os.path.join(result_dir, 'result_sync.jpg'))
if __name__ == '__main__':
asyncio.run(main())
| 3,215 | 30.223301 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_runtime/test_async.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Tests for async interface."""
import asyncio
import os
import sys
import asynctest
import mmcv
import torch
from mmdet.apis import async_inference_detector, init_detector
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import concurrent
class AsyncTestCase(asynctest.TestCase):
use_default_loop = False
forbid_get_event_loop = True
TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))
def _run_test_method(self, method):
result = method()
if asyncio.iscoroutine(result):
self.loop.run_until_complete(
asyncio.wait_for(result, timeout=self.TEST_TIMEOUT))
class MaskRCNNDetector:
def __init__(self,
model_config,
checkpoint=None,
streamqueue_size=3,
device='cuda:0'):
self.streamqueue_size = streamqueue_size
self.device = device
# build the model and load checkpoint
self.model = init_detector(
model_config, checkpoint=None, device=self.device)
self.streamqueue = None
async def init(self):
self.streamqueue = asyncio.Queue()
for _ in range(self.streamqueue_size):
stream = torch.cuda.Stream(device=self.device)
self.streamqueue.put_nowait(stream)
if sys.version_info >= (3, 7):
async def apredict(self, img):
if isinstance(img, str):
img = mmcv.imread(img)
async with concurrent(self.streamqueue):
result = await async_inference_detector(self.model, img)
return result
class AsyncInferenceTestCase(AsyncTestCase):
if sys.version_info >= (3, 7):
async def test_simple_inference(self):
if not torch.cuda.is_available():
import pytest
pytest.skip('test requires GPU and torch+cuda')
ori_grad_enabled = torch.is_grad_enabled()
root_dir = os.path.dirname(os.path.dirname(__name__))
model_config = os.path.join(
root_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
detector = MaskRCNNDetector(model_config)
await detector.init()
img_path = os.path.join(root_dir, 'demo/demo.jpg')
bboxes, _ = await detector.apredict(img_path)
self.assertTrue(bboxes)
            # async inference detector will hack grad_enabled,
# so restore here to avoid it to influence other tests
torch.set_grad_enabled(ori_grad_enabled)
| 2,608 | 30.059524 | 75 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_runtime/test_config.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from os.path import dirname, exists, join
from unittest.mock import Mock
import pytest
from mmdet.core import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import DATASETS
from mmdet.datasets.utils import NumClassCheckHook
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(__file__))
repo_dpath = join(repo_dpath, '..')
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _check_numclasscheckhook(detector, config_mod):
dummy_runner = Mock()
dummy_runner.model = detector
def get_dataset_name_classes(dataset):
# deal with `RepeatDataset`,`ConcatDataset`,`ClassBalancedDataset`..
if isinstance(dataset, (list, tuple)):
dataset = dataset[0]
while ('dataset' in dataset):
dataset = dataset['dataset']
# ConcatDataset
if isinstance(dataset, (list, tuple)):
dataset = dataset[0]
return dataset['type'], dataset.get('classes', None)
compatible_check = NumClassCheckHook()
dataset_name, CLASSES = get_dataset_name_classes(
config_mod['data']['train'])
if CLASSES is None:
CLASSES = DATASETS.get(dataset_name).CLASSES
dummy_runner.data_loader.dataset.CLASSES = CLASSES
compatible_check.before_train_epoch(dummy_runner)
dummy_runner.data_loader.dataset.CLASSES = None
compatible_check.before_train_epoch(dummy_runner)
dataset_name, CLASSES = get_dataset_name_classes(config_mod['data']['val'])
if CLASSES is None:
CLASSES = DATASETS.get(dataset_name).CLASSES
dummy_runner.data_loader.dataset.CLASSES = CLASSES
compatible_check.before_val_epoch(dummy_runner)
dummy_runner.data_loader.dataset.CLASSES = None
compatible_check.before_val_epoch(dummy_runner)
def _check_roi_head(config, head):
# check consistency between head_config and roi_head
assert config['type'] == head.__class__.__name__
# check roi_align
bbox_roi_cfg = config.bbox_roi_extractor
bbox_roi_extractor = head.bbox_roi_extractor
_check_roi_extractor(bbox_roi_cfg, bbox_roi_extractor)
# check bbox head infos
bbox_cfg = config.bbox_head
bbox_head = head.bbox_head
_check_bbox_head(bbox_cfg, bbox_head)
if head.with_mask:
# check roi_align
if config.mask_roi_extractor:
mask_roi_cfg = config.mask_roi_extractor
mask_roi_extractor = head.mask_roi_extractor
_check_roi_extractor(mask_roi_cfg, mask_roi_extractor,
bbox_roi_extractor)
# check mask head infos
mask_head = head.mask_head
mask_cfg = config.mask_head
_check_mask_head(mask_cfg, mask_head)
# check arch specific settings, e.g., cascade/htc
if config['type'] in ['CascadeRoIHead', 'HybridTaskCascadeRoIHead']:
assert config.num_stages == len(head.bbox_head)
assert config.num_stages == len(head.bbox_roi_extractor)
if head.with_mask:
assert config.num_stages == len(head.mask_head)
assert config.num_stages == len(head.mask_roi_extractor)
elif config['type'] in ['MaskScoringRoIHead']:
assert (hasattr(head, 'mask_iou_head')
and head.mask_iou_head is not None)
mask_iou_cfg = config.mask_iou_head
mask_iou_head = head.mask_iou_head
assert (mask_iou_cfg.fc_out_channels ==
mask_iou_head.fc_mask_iou.in_features)
elif config['type'] in ['GridRoIHead']:
grid_roi_cfg = config.grid_roi_extractor
grid_roi_extractor = head.grid_roi_extractor
_check_roi_extractor(grid_roi_cfg, grid_roi_extractor,
bbox_roi_extractor)
        assert config.grid_head.grid_points == head.grid_head.grid_points
def _check_roi_extractor(config, roi_extractor, prev_roi_extractor=None):
import torch.nn as nn
# Separate roi_extractor and prev_roi_extractor checks for flexibility
if isinstance(roi_extractor, nn.ModuleList):
roi_extractor = roi_extractor[0]
if prev_roi_extractor and isinstance(prev_roi_extractor, nn.ModuleList):
prev_roi_extractor = prev_roi_extractor[0]
assert (len(config.featmap_strides) == len(roi_extractor.roi_layers))
assert (config.out_channels == roi_extractor.out_channels)
from torch.nn.modules.utils import _pair
assert (_pair(config.roi_layer.output_size) ==
roi_extractor.roi_layers[0].output_size)
if 'use_torchvision' in config.roi_layer:
assert (config.roi_layer.use_torchvision ==
roi_extractor.roi_layers[0].use_torchvision)
elif 'aligned' in config.roi_layer:
assert (
config.roi_layer.aligned == roi_extractor.roi_layers[0].aligned)
if prev_roi_extractor:
assert (roi_extractor.roi_layers[0].aligned ==
prev_roi_extractor.roi_layers[0].aligned)
assert (roi_extractor.roi_layers[0].use_torchvision ==
prev_roi_extractor.roi_layers[0].use_torchvision)
def _check_mask_head(mask_cfg, mask_head):
import torch.nn as nn
if isinstance(mask_cfg, list):
for single_mask_cfg, single_mask_head in zip(mask_cfg, mask_head):
_check_mask_head(single_mask_cfg, single_mask_head)
elif isinstance(mask_head, nn.ModuleList):
for single_mask_head in mask_head:
_check_mask_head(mask_cfg, single_mask_head)
else:
assert mask_cfg['type'] == mask_head.__class__.__name__
assert mask_cfg.in_channels == mask_head.in_channels
class_agnostic = mask_cfg.get('class_agnostic', False)
out_dim = (1 if class_agnostic else mask_cfg.num_classes)
if hasattr(mask_head, 'conv_logits'):
assert (mask_cfg.conv_out_channels ==
mask_head.conv_logits.in_channels)
assert mask_head.conv_logits.out_channels == out_dim
else:
assert mask_cfg.fc_out_channels == mask_head.fc_logits.in_features
assert (mask_head.fc_logits.out_features == out_dim *
mask_head.output_area)
def _check_bbox_head(bbox_cfg, bbox_head):
import torch.nn as nn
if isinstance(bbox_cfg, list):
for single_bbox_cfg, single_bbox_head in zip(bbox_cfg, bbox_head):
_check_bbox_head(single_bbox_cfg, single_bbox_head)
elif isinstance(bbox_head, nn.ModuleList):
for single_bbox_head in bbox_head:
_check_bbox_head(bbox_cfg, single_bbox_head)
else:
assert bbox_cfg['type'] == bbox_head.__class__.__name__
if bbox_cfg['type'] == 'SABLHead':
assert bbox_cfg.cls_in_channels == bbox_head.cls_in_channels
assert bbox_cfg.reg_in_channels == bbox_head.reg_in_channels
cls_out_channels = bbox_cfg.get('cls_out_channels', 1024)
assert (cls_out_channels == bbox_head.fc_cls.in_features)
assert (bbox_cfg.num_classes + 1 == bbox_head.fc_cls.out_features)
elif bbox_cfg['type'] == 'DIIHead':
assert bbox_cfg['num_ffn_fcs'] == bbox_head.ffn.num_fcs
# 3 means FC and LN and Relu
assert bbox_cfg['num_cls_fcs'] == len(bbox_head.cls_fcs) // 3
assert bbox_cfg['num_reg_fcs'] == len(bbox_head.reg_fcs) // 3
assert bbox_cfg['in_channels'] == bbox_head.in_channels
assert bbox_cfg['in_channels'] == bbox_head.fc_cls.in_features
assert bbox_cfg['in_channels'] == bbox_head.fc_reg.in_features
assert bbox_cfg['in_channels'] == bbox_head.attention.embed_dims
assert bbox_cfg[
'feedforward_channels'] == bbox_head.ffn.feedforward_channels
else:
assert bbox_cfg.in_channels == bbox_head.in_channels
with_cls = bbox_cfg.get('with_cls', True)
if with_cls:
fc_out_channels = bbox_cfg.get('fc_out_channels', 2048)
assert (fc_out_channels == bbox_head.fc_cls.in_features)
if bbox_head.custom_cls_channels:
assert (bbox_head.loss_cls.get_cls_channels(
bbox_head.num_classes) == bbox_head.fc_cls.out_features
)
else:
assert (bbox_cfg.num_classes +
1 == bbox_head.fc_cls.out_features)
with_reg = bbox_cfg.get('with_reg', True)
if with_reg:
out_dim = (4 if bbox_cfg.reg_class_agnostic else 4 *
bbox_cfg.num_classes)
assert bbox_head.fc_reg.out_features == out_dim
def _check_anchorhead(config, head):
# check consistency between head_config and roi_head
assert config['type'] == head.__class__.__name__
assert config.in_channels == head.in_channels
    num_classes = (
        config.num_classes - 1
        if config.loss_cls.get('use_sigmoid', False) else config.num_classes)
if config['type'] == 'ATSSHead':
assert (config.feat_channels == head.atss_cls.in_channels)
assert (config.feat_channels == head.atss_reg.in_channels)
assert (config.feat_channels == head.atss_centerness.in_channels)
elif config['type'] == 'SABLRetinaHead':
assert (config.feat_channels == head.retina_cls.in_channels)
assert (config.feat_channels == head.retina_bbox_reg.in_channels)
assert (config.feat_channels == head.retina_bbox_cls.in_channels)
else:
assert (config.in_channels == head.conv_cls.in_channels)
assert (config.in_channels == head.conv_reg.in_channels)
assert (head.conv_cls.out_channels == num_classes * head.num_anchors)
        assert head.conv_reg.out_channels == 4 * head.num_anchors
# Only tests a representative subset of configurations
# TODO: test pipelines using Albu; current Albu throws None given empty GT
@pytest.mark.parametrize(
'config_rpath',
[
'wider_face/ssd300_wider_face.py',
'pascal_voc/ssd300_voc0712.py',
'pascal_voc/ssd512_voc0712.py',
# 'albu_example/mask_rcnn_r50_fpn_1x.py',
'foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py',
'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
'mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py'
])
def test_config_data_pipeline(config_rpath):
"""Test whether the data pipeline is valid and can process corner cases.
CommandLine:
        xdoctest -m tests/test_runtime/test_config.py \
            test_config_data_pipeline
"""
from mmcv import Config
from mmdet.datasets.pipelines import Compose
import numpy as np
config_dpath = _get_config_directory()
print(f'Found config_dpath = {config_dpath}')
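    # dummy_masks fabricates random instance masks in either bitmap or
    # polygon form, matching what the annotation-loading step would produce.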
def dummy_masks(h, w, num_obj=3, mode='bitmap'):
assert mode in ('polygon', 'bitmap')
if mode == 'bitmap':
masks = np.random.randint(0, 2, (num_obj, h, w), dtype=np.uint8)
masks = BitmapMasks(masks, h, w)
else:
masks = []
for i in range(num_obj):
masks.append([])
masks[-1].append(
np.random.uniform(0, min(h - 1, w - 1), (8 + 4 * i, )))
masks[-1].append(
np.random.uniform(0, min(h - 1, w - 1), (10 + 4 * i, )))
masks = PolygonMasks(masks, h, w)
return masks
config_fpath = join(config_dpath, config_rpath)
cfg = Config.fromfile(config_fpath)
# remove loading pipeline
loading_pipeline = cfg.train_pipeline.pop(0)
loading_ann_pipeline = cfg.train_pipeline.pop(0)
cfg.test_pipeline.pop(0)
train_pipeline = Compose(cfg.train_pipeline)
test_pipeline = Compose(cfg.test_pipeline)
print(f'Building data pipeline, config_fpath = {config_fpath}')
print(f'Test training data pipeline: \n{train_pipeline!r}')
img = np.random.randint(0, 255, size=(888, 666, 3), dtype=np.uint8)
if loading_pipeline.get('to_float32', False):
img = img.astype(np.float32)
mode = 'bitmap' if loading_ann_pipeline.get('poly2mask',
True) else 'polygon'
results = dict(
filename='test_img.png',
ori_filename='test_img.png',
img=img,
img_shape=img.shape,
ori_shape=img.shape,
gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32),
gt_labels=np.array([1], dtype=np.int64),
gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
)
results['img_fields'] = ['img']
results['bbox_fields'] = ['gt_bboxes']
results['mask_fields'] = ['gt_masks']
output_results = train_pipeline(results)
assert output_results is not None
print(f'Test testing data pipeline: \n{test_pipeline!r}')
results = dict(
filename='test_img.png',
ori_filename='test_img.png',
img=img,
img_shape=img.shape,
ori_shape=img.shape,
gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32),
gt_labels=np.array([1], dtype=np.int64),
gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
)
results['img_fields'] = ['img']
results['bbox_fields'] = ['gt_bboxes']
results['mask_fields'] = ['gt_masks']
output_results = test_pipeline(results)
assert output_results is not None
# test empty GT
print('Test empty GT with training data pipeline: '
f'\n{train_pipeline!r}')
results = dict(
filename='test_img.png',
ori_filename='test_img.png',
img=img,
img_shape=img.shape,
ori_shape=img.shape,
gt_bboxes=np.zeros((0, 4), dtype=np.float32),
gt_labels=np.array([], dtype=np.int64),
gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode),
)
results['img_fields'] = ['img']
results['bbox_fields'] = ['gt_bboxes']
results['mask_fields'] = ['gt_masks']
output_results = train_pipeline(results)
assert output_results is not None
print(f'Test empty GT with testing data pipeline: \n{test_pipeline!r}')
results = dict(
filename='test_img.png',
ori_filename='test_img.png',
img=img,
img_shape=img.shape,
ori_shape=img.shape,
gt_bboxes=np.zeros((0, 4), dtype=np.float32),
gt_labels=np.array([], dtype=np.int64),
gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode),
)
results['img_fields'] = ['img']
results['bbox_fields'] = ['gt_bboxes']
results['mask_fields'] = ['gt_masks']
output_results = test_pipeline(results)
assert output_results is not None
| 15,152 | 39.733871 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_runtime/test_eval_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import unittest.mock as mock
from collections import OrderedDict
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmcv.runner import EpochBasedRunner, build_optimizer
from mmcv.utils import get_logger
from torch.utils.data import DataLoader, Dataset
from mmdet.core import DistEvalHook, EvalHook
class ExampleDataset(Dataset):
def __init__(self):
self.index = 0
self.eval_result = [0.1, 0.4, 0.3, 0.7, 0.2, 0.05, 0.4, 0.6]
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results
def __len__(self):
return 1
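    # mock.create_autospec replaces `evaluate` with an autospecced mock, so
    # calls can be inspected without computing any real metric.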
@mock.create_autospec
def evaluate(self, results, logger=None):
pass
class EvalDataset(ExampleDataset):
def evaluate(self, results, logger=None):
mean_ap = self.eval_result[self.index]
output = OrderedDict(mAP=mean_ap, index=self.index, score=mean_ap)
self.index += 1
return output
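# EvalDataset walks through eval_result in order; the maximum (0.7) occurs at
# index 3, i.e. the 4th evaluation, which is why the best-checkpoint tests
# below expect best_mAP_epoch_4.pth (and, with rule='less', the minimum 0.05
# at index 5 yields best_mAP_epoch_6.pth).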
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Linear(1, 1)
self.test_cfg = None
def forward(self, imgs, rescale=False, return_loss=False):
return imgs
def train_step(self, data_batch, optimizer, **kwargs):
outputs = {
'loss': 0.5,
'log_vars': {
'accuracy': 0.98
},
'num_samples': 1
}
return outputs
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
@patch('mmdet.apis.single_gpu_test', MagicMock)
@patch('mmdet.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookCls', (EvalHook, DistEvalHook))
def test_eval_hook(EvalHookCls):
with pytest.raises(TypeError):
# dataloader must be a pytorch DataLoader
test_dataset = ExampleDataset()
data_loader = [
DataLoader(
test_dataset,
batch_size=1,
sampler=None,
                num_workers=0,
shuffle=False)
]
EvalHookCls(data_loader)
with pytest.raises(KeyError):
# rule must be in keys of rule_map
test_dataset = ExampleDataset()
data_loader = DataLoader(
test_dataset,
batch_size=1,
sampler=None,
num_workers=0,
shuffle=False)
EvalHookCls(data_loader, save_best='auto', rule='unsupport')
with pytest.raises(ValueError):
# key_indicator must be valid when rule_map is None
test_dataset = ExampleDataset()
data_loader = DataLoader(
test_dataset,
batch_size=1,
sampler=None,
num_workers=0,
shuffle=False)
EvalHookCls(data_loader, save_best='unsupport')
optimizer_cfg = dict(
type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
optimizer = build_optimizer(model, optimizer_cfg)
data_loader = DataLoader(test_dataset, batch_size=1)
eval_hook = EvalHookCls(data_loader, save_best=None)
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 1)
assert runner.meta is None or 'best_score' not in runner.meta[
'hook_msgs']
assert runner.meta is None or 'best_ckpt' not in runner.meta[
'hook_msgs']
    # when `save_best` is set to 'auto', the first metric will be used.
loader = DataLoader(EvalDataset(), batch_size=1)
model = ExampleModel()
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EvalHookCls(data_loader, interval=1, save_best='auto')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.7
loader = DataLoader(EvalDataset(), batch_size=1)
model = ExampleModel()
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EvalHookCls(data_loader, interval=1, save_best='mAP')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.7
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EvalHookCls(
data_loader, interval=1, save_best='score', rule='greater')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
real_path = osp.join(tmpdir, 'best_score_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.7
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EvalHookCls(data_loader, save_best='mAP', rule='less')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
real_path = osp.join(tmpdir, 'best_mAP_epoch_6.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.05
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EvalHookCls(data_loader, save_best='mAP')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 2)
real_path = osp.join(tmpdir, 'best_mAP_epoch_2.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.4
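        # Resuming from latest.pth (epoch 2, best mAP 0.4 so far) and training
        # up to epoch 8 should rediscover the global best of 0.7 at epoch 4.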
resume_from = osp.join(tmpdir, 'latest.pth')
loader = DataLoader(ExampleDataset(), batch_size=1)
eval_hook = EvalHookCls(data_loader, save_best='mAP')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.resume(resume_from)
runner.run([loader], [('train', 1)], 8)
real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.7
| 8,590 | 32.956522 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_runtime/test_fp16.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.runner import auto_fp16, force_fp32
from mmcv.runner.fp16_utils import cast_tensor_type
def test_cast_tensor_type():
inputs = torch.FloatTensor([5.])
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, torch.Tensor)
assert outputs.dtype == dst_type
inputs = 'tensor'
src_type = str
dst_type = str
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, str)
inputs = np.array([5.])
src_type = np.ndarray
dst_type = np.ndarray
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, np.ndarray)
inputs = dict(
tensor_a=torch.FloatTensor([1.]), tensor_b=torch.FloatTensor([2.]))
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, dict)
assert outputs['tensor_a'].dtype == dst_type
assert outputs['tensor_b'].dtype == dst_type
inputs = [torch.FloatTensor([1.]), torch.FloatTensor([2.])]
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, list)
assert outputs[0].dtype == dst_type
assert outputs[1].dtype == dst_type
inputs = 5
outputs = cast_tensor_type(inputs, None, None)
assert isinstance(outputs, int)
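# auto_fp16/force_fp32 are no-ops until the owning module sets
# `fp16_enabled = True`; after that they cast the arguments listed in
# `apply_to` (all tensor args by default) to half/full precision, and
# out_fp32/out_fp16 cast the return values back the other way.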
def test_auto_fp16():
with pytest.raises(TypeError):
# ExampleObject is not a subclass of nn.Module
class ExampleObject:
@auto_fp16()
def __call__(self, x):
return x
model = ExampleObject()
input_x = torch.ones(1, dtype=torch.float32)
model(input_x)
# apply to all input args
class ExampleModule(nn.Module):
@auto_fp16()
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
# apply to specified input args
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', ))
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
# apply to optional input args
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', 'y'))
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.float32
# out_fp32=True
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', 'y'), out_fp32=True)
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
def test_force_fp32():
with pytest.raises(TypeError):
# ExampleObject is not a subclass of nn.Module
class ExampleObject:
@force_fp32()
def __call__(self, x):
return x
model = ExampleObject()
input_x = torch.ones(1, dtype=torch.float32)
model(input_x)
# apply to all input args
class ExampleModule(nn.Module):
@force_fp32()
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.half)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
# apply to specified input args
class ExampleModule(nn.Module):
@force_fp32(apply_to=('x', ))
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.half)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.half
# apply to optional input args
class ExampleModule(nn.Module):
@force_fp32(apply_to=('x', 'y'))
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.half)
input_z = torch.ones(1, dtype=torch.half)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.half
# out_fp16=True
class ExampleModule(nn.Module):
@force_fp32(apply_to=('x', 'y'), out_fp16=True)
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.half)
input_z = torch.ones(1, dtype=torch.half)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
| 9,746 | 31.274834 | 75 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.utils import digit_version
from mmdet.models.losses import (BalancedL1Loss, CrossEntropyLoss, DiceLoss,
DistributionFocalLoss, FocalLoss,
GaussianFocalLoss,
KnowledgeDistillationKLDivLoss, L1Loss,
MSELoss, QualityFocalLoss, SeesawLoss,
SmoothL1Loss, VarifocalLoss)
from mmdet.models.losses.ghm_loss import GHMC, GHMR
from mmdet.models.losses.iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss,
GIoULoss, IoULoss)
@pytest.mark.parametrize(
'loss_class', [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss])
def test_iou_type_loss_zeros_weight(loss_class):
pred = torch.rand((10, 4))
target = torch.rand((10, 4))
weight = torch.zeros(10)
loss = loss_class()(pred, target, weight)
assert loss == 0.
@pytest.mark.parametrize('loss_class', [
BalancedL1Loss, BoundedIoULoss, CIoULoss, CrossEntropyLoss, DIoULoss,
FocalLoss, DistributionFocalLoss, MSELoss, SeesawLoss, GaussianFocalLoss,
GIoULoss, IoULoss, L1Loss, QualityFocalLoss, VarifocalLoss, GHMR, GHMC,
SmoothL1Loss, KnowledgeDistillationKLDivLoss, DiceLoss
])
def test_loss_with_reduction_override(loss_class):
pred = torch.rand((10, 4))
    target = torch.rand((10, 4))
weight = None
with pytest.raises(AssertionError):
        # reduction_override must be one of [None, 'none', 'mean', 'sum'];
        # any other value is rejected with an AssertionError
reduction_override = True
loss_class()(
pred, target, weight, reduction_override=reduction_override)
@pytest.mark.parametrize('loss_class', [
IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, MSELoss, L1Loss,
SmoothL1Loss, BalancedL1Loss
])
@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])
def test_regression_losses(loss_class, input_shape):
pred = torch.rand(input_shape)
target = torch.rand(input_shape)
weight = torch.rand(input_shape)
# Test loss forward
loss = loss_class()(pred, target)
assert isinstance(loss, torch.Tensor)
# Test loss forward with weight
loss = loss_class()(pred, target, weight)
assert isinstance(loss, torch.Tensor)
# Test loss forward with reduction_override
loss = loss_class()(pred, target, reduction_override='mean')
assert isinstance(loss, torch.Tensor)
# Test loss forward with avg_factor
loss = loss_class()(pred, target, avg_factor=10)
assert isinstance(loss, torch.Tensor)
with pytest.raises(ValueError):
# loss can evaluate with avg_factor only if
# reduction is None, 'none' or 'mean'.
reduction_override = 'sum'
loss_class()(
pred, target, avg_factor=10, reduction_override=reduction_override)
# Test loss forward with avg_factor and reduction
for reduction_override in [None, 'none', 'mean']:
loss_class()(
pred, target, avg_factor=10, reduction_override=reduction_override)
assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [FocalLoss, CrossEntropyLoss])
@pytest.mark.parametrize('input_shape', [(10, 5), (0, 5)])
def test_classification_losses(loss_class, input_shape):
if input_shape[0] == 0 and digit_version(
torch.__version__) < digit_version('1.5.0'):
pytest.skip(
            f'CELoss in PyTorch {torch.__version__} does not support empty '
            'tensor.')
pred = torch.rand(input_shape)
target = torch.randint(0, 5, (input_shape[0], ))
# Test loss forward
loss = loss_class()(pred, target)
assert isinstance(loss, torch.Tensor)
# Test loss forward with reduction_override
loss = loss_class()(pred, target, reduction_override='mean')
assert isinstance(loss, torch.Tensor)
# Test loss forward with avg_factor
loss = loss_class()(pred, target, avg_factor=10)
assert isinstance(loss, torch.Tensor)
with pytest.raises(ValueError):
# loss can evaluate with avg_factor only if
# reduction is None, 'none' or 'mean'.
reduction_override = 'sum'
loss_class()(
pred, target, avg_factor=10, reduction_override=reduction_override)
# Test loss forward with avg_factor and reduction
for reduction_override in [None, 'none', 'mean']:
loss_class()(
pred, target, avg_factor=10, reduction_override=reduction_override)
assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [GHMR])
@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])
def test_GHMR_loss(loss_class, input_shape):
pred = torch.rand(input_shape)
target = torch.rand(input_shape)
weight = torch.rand(input_shape)
# Test loss forward
loss = loss_class()(pred, target, weight)
assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('use_sigmoid', [True, False])
def test_loss_with_ignore_index(use_sigmoid):
# Test cross_entropy loss
loss_class = CrossEntropyLoss(
use_sigmoid=use_sigmoid, use_mask=False, ignore_index=255)
pred = torch.rand((10, 5))
target = torch.randint(0, 5, (10, ))
ignored_indices = torch.randint(0, 10, (2, ), dtype=torch.long)
target[ignored_indices] = 255
# Test loss forward with default ignore
loss_with_ignore = loss_class(pred, target, reduction_override='sum')
assert isinstance(loss_with_ignore, torch.Tensor)
# Test loss forward with forward ignore
target[ignored_indices] = 250
loss_with_forward_ignore = loss_class(
pred, target, ignore_index=250, reduction_override='sum')
assert isinstance(loss_with_forward_ignore, torch.Tensor)
# Verify correctness
not_ignored_indices = (target != 250)
pred = pred[not_ignored_indices]
target = target[not_ignored_indices]
loss = loss_class(pred, target, reduction_override='sum')
assert torch.allclose(loss, loss_with_ignore)
assert torch.allclose(loss, loss_with_forward_ignore)
def test_dice_loss():
loss_class = DiceLoss
pred = torch.rand((10, 4, 4))
target = torch.rand((10, 4, 4))
weight = torch.rand((10))
# Test loss forward
loss = loss_class()(pred, target)
assert isinstance(loss, torch.Tensor)
# Test loss forward with weight
loss = loss_class()(pred, target, weight)
assert isinstance(loss, torch.Tensor)
# Test loss forward with reduction_override
loss = loss_class()(pred, target, reduction_override='mean')
assert isinstance(loss, torch.Tensor)
# Test loss forward with avg_factor
loss = loss_class()(pred, target, avg_factor=10)
assert isinstance(loss, torch.Tensor)
with pytest.raises(ValueError):
# loss can evaluate with avg_factor only if
# reduction is None, 'none' or 'mean'.
reduction_override = 'sum'
loss_class()(
pred, target, avg_factor=10, reduction_override=reduction_override)
# Test loss forward with avg_factor and reduction
for reduction_override in [None, 'none', 'mean']:
loss_class()(
pred, target, avg_factor=10, reduction_override=reduction_override)
assert isinstance(loss, torch.Tensor)
# Test loss forward with has_acted=False and use_sigmoid=False
with pytest.raises(NotImplementedError):
loss_class(use_sigmoid=False, activate=True)(pred, target)
# Test loss forward with weight.ndim != loss.ndim
with pytest.raises(AssertionError):
weight = torch.rand((2, 8))
loss_class()(pred, target, weight)
# Test loss forward with len(weight) != len(pred)
with pytest.raises(AssertionError):
weight = torch.rand((8))
loss_class()(pred, target, weight)
| 7,860 | 35.393519 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_forward.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""pytest tests/test_forward.py."""
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
def _replace_r50_with_r18(model):
"""Replace ResNet50 with ResNet18 in config."""
model = copy.deepcopy(model)
if model.backbone.type == 'ResNet':
model.backbone.depth = 18
model.backbone.base_channels = 2
model.neck.in_channels = [2, 4, 8, 16]
return model
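# The reduced depth/base_channels keep these forward tests cheap; the neck
# in_channels are shrunk to match the narrower ResNet18 stage widths.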
def test_sparse_rcnn_forward():
config_path = 'sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py'
model = _get_detector_cfg(config_path)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
detector.init_weights()
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[5])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train with non-empty truth batch
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_bboxes = [item for item in gt_bboxes]
gt_labels = mm_inputs['gt_labels']
gt_labels = [item for item in gt_labels]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
detector.forward_dummy(imgs)
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_bboxes = [item for item in gt_bboxes]
gt_labels = mm_inputs['gt_labels']
gt_labels = [item for item in gt_labels]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
# test empty proposal in roi_head
with torch.no_grad():
# test no proposal in the whole batch
detector.roi_head.simple_test([imgs[0][None, :]], torch.empty(
(1, 0, 4)), torch.empty((1, 100, 4)), [img_metas[0]],
torch.ones((1, 4)))
def test_rpn_forward():
model = _get_detector_cfg('rpn/rpn_r50_fpn_1x_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
gt_bboxes = mm_inputs['gt_bboxes']
losses = detector.forward(
imgs, img_metas, gt_bboxes=gt_bboxes, return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
@pytest.mark.parametrize(
'cfg_file',
[
'reppoints/reppoints_moment_r50_fpn_1x_coco.py',
'retinanet/retinanet_r50_fpn_1x_coco.py',
'guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py',
'ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py',
'foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
# 'free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
# 'atss/atss_r50_fpn_1x_coco.py', # not ready for topk
'yolo/yolov3_mobilenetv2_320_300e_coco.py',
'yolox/yolox_tiny_8x8_300e_coco.py'
])
def test_single_stage_forward_gpu(cfg_file):
if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (2, 3, 128, 128)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
detector = detector.cuda()
imgs = imgs.cuda()
# Test forward train
gt_bboxes = [b.cuda() for b in mm_inputs['gt_bboxes']]
gt_labels = [g.cuda() for g in mm_inputs['gt_labels']]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
def test_faster_rcnn_ohem_forward():
model = _get_detector_cfg(
'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
# Test forward train with a non-empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test RoI forward train with an empty proposals
feature = detector.extract_feat(imgs[0][None, :])
losses = detector.roi_head.forward_train(
feature,
img_metas, [torch.empty((0, 5))],
gt_bboxes=gt_bboxes,
gt_labels=gt_labels)
assert isinstance(losses, dict)
@pytest.mark.parametrize(
'cfg_file',
[
# 'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
# 'grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
# 'ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py',
# 'htc/htc_r50_fpn_1x_coco.py',
# 'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
# 'scnet/scnet_r50_fpn_20e_coco.py',
# 'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
])
def test_two_stage_forward(cfg_file):
models_with_semantic = [
'htc/htc_r50_fpn_1x_coco.py',
'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
'scnet/scnet_r50_fpn_20e_coco.py',
]
if cfg_file in models_with_semantic:
with_semantic = True
else:
with_semantic = False
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
    # Save compute cost by shrinking the LVIS seesaw-loss head to 80 classes
if cfg_file in [
'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
]:
model.roi_head.bbox_head.num_classes = 80
model.roi_head.bbox_head.loss_cls.num_classes = 80
model.roi_head.mask_head.num_classes = 80
model.test_cfg.rcnn.score_thr = 0.05
model.test_cfg.rcnn.max_per_img = 100
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 128, 128)
# Test forward train with a non-empty truth batch
mm_inputs = _demo_mm_inputs(
input_shape, num_items=[10], with_semantic=with_semantic)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(
input_shape, num_items=[0], with_semantic=with_semantic)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test RoI forward train with an empty proposals
if cfg_file in [
'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py' # noqa: E501
]:
mm_inputs.pop('gt_semantic_seg')
feature = detector.extract_feat(imgs[0][None, :])
losses = detector.roi_head.forward_train(feature, img_metas,
[torch.empty(
(0, 5))], **mm_inputs)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
cascade_models = [
'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'htc/htc_r50_fpn_1x_coco.py',
'scnet/scnet_r50_fpn_20e_coco.py',
]
# test empty proposal in roi_head
with torch.no_grad():
# test no proposal in the whole batch
detector.simple_test(
imgs[0][None, :], [img_metas[0]], proposals=[torch.empty((0, 4))])
# test no proposal of aug
features = detector.extract_feats([imgs[0][None, :]] * 2)
detector.roi_head.aug_test(features, [torch.empty((0, 4))] * 2,
[[img_metas[0]]] * 2)
# test rcnn_test_cfg is None
if cfg_file not in cascade_models:
feature = detector.extract_feat(imgs[0][None, :])
bboxes, scores = detector.roi_head.simple_test_bboxes(
feature, [img_metas[0]], [torch.empty((0, 4))], None)
assert all([bbox.shape == torch.Size((0, 4)) for bbox in bboxes])
assert all([
score.shape == torch.Size(
(0, detector.roi_head.bbox_head.fc_cls.out_features))
for score in scores
])
        # test no proposals in some of the images
x1y1 = torch.randint(1, 100, (10, 2)).float()
# x2y2 must be greater than x1y1
x2y2 = x1y1 + torch.randint(1, 100, (10, 2))
detector.simple_test(
imgs[0][None, :].repeat(2, 1, 1, 1), [img_metas[0]] * 2,
proposals=[torch.empty((0, 4)),
torch.cat([x1y1, x2y2], dim=-1)])
# test no proposal of aug
detector.roi_head.aug_test(
features, [torch.cat([x1y1, x2y2], dim=-1),
torch.empty((0, 4))], [[img_metas[0]]] * 2)
# test rcnn_test_cfg is None
if cfg_file not in cascade_models:
feature = detector.extract_feat(imgs[0][None, :].repeat(
2, 1, 1, 1))
bboxes, scores = detector.roi_head.simple_test_bboxes(
feature, [img_metas[0]] * 2,
[torch.empty((0, 4)),
torch.cat([x1y1, x2y2], dim=-1)], None)
assert bboxes[0].shape == torch.Size((0, 4))
assert scores[0].shape == torch.Size(
(0, detector.roi_head.bbox_head.fc_cls.out_features))
@pytest.mark.parametrize(
'cfg_file', ['ghm/retinanet_ghm_r50_fpn_1x_coco.py', 'ssd/ssd300_coco.py'])
def test_single_stage_forward_cpu(cfg_file):
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 300, 300)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
num_items=None, num_classes=10,
with_semantic=False): # yapf: disable
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_items (None | List[int]):
specifies the number of boxes in each batch item
        num_classes (int):
            number of different labels a box might have
        with_semantic (bool):
            whether to also return a dummy semantic segmentation map
    """
from mmdet.core import BitmapMasks
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]),
'flip': False,
'flip_direction': None,
} for _ in range(N)]
gt_bboxes = []
gt_labels = []
gt_masks = []
for batch_idx in range(N):
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[batch_idx]
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
class_idxs = rng.randint(1, num_classes, size=num_boxes)
gt_bboxes.append(torch.FloatTensor(boxes))
gt_labels.append(torch.LongTensor(class_idxs))
mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
gt_masks.append(BitmapMasks(mask, H, W))
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_bboxes': gt_bboxes,
'gt_labels': gt_labels,
'gt_bboxes_ignore': None,
'gt_masks': gt_masks,
}
if with_semantic:
        # assume gt_semantic_seg uses a 1/8 scale of the img
gt_semantic_seg = np.random.randint(
0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8)
mm_inputs.update(
{'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)})
return mm_inputs
def test_yolact_forward():
model = _get_detector_cfg('yolact/yolact_r50_1x8_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
gt_masks = mm_inputs['gt_masks']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks,
return_loss=True)
assert isinstance(losses, dict)
# Test forward dummy for get_flops
detector.forward_dummy(imgs)
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
def test_detr_forward():
model = _get_detector_cfg('detr/detr_r50_8x2_150e_coco.py')
model.backbone.depth = 18
model.bbox_head.in_channels = 512
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train with non-empty truth batch
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
def test_inference_detector():
from mmdet.apis import inference_detector
from mmdet.models import build_detector
from mmcv import ConfigDict
# small RetinaNet
num_class = 3
model_dict = dict(
type='RetinaNet',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3, ),
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch'),
neck=None,
bbox_head=dict(
type='RetinaHead',
num_classes=num_class,
in_channels=512,
stacked_convs=1,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
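    # The model above is a deliberately tiny RetinaNet: no neck, a single
    # stride-32 feature level and one anchor ratio, so inference runs
    # quickly on CPU.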
rng = np.random.RandomState(0)
img1 = rng.rand(100, 100, 3)
img2 = rng.rand(100, 100, 3)
model = build_detector(ConfigDict(model_dict))
config = _get_config_module('retinanet/retinanet_r50_fpn_1x_coco.py')
model.cfg = config
# test single image
result = inference_detector(model, img1)
assert len(result) == num_class
    # test multiple images
result = inference_detector(model, [img1, img2])
assert len(result) == 2 and len(result[0]) == num_class
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_yolox_random_size():
from mmdet.models import build_detector
model = _get_detector_cfg('yolox/yolox_tiny_8x8_300e_coco.py')
model.random_size_range = (2, 2)
model.input_size = (64, 96)
model.random_size_interval = 1
detector = build_detector(model)
input_shape = (1, 3, 64, 64)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train with non-empty truth batch
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert detector._input_size == (64, 96)
| 23,230 | 32.092593 | 110 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_necks.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.necks import (FPN, YOLOXPAFPN, ChannelMapper, CTResNetNeck,
DilatedEncoder, SSDNeck, YOLOV3Neck)
def test_fpn():
"""Tests fpn."""
s = 64
in_channels = [8, 16, 32, 64]
feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8]
out_channels = 8
# `num_outs` is not equal to len(in_channels) - start_level
with pytest.raises(AssertionError):
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=1,
num_outs=2)
# `end_level` is larger than len(in_channels) - 1
with pytest.raises(AssertionError):
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=1,
end_level=4,
num_outs=2)
# `num_outs` is not equal to end_level - start_level
with pytest.raises(AssertionError):
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=1,
end_level=3,
num_outs=1)
# Invalid `add_extra_convs` option
with pytest.raises(AssertionError):
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs='on_xxx',
num_outs=5)
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs=True,
num_outs=5)
    # FPN expects multiple levels of features per image
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
outs = fpn_model(feats)
assert fpn_model.add_extra_convs == 'on_input'
assert len(outs) == fpn_model.num_outs
for i in range(fpn_model.num_outs):
        # with start_level=1 the i-th output sits at stride 2**(i + 1)
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
# Tests for fpn with no extra convs (pooling is used instead)
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs=False,
num_outs=5)
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
assert not fpn_model.add_extra_convs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
# Tests for fpn with lateral bns
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs=True,
no_norm_on_lateral=False,
norm_cfg=dict(type='BN', requires_grad=True),
num_outs=5)
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
assert fpn_model.add_extra_convs == 'on_input'
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
bn_exist = False
for m in fpn_model.modules():
if isinstance(m, _BatchNorm):
bn_exist = True
assert bn_exist
# Bilinear upsample
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs=True,
upsample_cfg=dict(mode='bilinear', align_corners=True),
num_outs=5)
    outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
assert fpn_model.add_extra_convs == 'on_input'
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
    # Scale factor instead of fixed upsample size
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs=True,
upsample_cfg=dict(scale_factor=2),
num_outs=5)
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
# Extra convs source is 'inputs'
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_input',
start_level=1,
num_outs=5)
assert fpn_model.add_extra_convs == 'on_input'
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
# Extra convs source is 'laterals'
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_lateral',
start_level=1,
num_outs=5)
assert fpn_model.add_extra_convs == 'on_lateral'
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
# Extra convs source is 'outputs'
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_output',
start_level=1,
num_outs=5)
assert fpn_model.add_extra_convs == 'on_output'
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
def test_channel_mapper():
"""Tests ChannelMapper."""
s = 64
in_channels = [8, 16, 32, 64]
feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8]
out_channels = 8
kernel_size = 3
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
# in_channels must be a list
with pytest.raises(AssertionError):
channel_mapper = ChannelMapper(
in_channels=10, out_channels=out_channels, kernel_size=kernel_size)
# the length of channel_mapper's inputs must be equal to the length of
# in_channels
with pytest.raises(AssertionError):
channel_mapper = ChannelMapper(
in_channels=in_channels[:-1],
out_channels=out_channels,
kernel_size=kernel_size)
channel_mapper(feats)
channel_mapper = ChannelMapper(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size)
outs = channel_mapper(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
def test_dilated_encoder():
in_channels = 16
out_channels = 32
out_shape = 34
dilated_encoder = DilatedEncoder(in_channels, out_channels, 16, 2)
feat = [torch.rand(1, in_channels, 34, 34)]
out_feat = dilated_encoder(feat)[0]
assert out_feat.shape == (1, out_channels, out_shape, out_shape)
def test_ct_resnet_neck():
# num_filters/num_kernels must be a list
with pytest.raises(TypeError):
CTResNetNeck(
in_channel=10, num_deconv_filters=10, num_deconv_kernels=4)
    # num_filters/num_kernels must have the same length
with pytest.raises(AssertionError):
CTResNetNeck(
in_channel=10,
num_deconv_filters=(10, 10),
num_deconv_kernels=(4, ))
in_channels = 16
num_filters = (8, 8)
num_kernels = (4, 4)
feat = torch.rand(1, 16, 4, 4)
ct_resnet_neck = CTResNetNeck(
in_channel=in_channels,
num_deconv_filters=num_filters,
num_deconv_kernels=num_kernels,
use_dcn=False)
# feat must be list or tuple
with pytest.raises(AssertionError):
ct_resnet_neck(feat)
out_feat = ct_resnet_neck([feat])[0]
assert out_feat.shape == (1, num_filters[-1], 16, 16)
if torch.cuda.is_available():
# test dcn
ct_resnet_neck = CTResNetNeck(
in_channel=in_channels,
num_deconv_filters=num_filters,
num_deconv_kernels=num_kernels)
ct_resnet_neck = ct_resnet_neck.cuda()
feat = feat.cuda()
out_feat = ct_resnet_neck([feat])[0]
assert out_feat.shape == (1, num_filters[-1], 16, 16)
def test_yolov3_neck():
    # num_scales, in_channels and out_channels must have the same length
with pytest.raises(AssertionError):
YOLOV3Neck(num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4])
    # len(feats) must be equal to num_scales
with pytest.raises(AssertionError):
neck = YOLOV3Neck(
num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4, 2])
feats = (torch.rand(1, 4, 16, 16), torch.rand(1, 8, 16, 16))
neck(feats)
# test normal channels
s = 32
in_channels = [16, 8, 4]
out_channels = [8, 4, 2]
feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels) - 1, -1, -1)
]
neck = YOLOV3Neck(
num_scales=3, in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(outs)):
assert outs[i].shape == \
(1, out_channels[i], feat_sizes[i], feat_sizes[i])
# test more flexible setting
s = 32
in_channels = [32, 8, 16]
out_channels = [19, 21, 5]
feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels) - 1, -1, -1)
]
neck = YOLOV3Neck(
num_scales=3, in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(outs)):
assert outs[i].shape == \
(1, out_channels[i], feat_sizes[i], feat_sizes[i])
def test_ssd_neck():
    # level_strides/level_paddings must have the same length
with pytest.raises(AssertionError):
SSDNeck(
in_channels=[8, 16],
out_channels=[8, 16, 32],
level_strides=[2],
level_paddings=[2, 1])
    # len(out_channels) must be larger than len(in_channels)
with pytest.raises(AssertionError):
SSDNeck(
in_channels=[8, 16],
out_channels=[8],
level_strides=[2],
level_paddings=[2])
    # len(out_channels) - len(in_channels) must be equal to len(level_strides)
with pytest.raises(AssertionError):
SSDNeck(
in_channels=[8, 16],
out_channels=[4, 16, 64],
level_strides=[2, 2],
level_paddings=[2, 2])
    # in_channels must match out_channels[:len(in_channels)]
with pytest.raises(AssertionError):
SSDNeck(
in_channels=[8, 16],
out_channels=[4, 16, 64],
level_strides=[2],
level_paddings=[2])
ssd_neck = SSDNeck(
in_channels=[4],
out_channels=[4, 8, 16],
level_strides=[2, 1],
level_paddings=[1, 0])
feats = (torch.rand(1, 4, 16, 16), )
outs = ssd_neck(feats)
assert outs[0].shape == (1, 4, 16, 16)
assert outs[1].shape == (1, 8, 8, 8)
assert outs[2].shape == (1, 16, 6, 6)
# test SSD-Lite Neck
ssd_neck = SSDNeck(
in_channels=[4, 8],
out_channels=[4, 8, 16],
level_strides=[1],
level_paddings=[1],
l2_norm_scale=None,
use_depthwise=True,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'))
assert not hasattr(ssd_neck, 'l2_norm')
from mmcv.cnn.bricks import DepthwiseSeparableConvModule
assert isinstance(ssd_neck.extra_layers[0][-1],
DepthwiseSeparableConvModule)
feats = (torch.rand(1, 4, 8, 8), torch.rand(1, 8, 8, 8))
outs = ssd_neck(feats)
assert outs[0].shape == (1, 4, 8, 8)
assert outs[1].shape == (1, 8, 8, 8)
assert outs[2].shape == (1, 16, 8, 8)
def test_yolox_pafpn():
s = 64
in_channels = [8, 16, 32, 64]
feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8]
out_channels = 24
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = YOLOXPAFPN(in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
# test depth-wise
neck = YOLOXPAFPN(
in_channels=in_channels, out_channels=out_channels, use_depthwise=True)
from mmcv.cnn.bricks import DepthwiseSeparableConvModule
assert isinstance(neck.downsamples[0], DepthwiseSeparableConvModule)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
| 13,273 | 31.614251 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_plugins.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.plugins import DropBlock
def test_dropblock():
feat = torch.rand(1, 1, 11, 11)
drop_prob = 1.0
dropblock = DropBlock(drop_prob, block_size=11, warmup_iters=0)
out_feat = dropblock(feat)
assert (out_feat == 0).all() and out_feat.shape == feat.shape
drop_prob = 0.5
dropblock = DropBlock(drop_prob, block_size=5, warmup_iters=0)
out_feat = dropblock(feat)
assert out_feat.shape == feat.shape
    # drop_prob must be in (0, 1]
with pytest.raises(AssertionError):
DropBlock(1.5, 3)
# block_size cannot be an even number
with pytest.raises(AssertionError):
DropBlock(0.5, 2)
# warmup_iters cannot be less than 0
with pytest.raises(AssertionError):
DropBlock(0.5, 3, -1)
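# Sketch (assumption: DropBlock follows the paper's seed-probability formula);
# `_gamma` is a hypothetical helper, not part of mmdet's API.
def _gamma(drop_prob, feat_size, block_size):
    """Seed probability: drop_prob / block_size**2 scaled by the valid area."""
    return (drop_prob / block_size**2) * feat_size**2 / (
        feat_size - block_size + 1)**2

# With feat_size == block_size == 11 there is a single valid seed position and
# gamma collapses to drop_prob, which is why drop_prob=1.0 zeroes the whole
# 11x11 map in the first assertion above.
assert _gamma(1.0, 11, 11) == 1.0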
| 840 | 27.033333 | 67 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_loss_compatibility.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""pytest tests/test_loss_compatibility.py."""
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
@pytest.mark.parametrize('loss_bbox', [
dict(type='L1Loss', loss_weight=1.0),
dict(type='GHMR', mu=0.02, bins=10, momentum=0.7, loss_weight=10.0),
dict(type='IoULoss', loss_weight=1.0),
dict(type='BoundedIoULoss', loss_weight=1.0),
dict(type='GIoULoss', loss_weight=1.0),
dict(type='DIoULoss', loss_weight=1.0),
dict(type='CIoULoss', loss_weight=1.0),
dict(type='MSELoss', loss_weight=1.0),
dict(type='SmoothL1Loss', loss_weight=1.0),
dict(type='BalancedL1Loss', loss_weight=1.0)
])
def test_bbox_loss_compatibility(loss_bbox):
"""Test loss_bbox compatibility.
    Using Faster R-CNN as a sample, modify the loss function in the config
    file to verify the compatibility of the loss APIs.
"""
# Faster R-CNN config dict
config_path = '_base_/models/faster_rcnn_r50_fpn.py'
cfg_model = _get_detector_cfg(config_path)
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
if 'IoULoss' in loss_bbox['type']:
cfg_model.roi_head.bbox_head.reg_decoded_bbox = True
cfg_model.roi_head.bbox_head.loss_bbox = loss_bbox
from mmdet.models import build_detector
detector = build_detector(cfg_model)
loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
assert isinstance(loss, dict)
loss, _ = detector._parse_losses(loss)
assert float(loss.item()) > 0
@pytest.mark.parametrize('loss_cls', [
dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
dict(
type='GHMC', bins=30, momentum=0.75, use_sigmoid=True, loss_weight=1.0)
])
def test_cls_loss_compatibility(loss_cls):
"""Test loss_cls compatibility.
    Using Faster R-CNN as a sample, modify the loss function in the config
    file to verify the compatibility of the loss APIs.
"""
# Faster R-CNN config dict
config_path = '_base_/models/faster_rcnn_r50_fpn.py'
cfg_model = _get_detector_cfg(config_path)
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# verify class loss function compatibility
cfg_model.roi_head.bbox_head.loss_cls = loss_cls
from mmdet.models import build_detector
detector = build_detector(cfg_model)
loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
assert isinstance(loss, dict)
loss, _ = detector._parse_losses(loss)
assert float(loss.item()) > 0
def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
num_items=None, num_classes=10,
with_semantic=False): # yapf: disable
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_items (None | List[int]):
specifies the number of boxes in each batch item
num_classes (int):
number of different labels a box might have
        with_semantic (bool):
            whether to also generate a downsampled semantic segmentation map
"""
from mmdet.core import BitmapMasks
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]),
'flip': False,
'flip_direction': None,
} for _ in range(N)]
gt_bboxes = []
gt_labels = []
gt_masks = []
for batch_idx in range(N):
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[batch_idx]
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
class_idxs = rng.randint(1, num_classes, size=num_boxes)
gt_bboxes.append(torch.FloatTensor(boxes))
gt_labels.append(torch.LongTensor(class_idxs))
mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
gt_masks.append(BitmapMasks(mask, H, W))
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_bboxes': gt_bboxes,
'gt_labels': gt_labels,
'gt_bboxes_ignore': None,
'gt_masks': gt_masks,
}
if with_semantic:
        # assume gt_semantic_seg uses a 1/8 scale of the img
gt_semantic_seg = np.random.randint(
0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8)
mm_inputs.update(
{'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)})
return mm_inputs
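# Minimal usage sketch (not part of the original tests): _demo_mm_inputs can be
# run standalone to inspect the generated batch structure.
if __name__ == '__main__':
    demo = _demo_mm_inputs((2, 3, 128, 128), num_items=[3, 5])
    assert demo['imgs'].shape == (2, 3, 128, 128)
    assert [b.shape[0] for b in demo['gt_bboxes']] == [3, 5]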
| 6,361 | 30.49505 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_pvt.py
|
import pytest
import torch
from mmdet.models.backbones.pvt import (PVTEncoderLayer,
PyramidVisionTransformer,
PyramidVisionTransformerV2)
def test_pvt_block():
# test PVT structure and forward
block = PVTEncoderLayer(
embed_dims=64, num_heads=4, feedforward_channels=256)
assert block.ffn.embed_dims == 64
assert block.attn.num_heads == 4
assert block.ffn.feedforward_channels == 256
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
def test_pvt():
"""Test PVT backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
PyramidVisionTransformer(pretrained=123)
# test pretrained image size
with pytest.raises(AssertionError):
PyramidVisionTransformer(pretrain_img_size=(224, 224, 224))
# Test absolute position embedding
temp = torch.randn((1, 3, 224, 224))
model = PyramidVisionTransformer(
pretrain_img_size=224, use_abs_pos_embed=True)
model.init_weights()
model(temp)
# Test normal inference
temp = torch.randn((1, 3, 32, 32))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 33, 33))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 28, 34)
assert outs[1].shape == (1, 128, 14, 17)
assert outs[2].shape == (1, 320, 7, 8)
assert outs[3].shape == (1, 512, 3, 4)
def test_pvtv2():
"""Test PVTv2 backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
PyramidVisionTransformerV2(pretrained=123)
# test pretrained image size
with pytest.raises(AssertionError):
PyramidVisionTransformerV2(pretrain_img_size=(224, 224, 224))
# Test normal inference
temp = torch.randn((1, 3, 32, 32))
model = PyramidVisionTransformerV2()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 31, 31))
model = PyramidVisionTransformerV2()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = PyramidVisionTransformerV2()
outs = model(temp)
assert outs[0].shape == (1, 64, 28, 35)
assert outs[1].shape == (1, 128, 14, 18)
assert outs[2].shape == (1, 320, 7, 9)
assert outs[3].shape == (1, 512, 4, 5)
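# Sketch (assumption inferred from the shapes asserted above): PVT v1 uses a
# non-padded patch embed, so each stage floors H/stride and W/stride, while
# PVT v2 pads its overlapping patch embed and effectively ceils them.
import math
for _stride, _v1_hw, _v2_hw in [(4, (28, 34), (28, 35)), (32, (3, 4), (4, 5))]:
    assert (112 // _stride, 137 // _stride) == _v1_hw
    assert (math.ceil(112 / _stride), math.ceil(137 / _stride)) == _v2_hw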
| 3,332 | 31.048077 | 69 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_hourglass.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
with pytest.raises(AssertionError):
        # HourglassNet's num_stacks should be larger than 0
HourglassNet(num_stacks=0)
with pytest.raises(AssertionError):
# len(stage_channels) should equal len(stage_blocks)
HourglassNet(
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2, 4])
with pytest.raises(AssertionError):
        # len(stage_channels) should be larger than downsample_times
HourglassNet(
downsample_times=5,
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2])
# Test HourglassNet-52
model = HourglassNet(
num_stacks=1,
stage_channels=(64, 64, 96, 96, 96, 128),
feat_channel=64)
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([1, 64, 64, 64])
# Test HourglassNet-104
model = HourglassNet(
num_stacks=2,
stage_channels=(64, 64, 96, 96, 96, 128),
feat_channel=64)
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 2
assert feat[0].shape == torch.Size([1, 64, 64, 64])
assert feat[1].shape == torch.Size([1, 64, 64, 64])
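# Sketch: HourglassNet keeps a fixed stride-4 output resolution and emits one
# feature map per stack, which is what the shape asserts above encode.
# `_expected_hourglass_out` is a hypothetical helper (stem stride assumed 4).
def _expected_hourglass_out(hw, num_stacks, feat_channel, stem_stride=4):
    return [(1, feat_channel, hw // stem_stride,
             hw // stem_stride)] * num_stacks

assert _expected_hourglass_out(256, 2, 64) == [(1, 64, 64, 64), (1, 64, 64, 64)]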
| 1,464 | 28.3 | 65 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_res2net.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import Res2Net
from mmdet.models.backbones.res2net import Bottle2neck
from .utils import is_block
def test_res2net_bottle2neck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')
with pytest.raises(AssertionError):
# Scale must be larger than 1
Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')
# Test Res2Net Bottle2neck structure
block = Bottle2neck(
64, 64, base_width=26, stride=2, scales=4, style='pytorch')
assert block.scales == 4
# Test Res2Net Bottle2neck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
Bottle2neck(
64,
64,
base_width=26,
scales=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
Bottle2neck(64, 64, dcn=dcn)
# Test Res2Net Bottle2neck forward
block = Bottle2neck(64, 16, base_width=26, scales=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_res2net_backbone():
with pytest.raises(KeyError):
# Res2Net depth should be in [50, 101, 152]
Res2Net(depth=18)
# Test Res2Net with scales 4, base_width 26
model = Res2Net(depth=50, scales=4, base_width=26)
for m in model.modules():
if is_block(m):
assert m.scales == 4
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 8, 8])
assert feat[1].shape == torch.Size([1, 512, 4, 4])
assert feat[2].shape == torch.Size([1, 1024, 2, 2])
assert feat[3].shape == torch.Size([1, 2048, 1, 1])
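# Sketch (assumption: Bottle2neck keeps the ResNet Bottleneck expansion of 4):
# the stage widths asserted above are 64 * 2**i * 4.
assert [64 * 2**i * 4 for i in range(4)] == [256, 512, 1024, 2048]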
| 1,976 | 30.380952 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_swin.py
|
import pytest
import torch
from mmdet.models.backbones.swin import SwinBlock, SwinTransformer
def test_swin_block():
# test SwinBlock structure and forward
block = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256)
assert block.ffn.embed_dims == 64
assert block.attn.w_msa.num_heads == 4
assert block.ffn.feedforward_channels == 256
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
# Test BasicBlock with checkpoint forward
block = SwinBlock(
embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)
assert block.with_cp
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
def test_swin_transformer():
"""Test Swin Transformer backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
SwinTransformer(pretrained=123)
with pytest.raises(AssertionError):
        # Because Swin uses a non-overlapping patch embedding, the stride of
        # the patch embed must equal the patch size.
SwinTransformer(strides=(2, 2, 2, 2), patch_size=4)
# test pretrained image size
with pytest.raises(AssertionError):
SwinTransformer(pretrain_img_size=(224, 224, 224))
# Test absolute position embedding
temp = torch.randn((1, 3, 224, 224))
model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True)
model.init_weights()
model(temp)
# Test patch norm
model = SwinTransformer(patch_norm=False)
model(temp)
# Test normal inference
temp = torch.randn((1, 3, 32, 32))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 8, 8)
assert outs[1].shape == (1, 192, 4, 4)
assert outs[2].shape == (1, 384, 2, 2)
assert outs[3].shape == (1, 768, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 31, 31))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 8, 8)
assert outs[1].shape == (1, 192, 4, 4)
assert outs[2].shape == (1, 384, 2, 2)
assert outs[3].shape == (1, 768, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 28, 35)
assert outs[1].shape == (1, 192, 14, 18)
assert outs[2].shape == (1, 384, 7, 9)
assert outs[3].shape == (1, 768, 4, 5)
model = SwinTransformer(frozen_stages=4)
model.train()
for p in model.parameters():
assert not p.requires_grad
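# Sketch: Swin doubles embed_dims at every stage, so with the default
# embed_dims=96 the asserted channel counts are 96 * 2**i.
assert [96 * 2**i for i in range(4)] == [96, 192, 384, 768]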
| 2,648 | 30.915663 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv import assert_params_all_zeros
from mmcv.ops import DeformConv2dPack
from torch.nn.modules import AvgPool2d, GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones import ResNet, ResNetV1d
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .utils import check_norm_state, is_block, is_norm
def test_resnet_basic_block():
with pytest.raises(AssertionError):
# Not implemented yet.
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
BasicBlock(64, 64, dcn=dcn)
with pytest.raises(AssertionError):
# Not implemented yet.
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
BasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
BasicBlock(64, 64, plugins=plugins)
# test BasicBlock structure and forward
block = BasicBlock(64, 64)
assert block.conv1.in_channels == 64
assert block.conv1.out_channels == 64
assert block.conv1.kernel_size == (3, 3)
assert block.conv2.in_channels == 64
assert block.conv2.out_channels == 64
assert block.conv2.kernel_size == (3, 3)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test BasicBlock with checkpoint forward
block = BasicBlock(64, 64, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
Bottleneck(64, 64, style='tensorflow')
with pytest.raises(AssertionError):
# Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv4')
]
Bottleneck(64, 16, plugins=plugins)
with pytest.raises(AssertionError):
# Need to specify different postfix to avoid duplicate plugin name
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
Bottleneck(64, 16, plugins=plugins)
with pytest.raises(KeyError):
# Plugin type is not supported
plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
Bottleneck(64, 16, plugins=plugins)
# Test Bottleneck with checkpoint forward
block = Bottleneck(64, 16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck style
block = Bottleneck(64, 64, stride=2, style='pytorch')
assert block.conv1.stride == (1, 1)
assert block.conv2.stride == (2, 2)
block = Bottleneck(64, 64, stride=2, style='caffe')
assert block.conv1.stride == (2, 2)
assert block.conv2.stride == (1, 1)
# Test Bottleneck DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
block = Bottleneck(64, 64, dcn=dcn)
assert isinstance(block.conv2, DeformConv2dPack)
# Test Bottleneck forward
block = Bottleneck(64, 16)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
# after conv2, 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
assert block.nonlocal_block.in_channels == 16
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
# conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.context_block1.in_channels == 16
assert block.context_block2.in_channels == 64
assert block.context_block3.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_simplified_basic_block():
with pytest.raises(AssertionError):
# Not implemented yet.
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
SimplifiedBasicBlock(64, 64, dcn=dcn)
with pytest.raises(AssertionError):
# Not implemented yet.
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
SimplifiedBasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
SimplifiedBasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
SimplifiedBasicBlock(64, 64, with_cp=True)
# test SimplifiedBasicBlock structure and forward
block = SimplifiedBasicBlock(64, 64)
assert block.conv1.in_channels == 64
assert block.conv1.out_channels == 64
assert block.conv1.kernel_size == (3, 3)
assert block.conv2.in_channels == 64
assert block.conv2.out_channels == 64
assert block.conv2.kernel_size == (3, 3)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# test SimplifiedBasicBlock without norm
block = SimplifiedBasicBlock(64, 64, norm_cfg=None)
assert block.norm1 is None
assert block.norm2 is None
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_res_layer():
    # Test ResLayer of 3 Bottlenecks w/o downsample
layer = ResLayer(Bottleneck, 64, 16, 3)
assert len(layer) == 3
assert layer[0].conv1.in_channels == 64
assert layer[0].conv1.out_channels == 16
for i in range(1, len(layer)):
assert layer[i].conv1.in_channels == 64
assert layer[i].conv1.out_channels == 16
for i in range(len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResLayer of 3 Bottleneck with downsample
layer = ResLayer(Bottleneck, 64, 64, 3)
assert layer[0].downsample[0].out_channels == 256
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 56, 56])
# Test ResLayer of 3 Bottleneck with stride=2
layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)
assert layer[0].downsample[0].out_channels == 256
assert layer[0].downsample[0].stride == (2, 2)
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 28, 28])
# Test ResLayer of 3 Bottleneck with stride=2 and average downsample
layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)
assert isinstance(layer[0].downsample[0], AvgPool2d)
assert layer[0].downsample[1].out_channels == 256
assert layer[0].downsample[1].stride == (1, 1)
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 28, 28])
# Test ResLayer of 3 BasicBlock with stride=2 and downsample_first=False
layer = ResLayer(BasicBlock, 64, 64, 3, stride=2, downsample_first=False)
assert layer[2].downsample[0].out_channels == 64
assert layer[2].downsample[0].stride == (2, 2)
for i in range(len(layer) - 1):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 64, 28, 28])
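# Sketch: the downsample branches above appear whenever in/out channels differ;
# with Bottleneck.expansion == 4, ResLayer(Bottleneck, 64, 64, ...) outputs
# 64 * 4 = 256 channels, while BasicBlock keeps expansion == 1.
assert Bottleneck.expansion * 64 == 256
assert BasicBlock.expansion * 64 == 64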
def test_resnet_stem():
# Test default stem_channels
model = ResNet(50)
assert model.stem_channels == 64
assert model.conv1.out_channels == 64
assert model.norm1.num_features == 64
# Test default stem_channels, with base_channels=3
model = ResNet(50, base_channels=3)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test stem_channels=3
model = ResNet(50, stem_channels=3)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test stem_channels=3, with base_channels=2
model = ResNet(50, stem_channels=3, base_channels=2)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test V1d stem_channels
model = ResNetV1d(depth=50, stem_channels=6)
model.train()
assert model.stem[0].out_channels == 3
assert model.stem[1].num_features == 3
assert model.stem[3].out_channels == 3
assert model.stem[4].num_features == 3
assert model.stem[6].out_channels == 6
assert model.stem[7].num_features == 6
assert model.layer1[0].conv1.in_channels == 6
def test_resnet_backbone():
"""Test resnet backbone."""
with pytest.raises(KeyError):
# ResNet depth should be in [18, 34, 50, 101, 152]
ResNet(20)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=0)
with pytest.raises(AssertionError):
# len(stage_with_dcn) == num_stages
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
ResNet(50, dcn=dcn, stage_with_dcn=(True, ))
with pytest.raises(AssertionError):
# len(stage_with_plugin) == num_stages
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True),
position='after_conv3')
]
ResNet(50, plugins=plugins)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=5)
with pytest.raises(AssertionError):
# len(strides) == len(dilations) == num_stages
ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)
with pytest.raises(TypeError):
# pretrained must be a string path
model = ResNet(50, pretrained=0)
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
ResNet(50, style='tensorflow')
# Test ResNet50 norm_eval=True
model = ResNet(50, norm_eval=True, base_channels=1)
model.train()
assert check_norm_state(model.modules(), False)
# Test ResNet50 with torchvision pretrained weight
model = ResNet(
depth=50, norm_eval=True, pretrained='torchvision://resnet50')
model.train()
assert check_norm_state(model.modules(), False)
# Test ResNet50 with first stage frozen
frozen_stages = 1
model = ResNet(50, frozen_stages=frozen_stages, base_channels=1)
model.train()
assert model.norm1.training is False
for layer in [model.conv1, model.norm1]:
for param in layer.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test ResNet50V1d with first stage frozen
model = ResNetV1d(depth=50, frozen_stages=frozen_stages, base_channels=2)
assert len(model.stem) == 9
model.train()
assert check_norm_state(model.stem, False)
for param in model.stem.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test ResNet18 forward
model = ResNet(18)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 64, 8, 8])
assert feat[1].shape == torch.Size([1, 128, 4, 4])
assert feat[2].shape == torch.Size([1, 256, 2, 2])
assert feat[3].shape == torch.Size([1, 512, 1, 1])
# Test ResNet18 with checkpoint forward
model = ResNet(18, with_cp=True)
for m in model.modules():
if is_block(m):
assert m.with_cp
# Test ResNet50 with BatchNorm forward
model = ResNet(50, base_channels=1)
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNet50 with layers 1, 2, 3 out forward
model = ResNet(50, out_indices=(0, 1, 2), base_channels=1)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
# Test ResNet50 with checkpoint forward
model = ResNet(50, with_cp=True, base_channels=1)
for m in model.modules():
if is_block(m):
assert m.with_cp
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNet50 with GroupNorm forward
model = ResNet(
50,
base_channels=4,
norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 16, 8, 8])
assert feat[1].shape == torch.Size([1, 32, 4, 4])
assert feat[2].shape == torch.Size([1, 64, 2, 2])
assert feat[3].shape == torch.Size([1, 128, 1, 1])
# Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D
# after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, True, True, True),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, False),
position='after_conv3')
]
model = ResNet(50, plugins=plugins, base_channels=8)
for m in model.layer1.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'gen_attention_block')
assert m.nonlocal_block.in_channels == 8
for m in model.layer2.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 16
assert m.gen_attention_block.in_channels == 16
assert m.context_block.in_channels == 64
for m in model.layer3.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 32
assert m.gen_attention_block.in_channels == 32
assert m.context_block.in_channels == 128
for m in model.layer4.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 64
assert m.gen_attention_block.in_channels == 64
assert not hasattr(m, 'context_block')
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 32, 8, 8])
assert feat[1].shape == torch.Size([1, 64, 4, 4])
assert feat[2].shape == torch.Size([1, 128, 2, 2])
assert feat[3].shape == torch.Size([1, 256, 1, 1])
# Test ResNet50 with 1 ContextBlock after conv2, 1 ContextBlock after
# conv3 in layers 2, 3, 4
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
stages=(False, True, True, False),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
stages=(False, True, True, False),
position='after_conv3')
]
model = ResNet(50, plugins=plugins, base_channels=8)
for m in model.layer1.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'context_block1')
assert not hasattr(m, 'context_block2')
for m in model.layer2.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert m.context_block1.in_channels == 64
assert m.context_block2.in_channels == 64
for m in model.layer3.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert m.context_block1.in_channels == 128
assert m.context_block2.in_channels == 128
for m in model.layer4.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'context_block1')
assert not hasattr(m, 'context_block2')
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 32, 8, 8])
assert feat[1].shape == torch.Size([1, 64, 4, 4])
assert feat[2].shape == torch.Size([1, 128, 2, 2])
assert feat[3].shape == torch.Size([1, 256, 1, 1])
# Test ResNet50 zero initialization of residual
model = ResNet(50, zero_init_residual=True, base_channels=1)
model.init_weights()
for m in model.modules():
if isinstance(m, Bottleneck):
assert assert_params_all_zeros(m.norm3)
elif isinstance(m, BasicBlock):
assert assert_params_all_zeros(m.norm2)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNetV1d forward
model = ResNetV1d(depth=50, base_channels=2)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 8, 8, 8])
assert feat[1].shape == torch.Size([1, 16, 4, 4])
assert feat[2].shape == torch.Size([1, 32, 2, 2])
assert feat[3].shape == torch.Size([1, 64, 1, 1])
| 22,380 | 34.35703 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.utils import SimplifiedBasicBlock
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
SimplifiedBasicBlock)):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
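# Minimal usage sketch (not part of the original helpers): check_norm_state
# expects the module iterator, i.e. pass model.modules(), not the model itself.
if __name__ == '__main__':
    import torch.nn as nn
    _model = nn.Sequential(nn.Conv2d(3, 3, 1), nn.BatchNorm2d(3))
    _model.train()
    assert check_norm_state(_model.modules(), True)
    _model.eval()
    assert check_norm_state(_model.modules(), False)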
| 1,026 | 30.121212 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_mobilenet_v2.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.mobilenet_v2 import MobileNetV2
from .utils import check_norm_state, is_block, is_norm
def test_mobilenetv2_backbone():
with pytest.raises(ValueError):
        # frozen_stages must be in range(-1, 8)
MobileNetV2(frozen_stages=8)
with pytest.raises(ValueError):
        # out_indices must be in range(0, 8)
MobileNetV2(out_indices=[8])
# Test MobileNetV2 with first stage frozen
frozen_stages = 1
model = MobileNetV2(frozen_stages=frozen_stages)
model.train()
for mod in model.conv1.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test MobileNetV2 with norm_eval=True
model = MobileNetV2(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
# Test MobileNetV2 forward with widen_factor=1.0
model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8))
model.train()
assert check_norm_state(model.modules(), True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 8
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
assert feat[7].shape == torch.Size((1, 1280, 7, 7))
# Test MobileNetV2 forward with widen_factor=0.5
model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 8, 112, 112))
assert feat[1].shape == torch.Size((1, 16, 56, 56))
assert feat[2].shape == torch.Size((1, 16, 28, 28))
assert feat[3].shape == torch.Size((1, 32, 14, 14))
assert feat[4].shape == torch.Size((1, 48, 14, 14))
assert feat[5].shape == torch.Size((1, 80, 7, 7))
assert feat[6].shape == torch.Size((1, 160, 7, 7))
# Test MobileNetV2 forward with widen_factor=2.0
model = MobileNetV2(widen_factor=2.0, out_indices=range(0, 8))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 48, 56, 56))
assert feat[2].shape == torch.Size((1, 64, 28, 28))
assert feat[3].shape == torch.Size((1, 128, 14, 14))
assert feat[4].shape == torch.Size((1, 192, 14, 14))
assert feat[5].shape == torch.Size((1, 320, 7, 7))
assert feat[6].shape == torch.Size((1, 640, 7, 7))
assert feat[7].shape == torch.Size((1, 2560, 7, 7))
# Test MobileNetV2 forward with dict(type='ReLU')
model = MobileNetV2(
widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
# Test MobileNetV2 with BatchNorm forward
model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
# Test MobileNetV2 with GroupNorm forward
model = MobileNetV2(
widen_factor=1.0,
norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),
out_indices=range(0, 7))
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
# Test MobileNetV2 with layers 1, 3, 5 out forward
model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 32, 28, 28))
assert feat[2].shape == torch.Size((1, 96, 14, 14))
# Test MobileNetV2 with checkpoint forward
model = MobileNetV2(
widen_factor=1.0, with_cp=True, out_indices=range(0, 7))
for m in model.modules():
if is_block(m):
assert m.with_cp
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
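# Sketch (assumption: channels are rounded with a make_divisible-style rule,
# divisor 8): 24 * 0.5 = 12 rounds up to 16, which explains the
# widen_factor=0.5 shapes asserted above. `_make_divisible` is a hypothetical
# re-implementation, not mmdet's own.
def _make_divisible(value, divisor=8):
    new_value = max(divisor, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value

assert _make_divisible(24 * 0.5) == 16
assert _make_divisible(24 * 2.0) == 48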
| 6,546 | 36.626437 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_hrnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hrnet import HRModule, HRNet
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
@pytest.mark.parametrize('block', [BasicBlock, Bottleneck])
def test_hrmodule(block):
# Test multiscale forward
num_channles = (32, 64)
in_channels = [c * block.expansion for c in num_channles]
hrmodule = HRModule(
num_branches=2,
blocks=block,
in_channels=in_channels,
num_blocks=(4, 4),
num_channels=num_channles,
)
feats = [
torch.randn(1, in_channels[0], 64, 64),
torch.randn(1, in_channels[1], 32, 32)
]
feats = hrmodule(feats)
assert len(feats) == 2
assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
assert feats[1].shape == torch.Size([1, in_channels[1], 32, 32])
# Test single scale forward
num_channles = (32, 64)
in_channels = [c * block.expansion for c in num_channles]
hrmodule = HRModule(
num_branches=2,
blocks=block,
in_channels=in_channels,
num_blocks=(4, 4),
num_channels=num_channles,
multiscale_output=False,
)
feats = [
torch.randn(1, in_channels[0], 64, 64),
torch.randn(1, in_channels[1], 32, 32)
]
feats = hrmodule(feats)
assert len(feats) == 1
assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
def test_hrnet_backbone():
# only have 3 stages
extra = dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)))
with pytest.raises(AssertionError):
        # HRNet now only supports 4 stages
HRNet(extra=extra)
extra['stage4'] = dict(
num_modules=3,
num_branches=3, # should be 4
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))
with pytest.raises(AssertionError):
# len(num_blocks) should equal num_branches
HRNet(extra=extra)
extra['stage4']['num_branches'] = 4
# Test hrnetv2p_w32
model = HRNet(extra=extra)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feats = model(imgs)
assert len(feats) == 4
assert feats[0].shape == torch.Size([1, 32, 64, 64])
assert feats[3].shape == torch.Size([1, 256, 8, 8])
# Test single scale output
model = HRNet(extra=extra, multiscale_output=False)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feats = model(imgs)
assert len(feats) == 1
assert feats[0].shape == torch.Size([1, 32, 64, 64])
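# Sketch: HRNet branch widths double per branch (32 * 2**i here) and the
# highest-resolution branch keeps stride 4, hence the 64x64 map above.
assert [32 * 2**i for i in range(4)] == [32, 64, 128, 256]
assert 256 // 4 == 64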
| 3,089 | 26.589286 | 68 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_csp_darknet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
        # frozen_stages must be in range(-1, len(arch_setting) + 1)
CSPDarknet(frozen_stages=6)
with pytest.raises(AssertionError):
        # out_indices must be in range(len(arch_setting) + 1)
CSPDarknet(out_indices=[6])
# Test CSPDarknet with first stage frozen
frozen_stages = 1
model = CSPDarknet(frozen_stages=frozen_stages)
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test CSPDarknet with norm_eval=True
model = CSPDarknet(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
    # Test CSPDarknet-P5 forward with widen_factor=0.25
model = CSPDarknet(arch='P5', widen_factor=0.25, out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 16, 32, 32))
assert feat[1].shape == torch.Size((1, 32, 16, 16))
assert feat[2].shape == torch.Size((1, 64, 8, 8))
assert feat[3].shape == torch.Size((1, 128, 4, 4))
assert feat[4].shape == torch.Size((1, 256, 2, 2))
    # Test CSPDarknet-P6 forward with widen_factor=0.25
model = CSPDarknet(
arch='P6',
widen_factor=0.25,
out_indices=range(0, 6),
spp_kernal_sizes=(3, 5, 7))
model.train()
imgs = torch.randn(1, 3, 128, 128)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 16, 64, 64))
assert feat[1].shape == torch.Size((1, 32, 32, 32))
assert feat[2].shape == torch.Size((1, 64, 16, 16))
assert feat[3].shape == torch.Size((1, 128, 8, 8))
assert feat[4].shape == torch.Size((1, 192, 4, 4))
assert feat[5].shape == torch.Size((1, 256, 2, 2))
# Test CSPDarknet forward with dict(type='ReLU')
model = CSPDarknet(
widen_factor=0.125, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with BatchNorm forward
model = CSPDarknet(widen_factor=0.125, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with custom arch forward
arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
[224, 512, 1, True, False]]
model = CSPDarknet(
arch_ovewrite=arch_ovewrite,
widen_factor=0.25,
out_indices=(0, 1, 2, 3))
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 8, 16, 16))
assert feat[1].shape == torch.Size((1, 14, 8, 8))
assert feat[2].shape == torch.Size((1, 56, 4, 4))
assert feat[3].shape == torch.Size((1, 128, 2, 2))
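# Sketch (assumption: the P5 base widths are (64, 128, 256, 512, 1024)): the
# widen_factor=0.25 shapes asserted above are those widths scaled by 0.25.
assert [int(c * 0.25) for c in (64, 128, 256, 512, 1024)] == \
    [16, 32, 64, 128, 256]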
| 4,117 | 34.196581 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_renext.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')
# Test ResNeXt Bottleneck structure
block = BottleneckX(
64, 64, groups=32, base_width=4, stride=2, style='pytorch')
assert block.conv2.stride == (2, 2)
assert block.conv2.groups == 32
assert block.conv2.out_channels == 128
# Test ResNeXt Bottleneck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
BottleneckX(
64,
64,
groups=32,
base_width=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
BottleneckX(64, 64, dcn=dcn)
# Test ResNeXt Bottleneck forward
block = BottleneckX(64, 16, groups=32, base_width=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResNeXt Bottleneck forward with plugins
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]
block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnext_backbone():
with pytest.raises(KeyError):
# ResNeXt depth should be in [50, 101, 152]
ResNeXt(depth=18)
# Test ResNeXt with group 32, base_width 4
model = ResNeXt(depth=50, groups=32, base_width=4)
for m in model.modules():
if is_block(m):
assert m.conv2.groups == 32
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 8, 8])
assert feat[1].shape == torch.Size([1, 512, 4, 4])
assert feat[2].shape == torch.Size([1, 1024, 2, 2])
assert feat[3].shape == torch.Size([1, 2048, 1, 1])
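# Sketch (assumption: ResNeXt mid-width = planes * groups * base_width / 64):
# with planes=64, groups=32, base_width=4 this gives 128, matching the
# conv2.out_channels assertion above.
assert 64 * 32 * 4 // 64 == 128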
| 3,528 | 32.292453 | 73 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_trident_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import TridentResNet
from mmdet.models.backbones.trident_resnet import TridentBottleneck
def test_trident_resnet_bottleneck():
trident_dilations = (1, 2, 3)
test_branch_idx = 1
concat_output = True
trident_build_config = (trident_dilations, test_branch_idx, concat_output)
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
TridentBottleneck(
*trident_build_config, inplanes=64, planes=64, style='tensorflow')
with pytest.raises(AssertionError):
# Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv4')
]
TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
with pytest.raises(AssertionError):
# Need to specify different postfix to avoid duplicate plugin name
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
with pytest.raises(KeyError):
# Plugin type is not supported
plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
# Test Bottleneck with checkpoint forward
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck style
block = TridentBottleneck(
*trident_build_config,
inplanes=64,
planes=64,
stride=2,
style='pytorch')
assert block.conv1.stride == (1, 1)
assert block.conv2.stride == (2, 2)
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=64, stride=2, style='caffe')
assert block.conv1.stride == (2, 2)
assert block.conv2.stride == (1, 1)
# Test Bottleneck forward
block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
# after conv2, 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
assert block.nonlocal_block.in_channels == 16
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
# conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
position='after_conv3')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.context_block1.in_channels == 16
assert block.context_block2.in_channels == 64
assert block.context_block3.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
def test_trident_resnet_backbone():
    """Test tridentresnet backbone."""
tridentresnet_config = dict(
num_branch=3,
test_branch_idx=1,
strides=(1, 2, 2),
dilations=(1, 1, 1),
trident_dilations=(1, 2, 3),
out_indices=(2, ),
)
"""Test tridentresnet backbone."""
with pytest.raises(AssertionError):
# TridentResNet depth should be in [50, 101, 152]
TridentResNet(18, **tridentresnet_config)
with pytest.raises(AssertionError):
# In TridentResNet: num_stages == 3
TridentResNet(50, num_stages=4, **tridentresnet_config)
model = TridentResNet(50, num_stages=3, **tridentresnet_config)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([3, 1024, 2, 2])
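# Sketch (assumption: TridentResNet concatenates its parallel branches along
# the batch dim in train mode, and uses a single branch at test time);
# `_trident_out_batch` is a hypothetical helper.
def _trident_out_batch(batch, num_branch, testing=False):
    return batch if testing else batch * num_branch

assert _trident_out_batch(1, 3) == 3  # matches the leading dim asserted above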
| 6,372 | 34.209945 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_resnest.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeSt
from mmdet.models.backbones.resnest import Bottleneck as BottleneckS
def test_resnest_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')
# Test ResNeSt Bottleneck structure
block = BottleneckS(
2, 4, radix=2, reduction_factor=4, stride=2, style='pytorch')
assert block.avd_layer.stride == 2
assert block.conv2.channels == 4
# Test ResNeSt Bottleneck forward
block = BottleneckS(16, 4, radix=2, reduction_factor=4)
x = torch.randn(2, 16, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([2, 16, 56, 56])
def test_resnest_backbone():
with pytest.raises(KeyError):
# ResNeSt depth should be in [50, 101, 152, 200]
ResNeSt(depth=18)
# Test ResNeSt with radix 2, reduction_factor 4
model = ResNeSt(
depth=50,
base_channels=4,
radix=2,
reduction_factor=4,
out_indices=(0, 1, 2, 3))
model.train()
imgs = torch.randn(2, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([2, 16, 8, 8])
assert feat[1].shape == torch.Size([2, 32, 4, 4])
assert feat[2].shape == torch.Size([2, 64, 2, 2])
assert feat[3].shape == torch.Size([2, 128, 1, 1])
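    # Editor note (not in the original test): with base_channels=4 the
    # stage widths are 4 * 2**i and BottleneckS expands channels 4x,
    # giving 16, 32, 64 and 128; the 8x8 map (32 / stem stride 4) then
    # halves at each remaining stage.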
| 1,473 | 29.708333 | 76 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_regnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import RegNet
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
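# Editor note (not in the original list): each tuple pairs a RegNetX variant
# name with its generating parameters -- w0 (initial width), wa (width
# slope), wm (width multiplier), group_w (group width), depth and bot_mul
# (bottleneck ratio) -- plus the four per-stage output channels those
# parameters quantize to; the test below must get the same backbone from
# the name and from the raw dict.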
@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)
def test_regnet_backbone(arch_name, arch, out_channels):
    with pytest.raises(AssertionError):
        # arch_name must be a predefined RegNet architecture
        RegNet(arch_name + '233')
# Test RegNet with arch_name
model = RegNet(arch_name)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
    # Test RegNet with arch
    model = RegNet(arch)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
    assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
    assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
    assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
| 2,177 | 35.915254 | 73 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import check_norm_state, is_block, is_norm
__all__ = ['is_block', 'is_norm', 'check_norm_state']
| 158 | 30.8 | 54 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_backbones/test_detectors_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.models.backbones import DetectoRS_ResNet
def test_detectorrs_resnet_backbone():
    """Test init_weights config."""
    detectorrs_cfg = dict(
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True),
        output_img=True)
with pytest.raises(AssertionError):
# pretrained and init_cfg cannot be specified at the same time
DetectoRS_ResNet(
**detectorrs_cfg, pretrained='Pretrained', init_cfg='Pretrained')
with pytest.raises(AssertionError):
# init_cfg must be a dict
DetectoRS_ResNet(
**detectorrs_cfg, pretrained=None, init_cfg=['Pretrained'])
with pytest.raises(KeyError):
# init_cfg must contain the key `type`
DetectoRS_ResNet(
**detectorrs_cfg,
pretrained=None,
init_cfg=dict(checkpoint='Pretrained'))
with pytest.raises(AssertionError):
# init_cfg only support initialize pretrained model way
DetectoRS_ResNet(
**detectorrs_cfg, pretrained=None, init_cfg=dict(type='Trained'))
with pytest.raises(TypeError):
        # pretrained must be a str or None
model = DetectoRS_ResNet(
**detectorrs_cfg, pretrained=['Pretrained'], init_cfg=None)
model.init_weights()
| 1,611 | 32.583333 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_utils/test_position_encoding.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import (LearnedPositionalEncoding,
SinePositionalEncoding)
def test_sine_positional_encoding(num_feats=16, batch_size=2):
# test invalid type of scale
with pytest.raises(AssertionError):
module = SinePositionalEncoding(
num_feats, scale=(3., ), normalize=True)
module = SinePositionalEncoding(num_feats)
h, w = 10, 6
mask = (torch.rand(batch_size, h, w) > 0.5).to(torch.int)
assert not module.normalize
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
# set normalize
module = SinePositionalEncoding(num_feats, normalize=True)
assert module.normalize
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
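    # Editor note (not in the original test): the 2 * num_feats channels
    # come from concatenating DETR-style sine/cosine embeddings of
    # num_feats channels per axis: row/column indices are cumulative sums
    # over non-masked positions (optionally normalized), passed through
    # sin/cos at num_feats frequencies.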
def test_learned_positional_encoding(num_feats=16,
row_num_embed=10,
col_num_embed=10,
batch_size=2):
module = LearnedPositionalEncoding(num_feats, row_num_embed, col_num_embed)
assert module.row_embed.weight.shape == (row_num_embed, num_feats)
assert module.col_embed.weight.shape == (col_num_embed, num_feats)
h, w = 10, 6
mask = torch.rand(batch_size, h, w) > 0.5
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
| 1,437 | 34.95 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_utils/test_model_misc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.models.utils import interpolate_as
def test_interpolate_as():
source = torch.rand((1, 5, 4, 4))
target = torch.rand((1, 1, 16, 16))
# Test 4D source and target
result = interpolate_as(source, target)
assert result.shape == torch.Size((1, 5, 16, 16))
# Test 3D target
result = interpolate_as(source, target.squeeze(0))
assert result.shape == torch.Size((1, 5, 16, 16))
# Test 3D source
result = interpolate_as(source.squeeze(0), target)
assert result.shape == torch.Size((5, 16, 16))
# Test type(target) == np.ndarray
target = np.random.rand(16, 16)
result = interpolate_as(source.squeeze(0), target)
assert result.shape == torch.Size((5, 16, 16))
| 805 | 27.785714 | 54 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_utils/test_se_layer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import SELayer
def test_se_layer():
with pytest.raises(AssertionError):
        # act_cfg sequence length must equal 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
| 674 | 26 | 76 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_utils/test_inverted_residual.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.cnn import is_norm
from torch.nn.modules import GroupNorm
from mmdet.models.utils import InvertedResidual, SELayer
def test_inverted_residual():
with pytest.raises(AssertionError):
# stride must be in [1, 2]
InvertedResidual(16, 16, 32, stride=3)
with pytest.raises(AssertionError):
# se_cfg must be None or dict
InvertedResidual(16, 16, 32, se_cfg=list())
with pytest.raises(AssertionError):
        # in_channels and mid_channels must be the same if
# with_expand_conv is False
InvertedResidual(16, 16, 32, with_expand_conv=False)
# Test InvertedResidual forward, stride=1
block = InvertedResidual(16, 16, 32, stride=1)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert getattr(block, 'se', None) is None
assert block.with_res_shortcut
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward, stride=2
block = InvertedResidual(16, 16, 32, stride=2)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert not block.with_res_shortcut
assert x_out.shape == torch.Size((1, 16, 28, 28))
# Test InvertedResidual forward with se layer
se_cfg = dict(channels=32)
block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert isinstance(block.se, SELayer)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward, with_expand_conv=False
block = InvertedResidual(32, 16, 32, with_expand_conv=False)
x = torch.randn(1, 32, 56, 56)
x_out = block(x)
assert getattr(block, 'expand_conv', None) is None
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with GroupNorm
block = InvertedResidual(
16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
for m in block.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with HSigmoid
block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with checkpoint
block = InvertedResidual(16, 16, 32, with_cp=True)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert block.with_cp
assert x_out.shape == torch.Size((1, 16, 56, 56))
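    # Editor note (not in the original test): InvertedResidual only builds
    # the residual shortcut checked above when stride == 1 and in_channels
    # == out_channels, which is why the stride=2 block has
    # with_res_shortcut == False.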
| 2,635 | 33.233766 | 71 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_utils/test_conv_upsample.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import ConvUpsample
@pytest.mark.parametrize('num_layers', [0, 1, 2])
def test_conv_upsample(num_layers):
num_upsample = num_layers if num_layers > 0 else 0
num_layers = num_layers if num_layers > 0 else 1
layer = ConvUpsample(
10,
5,
num_layers=num_layers,
num_upsample=num_upsample,
conv_cfg=None,
norm_cfg=None)
size = 5
x = torch.randn((1, 10, size, size))
size = size * pow(2, num_upsample)
x = layer(x)
assert x.shape[-2:] == (size, size)
| 628 | 24.16 | 54 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_utils/test_transformer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.utils import ConfigDict
from mmdet.models.utils.transformer import (AdaptivePadding,
DetrTransformerDecoder,
DetrTransformerEncoder, PatchEmbed,
PatchMerging, Transformer)
def test_adaptive_padding():
for padding in ('same', 'corner'):
kernel_size = 16
stride = 16
dilation = 1
input = torch.rand(1, 1, 15, 17)
pool = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
out = pool(input)
# padding to divisible by 16
assert (out.shape[2], out.shape[3]) == (16, 32)
input = torch.rand(1, 1, 16, 17)
out = pool(input)
# padding to divisible by 16
assert (out.shape[2], out.shape[3]) == (16, 32)
kernel_size = (2, 2)
stride = (2, 2)
dilation = (1, 1)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
input = torch.rand(1, 1, 11, 13)
out = adap_pad(input)
# padding to divisible by 2
assert (out.shape[2], out.shape[3]) == (12, 14)
kernel_size = (2, 2)
stride = (10, 10)
dilation = (1, 1)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
input = torch.rand(1, 1, 10, 13)
out = adap_pad(input)
# no padding
assert (out.shape[2], out.shape[3]) == (10, 13)
kernel_size = (11, 11)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
input = torch.rand(1, 1, 11, 13)
out = adap_pad(input)
# all padding
assert (out.shape[2], out.shape[3]) == (21, 21)
# test padding as kernel is (7,9)
input = torch.rand(1, 1, 11, 13)
stride = (3, 4)
kernel_size = (4, 5)
dilation = (2, 2)
# actually (7, 9)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
dilation_out = adap_pad(input)
assert (dilation_out.shape[2], dilation_out.shape[3]) == (16, 21)
kernel_size = (7, 9)
dilation = (1, 1)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
kernel79_out = adap_pad(input)
assert (kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21)
assert kernel79_out.shape == dilation_out.shape
# assert only support "same" "corner"
with pytest.raises(AssertionError):
AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=1)
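# --- Editor sketch (not part of the original file) ---
# A minimal reimplementation of the padding arithmetic the assertions above
# rely on: the input is padded until the (dilated) kernel tiles it in whole
# strides. `padded_size` is an illustrative helper name, not mmdet API.
import math
def padded_size(in_size, kernel, stride, dilation):
    eff_kernel = (kernel - 1) * dilation + 1  # extent of the dilated kernel
    n_steps = math.ceil(in_size / stride)  # sliding-window positions
    pad = max((n_steps - 1) * stride + eff_kernel - in_size, 0)
    return in_size + pad
assert padded_size(15, 16, 16, 1) == 16  # the (16, 32) case above
assert padded_size(17, 16, 16, 1) == 32
assert padded_size(11, 4, 3, 2) == 16  # dilated kernel, the (16, 21) case
assert padded_size(13, 5, 4, 2) == 21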
def test_patch_embed():
B = 2
H = 3
W = 4
C = 3
embed_dims = 10
kernel_size = 3
stride = 1
dummy_input = torch.rand(B, C, H, W)
patch_merge_1 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=1,
norm_cfg=None)
x1, shape = patch_merge_1(dummy_input)
# test out shape
assert x1.shape == (2, 2, 10)
# test outsize is correct
assert shape == (1, 2)
# test L = out_h * out_w
assert shape[0] * shape[1] == x1.shape[1]
B = 2
H = 10
W = 10
C = 3
embed_dims = 10
kernel_size = 5
stride = 2
dummy_input = torch.rand(B, C, H, W)
# test dilation
patch_merge_2 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=None,
)
x2, shape = patch_merge_2(dummy_input)
# test out shape
assert x2.shape == (2, 1, 10)
# test outsize is correct
assert shape == (1, 1)
# test L = out_h * out_w
assert shape[0] * shape[1] == x2.shape[1]
stride = 2
input_size = (10, 10)
dummy_input = torch.rand(B, C, H, W)
# test stride and norm
patch_merge_3 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=dict(type='LN'),
input_size=input_size)
x3, shape = patch_merge_3(dummy_input)
# test out shape
assert x3.shape == (2, 1, 10)
# test outsize is correct
assert shape == (1, 1)
# test L = out_h * out_w
assert shape[0] * shape[1] == x3.shape[1]
# test the init_out_size with nn.Unfold
assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 -
1) // 2 + 1
assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 -
1) // 2 + 1
H = 11
W = 12
input_size = (H, W)
dummy_input = torch.rand(B, C, H, W)
# test stride and norm
patch_merge_3 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=dict(type='LN'),
input_size=input_size)
_, shape = patch_merge_3(dummy_input)
# when input_size equal to real input
# the out_size should be equal to `init_out_size`
assert shape == patch_merge_3.init_out_size
# test adap padding
for padding in ('same', 'corner'):
in_c = 2
embed_dims = 3
B = 2
# test stride is 1
input_size = (5, 5)
kernel_size = (5, 5)
stride = (1, 1)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 25, 3)
assert out_size == (5, 5)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (5, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 1, 3)
assert out_size == (1, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (6, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 2, 3)
assert out_size == (2, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test different kernel_size with different stride
input_size = (6, 5)
kernel_size = (6, 2)
stride = (6, 2)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 3, 3)
assert out_size == (1, 3)
assert x_out.size(1) == out_size[0] * out_size[1]
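# --- Editor sketch (not part of the original file) ---
# With explicit integer padding, PatchEmbed reduces to a Conv2d, so the
# out_size values asserted above follow the standard convolution formula;
# `conv_out` is an illustrative helper name, not mmdet API.
def conv_out(in_size, kernel, stride, padding, dilation):
    return (in_size + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1
assert conv_out(3, 3, 1, 0, 1) == 1 and conv_out(4, 3, 1, 0, 1) == 2  # (1, 2)
assert conv_out(10, 5, 2, 0, 2) == 1  # the dilation=2 case -> (1, 1)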
def test_patch_merging():
# Test the model with int padding
in_c = 3
out_c = 4
kernel_size = 3
stride = 3
padding = 1
dilation = 1
bias = False
# test the case `pad_to_stride` is False
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
B, L, C = 1, 100, 3
input_size = (10, 10)
x = torch.rand(B, L, C)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (1, 16, 4)
assert out_size == (4, 4)
# assert out size is consistent with real output
assert x_out.size(1) == out_size[0] * out_size[1]
in_c = 4
out_c = 5
kernel_size = 6
stride = 3
padding = 2
dilation = 2
bias = False
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
B, L, C = 1, 100, 4
input_size = (10, 10)
x = torch.rand(B, L, C)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (1, 4, 5)
assert out_size == (2, 2)
# assert out size is consistent with real output
assert x_out.size(1) == out_size[0] * out_size[1]
# Test with adaptive padding
for padding in ('same', 'corner'):
in_c = 2
out_c = 3
B = 2
# test stride is 1
input_size = (5, 5)
kernel_size = (5, 5)
stride = (1, 1)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 25, 3)
assert out_size == (5, 5)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (5, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 1, 3)
assert out_size == (1, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (6, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 2, 3)
assert out_size == (2, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test different kernel_size with different stride
input_size = (6, 5)
kernel_size = (6, 2)
stride = (6, 2)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 3, 3)
assert out_size == (1, 3)
assert x_out.size(1) == out_size[0] * out_size[1]
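    # Editor note (not in the original test): PatchMerging gathers k x k
    # neighbourhoods with nn.Unfold and projects them linearly from
    # in_channels * k * k to out_channels, so the sequence length always
    # equals out_h * out_w -- the two invariants asserted throughout.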
def test_detr_transformer_decoder_encoder_layer():
config = ConfigDict(
dict(
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=(
'norm',
'self_attn',
'norm',
'cross_attn',
'norm',
'ffn',
))))
assert DetrTransformerDecoder(**config).layers[0].pre_norm
assert len(DetrTransformerDecoder(**config).layers) == 6
DetrTransformerDecoder(**config)
with pytest.raises(AssertionError):
config = ConfigDict(
dict(
return_intermediate=True,
num_layers=6,
transformerlayers=[
dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm'))
] * 5))
DetrTransformerDecoder(**config)
config = ConfigDict(
dict(
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('norm', 'self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm'))))
with pytest.raises(AssertionError):
# len(operation_order) == 6
DetrTransformerEncoder(**config)
def test_transformer():
config = ConfigDict(
dict(
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=[
dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1)
],
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm')),
)))
transformer = Transformer(**config)
transformer.init_weights()
| 16,994 | 28.815789 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_utils/test_brick_wrappers.py
|
from unittest.mock import patch
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.utils import AdaptiveAvgPool2d, adaptive_avg_pool2d
if torch.__version__ != 'parrots':
torch_version = '1.7'
else:
torch_version = 'parrots'
@patch('torch.__version__', torch_version)
def test_adaptive_avg_pool2d():
# Test the empty batch dimension
# Test the two input conditions
x_empty = torch.randn(0, 3, 4, 5)
# 1. tuple[int, int]
wrapper_out = adaptive_avg_pool2d(x_empty, (2, 2))
assert wrapper_out.shape == (0, 3, 2, 2)
# 2. int
wrapper_out = adaptive_avg_pool2d(x_empty, 2)
assert wrapper_out.shape == (0, 3, 2, 2)
# wrapper op with 3-dim input
x_normal = torch.randn(3, 3, 4, 5)
wrapper_out = adaptive_avg_pool2d(x_normal, (2, 2))
ref_out = F.adaptive_avg_pool2d(x_normal, (2, 2))
assert wrapper_out.shape == (3, 3, 2, 2)
assert torch.equal(wrapper_out, ref_out)
wrapper_out = adaptive_avg_pool2d(x_normal, 2)
ref_out = F.adaptive_avg_pool2d(x_normal, 2)
assert wrapper_out.shape == (3, 3, 2, 2)
assert torch.equal(wrapper_out, ref_out)
@patch('torch.__version__', torch_version)
def test_AdaptiveAvgPool2d():
# Test the empty batch dimension
x_empty = torch.randn(0, 3, 4, 5)
# Test the four input conditions
# 1. tuple[int, int]
wrapper = AdaptiveAvgPool2d((2, 2))
wrapper_out = wrapper(x_empty)
assert wrapper_out.shape == (0, 3, 2, 2)
# 2. int
wrapper = AdaptiveAvgPool2d(2)
wrapper_out = wrapper(x_empty)
assert wrapper_out.shape == (0, 3, 2, 2)
# 3. tuple[None, int]
wrapper = AdaptiveAvgPool2d((None, 2))
wrapper_out = wrapper(x_empty)
assert wrapper_out.shape == (0, 3, 4, 2)
    # 4. tuple[int, None]
wrapper = AdaptiveAvgPool2d((2, None))
wrapper_out = wrapper(x_empty)
assert wrapper_out.shape == (0, 3, 2, 5)
# Test the normal batch dimension
x_normal = torch.randn(3, 3, 4, 5)
wrapper = AdaptiveAvgPool2d((2, 2))
ref = nn.AdaptiveAvgPool2d((2, 2))
wrapper_out = wrapper(x_normal)
ref_out = ref(x_normal)
assert wrapper_out.shape == (3, 3, 2, 2)
assert torch.equal(wrapper_out, ref_out)
wrapper = AdaptiveAvgPool2d(2)
ref = nn.AdaptiveAvgPool2d(2)
wrapper_out = wrapper(x_normal)
ref_out = ref(x_normal)
assert wrapper_out.shape == (3, 3, 2, 2)
assert torch.equal(wrapper_out, ref_out)
wrapper = AdaptiveAvgPool2d((None, 2))
ref = nn.AdaptiveAvgPool2d((None, 2))
wrapper_out = wrapper(x_normal)
ref_out = ref(x_normal)
assert wrapper_out.shape == (3, 3, 4, 2)
assert torch.equal(wrapper_out, ref_out)
wrapper = AdaptiveAvgPool2d((2, None))
ref = nn.AdaptiveAvgPool2d((2, None))
wrapper_out = wrapper(x_normal)
ref_out = ref(x_normal)
assert wrapper_out.shape == (3, 3, 2, 5)
assert torch.equal(wrapper_out, ref_out)
| 2,931 | 30.191489 | 69 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_lad_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import LADHead, lad_head
from mmdet.models.dense_heads.lad_head import levels_to_images
def test_lad_head_loss():
"""Tests lad head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
            components = np.zeros_like(loss, dtype=np.int64)
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
lad_head.skm = mock_skm()
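    # Editor note (not in the original test): `mock_skm` stands in for
    # sklearn.mixture so the GMM-based label assignment inherited from PAA
    # runs deterministically and without a scikit-learn dependency:
    # `predict` assigns every sample to component 0 and `score_samples`
    # returns random scores of the right shape.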
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
    # use CrossEntropyLoss since Focal Loss is not supported on CPU
self = LADHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
teacher_model = LADHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
teacher_model.init_weights()
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
outs_teacher = teacher_model(feat)
label_assignment_results = teacher_model.get_label_assignment(
*outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
    outs = self(feat)
empty_gt_losses = self.loss(*outs, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore, label_assignment_results)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_iou_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
label_assignment_results = teacher_model.get_label_assignment(
*outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
one_gt_losses = self.loss(*outs, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore, label_assignment_results)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_iou_loss.item() > 0, 'box loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
assert self.with_score_voting
self = LADHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
| 5,294 | 34.3 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_anchor_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import AnchorHead
def test_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
self = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
# Anchor head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
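    # Editor note (not in the original test): with s = 256 this builds five
    # levels of sizes 64, 32, 16, 8 and 4 -- one per default anchor stride
    # 2**(i + 2) -- since AnchorHead expects one feature map per stride.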
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
| 2,548 | 34.901408 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_centernet_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import CenterNetHead
def test_center_head_loss():
"""Tests center head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
    test_cfg = dict(topk=100, max_per_img=100)
self = CenterNetHead(
num_classes=4, in_channel=1, feat_channel=4, test_cfg=test_cfg)
feat = [torch.rand(1, 1, s, s)]
center_out, wh_out, offset_out = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(center_out, wh_out, offset_out, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
loss_center = empty_gt_losses['loss_center_heatmap']
loss_wh = empty_gt_losses['loss_wh']
loss_offset = empty_gt_losses['loss_offset']
assert loss_center.item() > 0, 'loss_center should be non-zero'
assert loss_wh.item() == 0, (
'there should be no loss_wh when there are no true boxes')
assert loss_offset.item() == 0, (
'there should be no loss_offset when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(center_out, wh_out, offset_out, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
loss_center = one_gt_losses['loss_center_heatmap']
loss_wh = one_gt_losses['loss_wh']
loss_offset = one_gt_losses['loss_offset']
assert loss_center.item() > 0, 'loss_center should be non-zero'
assert loss_wh.item() > 0, 'loss_wh should be non-zero'
assert loss_offset.item() > 0, 'loss_offset should be non-zero'
def test_centernet_head_get_bboxes():
"""Tests center head generating and decoding the heatmap."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': np.array([1., 1., 1., 1.]),
'pad_shape': (s, s, 3),
'batch_input_shape': (s, s),
'border': (0, 0, 0, 0),
'flip': False
}]
test_cfg = ConfigDict(
dict(topk=100, local_maximum_kernel=3, max_per_img=100))
gt_bboxes = [
torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
[10, 20, 100, 240]])
]
gt_labels = [torch.LongTensor([1, 1, 2])]
self = CenterNetHead(
num_classes=4, in_channel=1, feat_channel=4, test_cfg=test_cfg)
self.feat_shape = (1, 1, s // 4, s // 4)
targets, _ = self.get_targets(gt_bboxes, gt_labels, self.feat_shape,
img_metas[0]['pad_shape'])
center_target = targets['center_heatmap_target']
wh_target = targets['wh_target']
offset_target = targets['offset_target']
# make sure assign target right
for i in range(len(gt_bboxes[0])):
bbox, label = gt_bboxes[0][i] / 4, gt_labels[0][i]
ctx, cty = sum(bbox[0::2]) / 2, sum(bbox[1::2]) / 2
int_ctx, int_cty = int(sum(bbox[0::2]) / 2), int(sum(bbox[1::2]) / 2)
w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
x_off = ctx - int(ctx)
y_off = cty - int(cty)
assert center_target[0, label, int_cty, int_ctx] == 1
assert wh_target[0, 0, int_cty, int_ctx] == w
assert wh_target[0, 1, int_cty, int_ctx] == h
assert offset_target[0, 0, int_cty, int_ctx] == x_off
assert offset_target[0, 1, int_cty, int_ctx] == y_off
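    # Editor note (not in the original test): e.g. for the first box,
    # [10, 20, 200, 240] / 4 = [2.5, 5.0, 50.0, 60.0], so the centre is
    # (26.25, 32.5): the heatmap peak sits in cell (26, 32), wh_target
    # stores (47.5, 55.0) and offset_target the remainder (0.25, 0.5).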
# make sure get_bboxes is right
detections = self.get_bboxes([center_target], [wh_target], [offset_target],
img_metas,
rescale=True,
with_nms=False)
out_bboxes = detections[0][0][:3]
out_clses = detections[0][1][:3]
for bbox, cls in zip(out_bboxes, out_clses):
flag = False
for gt_bbox, gt_cls in zip(gt_bboxes[0], gt_labels[0]):
if (bbox[:4] == gt_bbox[:4]).all():
flag = True
assert flag, 'get_bboxes is wrong'
| 4,385 | 39.611111 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_dense_heads_attr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from terminaltables import AsciiTable
from mmdet.models import dense_heads
from mmdet.models.dense_heads import * # noqa: F401,F403
def test_dense_heads_test_attr():
"""Tests inference methods such as simple_test and aug_test."""
# make list of dense heads
exceptions = ['FeatureAdaption'] # module used in head
all_dense_heads = [m for m in dense_heads.__all__ if m not in exceptions]
# search attributes
check_attributes = [
'simple_test', 'aug_test', 'simple_test_bboxes', 'simple_test_rpn',
'aug_test_rpn'
]
table_header = ['head name'] + check_attributes
table_data = [table_header]
not_found = {k: [] for k in check_attributes}
for target_head_name in all_dense_heads:
target_head = globals()[target_head_name]
target_head_attributes = dir(target_head)
check_results = [target_head_name]
for check_attribute in check_attributes:
found = check_attribute in target_head_attributes
check_results.append(found)
if not found:
not_found[check_attribute].append(target_head_name)
table_data.append(check_results)
table = AsciiTable(table_data)
print()
print(table.table)
    # NOTE: this test only checks that the attributes exist;
    # simple_test of RPN heads will still fail if actually called.
assert len(not_found['simple_test']) == 0, \
f'simple_test not found in {not_found["simple_test"]}'
if len(not_found['aug_test']) != 0:
warnings.warn(f'aug_test not found in {not_found["aug_test"]}. '
'Please implement it or raise NotImplementedError.')
| 1,702 | 36.844444 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_ga_anchor_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GuidedAnchorHead
def test_ga_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5,
pos_weight=-1,
debug=False))
head = GuidedAnchorHead(num_classes=4, in_channels=4, train_cfg=cfg)
# Anchor head expects a multiple levels of features per image
if torch.cuda.is_available():
head.cuda()
feat = [
torch.rand(1, 4, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.approx_anchor_generator.base_anchors))
]
cls_scores, bbox_preds, shape_preds, loc_preds = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
loc_preds, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
loc_preds, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
| 3,410 | 36.076087 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_yolof_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import YOLOFHead
def test_yolof_head_loss():
"""Tests yolof head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='UniformAssigner',
pos_ignore_thr=0.15,
neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = YOLOFHead(
num_classes=4,
in_channels=1,
reg_decoded_bbox=True,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0))
feat = [torch.rand(1, 1, s // 32, s // 32)]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
| 2,716 | 34.285714 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_vfnet_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import VFNetHead
def test_vfnet_head_loss():
"""Tests vfnet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = VFNetHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))
if torch.cuda.is_available():
self.cuda()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size).cuda()
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, bbox_preds_refine = self.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
| 2,561 | 39.03125 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_pisa_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import PISARetinaHead, PISASSDHead
from mmdet.models.roi_heads import PISARoIHead
def test_pisa_retinanet_head_loss():
"""Tests pisa retinanet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
allowed_border=0,
pos_weight=-1,
debug=False))
self = PISARetinaHead(num_classes=4, in_channels=1, train_cfg=cfg)
# Anchor head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_pisa_ssd_head_loss():
"""Tests pisa ssd head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False))
ssd_anchor_generator = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
strides=[1],
ratios=([2], ),
basesize_ratio_range=(0.15, 0.9))
self = PISASSDHead(
num_classes=4,
in_channels=(1, ),
train_cfg=cfg,
anchor_generator=ssd_anchor_generator)
# Anchor head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    # SSD is special: #pos:#neg = 1:3, so with no positives no negatives are
    # sampled either, and an empty gt also leads to loss_cls = 0
    assert empty_cls_loss.item() == 0, (
        'there should be no cls loss when there are no true boxes')
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_pisa_roi_head_loss():
"""Tests pisa roi head loss when truth is empty and non-empty."""
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='ScoreHLRSampler',
num=4,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0.),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
allowed_border=0,
pos_weight=-1,
debug=False))
bbox_roi_extractor = dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=1,
featmap_strides=[1])
bbox_head = dict(
type='Shared2FCBBoxHead',
in_channels=1,
fc_out_channels=2,
roi_feat_size=7,
num_classes=4,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))
self = PISARoIHead(bbox_roi_extractor, bbox_head, train_cfg=train_cfg)
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
# Anchor head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(1)
]
proposal_list = [
torch.Tensor([[22.6667, 22.8757, 238.6326, 151.8874], [0, 3, 5, 7]])
]
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.forward_train(feat, img_metas, proposal_list,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.forward_train(feat, img_metas, proposal_list,
gt_bboxes, gt_labels, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
| 8,805 | 34.796748 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_corner_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmdet.models.dense_heads import CornerHead
def test_corner_head_loss():
"""Tests corner head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
self = CornerHead(num_classes=4, in_channels=1)
# Corner head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // 4, s // 4) for _ in range(self.num_feat_levels)
]
tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
br_offs, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
empty_det_loss = sum(empty_gt_losses['det_loss'])
empty_push_loss = sum(empty_gt_losses['push_loss'])
empty_pull_loss = sum(empty_gt_losses['pull_loss'])
empty_off_loss = sum(empty_gt_losses['off_loss'])
assert empty_det_loss.item() > 0, 'det loss should be non-zero'
assert empty_push_loss.item() == 0, (
'there should be no push loss when there are no true boxes')
assert empty_pull_loss.item() == 0, (
'there should be no pull loss when there are no true boxes')
assert empty_off_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
br_offs, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
onegt_det_loss = sum(one_gt_losses['det_loss'])
onegt_push_loss = sum(one_gt_losses['push_loss'])
onegt_pull_loss = sum(one_gt_losses['pull_loss'])
onegt_off_loss = sum(one_gt_losses['off_loss'])
assert onegt_det_loss.item() > 0, 'det loss should be non-zero'
    assert onegt_push_loss.item() == 0, (
        'there should be no push loss when there is only one true box')
assert onegt_pull_loss.item() > 0, 'pull loss should be non-zero'
assert onegt_off_loss.item() > 0, 'off loss should be non-zero'
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874],
[123.6667, 123.8757, 138.6326, 251.8874]]),
]
gt_labels = [torch.LongTensor([2, 3])]
# equalize the corners' embedding value of different objects to make the
# push_loss larger than 0
gt_bboxes_ind = (gt_bboxes[0] // 4).int().tolist()
for tl_emb_feat, br_emb_feat in zip(tl_embs, br_embs):
tl_emb_feat[:, :, gt_bboxes_ind[0][1],
gt_bboxes_ind[0][0]] = tl_emb_feat[:, :,
gt_bboxes_ind[1][1],
gt_bboxes_ind[1][0]]
br_emb_feat[:, :, gt_bboxes_ind[0][3],
gt_bboxes_ind[0][2]] = br_emb_feat[:, :,
gt_bboxes_ind[1][3],
gt_bboxes_ind[1][2]]
two_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
br_offs, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
twogt_det_loss = sum(two_gt_losses['det_loss'])
twogt_push_loss = sum(two_gt_losses['push_loss'])
twogt_pull_loss = sum(two_gt_losses['pull_loss'])
twogt_off_loss = sum(two_gt_losses['off_loss'])
assert twogt_det_loss.item() > 0, 'det loss should be non-zero'
assert twogt_push_loss.item() > 0, 'push loss should be non-zero'
assert twogt_pull_loss.item() > 0, 'pull loss should be non-zero'
assert twogt_off_loss.item() > 0, 'off loss should be non-zero'
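    # Editor note (not in the original test): the pull loss draws the two
    # corners of one object towards their mean embedding, while the push
    # loss separates the means of different objects -- so push is zero with
    # a single box, and copying one box's corner embeddings onto the
    # other's (above) is what makes the two-box push loss positive.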
def test_corner_head_encode_and_decode_heatmap():
"""Tests corner head generating and decoding the heatmap."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3),
'border': (0, 0, 0, 0)
}]
gt_bboxes = [
torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
[10, 20, 200, 240]])
]
gt_labels = [torch.LongTensor([1, 1, 2])]
self = CornerHead(num_classes=4, in_channels=1, corner_emb_channels=1)
feat = [
torch.rand(1, 1, s // 4, s // 4) for _ in range(self.num_feat_levels)
]
targets = self.get_targets(
gt_bboxes,
gt_labels,
feat[0].shape,
img_metas[0]['pad_shape'],
with_corner_emb=self.with_corner_emb)
gt_tl_heatmap = targets['topleft_heatmap']
gt_br_heatmap = targets['bottomright_heatmap']
gt_tl_offset = targets['topleft_offset']
gt_br_offset = targets['bottomright_offset']
embedding = targets['corner_embedding']
[top, left], [bottom, right] = embedding[0][0]
gt_tl_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
gt_br_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
gt_tl_embedding_heatmap[0, 0, top, left] = 1
gt_br_embedding_heatmap[0, 0, bottom, right] = 1
batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
tl_heat=gt_tl_heatmap,
br_heat=gt_br_heatmap,
tl_off=gt_tl_offset,
br_off=gt_br_offset,
tl_emb=gt_tl_embedding_heatmap,
br_emb=gt_br_embedding_heatmap,
img_meta=img_metas[0],
k=100,
kernel=3,
distance_threshold=0.5)
bboxes = batch_bboxes.view(-1, 4)
scores = batch_scores.view(-1, 1)
clses = batch_clses.view(-1, 1)
idx = scores.argsort(dim=0, descending=True)
bboxes = bboxes[idx].view(-1, 4)
scores = scores[idx].view(-1)
clses = clses[idx].view(-1)
valid_bboxes = bboxes[torch.where(scores > 0.05)]
valid_labels = clses[torch.where(scores > 0.05)]
max_coordinate = valid_bboxes.max()
offsets = valid_labels.to(valid_bboxes) * (max_coordinate + 1)
gt_offsets = gt_labels[0].to(gt_bboxes[0]) * (max_coordinate + 1)
offset_bboxes = valid_bboxes + offsets[:, None]
offset_gtbboxes = gt_bboxes[0] + gt_offsets[:, None]
iou_matrix = bbox_overlaps(offset_bboxes.numpy(), offset_gtbboxes.numpy())
assert (iou_matrix == 1).sum() == 3
| 6,756 | 39.220238 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_fsaf_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import FSAFHead
def test_fsaf_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = dict(
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=1,
scales_per_octave=1,
ratios=[1.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0,
reduction='none'),
loss_bbox=dict(
type='IoULoss', eps=1e-6, loss_weight=1.0, reduction='none'))
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='CenterRegionAssigner',
pos_scale=0.2,
neg_scale=0.2,
min_pos_iof=0.01),
allowed_border=-1,
pos_weight=-1,
debug=False))
head = FSAFHead(num_classes=4, in_channels=1, train_cfg=train_cfg, **cfg)
if torch.cuda.is_available():
head.cuda()
# FSAF head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.anchor_generator.strides))
]
cls_scores, bbox_preds = head.forward(feat)
gt_bboxes_ignore = None
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
# Test that empty ground truth encourages the network to predict bkg
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
| 3,097 | 36.325301 | 79 |
py
|