repo (stringlengths, 2–99) | file (stringlengths, 13–225) | code (stringlengths, 0–18.3M) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---
| aodisaggregation | aodisaggregation-main/utils/dict.py |
import itertools
def product_dict(**kwargs):
    """Yield every combination of the given keyword-argument lists as a dict.
    Keys are sorted first so the iteration order is deterministic.
    """
    keys = list(kwargs.keys())
    vals = list(kwargs.values())
    sorted_idx = sorted(range(len(keys)), key=keys.__getitem__)
    sorted_keys = [keys[i] for i in sorted_idx]
    sorted_vals = [vals[i] for i in sorted_idx]
    for instance in itertools.product(*sorted_vals):
        yield dict(zip(sorted_keys, instance))
def flatten_dict(buffer, parent_key="", output_dict=None):
    """Recursively flatten a nested dict, joining nested keys with '_'."""
    if output_dict is None:
        output_dict = dict()
    if isinstance(buffer, dict) and len(buffer.keys()) > 0:
        for child_key in buffer.keys():
            key = '_'.join([parent_key, child_key])
            flatten_dict(buffer[child_key], key, output_dict)
    else:
        # strip the leading '_' introduced by the join above
        output_dict.update({parent_key[1:]: buffer})
    return output_dict
def flatten_dict_as_str(my_dict):
    """Render a nested dict as a single 'key=value' string."""
    flattened_dict = flatten_dict(my_dict)
    dict_as_str = '_'.join(f'{k}={v}' for k, v in flattened_dict.items())
    return dict_as_str
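# A minimal usage sketch (illustrative values, not part of the repository):
#   >>> list(product_dict(lr=[0.1, 0.01], seed=[0]))
#   [{'lr': 0.1, 'seed': 0}, {'lr': 0.01, 'seed': 0}]
#   >>> flatten_dict_as_str({'optim': {'lr': 0.1}, 'seed': 0})
#   'optim_lr=0.1_seed=0'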
| 966 | 30.193548 | 73 | py |
| aodisaggregation | aodisaggregation-main/utils/__init__.py |
from .dict import *
| 20 | 9.5 | 19 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/test.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import warnings
import mmcv
import numpy as np
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmcls.apis import multi_gpu_test, single_gpu_test, multitask_multi_gpu_test, multitask_single_gpu_test
from mmcls.datasets import build_dataloader, build_dataset
from mmcls.models import build_classifier
# TODO: import `wrap_fp16_model` from mmcv and delete it from mmcls
try:
from mmcv.runner import wrap_fp16_model
except ImportError:
    warnings.warn('wrap_fp16_model from mmcls will be deprecated. '
                  'Please install mmcv>=1.1.4.')
from mmcls.core import wrap_fp16_model
def parse_args():
parser = argparse.ArgumentParser(description='mmcls test model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
parser.add_argument(
'--metrics',
type=str,
nargs='+',
        help='evaluation metrics, which depend on the dataset, e.g., '
'"accuracy", "precision", "recall", "f1_score", "support" for single '
'label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for '
'multi-label dataset')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file (deprecated); '
        'use --cfg-options instead.')
parser.add_argument(
'--metric-options',
nargs='+',
action=DictAction,
default={},
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be parsed as the metric_options dict for the '
        'dataset.evaluate() function.')
parser.add_argument(
'--show-options',
nargs='+',
action=DictAction,
        help='custom options for show_result, key-value pairs in xxx=yyy '
        'format. Check available options in `model.show_result`.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--device',
choices=['cpu', 'cuda'],
default='cuda',
help='device used for testing')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
assert args.metrics or args.out, \
'Please specify at least one of output path and evaluation metrics.'
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
dataset = build_dataset(cfg.data.test)
# the extra round_up data will be removed during gpu/cpu collect
data_loader = build_dataloader(
dataset,
samples_per_gpu=cfg.data.samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False,
round_up=True)
# build the model and load checkpoint
model = build_classifier(cfg.model)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if 'CLASSES' in checkpoint.get('meta', {}):
CLASSES = checkpoint['meta']['CLASSES']
else:
from mmcls.datasets import ImageNet
warnings.simplefilter('once')
        warnings.warn('Class names are not saved in the checkpoint\'s '
                      'metadata, using ImageNet classes by default.')
CLASSES = ImageNet.CLASSES
if not distributed:
if args.device == 'cpu':
model = model.cpu()
else:
model = MMDataParallel(model, device_ids=[0])
model.CLASSES = CLASSES
show_kwargs = {} if args.show_options is None else args.show_options
if cfg.get('multi_task', False):
outputs = multitask_single_gpu_test(model, data_loader, args.show, args.show_dir,
**show_kwargs)
else:
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
**show_kwargs)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
if cfg.get('multi_task', False):
outputs = multitask_multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
else:
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
results = {}
if args.metrics:
eval_results = dataset.evaluate(outputs, args.metrics,
args.metric_options)
results.update(eval_results)
for k, v in eval_results.items():
print(f'\n{k} : {v:.2f}')
if args.out:
scores = np.vstack(outputs)
pred_score = np.max(scores, axis=1)
pred_label = np.argmax(scores, axis=1)
pred_class = [CLASSES[lb] for lb in pred_label]
results.update({
'class_scores': scores,
'pred_score': pred_score,
'pred_label': pred_label,
'pred_class': pred_class
})
print(f'\ndumping results to {args.out}')
mmcv.dump(results, args.out)
if __name__ == '__main__':
main()
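# Example invocation (hypothetical paths):
#   python tools/test.py configs/my_cfg.py work_dirs/my_cfg/latest.pth \
#       --metrics accuracy --out results.pkl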
| 7,640 | 36.455882 | 107 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/load_ckp.py |
import torch
def main():
# PATH = 'resnet18_R182R18_common_network_20211112v2.pth'
# ckp_path = '/home/yangxingyi/NeuralFactor/Multi-task-Depth-Seg/result/NYUD/kd_resnet_50_to_resnet_18/multi_task_baseline/best_model.pth.tar'
    ckp_path = ''  # set this to a checkpoint path before running main()
model_dict = torch.load(ckp_path)
# save_dict= dict(common_network=model_dict)
# torch.save(save_dict, PATH, _use_new_zipfile_serialization=False)
print(model_dict.keys())
def ckp_to_load():
ckp_path = '/home/yangxingyi/.cache/torch/checkpoints/resnet50_8xb32_in1k_20210831-ea4938fc.pth'
save_path = '/home/yangxingyi/.cache/torch/checkpoints/resnet50_8xb32_in1k_20210831-ea4938fc_converted.pth'
model_dict = torch.load(ckp_path)
if 'state_dict' in model_dict.keys():
model_dict = model_dict['state_dict']
new_dict = dict()
for k, v in model_dict.items():
if k.startswith('fc'):
new_k = 'head.{}'.format(k)
else:
new_k = 'backbone.{}'.format(k)
print('Old Key:', k, '-> New Key:', new_k)
new_dict[new_k] = v
    save_dict = dict(state_dict=new_dict)
torch.save(save_dict, save_path, _use_new_zipfile_serialization=False)
if __name__ == '__main__':
# main()
ckp_to_load()
| 1,245 | 37.9375 | 146 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/train.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcls import __version__
from mmcls.apis import set_random_seed, train_model
from mmcls.datasets import build_dataset
from mmcls.models import build_classifier
from mmcls.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a model')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--device', help='device used for training')
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file (deprecated); '
        'use --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
model = build_classifier(cfg.model)
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmcls version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmcls_version=__version__,
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
device='cpu' if args.device == 'cpu' else 'cuda',
meta=meta)
if __name__ == '__main__':
main()
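# Example invocation (hypothetical paths):
#   python tools/train.py configs/my_cfg.py --work-dir work_dirs/my_cfg --seed 0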
| 6,600 | 35.269231 | 79 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/dist_train.py |
import os
import subprocess
import time
import argparse
def assign_free_gpus(threshold_vram_usage=1500, max_gpus=1, wait=True, sleep_time=10):
"""
Assigns free gpus to the current process via the CUDA_AVAILABLE_DEVICES env variable
This function should be called after all imports,
in case you are setting CUDA_AVAILABLE_DEVICES elsewhere
Borrowed and fixed from https://gist.github.com/afspies/7e211b83ca5a8902849b05ded9a10696
Args:
threshold_vram_usage (int, optional): A GPU is considered free if the vram usage is below the threshold
Defaults to 1500 (MiB).
max_gpus (int, optional): Max GPUs is the maximum number of gpus to assign.
Defaults to 2.
wait (bool, optional): Whether to wait until a GPU is free. Default False.
sleep_time (int, optional): Sleep time (in seconds) to wait before checking GPUs, if wait=True. Default 10.
"""
def _check():
# Get the list of GPUs via nvidia-smi
smi_query_result = subprocess.check_output(
"nvidia-smi -q -d Memory | grep -A4 GPU", shell=True
)
# Extract the usage information
gpu_info = smi_query_result.decode("utf-8").split("\n")
gpu_info = list(filter(lambda info: "Used" in info, gpu_info))
gpu_info = [
int(x.split(":")[1].replace("MiB", "").strip()) for x in gpu_info
] # Remove garbage
# Keep gpus under threshold only
free_gpus = [
str(i) for i, mem in enumerate(gpu_info) if mem < threshold_vram_usage
]
if len(free_gpus) < max_gpus:
return False
free_gpus = free_gpus[: min(max_gpus, len(free_gpus))]
gpus_to_use = ",".join(free_gpus)
return gpus_to_use
while True:
gpus_to_use = _check()
if gpus_to_use or not wait:
break
print(f"No free GPUs found, retrying in {sleep_time}s")
time.sleep(sleep_time)
if not gpus_to_use:
raise RuntimeError("No free GPUs found")
os.environ["CUDA_VISIBLE_DEVICES"] = gpus_to_use
print(f"Using GPU(s): {gpus_to_use}")
def assign_free_port():
    import socket
    sock = socket.socket()
    sock.bind(('', 0))
    free_port = sock.getsockname()[1]
    sock.close()  # release the socket so the launcher can bind the port
    free_port = str(free_port)
    os.environ["PORT"] = free_port
    print(f"Using PORT: {free_port}")
parser = argparse.ArgumentParser(description='Train a model')
parser.add_argument('config', help='train config file path')
parser.add_argument('GPUS', type=int, help='number of gpus')
# parser.add_argument('ARGS', type=str, default='', help='number of gpus')
args = parser.parse_args()
assign_free_gpus(threshold_vram_usage=1500, max_gpus=args.GPUS, wait=True, sleep_time=3 * 60)
assign_free_port()
os.system(f'bash tools/dist_train.sh {args.config} {args.GPUS}') #{args.ARGS}
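# Example invocation (hypothetical config path):
#   python tools/dist_train.py configs/my_cfg.py 2
# This waits until two GPUs each use less than 1500 MiB, exports
# CUDA_VISIBLE_DEVICES and PORT, then delegates to tools/dist_train.sh.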
| 2,904 | 40.5 | 115 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/deployment/mmcls2torchserve.py |
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmcls2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts mmclassification model (config + checkpoint) to TorchServe
`.mar`.
Args:
config_file:
In MMClassification config format.
The contents vary for each task repository.
checkpoint_file:
In MMClassification checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mmcv.mkdir_or_exist(output_folder)
config = mmcv.Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmcls_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert mmcls models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
        help='If not None, used for naming the `{model_name}.mar` '
        'file that will be created under `output_folder`. '
        'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
        raise ImportError('`torch-model-archiver` is required. '
                          'Try: pip install torch-model-archiver')
mmcls2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
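# Example invocation (hypothetical paths):
#   python tools/deployment/mmcls2torchserve.py config.py checkpoint.pth \
#       --output-folder ./model-store --model-name my_classifier
# The resulting my_classifier.mar can then be served with, e.g.,
# `torchserve --start --model-store ./model-store --models my_classifier.mar`.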
| 3,706 | 32.098214 | 76 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/deployment/test.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
import mmcv
import numpy as np
from mmcv import DictAction
from mmcv.parallel import MMDataParallel
from mmcls.apis import single_gpu_test
from mmcls.core.export import ONNXRuntimeClassifier, TensorRTClassifier
from mmcls.datasets import build_dataloader, build_dataset
def parse_args():
parser = argparse.ArgumentParser(
description='Test (and eval) an ONNX model using ONNXRuntime.')
parser.add_argument('config', help='model config file')
parser.add_argument('model', help='filename of the input ONNX model')
parser.add_argument(
'--backend',
help='Backend of the model.',
choices=['onnxruntime', 'tensorrt'])
parser.add_argument(
'--out', type=str, help='output result file in pickle format')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file.')
parser.add_argument(
'--metrics',
type=str,
nargs='+',
        help='evaluation metrics, which depend on the dataset, e.g., '
'"accuracy", "precision", "recall", "f1_score", "support" for single '
'label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for '
'multi-label dataset')
parser.add_argument(
'--metric-options',
nargs='+',
action=DictAction,
default={},
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be parsed as the metric_options dict for the '
        'dataset.evaluate() function.')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
args = parser.parse_args()
return args
def main():
args = parse_args()
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# build dataset and dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=cfg.data.samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
shuffle=False,
round_up=False)
# build onnxruntime model and run inference.
if args.backend == 'onnxruntime':
model = ONNXRuntimeClassifier(
args.model, class_names=dataset.CLASSES, device_id=0)
elif args.backend == 'tensorrt':
model = TensorRTClassifier(
args.model, class_names=dataset.CLASSES, device_id=0)
    else:
        print('Unknown backend: {}.'.format(args.backend))
        exit(1)
model = MMDataParallel(model, device_ids=[0])
model.CLASSES = dataset.CLASSES
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
if args.metrics:
results = dataset.evaluate(outputs, args.metrics, args.metric_options)
for k, v in results.items():
print(f'\n{k} : {v:.2f}')
else:
warnings.warn('Evaluation metrics are not specified.')
scores = np.vstack(outputs)
pred_score = np.max(scores, axis=1)
pred_label = np.argmax(scores, axis=1)
pred_class = [dataset.CLASSES[lb] for lb in pred_label]
results = {
'pred_score': pred_score,
'pred_label': pred_label,
'pred_class': pred_class
}
if not args.out:
print('\nthe predicted result for the first element is '
f'pred_score = {pred_score[0]:.2f}, '
f'pred_label = {pred_label[0]} '
f'and pred_class = {pred_class[0]}. '
'Specify --out to save all results to files.')
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(results, args.out)
if __name__ == '__main__':
main()
| 4,153 | 34.504274 | 79 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/deployment/onnx2tensorrt.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import numpy as np
def get_GiB(x: int):
    """Return x GiB expressed in bytes."""
    return x * (1 << 30)
def onnx2tensorrt(onnx_file,
trt_file,
input_shape,
max_batch_size,
fp16_mode=False,
verify=False,
workspace_size=1):
"""Create tensorrt engine from onnx model.
Args:
onnx_file (str): Filename of the input ONNX model file.
trt_file (str): Filename of the output TensorRT engine file.
input_shape (list[int]): Input shape of the model.
eg [1, 3, 224, 224].
        max_batch_size (int): Max batch size of the model.
        fp16_mode (bool, optional): Whether to enable fp16 precision.
            Defaults to False.
        verify (bool, optional): Whether to verify the converted model.
            Defaults to False.
        workspace_size (int, optional): Maximum workspace size of the GPU
            in GiB. Defaults to 1.
"""
import onnx
from mmcv.tensorrt import TRTWraper, onnx2trt, save_trt_engine
onnx_model = onnx.load(onnx_file)
    # create trt engine and wrapper
assert max_batch_size >= 1
max_shape = [max_batch_size] + list(input_shape[1:])
opt_shape_dict = {'input': [input_shape, input_shape, max_shape]}
max_workspace_size = get_GiB(workspace_size)
trt_engine = onnx2trt(
onnx_model,
opt_shape_dict,
fp16_mode=fp16_mode,
max_workspace_size=max_workspace_size)
save_dir, _ = osp.split(trt_file)
if save_dir:
os.makedirs(save_dir, exist_ok=True)
save_trt_engine(trt_engine, trt_file)
print(f'Successfully created TensorRT engine: {trt_file}')
if verify:
import torch
import onnxruntime as ort
input_img = torch.randn(*input_shape)
input_img_cpu = input_img.detach().cpu().numpy()
input_img_cuda = input_img.cuda()
# Get results from ONNXRuntime
session_options = ort.SessionOptions()
sess = ort.InferenceSession(onnx_file, session_options)
# get input and output names
input_names = [_.name for _ in sess.get_inputs()]
output_names = [_.name for _ in sess.get_outputs()]
onnx_outputs = sess.run(None, {
input_names[0]: input_img_cpu,
})
# Get results from TensorRT
trt_model = TRTWraper(trt_file, input_names, output_names)
with torch.no_grad():
trt_outputs = trt_model({input_names[0]: input_img_cuda})
trt_outputs = [
trt_outputs[_].detach().cpu().numpy() for _ in output_names
]
# Compare results
np.testing.assert_allclose(
onnx_outputs[0], trt_outputs[0], rtol=1e-05, atol=1e-05)
print('The numerical values are the same ' +
'between ONNXRuntime and TensorRT')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMClassification models from ONNX to TensorRT')
parser.add_argument('model', help='Filename of the input ONNX model')
parser.add_argument(
'--trt-file',
type=str,
default='tmp.trt',
help='Filename of the output TensorRT engine')
parser.add_argument(
'--verify',
action='store_true',
help='Verify the outputs of ONNXRuntime and TensorRT')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[224, 224],
help='Input size of the model')
parser.add_argument(
'--max-batch-size',
type=int,
default=1,
help='Maximum batch size of TensorRT model.')
parser.add_argument('--fp16', action='store_true', help='Enable fp16 mode')
parser.add_argument(
'--workspace-size',
type=int,
default=1,
help='Max workspace size of GPU in GiB')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (1, 3) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
# Create TensorRT engine
onnx2tensorrt(
args.model,
args.trt_file,
input_shape,
args.max_batch_size,
fp16_mode=args.fp16,
verify=args.verify,
workspace_size=args.workspace_size)
| 4,419 | 29.909091 | 79 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/deployment/pytorch2onnx.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from functools import partial
import mmcv
import numpy as np
import onnxruntime as rt
import torch
from mmcv.onnx import register_extra_symbolics
from mmcv.runner import load_checkpoint
from mmcls.models import build_classifier
torch.manual_seed(3)
def _demo_mm_inputs(input_shape, num_classes):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
gt_labels = rng.randint(
low=0, high=num_classes, size=(N, 1)).astype(np.uint8)
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'gt_labels': torch.LongTensor(gt_labels),
}
return mm_inputs
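# For instance, _demo_mm_inputs((1, 3, 224, 224), 10) returns a dict whose
# 'imgs' tensor has shape (1, 3, 224, 224) and whose 'gt_labels' tensor holds
# a single random label drawn from [0, 10).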
def pytorch2onnx(model,
input_shape,
opset_version=11,
dynamic_export=False,
show=False,
output_file='tmp.onnx',
do_simplify=False,
verify=False):
"""Export Pytorch model to ONNX model and verify the outputs are same
between Pytorch and ONNX.
Args:
model (nn.Module): Pytorch model we want to export.
input_shape (tuple): Use this input shape to construct
the corresponding dummy input and execute the model.
opset_version (int): The onnx op version. Default: 11.
        show (bool): Whether to print the computation graph. Default: False.
output_file (string): The path to where we store the output ONNX model.
Default: `tmp.onnx`.
verify (bool): Whether compare the outputs between Pytorch and ONNX.
Default: False.
"""
model.cpu().eval()
num_classes = model.head.num_classes
mm_inputs = _demo_mm_inputs(input_shape, num_classes)
imgs = mm_inputs.pop('imgs')
img_list = [img[None, :] for img in imgs]
# replace original forward function
origin_forward = model.forward
model.forward = partial(model.forward, img_metas={}, return_loss=False)
register_extra_symbolics(opset_version)
# support dynamic shape export
if dynamic_export:
dynamic_axes = {
'input': {
0: 'batch',
2: 'width',
3: 'height'
},
'probs': {
0: 'batch'
}
}
else:
dynamic_axes = {}
with torch.no_grad():
torch.onnx.export(
model, (img_list, ),
output_file,
input_names=['input'],
output_names=['probs'],
export_params=True,
keep_initializers_as_inputs=True,
dynamic_axes=dynamic_axes,
verbose=show,
opset_version=opset_version)
print(f'Successfully exported ONNX model: {output_file}')
model.forward = origin_forward
if do_simplify:
from mmcv import digit_version
import onnxsim
import onnx
min_required_version = '0.3.0'
assert digit_version(mmcv.__version__) >= digit_version(
min_required_version
        ), f'Requires onnx-simplifier>={min_required_version}'
if dynamic_axes:
input_shape = (input_shape[0], input_shape[1], input_shape[2] * 2,
input_shape[3] * 2)
else:
input_shape = (input_shape[0], input_shape[1], input_shape[2],
input_shape[3])
imgs = _demo_mm_inputs(input_shape, model.head.num_classes).pop('imgs')
input_dic = {'input': imgs.detach().cpu().numpy()}
input_shape_dic = {'input': list(input_shape)}
model_opt, check_ok = onnxsim.simplify(
output_file,
input_shapes=input_shape_dic,
input_data=input_dic,
dynamic_input_shape=dynamic_export)
if check_ok:
onnx.save(model_opt, output_file)
print(f'Successfully simplified ONNX model: {output_file}')
else:
print('Failed to simplify ONNX model.')
if verify:
# check by onnx
import onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
# test the dynamic model
if dynamic_export:
dynamic_test_inputs = _demo_mm_inputs(
(input_shape[0], input_shape[1], input_shape[2] * 2,
input_shape[3] * 2), model.head.num_classes)
imgs = dynamic_test_inputs.pop('imgs')
img_list = [img[None, :] for img in imgs]
# check the numerical value
# get pytorch output
pytorch_result = model(img_list, img_metas={}, return_loss=False)[0]
# get onnx output
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [
node.name for node in onnx_model.graph.initializer
]
net_feed_input = list(set(input_all) - set(input_initializer))
assert (len(net_feed_input) == 1)
sess = rt.InferenceSession(output_file)
onnx_result = sess.run(
None, {net_feed_input[0]: img_list[0].detach().numpy()})[0]
if not np.allclose(pytorch_result, onnx_result):
raise ValueError(
'The outputs are different between Pytorch and ONNX')
        print('The outputs are the same between Pytorch and ONNX')
def parse_args():
parser = argparse.ArgumentParser(description='Convert MMCls to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file', default=None)
parser.add_argument('--show', action='store_true', help='show onnx graph')
parser.add_argument(
'--verify', action='store_true', help='verify the onnx model')
parser.add_argument('--output-file', type=str, default='tmp.onnx')
parser.add_argument('--opset-version', type=int, default=11)
parser.add_argument(
'--simplify',
action='store_true',
help='Whether to simplify onnx model.')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[224, 224],
help='input image size')
parser.add_argument(
'--dynamic-export',
action='store_true',
        help='Whether to export ONNX with dynamic input shape. '
        'Defaults to False.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (
1,
3,
) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = mmcv.Config.fromfile(args.config)
cfg.model.pretrained = None
# build the model and load checkpoint
classifier = build_classifier(cfg.model)
if args.checkpoint:
load_checkpoint(classifier, args.checkpoint, map_location='cpu')
    # convert the model to an ONNX file
pytorch2onnx(
classifier,
input_shape,
opset_version=args.opset_version,
show=args.show,
dynamic_export=args.dynamic_export,
output_file=args.output_file,
do_simplify=args.simplify,
verify=args.verify)
| 7,488 | 32.137168 | 79 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/deployment/mmcls_handler.py |
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmcls.apis import inference_model, init_model
class MMclsHandler(BaseHandler):
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = torch.device(self.map_location + ':' +
str(properties.get('gpu_id')) if torch.cuda.
is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_model(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = []
for image in data:
results.append(inference_model(self.model, image))
return results
def postprocess(self, data):
for result in data:
result['pred_label'] = int(result['pred_label'])
return data
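# Once an archive built by mmcls2torchserve.py is registered, the handler can
# be exercised over HTTP (assuming TorchServe's default inference port 8080
# and a model named `my_classifier`):
#   curl http://127.0.0.1:8080/predictions/my_classifier -T demo.jpg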
| 1,650 | 30.75 | 79 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/deployment/pytorch2torchscript.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from functools import partial
import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint
from torch import nn
from mmcls.models import build_classifier
torch.manual_seed(3)
def _demo_mm_inputs(input_shape: tuple, num_classes: int):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
gt_labels = rng.randint(
low=0, high=num_classes, size=(N, 1)).astype(np.uint8)
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(False),
'gt_labels': torch.LongTensor(gt_labels),
}
return mm_inputs
def pytorch2torchscript(model: nn.Module, input_shape: tuple, output_file: str,
verify: bool):
"""Export Pytorch model to TorchScript model through torch.jit.trace and
verify the outputs are same between Pytorch and TorchScript.
Args:
model (nn.Module): Pytorch model we want to export.
input_shape (tuple): Use this input shape to construct
the corresponding dummy input and execute the model.
output_file (string): The path to where we store the output
TorchScript model.
verify (bool): Whether compare the outputs between Pytorch
and TorchScript through loading generated output_file.
"""
model.cpu().eval()
num_classes = model.head.num_classes
mm_inputs = _demo_mm_inputs(input_shape, num_classes)
imgs = mm_inputs.pop('imgs')
img_list = [img[None, :] for img in imgs]
# replace original forward function
origin_forward = model.forward
model.forward = partial(model.forward, img_metas={}, return_loss=False)
with torch.no_grad():
trace_model = torch.jit.trace(model, img_list[0])
save_dir, _ = osp.split(output_file)
if save_dir:
os.makedirs(save_dir, exist_ok=True)
trace_model.save(output_file)
print(f'Successfully exported TorchScript model: {output_file}')
model.forward = origin_forward
if verify:
# load by torch.jit
jit_model = torch.jit.load(output_file)
# check the numerical value
# get pytorch output
pytorch_result = model(img_list, img_metas={}, return_loss=False)[0]
# get jit output
jit_result = jit_model(img_list[0])[0].detach().numpy()
if not np.allclose(pytorch_result, jit_result):
raise ValueError(
'The outputs are different between Pytorch and TorchScript')
        print('The outputs are the same between Pytorch and TorchScript')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMCls to TorchScript')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file', type=str)
parser.add_argument(
'--verify',
action='store_true',
help='verify the TorchScript model',
default=False)
parser.add_argument('--output-file', type=str, default='tmp.pt')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[224, 224],
help='input image size')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (
1,
3,
) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = mmcv.Config.fromfile(args.config)
cfg.model.pretrained = None
# build the model and load checkpoint
classifier = build_classifier(cfg.model)
if args.checkpoint:
load_checkpoint(classifier, args.checkpoint, map_location='cpu')
    # convert the model to a TorchScript file
pytorch2torchscript(
classifier,
input_shape,
output_file=args.output_file,
verify=args.verify)
| 4,363 | 30.171429 | 79 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/misc/print_config.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file (deprecated); '
        'use --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
| 1,896 | 32.875 | 78 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/convert_models/mobilenetv2_to_mmcls.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert_conv1(model_key, model_weight, state_dict, converted_names):
if model_key.find('features.0.0') >= 0:
new_key = model_key.replace('features.0.0', 'backbone.conv1.conv')
else:
new_key = model_key.replace('features.0.1', 'backbone.conv1.bn')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_conv5(model_key, model_weight, state_dict, converted_names):
if model_key.find('features.18.0') >= 0:
new_key = model_key.replace('features.18.0', 'backbone.conv2.conv')
else:
new_key = model_key.replace('features.18.1', 'backbone.conv2.bn')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('classifier.1', 'head.fc')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_block(model_key, model_weight, state_dict, converted_names):
split_keys = model_key.split('.')
layer_id = int(split_keys[1])
new_layer_id = 0
sub_id = 0
if layer_id == 1:
new_layer_id = 1
sub_id = 0
elif layer_id in range(2, 4):
new_layer_id = 2
sub_id = layer_id - 2
elif layer_id in range(4, 7):
new_layer_id = 3
sub_id = layer_id - 4
elif layer_id in range(7, 11):
new_layer_id = 4
sub_id = layer_id - 7
elif layer_id in range(11, 14):
new_layer_id = 5
sub_id = layer_id - 11
elif layer_id in range(14, 17):
new_layer_id = 6
sub_id = layer_id - 14
elif layer_id == 17:
new_layer_id = 7
sub_id = 0
new_key = model_key.replace(f'features.{layer_id}',
f'backbone.layer{new_layer_id}.{sub_id}')
if new_layer_id == 1:
if new_key.find('conv.0.0') >= 0:
new_key = new_key.replace('conv.0.0', 'conv.0.conv')
elif new_key.find('conv.0.1') >= 0:
new_key = new_key.replace('conv.0.1', 'conv.0.bn')
elif new_key.find('conv.1') >= 0:
new_key = new_key.replace('conv.1', 'conv.1.conv')
elif new_key.find('conv.2') >= 0:
new_key = new_key.replace('conv.2', 'conv.1.bn')
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
else:
if new_key.find('conv.0.0') >= 0:
new_key = new_key.replace('conv.0.0', 'conv.0.conv')
elif new_key.find('conv.0.1') >= 0:
new_key = new_key.replace('conv.0.1', 'conv.0.bn')
elif new_key.find('conv.1.0') >= 0:
new_key = new_key.replace('conv.1.0', 'conv.1.conv')
elif new_key.find('conv.1.1') >= 0:
new_key = new_key.replace('conv.1.1', 'conv.1.bn')
elif new_key.find('conv.2') >= 0:
new_key = new_key.replace('conv.2', 'conv.2.conv')
elif new_key.find('conv.3') >= 0:
new_key = new_key.replace('conv.3', 'conv.2.bn')
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
print(f'Convert {model_key} to {new_key}')
state_dict[new_key] = model_weight
converted_names.add(model_key)
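# For example, the rules above rename 'features.2.conv.0.0.weight' to
# 'backbone.layer2.0.conv.0.conv.weight'.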
def convert(src, dst):
"""Convert keys in torchvision pretrained MobileNetV2 models to mmcls
style."""
# load pytorch model
blobs = torch.load(src, map_location='cpu')
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
for key, weight in blobs.items():
if 'features.0' in key:
convert_conv1(key, weight, state_dict, converted_names)
elif 'classifier' in key:
convert_head(key, weight, state_dict, converted_names)
elif 'features.18' in key:
convert_conv5(key, weight, state_dict, converted_names)
else:
convert_block(key, weight, state_dict, converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
print(f'not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src torchvision model path')
parser.add_argument('dst', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
| 4,732 | 33.801471 | 75 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/convert_models/vgg_to_mmcls.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import OrderedDict
import torch
def get_layer_maps(layer_num, with_bn):
layer_maps = {'conv': {}, 'bn': {}}
if with_bn:
if layer_num == 11:
layer_idxs = [0, 4, 8, 11, 15, 18, 22, 25]
elif layer_num == 13:
layer_idxs = [0, 3, 7, 10, 14, 17, 21, 24, 28, 31]
elif layer_num == 16:
layer_idxs = [0, 3, 7, 10, 14, 17, 20, 24, 27, 30, 34, 37, 40]
elif layer_num == 19:
layer_idxs = [
0, 3, 7, 10, 14, 17, 20, 23, 27, 30, 33, 36, 40, 43, 46, 49
]
else:
raise ValueError(f'Invalid number of layers: {layer_num}')
for i, layer_idx in enumerate(layer_idxs):
if i == 0:
new_layer_idx = layer_idx
else:
new_layer_idx += int((layer_idx - layer_idxs[i - 1]) / 2)
layer_maps['conv'][layer_idx] = new_layer_idx
layer_maps['bn'][layer_idx + 1] = new_layer_idx
else:
if layer_num == 11:
layer_idxs = [0, 3, 6, 8, 11, 13, 16, 18]
new_layer_idxs = [0, 2, 4, 5, 7, 8, 10, 11]
elif layer_num == 13:
layer_idxs = [0, 2, 5, 7, 10, 12, 15, 17, 20, 22]
new_layer_idxs = [0, 1, 3, 4, 6, 7, 9, 10, 12, 13]
elif layer_num == 16:
layer_idxs = [0, 2, 5, 7, 10, 12, 14, 17, 19, 21, 24, 26, 28]
new_layer_idxs = [0, 1, 3, 4, 6, 7, 8, 10, 11, 12, 14, 15, 16]
elif layer_num == 19:
layer_idxs = [
0, 2, 5, 7, 10, 12, 14, 16, 19, 21, 23, 25, 28, 30, 32, 34
]
new_layer_idxs = [
0, 1, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19
]
else:
raise ValueError(f'Invalid number of layers: {layer_num}')
layer_maps['conv'] = {
layer_idx: new_layer_idx
for layer_idx, new_layer_idx in zip(layer_idxs, new_layer_idxs)
}
return layer_maps
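# For example, get_layer_maps(11, with_bn=False)['conv'] maps torchvision
# feature indices to mmcls indices as
# {0: 0, 3: 2, 6: 4, 8: 5, 11: 7, 13: 8, 16: 10, 18: 11}.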
def convert(src, dst, layer_num, with_bn=False):
"""Convert keys in torchvision pretrained VGG models to mmcls style."""
# load pytorch model
assert os.path.isfile(src), f'no checkpoint found at {src}'
blobs = torch.load(src, map_location='cpu')
# convert to pytorch style
state_dict = OrderedDict()
layer_maps = get_layer_maps(layer_num, with_bn)
prefix = 'backbone'
delimiter = '.'
for key, weight in blobs.items():
if 'features' in key:
module, layer_idx, weight_type = key.split(delimiter)
new_key = delimiter.join([prefix, key])
layer_idx = int(layer_idx)
for layer_key, maps in layer_maps.items():
if layer_idx in maps:
new_layer_idx = maps[layer_idx]
new_key = delimiter.join([
prefix, 'features',
str(new_layer_idx), layer_key, weight_type
])
state_dict[new_key] = weight
print(f'Convert {key} to {new_key}')
elif 'classifier' in key:
new_key = delimiter.join([prefix, key])
state_dict[new_key] = weight
print(f'Convert {key} to {new_key}')
else:
state_dict[key] = weight
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src torchvision model path')
parser.add_argument('dst', help='save path')
parser.add_argument(
'--bn', action='store_true', help='whether original vgg has BN')
parser.add_argument(
'--layer_num',
type=int,
choices=[11, 13, 16, 19],
default=11,
help='number of VGG layers')
args = parser.parse_args()
convert(args.src, args.dst, layer_num=args.layer_num, with_bn=args.bn)
if __name__ == '__main__':
main()
| 4,084 | 33.618644 | 75 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/convert_models/publish_model.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import datetime
import os
import subprocess
import torch
from mmcv import digit_version
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if digit_version(torch.__version__) >= digit_version('1.6'):
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
current_date = datetime.datetime.now().strftime('%Y%m%d')
final_file = out_file_name + f'_{current_date}-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
    print(f'Successfully generated the published checkpoint as {final_file}.')
def main():
args = parse_args()
out_dir = os.path.dirname(args.out_file)
if not os.path.exists(out_dir):
raise ValueError(f'Directory {out_dir} does not exist, '
'please generate it manually.')
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| 1,742 | 30.125 | 78 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/convert_models/shufflenetv2_to_mmcls.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert_conv1(model_key, model_weight, state_dict, converted_names):
if model_key.find('conv1.0') >= 0:
new_key = model_key.replace('conv1.0', 'backbone.conv1.conv')
else:
new_key = model_key.replace('conv1.1', 'backbone.conv1.bn')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_conv5(model_key, model_weight, state_dict, converted_names):
if model_key.find('conv5.0') >= 0:
new_key = model_key.replace('conv5.0', 'backbone.layers.3.conv')
else:
new_key = model_key.replace('conv5.1', 'backbone.layers.3.bn')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('fc', 'head.fc')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_block(model_key, model_weight, state_dict, converted_names):
split_keys = model_key.split('.')
layer, block, branch = split_keys[:3]
layer_id = int(layer[-1]) - 2
new_key = model_key.replace(layer, f'backbone.layers.{layer_id}')
if branch == 'branch1':
if new_key.find('branch1.0') >= 0:
new_key = new_key.replace('branch1.0', 'branch1.0.conv')
elif new_key.find('branch1.1') >= 0:
new_key = new_key.replace('branch1.1', 'branch1.0.bn')
elif new_key.find('branch1.2') >= 0:
new_key = new_key.replace('branch1.2', 'branch1.1.conv')
elif new_key.find('branch1.3') >= 0:
new_key = new_key.replace('branch1.3', 'branch1.1.bn')
elif branch == 'branch2':
if new_key.find('branch2.0') >= 0:
new_key = new_key.replace('branch2.0', 'branch2.0.conv')
elif new_key.find('branch2.1') >= 0:
new_key = new_key.replace('branch2.1', 'branch2.0.bn')
elif new_key.find('branch2.3') >= 0:
new_key = new_key.replace('branch2.3', 'branch2.1.conv')
elif new_key.find('branch2.4') >= 0:
new_key = new_key.replace('branch2.4', 'branch2.1.bn')
elif new_key.find('branch2.5') >= 0:
new_key = new_key.replace('branch2.5', 'branch2.2.conv')
elif new_key.find('branch2.6') >= 0:
new_key = new_key.replace('branch2.6', 'branch2.2.bn')
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
print(f'Convert {model_key} to {new_key}')
state_dict[new_key] = model_weight
converted_names.add(model_key)
def convert(src, dst):
"""Convert keys in torchvision pretrained ShuffleNetV2 models to mmcls
style."""
# load pytorch model
blobs = torch.load(src, map_location='cpu')
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
for key, weight in blobs.items():
if 'conv1' in key:
convert_conv1(key, weight, state_dict, converted_names)
elif 'fc' in key:
convert_head(key, weight, state_dict, converted_names)
elif key.startswith('s'):
convert_block(key, weight, state_dict, converted_names)
elif 'conv5' in key:
convert_conv5(key, weight, state_dict, converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
print(f'not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src torchvision model path')
parser.add_argument('dst', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
| 4,137 | 35.298246 | 74 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/analysis_tools/analyze_results.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import warnings
import mmcv
from mmcv import DictAction
from mmcls.datasets import build_dataset
from mmcls.models import build_classifier
def parse_args():
parser = argparse.ArgumentParser(
description='MMCls evaluate prediction success/fail')
parser.add_argument('config', help='test config file path')
parser.add_argument('result', help='test result json/pkl file')
parser.add_argument('--out-dir', help='dir to store output files')
parser.add_argument(
'--topk',
default=20,
type=int,
help='Number of images to select for success/fail')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file (deprecated); '
        'use --cfg-options instead.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def save_imgs(result_dir, folder_name, results, model):
full_dir = osp.join(result_dir, folder_name)
mmcv.mkdir_or_exist(full_dir)
mmcv.dump(results, osp.join(full_dir, folder_name + '.json'))
# save imgs
show_keys = ['pred_score', 'pred_class', 'gt_class']
for result in results:
result_show = dict((k, v) for k, v in result.items() if k in show_keys)
outfile = osp.join(full_dir, osp.basename(result['filename']))
model.show_result(result['filename'], result_show, out_file=outfile)
def main():
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
model = build_classifier(cfg.model)
# build the dataloader
dataset = build_dataset(cfg.data.test)
filenames = list()
for info in dataset.data_infos:
if info['img_prefix'] is not None:
filename = osp.join(info['img_prefix'],
info['img_info']['filename'])
else:
filename = info['img_info']['filename']
filenames.append(filename)
gt_labels = list(dataset.get_gt_labels())
gt_classes = [dataset.CLASSES[x] for x in gt_labels]
# load test results
outputs = mmcv.load(args.result)
outputs['filename'] = filenames
outputs['gt_label'] = gt_labels
outputs['gt_class'] = gt_classes
outputs_list = list()
for i in range(len(gt_labels)):
output = dict()
for k in outputs.keys():
output[k] = outputs[k][i]
outputs_list.append(output)
# sort result
outputs_list = sorted(outputs_list, key=lambda x: x['pred_score'])
success = list()
fail = list()
for output in outputs_list:
if output['pred_label'] == output['gt_label']:
success.append(output)
else:
fail.append(output)
success = success[:args.topk]
fail = fail[:args.topk]
save_imgs(args.out_dir, 'success', success, model)
save_imgs(args.out_dir, 'fail', fail, model)
if __name__ == '__main__':
main()
| 3,980 | 31.631148 | 79 | py |
| KnowledgeFactor | KnowledgeFactor-main/cls/tools/analysis_tools/eval_metric.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
from mmcv import Config, DictAction
from mmcls.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--metrics',
type=str,
nargs='+',
help='Evaluation metrics, which depends on the dataset, e.g., '
'"accuracy", "precision", "recall" and "support".')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
assert args.metrics, (
        'Please specify at least one metric with the argument "--metrics".')
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.pkl_results)
pred_score = outputs['class_scores']
kwargs = {} if args.eval_options is None else args.eval_options
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.metrics, **kwargs))
print(dataset.evaluate(pred_score, **eval_kwargs))
if __name__ == '__main__':
main()
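# Usage sketch (illustrative; paths are hypothetical):
#   python tools/analysis_tools/eval_metric.py configs/resnet.py results.pkl \
#       --metrics accuracy precision recall
# The pkl file is expected to carry a 'class_scores' array, which main() above
# forwards to dataset.evaluate().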
# ===== KnowledgeFactor-main/cls/tools/analysis_tools/analysis_para.py =====
import argparse
import torch
from mmcv import Config
from prettytable import PrettyTable
from mmcls.models.builder import build_classifier
def count_parameters(model):
table = PrettyTable(["Modules", "Parameters"])
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad:
continue
param = parameter.numel()
table.add_row([name, param])
total_params += param
print(table)
print(f"Total Trainable Params: {total_params}")
return total_params
def parse_args():
parser = argparse.ArgumentParser(description='Explain a model')
parser.add_argument('config', help='train config file path')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
print(cfg)
model = build_classifier(cfg.model)
count_parameters(model)
if __name__ == '__main__':
main()
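# Illustrative check of count_parameters (not part of the original file; any
# nn.Module works):
#   import torch.nn as nn
#   count_parameters(nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2)))
# tabulates 8*4 + 4 = 36 and 4*2 + 2 = 10 parameters, 46 in total.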
# ===== KnowledgeFactor-main/cls/tools/analysis_tools/get_flops.py =====
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from mmcv import Config
from mmcv.cnn.utils import get_model_complexity_info
from mmcls.models import build_classifier
def parse_args():
parser = argparse.ArgumentParser(description='Get model flops and params')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[224, 224],
help='input image size')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
input_shape = (3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (3, ) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = Config.fromfile(args.config)
model = build_classifier(cfg.model)
model.eval()
if hasattr(model, 'extract_feat'):
model.forward = model.extract_feat
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
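# Usage sketch (illustrative; the config path is hypothetical):
#   python tools/analysis_tools/get_flops.py configs/resnet.py --shape 224 224
# A single value, e.g. `--shape 224`, is expanded to a square (3, 224, 224)
# input by main() above.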
# ===== KnowledgeFactor-main/cls/tools/analysis_tools/analyze_logs.py =====
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def cal_train_time(log_dicts, args):
"""Compute the average time per training iteration."""
for i, log_dict in enumerate(log_dicts):
print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
all_times = []
for epoch in log_dict.keys():
if args.include_outliers:
all_times.append(log_dict[epoch]['time'])
else:
all_times.append(log_dict[epoch]['time'][1:])
all_times = np.array(all_times)
epoch_ave_time = all_times.mean(-1)
slowest_epoch = epoch_ave_time.argmax()
fastest_epoch = epoch_ave_time.argmin()
std_over_epoch = epoch_ave_time.std()
print(f'slowest epoch {slowest_epoch + 1}, '
f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
print(f'fastest epoch {fastest_epoch + 1}, '
f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
print(f'time std over epochs is {std_over_epoch:.4f}')
print(f'average iter time: {np.mean(all_times):.4f} s/iter')
print()
def plot_curve(log_dicts, args):
"""Plot train metric-iter graph."""
if args.backend is not None:
plt.switch_backend(args.backend)
sns.set_style(args.style)
# if legend is None, use {filename}_{key} as legend
legend = args.legend
if legend is None:
legend = []
for json_log in args.json_logs:
for metric in args.keys:
legend.append(f'{json_log}_{metric}')
assert len(legend) == (len(args.json_logs) * len(args.keys))
metrics = args.keys
num_metrics = len(metrics)
for i, log_dict in enumerate(log_dicts):
epochs = list(log_dict.keys())
for j, metric in enumerate(metrics):
print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
if metric not in log_dict[epochs[0]]:
raise KeyError(
f'{args.json_logs[i]} does not contain metric {metric} '
f'in train mode')
if 'mAP' in metric:
xs = np.arange(1, max(epochs) + 1)
ys = []
for epoch in epochs:
ys += log_dict[epoch][metric]
ax = plt.gca()
ax.set_xticks(xs)
plt.xlabel('epoch')
plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
else:
xs = []
ys = []
num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1]
for epoch in epochs:
iters = log_dict[epoch]['iter']
if log_dict[epoch]['mode'][-1] == 'val':
iters = iters[:-1]
xs.append(
np.array(iters) + (epoch - 1) * num_iters_per_epoch)
ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
xs = np.concatenate(xs)
ys = np.concatenate(ys)
plt.xlabel('iter')
plt.plot(
xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
plt.legend()
if args.title is not None:
plt.title(args.title)
if args.out is None:
plt.show()
else:
print(f'save curve to: {args.out}')
plt.savefig(args.out)
plt.cla()
def add_plot_parser(subparsers):
parser_plt = subparsers.add_parser(
'plot_curve', help='parser for plotting curves')
parser_plt.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_plt.add_argument(
'--keys',
type=str,
nargs='+',
default=['loss'],
help='the metric that you want to plot')
parser_plt.add_argument('--title', type=str, help='title of figure')
parser_plt.add_argument(
'--legend',
type=str,
nargs='+',
default=None,
help='legend of each plot')
parser_plt.add_argument(
'--backend', type=str, default=None, help='backend of plt')
parser_plt.add_argument(
'--style', type=str, default='dark', help='style of plt')
parser_plt.add_argument('--out', type=str, default=None)
def add_time_parser(subparsers):
parser_time = subparsers.add_parser(
'cal_train_time',
help='parser for computing the average time per training iteration')
parser_time.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_time.add_argument(
'--include-outliers',
action='store_true',
help='include the first value of every epoch when computing '
'the average time')
def parse_args():
parser = argparse.ArgumentParser(description='Analyze Json Log')
    # currently only supports plotting curves and computing the average train time
subparsers = parser.add_subparsers(dest='task', help='task parser')
add_plot_parser(subparsers)
add_time_parser(subparsers)
args = parser.parse_args()
return args
def load_json_logs(json_logs):
# load and convert json_logs to log_dict, key is epoch, value is a sub dict
# keys of sub dict is different metrics, e.g. memory, bbox_mAP
# value of sub dict is a list of corresponding values of all iterations
log_dicts = [dict() for _ in json_logs]
for json_log, log_dict in zip(json_logs, log_dicts):
with open(json_log, 'r') as log_file:
for line in log_file:
log = json.loads(line.strip())
# skip lines without `epoch` field
if 'epoch' not in log:
continue
epoch = log.pop('epoch')
if epoch not in log_dict:
log_dict[epoch] = defaultdict(list)
for k, v in log.items():
log_dict[epoch][k].append(v)
return log_dicts
def main():
args = parse_args()
json_logs = args.json_logs
for json_log in json_logs:
assert json_log.endswith('.json')
log_dicts = load_json_logs(json_logs)
eval(args.task)(log_dicts, args)
if __name__ == '__main__':
main()
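# Usage sketches (illustrative; the log file name is hypothetical):
#   python tools/analysis_tools/analyze_logs.py plot_curve run.log.json \
#       --keys loss accuracy --out curve.png
#   python tools/analysis_tools/analyze_logs.py cal_train_time run.log.json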
# ===== KnowledgeFactor-main/cls/mmcls/version.py =====
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.15.0'
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
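# Worked examples of the parsing above (matching the docstring):
#   parse_version_info('0.15.0')   -> (0, 15, 0)
#   parse_version_info('1.0.0rc2') -> (1, 0, 0, 'rc2')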
version_info = parse_version_info(__version__)
__all__ = ['__version__', 'version_info', 'parse_version_info']
# ===== KnowledgeFactor-main/cls/mmcls/__init__.py =====
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
from packaging.version import parse
from .version import __version__
def digit_version(version_str: str, length: int = 4):
"""Convert a version string into a tuple of integers.
This method is usually used for comparing two versions. For pre-release
versions: alpha < beta < rc.
Args:
version_str (str): The version string.
length (int): The maximum number of version levels. Default: 4.
Returns:
tuple[int]: The version info in digits (integers).
"""
version = parse(version_str)
assert version.release, f'failed to parse version {version_str}'
release = list(version.release)
release = release[:length]
if len(release) < length:
release = release + [0] * (length - len(release))
if version.is_prerelease:
mapping = {'a': -3, 'b': -2, 'rc': -1}
val = -4
# version.pre can be None
if version.pre:
if version.pre[0] not in mapping:
warnings.warn(f'unknown prerelease version {version.pre[0]}, '
'version checking may go wrong')
else:
val = mapping[version.pre[0]]
release.extend([val, version.pre[-1]])
else:
release.extend([val, 0])
elif version.is_postrelease:
release.extend([1, version.post])
else:
release.extend([0, 0])
return tuple(release)
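# Worked example of the prerelease ordering implemented above:
#   digit_version('1.3.0rc1') -> (1, 3, 0, 0, -1, 1)
#   digit_version('1.3.0')    -> (1, 3, 0, 0, 0, 0)
# so '1.3.0rc1' compares less than '1.3.0', as intended for rc releases.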
mmcv_minimum_version = '1.3.8'
mmcv_maximum_version = '1.5.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'digit_version']
| 1,912 | 30.360656 | 78 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/apis/inference.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmcls.datasets.pipelines import Compose
from mmcls.models import build_classifier
def init_model(config, checkpoint=None, device='cuda:0', options=None):
"""Initialize a classifier from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
options (dict): Options to override some settings in the used config.
Returns:
nn.Module: The constructed classifier.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if options is not None:
config.merge_from_dict(options)
config.model.pretrained = None
model = build_classifier(config.model)
if checkpoint is not None:
map_loc = 'cpu' if device == 'cpu' else None
checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc)
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
from mmcls.datasets import ImageNet
warnings.simplefilter('once')
            warnings.warn('Class names are not saved in the checkpoint\'s '
                          'meta data, using ImageNet classes by default.')
model.CLASSES = ImageNet.CLASSES
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
def inference_model(model, img):
"""Inference image(s) with the classifier.
Args:
model (nn.Module): The loaded classifier.
img (str/ndarray): The image filename or loaded image.
Returns:
result (dict): The classification results that contains
`class_name`, `pred_label` and `pred_score`.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
if isinstance(img, str):
if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile':
cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile'))
data = dict(img_info=dict(filename=img), img_prefix=None)
else:
if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile':
cfg.data.test.pipeline.pop(0)
data = dict(img=img)
test_pipeline = Compose(cfg.data.test.pipeline)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
# forward the model
with torch.no_grad():
scores = model(return_loss=False, **data)
pred_score = np.max(scores, axis=1)[0]
pred_label = np.argmax(scores, axis=1)[0]
result = {'pred_label': pred_label, 'pred_score': float(pred_score)}
result['pred_class'] = model.CLASSES[result['pred_label']]
return result
def show_result_pyplot(model, img, result, fig_size=(15, 10), wait_time=0):
"""Visualize the classification results on the image.
Args:
model (nn.Module): The loaded classifier.
img (str or np.ndarray): Image filename or loaded image.
result (list): The classification result.
fig_size (tuple): Figure size of the pyplot figure.
Defaults to (15, 10).
wait_time (int): How many seconds to display the image.
Defaults to 0.
"""
if hasattr(model, 'module'):
model = model.module
model.show_result(
img, result, show=True, fig_size=fig_size, wait_time=wait_time)
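# Minimal end-to-end sketch (illustrative; paths are hypothetical):
#   model = init_model('configs/resnet.py', 'checkpoint.pth', device='cuda:0')
#   result = inference_model(model, 'demo/demo.jpg')
#   show_result_pyplot(model, 'demo/demo.jpg', result)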
# ===== KnowledgeFactor-main/cls/mmcls/apis/multitask_test.py =====
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
def multitask_single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
**show_kwargs):
model.eval()
results = dict()
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
batch_results = model(return_loss=False, **data)
        for task_idx, result in enumerate(batch_results):
            key = f'task_{task_idx}'
if key not in results.keys():
results[key] = []
results[key].extend(result)
if show or out_dir:
scores = np.vstack(result)
pred_score = np.max(scores, axis=1)
pred_label = np.argmax(scores, axis=1)
pred_class = [model.CLASSES[lb] for lb in pred_label]
img_metas = data['img_metas'].data[0]
imgs = tensor2imgs(data['img'], **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
result_show = {
'pred_score': pred_score[i],
'pred_label': pred_label[i],
'pred_class': pred_class[i]
}
model.module.show_result(
img_show,
result_show,
show=show,
out_file=out_file,
**show_kwargs)
batch_size = data['img'].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multitask_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
    it encodes results to gpu tensors and uses gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = dict()
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
# Check if tmpdir is valid for cpu_collect
if (not gpu_collect) and (tmpdir is not None and osp.exists(tmpdir)):
            raise OSError(f'The tmpdir {tmpdir} already exists. '
                          'Since tmpdir will be deleted after testing, '
                          'please make sure you specify an empty one.')
prog_bar = mmcv.ProgressBar(len(dataset))
    time.sleep(2)  # This line can prevent deadlock problems in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
batch_results = model(return_loss=False, **data)
        for task_idx, result in enumerate(batch_results):
            key = f'task_{task_idx}'
if key not in results.keys():
results[key] = []
if isinstance(result, list):
results[key].extend(result)
else:
results[key].append(result)
if rank == 0:
batch_size = data['img'].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
for task_name in results.keys():
if gpu_collect:
results[task_name] = collect_results_gpu(results[task_name], len(dataset))
else:
results[task_name] = collect_results_cpu(results[task_name], len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_result = mmcv.load(part_file)
part_list.append(part_result)
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
part_list.append(part_result)
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
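# Note on the returned structure (inferred from the loops above): both test
# functions return a dict keyed 'task_0', 'task_1', ..., each value a list of
# per-sample outputs in dataset order, so each task can be evaluated
# independently downstream.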
# ===== KnowledgeFactor-main/cls/mmcls/apis/test.py =====
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
def single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
**show_kwargs):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, **data)
batch_size = len(result)
results.extend(result)
if show or out_dir:
scores = np.vstack(result)
pred_score = np.max(scores, axis=1)
pred_label = np.argmax(scores, axis=1)
pred_class = [model.CLASSES[lb] for lb in pred_label]
img_metas = data['img_metas'].data[0]
imgs = tensor2imgs(data['img'], **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
result_show = {
'pred_score': pred_score[i],
'pred_label': pred_label[i],
'pred_class': pred_class[i]
}
model.module.show_result(
img_show,
result_show,
show=show,
out_file=out_file,
**show_kwargs)
batch_size = data['img'].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
    it encodes results to gpu tensors and uses gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
# Check if tmpdir is valid for cpu_collect
if (not gpu_collect) and (tmpdir is not None and osp.exists(tmpdir)):
            raise OSError(f'The tmpdir {tmpdir} already exists. '
                          'Since tmpdir will be deleted after testing, '
                          'please make sure you specify an empty one.')
prog_bar = mmcv.ProgressBar(len(dataset))
    time.sleep(2)  # This line can prevent deadlock problems in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, **data)
if isinstance(result, list):
results.extend(result)
else:
results.append(result)
if rank == 0:
batch_size = data['img'].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_result = mmcv.load(part_file)
part_list.append(part_result)
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
part_list.append(part_result)
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
# ===== KnowledgeFactor-main/cls/mmcls/apis/__init__.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from .inference import inference_model, init_model, show_result_pyplot
from .test import multi_gpu_test, single_gpu_test
from .multitask_test import multitask_multi_gpu_test, multitask_single_gpu_test
from .train import set_random_seed, train_model
__all__ = [
'set_random_seed', 'train_model', 'init_model', 'inference_model',
'multi_gpu_test', 'single_gpu_test', 'show_result_pyplot', 'multitask_multi_gpu_test',
'multitask_single_gpu_test'
]
| 506 | 41.25 | 90 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/apis/train.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import DistSamplerSeedHook, build_optimizer, build_runner
from mmcls.core import DistOptimizerHook
from mmcls.datasets import build_dataloader, build_dataset
from mmcls.utils import get_root_logger
# TODO import eval hooks from mmcv and delete them from mmcls
try:
from mmcv.runner.hooks import EvalHook, DistEvalHook
except ImportError:
warnings.warn('DeprecationWarning: EvalHook and DistEvalHook from mmcls '
'will be deprecated.'
'Please install mmcv through master branch.')
from mmcls.core import EvalHook, DistEvalHook
from ..core.evaluation import MultiTaskEvalHook, DistMultiTaskEvalHook
# TODO import optimizer hook from mmcv and delete them from mmcls
try:
from mmcv.runner import Fp16OptimizerHook
except ImportError:
warnings.warn('DeprecationWarning: FP16OptimizerHook from mmcls will be '
'deprecated. Please install mmcv>=1.1.4.')
from mmcls.core import Fp16OptimizerHook
from ..core.utils.visualize import LogBuffer_ignore
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def train_model(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
device='cuda',
meta=None):
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# cfg.gpus will be ignored if distributed
num_gpus=len(cfg.gpu_ids),
dist=distributed,
round_up=True,
seed=cfg.seed) for ds in dataset
]
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
if device == 'cuda':
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
elif device == 'cpu':
model = model.cpu()
else:
            raise ValueError(f'unsupported device name {device}.')
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
if cfg.get('runner') is None:
cfg.runner = {
'type': 'EpochBasedRunner',
'max_epochs': cfg.total_epochs
}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta))
runner.log_buffer = LogBuffer_ignore()
    # an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif distributed and 'type' not in cfg.optimizer_config:
optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
runner.register_training_hooks(
cfg.lr_config,
optimizer_config,
cfg.checkpoint_config,
cfg.log_config,
cfg.get('momentum_config', None),
custom_hooks_config=cfg.get('custom_hooks', None))
if distributed:
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if validate:
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=cfg.data.samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False,
round_up=True)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
if cfg.get('multi_task', False):
eval_hook = DistMultiTaskEvalHook if distributed else MultiTaskEvalHook
else:
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
| 5,723 | 33.481928 | 83 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .evaluation import * # noqa: F401, F403
from .fp16 import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
| 175 | 34.2 | 47 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/evaluation/multilabel_eval_metrics.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import torch
def average_performance(pred, target, thr=None, k=None):
"""Calculate CP, CR, CF1, OP, OR, OF1, where C stands for per-class
average, O stands for overall average, P stands for precision, R stands for
recall and F1 stands for F1-score.
Args:
pred (torch.Tensor | np.ndarray): The model prediction with shape
(N, C), where C is the number of classes.
target (torch.Tensor | np.ndarray): The target of each prediction with
shape (N, C), where C is the number of classes. 1 stands for
positive examples, 0 stands for negative examples and -1 stands for
difficult examples.
thr (float): The confidence threshold. Defaults to None.
k (int): Top-k performance. Note that if thr and k are both given, k
will be ignored. Defaults to None.
Returns:
tuple: (CP, CR, CF1, OP, OR, OF1)
"""
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred = pred.detach().cpu().numpy()
target = target.detach().cpu().numpy()
elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)):
raise TypeError('pred and target should both be torch.Tensor or'
'np.ndarray')
if thr is None and k is None:
thr = 0.5
warnings.warn('Neither thr nor k is given, set thr as 0.5 by '
'default.')
elif thr is not None and k is not None:
warnings.warn('Both thr and k are given, use threshold in favor of '
'top-k.')
assert pred.shape == \
target.shape, 'pred and target should be in the same shape.'
eps = np.finfo(np.float32).eps
target[target == -1] = 0
if thr is not None:
# a label is predicted positive if the confidence is no lower than thr
pos_inds = pred >= thr
else:
# top-k labels will be predicted positive for any example
sort_inds = np.argsort(-pred, axis=1)
sort_inds_ = sort_inds[:, :k]
inds = np.indices(sort_inds_.shape)
pos_inds = np.zeros_like(pred)
pos_inds[inds[0], sort_inds_] = 1
tp = (pos_inds * target) == 1
fp = (pos_inds * (1 - target)) == 1
fn = ((1 - pos_inds) * target) == 1
precision_class = tp.sum(axis=0) / np.maximum(
tp.sum(axis=0) + fp.sum(axis=0), eps)
recall_class = tp.sum(axis=0) / np.maximum(
tp.sum(axis=0) + fn.sum(axis=0), eps)
CP = precision_class.mean() * 100.0
CR = recall_class.mean() * 100.0
CF1 = 2 * CP * CR / np.maximum(CP + CR, eps)
OP = tp.sum() / np.maximum(tp.sum() + fp.sum(), eps) * 100.0
OR = tp.sum() / np.maximum(tp.sum() + fn.sum(), eps) * 100.0
OF1 = 2 * OP * OR / np.maximum(OP + OR, eps)
return CP, CR, CF1, OP, OR, OF1
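# Toy check (illustrative, not part of the original file): with
#   pred   = np.array([[0.9, 0.2], [0.3, 0.8]])
#   target = np.array([[1, 0], [0, 1]])
# and the default thr=0.5, every positive is recovered and nothing spurious is
# predicted, so CP, CR, OP and OR above all evaluate to 100.0.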
# ===== KnowledgeFactor-main/cls/mmcls/core/evaluation/eval_metrics.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from numbers import Number
import numpy as np
import torch
def calculate_confusion_matrix(pred, target):
"""Calculate confusion matrix according to the prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
Returns:
torch.Tensor: Confusion matrix
The shape is (C, C), where C is the number of classes.
"""
if isinstance(pred, np.ndarray):
pred = torch.from_numpy(pred)
if isinstance(target, np.ndarray):
target = torch.from_numpy(target)
assert (
isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor)), \
(f'pred and target should be torch.Tensor or np.ndarray, '
f'but got {type(pred)} and {type(target)}.')
num_classes = pred.size(1)
_, pred_label = pred.topk(1, dim=1)
pred_label = pred_label.view(-1)
target_label = target.view(-1)
assert len(pred_label) == len(target_label)
confusion_matrix = torch.zeros(num_classes, num_classes)
with torch.no_grad():
for t, p in zip(target_label, pred_label):
confusion_matrix[t.long(), p.long()] += 1
return confusion_matrix
def precision_recall_f1(pred, target, average_mode='macro', thrs=0.):
"""Calculate precision, recall and f1 score according to the prediction and
target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
average_mode (str): The type of averaging performed on the result.
Options are 'macro' and 'none'. If 'none', the scores for each
class are returned. If 'macro', calculate metrics for each class,
and find their unweighted mean.
Defaults to 'macro'.
thrs (Number | tuple[Number], optional): Predictions with scores under
            the thresholds are considered negative. Defaults to 0.
Returns:
tuple: tuple containing precision, recall, f1 score.
The type of precision, recall, f1 score is one of the following:
+----------------------------+--------------------+-------------------+
| Args | ``thrs`` is number | ``thrs`` is tuple |
+============================+====================+===================+
| ``average_mode`` = "macro" | float | list[float] |
+----------------------------+--------------------+-------------------+
| ``average_mode`` = "none" | np.array | list[np.array] |
+----------------------------+--------------------+-------------------+
"""
allowed_average_mode = ['macro', 'none']
if average_mode not in allowed_average_mode:
        raise ValueError(f'Unsupported type of averaging {average_mode}.')
if isinstance(pred, torch.Tensor):
pred = pred.numpy()
if isinstance(target, torch.Tensor):
target = target.numpy()
assert (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)),\
(f'pred and target should be torch.Tensor or np.ndarray, '
f'but got {type(pred)} and {type(target)}.')
if isinstance(thrs, Number):
thrs = (thrs, )
return_single = True
elif isinstance(thrs, tuple):
return_single = False
else:
raise TypeError(
f'thrs should be a number or tuple, but got {type(thrs)}.')
label = np.indices(pred.shape)[1]
pred_label = np.argsort(pred, axis=1)[:, -1]
pred_score = np.sort(pred, axis=1)[:, -1]
precisions = []
recalls = []
f1_scores = []
for thr in thrs:
# Only prediction values larger than thr are counted as positive
_pred_label = pred_label.copy()
if thr is not None:
_pred_label[pred_score <= thr] = -1
pred_positive = label == _pred_label.reshape(-1, 1)
gt_positive = label == target.reshape(-1, 1)
precision = (pred_positive & gt_positive).sum(0) / np.maximum(
pred_positive.sum(0), 1) * 100
recall = (pred_positive & gt_positive).sum(0) / np.maximum(
gt_positive.sum(0), 1) * 100
f1_score = 2 * precision * recall / np.maximum(precision + recall,
1e-20)
if average_mode == 'macro':
precision = float(precision.mean())
recall = float(recall.mean())
f1_score = float(f1_score.mean())
precisions.append(precision)
recalls.append(recall)
f1_scores.append(f1_score)
if return_single:
return precisions[0], recalls[0], f1_scores[0]
else:
return precisions, recalls, f1_scores
def precision(pred, target, average_mode='macro', thrs=0.):
"""Calculate precision according to the prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
average_mode (str): The type of averaging performed on the result.
Options are 'macro' and 'none'. If 'none', the scores for each
class are returned. If 'macro', calculate metrics for each class,
and find their unweighted mean.
Defaults to 'macro'.
thrs (Number | tuple[Number], optional): Predictions with scores under
            the thresholds are considered negative. Defaults to 0.
Returns:
float | np.array | list[float | np.array]: Precision.
+----------------------------+--------------------+-------------------+
| Args | ``thrs`` is number | ``thrs`` is tuple |
+============================+====================+===================+
| ``average_mode`` = "macro" | float | list[float] |
+----------------------------+--------------------+-------------------+
| ``average_mode`` = "none" | np.array | list[np.array] |
+----------------------------+--------------------+-------------------+
"""
precisions, _, _ = precision_recall_f1(pred, target, average_mode, thrs)
return precisions
def recall(pred, target, average_mode='macro', thrs=0.):
"""Calculate recall according to the prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
average_mode (str): The type of averaging performed on the result.
Options are 'macro' and 'none'. If 'none', the scores for each
class are returned. If 'macro', calculate metrics for each class,
and find their unweighted mean.
Defaults to 'macro'.
thrs (Number | tuple[Number], optional): Predictions with scores under
            the thresholds are considered negative. Defaults to 0.
Returns:
float | np.array | list[float | np.array]: Recall.
+----------------------------+--------------------+-------------------+
| Args | ``thrs`` is number | ``thrs`` is tuple |
+============================+====================+===================+
| ``average_mode`` = "macro" | float | list[float] |
+----------------------------+--------------------+-------------------+
| ``average_mode`` = "none" | np.array | list[np.array] |
+----------------------------+--------------------+-------------------+
"""
_, recalls, _ = precision_recall_f1(pred, target, average_mode, thrs)
return recalls
def f1_score(pred, target, average_mode='macro', thrs=0.):
"""Calculate F1 score according to the prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
average_mode (str): The type of averaging performed on the result.
Options are 'macro' and 'none'. If 'none', the scores for each
class are returned. If 'macro', calculate metrics for each class,
and find their unweighted mean.
Defaults to 'macro'.
thrs (Number | tuple[Number], optional): Predictions with scores under
            the thresholds are considered negative. Defaults to 0.
Returns:
float | np.array | list[float | np.array]: F1 score.
+----------------------------+--------------------+-------------------+
| Args | ``thrs`` is number | ``thrs`` is tuple |
+============================+====================+===================+
| ``average_mode`` = "macro" | float | list[float] |
+----------------------------+--------------------+-------------------+
| ``average_mode`` = "none" | np.array | list[np.array] |
+----------------------------+--------------------+-------------------+
"""
_, _, f1_scores = precision_recall_f1(pred, target, average_mode, thrs)
return f1_scores
def support(pred, target, average_mode='macro'):
"""Calculate the total number of occurrences of each label according to the
prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
average_mode (str): The type of averaging performed on the result.
Options are 'macro' and 'none'. If 'none', the scores for each
class are returned. If 'macro', calculate metrics for each class,
and find their unweighted sum.
Defaults to 'macro'.
Returns:
float | np.array: Support.
- If the ``average_mode`` is set to macro, the function returns
a single float.
- If the ``average_mode`` is set to none, the function returns
a np.array with shape C.
"""
confusion_matrix = calculate_confusion_matrix(pred, target)
with torch.no_grad():
res = confusion_matrix.sum(1)
if average_mode == 'macro':
res = float(res.sum().numpy())
elif average_mode == 'none':
res = res.numpy()
else:
        raise ValueError(f'Unsupported type of averaging {average_mode}.')
return res
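# Toy check (illustrative): with
#   pred = np.array([[0.1, 0.9], [0.8, 0.2]]) and target = np.array([1, 0])
# both top-1 predictions match the target, so precision_recall_f1(pred, target)
# returns (100.0, 100.0, 100.0) under the default 'macro' mode and thrs=0.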
# ===== KnowledgeFactor-main/cls/mmcls/core/evaluation/eval_hooks.py =====
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from mmcv.runner import Hook
from torch.utils.data import DataLoader
class EvalHook(Hook):
"""Evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
"""
def __init__(self, dataloader, interval=1, by_epoch=True, **eval_kwargs):
warnings.warn(
'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be '
'deprecated, please install mmcv through master branch.')
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got'
f' {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
self.by_epoch = by_epoch
def after_train_epoch(self, runner):
if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
return
from mmcls.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def after_train_iter(self, runner):
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from mmcls.apis import single_gpu_test
runner.log_buffer.clear()
results = single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def evaluate(self, runner, results):
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
class DistEvalHook(EvalHook):
"""Distributed evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
tmpdir (str, optional): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
"""
def __init__(self,
dataloader,
interval=1,
gpu_collect=False,
by_epoch=True,
**eval_kwargs):
warnings.warn(
'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be '
'deprecated, please install mmcv through master branch.')
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got '
f'{type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.gpu_collect = gpu_collect
self.by_epoch = by_epoch
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
return
from mmcls.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
def after_train_iter(self, runner):
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from mmcls.apis import multi_gpu_test
runner.log_buffer.clear()
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
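# These hooks are registered from the `evaluation` field of a config, e.g.
#   evaluation = dict(interval=1, metric='accuracy')
# (a sketch; any extra keys are forwarded to dataset.evaluate() through
# **self.eval_kwargs in evaluate() above).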
# ===== KnowledgeFactor-main/cls/mmcls/core/evaluation/multitask_eval_hooks.py =====
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from mmcv.runner import Hook
from torch.utils.data import DataLoader
class MultiTaskEvalHook(Hook):
"""Evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
"""
def __init__(self, dataloader, interval=1, by_epoch=True, **eval_kwargs):
warnings.warn(
'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be '
'deprecated, please install mmcv through master branch.')
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got'
f' {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
self.by_epoch = by_epoch
def after_train_epoch(self, runner):
if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
return
from mmcls.apis import multitask_single_gpu_test
results = multitask_single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def after_train_iter(self, runner):
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from mmcls.apis import multitask_single_gpu_test
runner.log_buffer.clear()
results = multitask_single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def evaluate(self, runner, results):
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
class DistMultiTaskEvalHook(MultiTaskEvalHook):
"""Distributed evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
tmpdir (str, optional): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
"""
def __init__(self,
dataloader,
interval=1,
gpu_collect=False,
by_epoch=True,
**eval_kwargs):
warnings.warn(
'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be '
'deprecated, please install mmcv through master branch.')
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got '
f'{type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.gpu_collect = gpu_collect
self.by_epoch = by_epoch
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
return
from mmcls.apis import multitask_multi_gpu_test
results = multitask_multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
def after_train_iter(self, runner):
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from mmcls.apis import multitask_multi_gpu_test
runner.log_buffer.clear()
results = multitask_multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
# ===== KnowledgeFactor-main/cls/mmcls/core/evaluation/__init__.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from .eval_hooks import DistEvalHook, EvalHook
from .multitask_eval_hooks import MultiTaskEvalHook, DistMultiTaskEvalHook
from .eval_metrics import (calculate_confusion_matrix, f1_score, precision,
precision_recall_f1, recall, support)
from .mean_ap import average_precision, mAP
from .multilabel_eval_metrics import average_performance
__all__ = [
'DistEvalHook', 'EvalHook', 'precision', 'recall', 'f1_score', 'support',
'average_precision', 'mAP', 'average_performance',
'calculate_confusion_matrix', 'precision_recall_f1', 'MultiTaskEvalHook',
'DistMultiTaskEvalHook'
]
# ===== KnowledgeFactor-main/cls/mmcls/core/evaluation/mean_ap.py =====
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
def average_precision(pred, target):
r"""Calculate the average precision for a single class.
AP summarizes a precision-recall curve as the weighted mean of maximum
precisions obtained for any r'>r, where r is the recall:
.. math::
\text{AP} = \sum_n (R_n - R_{n-1}) P_n
Note that no approximation is involved since the curve is piecewise
constant.
Args:
pred (np.ndarray): The model prediction with shape (N, ).
target (np.ndarray): The target of each prediction with shape (N, ).
Returns:
float: a single float as average precision value.
"""
eps = np.finfo(np.float32).eps
# sort examples
sort_inds = np.argsort(-pred)
sort_target = target[sort_inds]
# count true positive examples
pos_inds = sort_target == 1
tp = np.cumsum(pos_inds)
total_pos = tp[-1]
    # count non-difficult examples
pn_inds = sort_target != -1
pn = np.cumsum(pn_inds)
tp[np.logical_not(pos_inds)] = 0
precision = tp / np.maximum(pn, eps)
ap = np.sum(precision) / np.maximum(total_pos, eps)
return ap
def mAP(pred, target):
"""Calculate the mean average precision with respect of classes.
Args:
pred (torch.Tensor | np.ndarray): The model prediction with shape
(N, C), where C is the number of classes.
target (torch.Tensor | np.ndarray): The target of each prediction with
shape (N, C), where C is the number of classes. 1 stands for
positive examples, 0 stands for negative examples and -1 stands for
difficult examples.
Returns:
float: A single float as mAP value.
"""
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred = pred.detach().cpu().numpy()
target = target.detach().cpu().numpy()
elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)):
raise TypeError('pred and target should both be torch.Tensor or'
'np.ndarray')
assert pred.shape == \
target.shape, 'pred and target should be in the same shape.'
num_classes = pred.shape[1]
ap = np.zeros(num_classes)
for k in range(num_classes):
ap[k] = average_precision(pred[:, k], target[:, k])
mean_ap = ap.mean() * 100.0
return mean_ap
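# Toy check (illustrative): pred = np.array([0.9, 0.6, 0.1]) with
# target = np.array([1, 0, 1]) ranks the positives 1st and 3rd, giving
# precisions 1/1 and 2/3 at the positive hits, so
# average_precision(pred, target) == (1.0 + 2/3) / 2 ~= 0.833.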
# ===== KnowledgeFactor-main/cls/mmcls/core/fp16/hooks.py =====
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch
import torch.nn as nn
from mmcv.runner import OptimizerHook
from mmcv.utils.parrots_wrapper import _BatchNorm
from ..utils import allreduce_grads
from .utils import cast_tensor_type
class Fp16OptimizerHook(OptimizerHook):
"""FP16 optimizer hook.
    The steps of the fp16 optimizer are as follows.
    1. Scale the loss value.
    2. Backpropagate in the fp16 model.
    3. Copy gradients from the fp16 model to the fp32 weights.
    4. Update the fp32 weights.
    5. Copy the updated parameters from the fp32 weights back to the fp16 model.
Refer to https://arxiv.org/abs/1710.03740 for more details.
Args:
loss_scale (float): Scale factor multiplied with loss.
"""
def __init__(self,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
loss_scale=512.,
distributed=True):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
self.loss_scale = loss_scale
self.distributed = distributed
def before_run(self, runner):
# keep a copy of fp32 weights
runner.optimizer.param_groups = copy.deepcopy(
runner.optimizer.param_groups)
# convert model to fp16
wrap_fp16_model(runner.model)
def copy_grads_to_fp32(self, fp16_net, fp32_weights):
"""Copy gradients from fp16 model to fp32 weight copy."""
for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()):
if fp16_param.grad is not None:
if fp32_param.grad is None:
fp32_param.grad = fp32_param.data.new(fp32_param.size())
fp32_param.grad.copy_(fp16_param.grad)
def copy_params_to_fp16(self, fp16_net, fp32_weights):
"""Copy updated params from fp32 weight copy to fp16 model."""
for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights):
fp16_param.data.copy_(fp32_param.data)
def after_train_iter(self, runner):
# clear grads of last iteration
runner.model.zero_grad()
runner.optimizer.zero_grad()
# scale the loss value
scaled_loss = runner.outputs['loss'] * self.loss_scale
scaled_loss.backward()
# copy fp16 grads in the model to fp32 params in the optimizer
fp32_weights = []
for param_group in runner.optimizer.param_groups:
fp32_weights += param_group['params']
self.copy_grads_to_fp32(runner.model, fp32_weights)
# allreduce grads
if self.distributed:
allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb)
# scale the gradients back
for param in fp32_weights:
if param.grad is not None:
param.grad.div_(self.loss_scale)
if self.grad_clip is not None:
self.clip_grads(fp32_weights)
# update fp32 params
runner.optimizer.step()
# copy fp32 params to the fp16 model
self.copy_params_to_fp16(runner.model, fp32_weights)
def wrap_fp16_model(model):
# convert model to fp16
model.half()
    # patch the normalization layers to make them work in fp32 mode
patch_norm_fp32(model)
# set `fp16_enabled` flag
for m in model.modules():
if hasattr(m, 'fp16_enabled'):
m.fp16_enabled = True
def patch_norm_fp32(module):
if isinstance(module, (_BatchNorm, nn.GroupNorm)):
module.float()
module.forward = patch_forward_method(module.forward, torch.half,
torch.float)
for child in module.children():
patch_norm_fp32(child)
return module
def patch_forward_method(func, src_type, dst_type, convert_output=True):
"""Patch the forward method of a module.
Args:
func (callable): The original forward method.
src_type (torch.dtype): Type of input arguments to be converted from.
dst_type (torch.dtype): Type of input arguments to be converted to.
convert_output (bool): Whether to convert the output back to src_type.
Returns:
callable: The patched forward method.
"""
def new_forward(*args, **kwargs):
output = func(*cast_tensor_type(args, src_type, dst_type),
**cast_tensor_type(kwargs, src_type, dst_type))
if convert_output:
output = cast_tensor_type(output, dst_type, src_type)
return output
return new_forward
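# Hedged usage sketch (added for illustration): `patch_forward_method` lets an
# fp32 module accept fp16 inputs, which is how `patch_norm_fp32` keeps norm
# layers numerically stable. A minimal CPU-only check:
if __name__ == '__main__':
    layer = nn.Linear(4, 2)  # fp32 weights
    layer.forward = patch_forward_method(layer.forward, torch.half,
                                         torch.float)
    out = layer(torch.randn(3, 4).half())  # inputs are cast to fp32 inside
    print(out.dtype)  # torch.float16, since outputs are cast back by default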
| 4,548 | 33.992308 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/fp16/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import abc
import numpy as np
import torch
def cast_tensor_type(inputs, src_type, dst_type):
if isinstance(inputs, torch.Tensor):
return inputs.to(dst_type)
elif isinstance(inputs, str):
return inputs
elif isinstance(inputs, np.ndarray):
return inputs
elif isinstance(inputs, abc.Mapping):
return type(inputs)({
k: cast_tensor_type(v, src_type, dst_type)
for k, v in inputs.items()
})
elif isinstance(inputs, abc.Iterable):
return type(inputs)(
cast_tensor_type(item, src_type, dst_type) for item in inputs)
else:
return inputs
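# Hedged usage sketch (added for illustration): tensors anywhere inside
# nested mappings/iterables are converted, while strings and ndarrays pass
# through unchanged.
if __name__ == '__main__':
    nested = {'img': torch.ones(2), 'meta': ['name', np.zeros(2)]}
    out = cast_tensor_type(nested, torch.float, torch.half)
    print(out['img'].dtype)      # torch.float16
    print(type(out['meta'][1]))  # <class 'numpy.ndarray'>, untouched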
| 712 | 27.52 | 74 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/fp16/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .decorators import auto_fp16, force_fp32
from .hooks import Fp16OptimizerHook, wrap_fp16_model
__all__ = ['auto_fp16', 'force_fp32', 'Fp16OptimizerHook', 'wrap_fp16_model']
| 227 | 37 | 77 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/fp16/decorators.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
from inspect import getfullargspec
import torch
from .utils import cast_tensor_type
def auto_fp16(apply_to=None, out_fp32=False):
"""Decorator to enable fp16 training automatically.
This decorator is useful when you write custom modules and want to support
    mixed precision training. If input arguments are fp32 tensors, they will
be converted to fp16 automatically. Arguments other than fp32 tensors are
ignored.
Args:
apply_to (Iterable, optional): The argument names to be converted.
`None` indicates all arguments.
out_fp32 (bool): Whether to convert the output back to fp32.
:Example:
        class MyModule1(nn.Module):
# Convert x and y to fp16
@auto_fp16()
def forward(self, x, y):
pass
class MyModule2(nn.Module):
# convert pred to fp16
@auto_fp16(apply_to=('pred', ))
def do_something(self, pred, others):
pass
"""
def auto_fp16_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
# check if the module has set the attribute `fp16_enabled`, if not,
# just fallback to the original method.
if not isinstance(args[0], torch.nn.Module):
raise TypeError('@auto_fp16 can only be used to decorate the '
'method of nn.Module')
if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
return old_func(*args, **kwargs)
# get the arg spec of the decorated method
args_info = getfullargspec(old_func)
            # get the argument names to be cast
args_to_cast = args_info.args if apply_to is None else apply_to
# convert the args that need to be processed
new_args = []
# NOTE: default args are not taken into consideration
if args:
arg_names = args_info.args[:len(args)]
for i, arg_name in enumerate(arg_names):
if arg_name in args_to_cast:
new_args.append(
cast_tensor_type(args[i], torch.float, torch.half))
else:
new_args.append(args[i])
# convert the kwargs that need to be processed
new_kwargs = {}
if kwargs:
for arg_name, arg_value in kwargs.items():
if arg_name in args_to_cast:
new_kwargs[arg_name] = cast_tensor_type(
arg_value, torch.float, torch.half)
else:
new_kwargs[arg_name] = arg_value
# apply converted arguments to the decorated method
output = old_func(*new_args, **new_kwargs)
# cast the results back to fp32 if necessary
if out_fp32:
output = cast_tensor_type(output, torch.half, torch.float)
return output
return new_func
return auto_fp16_wrapper
def force_fp32(apply_to=None, out_fp16=False):
"""Decorator to convert input arguments to fp32 in force.
This decorator is useful when you write custom modules and want to support
mixed precision training. If there are some inputs that must be processed
    in fp32 mode, then this decorator can handle it. If input arguments are
fp16 tensors, they will be converted to fp32 automatically. Arguments other
than fp16 tensors are ignored.
Args:
apply_to (Iterable, optional): The argument names to be converted.
`None` indicates all arguments.
out_fp16 (bool): Whether to convert the output back to fp16.
:Example:
        class MyModule1(nn.Module):
# Convert x and y to fp32
@force_fp32()
def loss(self, x, y):
pass
class MyModule2(nn.Module):
# convert pred to fp32
@force_fp32(apply_to=('pred', ))
def post_process(self, pred, others):
pass
"""
def force_fp32_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
# check if the module has set the attribute `fp16_enabled`, if not,
# just fallback to the original method.
if not isinstance(args[0], torch.nn.Module):
raise TypeError('@force_fp32 can only be used to decorate the '
'method of nn.Module')
if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
return old_func(*args, **kwargs)
# get the arg spec of the decorated method
args_info = getfullargspec(old_func)
            # get the argument names to be cast
args_to_cast = args_info.args if apply_to is None else apply_to
# convert the args that need to be processed
new_args = []
if args:
arg_names = args_info.args[:len(args)]
for i, arg_name in enumerate(arg_names):
if arg_name in args_to_cast:
new_args.append(
cast_tensor_type(args[i], torch.half, torch.float))
else:
new_args.append(args[i])
# convert the kwargs that need to be processed
new_kwargs = dict()
if kwargs:
for arg_name, arg_value in kwargs.items():
if arg_name in args_to_cast:
new_kwargs[arg_name] = cast_tensor_type(
arg_value, torch.half, torch.float)
else:
new_kwargs[arg_name] = arg_value
# apply converted arguments to the decorated method
output = old_func(*new_args, **new_kwargs)
# cast the results back to fp32 if necessary
if out_fp16:
output = cast_tensor_type(output, torch.float, torch.half)
return output
return new_func
return force_fp32_wrapper
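# Hedged usage sketch (added for illustration): both decorators are no-ops
# until the module sets `fp16_enabled = True` (normally done by
# `wrap_fp16_model`). A minimal CPU-only check of the input casting:
if __name__ == '__main__':
    class ToyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.fp16_enabled = True

        @auto_fp16(apply_to=('x', ))
        def forward(self, x):
            return x.dtype

    print(ToyModule()(torch.zeros(2)))  # torch.float16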
| 6,259 | 37.641975 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/export/test.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import onnxruntime as ort
import torch
from mmcls.models.classifiers import BaseClassifier
class ONNXRuntimeClassifier(BaseClassifier):
"""Wrapper for classifier's inference with ONNXRuntime."""
def __init__(self, onnx_file, class_names, device_id):
super(ONNXRuntimeClassifier, self).__init__()
sess = ort.InferenceSession(onnx_file)
providers = ['CPUExecutionProvider']
options = [{}]
is_cuda_available = ort.get_device() == 'GPU'
if is_cuda_available:
providers.insert(0, 'CUDAExecutionProvider')
options.insert(0, {'device_id': device_id})
sess.set_providers(providers, options)
self.sess = sess
self.CLASSES = class_names
self.device_id = device_id
self.io_binding = sess.io_binding()
self.output_names = [_.name for _ in sess.get_outputs()]
self.is_cuda_available = is_cuda_available
def simple_test(self, img, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def extract_feat(self, imgs):
raise NotImplementedError('This method is not implemented.')
def forward_train(self, imgs, **kwargs):
raise NotImplementedError('This method is not implemented.')
def forward_test(self, imgs, img_metas, **kwargs):
input_data = imgs
# set io binding for inputs/outputs
device_type = 'cuda' if self.is_cuda_available else 'cpu'
if not self.is_cuda_available:
input_data = input_data.cpu()
self.io_binding.bind_input(
name='input',
device_type=device_type,
device_id=self.device_id,
element_type=np.float32,
shape=input_data.shape,
buffer_ptr=input_data.data_ptr())
for name in self.output_names:
self.io_binding.bind_output(name)
# run session to get outputs
self.sess.run_with_iobinding(self.io_binding)
results = self.io_binding.copy_outputs_to_cpu()[0]
return list(results)
class TensorRTClassifier(BaseClassifier):
def __init__(self, trt_file, class_names, device_id):
super(TensorRTClassifier, self).__init__()
from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin
try:
load_tensorrt_plugin()
except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, '
                          'you may have to build mmcv with TensorRT '
                          'from source.')
model = TRTWraper(
trt_file, input_names=['input'], output_names=['probs'])
self.model = model
self.device_id = device_id
self.CLASSES = class_names
def simple_test(self, img, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def extract_feat(self, imgs):
raise NotImplementedError('This method is not implemented.')
def forward_train(self, imgs, **kwargs):
raise NotImplementedError('This method is not implemented.')
def forward_test(self, imgs, img_metas, **kwargs):
input_data = imgs
with torch.cuda.device(self.device_id), torch.no_grad():
results = self.model({'input': input_data})['probs']
results = results.detach().cpu().numpy()
return list(results)
| 3,439 | 34.463918 | 71 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/export/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .test import ONNXRuntimeClassifier, TensorRTClassifier
__all__ = ['ONNXRuntimeClassifier', 'TensorRTClassifier']
| 167 | 32.6 | 59 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/visualization/image.py
|
import matplotlib.pyplot as plt
import mmcv
import numpy as np
# A small value
EPS = 1e-2
def color_val_matplotlib(color):
"""Convert various input in BGR order to normalized RGB matplotlib color
tuples,
Args:
color (:obj:`mmcv.Color`/str/tuple/int/ndarray): Color inputs
Returns:
tuple[float]: A tuple of 3 normalized floats indicating RGB channels.
"""
color = mmcv.color_val(color)
color = [color / 255 for color in color[::-1]]
return tuple(color)
def imshow_infos(img,
infos,
text_color='white',
font_size=26,
row_width=20,
win_name='',
show=True,
fig_size=(15, 10),
wait_time=0,
out_file=None):
"""Show image with extra infomation.
Args:
img (str | ndarray): The image to be displayed.
infos (dict): Extra infos to display in the image.
text_color (:obj:`mmcv.Color`/str/tuple/int/ndarray): Extra infos
display color. Defaults to 'white'.
font_size (int): Extra infos display font size. Defaults to 26.
row_width (int): width between each row of results on the image.
win_name (str): The image title. Defaults to ''
show (bool): Whether to show the image. Defaults to True.
fig_size (tuple): Image show figure size. Defaults to (15, 10).
wait_time (int): How many seconds to display the image. Defaults to 0.
out_file (Optional[str]): The filename to write the image.
Defaults to None.
Returns:
        np.ndarray: The image with extra information.
"""
img = mmcv.imread(img).astype(np.uint8)
x, y = 3, row_width // 2
text_color = color_val_matplotlib(text_color)
img = mmcv.bgr2rgb(img)
width, height = img.shape[1], img.shape[0]
img = np.ascontiguousarray(img)
# A proper dpi for image save with default font size.
fig = plt.figure(win_name, frameon=False, figsize=fig_size, dpi=36)
plt.title(win_name)
canvas = fig.canvas
dpi = fig.get_dpi()
    # add a small EPS to avoid precision loss due to matplotlib's truncation
# (https://github.com/matplotlib/matplotlib/issues/15363)
fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi)
# remove white edges by set subplot margin
plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
ax = plt.gca()
ax.axis('off')
for k, v in infos.items():
if isinstance(v, float):
v = f'{v:.2f}'
label_text = f'{k}: {v}'
ax.text(
x,
y,
f'{label_text}',
bbox={
'facecolor': 'black',
'alpha': 0.7,
'pad': 0.2,
'edgecolor': 'none',
'boxstyle': 'round'
},
color=text_color,
fontsize=font_size,
family='monospace',
verticalalignment='top',
horizontalalignment='left')
y += row_width
plt.imshow(img)
stream, _ = canvas.print_to_buffer()
buffer = np.frombuffer(stream, dtype='uint8')
img_rgba = buffer.reshape(height, width, 4)
rgb, _ = np.split(img_rgba, [3], axis=2)
img = rgb.astype('uint8')
img = mmcv.rgb2bgr(img)
if show:
        # Matplotlib will adjust text size depending on window size and image
        # aspect ratio. It's hard to get, so here we set an adaptive dpi
        # according to the screen height. 20 here is an empirical parameter.
fig_manager = plt.get_current_fig_manager()
if hasattr(fig_manager, 'window'):
# Figure manager doesn't have window if no screen.
screen_dpi = fig_manager.window.winfo_screenheight() // 20
fig.set_dpi(screen_dpi)
# We do not use cv2 for display because in some cases, opencv will
# conflict with Qt, it will output a warning: Current thread
# is not the object's thread. You can refer to
# https://github.com/opencv/opencv-python/issues/46 for details
if wait_time == 0:
plt.show()
else:
plt.show(block=False)
plt.pause(wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
plt.close()
return img
| 4,341 | 32.145038 | 78 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/visualization/__init__.py
|
from .image import color_val_matplotlib, imshow_infos
__all__ = ['imshow_infos', 'color_val_matplotlib']
| 106 | 25.75 | 53 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/utils/kd_hook.py
|
from mmcv.runner import (OPTIMIZER_BUILDERS, OPTIMIZERS,
                         DefaultOptimizerConstructor)
from mmcv.utils import build_from_cfg
@OPTIMIZER_BUILDERS.register_module()
class KDOptimizerBuilder(DefaultOptimizerConstructor):
def __init__(self, optimizer_cfg, paramwise_cfg=None):
super(KDOptimizerBuilder, self).__init__(optimizer_cfg,
paramwise_cfg)
def __call__(self, model):
if hasattr(model, 'module'):
model = model.module
optimizer_cfg = self.optimizer_cfg.copy()
# if no paramwise option is specified, just use the global setting
if not self.paramwise_cfg:
optimizer_cfg['params'] = model.student.parameters()
student_optimizer = build_from_cfg(optimizer_cfg,
OPTIMIZERS)
return student_optimizer
| 1,003 | 37.615385 | 74 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/utils/dist_utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
import torch.distributed as dist
from mmcv.runner import OptimizerHook
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
grads = [
param.grad.data for param in params
if param.requires_grad and param.grad is not None
]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
class DistOptimizerHook(OptimizerHook):
def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
runner.outputs['loss'].backward()
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
| 1,904 | 31.844828 | 73 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/utils/misc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
def multi_apply(func, *args, **kwargs):
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
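# Hedged usage sketch (added for illustration): `multi_apply` maps a
# multi-output function over parallel argument lists and regroups the
# results per output.
if __name__ == '__main__':
    def add_and_mul(a, b, scale=1):
        return a + b, a * b * scale

    sums, prods = multi_apply(add_and_mul, [1, 2], [3, 4], scale=10)
    print(sums, prods)  # [4, 6] [30, 80]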
| 259 | 27.888889 | 55 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/utils/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import DistOptimizerHook, allreduce_grads
from .misc import multi_apply
from .kd_hook import KDOptimizerBuilder
from .visualize import TensorboardVisLoggerHook
__all__ = ['allreduce_grads', 'DistOptimizerHook',
'multi_apply', 'KDOptimizerBuilder',
'TensorboardVisLoggerHook']
| 364 | 35.5 | 58 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/core/utils/visualize.py
|
import os.path as osp
from mmcv.utils import TORCH_VERSION, digit_version
from mmcv.runner.dist_utils import master_only
from mmcv.runner.hooks import HOOKS
from mmcv.runner.hooks.logger.base import LoggerHook
from collections import OrderedDict
import numpy as np
@HOOKS.register_module()
class TensorboardVisLoggerHook(LoggerHook):
def __init__(self,
log_dir=None,
interval=10,
vis_tags=None,
ignore_last=True,
reset_flag=False,
by_epoch=True):
super(TensorboardVisLoggerHook, self).__init__(interval, ignore_last,
reset_flag, by_epoch)
self.log_dir = log_dir
self.vis_tags = vis_tags
@master_only
def before_run(self, runner):
super(TensorboardVisLoggerHook, self).before_run(runner)
if (TORCH_VERSION == 'parrots'
or digit_version(TORCH_VERSION) < digit_version('1.1')):
try:
from tensorboardX import SummaryWriter
except ImportError:
raise ImportError('Please install tensorboardX to use '
'TensorboardLoggerHook.')
else:
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ImportError(
'Please run "pip install future tensorboard" to install '
'the dependencies to use torch.utils.tensorboard '
'(applicable to PyTorch 1.1 or higher)')
if self.log_dir is None:
self.log_dir = osp.join(runner.work_dir, 'tf_logs')
self.writer = SummaryWriter(self.log_dir)
@master_only
def log(self, runner):
        tags = self.get_loggable_tags(
            runner, allow_text=True,
            tags_to_skip=('time', 'data_time', 'relation'))
for tag, val in tags.items():
if isinstance(val, str):
self.writer.add_text(tag, val, self.get_iter(runner))
else:
self.writer.add_scalar(tag, val, self.get_iter(runner))
if self.vis_tags is not None:
for tag in self.vis_tags:
if tag in runner.log_buffer.output.keys():
val = runner.log_buffer.output[tag]
self.writer.add_image(tag, val, self.get_iter(runner))
@master_only
def after_run(self, runner):
self.writer.close()
class LogBuffer_ignore:
    def __init__(self, ignore_keys=('relation', )):
        self.val_history = OrderedDict()
        self.n_history = OrderedDict()
        self.output = OrderedDict()
        self.ignore_keys = ignore_keys
self.ready = False
def clear(self):
self.val_history.clear()
self.n_history.clear()
self.clear_output()
def clear_output(self):
self.output.clear()
self.ready = False
def update(self, vars, count=1):
assert isinstance(vars, dict)
for key, var in vars.items():
if key not in self.val_history:
self.val_history[key] = []
self.n_history[key] = []
self.val_history[key].append(var)
self.n_history[key].append(count)
def average(self, n=0):
"""Average latest n values or all values."""
assert n >= 0
for key in self.val_history:
if key in self.ignore_keys:
self.output[key] = self.val_history[key][-1]
else:
values = np.array(self.val_history[key][-n:])
nums = np.array(self.n_history[key][-n:])
avg = np.sum(values * nums) / np.sum(nums)
self.output[key] = avg
self.ready = True
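# Hedged usage sketch (added for illustration): keys in `ignore_keys` bypass
# the weighted average and keep their latest raw value, which is how the
# non-numeric 'relation' entry is handled.
if __name__ == '__main__':
    buf = LogBuffer_ignore()
    buf.update({'loss': 1.0, 'relation': 'a->b'})
    buf.update({'loss': 3.0, 'relation': 'b->c'})
    buf.average()
    print(buf.output)  # loss averaged to 2.0, relation kept as 'b->c'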
| 3,792 | 34.12037 | 110 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .builder import (BACKBONES, CLASSIFIERS, HEADS, LOSSES, NECKS,
build_backbone, build_classifier, build_head, build_loss,
build_neck)
from .classifiers import * # noqa: F401,F403
from .heads import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
__all__ = [
'BACKBONES', 'HEADS', 'NECKS', 'LOSSES', 'CLASSIFIERS', 'build_backbone',
'build_head', 'build_neck', 'build_loss', 'build_classifier'
]
| 599 | 39 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/builder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.cnn.bricks.registry import ATTENTION as MMCV_ATTENTION
from mmcv.utils import Registry
MODELS = Registry('models', parent=MMCV_MODELS)
BACKBONES = MODELS
NECKS = MODELS
HEADS = MODELS
LOSSES = MODELS
CLASSIFIERS = MODELS
ATTENTION = Registry('attention', parent=MMCV_ATTENTION)
def build_backbone(cfg):
"""Build backbone."""
return BACKBONES.build(cfg)
def build_neck(cfg):
"""Build neck."""
return NECKS.build(cfg)
def build_head(cfg):
"""Build head."""
return HEADS.build(cfg)
def build_loss(cfg):
"""Build loss."""
return LOSSES.build(cfg)
def build_classifier(cfg):
return CLASSIFIERS.build(cfg)
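# Hedged usage sketch (added for illustration): every build_* helper resolves
# the `type` key against the shared MODELS registry. The concrete names below
# assume the standard mmcls registrations and that `mmcls.models` has been
# imported so the registry is populated.
# backbone = build_backbone(dict(type='ResNet', depth=18))
# neck = build_neck(dict(type='GlobalAveragePooling'))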
| 750 | 18.25641 | 64 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/necks/gap.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import NECKS
@NECKS.register_module()
class GlobalAveragePooling(nn.Module):
"""Global Average Pooling neck.
Note that we use `view` to remove extra channel after pooling. We do not
use `squeeze` as it will also remove the batch dimension when the tensor
has a batch dimension of size 1, which can lead to unexpected errors.
Args:
dim (int): Dimensions of each sample channel, can be one of {1, 2, 3}.
Default: 2
"""
def __init__(self, dim=2):
super(GlobalAveragePooling, self).__init__()
assert dim in [1, 2, 3], 'GlobalAveragePooling dim only support ' \
            f'{1, 2, 3}, got {dim} instead.'
if dim == 1:
self.gap = nn.AdaptiveAvgPool1d(1)
elif dim == 2:
self.gap = nn.AdaptiveAvgPool2d((1, 1))
else:
self.gap = nn.AdaptiveAvgPool3d((1, 1, 1))
def init_weights(self):
pass
def forward(self, inputs):
if isinstance(inputs, tuple):
outs = tuple([self.gap(x) for x in inputs])
outs = tuple(
[out.view(x.size(0), -1) for out, x in zip(outs, inputs)])
elif isinstance(inputs, torch.Tensor):
outs = self.gap(inputs)
outs = outs.view(inputs.size(0), -1)
else:
raise TypeError('neck inputs should be tuple or torch.tensor')
return outs
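# Hedged usage sketch (added for illustration): pooling a (N, C, H, W)
# feature map yields flat (N, C) vectors; tuple inputs are pooled per stage.
if __name__ == '__main__':
    neck = GlobalAveragePooling()
    print(neck(torch.rand(2, 64, 7, 7)).shape)  # torch.Size([2, 64])
    multi = (torch.rand(2, 16, 7, 7), torch.rand(2, 32, 7, 7))
    print([o.shape for o in neck(multi)])
    # [torch.Size([2, 16]), torch.Size([2, 32])]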
| 1,492 | 31.456522 | 78 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/necks/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gap import GlobalAveragePooling
__all__ = ['GlobalAveragePooling']
| 122 | 23.6 | 47 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/classifiers/base.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import BaseModule
from mmcls.core.visualization import imshow_infos
# TODO import `auto_fp16` from mmcv and delete them from mmcls
try:
from mmcv.runner import auto_fp16
except ImportError:
warnings.warn('auto_fp16 from mmcls will be deprecated.'
'Please install mmcv>=1.1.4.')
from mmcls.core import auto_fp16
class BaseClassifier(BaseModule, metaclass=ABCMeta):
"""Base class for classifiers."""
def __init__(self, init_cfg=None):
super(BaseClassifier, self).__init__(init_cfg)
self.fp16_enabled = False
@property
def with_neck(self):
return hasattr(self, 'neck') and self.neck is not None
@property
def with_head(self):
return hasattr(self, 'head') and self.head is not None
@abstractmethod
def extract_feat(self, imgs):
pass
def extract_feats(self, imgs):
assert isinstance(imgs, list)
for img in imgs:
yield self.extract_feat(img)
@abstractmethod
def forward_train(self, imgs, **kwargs):
"""
Args:
            imgs (list[Tensor]): List of tensors of shape (1, C, H, W).
Typically these should be mean centered and std scaled.
kwargs (keyword arguments): Specific to concrete implementation.
"""
pass
@abstractmethod
def simple_test(self, img, **kwargs):
pass
def forward_test(self, imgs, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
"""
if isinstance(imgs, torch.Tensor):
imgs = [imgs]
for var, name in [(imgs, 'imgs')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
if len(imgs) == 1:
return self.simple_test(imgs[0], **kwargs)
else:
raise NotImplementedError('aug_test has not been implemented')
@auto_fp16(apply_to=('img', ))
def forward(self, img, return_loss=True, **kwargs):
"""Calls either forward_train or forward_test depending on whether
return_loss=True.
Note this setting will change the expected inputs. When
`return_loss=True`, img and img_meta are single-nested (i.e. Tensor and
        List[dict]), and when `return_loss=False`, img and img_meta should be
double nested (i.e. List[Tensor], List[List[dict]]), with the outer
list indicating test time augmentations.
"""
if return_loss:
return self.forward_train(img, **kwargs)
else:
return self.forward_test(img, **kwargs)
def _parse_losses(self, losses):
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
elif isinstance(loss_value, dict):
for name, value in loss_value.items():
log_vars[name] = value
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def train_step(self, data, optimizer):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating are also defined in
this method, such as GAN.
Args:
data (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: Dict of outputs. The following fields are contained.
- loss (torch.Tensor): A tensor for back propagation, which \
can be a weighted sum of multiple losses.
- log_vars (dict): Dict contains all the variables to be sent \
to the logger.
- num_samples (int): Indicates the batch size (when the model \
is DDP, it means the batch size on each GPU), which is \
used for averaging the logs.
"""
losses = self(**data)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
return outputs
def val_step(self, data, optimizer):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
losses = self(**data)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
return outputs
def show_result(self,
img,
result,
text_color='white',
font_scale=0.5,
row_width=20,
show=False,
fig_size=(15, 10),
win_name='',
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or ndarray): The image to be displayed.
result (dict): The classification results to draw over `img`.
text_color (str or tuple or :obj:`Color`): Color of texts.
font_scale (float): Font scales of texts.
row_width (int): width between each row of results on the image.
show (bool): Whether to show the image.
Default: False.
fig_size (tuple): Image show figure size. Defaults to (15, 10).
win_name (str): The window name.
wait_time (int): How many seconds to display the image.
Defaults to 0.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (ndarray): Image with overlayed results.
"""
img = mmcv.imread(img)
img = img.copy()
img = imshow_infos(
img,
result,
text_color=text_color,
font_size=int(font_scale * 50),
row_width=row_width,
win_name=win_name,
show=show,
fig_size=fig_size,
wait_time=wait_time,
out_file=out_file)
return img
| 7,775 | 35 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/classifiers/image.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from ..builder import CLASSIFIERS, build_backbone, build_head, build_neck
from ..utils.augment import Augments
from .base import BaseClassifier
@CLASSIFIERS.register_module()
class ImageClassifier(BaseClassifier):
def __init__(self,
backbone,
neck=None,
head=None,
pretrained=None,
train_cfg=None,
init_cfg=None):
super(ImageClassifier, self).__init__(init_cfg)
if pretrained is not None:
            warnings.warn('DeprecationWarning: pretrained is a deprecated '
                          'key, please consider using init_cfg')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
return_tuple = backbone.pop('return_tuple', True)
self.backbone = build_backbone(backbone)
if return_tuple is False:
warnings.warn(
'The `return_tuple` is a temporary arg, we will force to '
'return tuple in the future. Please handle tuple in your '
'custom neck or head.', DeprecationWarning)
self.return_tuple = return_tuple
if neck is not None:
self.neck = build_neck(neck)
if head is not None:
self.head = build_head(head)
self.augments = None
if train_cfg is not None:
augments_cfg = train_cfg.get('augments', None)
if augments_cfg is not None:
self.augments = Augments(augments_cfg)
else:
# Considering BC-breaking
mixup_cfg = train_cfg.get('mixup', None)
cutmix_cfg = train_cfg.get('cutmix', None)
assert mixup_cfg is None or cutmix_cfg is None, \
'If mixup and cutmix are set simultaneously,' \
'use augments instead.'
if mixup_cfg is not None:
warnings.warn('The mixup attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(mixup_cfg)
cfg['type'] = 'BatchMixup'
# In the previous version, mixup_prob is always 1.0.
cfg['prob'] = 1.0
self.augments = Augments(cfg)
if cutmix_cfg is not None:
warnings.warn('The cutmix attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(cutmix_cfg)
cutmix_prob = cfg.pop('cutmix_prob')
cfg['type'] = 'BatchCutMix'
cfg['prob'] = cutmix_prob
self.augments = Augments(cfg)
def get_logit(self, img):
x = self.extract_feat(img)
if isinstance(x, tuple):
x = x[-1]
return self.head.fc(x)
def extract_feat(self, img):
"""Directly extract features from the backbone + neck."""
x = self.backbone(img)
if self.return_tuple:
if not isinstance(x, tuple):
x = (x, )
warnings.simplefilter('once')
warnings.warn(
'We will force all backbones to return a tuple in the '
'future. Please check your backbone and wrap the output '
'as a tuple.', DeprecationWarning)
else:
if isinstance(x, tuple):
x = x[-1]
if self.with_neck:
x = self.neck(x)
return x
def forward_train(self, img, gt_label, **kwargs):
"""Forward computation during training.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
            gt_label (Tensor): It should be of shape (N, 1) encoding the
                ground-truth label of input images for a single-label task,
                and of shape (N, C) encoding the ground-truth label of
                input images for a multi-label task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
if self.augments is not None:
img, gt_label = self.augments(img, gt_label)
x = self.extract_feat(img)
losses = dict()
try:
loss = self.head.forward_train(x, gt_label)
except TypeError as e:
if 'not tuple' in str(e) and self.return_tuple:
                raise TypeError(
'Seems the head cannot handle tuple input. We have '
'changed all backbones\' output to a tuple. Please '
'update your custom head\'s forward function. '
'Temporarily, you can set "return_tuple=False" in '
'your backbone config to disable this feature.')
raise e
losses.update(loss)
return losses
def simple_test(self, img, img_metas):
"""Test without augmentation."""
x = self.extract_feat(img)
try:
res = self.head.simple_test(x)
except TypeError as e:
if 'not tuple' in str(e) and self.return_tuple:
                raise TypeError(
'Seems the head cannot handle tuple input. We have '
'changed all backbones\' output to a tuple. Please '
'update your custom head\'s forward function. '
'Temporarily, you can set "return_tuple=False" in '
'your backbone config to disable this feature.')
raise e
return res
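# Hedged config sketch (added for illustration): a minimal cfg this class
# accepts; the concrete backbone/head names assume the standard mmcls
# registrations and are illustrative only.
# model = ImageClassifier(
#     backbone=dict(type='ResNet', depth=18),
#     neck=dict(type='GlobalAveragePooling'),
#     head=dict(type='LinearClsHead', num_classes=10, in_channels=512,
#               loss=dict(type='CrossEntropyLoss')),
#     train_cfg=dict(augments=dict(
#         type='BatchMixup', alpha=0.2, num_classes=10, prob=1.0)))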
| 5,829 | 37.355263 | 77 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/classifiers/kf.py
|
import copy
import torch
import torch.nn.functional as F
import warnings
from torch import nn
from mmcls.models.losses.kd_loss import (InfoMax_loss, InfoMin_loss)
from ..builder import (CLASSIFIERS, build_backbone, build_head, build_loss,
build_neck)
from ..utils.augment import Augments
from .base import BaseClassifier
@CLASSIFIERS.register_module()
class KFImageClassifier(BaseClassifier):
def __init__(self,
backbone,
kd_loss,
neck=None,
head=None,
pretrained=None,
train_cfg=None,
init_cfg=None):
super(KFImageClassifier, self).__init__(init_cfg)
if pretrained is not None:
            warnings.warn('DeprecationWarning: pretrained is a deprecated '
                          'key, please consider using init_cfg')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
assert 'student' in backbone.keys(), 'student network should be specified'
assert 'teacher' in backbone.keys(), 'teacher network should be specified'
return_tuple = backbone.pop('return_tuple', True)
self.num_task = backbone['num_task']
self.student = nn.ModuleDict(
{
'CKN': build_backbone(backbone['student']['CKN']),
'TSN': nn.ModuleList([build_backbone(backbone['student']['TSN']) for i in range(self.num_task)]),
'neck': build_neck(neck['student']),
'head_task': build_head(head['task']),
'head': build_head(head['student'])
}
)
self.teacher = nn.ModuleDict(
{
'backbone': build_backbone(backbone['teacher']),
'neck': build_neck(neck['teacher']),
'head': build_head(head['teacher'])
}
)
self.feat_channels_student = train_cfg['feat_channels']['student']
self.feat_channels_teacher = train_cfg['feat_channels']['teacher']
feat_fcs = []
for i in range(len(self.feat_channels_student)):
feat_fcs.append(nn.Sequential(
nn.Linear(
self.feat_channels_teacher[i], self.feat_channels_student[i]),
nn.BatchNorm1d(self.feat_channels_student[i]),
nn.ReLU(True),
nn.Linear(
self.feat_channels_student[i], self.feat_channels_student[i])
)
)
self.feat_fcs = nn.ModuleList(feat_fcs)
self.criterionCls = F.cross_entropy
self.criterionTask = F.binary_cross_entropy_with_logits
self.criterionKD = build_loss(kd_loss)
self.lambda_kd = train_cfg['lambda_kd']
self.alpha = train_cfg['alpha']
self.beta = train_cfg['beta']
self.lambda_feat = train_cfg['lambda_feat']
self.teacher_ckpt = train_cfg['teacher_checkpoint']
self.task_weight = train_cfg['task_weight']
if return_tuple is False:
warnings.warn(
'The `return_tuple` is a temporary arg, we will force to '
'return tuple in the future. Please handle tuple in your '
'custom neck or head.', DeprecationWarning)
self.return_tuple = return_tuple
self.load_teacher()
self.augments = None
if train_cfg is not None:
augments_cfg = train_cfg.get('augments', None)
if augments_cfg is not None:
self.augments = Augments(augments_cfg)
else:
# Considering BC-breaking
mixup_cfg = train_cfg.get('mixup', None)
cutmix_cfg = train_cfg.get('cutmix', None)
assert mixup_cfg is None or cutmix_cfg is None, \
'If mixup and cutmix are set simultaneously,' \
'use augments instead.'
if mixup_cfg is not None:
warnings.warn('The mixup attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(mixup_cfg)
cfg['type'] = 'BatchMixup'
# In the previous version, mixup_prob is always 1.0.
cfg['prob'] = 1.0
self.augments = Augments(cfg)
if cutmix_cfg is not None:
warnings.warn('The cutmix attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(cutmix_cfg)
cutmix_prob = cfg.pop('cutmix_prob')
cfg['type'] = 'BatchCutMix'
cfg['prob'] = cutmix_prob
self.augments = Augments(cfg)
def extract_feat(self, imgs):
pass
def load_teacher(self):
        split_line = '*' * 20
        state_dict = torch.load(self.teacher_ckpt)
        if 'state_dict' in state_dict.keys():
            state_dict = state_dict['state_dict']
        try:
            self.teacher.load_state_dict(state_dict)
            print(split_line)
            print('Teacher pretrained model has been loaded from '
                  f'{self.teacher_ckpt}')
            print(split_line)
        except Exception:
            print('Teacher model not loaded')
            print(state_dict.keys())
            print(self.teacher.state_dict().keys())
            raise AssertionError('Teacher model not loaded')
for param in self.teacher.parameters():
param.requires_grad = False
#####################################################
# Functions for teacher network
def extract_teacher_feat(self, img):
"""Directly extract features from the backbone + neck."""
x = self.teacher['backbone'](img)
if self.return_tuple:
if not isinstance(x, tuple):
x = (x, )
warnings.simplefilter('once')
warnings.warn(
'We will force all backbones to return a tuple in the '
'future. Please check your backbone and wrap the output '
'as a tuple.', DeprecationWarning)
else:
if isinstance(x, tuple):
x = x[-1]
# if self.with_neck:
x = self.teacher['neck'](x)
return x
def get_teacher_logit(self, img):
"""Directly extract features from the backbone + neck."""
x = self.extract_teacher_feat(img)
        last_x = x[-1] if isinstance(x, tuple) else x
        logit = self.teacher['head'].fc(last_x)  # head
return logit, x
#####################################################
# Functions for student network
def extract_common_feat(self, img):
"""Directly extract features from the backbone + neck."""
x = self.student['CKN'](img)
if self.return_tuple:
if not isinstance(x, tuple):
x = (x, )
warnings.simplefilter('once')
warnings.warn(
'We will force all backbones to return a tuple in the '
'future. Please check your backbone and wrap the output '
'as a tuple.', DeprecationWarning)
else:
if isinstance(x, tuple):
x = x[-1]
x = self.student['neck'](x)
return x
def extract_task_feat(self, img):
"""Directly extract features from the backbone + neck."""
result = dict(feats=[],
mu_vars=[])
for i in range(self.num_task):
(mu, var), x = self.student['TSN'][i](img)
if self.return_tuple:
if not isinstance(x, tuple):
x = (x, )
else:
if isinstance(x, tuple):
x = x[-1]
result['feats'].append(x)
result['mu_vars'].append((mu, var))
return result
def extract_student_feat(self, img):
common_xs = self.extract_common_feat(img)
task_result = self.extract_task_feat(img)
if self.num_task == 1:
return common_xs, task_result['feats'][0], task_result
else:
return common_xs, task_result['feats'], task_result
def get_student_logit(self, img):
"""Directly extract features from the backbone + neck."""
common_xs, task_feat, task_result = self.extract_student_feat(
img)
        common_x = common_xs[-1] if isinstance(common_xs, tuple) else common_xs
if isinstance(task_feat, tuple):
task_feat = task_feat[-1]
if isinstance(task_feat, list):
feat = [common_x + task_f[-1] for task_f in task_feat]
else:
feat = common_x + task_feat
logit = self.student['head'].get_logits(feat) # head
task_logit = self.student['head_task'].get_logits(task_feat)
return logit, task_logit, common_xs, task_result
def get_logit(self, img):
logit, _, _, _ = self.get_student_logit(img)
return logit
def get_adv_logit(self, img):
_, task_logit, _, _ = self.get_student_logit(
img)
return task_logit
def forward_train(self, img, gt_label, **kwargs):
"""Forward computation during training.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
            gt_label (Tensor): It should be of shape (N, 1) encoding the
                ground-truth label of input images for a single-label task,
                and of shape (N, C) encoding the ground-truth label of
                input images for a multi-label task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
if self.augments is not None:
img, gt_label = self.augments(img, gt_label)
with torch.no_grad():
teacher_logit, teacher_x = self.get_teacher_logit(img)
student_logit, task_logit, student_common_x, task_result = self.get_student_logit(
img)
loss_infomax = 0.0
# Deep feature simulation for KD
assert len(teacher_x) == len(student_common_x)
for layer_id, (teacher_x_layer, student_x_layer) in enumerate(zip(teacher_x, student_common_x)):
loss_infomax += InfoMax_loss(self.feat_fcs[layer_id](teacher_x_layer),
student_x_layer) * self.lambda_feat
loss_infomax = loss_infomax/len(student_common_x)
# Output simulation for KD
loss_kd = self.criterionKD(
student_logit, teacher_logit.detach()) * self.lambda_kd
# Cls loss and infor loss
loss_cls = self.student['head'].loss(student_logit, gt_label)['loss']
# onehot_gt_label = F.one_hot(gt_label,
# num_classes=student_logit.shape[1]).float()
loss_task = self.student['head_task'].loss(task_logit, gt_label)['loss'] * self.task_weight
# InfoMin Loss for task feature
loss_infomin = 0.0
for mu, log_var in task_result['mu_vars']:
loss_infomin += InfoMin_loss(mu, log_var) * self.beta
losses = dict(loss_infomax=loss_infomax,
loss_kd=loss_kd,
loss_cls=loss_cls,
loss_task=loss_task,
loss_infomin=loss_infomin)
return losses
def simple_test(self, img, img_metas):
"""Test without augmentation."""
cls_score = self.get_logit(img)
try:
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(
cls_score, dim=1) if cls_score is not None else None
res = self.student['head'].post_process(pred)
except TypeError as e:
if 'not tuple' in str(e) and self.return_tuple:
                raise TypeError(
'Seems the head cannot handle tuple input. We have '
'changed all backbones\' output to a tuple. Please '
'update your custom head\'s forward function. '
'Temporarily, you can set "return_tuple=False" in '
'your backbone config to disable this feature.')
raise e
return res
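# Hedged config sketch (added for illustration): the train_cfg keys below are
# exactly the ones read in __init__ above; all values are placeholders.
# train_cfg = dict(
#     feat_channels=dict(student=[...], teacher=[...]),  # per-stage dims
#     lambda_kd=1.0,     # weight of the output-distillation term
#     lambda_feat=1.0,   # weight of the InfoMax feature term
#     alpha=1.0,         # stored in __init__ but unused in the losses above
#     beta=1e-3,         # weight of the InfoMin term on (mu, log_var)
#     task_weight=1.0,   # weight of the task-head loss
#     teacher_checkpoint='path/to/teacher.pth')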
| 12,546 | 38.332288 | 113 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/classifiers/kd.py
|
import copy
import warnings
from shutil import ExecError
import torch
import torch.nn.functional as F
from torch import nn
from ..builder import (CLASSIFIERS, build_backbone, build_head, build_loss,
build_neck)
from ..utils.augment import Augments
from .base import BaseClassifier
@CLASSIFIERS.register_module()
class KDImageClassifier(BaseClassifier):
def __init__(self,
backbone,
kd_loss,
neck=None,
head=None,
pretrained=None,
train_cfg=None,
init_cfg=None):
super(KDImageClassifier, self).__init__(init_cfg)
if pretrained is not None:
            warnings.warn('DeprecationWarning: pretrained is a deprecated '
                          'key, please consider using init_cfg')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
assert 'student' in backbone.keys(), 'student network should be specified'
assert 'teacher' in backbone.keys(), 'teacher network should be specified'
return_tuple = backbone.pop('return_tuple', True)
self.student = nn.ModuleDict(
{
'backbone': build_backbone(backbone['student']),
'neck': build_neck(neck['student']),
'head': build_head(head['student'])
}
)
self.teacher = nn.ModuleDict(
{
'backbone': build_backbone(backbone['teacher']),
'neck': build_neck(neck['teacher']),
'head': build_head(head['teacher'])
}
)
self.criterionCls = F.cross_entropy
self.criterionKD = build_loss(kd_loss)
self.lambda_kd = train_cfg['lambda_kd']
self.teacher_ckpt = train_cfg['teacher_checkpoint']
if return_tuple is False:
warnings.warn(
'The `return_tuple` is a temporary arg, we will force to '
'return tuple in the future. Please handle tuple in your '
'custom neck or head.', DeprecationWarning)
self.return_tuple = return_tuple
self.load_teacher()
self.augments = None
if train_cfg is not None:
augments_cfg = train_cfg.get('augments', None)
if augments_cfg is not None:
self.augments = Augments(augments_cfg)
else:
# Considering BC-breaking
mixup_cfg = train_cfg.get('mixup', None)
cutmix_cfg = train_cfg.get('cutmix', None)
assert mixup_cfg is None or cutmix_cfg is None, \
'If mixup and cutmix are set simultaneously,' \
'use augments instead.'
if mixup_cfg is not None:
warnings.warn('The mixup attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(mixup_cfg)
cfg['type'] = 'BatchMixup'
# In the previous version, mixup_prob is always 1.0.
cfg['prob'] = 1.0
self.augments = Augments(cfg)
if cutmix_cfg is not None:
warnings.warn('The cutmix attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(cutmix_cfg)
cutmix_prob = cfg.pop('cutmix_prob')
cfg['type'] = 'BatchCutMix'
cfg['prob'] = cutmix_prob
self.augments = Augments(cfg)
    def load_teacher(self):
        try:
            self.teacher.load_state_dict(
                torch.load(self.teacher_ckpt)['state_dict'])
            print('Teacher pretrained model has been loaded from '
                  f'{self.teacher_ckpt}')
        except Exception:
            raise ExecError('Teacher model not loaded')
for param in self.teacher.parameters():
param.requires_grad = False
###########################
def get_logit(self, img):
"""Directly extract features from the backbone + neck."""
x = self.extract_feat(self.student, img)
if isinstance(x, tuple):
x = x[-1]
logit = self.student['head'].fc(x) # head
return logit
def get_logits(self, model, img):
"""Directly extract features from the backbone + neck."""
x = self.extract_feat(model, img)
if isinstance(x, tuple):
x = x[-1]
logit = model['head'].fc(x) # head
return logit
def extract_feat(self, model, img):
"""Directly extract features from the backbone + neck."""
x = model['backbone'](img)
if self.return_tuple:
if not isinstance(x, tuple):
x = (x, )
warnings.simplefilter('once')
warnings.warn(
'We will force all backbones to return a tuple in the '
'future. Please check your backbone and wrap the output '
'as a tuple.', DeprecationWarning)
else:
if isinstance(x, tuple):
x = x[-1]
# if self.with_neck:
x = model['neck'](x)
return x
def forward_train(self, img, gt_label, **kwargs):
"""Forward computation during training.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
            gt_label (Tensor): It should be of shape (N, 1) encoding the
                ground-truth label of input images for a single-label task,
                and of shape (N, C) encoding the ground-truth label of
                input images for a multi-label task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
if self.augments is not None:
img, gt_label = self.augments(img, gt_label)
with torch.no_grad():
teacher_logit = self.get_logits(self.teacher, img)
student_logit = self.get_logits(self.student, img)
loss_cls = self.criterionCls(student_logit, gt_label)
loss_kd = self.criterionKD(
student_logit, teacher_logit.detach()) * self.lambda_kd
losses = dict(loss_cls=loss_cls,
loss_kd=loss_kd)
return losses
def simple_test(self, img, img_metas):
"""Test without augmentation."""
x = self.extract_feat(self.student, img)
try:
res = self.student['head'].simple_test(x)
except TypeError as e:
if 'not tuple' in str(e) and self.return_tuple:
                raise TypeError(
'Seems the head cannot handle tuple input. We have '
'changed all backbones\' output to a tuple. Please '
'update your custom head\'s forward function. '
'Temporarily, you can set "return_tuple=False" in '
'your backbone config to disable this feature.')
raise e
return res
| 7,148 | 37.643243 | 82 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/classifiers/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseClassifier
from .image import ImageClassifier
from .kd import KDImageClassifier
from .kf import KFImageClassifier
| 187 | 19.888889 | 47 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/embed.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner.base_module import BaseModule
from .helpers import to_2tuple
class PatchEmbed(BaseModule):
"""Image to Patch Embedding.
We use a conv layer to implement PatchEmbed.
Args:
img_size (int | tuple): The size of input image. Default: 224
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None
conv_cfg (dict, optional): The config dict for conv layers.
Default: None
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None
"""
def __init__(self,
img_size=224,
in_channels=3,
embed_dims=768,
norm_cfg=None,
conv_cfg=None,
init_cfg=None):
super(PatchEmbed, self).__init__(init_cfg)
if isinstance(img_size, int):
img_size = to_2tuple(img_size)
elif isinstance(img_size, tuple):
if len(img_size) == 1:
img_size = to_2tuple(img_size[0])
assert len(img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(img_size)}'
self.img_size = img_size
self.embed_dims = embed_dims
# Use conv layer to embed
conv_cfg = conv_cfg or dict()
_conv_cfg = dict(
type='Conv2d', kernel_size=16, stride=16, padding=0, dilation=1)
_conv_cfg.update(conv_cfg)
self.projection = build_conv_layer(_conv_cfg, in_channels, embed_dims)
        # Calculate how many patches an input image is split into.
h_out, w_out = [(self.img_size[i] + 2 * self.projection.padding[i] -
self.projection.dilation[i] *
(self.projection.kernel_size[i] - 1) - 1) //
self.projection.stride[i] + 1 for i in range(2)]
self.patches_resolution = (h_out, w_out)
self.num_patches = h_out * w_out
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't " \
f'match model ({self.img_size[0]}*{self.img_size[1]}).'
        # The output size is (B, N, D), where N=H*W/P/P, D is embed_dims
x = self.projection(x).flatten(2).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x
# Modified from pytorch-image-models
class HybridEmbed(BaseModule):
"""CNN Feature Map Embedding.
Extract feature map from CNN, flatten,
project to embedding dim.
Args:
backbone (nn.Module): CNN backbone
img_size (int | tuple): The size of input image. Default: 224
feature_size (int | tuple, optional): Size of feature map extracted by
CNN backbone. Default: None
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
conv_cfg (dict, optional): The config dict for conv layers.
Default: None.
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None.
"""
def __init__(self,
backbone,
img_size=224,
feature_size=None,
in_channels=3,
embed_dims=768,
conv_cfg=None,
init_cfg=None):
super(HybridEmbed, self).__init__(init_cfg)
assert isinstance(backbone, nn.Module)
if isinstance(img_size, int):
img_size = to_2tuple(img_size)
elif isinstance(img_size, tuple):
if len(img_size) == 1:
img_size = to_2tuple(img_size[0])
assert len(img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(img_size)}'
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of
# determining the exact dim of the output feature
# map for all networks, the feature metadata has
# reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of
# each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(
torch.zeros(1, in_channels, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
# last feature if backbone outputs list/tuple of features
o = o[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info.channels()[-1]
else:
feature_dim = self.backbone.num_features
self.num_patches = feature_size[0] * feature_size[1]
# Use conv layer to embed
conv_cfg = conv_cfg or dict()
_conv_cfg = dict(
type='Conv2d', kernel_size=1, stride=1, padding=0, dilation=1)
_conv_cfg.update(conv_cfg)
self.projection = build_conv_layer(_conv_cfg, feature_dim, embed_dims)
def forward(self, x):
x = self.backbone(x)
if isinstance(x, (list, tuple)):
# last feature if backbone outputs list/tuple of features
x = x[-1]
x = self.projection(x).flatten(2).transpose(1, 2)
return x
class PatchMerging(BaseModule):
"""Merge patch feature map.
    This layer uses nn.Unfold to group the feature map by kernel_size, and
    uses a norm and linear layer to embed the grouped feature map.
Args:
input_resolution (tuple): The size of input patch resolution.
in_channels (int): The num of input channels.
expansion_ratio (Number): Expansion ratio of output channels. The num
of output channels is equal to int(expansion_ratio * in_channels).
kernel_size (int | tuple, optional): the kernel size in the unfold
layer. Defaults to 2.
        stride (int | tuple, optional): the stride of the sliding blocks in the
            unfold layer. Defaults to the same value as ``kernel_size``.
padding (int | tuple, optional): zero padding width in the unfold
layer. Defaults to 0.
dilation (int | tuple, optional): dilation parameter in the unfold
layer. Defaults to 1.
bias (bool, optional): Whether to add bias in linear layer or not.
Defaults to False.
norm_cfg (dict, optional): Config dict for normalization layer.
Defaults to dict(type='LN').
init_cfg (dict, optional): The extra config for initialization.
Defaults to None.
"""
def __init__(self,
input_resolution,
in_channels,
expansion_ratio,
kernel_size=2,
stride=None,
padding=0,
dilation=1,
bias=False,
norm_cfg=dict(type='LN'),
init_cfg=None):
super().__init__(init_cfg)
H, W = input_resolution
self.input_resolution = input_resolution
self.in_channels = in_channels
self.out_channels = int(expansion_ratio * in_channels)
if stride is None:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
padding = to_2tuple(padding)
dilation = to_2tuple(dilation)
self.sampler = nn.Unfold(kernel_size, dilation, padding, stride)
sample_dim = kernel_size[0] * kernel_size[1] * in_channels
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, sample_dim)[1]
else:
self.norm = None
self.reduction = nn.Linear(sample_dim, self.out_channels, bias=bias)
# See https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
H_out = (H + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) // stride[0] + 1
W_out = (W + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) // stride[1] + 1
self.output_resolution = (H_out, W_out)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, 'input feature has wrong size'
x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W
# Use nn.Unfold to merge patch. About 25% faster than original method,
# but need to modify pretrained model for compatibility
x = self.sampler(x) # B, 4*C, H/2*W/2
x = x.transpose(1, 2) # B, H/2*W/2, 4*C
x = self.norm(x) if self.norm else x
x = self.reduction(x)
return x
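# Hedged usage sketch (added for illustration): merging a 14x14 grid of
# 96-dim tokens with the default 2x2 kernel halves the resolution and,
# with expansion_ratio=2, doubles the channels.
if __name__ == '__main__':
    merge = PatchMerging(input_resolution=(14, 14), in_channels=96,
                         expansion_ratio=2)
    print(merge(torch.rand(2, 14 * 14, 96)).shape)  # torch.Size([2, 49, 192])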
| 9,624 | 36.893701 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/se_layer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .make_divisible import make_divisible
class SELayer(BaseModule):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
squeeze_channels (None or int): The intermediate channel number of
SElayer. Default: None, means the value of ``squeeze_channels``
is ``make_divisible(channels // ratio, divisor)``.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will
be ``make_divisible(channels // ratio, divisor)``. Only used when
``squeeze_channels`` is None. Default: 16.
        divisor (int): The divisor to fully divide the channel number. Only
used when ``squeeze_channels`` is None. Default: 8.
conv_cfg (None or dict): Config dict for convolution layer. Default:
None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='Sigmoid'))
"""
def __init__(self,
channels,
squeeze_channels=None,
ratio=16,
divisor=8,
bias='auto',
conv_cfg=None,
act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
init_cfg=None):
super(SELayer, self).__init__(init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
if squeeze_channels is None:
squeeze_channels = make_divisible(channels // ratio, divisor)
assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \
'"squeeze_channels" should be a positive integer, but get ' + \
f'{squeeze_channels} instead.'
self.conv1 = ConvModule(
in_channels=channels,
out_channels=squeeze_channels,
kernel_size=1,
stride=1,
bias=bias,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=squeeze_channels,
out_channels=channels,
kernel_size=1,
stride=1,
bias=bias,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
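if __name__ == '__main__':
    # Smoke test (illustrative addition, not part of the original file): the
    # SE block only re-weights channels, so the output shape matches the input.
    import torch
    se = SELayer(channels=64, ratio=16)   # squeeze_channels -> make_divisible(4, 8) = 8
    x = torch.randn(2, 64, 32, 32)
    assert se(x).shape == x.shape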
| 2,989 | 38.866667 | 77 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/make_divisible.py
|
# Copyright (c) OpenMMLab. All rights reserved.
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
"""Make divisible function.
    This function rounds the channel number to the nearest value that is
    divisible by the divisor, while ensuring it does not fall below
    ``min_ratio`` of the original value.
Args:
value (int): The original channel number.
divisor (int): The divisor to fully divide the channel number.
min_value (int, optional): The minimum value of the output channel.
Default: None, means that the minimum value equal to the divisor.
min_ratio (float): The minimum ratio of the rounded channel
number to the original channel number. Default: 0.9.
Returns:
int: The modified output channel number
"""
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than (1-min_ratio).
if new_value < min_ratio * value:
new_value += divisor
return new_value
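if __name__ == '__main__':
    # Illustrative values (added sketch, not part of the original file).
    assert make_divisible(32, 8) == 32   # already a multiple of the divisor
    assert make_divisible(17, 8) == 16   # nearest multiple, still >= 0.9 * 17
    assert make_divisible(9, 8) == 16    # 8 < 0.9 * 9, so bump up one divisor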
| 1,046 | 39.269231 | 78 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/inverted_residual.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .se_layer import SELayer
class InvertedResidual(BaseModule):
"""Inverted Residual Block.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
        kernel_size (int): The kernel size of the depthwise convolution.
Default: 3.
stride (int): The stride of the depthwise convolution. Default: 1.
        se_cfg (dict): Config dict for se layer. Default: None, which means no
se layer.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
assert stride in [1, 2]
self.with_cp = with_cp
self.with_se = se_cfg is not None
self.with_expand_conv = (mid_channels != in_channels)
if self.with_se:
assert isinstance(se_cfg, dict)
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + out
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
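if __name__ == '__main__':
    # Smoke test (illustrative addition, not part of the original file):
    # stride=1 with matching channels enables the residual shortcut branch.
    import torch
    block = InvertedResidual(in_channels=32, out_channels=32, mid_channels=96)
    x = torch.randn(2, 32, 56, 56)
    assert block(x).shape == x.shape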
| 3,688 | 31.078261 | 78 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .attention import ShiftWindowMSA
from .augment.augments import Augments
from .channel_shuffle import channel_shuffle
from .embed import HybridEmbed, PatchEmbed, PatchMerging
from .helpers import is_tracing, to_2tuple, to_3tuple, to_4tuple, to_ntuple
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .se_layer import SELayer
__all__ = [
'channel_shuffle', 'make_divisible', 'InvertedResidual', 'SELayer',
'to_ntuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'PatchEmbed',
'PatchMerging', 'HybridEmbed', 'Augments', 'ShiftWindowMSA', 'is_tracing'
]
| 659 | 40.25 | 77 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/attention.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.transformer import build_dropout
from mmcv.cnn.utils.weight_init import trunc_normal_
from mmcv.runner.base_module import BaseModule
from ..builder import ATTENTION
from .helpers import to_2tuple
class WindowMSA(BaseModule):
"""Window based multi-head self-attention (W-MSA) module with relative
position bias.
Args:
embed_dims (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
Defaults to True.
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Defaults to None.
attn_drop (float, optional): Dropout ratio of attention weight.
Defaults to 0.
proj_drop (float, optional): Dropout ratio of output. Defaults to 0.
init_cfg (dict, optional): The extra config for initialization.
Defaults to None.
"""
def __init__(self,
embed_dims,
window_size,
num_heads,
qkv_bias=True,
qk_scale=None,
attn_drop=0.,
proj_drop=0.,
init_cfg=None):
super().__init__(init_cfg)
self.embed_dims = embed_dims
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_embed_dims = embed_dims // num_heads
self.scale = qk_scale or head_embed_dims**-0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),
num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# About 2x faster than original impl
Wh, Ww = self.window_size
rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)
rel_position_index = rel_index_coords + rel_index_coords.T
rel_position_index = rel_position_index.flip(1).contiguous()
self.register_buffer('relative_position_index', rel_position_index)
self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(embed_dims, embed_dims)
self.proj_drop = nn.Dropout(proj_drop)
self.softmax = nn.Softmax(dim=-1)
def init_weights(self):
super(WindowMSA, self).init_weights()
trunc_normal_(self.relative_position_bias_table, std=0.02)
def forward(self, x, mask=None):
"""
Args:
x (tensor): input features with shape of (num_windows*B, N, C)
mask (tensor, Optional): mask with shape of (num_windows, Wh*Ww,
Wh*Ww), value should be between (-inf, 0].
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads,
C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[
2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1],
self.window_size[0] * self.window_size[1],
-1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N,
N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
@staticmethod
def double_step_seq(step1, len1, step2, len2):
seq1 = torch.arange(0, step1 * len1, step1)
seq2 = torch.arange(0, step2 * len2, step2)
return (seq1[:, None] + seq2[None, :]).reshape(1, -1)
@ATTENTION.register_module()
class ShiftWindowMSA(BaseModule):
"""Shift Window Multihead Self-Attention Module.
Args:
embed_dims (int): Number of input channels.
input_resolution (Tuple[int, int]): The resolution of the input feature
map.
num_heads (int): Number of attention heads.
window_size (int): The height and width of the window.
shift_size (int, optional): The shift step of each window towards
right-bottom. If zero, act as regular window-msa. Defaults to 0.
qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
Default: True
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Defaults to None.
attn_drop (float, optional): Dropout ratio of attention weight.
Defaults to 0.0.
proj_drop (float, optional): Dropout ratio of output. Defaults to 0.
dropout_layer (dict, optional): The dropout_layer used before output.
Defaults to dict(type='DropPath', drop_prob=0.).
auto_pad (bool, optional): Auto pad the feature map to be divisible by
window_size, Defaults to False.
init_cfg (dict, optional): The extra config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
input_resolution,
num_heads,
window_size,
shift_size=0,
qkv_bias=True,
qk_scale=None,
attn_drop=0,
proj_drop=0,
dropout_layer=dict(type='DropPath', drop_prob=0.),
auto_pad=False,
init_cfg=None):
super().__init__(init_cfg)
self.embed_dims = embed_dims
self.input_resolution = input_resolution
self.shift_size = shift_size
self.window_size = window_size
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, don't partition
self.shift_size = 0
self.window_size = min(self.input_resolution)
self.w_msa = WindowMSA(embed_dims, to_2tuple(self.window_size),
num_heads, qkv_bias, qk_scale, attn_drop,
proj_drop)
self.drop = build_dropout(dropout_layer)
H, W = self.input_resolution
# Handle auto padding
self.auto_pad = auto_pad
if self.auto_pad:
self.pad_r = (self.window_size -
W % self.window_size) % self.window_size
self.pad_b = (self.window_size -
H % self.window_size) % self.window_size
self.H_pad = H + self.pad_b
self.W_pad = W + self.pad_r
else:
H_pad, W_pad = self.input_resolution
assert H_pad % self.window_size + W_pad % self.window_size == 0,\
f'input_resolution({self.input_resolution}) is not divisible '\
f'by window_size({self.window_size}). Please check feature '\
f'map shape or set `auto_pad=True`.'
self.H_pad, self.W_pad = H_pad, W_pad
self.pad_r, self.pad_b = 0, 0
if self.shift_size > 0:
# calculate attention mask for SW-MSA
img_mask = torch.zeros((1, self.H_pad, self.W_pad, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size,
-self.shift_size), slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size,
-self.shift_size), slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
# nW, window_size, window_size, 1
mask_windows = self.window_partition(img_mask)
mask_windows = mask_windows.view(
-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0,
float(-100.0)).masked_fill(
attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer('attn_mask', attn_mask)
def forward(self, query):
H, W = self.input_resolution
B, L, C = query.shape
assert L == H * W, 'input feature has wrong size'
query = query.view(B, H, W, C)
if self.pad_r or self.pad_b:
query = F.pad(query, (0, 0, 0, self.pad_r, 0, self.pad_b))
# cyclic shift
if self.shift_size > 0:
shifted_query = torch.roll(
query,
shifts=(-self.shift_size, -self.shift_size),
dims=(1, 2))
else:
shifted_query = query
# nW*B, window_size, window_size, C
query_windows = self.window_partition(shifted_query)
# nW*B, window_size*window_size, C
query_windows = query_windows.view(-1, self.window_size**2, C)
# W-MSA/SW-MSA (nW*B, window_size*window_size, C)
attn_windows = self.w_msa(query_windows, mask=self.attn_mask)
# merge windows
attn_windows = attn_windows.view(-1, self.window_size,
self.window_size, C)
# B H' W' C
shifted_x = self.window_reverse(attn_windows, self.H_pad, self.W_pad)
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(
shifted_x,
shifts=(self.shift_size, self.shift_size),
dims=(1, 2))
else:
x = shifted_x
if self.pad_r or self.pad_b:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
x = self.drop(x)
return x
def window_reverse(self, windows, H, W):
window_size = self.window_size
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size,
window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
def window_partition(self, x):
B, H, W, C = x.shape
window_size = self.window_size
x = x.view(B, H // window_size, window_size, W // window_size,
window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()
windows = windows.view(-1, window_size, window_size, C)
return windows
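if __name__ == '__main__':
    # Smoke test (illustrative addition, not part of the original file): a
    # plain W-MSA block (shift_size=0) preserves the (B, H*W, C) token layout,
    # since window partition and window reverse are exact inverses here.
    attn = ShiftWindowMSA(
        embed_dims=96, input_resolution=(56, 56), num_heads=3, window_size=7)
    x = torch.randn(2, 56 * 56, 96)
    assert attn(x).shape == x.shape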
| 11,410 | 38.213058 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/helpers.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import collections.abc
import warnings
from distutils.version import LooseVersion
from itertools import repeat
import torch
def is_tracing() -> bool:
if LooseVersion(torch.__version__) >= LooseVersion('1.6.0'):
on_trace = torch.jit.is_tracing()
# In PyTorch 1.6, torch.jit.is_tracing has a bug.
# Refers to https://github.com/pytorch/pytorch/issues/42448
if isinstance(on_trace, bool):
return on_trace
else:
return torch._C._is_tracing()
else:
warnings.warn(
'torch.jit.is_tracing is only supported after v1.6.0. '
'Therefore is_tracing returns False automatically. Please '
'set on_trace manually if you are using trace.', UserWarning)
return False
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
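if __name__ == '__main__':
    # Illustrative values (added sketch, not part of the original file).
    assert to_2tuple(7) == (7, 7)        # scalars are repeated n times
    assert to_2tuple((3, 5)) == (3, 5)   # iterables pass through unchanged
    assert to_ntuple(3)(2) == (2, 2, 2)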
| 1,127 | 25.232558 | 73 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/channel_shuffle.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def channel_shuffle(x, groups):
"""Channel Shuffle operation.
This function enables cross-group information flow for multiple groups
convolution layers.
Args:
x (Tensor): The input tensor.
groups (int): The number of groups to divide the input tensor
in the channel dimension.
Returns:
Tensor: The output tensor after channel shuffle operation.
"""
batch_size, num_channels, height, width = x.size()
assert (num_channels % groups == 0), ('num_channels should be '
'divisible by groups')
channels_per_group = num_channels // groups
x = x.view(batch_size, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batch_size, -1, height, width)
return x
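if __name__ == '__main__':
    # Illustrative check (added sketch, not part of the original file): with
    # groups=2, channels [0, 1, 2, 3] are interleaved across the two groups.
    x = torch.arange(4, dtype=torch.float32).view(1, 4, 1, 1)
    out = channel_shuffle(x, groups=2)
    assert out.flatten().tolist() == [0.0, 2.0, 1.0, 3.0]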
| 889 | 28.666667 | 74 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/augment/identity.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from .builder import AUGMENT
@AUGMENT.register_module(name='Identity')
class Identity(object):
"""Change gt_label to one_hot encoding and keep img as the same.
Args:
num_classes (int): The number of classes.
prob (float): MixUp probability. It should be in range [0, 1].
Default to 1.0
"""
def __init__(self, num_classes, prob=1.0):
super(Identity, self).__init__()
assert isinstance(num_classes, int)
assert isinstance(prob, float) and 0.0 <= prob <= 1.0
self.num_classes = num_classes
self.prob = prob
def one_hot(self, gt_label):
return F.one_hot(gt_label, num_classes=self.num_classes)
def __call__(self, img, gt_label):
return img, self.one_hot(gt_label)
| 857 | 26.677419 | 70 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/augment/cutmix.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
import torch.nn.functional as F
from .builder import AUGMENT
class BaseCutMixLayer(object, metaclass=ABCMeta):
"""Base class for CutMixLayer.
Args:
alpha (float): Parameters for Beta distribution. Positive(>0)
num_classes (int): The number of classes
prob (float): MixUp probability. It should be in range [0, 1].
Default to 1.0
        cutmix_minmax (List[float], optional): cutmix min/max image ratio
            (as a fraction of the image size). When cutmix_minmax is not None,
            we generate the cutmix bounding-box using cutmix_minmax instead of
            alpha.
        correct_lam (bool): Whether to apply lambda correction when the cutmix
            bbox is clipped by image borders. Default to True.
"""
def __init__(self,
alpha,
num_classes,
prob=1.0,
cutmix_minmax=None,
correct_lam=True):
super(BaseCutMixLayer, self).__init__()
assert isinstance(alpha, float) and alpha > 0
assert isinstance(num_classes, int)
assert isinstance(prob, float) and 0.0 <= prob <= 1.0
self.alpha = alpha
self.num_classes = num_classes
self.prob = prob
self.cutmix_minmax = cutmix_minmax
self.correct_lam = correct_lam
def rand_bbox_minmax(self, img_shape, count=None):
"""Min-Max CutMix bounding-box Inspired by Darknet cutmix
implementation. It generates a random rectangular bbox based on min/max
percent values applied to each dimension of the input image.
Typical defaults for minmax are usually in the .2-.3 for min and
.8-.9 range for max.
Args:
img_shape (tuple): Image shape as tuple
count (int, optional): Number of bbox to generate. Default to None
"""
assert len(self.cutmix_minmax) == 2
img_h, img_w = img_shape[-2:]
cut_h = np.random.randint(
int(img_h * self.cutmix_minmax[0]),
int(img_h * self.cutmix_minmax[1]),
size=count)
cut_w = np.random.randint(
int(img_w * self.cutmix_minmax[0]),
int(img_w * self.cutmix_minmax[1]),
size=count)
yl = np.random.randint(0, img_h - cut_h, size=count)
xl = np.random.randint(0, img_w - cut_w, size=count)
yu = yl + cut_h
xu = xl + cut_w
return yl, yu, xl, xu
def rand_bbox(self, img_shape, lam, margin=0., count=None):
"""Standard CutMix bounding-box that generates a random square bbox
based on lambda value. This implementation includes support for
enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin
(reduce amount of box outside image). Default to 0.
count (int, optional): Number of bbox to generate. Default to None
"""
ratio = np.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
yl = np.clip(cy - cut_h // 2, 0, img_h)
yh = np.clip(cy + cut_h // 2, 0, img_h)
xl = np.clip(cx - cut_w // 2, 0, img_w)
xh = np.clip(cx + cut_w // 2, 0, img_w)
return yl, yh, xl, xh
def cutmix_bbox_and_lam(self, img_shape, lam, count=None):
"""Generate bbox and apply lambda correction.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
count (int, optional): Number of bbox to generate. Default to None
"""
if self.cutmix_minmax is not None:
yl, yu, xl, xu = self.rand_bbox_minmax(img_shape, count=count)
else:
yl, yu, xl, xu = self.rand_bbox(img_shape, lam, count=count)
if self.correct_lam or self.cutmix_minmax is not None:
bbox_area = (yu - yl) * (xu - xl)
lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
return (yl, yu, xl, xu), lam
@abstractmethod
def cutmix(self, imgs, gt_label):
pass
@AUGMENT.register_module(name='BatchCutMix')
class BatchCutMixLayer(BaseCutMixLayer):
"""CutMix layer for batch CutMix."""
def __init__(self, *args, **kwargs):
super(BatchCutMixLayer, self).__init__(*args, **kwargs)
def cutmix(self, img, gt_label):
one_hot_gt_label = F.one_hot(gt_label, num_classes=self.num_classes)
lam = np.random.beta(self.alpha, self.alpha)
batch_size = img.size(0)
index = torch.randperm(batch_size)
(bby1, bby2, bbx1,
bbx2), lam = self.cutmix_bbox_and_lam(img.shape, lam)
img[:, :, bby1:bby2, bbx1:bbx2] = \
img[index, :, bby1:bby2, bbx1:bbx2]
mixed_gt_label = lam * one_hot_gt_label + (
1 - lam) * one_hot_gt_label[index, :]
return img, mixed_gt_label
def __call__(self, img, gt_label):
return self.cutmix(img, gt_label)
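if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original file):
    # mix a batch with a shuffled copy of itself inside a random bbox.
    imgs = torch.randn(8, 3, 32, 32)
    labels = torch.randint(0, 10, (8, ))
    cutmix = BatchCutMixLayer(alpha=1., num_classes=10, prob=1.0)
    mixed_imgs, mixed_labels = cutmix(imgs, labels)
    assert mixed_imgs.shape == imgs.shape
    assert mixed_labels.shape == (8, 10)   # soft one-hot targets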
| 5,453 | 37.680851 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/augment/mixup.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
import torch.nn.functional as F
from .builder import AUGMENT
class BaseMixupLayer(object, metaclass=ABCMeta):
"""Base class for MixupLayer.
Args:
alpha (float): Parameters for Beta distribution.
num_classes (int): The number of classes.
prob (float): MixUp probability. It should be in range [0, 1].
Default to 1.0
"""
def __init__(self, alpha, num_classes, prob=1.0):
super(BaseMixupLayer, self).__init__()
assert isinstance(alpha, float) and alpha > 0
assert isinstance(num_classes, int)
assert isinstance(prob, float) and 0.0 <= prob <= 1.0
self.alpha = alpha
self.num_classes = num_classes
self.prob = prob
@abstractmethod
def mixup(self, imgs, gt_label):
pass
@AUGMENT.register_module(name='BatchMixup')
class BatchMixupLayer(BaseMixupLayer):
"""Mixup layer for batch mixup."""
def __init__(self, *args, **kwargs):
super(BatchMixupLayer, self).__init__(*args, **kwargs)
def mixup(self, img, gt_label):
one_hot_gt_label = F.one_hot(gt_label, num_classes=self.num_classes)
lam = np.random.beta(self.alpha, self.alpha)
batch_size = img.size(0)
index = torch.randperm(batch_size)
mixed_img = lam * img + (1 - lam) * img[index, :]
mixed_gt_label = lam * one_hot_gt_label + (
1 - lam) * one_hot_gt_label[index, :]
return mixed_img, mixed_gt_label
def __call__(self, img, gt_label):
return self.mixup(img, gt_label)
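if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original file): a
    # convex combination of the batch with a randomly permuted copy of itself.
    imgs = torch.randn(8, 3, 32, 32)
    labels = torch.randint(0, 10, (8, ))
    mixup = BatchMixupLayer(alpha=1., num_classes=10, prob=1.0)
    mixed_imgs, mixed_labels = mixup(imgs, labels)
    assert mixed_imgs.shape == imgs.shape and mixed_labels.shape == (8, 10)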
| 1,674 | 27.87931 | 76 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/augment/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augments import Augments
from .cutmix import BatchCutMixLayer
from .identity import Identity
from .mixup import BatchMixupLayer
__all__ = ['Augments', 'BatchCutMixLayer', 'Identity', 'BatchMixupLayer']
| 257 | 31.25 | 73 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/augment/builder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg
AUGMENT = Registry('augment')
def build_augment(cfg, default_args=None):
return build_from_cfg(cfg, AUGMENT, default_args)
| 226 | 24.222222 | 53 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/utils/augment/augments.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
import numpy as np
from .builder import build_augment
class Augments(object):
"""Data augments.
We implement some data augmentation methods, such as mixup, cutmix.
Args:
augments_cfg (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict`):
Config dict of augments
Example:
>>> augments_cfg = [
dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5),
dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3)
]
>>> augments = Augments(augments_cfg)
>>> imgs = torch.randn(16, 3, 32, 32)
>>> label = torch.randint(0, 10, (16, ))
>>> imgs, label = augments(imgs, label)
    To decide which augmentation within the Augments block is used,
    the following rule is applied: we pick an augmentation based on its
    configured probability. In the example above, BatchCutMix is used with
    probability 0.5 and BatchMixup with probability 0.3. As Identity is not
    in augments_cfg, Identity is used with the remaining probability
    1 - 0.5 - 0.3 = 0.2.
"""
def __init__(self, augments_cfg):
super(Augments, self).__init__()
if isinstance(augments_cfg, dict):
augments_cfg = [augments_cfg]
assert len(augments_cfg) > 0, \
'The length of augments_cfg should be positive.'
self.augments = [build_augment(cfg) for cfg in augments_cfg]
self.augment_probs = [aug.prob for aug in self.augments]
has_identity = any([cfg['type'] == 'Identity' for cfg in augments_cfg])
if has_identity:
assert sum(self.augment_probs) == 1.0,\
                'The sum of augmentation probabilities should be equal to 1,' \
' but got {:.2f}'.format(sum(self.augment_probs))
else:
assert sum(self.augment_probs) <= 1.0,\
                'The sum of augmentation probabilities should be less than or ' \
'equal to 1, but got {:.2f}'.format(sum(self.augment_probs))
identity_prob = 1 - sum(self.augment_probs)
if identity_prob > 0:
num_classes = self.augments[0].num_classes
self.augments += [
build_augment(
dict(
type='Identity',
num_classes=num_classes,
prob=identity_prob))
]
self.augment_probs += [identity_prob]
def __call__(self, img, gt_label):
if self.augments:
random_state = np.random.RandomState(random.randint(0, 2**32 - 1))
aug = random_state.choice(self.augments, p=self.augment_probs)
return aug(img, gt_label)
return img, gt_label
| 2,799 | 36.837838 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/losses/label_smooth_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
import torch.nn as nn
from ..builder import LOSSES
from .cross_entropy_loss import CrossEntropyLoss
from .utils import convert_to_one_hot
@LOSSES.register_module()
class LabelSmoothLoss(nn.Module):
r"""Intializer for the label smoothed cross entropy loss.
Refers to `Rethinking the Inception Architecture for Computer Vision
<https://arxiv.org/abs/1512.00567>`_
    This decreases the gap between output scores and encourages generalization.
Labels provided to forward can be one-hot like vectors (NxC) or class
indices (Nx1).
And this accepts linear combination of one-hot like labels from mixup or
cutmix except multi-label task.
Args:
label_smooth_val (float): The degree of label smoothing.
num_classes (int, optional): Number of classes. Defaults to None.
        mode (str): Refers to the notes below. Options are 'original',
            'classy_vision' and 'multi_label'. Defaults to 'classy_vision'.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". Defaults to 'mean'.
loss_weight (float): Weight of the loss. Defaults to 1.0.
Notes:
if the mode is "original", this will use the same label smooth method
as the original paper as:
.. math::
(1-\epsilon)\delta_{k, y} + \frac{\epsilon}{K}
where epsilon is the `label_smooth_val`, K is the num_classes and
delta(k,y) is Dirac delta, which equals 1 for k=y and 0 otherwise.
if the mode is "classy_vision", this will use the same label smooth
method as the facebookresearch/ClassyVision repo as:
.. math::
\frac{\delta_{k, y} + \epsilon/K}{1+\epsilon}
if the mode is "multi_label", this will accept labels from multi-label
task and smoothing them as:
.. math::
(1-2\epsilon)\delta_{k, y} + \epsilon
"""
def __init__(self,
label_smooth_val,
num_classes=None,
mode=None,
reduction='mean',
loss_weight=1.0):
super().__init__()
self.num_classes = num_classes
self.loss_weight = loss_weight
assert (isinstance(label_smooth_val, float)
and 0 <= label_smooth_val < 1), \
f'LabelSmoothLoss accepts a float label_smooth_val ' \
f'over [0, 1), but gets {label_smooth_val}'
self.label_smooth_val = label_smooth_val
accept_reduction = {'none', 'mean', 'sum'}
assert reduction in accept_reduction, \
f'LabelSmoothLoss supports reduction {accept_reduction}, ' \
            f'but gets {reduction}.'
self.reduction = reduction
if mode is None:
warnings.warn(
'LabelSmoothLoss mode is not set, use "classy_vision" '
'by default. The default value will be changed to '
'"original" recently. Please set mode manually if want '
'to keep "classy_vision".', UserWarning)
mode = 'classy_vision'
accept_mode = {'original', 'classy_vision', 'multi_label'}
assert mode in accept_mode, \
f'LabelSmoothLoss supports mode {accept_mode}, but gets {mode}.'
self.mode = mode
self._eps = label_smooth_val
if mode == 'classy_vision':
self._eps = label_smooth_val / (1 + label_smooth_val)
if mode == 'multi_label':
self.ce = CrossEntropyLoss(use_sigmoid=True)
self.smooth_label = self.multilabel_smooth_label
else:
self.ce = CrossEntropyLoss(use_soft=True)
self.smooth_label = self.original_smooth_label
def generate_one_hot_like_label(self, label):
"""This function takes one-hot or index label vectors and computes one-
hot like label vectors (float)"""
# check if targets are inputted as class integers
if label.dim() == 1 or (label.dim() == 2 and label.shape[1] == 1):
label = convert_to_one_hot(label.view(-1, 1), self.num_classes)
return label.float()
def original_smooth_label(self, one_hot_like_label):
assert self.num_classes > 0
smooth_label = one_hot_like_label * (1 - self._eps)
smooth_label += self._eps / self.num_classes
return smooth_label
def multilabel_smooth_label(self, one_hot_like_label):
assert self.num_classes > 0
smooth_label = torch.full_like(one_hot_like_label, self._eps)
smooth_label.masked_fill_(one_hot_like_label > 0, 1 - self._eps)
return smooth_label
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
r"""Label smooth loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
label (torch.Tensor): The ground truth label of the prediction
with shape (N, \*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
                (N, \*). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss into a scalar. Options are "none", "mean" and "sum".
Defaults to None.
Returns:
torch.Tensor: Loss.
"""
if self.num_classes is not None:
assert self.num_classes == cls_score.shape[1], \
f'num_classes should equal to cls_score.shape[1], ' \
f'but got num_classes: {self.num_classes} and ' \
f'cls_score.shape[1]: {cls_score.shape[1]}'
else:
self.num_classes = cls_score.shape[1]
one_hot_like_label = self.generate_one_hot_like_label(label=label)
assert one_hot_like_label.shape == cls_score.shape, \
f'LabelSmoothLoss requires output and target ' \
f'to be same shape, but got output.shape: {cls_score.shape} ' \
f'and target.shape: {one_hot_like_label.shape}'
smoothed_label = self.smooth_label(one_hot_like_label)
return self.ce.forward(
cls_score,
smoothed_label,
weight=weight,
avg_factor=avg_factor,
reduction_override=reduction_override,
**kwargs)
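if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original file): in
    # 'original' mode the hard target becomes (1 - eps) * one_hot + eps / K
    # before the soft cross-entropy is applied.
    criterion = LabelSmoothLoss(
        label_smooth_val=0.1, num_classes=5, mode='original')
    scores = torch.randn(4, 5)
    labels = torch.randint(0, 5, (4, ))
    loss = criterion(scores, labels)
    assert loss.ndim == 0   # scalar, since reduction='mean'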
| 6,591 | 38.238095 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/losses/asymmetric_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weight_reduce_loss
def asymmetric_loss(pred,
target,
weight=None,
gamma_pos=1.0,
gamma_neg=4.0,
clip=0.05,
reduction='mean',
avg_factor=None):
r"""asymmetric loss.
Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for
details.
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
            (N, ). Defaults to None.
        gamma_pos (float): Positive focusing parameter. Defaults to 1.0.
gamma_neg (float): Negative focusing parameter. We usually set
gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' , loss
is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: Loss.
"""
    assert pred.shape == \
        target.shape, 'pred and target should have the same shape.'
eps = 1e-8
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
if clip and clip > 0:
pt = (1 - pred_sigmoid +
clip).clamp(max=1) * (1 - target) + pred_sigmoid * target
else:
pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg *
(1 - target))
loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class AsymmetricLoss(nn.Module):
"""asymmetric loss.
Args:
        gamma_pos (float): Positive focusing parameter.
Defaults to 0.0.
gamma_neg (float): Negative focusing parameter. We
usually set gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss into
a scalar.
loss_weight (float): Weight of loss. Defaults to 1.0.
"""
def __init__(self,
gamma_pos=0.0,
gamma_neg=4.0,
clip=0.05,
reduction='mean',
loss_weight=1.0):
super(AsymmetricLoss, self).__init__()
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.clip = clip
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""asymmetric loss."""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_cls = self.loss_weight * asymmetric_loss(
pred,
target,
weight,
gamma_pos=self.gamma_pos,
gamma_neg=self.gamma_neg,
clip=self.clip,
reduction=reduction,
avg_factor=avg_factor)
return loss_cls
| 3,887 | 33.40708 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/losses/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import torch
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Avarage factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
``loss_func(pred, target, **kwargs)``. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like ``loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)``.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
def convert_to_one_hot(targets: torch.Tensor, classes) -> torch.Tensor:
"""This function converts target class indices to one-hot vectors, given
the number of classes.
Args:
targets (Tensor): The ground truth label of the prediction
with shape (N, 1)
classes (int): the number of classes.
Returns:
Tensor: Processed loss values.
"""
assert (torch.max(targets).item() <
classes), 'Class Index must be less than number of classes'
one_hot_targets = torch.zeros((targets.shape[0], classes),
dtype=torch.long,
device=targets.device)
one_hot_targets.scatter_(1, targets.long(), 1)
return one_hot_targets
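if __name__ == '__main__':
    # Illustrative check (added sketch, not part of the original file).
    targets = torch.tensor([[0], [2], [1]])
    one_hot = convert_to_one_hot(targets, classes=3)
    assert one_hot.tolist() == [[1, 0, 0], [0, 0, 1], [0, 1, 0]]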
| 3,827 | 30.377049 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/losses/accuracy.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from numbers import Number
import numpy as np
import torch
import torch.nn as nn
def accuracy_numpy(pred, target, topk=1, thrs=0.):
if isinstance(thrs, Number):
thrs = (thrs, )
res_single = True
elif isinstance(thrs, tuple):
res_single = False
else:
raise TypeError(
f'thrs should be a number or tuple, but got {type(thrs)}.')
res = []
maxk = max(topk)
num = pred.shape[0]
pred_label = pred.argsort(axis=1)[:, -maxk:][:, ::-1]
pred_score = np.sort(pred, axis=1)[:, -maxk:][:, ::-1]
for k in topk:
correct_k = pred_label[:, :k] == target.reshape(-1, 1)
res_thr = []
for thr in thrs:
# Only prediction values larger than thr are counted as correct
_correct_k = correct_k & (pred_score[:, :k] > thr)
_correct_k = np.logical_or.reduce(_correct_k, axis=1)
res_thr.append(_correct_k.sum() * 100. / num)
if res_single:
res.append(res_thr[0])
else:
res.append(res_thr)
return res
def accuracy_torch(pred, target, topk=1, thrs=0.):
if isinstance(thrs, Number):
thrs = (thrs, )
res_single = True
elif isinstance(thrs, tuple):
res_single = False
else:
raise TypeError(
f'thrs should be a number or tuple, but got {type(thrs)}.')
res = []
maxk = max(topk)
num = pred.size(0)
pred_score, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t()
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
for k in topk:
res_thr = []
for thr in thrs:
# Only prediction values larger than thr are counted as correct
_correct = correct & (pred_score.t() > thr)
correct_k = _correct[:k].reshape(-1).float().sum(0, keepdim=True)
res_thr.append(correct_k.mul_(100. / num))
if res_single:
res.append(res_thr[0])
else:
res.append(res_thr)
return res
def accuracy(pred, target, topk=1, thrs=0.):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction.
target (torch.Tensor | np.array): The target of each prediction
topk (int | tuple[int]): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thrs (Number | tuple[Number], optional): Predictions with scores under
the thresholds are considered negative. Default to 0.
Returns:
float | list[float] | list[list[float]]: Accuracy
- float: If both ``topk`` and ``thrs`` is a single value.
- list[float]: If one of ``topk`` or ``thrs`` is a tuple.
- list[list[float]]: If both ``topk`` and ``thrs`` is a tuple. \
And the first dim is ``topk``, the second dim is ``thrs``.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
res = accuracy_torch(pred, target, topk, thrs)
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
res = accuracy_numpy(pred, target, topk, thrs)
else:
raise TypeError(
f'pred and target should both be torch.Tensor or np.ndarray, '
f'but got {type(pred)} and {type(target)}.')
return res[0] if return_single else res
class Accuracy(nn.Module):
def __init__(self, topk=(1, )):
"""Module to calculate the accuracy.
Args:
topk (tuple): The criterion used to calculate the
accuracy. Defaults to (1,).
"""
super().__init__()
self.topk = topk
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
list[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk)
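if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original file): the
    # second sample is only recovered within the top-2 predictions.
    pred = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    target = torch.tensor([1, 1])
    top1, top2 = accuracy(pred, target, topk=(1, 2))
    assert top1.item() == 50.0 and top2.item() == 100.0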
| 4,342 | 32.152672 | 78 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/losses/focal_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
r"""Sigmoid focal loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
            (N, ). Defaults to None.
gamma (float): The gamma for calculating the modulating factor.
Defaults to 2.0.
alpha (float): A balanced form for Focal Loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' ,
loss is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: Loss.
"""
    assert pred.shape == \
        target.shape, 'pred and target should have the same shape.'
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class FocalLoss(nn.Module):
"""Focal loss.
Args:
gamma (float): Focusing parameter in focal loss.
Defaults to 2.0.
alpha (float): The parameter in balanced form of focal
loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss into
a scalar. Options are "none" and "mean". Defaults to 'mean'.
loss_weight (float): Weight of loss. Defaults to 1.0.
"""
def __init__(self,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=1.0):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
r"""Sigmoid focal loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
target (torch.Tensor): The ground truth label of the prediction
with shape (N, \*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
                (N, \*). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss into a scalar. Options are "none", "mean" and "sum".
Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_cls = self.loss_weight * sigmoid_focal_loss(
pred,
target,
weight,
gamma=self.gamma,
alpha=self.alpha,
reduction=reduction,
avg_factor=avg_factor)
return loss_cls
| 4,089 | 34.565217 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/losses/cross_entropy_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The gt label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (torch.Tensor, optional): The weight for each class with
shape (C), C is the number of classes. Default None.
Returns:
torch.Tensor: The calculated loss
"""
# element-wise losses
loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def soft_cross_entropy(pred,
label,
weight=None,
reduction='mean',
class_weight=None,
avg_factor=None):
"""Calculate the Soft CrossEntropy loss. The label can be float.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The gt label of the prediction with shape (N, C).
When using "mixup", the label can be float.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (torch.Tensor, optional): The weight for each class with
shape (C), C is the number of classes. Default None.
Returns:
torch.Tensor: The calculated loss
"""
# element-wise losses
loss = -label * F.log_softmax(pred, dim=-1)
if class_weight is not None:
loss *= class_weight
loss = loss.sum(dim=-1)
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
r"""Calculate the binary CrossEntropy loss with logits.
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
label (torch.Tensor): The gt label with shape (N, \*).
weight (torch.Tensor, optional): Element-wise weight of loss with shape
(N, ). Defaults to None.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' , loss
is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (torch.Tensor, optional): The weight for each class with
shape (C), C is the number of classes. Default None.
Returns:
torch.Tensor: The calculated loss
"""
assert pred.dim() == label.dim()
    # Ensure that the size of class_weight is consistent with pred and label
    # to avoid unintended automatic broadcasting.
if class_weight is not None:
N = pred.size()[0]
class_weight = class_weight.repeat(N, 1)
loss = F.binary_cross_entropy_with_logits(
pred, label, weight=class_weight, reduction='none')
# apply weights and do the reduction
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
"""Cross entropy loss.
Args:
use_sigmoid (bool): Whether the prediction uses sigmoid
of softmax. Defaults to False.
use_soft (bool): Whether to use the soft version of CrossEntropyLoss.
Defaults to False.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". Defaults to 'mean'.
loss_weight (float): Weight of the loss. Defaults to 1.0.
class_weight (List[float], optional): The weight for each class with
shape (C), C is the number of classes. Default None.
"""
def __init__(self,
use_sigmoid=False,
use_soft=False,
reduction='mean',
loss_weight=1.0,
class_weight=None):
super(CrossEntropyLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.use_soft = use_soft
assert not (
self.use_soft and self.use_sigmoid
), 'use_sigmoid and use_soft could not be set simultaneously'
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_soft:
self.cls_criterion = soft_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(self.class_weight)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
| 6,753 | 34.547368 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/losses/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .asymmetric_loss import AsymmetricLoss, asymmetric_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy)
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .label_smooth_loss import LabelSmoothLoss
from .utils import (convert_to_one_hot, reduce_loss, weight_reduce_loss,
weighted_loss)
from .kd_loss import Logits, SoftTarget
__all__ = [
'accuracy', 'Accuracy', 'asymmetric_loss', 'AsymmetricLoss',
'cross_entropy', 'binary_cross_entropy', 'CrossEntropyLoss', 'reduce_loss',
'weight_reduce_loss', 'LabelSmoothLoss', 'weighted_loss', 'FocalLoss',
'sigmoid_focal_loss', 'convert_to_one_hot', 'Logits', 'SoftTarget'
]
| 825 | 47.588235 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/losses/kd_loss.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
@LOSSES.register_module()
class Logits(nn.Module):
'''
Do Deep Nets Really Need to be Deep?
http://papers.nips.cc/paper/5484-do-deep-nets-really-need-to-be-deep.pdf
'''
def __init__(self):
super(Logits, self).__init__()
def forward(self, out_s, out_t):
loss = F.mse_loss(out_s, out_t)
return loss
@LOSSES.register_module()
class SoftTarget(nn.Module):
'''
Distilling the Knowledge in a Neural Network
https://arxiv.org/pdf/1503.02531.pdf
'''
def __init__(self, temperature):
super(SoftTarget, self).__init__()
self.T = temperature
def forward(self, out_s, out_t):
loss = F.kl_div(F.log_softmax(out_s/self.T, dim=1),
F.softmax(out_t/self.T, dim=1),
reduction='batchmean') * self.T * self.T
return loss
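# Usage sketch (illustrative addition, not part of the original file):
#     kd = SoftTarget(temperature=4.0)
#     loss = kd(student_logits, teacher_logits)  # T^2-scaled KL divergence
#                                                # between softened outputs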
def InfoMin_loss(mu, log_var):
shape = mu.shape
if len(shape) == 2:
return torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)
elif len(shape) == 1:
return -0.5 * torch.mean(1 + log_var - mu ** 2 - log_var.exp())
def InfoMax_loss(x1, x2):
x1 = x1 / (torch.norm(x1, p=2, dim=1, keepdim=True) + 1e-10)
x2 = x2 / (torch.norm(x2, p=2, dim=1, keepdim=True) + 1e-10)
bs = x1.size(0)
s = torch.matmul(x1, x2.permute(1, 0))
    mask_joint = torch.eye(bs, device=x1.device)  # diagonal marks joint pairs
mask_marginal = 1 - mask_joint
Ej = (s * mask_joint).mean()
Em = torch.exp(s * mask_marginal).mean()
    # A decoupled contrastive variant would divide by Em instead.
    infomax_loss = -(Ej - torch.log(Em))
return infomax_loss
| 1,876 | 26.602941 | 96 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/backbones/mobilenet_v2.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import make_divisible
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(BaseModule):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
        expand_ratio (int): Adjusts the number of channels of the hidden layer
in InvertedResidual by this amount.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=1,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class MobileNetV2(BaseBackbone):
"""MobileNetV2 backbone.
Args:
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (None or Sequence[int]): Output from which stages.
Default: (7, ).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
# Parameters to build layers. 4 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks, stride.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
widen_factor=1.,
out_indices=(7, ),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super(MobileNetV2, self).__init__(init_cfg)
self.widen_factor = widen_factor
for index in out_indices:
if index not in range(0, 8):
                raise ValueError('the item in out_indices must be in '
f'range(0, 8). But received {index}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
            stride (int): stride of the first block.
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride,
expand_ratio=expand_ratio,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def train(self, mode=True):
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
| 9,588 | 35.184906 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/backbones/resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer,
constant_init)
from mmcv.utils.parrots_wrapper import _BatchNorm
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class WideBasicBlock(nn.Module):
"""BasicBlock for ResNet.
Args:
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
expansion (int): The ratio of ``out_channels/mid_channels`` where
``mid_channels`` is the output channels of conv1. This is a
reserved argument in BasicBlock and should always be 1. Default: 1.
stride (int): stride of the block. Default: 1
        dilation (int): dilation of convolution. Default: 1
        dropout (float): Dropout rate applied between the two convolutions.
            Default: 0.
downsample (nn.Module, optional): downsample operation on identity
branch. Default: None.
style (str): `pytorch` or `caffe`. It is unused and reserved for
unified API with Bottleneck.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
conv_cfg (dict, optional): dictionary to construct and config conv
layer. Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='SyncBN', momentum=0.001, requires_grad=True)
"""
def __init__(self,
in_channels,
out_channels,
expansion=1,
stride=1,
dilation=1,
dropout=0,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='SyncBN',
momentum=0.001,
requires_grad=True)):
super(WideBasicBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.expansion = expansion
assert self.expansion == 1
assert out_channels % expansion == 0
self.mid_channels = out_channels // expansion
self.stride = stride
self.dropout = dropout
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if self.dropout > 0:
self.drop = nn.Dropout2d(p=self.dropout)
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, self.in_channels, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, self.mid_channels, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
in_channels,
self.mid_channels,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
self.mid_channels,
out_channels,
3,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.LeakyReLU(0.1, inplace=True)
self.downsample = downsample
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.norm1(x)
out = self.relu(out)
out = self.conv1(out)
if self.dropout > 0:
out = self.drop(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
# out = self.relu(out)
return out
class BasicBlock(nn.Module):
"""BasicBlock for ResNet.
Args:
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
expansion (int): The ratio of ``out_channels/mid_channels`` where
``mid_channels`` is the output channels of conv1. This is a
reserved argument in BasicBlock and should always be 1. Default: 1.
stride (int): stride of the block. Default: 1
dilation (int): dilation of convolution. Default: 1
downsample (nn.Module, optional): downsample operation on identity
branch. Default: None.
style (str): `pytorch` or `caffe`. It is unused and reserved for
unified API with Bottleneck.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
conv_cfg (dict, optional): dictionary to construct and config conv
layer. Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
"""
def __init__(self,
in_channels,
out_channels,
expansion=1,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN')):
super(BasicBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.expansion = expansion
assert self.expansion == 1
assert out_channels % expansion == 0
self.mid_channels = out_channels // expansion
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, self.mid_channels, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, out_channels, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
in_channels,
self.mid_channels,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
self.mid_channels,
out_channels,
3,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""Bottleneck block for ResNet.
Args:
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
expansion (int): The ratio of ``out_channels/mid_channels`` where
``mid_channels`` is the input/output channels of conv2. Default: 4.
stride (int): stride of the block. Default: 1
dilation (int): dilation of convolution. Default: 1
downsample (nn.Module, optional): downsample operation on identity
branch. Default: None.
style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: "pytorch".
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
conv_cfg (dict, optional): dictionary to construct and config conv
layer. Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
"""
def __init__(self,
in_channels,
out_channels,
expansion=4,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN')):
super(Bottleneck, self).__init__()
assert style in ['pytorch', 'caffe']
self.in_channels = in_channels
self.out_channels = out_channels
self.expansion = expansion
assert out_channels % expansion == 0
self.mid_channels = out_channels // expansion
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if self.style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, self.mid_channels, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, self.mid_channels, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, out_channels, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
in_channels,
self.mid_channels,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
self.mid_channels,
self.mid_channels,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
conv_cfg,
self.mid_channels,
out_channels,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
@property
def norm3(self):
return getattr(self, self.norm3_name)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
def get_expansion(block, expansion=None):
"""Get the expansion of a residual block.
The block expansion will be obtained by the following order:
1. If ``expansion`` is given, just return it.
2. If ``block`` has the attribute ``expansion``, then return
``block.expansion``.
    3. Return the default value according to the block type:
1 for ``BasicBlock`` and 4 for ``Bottleneck``.
Args:
block (class): The block class.
expansion (int | None): The given expansion ratio.
Returns:
int: The expansion of the block.
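    Example:
        >>> # Illustrative; BasicBlock/Bottleneck are the classes defined
        >>> # in this module, and the defaults follow the block type.
        >>> get_expansion(BasicBlock)
        1
        >>> get_expansion(Bottleneck)
        4
        >>> get_expansion(Bottleneck, expansion=2)
        2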
"""
if isinstance(expansion, int):
assert expansion > 0
elif expansion is None:
if hasattr(block, 'expansion'):
expansion = block.expansion
elif issubclass(block, WideBasicBlock):
expansion = 1
elif issubclass(block, BasicBlock):
expansion = 1
elif issubclass(block, Bottleneck):
expansion = 4
else:
raise TypeError(f'expansion is not specified for {block.__name__}')
else:
raise TypeError('expansion must be an integer or None')
return expansion
class ResLayer(nn.Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): Residual block used to build ResLayer.
num_blocks (int): Number of blocks.
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
expansion (int, optional): The expansion for BasicBlock/Bottleneck.
If not specified, it will firstly be obtained via
``block.expansion``. If the block has no attribute "expansion",
the following default values will be used: 1 for BasicBlock and
4 for Bottleneck. Default: None.
stride (int): stride of the first block. Default: 1.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict, optional): dictionary to construct and config conv
layer. Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
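    Example:
        >>> # Illustrative: two BasicBlocks with a stride-2 downsample in
        >>> # the first block.
        >>> import torch
        >>> layer = ResLayer(BasicBlock, num_blocks=2, in_channels=64,
        ...                  out_channels=128, stride=2)
        >>> layer(torch.rand(1, 64, 32, 32)).shape
        torch.Size([1, 128, 16, 16])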
"""
def __init__(self,
block,
num_blocks,
in_channels,
out_channels,
expansion=None,
stride=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
**kwargs):
self.block = block
self.expansion = get_expansion(block, expansion)
downsample = None
if stride != 1 or in_channels != out_channels:
downsample = []
conv_stride = stride
if avg_down and stride != 1:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
in_channels,
out_channels,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, out_channels)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
layers.append(
block(
in_channels=in_channels,
out_channels=out_channels,
expansion=self.expansion,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
in_channels = out_channels
for i in range(1, num_blocks):
layers.append(
block(
in_channels=in_channels,
out_channels=out_channels,
expansion=self.expansion,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super(ResLayer, self).__init__(*layers)
@BACKBONES.register_module()
class ResNet(BaseBackbone):
"""ResNet backbone.
Please refer to the `paper <https://arxiv.org/abs/1512.03385>`__ for
details.
Args:
depth (int): Network depth, from {18, 34, 50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
stem_channels (int): Output channels of the stem layer. Default: 64.
base_channels (int): Middle channels of the first stage. Default: 64.
num_stages (int): Stages of the network. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
Default: ``(1, 2, 2, 2)``.
dilations (Sequence[int]): Dilation of each stage.
Default: ``(1, 1, 1, 1)``.
out_indices (Sequence[int]): Output from which stages. If only one
stage is specified, a single tensor (feature map) is returned,
otherwise multiple stages are specified, a tuple of tensors will
be returned. Default: ``(3, )``.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv.
Default: False.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
conv_cfg (dict | None): The config dict for conv layers. Default: None.
norm_cfg (dict): The config dict for norm layers.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: True.
Example:
>>> from mmcls.models import ResNet
>>> import torch
>>> self = ResNet(depth=18)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 64, 8, 8)
(1, 128, 4, 4)
(1, 256, 2, 2)
(1, 512, 1, 1)
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
in_channels=3,
stem_channels=64,
base_channels=64,
expansion=None,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(3, ),
style='pytorch',
deep_stem=False,
avg_down=False,
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
with_cp=False,
zero_init_residual=True,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super(ResNet, self).__init__(init_cfg)
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.expansion = get_expansion(self.block, expansion)
self._make_stem_layer(in_channels, stem_channels)
self.res_layers = []
_in_channels = stem_channels
_out_channels = base_channels * self.expansion
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
res_layer = self.make_res_layer(
block=self.block,
num_blocks=num_blocks,
in_channels=_in_channels,
out_channels=_out_channels,
expansion=self.expansion,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
_in_channels = _out_channels
_out_channels *= 2
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = res_layer[-1].out_channels
def make_res_layer(self, **kwargs):
return ResLayer(**kwargs)
@property
def norm1(self):
return getattr(self, self.norm1_name)
def _make_stem_layer(self, in_channels, stem_channels):
if self.deep_stem:
self.stem = nn.Sequential(
ConvModule(
in_channels,
stem_channels // 2,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=True),
ConvModule(
stem_channels // 2,
stem_channels // 2,
kernel_size=3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=True),
ConvModule(
stem_channels // 2,
stem_channels,
kernel_size=3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=True))
else:
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, stem_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def _freeze_stages(self):
if self.frozen_stages >= 0:
if self.deep_stem:
self.stem.eval()
for param in self.stem.parameters():
param.requires_grad = False
else:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
super(ResNet, self).init_weights()
if (isinstance(self.init_cfg, dict)
and self.init_cfg['type'] == 'Pretrained'):
# Suppress zero_init_residual if use pretrained model.
return
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
def forward(self, x):
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self, mode=True):
super(ResNet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
                # trick: eval has an effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
@BACKBONES.register_module()
class ResNetV1d(ResNet):
"""ResNetV1d backbone.
This variant is described in `Bag of Tricks.
<https://arxiv.org/pdf/1812.01187.pdf>`_.
Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in
the input stem with three 3x3 convs. And in the downsampling block, a 2x2
avg_pool with stride 2 is added before conv, whose stride is changed to 1.
"""
def __init__(self, **kwargs):
super(ResNetV1d, self).__init__(
deep_stem=True, avg_down=True, **kwargs)
| 26,579 | 33.474708 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/backbones/tsn.py
|
import torch.nn as nn
import torch
from ..builder import BACKBONES, build_backbone
from .base_backbone import BaseBackbone
import torch.nn.functional as F
@BACKBONES.register_module()
class TSN_backbone(BaseBackbone):
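    """Backbone wrapper that global-average-pools the encoder output,
    projects it to ``out_channels`` dimensions with a linear layer, and
    returns batch-level ``(mu, log_var)`` statistics of the projection
    alongside the projection itself.
    """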
def __init__(self, backbone, in_channels, out_channels):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.encoder = build_backbone(backbone)
self.fc = nn.Linear(self.in_channels, self.out_channels, bias=False)
def forward(self, x):
x = self.encoder(x)
if isinstance(x, tuple):
x = x[-1]
x = F.adaptive_avg_pool2d(x, (1,1))
x = x.view(x.size(0), -1)
x = self.fc(x)
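        # mu / log_var are statistics over the batch dimension of the
        # projected features, not per-sample variational parameters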
mu = torch.mean(x, 0)
log_var = torch.log(torch.var(x, 0))
return (mu, log_var), x
| 860 | 25.90625 | 76 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/backbones/disentangle.py
|
import torch
import torch.nn as nn
from ..builder import BACKBONES
class Flatten3D(nn.Module):
def forward(self, x):
x = x.view(x.size()[0], -1)
return x
@BACKBONES.register_module()
class SimpleConv64(nn.Module):
def __init__(self,
latent_dim=10,
num_channels=1,
image_size=64
):
super().__init__()
assert image_size == 64, 'This model only works with image size 64x64.'
self.main = nn.Sequential(
nn.Conv2d(num_channels, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 128, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(128, 256, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(256, 256, 4, 2, 1),
nn.ReLU(True),
Flatten3D(),
nn.Linear(256, latent_dim, bias=True)
)
def forward(self, x):
output = self.main(x)
return (output, )
@BACKBONES.register_module()
class SimpleGaussianConv64(SimpleConv64):
def __init__(self, latent_dim, num_channels, image_size):
super().__init__(latent_dim * 2, num_channels, image_size)
# override value of _latent_dim
self._latent_dim = latent_dim
def forward(self, x):
mu_logvar = self.main(x)
mu = mu_logvar[:, :self._latent_dim]
logvar = mu_logvar[:, self._latent_dim:]
output = self.reparameterize(mu, logvar)
return (mu, logvar), output
def reparameterize(self, mu, logvar):
"""
Will a single z be enough ti compute the expectation
for the loss??
:param mu: (Tensor) Mean of the latent Gaussian
:param logvar: (Tensor) Standard deviation of the latent Gaussian
:return:
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
| 2,028 | 27.577465 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/backbones/base_backbone.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
class BaseBackbone(BaseModule, metaclass=ABCMeta):
"""Base backbone.
This class defines the basic functions of a backbone. Any backbone that
inherits this class should at least define its own `forward` function.
"""
def __init__(self, init_cfg=None):
super(BaseBackbone, self).__init__(init_cfg)
@abstractmethod
def forward(self, x):
"""Forward computation.
Args:
            x (Tensor | tuple[Tensor]): x could be a torch.Tensor or a tuple
                of torch.Tensor, containing input data for forward computation.
"""
pass
def train(self, mode=True):
"""Set module status before forward computation.
Args:
mode (bool): Whether it is train_mode or test_mode
"""
super(BaseBackbone, self).train(mode)
| 954 | 27.088235 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/backbones/resnet_cifar.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from .resnet import ResNet
@BACKBONES.register_module()
class ResNet_CIFAR(ResNet):
"""ResNet backbone for CIFAR.
    Compared to standard ResNet, it uses `kernel_size=3` and `stride=1` in
    conv1, and does not apply MaxPooling after the stem. It has been proven
    to be more efficient than standard ResNet in other public codebases,
    e.g., `https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py`.
Args:
depth (int): Network depth, from {18, 34, 50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
stem_channels (int): Output channels of the stem layer. Default: 64.
base_channels (int): Middle channels of the first stage. Default: 64.
num_stages (int): Stages of the network. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
Default: ``(1, 2, 2, 2)``.
dilations (Sequence[int]): Dilation of each stage.
Default: ``(1, 1, 1, 1)``.
out_indices (Sequence[int]): Output from which stages. If only one
stage is specified, a single tensor (feature map) is returned,
otherwise multiple stages are specified, a tuple of tensors will
be returned. Default: ``(3, )``.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): This network has specific designed stem, thus it is
asserted to be False.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
conv_cfg (dict | None): The config dict for conv layers. Default: None.
norm_cfg (dict): The config dict for norm layers.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: True.
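    Example:
        >>> # Illustrative; the CIFAR stem keeps the full 32x32 resolution
        >>> # going into the first stage.
        >>> import torch
        >>> self = ResNet_CIFAR(depth=18, out_indices=(0, 1, 2, 3))
        >>> outs = self(torch.rand(1, 3, 32, 32))
        >>> [tuple(o.shape) for o in outs]
        [(1, 64, 32, 32), (1, 128, 16, 16), (1, 256, 8, 8), (1, 512, 4, 4)]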
"""
def __init__(self, depth, deep_stem=False, **kwargs):
super(ResNet_CIFAR, self).__init__(
depth, deep_stem=deep_stem, **kwargs)
        assert not self.deep_stem, 'ResNet_CIFAR does not support deep_stem'
def _make_stem_layer(self, in_channels, base_channels):
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
base_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, base_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
| 3,707 | 44.219512 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/backbones/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .mobilenet_v2 import MobileNetV2
from .mobilenet_v2_cifar import MobileNetV2_CIFAR
from .resnet import ResNet, ResNetV1d
from .resnet_cifar import ResNet_CIFAR
from .shufflenet_v2 import ShuffleNetV2
from .tsn import TSN_backbone
from .wideresnet import WideResNet_CIFAR
from .disentangle import SimpleConv64, SimpleGaussianConv64
| 384 | 37.5 | 59 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/backbones/mobilenet_v2_cifar.py
|
from mmcls.models.backbones.mobilenet_v2 import MobileNetV2
from mmcls.models.builder import BACKBONES
from mmcv.cnn import ConvModule
from mmcls.models.utils import make_divisible
@BACKBONES.register_module()
class MobileNetV2_CIFAR(MobileNetV2):
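    # Identical to MobileNetV2 except that the third stage (below) and the
    # stem conv (re-defined in __init__) use stride 1, preserving spatial
    # resolution for 32x32 CIFAR inputs.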
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 1],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
widen_factor=1.,
out_indices=(7, ),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super().__init__(widen_factor=widen_factor,
out_indices=out_indices,
frozen_stages=frozen_stages,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
norm_eval=norm_eval,
with_cp=with_cp,
init_cfg=init_cfg)
self.in_channels = make_divisible(32 * widen_factor, 8)
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def forward(self, x):
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
| 2,067 | 35.280702 | 66 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/backbones/shufflenet_v2.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, constant_init, normal_init
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import channel_shuffle
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(BaseModule):
"""InvertedResidual block for ShuffleNetV2 backbone.
Args:
in_channels (int): The input channels of the block.
out_channels (int): The output channels of the block.
stride (int): Stride of the 3x3 convolution layer. Default: 1
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
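    Example:
        >>> # Illustrative; with stride 2 both branches halve the spatial
        >>> # size and each contributes out_channels // 2 features.
        >>> import torch
        >>> block = InvertedResidual(24, 116, stride=2)
        >>> block(torch.rand(1, 24, 56, 56)).shape
        torch.Size([1, 116, 28, 28])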
"""
def __init__(self,
in_channels,
out_channels,
stride=1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.stride = stride
self.with_cp = with_cp
branch_features = out_channels // 2
if self.stride == 1:
assert in_channels == branch_features * 2, (
                f'in_channels ({in_channels}) should be equal to '
f'branch_features * 2 ({branch_features * 2}) '
'when stride is 1')
if in_channels != branch_features * 2:
assert self.stride != 1, (
f'stride ({self.stride}) should not equal 1 when '
f'in_channels != branch_features * 2')
if self.stride > 1:
self.branch1 = nn.Sequential(
ConvModule(
in_channels,
in_channels,
kernel_size=3,
stride=self.stride,
padding=1,
groups=in_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None),
ConvModule(
in_channels,
branch_features,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
)
self.branch2 = nn.Sequential(
ConvModule(
in_channels if (self.stride > 1) else branch_features,
branch_features,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
branch_features,
branch_features,
kernel_size=3,
stride=self.stride,
padding=1,
groups=branch_features,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None),
ConvModule(
branch_features,
branch_features,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, x):
def _inner_forward(x):
if self.stride > 1:
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
else:
x1, x2 = x.chunk(2, dim=1)
out = torch.cat((x1, self.branch2(x2)), dim=1)
out = channel_shuffle(out, 2)
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class ShuffleNetV2(BaseBackbone):
"""ShuffleNetV2 backbone.
Args:
widen_factor (float): Width multiplier - adjusts the number of
channels in each layer by this amount. Default: 1.0.
out_indices (Sequence[int]): Output from which stages.
Default: (0, 1, 2, 3).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
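    Example:
        >>> # Illustrative; the default out_indices=(3, ) selects the
        >>> # final 1x1 conv features of the 1.0x model.
        >>> import torch
        >>> self = ShuffleNetV2(widen_factor=1.0)
        >>> outs = self(torch.rand(1, 3, 224, 224))
        >>> tuple(outs[-1].shape)
        (1, 1024, 7, 7)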
"""
def __init__(self,
widen_factor=1.0,
out_indices=(3, ),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
norm_eval=False,
with_cp=False,
init_cfg=None):
super(ShuffleNetV2, self).__init__(init_cfg)
self.stage_blocks = [4, 8, 4]
for index in out_indices:
if index not in range(0, 4):
raise ValueError('the item in out_indices must in '
f'range(0, 4). But received {index}')
if frozen_stages not in range(-1, 4):
raise ValueError('frozen_stages must be in range(-1, 4). '
f'But received {frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
if widen_factor == 0.5:
channels = [48, 96, 192, 1024]
elif widen_factor == 1.0:
channels = [116, 232, 464, 1024]
elif widen_factor == 1.5:
channels = [176, 352, 704, 1024]
elif widen_factor == 2.0:
channels = [244, 488, 976, 2048]
else:
raise ValueError('widen_factor must be in [0.5, 1.0, 1.5, 2.0]. '
f'But received {widen_factor}')
self.in_channels = 24
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layers = nn.ModuleList()
for i, num_blocks in enumerate(self.stage_blocks):
layer = self._make_layer(channels[i], num_blocks)
self.layers.append(layer)
output_channels = channels[-1]
self.layers.append(
ConvModule(
in_channels=self.in_channels,
out_channels=output_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def _make_layer(self, out_channels, num_blocks):
"""Stack blocks to make a layer.
Args:
out_channels (int): out_channels of the block.
num_blocks (int): number of blocks.
"""
layers = []
for i in range(num_blocks):
stride = 2 if i == 0 else 1
layers.append(
InvertedResidual(
in_channels=self.in_channels,
out_channels=out_channels,
stride=stride,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(self.frozen_stages):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
super(ShuffleNetV2, self).init_weights()
if (isinstance(self.init_cfg, dict)
and self.init_cfg['type'] == 'Pretrained'):
# Suppress default init if use pretrained model.
return
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d):
if 'conv1' in name:
normal_init(m, mean=0, std=0.01)
else:
normal_init(m, mean=0, std=1.0 / m.weight.shape[1])
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                constant_init(m, val=1, bias=0.0001)
if isinstance(m, _BatchNorm):
if m.running_mean is not None:
nn.init.constant_(m.running_mean, 0)
def forward(self, x):
x = self.conv1(x)
x = self.maxpool(x)
outs = []
for i, layer in enumerate(self.layers):
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self, mode=True):
super(ShuffleNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
| 10,408 | 33.92953 | 78 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/backbones/wideresnet.py
|
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer)
from .resnet import ResNet, WideBasicBlock
from ..builder import BACKBONES
@BACKBONES.register_module()
class WideResNet_CIFAR(ResNet):
"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of
channels which is twice larger in every block. The number of channels
in outer 1x1 convolutions is the same, e.g. last block in ResNet-50
has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048.
"""
arch_settings = {
28: (WideBasicBlock, (4, 4, 4)),
}
def __init__(self, depth, out_channel, deep_stem=False,
norm_cfg=dict(type='BN',
momentum=0.1,
requires_grad=True),
**kwargs):
super(WideResNet_CIFAR, self).__init__(
depth,
deep_stem=deep_stem,
norm_cfg=norm_cfg, **kwargs)
        assert not self.deep_stem, 'WideResNet_CIFAR does not support deep_stem'
self.norm_cfg = norm_cfg
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, out_channel, postfix=1)
self.add_module(self.norm1_name, norm1)
def _make_stem_layer(self, in_channels, base_channels):
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
base_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.relu = nn.LeakyReLU(0.1, inplace=True)
def forward(self, x):
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
if i == self.out_indices[-1]:
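                    # the pre-activation blocks defer their final BN + ReLU,
                    # so it is applied once after the last stage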
x = self.relu(self.norm1(x))
else:
x = self.relu(x)
outs.append(x)
        return tuple(outs)
| 2,163 | 33.349206 | 75 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/heads/base_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
class BaseHead(BaseModule, metaclass=ABCMeta):
"""Base head."""
def __init__(self, init_cfg=None):
super(BaseHead, self).__init__(init_cfg)
@abstractmethod
def forward_train(self, x, gt_label, **kwargs):
pass
| 369 | 22.125 | 51 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/heads/cls_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
from mmcls.models.losses import Accuracy
from ..builder import HEADS, build_loss
from ..utils import is_tracing
from .base_head import BaseHead
@HEADS.register_module()
class ClsHead(BaseHead):
"""classification head.
Args:
loss (dict): Config of classification loss.
topk (int | tuple): Top-k accuracy.
cal_acc (bool): Whether to calculate accuracy during training.
If you use Mixup/CutMix or something like that during training,
it is not reasonable to calculate accuracy. Defaults to False.
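    Example:
        >>> # Illustrative; the head consumes pre-computed logits.
        >>> import torch
        >>> head = ClsHead(cal_acc=True)
        >>> logits = torch.rand(4, 10)
        >>> gt_label = torch.randint(0, 10, (4, ))
        >>> sorted(head.forward_train(logits, gt_label).keys())
        ['accuracy', 'loss']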
"""
def __init__(self,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, ),
cal_acc=False,
init_cfg=None):
super(ClsHead, self).__init__(init_cfg=init_cfg)
assert isinstance(loss, dict)
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
for _topk in topk:
assert _topk > 0, 'Top-k should be larger than 0'
self.topk = topk
self.compute_loss = build_loss(loss)
self.compute_accuracy = Accuracy(topk=self.topk)
self.cal_acc = cal_acc
def loss(self, cls_score, gt_label):
num_samples = len(cls_score)
losses = dict()
# compute loss
loss = self.compute_loss(cls_score, gt_label, avg_factor=num_samples)
if self.cal_acc:
# compute accuracy
acc = self.compute_accuracy(cls_score, gt_label)
assert len(acc) == len(self.topk)
losses['accuracy'] = {
f'top-{k}': a
for k, a in zip(self.topk, acc)
}
losses['loss'] = loss
return losses
def forward_train(self, cls_score, gt_label):
if isinstance(cls_score, tuple):
cls_score = cls_score[-1]
losses = self.loss(cls_score, gt_label)
return losses
def simple_test(self, cls_score):
"""Test without augmentation."""
if isinstance(cls_score, tuple):
cls_score = cls_score[-1]
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(cls_score, dim=1) if cls_score is not None else None
return self.post_process(pred)
def post_process(self, pred):
on_trace = is_tracing()
if torch.onnx.is_in_onnx_export() or on_trace:
return pred
pred = list(pred.detach().cpu().numpy())
return pred
| 2,636 | 31.9625 | 77 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/heads/multi_label_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
from ..builder import HEADS, build_loss
from ..utils import is_tracing
from .base_head import BaseHead
@HEADS.register_module()
class MultiLabelClsHead(BaseHead):
"""Classification head for multilabel task.
Args:
loss (dict): Config of classification loss.
"""
def __init__(self,
loss=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=1.0),
init_cfg=None):
super(MultiLabelClsHead, self).__init__(init_cfg=init_cfg)
assert isinstance(loss, dict)
self.compute_loss = build_loss(loss)
def loss(self, cls_score, gt_label):
gt_label = gt_label.type_as(cls_score)
num_samples = len(cls_score)
losses = dict()
# map difficult examples to positive ones
_gt_label = torch.abs(gt_label)
# compute loss
loss = self.compute_loss(cls_score, _gt_label, avg_factor=num_samples)
losses['loss'] = loss
return losses
def forward_train(self, cls_score, gt_label):
if isinstance(cls_score, tuple):
cls_score = cls_score[-1]
gt_label = gt_label.type_as(cls_score)
losses = self.loss(cls_score, gt_label)
return losses
def simple_test(self, x):
if isinstance(x, tuple):
x = x[-1]
if isinstance(x, list):
x = sum(x) / float(len(x))
        pred = torch.sigmoid(x) if x is not None else None
return self.post_process(pred)
def post_process(self, pred):
on_trace = is_tracing()
if torch.onnx.is_in_onnx_export() or on_trace:
return pred
pred = list(pred.detach().cpu().numpy())
return pred
| 1,887 | 28.046154 | 78 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/heads/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cls_head import ClsHead
from .linear_head import LinearBCEClsHead, LinearClsHead
from .multi_label_head import MultiLabelClsHead
from .multitask_linear_head import MultiTaskLinearClsHead
| 242 | 33.714286 | 57 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/heads/multitask_linear_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import HEADS
from .cls_head import ClsHead
@HEADS.register_module()
class MultiTaskLinearClsHead(ClsHead):
"""Linear classifier head.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
init_cfg (dict | optional): The extra init config of layers.
Defaults to use dict(type='Normal', layer='Linear', std=0.01).
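    Example:
        >>> # Illustrative; two tasks with 10 and 5 classes share one
        >>> # 64-d feature.
        >>> import torch
        >>> head = MultiTaskLinearClsHead(num_classes=[10, 5], in_channels=64)
        >>> [tuple(logit.shape) for logit in head.get_logits(torch.rand(2, 64))]
        [(2, 10), (2, 5)]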
"""
def __init__(self,
num_classes,
in_channels,
init_cfg=dict(type='Normal', layer='Linear', std=0.01),
*args,
**kwargs):
super(MultiTaskLinearClsHead, self).__init__(
init_cfg=init_cfg, *args, **kwargs)
self.in_channels = in_channels
self.num_classes = num_classes
self.num_task = len(self.num_classes)
self.fcs = nn.ModuleList(
[nn.Linear(self.in_channels, self.num_classes[i])
for i in range(self.num_task)]
)
def simple_test(self, x):
"""Test without augmentation."""
if isinstance(x, tuple):
x = x[-1]
preds = []
for i in range(self.num_task):
cls_score = self.fcs[i](x)
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(
cls_score, dim=1) if cls_score is not None else None
preds.append(self.post_process(pred))
return preds
def get_logits(self, x):
if isinstance(x, tuple):
x = x[-1]
logits = []
for i in range(self.num_task):
logit = self.fcs[i](x)
logits.append(logit)
return logits
def forward_train(self, x, gt_label):
if isinstance(x, tuple):
x = x[-1]
losses = dict()
for i in range(self.num_task):
cls_score = self.fcs[i](x)
loss_task = self.loss(cls_score, gt_label[:, i])['loss']
losses[f'task{i}_loss'] = loss_task
return losses
| 2,253 | 30.746479 | 74 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/models/heads/linear_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import HEADS
from .cls_head import ClsHead
@HEADS.register_module()
class LinearClsHead(ClsHead):
"""Linear classifier head.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
init_cfg (dict | optional): The extra init config of layers.
Defaults to use dict(type='Normal', layer='Linear', std=0.01).
"""
def __init__(self,
num_classes,
in_channels,
init_cfg=dict(type='Normal', layer='Linear', std=0.01),
*args,
**kwargs):
super(LinearClsHead, self).__init__(init_cfg=init_cfg, *args, **kwargs)
self.in_channels = in_channels
self.num_classes = num_classes
if self.num_classes <= 0:
raise ValueError(
f'num_classes={num_classes} must be a positive integer')
self.fc = nn.Linear(self.in_channels, self.num_classes)
def simple_test(self, x):
"""Test without augmentation."""
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(cls_score, dim=1) if cls_score is not None else None
return self.post_process(pred)
def forward_train(self, x, gt_label):
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
losses = self.loss(cls_score, gt_label)
return losses
def get_logits(self, x):
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
return cls_score
@HEADS.register_module()
class LinearBCEClsHead(ClsHead):
"""Linear classifier head.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
init_cfg (dict | optional): The extra init config of layers.
Defaults to use dict(type='Normal', layer='Linear', std=0.01).
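    Example:
        >>> # Illustrative; labels are one-hot encoded before the
        >>> # sigmoid-based loss is applied.
        >>> import torch
        >>> head = LinearBCEClsHead(
        ...     num_classes=10, in_channels=64,
        ...     loss=dict(type='CrossEntropyLoss', use_sigmoid=True))
        >>> losses = head.forward_train(torch.rand(2, 64), torch.tensor([1, 3]))
        >>> list(losses.keys())
        ['loss']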
"""
def __init__(self,
num_classes,
in_channels,
init_cfg=dict(type='Normal', layer='Linear', std=0.01),
*args,
**kwargs):
super(LinearBCEClsHead, self).__init__(
init_cfg=init_cfg, *args, **kwargs)
self.in_channels = in_channels
self.num_classes = num_classes
if self.num_classes <= 0:
raise ValueError(
f'num_classes={num_classes} must be a positive integer')
self.fc = nn.Linear(self.in_channels, self.num_classes)
def simple_test(self, x):
"""Test without augmentation."""
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(cls_score, dim=1) if cls_score is not None else None
return self.post_process(pred)
def forward_train(self, x, gt_label):
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
onehot_gt_label = F.one_hot(gt_label,
num_classes=self.num_classes).float()
losses = self.loss(cls_score, onehot_gt_label)
return losses
def get_logits(self, x):
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
return cls_score
| 3,723 | 30.559322 | 79 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/datasets/base_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from abc import ABCMeta, abstractmethod
import mmcv
import numpy as np
from torch.utils.data import Dataset
from mmcls.core.evaluation import precision_recall_f1, support
from mmcls.models.losses import accuracy
from .pipelines import Compose
class BaseDataset(Dataset, metaclass=ABCMeta):
"""Base dataset.
Args:
data_prefix (str): the prefix of data path
pipeline (list): a list of dict, where each element represents
            an operation defined in `mmcls.datasets.pipelines`
ann_file (str | None): the annotation file. When ann_file is str,
the subclass is expected to read from the ann_file. When ann_file
is None, the subclass is expected to read according to data_prefix
        test_mode (bool): whether the dataset is used in test mode
"""
CLASSES = None
def __init__(self,
data_prefix,
pipeline,
classes=None,
ann_file=None,
test_mode=False):
super(BaseDataset, self).__init__()
self.ann_file = ann_file
self.data_prefix = data_prefix
self.test_mode = test_mode
self.pipeline = Compose(pipeline)
self.CLASSES = self.get_classes(classes)
self.data_infos = self.load_annotations()
@abstractmethod
def load_annotations(self):
pass
@property
def class_to_idx(self):
"""Map mapping class name to class index.
Returns:
dict: mapping from class name to class index.
"""
return {_class: i for i, _class in enumerate(self.CLASSES)}
def get_gt_labels(self):
"""Get all ground-truth labels (categories).
Returns:
list[int]: categories for all images.
"""
gt_labels = np.array([data['gt_label'] for data in self.data_infos])
return gt_labels
def get_cat_ids(self, idx):
"""Get category id by index.
Args:
idx (int): Index of data.
Returns:
int: Image category of specified index.
"""
        return self.data_infos[idx]['gt_label'].astype(np.int64)
def prepare_data(self, idx):
results = copy.deepcopy(self.data_infos[idx])
return self.pipeline(results)
def __len__(self):
return len(self.data_infos)
def __getitem__(self, idx):
return self.prepare_data(idx)
@classmethod
def get_classes(cls, classes=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
Returns:
tuple[str] or list[str]: Names of categories of the dataset.
"""
if classes is None:
return cls.CLASSES
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
return class_names
def evaluate(self,
results,
metric='accuracy',
metric_options=None,
logger=None):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
Default value is `accuracy`.
metric_options (dict, optional): Options for calculating metrics.
Allowed keys are 'topk', 'thrs' and 'average_mode'.
Defaults to None.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Defaults to None.
Returns:
dict: evaluation results
"""
if metric_options is None:
metric_options = {'topk': (1, 5)}
if isinstance(metric, str):
metrics = [metric]
else:
metrics = metric
allowed_metrics = [
'accuracy', 'precision', 'recall', 'f1_score', 'support'
]
eval_results = {}
results = np.vstack(results)
gt_labels = self.get_gt_labels()
num_imgs = len(results)
assert len(gt_labels) == num_imgs, 'dataset testing results should '\
'be of the same length as gt_labels.'
invalid_metrics = set(metrics) - set(allowed_metrics)
if len(invalid_metrics) != 0:
raise ValueError(f'metric {invalid_metrics} is not supported.')
topk = metric_options.get('topk', (1, 5))
thrs = metric_options.get('thrs')
average_mode = metric_options.get('average_mode', 'macro')
if 'accuracy' in metrics:
if thrs is not None:
acc = accuracy(results, gt_labels, topk=topk, thrs=thrs)
else:
acc = accuracy(results, gt_labels, topk=topk)
if isinstance(topk, tuple):
eval_results_ = {
f'accuracy_top-{k}': a
for k, a in zip(topk, acc)
}
else:
eval_results_ = {'accuracy': acc}
if isinstance(thrs, tuple):
for key, values in eval_results_.items():
eval_results.update({
f'{key}_thr_{thr:.2f}': value.item()
for thr, value in zip(thrs, values)
})
else:
eval_results.update(
{k: v.item()
for k, v in eval_results_.items()})
if 'support' in metrics:
support_value = support(
results, gt_labels, average_mode=average_mode)
eval_results['support'] = support_value
precision_recall_f1_keys = ['precision', 'recall', 'f1_score']
if len(set(metrics) & set(precision_recall_f1_keys)) != 0:
if thrs is not None:
precision_recall_f1_values = precision_recall_f1(
results, gt_labels, average_mode=average_mode, thrs=thrs)
else:
precision_recall_f1_values = precision_recall_f1(
results, gt_labels, average_mode=average_mode)
for key, values in zip(precision_recall_f1_keys,
precision_recall_f1_values):
if key in metrics:
if isinstance(thrs, tuple):
eval_results.update({
f'{key}_thr_{thr:.2f}': value
for thr, value in zip(thrs, values)
})
else:
eval_results[key] = values
return eval_results
| 7,191 | 33.576923 | 78 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/datasets/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import gzip
import hashlib
import os
import os.path
import shutil
import tarfile
import urllib.error
import urllib.request
import zipfile
__all__ = ['rm_suffix', 'check_integrity', 'download_and_extract_archive']
def rm_suffix(s, suffix=None):
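    # e.g. rm_suffix('a.tar.gz') -> 'a.tar';
    #      rm_suffix('a.tar.gz', suffix='.tar.gz') -> 'a'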
if suffix is None:
return s[:s.rfind('.')]
else:
return s[:s.rfind(suffix)]
def calculate_md5(fpath, chunk_size=1024 * 1024):
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath, md5, **kwargs):
return md5 == calculate_md5(fpath, **kwargs)
def check_integrity(fpath, md5=None):
if not os.path.isfile(fpath):
return False
if md5 is None:
return True
return check_md5(fpath, md5)
def download_url_to_file(url, fpath):
with urllib.request.urlopen(url) as resp, open(fpath, 'wb') as of:
shutil.copyfileobj(resp, of)
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from.
root (str): Directory to place downloaded file in.
filename (str | None): Name to save the file under.
If filename is None, use the basename of the URL.
md5 (str | None): MD5 checksum of the download.
If md5 is None, download without md5 check.
"""
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
os.makedirs(root, exist_ok=True)
if check_integrity(fpath, md5):
print(f'Using downloaded and verified file: {fpath}')
else:
try:
print(f'Downloading {url} to {fpath}')
download_url_to_file(url, fpath)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
f' Downloading {url} to {fpath}')
download_url_to_file(url, fpath)
else:
raise e
# check integrity of downloaded file
if not check_integrity(fpath, md5):
raise RuntimeError('File not found or corrupted.')
def _is_tarxz(filename):
    return filename.endswith('.tar.xz')


def _is_tar(filename):
    return filename.endswith('.tar')


def _is_targz(filename):
    return filename.endswith('.tar.gz')


def _is_tgz(filename):
    return filename.endswith('.tgz')


def _is_gzip(filename):
    return filename.endswith('.gz') and not filename.endswith('.tar.gz')


def _is_zip(filename):
    return filename.endswith('.zip')


def extract_archive(from_path, to_path=None, remove_finished=False):
    if to_path is None:
        to_path = os.path.dirname(from_path)

    if _is_tar(from_path):
        with tarfile.open(from_path, 'r') as tar:
            tar.extractall(path=to_path)
    elif _is_targz(from_path) or _is_tgz(from_path):
        with tarfile.open(from_path, 'r:gz') as tar:
            tar.extractall(path=to_path)
    elif _is_tarxz(from_path):
        with tarfile.open(from_path, 'r:xz') as tar:
            tar.extractall(path=to_path)
    elif _is_gzip(from_path):
        to_path = os.path.join(
            to_path,
            os.path.splitext(os.path.basename(from_path))[0])
        with open(to_path, 'wb') as out_f, gzip.GzipFile(from_path) as zip_f:
            out_f.write(zip_f.read())
    elif _is_zip(from_path):
        with zipfile.ZipFile(from_path, 'r') as z:
            z.extractall(to_path)
    else:
        raise ValueError(f'Extraction of {from_path} not supported')

    if remove_finished:
        os.remove(from_path)
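# extract_archive dispatches on the suffix helpers above; a sketch with a
# placeholder path (note the path is used as-is, with no `~` expansion):
#
# extract_archive('/tmp/mydata/train.tar.gz',
#                 to_path='/tmp/mydata', remove_finished=True)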
def download_and_extract_archive(url,
                                 download_root,
                                 extract_root=None,
                                 filename=None,
                                 md5=None,
                                 remove_finished=False):
    download_root = os.path.expanduser(download_root)
    if extract_root is None:
        extract_root = download_root
    if not filename:
        filename = os.path.basename(url)

    download_url(url, download_root, filename, md5)

    archive = os.path.join(download_root, filename)
    print(f'Extracting {archive} to {extract_root}')
    extract_archive(archive, extract_root, remove_finished)
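# End-to-end sketch combining the helpers in this module (URL and paths are
# illustrative only): download into one directory, extract into another, and
# delete the archive afterwards.
#
# download_and_extract_archive(
#     'https://example.com/data/train.zip',
#     download_root='/tmp/downloads',
#     extract_root='/tmp/datasets',
#     remove_finished=True)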
| 4,549 | 28.545455 | 77 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/datasets/dataset_wrappers.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import bisect
import math
from collections import defaultdict

import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset

from .builder import DATASETS


@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but additionally
    implements a `get_cat_ids` function.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES

    def get_cat_ids(self, idx):
        if idx < 0:
            if -idx > len(self):
                raise ValueError(
                    'absolute value of index should not exceed dataset length')
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx].get_cat_ids(sample_idx)
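# Sketch of the index arithmetic in `get_cat_ids` (`ds_a`/`ds_b` are
# hypothetical datasets sharing the same CLASSES): an index equal to
# len(ds_a) falls past the first cumulative size, so it resolves to the
# first sample of ds_b.
#
# concat = ConcatDataset([ds_a, ds_b])
# cat_ids = concat.get_cat_ids(len(ds_a))  # == ds_b.get_cat_ids(0)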
@DATASETS.register_module()
class RepeatDataset(object):
    """A wrapper of repeated dataset.

    The length of the repeated dataset will be `times` times that of the
    original dataset. This is useful when the data loading time is long but
    the dataset is small. Using RepeatDataset can reduce the data loading
    time between epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        return self.dataset[idx % self._ori_len]

    def get_cat_ids(self, idx):
        return self.dataset.get_cat_ids(idx % self._ori_len)

    def __len__(self):
        return self.times * self._ori_len
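# Sketch of RepeatDataset behaviour (`ds` is a hypothetical dataset of
# length 100): indices wrap modulo the original length, so the repeated
# dataset serves the same items `times` over.
#
# repeated = RepeatDataset(ds, times=5)
# assert len(repeated) == 500
# item = repeated[103]  # same underlying sample as ds[3]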
# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57  # noqa
@DATASETS.register_module()
class ClassBalancedDataset(object):
    r"""A wrapper of repeated dataset with repeat factor.

    Suitable for training on class imbalanced datasets like LVIS. Following
    the sampling strategy in [#1]_, in each epoch, an image may appear
    multiple times based on its "repeat factor".

    The repeat factor for an image is a function of the frequency of the
    rarest category labeled in that image. The "frequency of category c" in
    [0, 1] is defined as the fraction of images in the training set (without
    repeats) in which category c appears.

    The dataset needs to implement :func:`self.get_cat_ids` to support
    ClassBalancedDataset.

    The repeat factor is computed as follows.

    1. For each category c, compute the fraction :math:`f(c)` of images that
       contain it.
    2. For each category c, compute the category-level repeat factor

       .. math::
           r(c) = \max(1, \sqrt{\frac{t}{f(c)}})

    3. For each image I and its labels :math:`L(I)`, compute the image-level
       repeat factor

       .. math::
           r(I) = \max_{c \in L(I)} r(c)

    References:
        .. [#1] https://arxiv.org/pdf/1908.03195.pdf

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be repeated.
        oversample_thr (float): frequency threshold below which data is
            repeated. For categories with `f_c` >= `oversample_thr`, there is
            no oversampling. For categories with `f_c` < `oversample_thr`,
            the degree of oversampling follows the square-root inverse
            frequency heuristic above.
    """

    def __init__(self, dataset, oversample_thr):
        self.dataset = dataset
        self.oversample_thr = oversample_thr
        self.CLASSES = dataset.CLASSES

        repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
        repeat_indices = []
        for dataset_index, repeat_factor in enumerate(repeat_factors):
            repeat_indices.extend([dataset_index] * math.ceil(repeat_factor))
        self.repeat_indices = repeat_indices

        flags = []
        if hasattr(self.dataset, 'flag'):
            for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
                flags.extend([flag] * int(math.ceil(repeat_factor)))
            assert len(flags) == len(repeat_indices)
        self.flag = np.asarray(flags, dtype=np.uint8)

    def _get_repeat_factors(self, dataset, repeat_thr):
        # 1. For each category c, compute the fraction of images
        #    that contain it: f(c)
        category_freq = defaultdict(int)
        num_images = len(dataset)
        for idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(idx))
            for cat_id in cat_ids:
                category_freq[cat_id] += 1
        for k, v in category_freq.items():
            assert v > 0, f'category {k} does not contain any images'
            category_freq[k] = v / num_images

        # 2. For each category c, compute the category-level repeat factor:
        #    r(c) = max(1, sqrt(t/f(c)))
        category_repeat = {
            cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
            for cat_id, cat_freq in category_freq.items()
        }

        # 3. For each image I and its labels L(I), compute the image-level
        #    repeat factor:
        #    r(I) = max_{c in L(I)} r(c)
        repeat_factors = []
        for idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(idx))
            repeat_factor = max(
                {category_repeat[cat_id]
                 for cat_id in cat_ids})
            repeat_factors.append(repeat_factor)

        return repeat_factors

    def __getitem__(self, idx):
        ori_index = self.repeat_indices[idx]
        return self.dataset[ori_index]

    def __len__(self):
        return len(self.repeat_indices)
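# Worked example of the repeat-factor math in `_get_repeat_factors` (the
# numbers are illustrative): with oversample_thr t = 0.5, a category that
# appears in 12.5% of the images gets r(c) = max(1, sqrt(0.5 / 0.125)) = 2,
# so every image whose rarest label is that category shows up
# ceil(2) = 2 times per epoch, while a category with f(c) >= 0.5 keeps
# r(c) = 1 and is not oversampled.
#
# import math
# assert max(1.0, math.sqrt(0.5 / 0.125)) == 2.0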
| 6,092 | 34.219653 | 167 |
py
|
KnowledgeFactor
|
KnowledgeFactor-main/cls/mmcls/datasets/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_dataset import BaseDataset
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .cifar import CIFAR10, CIFAR100, CIFAR10_MultiTask, CIFAR10_2Task, CIFAR10_Select
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
                               RepeatDataset)
from .imagenet import ImageNet, ImageNet_MultiTask
from .samplers import DistributedSampler
from .disentangle_data import dSprites, Shape3D
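# With these classes registered in DATASETS, datasets are normally built
# from config dicts through the registry; a hedged sketch (the config below
# is a minimal placeholder, not a tested training setup):
#
# from mmcls.datasets import build_dataset
# dataset = build_dataset(
#     dict(type='CIFAR10', data_prefix='data/cifar10', pipeline=[]))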
| 503 | 41 | 86 |
py
|