# Copyright (c) OpenMMLab. All rights reserved.
import copy
import logging
import os.path as osp
import warnings
from abc import ABCMeta, abstractmethod

import torch
from torch.optim import Optimizer

import annotator.uniformer.mmcv as mmcv
from ..parallel import is_module_wrapper
from .checkpoint import load_checkpoint
from .dist_utils import get_dist_info
from .hooks import HOOKS, Hook
from .log_buffer import LogBuffer
from .priority import Priority, get_priority
from .utils import get_time_str


class BaseRunner(metaclass=ABCMeta):
    """The base class of Runner, a training helper for PyTorch.

    All subclasses should implement the following APIs:

    - ``run()``
    - ``train()``
    - ``val()``
    - ``save_checkpoint()``

    Args:
        model (:obj:`torch.nn.Module`): The model to be run.
        batch_processor (callable): A callable method that processes a data
            batch. The interface of this method should be
            ``batch_processor(model, data, train_mode) -> dict``
        optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
            optimizer (in most cases) or a dict of optimizers (in models that
            require more than one optimizer, e.g., GAN).
        work_dir (str, optional): The working directory to save checkpoints
            and logs. Defaults to None.
        logger (:obj:`logging.Logger`): Logger used during training.
            Defaults to None. (The default value is just for backward
            compatibility)
        meta (dict | None): A dict that records some important information,
            such as environment info and seed, which will be logged in the
            logger hook. Defaults to None.
        max_epochs (int, optional): Total training epochs.
        max_iters (int, optional): Total training iterations.
    """

    def __init__(self,
                 model,
                 batch_processor=None,
                 optimizer=None,
                 work_dir=None,
                 logger=None,
                 meta=None,
                 max_iters=None,
                 max_epochs=None):
        if batch_processor is not None:
            if not callable(batch_processor):
                raise TypeError('batch_processor must be callable, '
                                f'but got {type(batch_processor)}')
            warnings.warn('batch_processor is deprecated, please implement '
                          'train_step() and val_step() in the model instead.')
            # raise an error if `batch_processor` is not None and
            # `model.train_step()` exists.
            if is_module_wrapper(model):
                _model = model.module
            else:
                _model = model
            if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
                raise RuntimeError(
                    'batch_processor and model.train_step()/model.val_step() '
                    'cannot be both available.')
        else:
            assert hasattr(model, 'train_step')

        # check the type of `optimizer`
        if isinstance(optimizer, dict):
            for name, optim in optimizer.items():
                if not isinstance(optim, Optimizer):
                    raise TypeError(
                        f'optimizer must be a dict of torch.optim.Optimizers, '
                        f'but optimizer["{name}"] is a {type(optim)}')
        elif not isinstance(optimizer, Optimizer) and optimizer is not None:
            raise TypeError(
                f'optimizer must be a torch.optim.Optimizer object '
                f'or dict or None, but got {type(optimizer)}')

        # check the type of `logger`
        if not isinstance(logger, logging.Logger):
            raise TypeError(f'logger must be a logging.Logger object, '
                            f'but got {type(logger)}')

        # check the type of `meta`
        if meta is not None and not isinstance(meta, dict):
            raise TypeError(
                f'meta must be a dict or None, but got {type(meta)}')

        self.model = model
        self.batch_processor = batch_processor
        self.optimizer = optimizer
        self.logger = logger
        self.meta = meta
        # create work_dir
        if mmcv.is_str(work_dir):
            self.work_dir = osp.abspath(work_dir)
            mmcv.mkdir_or_exist(self.work_dir)
        elif work_dir is None:
            self.work_dir = None
        else:
            raise TypeError('"work_dir" must be a str or None')

        # get model name from the model class
        if hasattr(self.model, 'module'):
            self._model_name = self.model.module.__class__.__name__
        else:
            self._model_name = self.model.__class__.__name__

        self._rank, self._world_size = get_dist_info()
        self.timestamp = get_time_str()
        self.mode = None
        self._hooks = []
        self._epoch = 0
        self._iter = 0
        self._inner_iter = 0

        if max_epochs is not None and max_iters is not None:
            raise ValueError(
                'Only one of `max_epochs` or `max_iters` can be set.')

        self._max_epochs = max_epochs
        self._max_iters = max_iters
        # TODO: Redesign LogBuffer, it is not flexible and elegant enough
        self.log_buffer = LogBuffer()

    @property
    def model_name(self):
        """str: Name of the model, usually the module class name."""
        return self._model_name

    @property
    def rank(self):
        """int: Rank of current process. (distributed training)"""
        return self._rank

    @property
    def world_size(self):
        """int: Number of processes participating in the job.
        (distributed training)"""
        return self._world_size

    @property
    def hooks(self):
        """list[:obj:`Hook`]: A list of registered hooks."""
        return self._hooks

    @property
    def epoch(self):
        """int: Current epoch."""
        return self._epoch

    @property
    def iter(self):
        """int: Current iteration."""
        return self._iter

    @property
    def inner_iter(self):
        """int: Iteration in an epoch."""
        return self._inner_iter

    @property
    def max_epochs(self):
        """int: Maximum training epochs."""
        return self._max_epochs

    @property
    def max_iters(self):
        """int: Maximum training iterations."""
        return self._max_iters

    @abstractmethod
    def train(self):
        pass

    @abstractmethod
    def val(self):
        pass

    @abstractmethod
    def run(self, data_loaders, workflow, **kwargs):
        pass

    @abstractmethod
    def save_checkpoint(self,
                        out_dir,
                        filename_tmpl,
                        save_optimizer=True,
                        meta=None,
                        create_symlink=True):
        pass

    def current_lr(self):
        """Get current learning rates.

        Returns:
            list[float] | dict[str, list[float]]: Current learning rates of
                all param groups. If the runner has a dict of optimizers, this
                method will return a dict.
        """
        if isinstance(self.optimizer, torch.optim.Optimizer):
            lr = [group['lr'] for group in self.optimizer.param_groups]
        elif isinstance(self.optimizer, dict):
            lr = dict()
            for name, optim in self.optimizer.items():
                lr[name] = [group['lr'] for group in optim.param_groups]
        else:
            raise RuntimeError(
                'lr is not applicable because optimizer does not exist.')
        return lr

    def current_momentum(self):
        """Get current momentums.

        Returns:
            list[float] | dict[str, list[float]]: Current momentums of all
                param groups. If the runner has a dict of optimizers, this
                method will return a dict.
        """

        def _get_momentum(optimizer):
            momentums = []
            for group in optimizer.param_groups:
                if 'momentum' in group.keys():
                    momentums.append(group['momentum'])
                elif 'betas' in group.keys():
                    momentums.append(group['betas'][0])
                else:
                    momentums.append(0)
            return momentums

        if self.optimizer is None:
            raise RuntimeError(
                'momentum is not applicable because optimizer does not exist.')
        elif isinstance(self.optimizer, torch.optim.Optimizer):
            momentums = _get_momentum(self.optimizer)
        elif isinstance(self.optimizer, dict):
            momentums = dict()
            for name, optim in self.optimizer.items():
                momentums[name] = _get_momentum(optim)
        return momentums

    def register_hook(self, hook, priority='NORMAL'):
        """Register a hook into the hook list.

        The hook will be inserted into a priority queue, with the specified
        priority (See :class:`Priority` for details of priorities).
        For hooks with the same priority, they will be triggered in the same
        order as they are registered.

        Args:
            hook (:obj:`Hook`): The hook to be registered.
            priority (int or str or :obj:`Priority`): Hook priority.
                Lower value means higher priority.
        """
        assert isinstance(hook, Hook)
        if hasattr(hook, 'priority'):
            raise ValueError('"priority" is a reserved attribute for hooks')
        priority = get_priority(priority)
        hook.priority = priority
        # insert the hook to a sorted list
        inserted = False
        for i in range(len(self._hooks) - 1, -1, -1):
            if priority >= self._hooks[i].priority:
                self._hooks.insert(i + 1, hook)
                inserted = True
                break
        if not inserted:
            self._hooks.insert(0, hook)

    def register_hook_from_cfg(self, hook_cfg):
        """Register a hook from its cfg.

        Args:
            hook_cfg (dict): Hook config. It should have at least keys 'type'
                and 'priority' indicating its type and priority.

        Notes:
            The specific hook class to register should not use 'type' and
            'priority' arguments during initialization.
        """
        hook_cfg = hook_cfg.copy()
        priority = hook_cfg.pop('priority', 'NORMAL')
        hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
        self.register_hook(hook, priority=priority)

    def call_hook(self, fn_name):
        """Call all hooks.

        Args:
            fn_name (str): The function name in each hook to be called, such
                as "before_train_epoch".
        """
        for hook in self._hooks:
            getattr(hook, fn_name)(self)

    def get_hook_info(self):
        # Get hooks info in each stage
        stage_hook_map = {stage: [] for stage in Hook.stages}
        for hook in self.hooks:
            try:
                priority = Priority(hook.priority).name
            except ValueError:
                priority = hook.priority
            classname = hook.__class__.__name__
            hook_info = f'({priority:<12}) {classname:<35}'
            for trigger_stage in hook.get_triggered_stages():
                stage_hook_map[trigger_stage].append(hook_info)

        stage_hook_infos = []
        for stage in Hook.stages:
            hook_infos = stage_hook_map[stage]
            if len(hook_infos) > 0:
                info = f'{stage}:\n'
                info += '\n'.join(hook_infos)
                info += '\n -------------------- '
                stage_hook_infos.append(info)
        return '\n'.join(stage_hook_infos)

    def load_checkpoint(self,
                        filename,
                        map_location='cpu',
                        strict=False,
                        revise_keys=[(r'^module\.', '')]):
        return load_checkpoint(
            self.model,
            filename,
            map_location,
            strict,
            self.logger,
            revise_keys=revise_keys)

    def resume(self,
               checkpoint,
               resume_optimizer=True,
               map_location='default'):
        if map_location == 'default':
            if torch.cuda.is_available():
                device_id = torch.cuda.current_device()
                checkpoint = self.load_checkpoint(
                    checkpoint,
                    map_location=lambda storage, loc: storage.cuda(device_id))
            else:
                checkpoint = self.load_checkpoint(checkpoint)
        else:
            checkpoint = self.load_checkpoint(
                checkpoint, map_location=map_location)

        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']
        if self.meta is None:
            self.meta = {}
        self.meta.setdefault('hook_msgs', {})
        # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages
        self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {}))

        # Re-calculate the number of iterations when resuming
        # models with a different number of GPUs
        if 'config' in checkpoint['meta']:
            config = mmcv.Config.fromstring(
                checkpoint['meta']['config'], file_format='.py')
            previous_gpu_ids = config.get('gpu_ids', None)
            if previous_gpu_ids and len(previous_gpu_ids) > 0 and len(
                    previous_gpu_ids) != self.world_size:
                self._iter = int(self._iter * len(previous_gpu_ids) /
                                 self.world_size)
                self.logger.info('the iteration number is changed due to '
                                 'change of GPU number')

        # resume meta information
        self.meta = checkpoint['meta']

        if 'optimizer' in checkpoint and resume_optimizer:
            if isinstance(self.optimizer, Optimizer):
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            elif isinstance(self.optimizer, dict):
                for k in self.optimizer.keys():
                    self.optimizer[k].load_state_dict(
                        checkpoint['optimizer'][k])
            else:
                raise TypeError(
                    'Optimizer should be dict or torch.optim.Optimizer '
                    f'but got {type(self.optimizer)}')

        self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)

    def register_lr_hook(self, lr_config):
        if lr_config is None:
            return
        elif isinstance(lr_config, dict):
            assert 'policy' in lr_config
            policy_type = lr_config.pop('policy')
            # If the type of policy is all in lower case, e.g., 'cyclic',
            # then its first letter will be capitalized, e.g., to be 'Cyclic'.
            # This is for the convenient usage of Lr updater.
            # Since this is not applicable for
            # `CosineAnnealingLrUpdater`,
            # the string will not be changed if it contains capital letters.
            if policy_type == policy_type.lower():
                policy_type = policy_type.title()
            hook_type = policy_type + 'LrUpdaterHook'
            lr_config['type'] = hook_type
            hook = mmcv.build_from_cfg(lr_config, HOOKS)
        else:
            hook = lr_config
        self.register_hook(hook, priority='VERY_HIGH')

    def register_momentum_hook(self, momentum_config):
        if momentum_config is None:
            return
        if isinstance(momentum_config, dict):
            assert 'policy' in momentum_config
            policy_type = momentum_config.pop('policy')
            # If the type of policy is all in lower case, e.g., 'cyclic',
            # then its first letter will be capitalized, e.g., to be 'Cyclic'.
            # This is for the convenient usage of momentum updater.
            # Since this is not applicable for
            # `CosineAnnealingMomentumUpdater`,
            # the string will not be changed if it contains capital letters.
            if policy_type == policy_type.lower():
                policy_type = policy_type.title()
            hook_type = policy_type + 'MomentumUpdaterHook'
            momentum_config['type'] = hook_type
            hook = mmcv.build_from_cfg(momentum_config, HOOKS)
        else:
            hook = momentum_config
        self.register_hook(hook, priority='HIGH')

    def register_optimizer_hook(self, optimizer_config):
        if optimizer_config is None:
            return
        if isinstance(optimizer_config, dict):
            optimizer_config.setdefault('type', 'OptimizerHook')
            hook = mmcv.build_from_cfg(optimizer_config, HOOKS)
        else:
            hook = optimizer_config
        self.register_hook(hook, priority='ABOVE_NORMAL')

    def register_checkpoint_hook(self, checkpoint_config):
        if checkpoint_config is None:
            return
        if isinstance(checkpoint_config, dict):
            checkpoint_config.setdefault('type', 'CheckpointHook')
            hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)
        else:
            hook = checkpoint_config
        self.register_hook(hook, priority='NORMAL')

    def register_logger_hooks(self, log_config):
        if log_config is None:
            return
        log_interval = log_config['interval']
        for info in log_config['hooks']:
            logger_hook = mmcv.build_from_cfg(
                info, HOOKS, default_args=dict(interval=log_interval))
            self.register_hook(logger_hook, priority='VERY_LOW')

    def register_timer_hook(self, timer_config):
        if timer_config is None:
            return
        if isinstance(timer_config, dict):
            timer_config_ = copy.deepcopy(timer_config)
            hook = mmcv.build_from_cfg(timer_config_, HOOKS)
        else:
            hook = timer_config
        self.register_hook(hook, priority='LOW')

    def register_custom_hooks(self, custom_config):
        if custom_config is None:
            return

        if not isinstance(custom_config, list):
            custom_config = [custom_config]

        for item in custom_config:
            if isinstance(item, dict):
                self.register_hook_from_cfg(item)
            else:
                self.register_hook(item, priority='NORMAL')

    def register_profiler_hook(self, profiler_config):
        if profiler_config is None:
            return
        if isinstance(profiler_config, dict):
            profiler_config.setdefault('type', 'ProfilerHook')
            hook = mmcv.build_from_cfg(profiler_config, HOOKS)
        else:
            hook = profiler_config
        self.register_hook(hook)

    def register_training_hooks(self,
                                lr_config,
                                optimizer_config=None,
                                checkpoint_config=None,
                                log_config=None,
                                momentum_config=None,
                                timer_config=dict(type='IterTimerHook'),
                                custom_hooks_config=None):
        """Register default and custom hooks for training.

        Default and custom hooks include:

        +----------------------+-------------------------+
        | Hooks                | Priority                |
        +======================+=========================+
        | LrUpdaterHook        | VERY_HIGH (10)          |
        +----------------------+-------------------------+
        | MomentumUpdaterHook  | HIGH (30)               |
        +----------------------+-------------------------+
        | OptimizerStepperHook | ABOVE_NORMAL (40)       |
        +----------------------+-------------------------+
        | CheckpointSaverHook  | NORMAL (50)             |
        +----------------------+-------------------------+
        | IterTimerHook        | LOW (70)                |
        +----------------------+-------------------------+
        | LoggerHook(s)        | VERY_LOW (90)           |
        +----------------------+-------------------------+
        | CustomHook(s)        | defaults to NORMAL (50) |
        +----------------------+-------------------------+

        If custom hooks have the same priority as default hooks, custom hooks
        will be triggered after default hooks.
        """
        self.register_lr_hook(lr_config)
        self.register_momentum_hook(momentum_config)
        self.register_optimizer_hook(optimizer_config)
        self.register_checkpoint_hook(checkpoint_config)
        self.register_timer_hook(timer_config)
        self.register_logger_hooks(log_config)
        self.register_custom_hooks(custom_hooks_config)
repo_name: trt-samples-for-hackathon-cn-master
file_path: Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/base_runner.py
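Usage sketch (not part of the repository above): a minimal example of constructing a concrete runner and registering a custom hook with an explicit priority. `ToyModel` and `PrintHook` are hypothetical names for illustration, and the example assumes the `annotator.uniformer.mmcv` package is importable.

import logging

import torch.nn as nn
from annotator.uniformer.mmcv.runner import EpochBasedRunner, Hook


class ToyModel(nn.Module):
    # BaseRunner requires a `train_step` when no batch_processor is given
    def train_step(self, data_batch, optimizer, **kwargs):
        return dict(loss=0.0, log_vars=dict(loss=0.0), num_samples=1)


class PrintHook(Hook):
    def before_train_epoch(self, runner):
        print(f'starting epoch {runner.epoch}')


runner = EpochBasedRunner(
    model=ToyModel(),
    logger=logging.getLogger('toy'),
    max_epochs=1)
# 'LOW' maps to priority 70, so this hook fires after the default hooks
runner.register_hook(PrintHook(), priority='LOW')

Because `register_hook` keeps the hook list sorted by priority value, hooks registered later with the same priority land behind earlier ones, which matches the docstring's ordering guarantee.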
# Copyright (c) OpenMMLab. All rights reserved.
import io
import os
import os.path as osp
import pkgutil
import re
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory

import torch
import torchvision
from torch.optim import Optimizer
from torch.utils import model_zoo

import annotator.uniformer.mmcv as mmcv
from ..fileio import FileClient
from ..fileio import load as load_file
from ..parallel import is_module_wrapper
from ..utils import mkdir_or_exist
from .dist_utils import get_dist_info

ENV_MMCV_HOME = 'MMCV_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'


def _get_mmcv_home():
    mmcv_home = os.path.expanduser(
        os.getenv(
            ENV_MMCV_HOME,
            os.path.join(
                os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))

    mkdir_or_exist(mmcv_home)
    return mmcv_home


def load_state_dict(module, state_dict, strict=False, logger=None):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    The default value for ``strict`` is set to ``False`` and the message for
    param mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        logger (:obj:`logging.Logger`, optional): Logger to log the error
            message. If not specified, print function will be used.
    """
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []

    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    # use _load_from_state_dict to enable checkpoint version control
    def load(module, prefix=''):
        # recursively check parallel module in case that the model has a
        # complicated structure, e.g., nn.Module(nn.Module(DDP))
        if is_module_wrapper(module):
            module = module.module
        local_metadata = {} if metadata is None else metadata.get(
            prefix[:-1], {})
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     all_missing_keys, unexpected_keys,
                                     err_msg)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    load(module)
    load = None  # break load->load reference cycle

    # ignore "num_batches_tracked" of BN layers
    missing_keys = [
        key for key in all_missing_keys if 'num_batches_tracked' not in key
    ]

    if unexpected_keys:
        err_msg.append('unexpected key in source '
                       f'state_dict: {", ".join(unexpected_keys)}\n')
    if missing_keys:
        err_msg.append(
            f'missing keys in source state_dict: {", ".join(missing_keys)}\n')

    rank, _ = get_dist_info()
    if len(err_msg) > 0 and rank == 0:
        err_msg.insert(
            0, 'The model and loaded state dict do not match exactly\n')
        err_msg = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(err_msg)
        elif logger is not None:
            logger.warning(err_msg)
        else:
            print(err_msg)


def get_torchvision_models():
    model_urls = dict()
    for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
        if ispkg:
            continue
        _zoo = import_module(f'torchvision.models.{name}')
        if hasattr(_zoo, 'model_urls'):
            _urls = getattr(_zoo, 'model_urls')
            model_urls.update(_urls)
    return model_urls


def get_external_models():
    mmcv_home = _get_mmcv_home()
    default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
    default_urls = load_file(default_json_path)
    assert isinstance(default_urls, dict)
    external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
    if osp.exists(external_json_path):
        external_urls = load_file(external_json_path)
        assert isinstance(external_urls, dict)
        default_urls.update(external_urls)

    return default_urls


def get_mmcls_models():
    mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
    mmcls_urls = load_file(mmcls_json_path)
    return mmcls_urls


def get_deprecated_model_names():
    deprecate_json_path = osp.join(mmcv.__path__[0],
                                   'model_zoo/deprecated.json')
    deprecate_urls = load_file(deprecate_json_path)
    assert isinstance(deprecate_urls, dict)
    return deprecate_urls


def _process_mmcls_checkpoint(checkpoint):
    state_dict = checkpoint['state_dict']
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if k.startswith('backbone.'):
            new_state_dict[k[9:]] = v
    new_checkpoint = dict(state_dict=new_state_dict)
    return new_checkpoint


class CheckpointLoader:
    """A general checkpoint loader to manage all schemes."""

    _schemes = {}

    @classmethod
    def _register_scheme(cls, prefixes, loader, force=False):
        if isinstance(prefixes, str):
            prefixes = [prefixes]
        else:
            assert isinstance(prefixes, (list, tuple))
        for prefix in prefixes:
            if (prefix not in cls._schemes) or force:
                cls._schemes[prefix] = loader
            else:
                raise KeyError(
                    f'{prefix} is already registered as a loader backend, '
                    'add "force=True" if you want to override it')
        # sort, longer prefixes take priority
        cls._schemes = OrderedDict(
            sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))

    @classmethod
    def register_scheme(cls, prefixes, loader=None, force=False):
        """Register a loader to CheckpointLoader.

        This method can be used as a normal class method or a decorator.

        Args:
            prefixes (str or list[str] or tuple[str]):
                The prefix of the registered loader.
            loader (function, optional): The loader function to be registered.
                When this method is used as a decorator, loader is None.
                Defaults to None.
            force (bool, optional): Whether to override the loader
                if the prefix has already been registered. Defaults to False.
        """

        if loader is not None:
            cls._register_scheme(prefixes, loader, force=force)
            return

        def _register(loader_cls):
            cls._register_scheme(prefixes, loader_cls, force=force)
            return loader_cls

        return _register

    @classmethod
    def _get_checkpoint_loader(cls, path):
        """Finds a loader that supports the given path. Falls back to the
        local loader if no other loader is found.

        Args:
            path (str): checkpoint path

        Returns:
            loader (function): checkpoint loader
        """

        for p in cls._schemes:
            if path.startswith(p):
                return cls._schemes[p]

    @classmethod
    def load_checkpoint(cls, filename, map_location=None, logger=None):
        """Load checkpoint through a URL scheme path.

        Args:
            filename (str): checkpoint file name with given prefix
            map_location (str, optional): Same as :func:`torch.load`.
                Default: None
            logger (:mod:`logging.Logger`, optional): The logger for message.
                Default: None

        Returns:
            dict or OrderedDict: The loaded checkpoint.
        """

        checkpoint_loader = cls._get_checkpoint_loader(filename)
        class_name = checkpoint_loader.__name__
        mmcv.print_log(
            f'load checkpoint from {class_name[10:]} path: {filename}',
            logger)
        return checkpoint_loader(filename, map_location)


@CheckpointLoader.register_scheme(prefixes='')
def load_from_local(filename, map_location):
    """Load checkpoint by local file path.

    Args:
        filename (str): local checkpoint file path
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    if not osp.isfile(filename):
        raise IOError(f'{filename} is not a checkpoint file')
    checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint


@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
def load_from_http(filename, map_location=None, model_dir=None):
    """Load checkpoint through an HTTP or HTTPS scheme path. In distributed
    setting, this function downloads the checkpoint only at local rank 0.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.
        model_dir (string, optional): directory in which to save the object,
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    rank, world_size = get_dist_info()
    rank = int(os.environ.get('LOCAL_RANK', rank))
    if rank == 0:
        checkpoint = model_zoo.load_url(
            filename, model_dir=model_dir, map_location=map_location)
    if world_size > 1:
        torch.distributed.barrier()
        if rank > 0:
            checkpoint = model_zoo.load_url(
                filename, model_dir=model_dir, map_location=map_location)
    return checkpoint


@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
    """Load checkpoint through the file path prefixed with pavi. In
    distributed setting, this function downloads the checkpoint at all ranks
    to different temporary directories.

    Args:
        filename (str): checkpoint file path with pavi prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    assert filename.startswith('pavi://'), \
        f'Expected filename startswith `pavi://`, but got {filename}'
    model_path = filename[7:]

    try:
        from pavi import modelcloud
    except ImportError:
        raise ImportError(
            'Please install pavi to load checkpoint from modelcloud.')

    model = modelcloud.get(model_path)
    with TemporaryDirectory() as tmp_dir:
        downloaded_file = osp.join(tmp_dir, model.name)
        model.download(downloaded_file)
        checkpoint = torch.load(downloaded_file, map_location=map_location)
    return checkpoint


@CheckpointLoader.register_scheme(prefixes='s3://')
def load_from_ceph(filename, map_location=None, backend='petrel'):
    """Load checkpoint through the file path prefixed with s3. In distributed
    setting, this function downloads the checkpoint at all ranks to different
    temporary directories.

    Args:
        filename (str): checkpoint file path with s3 prefix
        map_location (str, optional): Same as :func:`torch.load`.
        backend (str, optional): The storage backend type. Options are 'ceph',
            'petrel'. Default: 'petrel'.

    .. warning::
        :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
        please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    allowed_backends = ['ceph', 'petrel']
    if backend not in allowed_backends:
        raise ValueError(f'Load from Backend {backend} is not supported.')

    if backend == 'ceph':
        warnings.warn(
            'CephBackend will be deprecated, please use PetrelBackend instead')

    # CephClient and PetrelBackend have the same prefix 's3://' and the latter
    # will be chosen as default. If PetrelBackend can not be instantiated
    # successfully, the CephClient will be chosen.
    try:
        file_client = FileClient(backend=backend)
    except ImportError:
        allowed_backends.remove(backend)
        file_client = FileClient(backend=allowed_backends[0])

    with io.BytesIO(file_client.get(filename)) as buffer:
        checkpoint = torch.load(buffer, map_location=map_location)
    return checkpoint


@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
def load_from_torchvision(filename, map_location=None):
    """Load checkpoint through the file path prefixed with modelzoo or
    torchvision.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    model_urls = get_torchvision_models()
    if filename.startswith('modelzoo://'):
        warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
                      'use "torchvision://" instead')
        model_name = filename[11:]
    else:
        model_name = filename[14:]
    return load_from_http(model_urls[model_name], map_location=map_location)


@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
def load_from_openmmlab(filename, map_location=None):
    """Load checkpoint through the file path prefixed with open-mmlab or
    openmmlab.

    Args:
        filename (str): checkpoint file path with open-mmlab or
            openmmlab prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    model_urls = get_external_models()
    prefix_str = 'open-mmlab://'
    if filename.startswith(prefix_str):
        model_name = filename[13:]
    else:
        model_name = filename[12:]
        prefix_str = 'openmmlab://'

    deprecated_urls = get_deprecated_model_names()
    if model_name in deprecated_urls:
        warnings.warn(f'{prefix_str}{model_name} is deprecated in favor '
                      f'of {prefix_str}{deprecated_urls[model_name]}')
        model_name = deprecated_urls[model_name]
    model_url = model_urls[model_name]
    # check if is url
    if model_url.startswith(('http://', 'https://')):
        checkpoint = load_from_http(model_url, map_location=map_location)
    else:
        filename = osp.join(_get_mmcv_home(), model_url)
        if not osp.isfile(filename):
            raise IOError(f'{filename} is not a checkpoint file')
        checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint


@CheckpointLoader.register_scheme(prefixes='mmcls://')
def load_from_mmcls(filename, map_location=None):
    """Load checkpoint through the file path prefixed with mmcls.

    Args:
        filename (str): checkpoint file path with mmcls prefix
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    model_urls = get_mmcls_models()
    model_name = filename[8:]
    checkpoint = load_from_http(
        model_urls[model_name], map_location=map_location)
    checkpoint = _process_mmcls_checkpoint(checkpoint)
    return checkpoint


def _load_checkpoint(filename, map_location=None, logger=None):
    """Load checkpoint from somewhere (modelzoo, file, url).

    Args:
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None.
        logger (:mod:`logging.Logger`, optional): The logger for error
            message. Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint. It can be either an
            OrderedDict storing model weights or a dict containing other
            information, which depends on the checkpoint.
    """
    return CheckpointLoader.load_checkpoint(filename, map_location, logger)


def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
    """Load partial pretrained model with specific prefix.

    Args:
        prefix (str): The prefix of sub-module.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str | None): Same as :func:`torch.load`. Default: None.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """

    checkpoint = _load_checkpoint(filename, map_location=map_location)

    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    if not prefix.endswith('.'):
        prefix += '.'
    prefix_len = len(prefix)

    state_dict = {
        k[prefix_len:]: v
        for k, v in state_dict.items() if k.startswith(prefix)
    }

    assert state_dict, f'{prefix} is not in the pretrained model'
    return state_dict


def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None,
                    revise_keys=[(r'^module\.', '')]):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.
        revise_keys (list): A list of customized keywords to modify the
            state_dict in checkpoint. Each item is a (pattern, replacement)
            pair of the regular expression operations. Default: strip
            the prefix 'module.' by [(r'^module\\.', '')].

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    checkpoint = _load_checkpoint(filename, map_location, logger)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        raise RuntimeError(
            f'No state_dict found in checkpoint file {filename}')
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint

    # strip prefix of state_dict
    metadata = getattr(state_dict, '_metadata', OrderedDict())
    for p, r in revise_keys:
        state_dict = OrderedDict(
            {re.sub(p, r, k): v
             for k, v in state_dict.items()})
    # Keep metadata in state_dict
    state_dict._metadata = metadata

    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint


def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on CPU.
    """
    state_dict_cpu = OrderedDict()
    for key, val in state_dict.items():
        state_dict_cpu[key] = val.cpu()
    # Keep metadata in state_dict
    state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
    return state_dict_cpu


def _save_to_state_dict(module, destination, prefix, keep_vars):
    """Saves module state to `destination` dictionary.

    This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (dict): A dict where state will be stored.
        prefix (str): The prefix for parameters and buffers used in this
            module.
        keep_vars (bool): Whether to keep the variable property of the
            parameters.
    """
    for name, param in module._parameters.items():
        if param is not None:
            destination[prefix + name] = param if keep_vars else param.detach()
    for name, buf in module._buffers.items():
        # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
        if buf is not None:
            destination[prefix + name] = buf if keep_vars else buf.detach()


def get_state_dict(module, destination=None, prefix='', keep_vars=False):
    """Returns a dictionary containing a whole state of the module.

    Both parameters and persistent buffers (e.g. running averages) are
    included. Keys are corresponding parameter and buffer names.

    This method is modified from :meth:`torch.nn.Module.state_dict` to
    recursively check parallel module in case that the model has a complicated
    structure, e.g., nn.Module(nn.Module(DDP)).

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (OrderedDict): Returned dict for the state of the module.
        prefix (str): Prefix of the key.
        keep_vars (bool): Whether to keep the variable property of the
            parameters. Default: False.

    Returns:
        dict: A dictionary containing a whole state of the module.
    """
    # recursively check parallel module in case that the model has a
    # complicated structure, e.g., nn.Module(nn.Module(DDP))
    if is_module_wrapper(module):
        module = module.module

    # below is the same as torch.nn.Module.state_dict()
    if destination is None:
        destination = OrderedDict()
        destination._metadata = OrderedDict()
    destination._metadata[prefix[:-1]] = local_metadata = dict(
        version=module._version)
    _save_to_state_dict(module, destination, prefix, keep_vars)
    for name, child in module._modules.items():
        if child is not None:
            get_state_dict(
                child, destination, prefix + name + '.', keep_vars=keep_vars)
    for hook in module._state_dict_hooks.values():
        hook_result = hook(module, destination, prefix, local_metadata)
        if hook_result is not None:
            destination = hook_result
    return destination


def save_checkpoint(model,
                    filename,
                    optimizer=None,
                    meta=None,
                    file_client_args=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())

    if is_module_wrapper(model):
        model = model.module

    if hasattr(model, 'CLASSES') and model.CLASSES is not None:
        # save class name to the meta
        meta.update(CLASSES=model.CLASSES)

    checkpoint = {
        'meta': meta,
        'state_dict': weights_to_cpu(get_state_dict(model))
    }
    # save optimizer state dict in the checkpoint
    if isinstance(optimizer, Optimizer):
        checkpoint['optimizer'] = optimizer.state_dict()
    elif isinstance(optimizer, dict):
        checkpoint['optimizer'] = {}
        for name, optim in optimizer.items():
            checkpoint['optimizer'][name] = optim.state_dict()

    if filename.startswith('pavi://'):
        if file_client_args is not None:
            raise ValueError(
                'file_client_args should be "None" if filename starts with '
                f'"pavi://", but got {file_client_args}')
        try:
            from pavi import modelcloud
            from pavi import exception
        except ImportError:
            raise ImportError(
                'Please install pavi to load checkpoint from modelcloud.')
        model_path = filename[7:]
        root = modelcloud.Folder()
        model_dir, model_name = osp.split(model_path)
        try:
            model = modelcloud.get(model_dir)
        except exception.NodeNotFoundError:
            model = root.create_training_model(model_dir)
        with TemporaryDirectory() as tmp_dir:
            checkpoint_file = osp.join(tmp_dir, model_name)
            with open(checkpoint_file, 'wb') as f:
                torch.save(checkpoint, f)
                f.flush()
            model.create_file(checkpoint_file, name=model_name)
    else:
        file_client = FileClient.infer_client(file_client_args, filename)
        with io.BytesIO() as f:
            torch.save(checkpoint, f)
            file_client.put(f.getvalue(), filename)
repo_name: trt-samples-for-hackathon-cn-master
file_path: Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/checkpoint.py
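Usage sketch (not part of the repository above): `CheckpointLoader.register_scheme` works as a decorator, which is the mechanism each `load_from_*` function above relies on. The `toy://` prefix and `load_from_toy` name below are hypothetical, chosen only to illustrate adding a new scheme.

import torch
from annotator.uniformer.mmcv.runner import CheckpointLoader


@CheckpointLoader.register_scheme(prefixes='toy://')
def load_from_toy(filename, map_location=None):
    # strip the hypothetical scheme prefix and defer to the plain torch loader
    return torch.load(filename[len('toy://'):], map_location=map_location)


# CheckpointLoader.load_checkpoint('toy:///tmp/model.pth') would now route
# to load_from_toy, since prefix matching picks the longest registered prefix.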
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import platform
import shutil
import time
import warnings

import torch

import annotator.uniformer.mmcv as mmcv
from .base_runner import BaseRunner
from .builder import RUNNERS
from .checkpoint import save_checkpoint
from .utils import get_host_info


@RUNNERS.register_module()
class EpochBasedRunner(BaseRunner):
    """Epoch-based Runner.

    This runner trains models epoch by epoch.
    """

    def run_iter(self, data_batch, train_mode, **kwargs):
        if self.batch_processor is not None:
            outputs = self.batch_processor(
                self.model, data_batch, train_mode=train_mode, **kwargs)
        elif train_mode:
            outputs = self.model.train_step(data_batch, self.optimizer,
                                            **kwargs)
        else:
            outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
        if not isinstance(outputs, dict):
            raise TypeError('"batch_processor()" or "model.train_step()" '
                            'and "model.val_step()" must return a dict')
        if 'log_vars' in outputs:
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs

    def train(self, data_loader, **kwargs):
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        self._max_iters = self._max_epochs * len(self.data_loader)
        self.call_hook('before_train_epoch')
        time.sleep(2)  # Prevent possible deadlock during epoch transition
        for i, data_batch in enumerate(self.data_loader):
            self._inner_iter = i
            self.call_hook('before_train_iter')
            self.run_iter(data_batch, train_mode=True, **kwargs)
            self.call_hook('after_train_iter')
            self._iter += 1

        self.call_hook('after_train_epoch')
        self._epoch += 1

    @torch.no_grad()
    def val(self, data_loader, **kwargs):
        self.model.eval()
        self.mode = 'val'
        self.data_loader = data_loader
        self.call_hook('before_val_epoch')
        time.sleep(2)  # Prevent possible deadlock during epoch transition
        for i, data_batch in enumerate(self.data_loader):
            self._inner_iter = i
            self.call_hook('before_val_iter')
            self.run_iter(data_batch, train_mode=False)
            self.call_hook('after_val_iter')

        self.call_hook('after_val_epoch')

    def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
        """Start running.

        Args:
            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
                and validation.
            workflow (list[tuple]): A list of (phase, epochs) to specify the
                running order and epochs. E.g, [('train', 2), ('val', 1)]
                means running 2 epochs for training and 1 epoch for
                validation, iteratively.
        """
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        assert len(data_loaders) == len(workflow)
        if max_epochs is not None:
            warnings.warn(
                'setting max_epochs in run is deprecated, '
                'please set max_epochs in runner_config', DeprecationWarning)
            self._max_epochs = max_epochs

        assert self._max_epochs is not None, (
            'max_epochs must be specified during instantiation')

        for i, flow in enumerate(workflow):
            mode, epochs = flow
            if mode == 'train':
                self._max_iters = self._max_epochs * len(data_loaders[i])
                break

        work_dir = self.work_dir if self.work_dir is not None else 'NONE'
        self.logger.info('Start running, host: %s, work_dir: %s',
                         get_host_info(), work_dir)
        self.logger.info('Hooks will be executed in the following order:\n%s',
                         self.get_hook_info())
        self.logger.info('workflow: %s, max: %d epochs', workflow,
                         self._max_epochs)
        self.call_hook('before_run')

        while self.epoch < self._max_epochs:
            for i, flow in enumerate(workflow):
                mode, epochs = flow
                if isinstance(mode, str):  # self.train()
                    if not hasattr(self, mode):
                        raise ValueError(
                            f'runner has no method named "{mode}" to run an '
                            'epoch')
                    epoch_runner = getattr(self, mode)
                else:
                    raise TypeError(
                        'mode in workflow must be a str, but got {}'.format(
                            type(mode)))

                for _ in range(epochs):
                    if mode == 'train' and self.epoch >= self._max_epochs:
                        break

                    epoch_runner(data_loaders[i], **kwargs)

        time.sleep(1)  # wait for some hooks like loggers to finish
        self.call_hook('after_run')

    def save_checkpoint(self,
                        out_dir,
                        filename_tmpl='epoch_{}.pth',
                        save_optimizer=True,
                        meta=None,
                        create_symlink=True):
        """Save the checkpoint.

        Args:
            out_dir (str): The directory that checkpoints are saved.
            filename_tmpl (str, optional): The checkpoint filename template,
                which contains a placeholder for the epoch number.
                Defaults to 'epoch_{}.pth'.
            save_optimizer (bool, optional): Whether to save the optimizer to
                the checkpoint. Defaults to True.
            meta (dict, optional): The meta information to be saved in the
                checkpoint. Defaults to None.
            create_symlink (bool, optional): Whether to create a symlink
                "latest.pth" to point to the latest checkpoint.
                Defaults to True.
        """
        if meta is None:
            meta = {}
        elif not isinstance(meta, dict):
            raise TypeError(
                f'meta should be a dict or None, but got {type(meta)}')
        if self.meta is not None:
            meta.update(self.meta)
            # Note: meta.update(self.meta) should be done before
            # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
            # there will be problems with resumed checkpoints.
            # More details in https://github.com/open-mmlab/mmcv/pull/1108
        meta.update(epoch=self.epoch + 1, iter=self.iter)

        filename = filename_tmpl.format(self.epoch + 1)
        filepath = osp.join(out_dir, filename)
        optimizer = self.optimizer if save_optimizer else None
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
        # in some environments, `os.symlink` is not supported, you may need to
        # set `create_symlink` to False
        if create_symlink:
            dst_file = osp.join(out_dir, 'latest.pth')
            if platform.system() != 'Windows':
                mmcv.symlink(filename, dst_file)
            else:
                shutil.copy(filepath, dst_file)


@RUNNERS.register_module()
class Runner(EpochBasedRunner):
    """Deprecated name of EpochBasedRunner."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'Runner was deprecated, please use EpochBasedRunner instead')
        super().__init__(*args, **kwargs)
repo_name: trt-samples-for-hackathon-cn-master
file_path: Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/epoch_based_runner.py
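Usage sketch (not part of the repository above): the workflow argument documented in `EpochBasedRunner.run`, assuming a `runner`, a `train_loader` and a `val_loader` already exist. This alternates two training epochs with one validation epoch until `max_epochs` is reached.

# one DataLoader per workflow phase, in matching order
runner.run([train_loader, val_loader], workflow=[('train', 2), ('val', 1)])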
# Copyright (c) OpenMMLab. All rights reserved.
from .base_module import BaseModule, ModuleList, Sequential
from .base_runner import BaseRunner
from .builder import RUNNERS, build_runner
from .checkpoint import (CheckpointLoader, _load_checkpoint,
                         _load_checkpoint_with_prefix, load_checkpoint,
                         load_state_dict, save_checkpoint, weights_to_cpu)
from .default_constructor import DefaultRunnerConstructor
from .dist_utils import (allreduce_grads, allreduce_params, get_dist_info,
                         init_dist, master_only)
from .epoch_based_runner import EpochBasedRunner, Runner
from .fp16_utils import LossScaler, auto_fp16, force_fp32, wrap_fp16_model
from .hooks import (HOOKS, CheckpointHook, ClosureHook, DistEvalHook,
                    DistSamplerSeedHook, DvcliveLoggerHook, EMAHook, EvalHook,
                    Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
                    GradientCumulativeOptimizerHook, Hook, IterTimerHook,
                    LoggerHook, LrUpdaterHook, MlflowLoggerHook,
                    NeptuneLoggerHook, OptimizerHook, PaviLoggerHook,
                    SyncBuffersHook, TensorboardLoggerHook, TextLoggerHook,
                    WandbLoggerHook)
from .iter_based_runner import IterBasedRunner, IterLoader
from .log_buffer import LogBuffer
from .optimizer import (OPTIMIZER_BUILDERS, OPTIMIZERS,
                        DefaultOptimizerConstructor, build_optimizer,
                        build_optimizer_constructor)
from .priority import Priority, get_priority
from .utils import get_host_info, get_time_str, obj_from_dict, set_random_seed

__all__ = [
    'BaseRunner', 'Runner', 'EpochBasedRunner', 'IterBasedRunner',
    'LogBuffer', 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook',
    'LrUpdaterHook', 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook',
    'LoggerHook', 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
    'NeptuneLoggerHook', 'WandbLoggerHook', 'MlflowLoggerHook',
    'DvcliveLoggerHook', '_load_checkpoint', 'load_state_dict',
    'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 'Priority',
    'get_priority', 'get_host_info', 'get_time_str', 'obj_from_dict',
    'init_dist', 'get_dist_info', 'master_only', 'OPTIMIZER_BUILDERS',
    'OPTIMIZERS', 'DefaultOptimizerConstructor', 'build_optimizer',
    'build_optimizer_constructor', 'IterLoader', 'set_random_seed',
    'auto_fp16', 'force_fp32', 'wrap_fp16_model', 'Fp16OptimizerHook',
    'SyncBuffersHook', 'EMAHook', 'build_runner', 'RUNNERS',
    'allreduce_grads', 'allreduce_params', 'LossScaler', 'CheckpointLoader',
    'BaseModule', '_load_checkpoint_with_prefix', 'EvalHook', 'DistEvalHook',
    'Sequential', 'ModuleList', 'GradientCumulativeOptimizerHook',
    'GradientCumulativeFp16OptimizerHook', 'DefaultRunnerConstructor'
]
repo_name: trt-samples-for-hackathon-cn-master
file_path: Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import platform
import shutil
import time
import warnings

import torch
from torch.optim import Optimizer

import annotator.uniformer.mmcv as mmcv
from .base_runner import BaseRunner
from .builder import RUNNERS
from .checkpoint import save_checkpoint
from .hooks import IterTimerHook
from .utils import get_host_info


class IterLoader:

    def __init__(self, dataloader):
        self._dataloader = dataloader
        self.iter_loader = iter(self._dataloader)
        self._epoch = 0

    @property
    def epoch(self):
        return self._epoch

    def __next__(self):
        try:
            data = next(self.iter_loader)
        except StopIteration:
            self._epoch += 1
            if hasattr(self._dataloader.sampler, 'set_epoch'):
                self._dataloader.sampler.set_epoch(self._epoch)
            time.sleep(2)  # Prevent possible deadlock during epoch transition
            self.iter_loader = iter(self._dataloader)
            data = next(self.iter_loader)

        return data

    def __len__(self):
        return len(self._dataloader)


@RUNNERS.register_module()
class IterBasedRunner(BaseRunner):
    """Iteration-based Runner.

    This runner trains models iteration by iteration.
    """

    def train(self, data_loader, **kwargs):
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        self._epoch = data_loader.epoch
        data_batch = next(data_loader)
        self.call_hook('before_train_iter')
        outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
        if not isinstance(outputs, dict):
            raise TypeError('model.train_step() must return a dict')
        if 'log_vars' in outputs:
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
        self.call_hook('after_train_iter')
        self._inner_iter += 1
        self._iter += 1

    @torch.no_grad()
    def val(self, data_loader, **kwargs):
        self.model.eval()
        self.mode = 'val'
        self.data_loader = data_loader
        data_batch = next(data_loader)
        self.call_hook('before_val_iter')
        outputs = self.model.val_step(data_batch, **kwargs)
        if not isinstance(outputs, dict):
            raise TypeError('model.val_step() must return a dict')
        if 'log_vars' in outputs:
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
        self.call_hook('after_val_iter')
        self._inner_iter += 1

    def run(self, data_loaders, workflow, max_iters=None, **kwargs):
        """Start running.

        Args:
            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
                and validation.
            workflow (list[tuple]): A list of (phase, iters) to specify the
                running order and iterations. E.g, [('train', 10000),
                ('val', 1000)] means running 10000 iterations for training and
                1000 iterations for validation, iteratively.
        """
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        assert len(data_loaders) == len(workflow)
        if max_iters is not None:
            warnings.warn(
                'setting max_iters in run is deprecated, '
                'please set max_iters in runner_config', DeprecationWarning)
            self._max_iters = max_iters
        assert self._max_iters is not None, (
            'max_iters must be specified during instantiation')

        work_dir = self.work_dir if self.work_dir is not None else 'NONE'
        self.logger.info('Start running, host: %s, work_dir: %s',
                         get_host_info(), work_dir)
        self.logger.info('Hooks will be executed in the following order:\n%s',
                         self.get_hook_info())
        self.logger.info('workflow: %s, max: %d iters', workflow,
                         self._max_iters)
        self.call_hook('before_run')

        iter_loaders = [IterLoader(x) for x in data_loaders]

        self.call_hook('before_epoch')

        while self.iter < self._max_iters:
            for i, flow in enumerate(workflow):
                self._inner_iter = 0
                mode, iters = flow
                if not isinstance(mode, str) or not hasattr(self, mode):
                    raise ValueError(
                        'runner has no method named "{}" to run a workflow'.
                        format(mode))
                iter_runner = getattr(self, mode)
                for _ in range(iters):
                    if mode == 'train' and self.iter >= self._max_iters:
                        break
                    iter_runner(iter_loaders[i], **kwargs)

        time.sleep(1)  # wait for some hooks like loggers to finish
        self.call_hook('after_epoch')
        self.call_hook('after_run')

    def resume(self,
               checkpoint,
               resume_optimizer=True,
               map_location='default'):
        """Resume model from checkpoint.

        Args:
            checkpoint (str): Checkpoint to resume from.
            resume_optimizer (bool, optional): Whether to resume the
                optimizer(s) if the checkpoint file includes optimizer(s).
                Default to True.
            map_location (str, optional): Same as :func:`torch.load`.
                Default to 'default'.
        """
        if map_location == 'default':
            device_id = torch.cuda.current_device()
            checkpoint = self.load_checkpoint(
                checkpoint,
                map_location=lambda storage, loc: storage.cuda(device_id))
        else:
            checkpoint = self.load_checkpoint(
                checkpoint, map_location=map_location)

        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']
        self._inner_iter = checkpoint['meta']['iter']
        if 'optimizer' in checkpoint and resume_optimizer:
            if isinstance(self.optimizer, Optimizer):
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            elif isinstance(self.optimizer, dict):
                for k in self.optimizer.keys():
                    self.optimizer[k].load_state_dict(
                        checkpoint['optimizer'][k])
            else:
                raise TypeError(
                    'Optimizer should be dict or torch.optim.Optimizer '
                    f'but got {type(self.optimizer)}')

        self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')

    def save_checkpoint(self,
                        out_dir,
                        filename_tmpl='iter_{}.pth',
                        meta=None,
                        save_optimizer=True,
                        create_symlink=True):
        """Save checkpoint to file.

        Args:
            out_dir (str): Directory to save checkpoint files.
            filename_tmpl (str, optional): Checkpoint file template.
                Defaults to 'iter_{}.pth'.
            meta (dict, optional): Metadata to be saved in checkpoint.
                Defaults to None.
            save_optimizer (bool, optional): Whether to save the optimizer.
                Defaults to True.
            create_symlink (bool, optional): Whether to create a symlink to
                the latest checkpoint file. Defaults to True.
        """
        if meta is None:
            meta = {}
        elif not isinstance(meta, dict):
            raise TypeError(
                f'meta should be a dict or None, but got {type(meta)}')
        if self.meta is not None:
            meta.update(self.meta)
            # Note: meta.update(self.meta) should be done before
            # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
            # there will be problems with resumed checkpoints.
            # More details in https://github.com/open-mmlab/mmcv/pull/1108
        meta.update(epoch=self.epoch + 1, iter=self.iter)

        filename = filename_tmpl.format(self.iter + 1)
        filepath = osp.join(out_dir, filename)
        optimizer = self.optimizer if save_optimizer else None
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
        # in some environments, `os.symlink` is not supported, you may need to
        # set `create_symlink` to False
        if create_symlink:
            dst_file = osp.join(out_dir, 'latest.pth')
            if platform.system() != 'Windows':
                mmcv.symlink(filename, dst_file)
            else:
                shutil.copy(filepath, dst_file)

    def register_training_hooks(self,
                                lr_config,
                                optimizer_config=None,
                                checkpoint_config=None,
                                log_config=None,
                                momentum_config=None,
                                custom_hooks_config=None):
        """Register default hooks for iter-based training.

        Checkpoint hook, optimizer stepper hook and logger hooks will be set
        to `by_epoch=False` by default.

        Default hooks include:

        +----------------------+-------------------------+
        | Hooks                | Priority                |
        +======================+=========================+
        | LrUpdaterHook        | VERY_HIGH (10)          |
        +----------------------+-------------------------+
        | MomentumUpdaterHook  | HIGH (30)               |
        +----------------------+-------------------------+
        | OptimizerStepperHook | ABOVE_NORMAL (40)       |
        +----------------------+-------------------------+
        | CheckpointSaverHook  | NORMAL (50)             |
        +----------------------+-------------------------+
        | IterTimerHook        | LOW (70)                |
        +----------------------+-------------------------+
        | LoggerHook(s)        | VERY_LOW (90)           |
        +----------------------+-------------------------+
        | CustomHook(s)        | defaults to NORMAL (50) |
        +----------------------+-------------------------+

        If custom hooks have the same priority as default hooks, custom hooks
        will be triggered after default hooks.
        """
        if checkpoint_config is not None:
            checkpoint_config.setdefault('by_epoch', False)
        if lr_config is not None:
            lr_config.setdefault('by_epoch', False)
        if log_config is not None:
            for info in log_config['hooks']:
                info.setdefault('by_epoch', False)
        super(IterBasedRunner, self).register_training_hooks(
            lr_config=lr_config,
            momentum_config=momentum_config,
            optimizer_config=optimizer_config,
            checkpoint_config=checkpoint_config,
            log_config=log_config,
            timer_config=IterTimerHook(),
            custom_hooks_config=custom_hooks_config)
repo_name: trt-samples-for-hackathon-cn-master
file_path: Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/iter_based_runner.py
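Usage sketch (not part of the repository above): the iteration-based counterpart of the epoch workflow, assuming `model` (with `train_step`), `logger` and `train_loader` already exist. Note that `run()` wraps each DataLoader in an `IterLoader` internally, so plain DataLoaders are passed in.

from annotator.uniformer.mmcv.runner import IterBasedRunner

runner = IterBasedRunner(model=model, logger=logger, max_iters=1000)
# a single training phase of 1000 iterations; IterLoader restarts the
# underlying DataLoader (and bumps its epoch) whenever it is exhausted
runner.run([train_loader], workflow=[('train', 1000)])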
# Copyright (c) OpenMMLab. All rights reserved.
from enum import Enum


class Priority(Enum):
    """Hook priority levels.

    +--------------+------------+
    | Level        | Value      |
    +==============+============+
    | HIGHEST      | 0          |
    +--------------+------------+
    | VERY_HIGH    | 10         |
    +--------------+------------+
    | HIGH         | 30         |
    +--------------+------------+
    | ABOVE_NORMAL | 40         |
    +--------------+------------+
    | NORMAL       | 50         |
    +--------------+------------+
    | BELOW_NORMAL | 60         |
    +--------------+------------+
    | LOW          | 70         |
    +--------------+------------+
    | VERY_LOW     | 90         |
    +--------------+------------+
    | LOWEST       | 100        |
    +--------------+------------+
    """

    HIGHEST = 0
    VERY_HIGH = 10
    HIGH = 30
    ABOVE_NORMAL = 40
    NORMAL = 50
    BELOW_NORMAL = 60
    LOW = 70
    VERY_LOW = 90
    LOWEST = 100


def get_priority(priority):
    """Get priority value.

    Args:
        priority (int or str or :obj:`Priority`): Priority.

    Returns:
        int: The priority value.
    """
    if isinstance(priority, int):
        if priority < 0 or priority > 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    elif isinstance(priority, Priority):
        return priority.value
    elif isinstance(priority, str):
        return Priority[priority.upper()].value
    else:
        raise TypeError('priority must be an integer or Priority enum value')
repo_name: trt-samples-for-hackathon-cn-master
file_path: Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/priority.py
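Usage sketch (not part of the repository above): `get_priority` accepts raw ints (passed through when in [0, 100]), `Priority` members, and case-insensitive level names, which is why hook-registration calls above can simply pass strings like 'VERY_HIGH'.

from annotator.uniformer.mmcv.runner import Priority, get_priority

assert get_priority(25) == 25                 # raw int, passed through
assert get_priority(Priority.LOW) == 70       # enum member
assert get_priority('above_normal') == 40     # case-insensitive name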
# Copyright (c) OpenMMLab. All rights reserved.
import copy

from ..utils import Registry

RUNNERS = Registry('runner')
RUNNER_BUILDERS = Registry('runner builder')


def build_runner_constructor(cfg):
    return RUNNER_BUILDERS.build(cfg)


def build_runner(cfg, default_args=None):
    runner_cfg = copy.deepcopy(cfg)
    constructor_type = runner_cfg.pop('constructor',
                                      'DefaultRunnerConstructor')
    runner_constructor = build_runner_constructor(
        dict(
            type=constructor_type,
            runner_cfg=runner_cfg,
            default_args=default_args))
    runner = runner_constructor()
    return runner
repo_name: trt-samples-for-hackathon-cn-master
file_path: Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/builder.py
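Usage sketch (not part of the repository above): building a runner from a config dict, assuming `model` and `logger` already exist. The runner type string selects a class from the RUNNERS registry, and `default_args` supplies constructor arguments that are not part of the config.

from annotator.uniformer.mmcv.runner import build_runner

runner = build_runner(
    dict(type='EpochBasedRunner', max_epochs=12),
    default_args=dict(model=model, logger=logger, work_dir='./work_dir'))

Omitting the 'constructor' key means DefaultRunnerConstructor is used, which simply forwards `runner_cfg` plus `default_args` to the registered runner class.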
# Copyright (c) OpenMMLab. All rights reserved.
import os
import random
import sys
import time
import warnings
from getpass import getuser
from socket import gethostname

import numpy as np
import torch

import annotator.uniformer.mmcv as mmcv


def get_host_info():
    """Get hostname and username.

    Return empty string if exception raised, e.g. ``getpass.getuser()`` will
    lead to error in docker container
    """
    host = ''
    try:
        host = f'{getuser()}@{gethostname()}'
    except Exception as e:
        warnings.warn(f'Host or user not found: {str(e)}')
    finally:
        return host


def get_time_str():
    return time.strftime('%Y%m%d_%H%M%S', time.localtime())


def obj_from_dict(info, parent=None, default_args=None):
    """Initialize an object from dict.

    The dict must contain the key "type", which indicates the object type, it
    can be either a string or type, such as "list" or ``list``. Remaining
    fields are treated as the arguments for constructing the object.

    Args:
        info (dict): Object types and arguments.
        parent (:class:`module`): Module which may contain expected object
            classes.
        default_args (dict, optional): Default arguments for initializing the
            object.

    Returns:
        any type: Object built from the dict.
    """
    assert isinstance(info, dict) and 'type' in info
    assert isinstance(default_args, dict) or default_args is None
    args = info.copy()
    obj_type = args.pop('type')
    if mmcv.is_str(obj_type):
        if parent is not None:
            obj_type = getattr(parent, obj_type)
        else:
            obj_type = sys.modules[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError('type must be a str or valid type, but '
                        f'got {type(obj_type)}')
    if default_args is not None:
        for name, value in default_args.items():
            args.setdefault(name, value)
    return obj_type(**args)


def set_random_seed(seed, deterministic=False, use_rank_shift=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
        use_rank_shift (bool): Whether to add the rank number to the random
            seed to have different random seeds in different threads.
            Default: False.
    """
    if use_rank_shift:
        rank, _ = mmcv.runner.get_dist_info()
        seed += rank
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/utils.py
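A short sketch of the two helpers most commonly used from this module; the nn.Linear model is a hypothetical stand-in:

import torch
import torch.nn as nn

from annotator.uniformer.mmcv.runner.utils import obj_from_dict, set_random_seed

set_random_seed(42, deterministic=True)  # seeds random/numpy/torch and pins cuDNN

# look up the class named by "type" on `parent` and construct it
model = nn.Linear(4, 2)
optim = obj_from_dict(
    dict(type='SGD', lr=0.01),
    parent=torch.optim,
    default_args=dict(params=model.parameters()))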
# Copyright (c) OpenMMLab. All rights reserved. import copy import warnings from abc import ABCMeta from collections import defaultdict from logging import FileHandler import torch.nn as nn from annotator.uniformer.mmcv.runner.dist_utils import master_only from annotator.uniformer.mmcv.utils.logging import get_logger, logger_initialized, print_log class BaseModule(nn.Module, metaclass=ABCMeta): """Base module for all modules in openmmlab. ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional functionality of parameter initialization. Compared with ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes. - ``init_cfg``: the config to control the initialization. - ``init_weights``: The function of parameter initialization and recording initialization information. - ``_params_init_info``: Used to track the parameter initialization information. This attribute only exists during executing the ``init_weights``. Args: init_cfg (dict, optional): Initialization config dict. """ def __init__(self, init_cfg=None): """Initialize BaseModule, inherited from `torch.nn.Module`""" # NOTE init_cfg can be defined in different levels, but init_cfg # in low levels has a higher priority. super(BaseModule, self).__init__() # define default value of init_cfg instead of hard code # in init_weights() function self._is_init = False self.init_cfg = copy.deepcopy(init_cfg) # Backward compatibility in derived classes # if pretrained is not None: # warnings.warn('DeprecationWarning: pretrained is a deprecated \ # key, please consider using init_cfg') # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) @property def is_init(self): return self._is_init def init_weights(self): """Initialize the weights.""" is_top_level_module = False # check if it is top-level module if not hasattr(self, '_params_init_info'): # The `_params_init_info` is used to record the initialization # information of the parameters # the key should be the obj:`nn.Parameter` of model and the value # should be a dict containing # - init_info (str): The string that describes the initialization. # - tmp_mean_value (FloatTensor): The mean of the parameter, # which indicates whether the parameter has been modified. # this attribute would be deleted after all parameters # is initialized. self._params_init_info = defaultdict(dict) is_top_level_module = True # Initialize the `_params_init_info`, # When detecting the `tmp_mean_value` of # the corresponding parameter is changed, update related # initialization information for name, param in self.named_parameters(): self._params_init_info[param][ 'init_info'] = f'The value is the same before and ' \ f'after calling `init_weights` ' \ f'of {self.__class__.__name__} ' self._params_init_info[param][ 'tmp_mean_value'] = param.data.mean() # pass `params_init_info` to all submodules # All submodules share the same `params_init_info`, # so it will be updated when parameters are # modified at any level of the model. 
for sub_module in self.modules(): sub_module._params_init_info = self._params_init_info # Get the initialized logger, if not exist, # create a logger named `mmcv` logger_names = list(logger_initialized.keys()) logger_name = logger_names[0] if logger_names else 'mmcv' from ..cnn import initialize from ..cnn.utils.weight_init import update_init_info module_name = self.__class__.__name__ if not self._is_init: if self.init_cfg: print_log( f'initialize {module_name} with init_cfg {self.init_cfg}', logger=logger_name) initialize(self, self.init_cfg) if isinstance(self.init_cfg, dict): # prevent the parameters of # the pre-trained model # from being overwritten by # the `init_weights` if self.init_cfg['type'] == 'Pretrained': return for m in self.children(): if hasattr(m, 'init_weights'): m.init_weights() # users may overload the `init_weights` update_init_info( m, init_info=f'Initialized by ' f'user-defined `init_weights`' f' in {m.__class__.__name__} ') self._is_init = True else: warnings.warn(f'init_weights of {self.__class__.__name__} has ' f'been called more than once.') if is_top_level_module: self._dump_init_info(logger_name) for sub_module in self.modules(): del sub_module._params_init_info @master_only def _dump_init_info(self, logger_name): """Dump the initialization information to a file named `initialization.log.json` in workdir. Args: logger_name (str): The name of logger. """ logger = get_logger(logger_name) with_file_handler = False # dump the information to the logger file if there is a `FileHandler` for handler in logger.handlers: if isinstance(handler, FileHandler): handler.stream.write( 'Name of parameter - Initialization information\n') for name, param in self.named_parameters(): handler.stream.write( f'\n{name} - {param.shape}: ' f"\n{self._params_init_info[param]['init_info']} \n") handler.stream.flush() with_file_handler = True if not with_file_handler: for name, param in self.named_parameters(): print_log( f'\n{name} - {param.shape}: ' f"\n{self._params_init_info[param]['init_info']} \n ", logger=logger_name) def __repr__(self): s = super().__repr__() if self.init_cfg: s += f'\ninit_cfg={self.init_cfg}' return s class Sequential(BaseModule, nn.Sequential): """Sequential module in openmmlab. Args: init_cfg (dict, optional): Initialization config dict. """ def __init__(self, *args, init_cfg=None): BaseModule.__init__(self, init_cfg) nn.Sequential.__init__(self, *args) class ModuleList(BaseModule, nn.ModuleList): """ModuleList in openmmlab. Args: modules (iterable, optional): an iterable of modules to add. init_cfg (dict, optional): Initialization config dict. """ def __init__(self, modules=None, init_cfg=None): BaseModule.__init__(self, init_cfg) nn.ModuleList.__init__(self, modules)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/base_module.py
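A minimal sketch of a BaseModule subclass driving init_weights() through init_cfg; it assumes the 'Normal' initializer is registered in the mmcv weight-init registry (as in upstream mmcv):

import torch.nn as nn

from annotator.uniformer.mmcv.runner.base_module import BaseModule

class TinyHead(BaseModule):

    def __init__(self, init_cfg=dict(type='Normal', layer='Conv2d', std=0.01)):
        super().__init__(init_cfg=init_cfg)
        self.conv = nn.Conv2d(3, 8, kernel_size=1)

head = TinyHead()
head.init_weights()  # applies init_cfg, then records and dumps the init info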
# Copyright (c) OpenMMLab. All rights reserved. from collections import OrderedDict import numpy as np class LogBuffer: def __init__(self): self.val_history = OrderedDict() self.n_history = OrderedDict() self.output = OrderedDict() self.ready = False def clear(self): self.val_history.clear() self.n_history.clear() self.clear_output() def clear_output(self): self.output.clear() self.ready = False def update(self, vars, count=1): assert isinstance(vars, dict) for key, var in vars.items(): if key not in self.val_history: self.val_history[key] = [] self.n_history[key] = [] self.val_history[key].append(var) self.n_history[key].append(count) def average(self, n=0): """Average latest n values or all values.""" assert n >= 0 for key in self.val_history: values = np.array(self.val_history[key][-n:]) nums = np.array(self.n_history[key][-n:]) avg = np.sum(values * nums) / np.sum(nums) self.output[key] = avg self.ready = True
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/log_buffer.py
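Usage sketch for LogBuffer, grounded in the implementation above; `count` weights each update when averaging:

from annotator.uniformer.mmcv.runner.log_buffer import LogBuffer

buf = LogBuffer()
buf.update(dict(loss=0.9), count=16)
buf.update(dict(loss=0.7), count=16)
buf.average()                # n=0 averages the whole history
assert abs(buf.output['loss'] - 0.8) < 1e-6
buf.clear_output()           # keeps the history, resets `output` and `ready`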
# Copyright (c) OpenMMLab. All rights reserved. import functools import warnings from collections import abc from inspect import getfullargspec import numpy as np import torch import torch.nn as nn from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version from .dist_utils import allreduce_grads as _allreduce_grads try: # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported # and used; otherwise, auto fp16 will adopt mmcv's implementation. # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16 # manually, so the behavior may not be consistent with real amp. from torch.cuda.amp import autocast except ImportError: pass def cast_tensor_type(inputs, src_type, dst_type): """Recursively convert Tensor in inputs from src_type to dst_type. Args: inputs: Inputs that to be casted. src_type (torch.dtype): Source type.. dst_type (torch.dtype): Destination type. Returns: The same type with inputs, but all contained Tensors have been cast. """ if isinstance(inputs, nn.Module): return inputs elif isinstance(inputs, torch.Tensor): return inputs.to(dst_type) elif isinstance(inputs, str): return inputs elif isinstance(inputs, np.ndarray): return inputs elif isinstance(inputs, abc.Mapping): return type(inputs)({ k: cast_tensor_type(v, src_type, dst_type) for k, v in inputs.items() }) elif isinstance(inputs, abc.Iterable): return type(inputs)( cast_tensor_type(item, src_type, dst_type) for item in inputs) else: return inputs def auto_fp16(apply_to=None, out_fp32=False): """Decorator to enable fp16 training automatically. This decorator is useful when you write custom modules and want to support mixed precision training. If inputs arguments are fp32 tensors, they will be converted to fp16 automatically. Arguments other than fp32 tensors are ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, otherwise, original mmcv implementation will be adopted. Args: apply_to (Iterable, optional): The argument names to be converted. `None` indicates all arguments. out_fp32 (bool): Whether to convert the output back to fp32. Example: >>> import torch.nn as nn >>> class MyModule1(nn.Module): >>> >>> # Convert x and y to fp16 >>> @auto_fp16() >>> def forward(self, x, y): >>> pass >>> import torch.nn as nn >>> class MyModule2(nn.Module): >>> >>> # convert pred to fp16 >>> @auto_fp16(apply_to=('pred', )) >>> def do_something(self, pred, others): >>> pass """ def auto_fp16_wrapper(old_func): @functools.wraps(old_func) def new_func(*args, **kwargs): # check if the module has set the attribute `fp16_enabled`, if not, # just fallback to the original method. 
if not isinstance(args[0], torch.nn.Module): raise TypeError('@auto_fp16 can only be used to decorate the ' 'method of nn.Module') if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): return old_func(*args, **kwargs) # get the arg spec of the decorated method args_info = getfullargspec(old_func) # get the argument names to be casted args_to_cast = args_info.args if apply_to is None else apply_to # convert the args that need to be processed new_args = [] # NOTE: default args are not taken into consideration if args: arg_names = args_info.args[:len(args)] for i, arg_name in enumerate(arg_names): if arg_name in args_to_cast: new_args.append( cast_tensor_type(args[i], torch.float, torch.half)) else: new_args.append(args[i]) # convert the kwargs that need to be processed new_kwargs = {} if kwargs: for arg_name, arg_value in kwargs.items(): if arg_name in args_to_cast: new_kwargs[arg_name] = cast_tensor_type( arg_value, torch.float, torch.half) else: new_kwargs[arg_name] = arg_value # apply converted arguments to the decorated method if (TORCH_VERSION != 'parrots' and digit_version(TORCH_VERSION) >= digit_version('1.6.0')): with autocast(enabled=True): output = old_func(*new_args, **new_kwargs) else: output = old_func(*new_args, **new_kwargs) # cast the results back to fp32 if necessary if out_fp32: output = cast_tensor_type(output, torch.half, torch.float) return output return new_func return auto_fp16_wrapper def force_fp32(apply_to=None, out_fp16=False): """Decorator to convert input arguments to fp32 in force. This decorator is useful when you write custom modules and want to support mixed precision training. If there are some inputs that must be processed in fp32 mode, then this decorator can handle it. If inputs arguments are fp16 tensors, they will be converted to fp32 automatically. Arguments other than fp16 tensors are ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, otherwise, original mmcv implementation will be adopted. Args: apply_to (Iterable, optional): The argument names to be converted. `None` indicates all arguments. out_fp16 (bool): Whether to convert the output back to fp16. Example: >>> import torch.nn as nn >>> class MyModule1(nn.Module): >>> >>> # Convert x and y to fp32 >>> @force_fp32() >>> def loss(self, x, y): >>> pass >>> import torch.nn as nn >>> class MyModule2(nn.Module): >>> >>> # convert pred to fp32 >>> @force_fp32(apply_to=('pred', )) >>> def post_process(self, pred, others): >>> pass """ def force_fp32_wrapper(old_func): @functools.wraps(old_func) def new_func(*args, **kwargs): # check if the module has set the attribute `fp16_enabled`, if not, # just fallback to the original method. 
            if not isinstance(args[0], torch.nn.Module):
                raise TypeError('@force_fp32 can only be used to decorate the '
                                'method of nn.Module')
            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
                return old_func(*args, **kwargs)
            # get the arg spec of the decorated method
            args_info = getfullargspec(old_func)
            # get the argument names to be cast
            args_to_cast = args_info.args if apply_to is None else apply_to
            # convert the args that need to be processed
            new_args = []
            if args:
                arg_names = args_info.args[:len(args)]
                for i, arg_name in enumerate(arg_names):
                    if arg_name in args_to_cast:
                        new_args.append(
                            cast_tensor_type(args[i], torch.half, torch.float))
                    else:
                        new_args.append(args[i])
            # convert the kwargs that need to be processed
            new_kwargs = dict()
            if kwargs:
                for arg_name, arg_value in kwargs.items():
                    if arg_name in args_to_cast:
                        new_kwargs[arg_name] = cast_tensor_type(
                            arg_value, torch.half, torch.float)
                    else:
                        new_kwargs[arg_name] = arg_value
            # apply converted arguments to the decorated method
            if (TORCH_VERSION != 'parrots' and
                    digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
                with autocast(enabled=False):
                    output = old_func(*new_args, **new_kwargs)
            else:
                output = old_func(*new_args, **new_kwargs)
            # cast the results back to fp16 if necessary
            if out_fp16:
                output = cast_tensor_type(output, torch.float, torch.half)
            return output

        return new_func

    return force_fp32_wrapper


def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    warnings.warn(
        '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be '
        'removed in v2.8. Please switch to "mmcv.runner.allreduce_grads".')
    _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb)


def wrap_fp16_model(model):
    """Wrap the FP32 model to FP16.

    If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend,
    otherwise, the original mmcv implementation will be adopted.

    For PyTorch >= 1.6, this function will
    1. Set the fp16 flag inside the model to True.

    Otherwise:
    1. Convert the FP32 model to FP16.
    2. Keep some necessary layers in FP32, e.g., normalization layers.
    3. Set the `fp16_enabled` flag inside the model to True.

    Args:
        model (nn.Module): Model in FP32.
    """
    if (TORCH_VERSION == 'parrots'
            or digit_version(TORCH_VERSION) < digit_version('1.6.0')):
        # convert model to fp16
        model.half()
        # patch the normalization layers to make them work in fp32 mode
        patch_norm_fp32(model)
    # set `fp16_enabled` flag
    for m in model.modules():
        if hasattr(m, 'fp16_enabled'):
            m.fp16_enabled = True


def patch_norm_fp32(module):
    """Recursively convert normalization layers from FP16 to FP32.

    Args:
        module (nn.Module): The modules to be converted in FP16.

    Returns:
        nn.Module: The converted module, the normalization layers have been
            converted to FP32.
    """
    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
        module.float()
        if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
            module.forward = patch_forward_method(module.forward, torch.half,
                                                  torch.float)
    for child in module.children():
        patch_norm_fp32(child)
    return module


def patch_forward_method(func, src_type, dst_type, convert_output=True):
    """Patch the forward method of a module.

    Args:
        func (callable): The original forward method.
        src_type (torch.dtype): Type of input arguments to be converted from.
        dst_type (torch.dtype): Type of input arguments to be converted to.
        convert_output (bool): Whether to convert the output back to src_type.

    Returns:
        callable: The patched forward method.
""" def new_forward(*args, **kwargs): output = func(*cast_tensor_type(args, src_type, dst_type), **cast_tensor_type(kwargs, src_type, dst_type)) if convert_output: output = cast_tensor_type(output, dst_type, src_type) return output return new_forward class LossScaler: """Class that manages loss scaling in mixed precision training which supports both dynamic or static mode. The implementation refers to https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py. Indirectly, by supplying ``mode='dynamic'`` for dynamic loss scaling. It's important to understand how :class:`LossScaler` operates. Loss scaling is designed to combat the problem of underflowing gradients encountered at long times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are encountered, :class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch, and :class:`LossScaler` adjusts the loss scale to a lower value. If a certain number of iterations occur without overflowing gradients detected,:class:`LossScaler` increases the loss scale once more. In this way :class:`LossScaler` attempts to "ride the edge" of always using the highest loss scale possible without incurring overflow. Args: init_scale (float): Initial loss scale value, default: 2**32. scale_factor (float): Factor used when adjusting the loss scale. Default: 2. mode (str): Loss scaling mode. 'dynamic' or 'static' scale_window (int): Number of consecutive iterations without an overflow to wait before increasing the loss scale. Default: 1000. """ def __init__(self, init_scale=2**32, mode='dynamic', scale_factor=2., scale_window=1000): self.cur_scale = init_scale self.cur_iter = 0 assert mode in ('dynamic', 'static'), 'mode can only be dynamic or static' self.mode = mode self.last_overflow_iter = -1 self.scale_factor = scale_factor self.scale_window = scale_window def has_overflow(self, params): """Check if params contain overflow.""" if self.mode != 'dynamic': return False for p in params: if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data): return True return False def _has_inf_or_nan(x): """Check if params contain NaN.""" try: cpu_sum = float(x.float().sum()) except RuntimeError as instance: if 'value cannot be converted' not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') \ or cpu_sum != cpu_sum: return True return False def update_scale(self, overflow): """update the current loss scale value when overflow happens.""" if self.mode != 'dynamic': return if overflow: self.cur_scale = max(self.cur_scale / self.scale_factor, 1) self.last_overflow_iter = self.cur_iter else: if (self.cur_iter - self.last_overflow_iter) % \ self.scale_window == 0: self.cur_scale *= self.scale_factor self.cur_iter += 1 def state_dict(self): """Returns the state of the scaler as a :class:`dict`.""" return dict( cur_scale=self.cur_scale, cur_iter=self.cur_iter, mode=self.mode, last_overflow_iter=self.last_overflow_iter, scale_factor=self.scale_factor, scale_window=self.scale_window) def load_state_dict(self, state_dict): """Loads the loss_scaler state dict. Args: state_dict (dict): scaler state. 
""" self.cur_scale = state_dict['cur_scale'] self.cur_iter = state_dict['cur_iter'] self.mode = state_dict['mode'] self.last_overflow_iter = state_dict['last_overflow_iter'] self.scale_factor = state_dict['scale_factor'] self.scale_window = state_dict['scale_window'] @property def loss_scale(self): return self.cur_scale
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/fp16_utils.py
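A minimal sketch of the decorator plus wrap_fp16_model; the module must expose an fp16_enabled attribute for either to take effect, and the cast-to-half path is really meant for CUDA training (on CPU, torch.cuda.amp.autocast is a no-op, so a half input against fp32 weights would fail):

import torch.nn as nn

from annotator.uniformer.mmcv.runner.fp16_utils import auto_fp16, wrap_fp16_model

class TinyNet(nn.Module):

    def __init__(self):
        super().__init__()
        self.fp16_enabled = False  # flag checked by the decorator
        self.fc = nn.Linear(4, 2)

    @auto_fp16(apply_to=('x', ))
    def forward(self, x):
        return self.fc(x)

model = TinyNet()
wrap_fp16_model(model)  # sets fp16_enabled = True (and halves weights on torch < 1.6)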
# Copyright (c) OpenMMLab. All rights reserved. import functools import os import subprocess from collections import OrderedDict import torch import torch.multiprocessing as mp from torch import distributed as dist from torch._utils import (_flatten_dense_tensors, _take_tensors, _unflatten_dense_tensors) def init_dist(launcher, backend='nccl', **kwargs): if mp.get_start_method(allow_none=True) is None: mp.set_start_method('spawn') if launcher == 'pytorch': _init_dist_pytorch(backend, **kwargs) elif launcher == 'mpi': _init_dist_mpi(backend, **kwargs) elif launcher == 'slurm': _init_dist_slurm(backend, **kwargs) else: raise ValueError(f'Invalid launcher type: {launcher}') def _init_dist_pytorch(backend, **kwargs): # TODO: use local_rank instead of rank % num_gpus rank = int(os.environ['RANK']) num_gpus = torch.cuda.device_count() torch.cuda.set_device(rank % num_gpus) dist.init_process_group(backend=backend, **kwargs) def _init_dist_mpi(backend, **kwargs): # TODO: use local_rank instead of rank % num_gpus rank = int(os.environ['OMPI_COMM_WORLD_RANK']) num_gpus = torch.cuda.device_count() torch.cuda.set_device(rank % num_gpus) dist.init_process_group(backend=backend, **kwargs) def _init_dist_slurm(backend, port=None): """Initialize slurm distributed training environment. If argument ``port`` is not specified, then the master port will be system environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system environment variable, then a default port ``29500`` will be used. Args: backend (str): Backend of torch.distributed. port (int, optional): Master port. Defaults to None. """ proc_id = int(os.environ['SLURM_PROCID']) ntasks = int(os.environ['SLURM_NTASKS']) node_list = os.environ['SLURM_NODELIST'] num_gpus = torch.cuda.device_count() torch.cuda.set_device(proc_id % num_gpus) addr = subprocess.getoutput( f'scontrol show hostname {node_list} | head -n1') # specify master port if port is not None: os.environ['MASTER_PORT'] = str(port) elif 'MASTER_PORT' in os.environ: pass # use MASTER_PORT in the environment variable else: # 29500 is torch.distributed default port os.environ['MASTER_PORT'] = '29500' # use MASTER_ADDR in the environment variable if it already exists if 'MASTER_ADDR' not in os.environ: os.environ['MASTER_ADDR'] = addr os.environ['WORLD_SIZE'] = str(ntasks) os.environ['LOCAL_RANK'] = str(proc_id % num_gpus) os.environ['RANK'] = str(proc_id) dist.init_process_group(backend=backend) def get_dist_info(): if dist.is_available() and dist.is_initialized(): rank = dist.get_rank() world_size = dist.get_world_size() else: rank = 0 world_size = 1 return rank, world_size def master_only(func): @functools.wraps(func) def wrapper(*args, **kwargs): rank, _ = get_dist_info() if rank == 0: return func(*args, **kwargs) return wrapper def allreduce_params(params, coalesce=True, bucket_size_mb=-1): """Allreduce parameters. Args: params (list[torch.Parameters]): List of parameters or buffers of a model. coalesce (bool, optional): Whether allreduce parameters as a whole. Defaults to True. bucket_size_mb (int, optional): Size of bucket, the unit is MB. Defaults to -1. """ _, world_size = get_dist_info() if world_size == 1: return params = [param.data for param in params] if coalesce: _allreduce_coalesced(params, world_size, bucket_size_mb) else: for tensor in params: dist.all_reduce(tensor.div_(world_size)) def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): """Allreduce gradients. 
Args: params (list[torch.Parameters]): List of parameters of a model coalesce (bool, optional): Whether allreduce parameters as a whole. Defaults to True. bucket_size_mb (int, optional): Size of bucket, the unit is MB. Defaults to -1. """ grads = [ param.grad.data for param in params if param.requires_grad and param.grad is not None ] _, world_size = get_dist_info() if world_size == 1: return if coalesce: _allreduce_coalesced(grads, world_size, bucket_size_mb) else: for tensor in grads: dist.all_reduce(tensor.div_(world_size)) def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): if bucket_size_mb > 0: bucket_size_bytes = bucket_size_mb * 1024 * 1024 buckets = _take_tensors(tensors, bucket_size_bytes) else: buckets = OrderedDict() for tensor in tensors: tp = tensor.type() if tp not in buckets: buckets[tp] = [] buckets[tp].append(tensor) buckets = buckets.values() for bucket in buckets: flat_tensors = _flatten_dense_tensors(bucket) dist.all_reduce(flat_tensors) flat_tensors.div_(world_size) for tensor, synced in zip( bucket, _unflatten_dense_tensors(flat_tensors, bucket)): tensor.copy_(synced)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/dist_utils.py
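Usage sketch for the two helpers that appear throughout the runner code; outside a torch.distributed job, get_dist_info() falls back to (0, 1):

from annotator.uniformer.mmcv.runner.dist_utils import get_dist_info, master_only

@master_only
def log_once(msg):
    # runs on rank 0 only; other ranks get None back
    print(msg)

rank, world_size = get_dist_info()
log_once(f'training with world_size={world_size}')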
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch from torch.nn import GroupNorm, LayerNorm from annotator.uniformer.mmcv.utils import _BatchNorm, _InstanceNorm, build_from_cfg, is_list_of from annotator.uniformer.mmcv.utils.ext_loader import check_ops_exist from .builder import OPTIMIZER_BUILDERS, OPTIMIZERS @OPTIMIZER_BUILDERS.register_module() class DefaultOptimizerConstructor: """Default constructor for optimizers. By default each parameter share the same optimizer settings, and we provide an argument ``paramwise_cfg`` to specify parameter-wise settings. It is a dict and may contain the following fields: - ``custom_keys`` (dict): Specified parameters-wise settings by keys. If one of the keys in ``custom_keys`` is a substring of the name of one parameter, then the setting of the parameter will be specified by ``custom_keys[key]`` and other setting like ``bias_lr_mult`` etc. will be ignored. It should be noted that the aforementioned ``key`` is the longest key that is a substring of the name of the parameter. If there are multiple matched keys with the same length, then the key with lower alphabet order will be chosen. ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult`` and ``decay_mult``. See Example 2 below. - ``bias_lr_mult`` (float): It will be multiplied to the learning rate for all bias parameters (except for those in normalization layers and offset layers of DCN). - ``bias_decay_mult`` (float): It will be multiplied to the weight decay for all bias parameters (except for those in normalization layers, depthwise conv layers, offset layers of DCN). - ``norm_decay_mult`` (float): It will be multiplied to the weight decay for all weight and bias parameters of normalization layers. - ``dwconv_decay_mult`` (float): It will be multiplied to the weight decay for all weight and bias parameters of depthwise conv layers. - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning rate for parameters of offset layer in the deformable convs of a model. - ``bypass_duplicate`` (bool): If true, the duplicate parameters would not be added into optimizer. Default: False. Note: 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will override the effect of ``bias_lr_mult`` in the bias of offset layer. So be careful when using both ``bias_lr_mult`` and ``dcn_offset_lr_mult``. If you wish to apply both of them to the offset layer in deformable convs, set ``dcn_offset_lr_mult`` to the original ``dcn_offset_lr_mult`` * ``bias_lr_mult``. 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will apply it to all the DCN layers in the model. So be careful when the model contains multiple DCN layers in places other than backbone. Args: model (:obj:`nn.Module`): The model with parameters to be optimized. optimizer_cfg (dict): The config dict of the optimizer. Positional fields are - `type`: class name of the optimizer. Optional fields are - any arguments of the corresponding optimizer type, e.g., lr, weight_decay, momentum, etc. paramwise_cfg (dict, optional): Parameter-wise options. Example 1: >>> model = torch.nn.modules.Conv1d(1, 1, 1) >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9, >>> weight_decay=0.0001) >>> paramwise_cfg = dict(norm_decay_mult=0.) 
>>> optim_builder = DefaultOptimizerConstructor( >>> optimizer_cfg, paramwise_cfg) >>> optimizer = optim_builder(model) Example 2: >>> # assume model have attribute model.backbone and model.cls_head >>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95) >>> paramwise_cfg = dict(custom_keys={ '.backbone': dict(lr_mult=0.1, decay_mult=0.9)}) >>> optim_builder = DefaultOptimizerConstructor( >>> optimizer_cfg, paramwise_cfg) >>> optimizer = optim_builder(model) >>> # Then the `lr` and `weight_decay` for model.backbone is >>> # (0.01 * 0.1, 0.95 * 0.9). `lr` and `weight_decay` for >>> # model.cls_head is (0.01, 0.95). """ def __init__(self, optimizer_cfg, paramwise_cfg=None): if not isinstance(optimizer_cfg, dict): raise TypeError('optimizer_cfg should be a dict', f'but got {type(optimizer_cfg)}') self.optimizer_cfg = optimizer_cfg self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg self.base_lr = optimizer_cfg.get('lr', None) self.base_wd = optimizer_cfg.get('weight_decay', None) self._validate_cfg() def _validate_cfg(self): if not isinstance(self.paramwise_cfg, dict): raise TypeError('paramwise_cfg should be None or a dict, ' f'but got {type(self.paramwise_cfg)}') if 'custom_keys' in self.paramwise_cfg: if not isinstance(self.paramwise_cfg['custom_keys'], dict): raise TypeError( 'If specified, custom_keys must be a dict, ' f'but got {type(self.paramwise_cfg["custom_keys"])}') if self.base_wd is None: for key in self.paramwise_cfg['custom_keys']: if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]: raise ValueError('base_wd should not be None') # get base lr and weight decay # weight_decay must be explicitly specified if mult is specified if ('bias_decay_mult' in self.paramwise_cfg or 'norm_decay_mult' in self.paramwise_cfg or 'dwconv_decay_mult' in self.paramwise_cfg): if self.base_wd is None: raise ValueError('base_wd should not be None') def _is_in(self, param_group, param_group_list): assert is_list_of(param_group_list, dict) param = set(param_group['params']) param_set = set() for group in param_group_list: param_set.update(set(group['params'])) return not param.isdisjoint(param_set) def add_params(self, params, module, prefix='', is_dcn_module=None): """Add all parameters of module to the params list. The parameters of the given module will be added to the list of param groups, with specific rules defined by paramwise_cfg. Args: params (list[dict]): A list of param groups, it will be modified in place. module (nn.Module): The module to be added. prefix (str): The prefix of the module is_dcn_module (int|float|None): If the current module is a submodule of DCN, `is_dcn_module` will be passed to control conv_offset layer's learning rate. Defaults to None. """ # get param-wise options custom_keys = self.paramwise_cfg.get('custom_keys', {}) # first sort with alphabet order and then sort with reversed len of str sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True) bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.) bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.) norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.) dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.) bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False) dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.) 
# special rules for norm layers and depth-wise conv layers is_norm = isinstance(module, (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)) is_dwconv = ( isinstance(module, torch.nn.Conv2d) and module.in_channels == module.groups) for name, param in module.named_parameters(recurse=False): param_group = {'params': [param]} if not param.requires_grad: params.append(param_group) continue if bypass_duplicate and self._is_in(param_group, params): warnings.warn(f'{prefix} is duplicate. It is skipped since ' f'bypass_duplicate={bypass_duplicate}') continue # if the parameter match one of the custom keys, ignore other rules is_custom = False for key in sorted_keys: if key in f'{prefix}.{name}': is_custom = True lr_mult = custom_keys[key].get('lr_mult', 1.) param_group['lr'] = self.base_lr * lr_mult if self.base_wd is not None: decay_mult = custom_keys[key].get('decay_mult', 1.) param_group['weight_decay'] = self.base_wd * decay_mult break if not is_custom: # bias_lr_mult affects all bias parameters # except for norm.bias dcn.conv_offset.bias if name == 'bias' and not (is_norm or is_dcn_module): param_group['lr'] = self.base_lr * bias_lr_mult if (prefix.find('conv_offset') != -1 and is_dcn_module and isinstance(module, torch.nn.Conv2d)): # deal with both dcn_offset's bias & weight param_group['lr'] = self.base_lr * dcn_offset_lr_mult # apply weight decay policies if self.base_wd is not None: # norm decay if is_norm: param_group[ 'weight_decay'] = self.base_wd * norm_decay_mult # depth-wise conv elif is_dwconv: param_group[ 'weight_decay'] = self.base_wd * dwconv_decay_mult # bias lr and decay elif name == 'bias' and not is_dcn_module: # TODO: current bias_decay_mult will have affect on DCN param_group[ 'weight_decay'] = self.base_wd * bias_decay_mult params.append(param_group) if check_ops_exist(): from annotator.uniformer.mmcv.ops import DeformConv2d, ModulatedDeformConv2d is_dcn_module = isinstance(module, (DeformConv2d, ModulatedDeformConv2d)) else: is_dcn_module = False for child_name, child_mod in module.named_children(): child_prefix = f'{prefix}.{child_name}' if prefix else child_name self.add_params( params, child_mod, prefix=child_prefix, is_dcn_module=is_dcn_module) def __call__(self, model): if hasattr(model, 'module'): model = model.module optimizer_cfg = self.optimizer_cfg.copy() # if no paramwise option is specified, just use the global setting if not self.paramwise_cfg: optimizer_cfg['params'] = model.parameters() return build_from_cfg(optimizer_cfg, OPTIMIZERS) # set param-wise lr and weight decay recursively params = [] self.add_params(params, model) optimizer_cfg['params'] = params return build_from_cfg(optimizer_cfg, OPTIMIZERS)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/optimizer/default_constructor.py
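A sketch of the constructor applied to a tiny model, following Example 1 of the docstring above; biases get twice the base lr and the BatchNorm parameters are exempted from weight decay:

import torch.nn as nn

from annotator.uniformer.mmcv.runner.optimizer import DefaultOptimizerConstructor

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
constructor = DefaultOptimizerConstructor(
    optimizer_cfg=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=1e-4),
    paramwise_cfg=dict(bias_lr_mult=2., norm_decay_mult=0.))
optimizer = constructor(model)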
# Copyright (c) OpenMMLab. All rights reserved. from .builder import (OPTIMIZER_BUILDERS, OPTIMIZERS, build_optimizer, build_optimizer_constructor) from .default_constructor import DefaultOptimizerConstructor __all__ = [ 'OPTIMIZER_BUILDERS', 'OPTIMIZERS', 'DefaultOptimizerConstructor', 'build_optimizer', 'build_optimizer_constructor' ]
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/optimizer/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import inspect import torch from ...utils import Registry, build_from_cfg OPTIMIZERS = Registry('optimizer') OPTIMIZER_BUILDERS = Registry('optimizer builder') def register_torch_optimizers(): torch_optimizers = [] for module_name in dir(torch.optim): if module_name.startswith('__'): continue _optim = getattr(torch.optim, module_name) if inspect.isclass(_optim) and issubclass(_optim, torch.optim.Optimizer): OPTIMIZERS.register_module()(_optim) torch_optimizers.append(module_name) return torch_optimizers TORCH_OPTIMIZERS = register_torch_optimizers() def build_optimizer_constructor(cfg): return build_from_cfg(cfg, OPTIMIZER_BUILDERS) def build_optimizer(model, cfg): optimizer_cfg = copy.deepcopy(cfg) constructor_type = optimizer_cfg.pop('constructor', 'DefaultOptimizerConstructor') paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) optim_constructor = build_optimizer_constructor( dict( type=constructor_type, optimizer_cfg=optimizer_cfg, paramwise_cfg=paramwise_cfg)) optimizer = optim_constructor(model) return optimizer
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/optimizer/builder.py
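Usage sketch: because register_torch_optimizers() pre-registers every torch.optim class, any of them can be named directly in the cfg:

import torch.nn as nn

from annotator.uniformer.mmcv.runner.optimizer import build_optimizer

model = nn.Linear(4, 2)
optimizer = build_optimizer(model, dict(type='Adam', lr=1e-3, weight_decay=0.))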
# Copyright (c) OpenMMLab. All rights reserved.
import annotator.uniformer.mmcv as mmcv
from .hook import HOOKS, Hook
from .lr_updater import annealing_cos, annealing_linear, format_param


class MomentumUpdaterHook(Hook):

    def __init__(self,
                 by_epoch=True,
                 warmup=None,
                 warmup_iters=0,
                 warmup_ratio=0.9):
        # validate the "warmup" argument
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, valid'
                    ' types are "constant", "linear" and "exp"')
        if warmup is not None:
            assert warmup_iters > 0, \
                '"warmup_iters" must be a positive integer'
            assert 0 < warmup_ratio <= 1.0, \
                '"warmup_ratio" must be in range (0,1]'

        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio

        self.base_momentum = []  # initial momentum for all param groups
        self.regular_mom = [
        ]  # expected momentum if no warming up is performed

    def _set_momentum(self, runner, momentum_groups):
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, base_momentum):
        raise NotImplementedError

    def get_regular_momentum(self, runner):
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k in runner.optimizer.keys():
                _momentum_group = [
                    self.get_momentum(runner, _base_momentum)
                    for _base_momentum in self.base_momentum[k]
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            return [
                self.get_momentum(runner, _base_momentum)
                for _base_momentum in self.base_momentum
            ]

    def get_warmup_momentum(self, cur_iters):

        def _get_warmup_momentum(cur_iters, regular_momentum):
            # NOTE: operate on the `regular_momentum` argument so that the
            # dict-of-optimizers case warms up each optimizer's own groups
            if self.warmup == 'constant':
                warmup_momentum = [
                    _momentum / self.warmup_ratio
                    for _momentum in regular_momentum
                ]
            elif self.warmup == 'linear':
                k = (1 - cur_iters / self.warmup_iters) * (1 -
                                                           self.warmup_ratio)
                warmup_momentum = [
                    _momentum / (1 - k) for _momentum in regular_momentum
                ]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_momentum = [
                    _momentum / k for _momentum in regular_momentum
                ]
            return warmup_momentum

        if isinstance(self.regular_mom, dict):
            momentum_groups = {}
            for key, regular_momentum in self.regular_mom.items():
                momentum_groups[key] = _get_warmup_momentum(
                    cur_iters, regular_momentum)
            return momentum_groups
        else:
            return _get_warmup_momentum(cur_iters, self.regular_mom)

    def before_run(self, runner):
        # NOTE: when resuming from a checkpoint,
        # if 'initial_momentum' is not saved,
        # it will be set according to the optimizer params
        if isinstance(runner.optimizer, dict):
            self.base_momentum = {}
            for k, optim in runner.optimizer.items():
                for group in optim.param_groups:
                    if 'momentum' in group.keys():
                        group.setdefault('initial_momentum', group['momentum'])
                    else:
                        group.setdefault('initial_momentum', group['betas'][0])
                _base_momentum = [
                    group['initial_momentum'] for group in optim.param_groups
                ]
                self.base_momentum.update({k: _base_momentum})
        else:
            for group in runner.optimizer.param_groups:
                if 'momentum' in group.keys():
group.setdefault('initial_momentum', group['momentum']) else: group.setdefault('initial_momentum', group['betas'][0]) self.base_momentum = [ group['initial_momentum'] for group in runner.optimizer.param_groups ] def before_train_epoch(self, runner): if not self.by_epoch: return self.regular_mom = self.get_regular_momentum(runner) self._set_momentum(runner, self.regular_mom) def before_train_iter(self, runner): cur_iter = runner.iter if not self.by_epoch: self.regular_mom = self.get_regular_momentum(runner) if self.warmup is None or cur_iter >= self.warmup_iters: self._set_momentum(runner, self.regular_mom) else: warmup_momentum = self.get_warmup_momentum(cur_iter) self._set_momentum(runner, warmup_momentum) elif self.by_epoch: if self.warmup is None or cur_iter > self.warmup_iters: return elif cur_iter == self.warmup_iters: self._set_momentum(runner, self.regular_mom) else: warmup_momentum = self.get_warmup_momentum(cur_iter) self._set_momentum(runner, warmup_momentum) @HOOKS.register_module() class StepMomentumUpdaterHook(MomentumUpdaterHook): """Step momentum scheduler with min value clipping. Args: step (int | list[int]): Step to decay the momentum. If an int value is given, regard it as the decay interval. If a list is given, decay momentum at these steps. gamma (float, optional): Decay momentum ratio. Default: 0.5. min_momentum (float, optional): Minimum momentum value to keep. If momentum after decay is lower than this value, it will be clipped accordingly. If None is given, we don't perform lr clipping. Default: None. """ def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs): if isinstance(step, list): assert mmcv.is_list_of(step, int) assert all([s > 0 for s in step]) elif isinstance(step, int): assert step > 0 else: raise TypeError('"step" must be a list or integer') self.step = step self.gamma = gamma self.min_momentum = min_momentum super(StepMomentumUpdaterHook, self).__init__(**kwargs) def get_momentum(self, runner, base_momentum): progress = runner.epoch if self.by_epoch else runner.iter # calculate exponential term if isinstance(self.step, int): exp = progress // self.step else: exp = len(self.step) for i, s in enumerate(self.step): if progress < s: exp = i break momentum = base_momentum * (self.gamma**exp) if self.min_momentum is not None: # clip to a minimum value momentum = max(momentum, self.min_momentum) return momentum @HOOKS.register_module() class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook): def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs): assert (min_momentum is None) ^ (min_momentum_ratio is None) self.min_momentum = min_momentum self.min_momentum_ratio = min_momentum_ratio super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs) def get_momentum(self, runner, base_momentum): if self.by_epoch: progress = runner.epoch max_progress = runner.max_epochs else: progress = runner.iter max_progress = runner.max_iters if self.min_momentum_ratio is not None: target_momentum = base_momentum * self.min_momentum_ratio else: target_momentum = self.min_momentum return annealing_cos(base_momentum, target_momentum, progress / max_progress) @HOOKS.register_module() class CyclicMomentumUpdaterHook(MomentumUpdaterHook): """Cyclic momentum Scheduler. Implement the cyclical momentum scheduler policy described in https://arxiv.org/pdf/1708.07120.pdf This momentum scheduler usually used together with the CyclicLRUpdater to improve the performance in the 3D detection area. 
Attributes: target_ratio (tuple[float]): Relative ratio of the lowest momentum and the highest momentum to the initial momentum. cyclic_times (int): Number of cycles during training step_ratio_up (float): The ratio of the increasing process of momentum in the total cycle. by_epoch (bool): Whether to update momentum by epoch. """ def __init__(self, by_epoch=False, target_ratio=(0.85 / 0.95, 1), cyclic_times=1, step_ratio_up=0.4, **kwargs): if isinstance(target_ratio, float): target_ratio = (target_ratio, target_ratio / 1e5) elif isinstance(target_ratio, tuple): target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \ if len(target_ratio) == 1 else target_ratio else: raise ValueError('target_ratio should be either float ' f'or tuple, got {type(target_ratio)}') assert len(target_ratio) == 2, \ '"target_ratio" must be list or tuple of two floats' assert 0 <= step_ratio_up < 1.0, \ '"step_ratio_up" must be in range [0,1)' self.target_ratio = target_ratio self.cyclic_times = cyclic_times self.step_ratio_up = step_ratio_up self.momentum_phases = [] # init momentum_phases # currently only support by_epoch=False assert not by_epoch, \ 'currently only support "by_epoch" = False' super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs) def before_run(self, runner): super(CyclicMomentumUpdaterHook, self).before_run(runner) # initiate momentum_phases # total momentum_phases are separated as up and down max_iter_per_phase = runner.max_iters // self.cyclic_times iter_up_phase = int(self.step_ratio_up * max_iter_per_phase) self.momentum_phases.append( [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]]) self.momentum_phases.append([ iter_up_phase, max_iter_per_phase, max_iter_per_phase, self.target_ratio[0], self.target_ratio[1] ]) def get_momentum(self, runner, base_momentum): curr_iter = runner.iter for (start_iter, end_iter, max_iter_per_phase, start_ratio, end_ratio) in self.momentum_phases: curr_iter %= max_iter_per_phase if start_iter <= curr_iter < end_iter: progress = curr_iter - start_iter return annealing_cos(base_momentum * start_ratio, base_momentum * end_ratio, progress / (end_iter - start_iter)) @HOOKS.register_module() class OneCycleMomentumUpdaterHook(MomentumUpdaterHook): """OneCycle momentum Scheduler. This momentum scheduler usually used together with the OneCycleLrUpdater to improve the performance. Args: base_momentum (float or list): Lower momentum boundaries in the cycle for each parameter group. Note that momentum is cycled inversely to learning rate; at the peak of a cycle, momentum is 'base_momentum' and learning rate is 'max_lr'. Default: 0.85 max_momentum (float or list): Upper momentum boundaries in the cycle for each parameter group. Functionally, it defines the cycle amplitude (max_momentum - base_momentum). Note that momentum is cycled inversely to learning rate; at the start of a cycle, momentum is 'max_momentum' and learning rate is 'base_lr' Default: 0.95 pct_start (float): The percentage of the cycle (in number of steps) spent increasing the learning rate. Default: 0.3 anneal_strategy (str): {'cos', 'linear'} Specifies the annealing strategy: 'cos' for cosine annealing, 'linear' for linear annealing. Default: 'cos' three_phase (bool): If three_phase is True, use a third phase of the schedule to annihilate the learning rate according to final_div_factor instead of modifying the second phase (the first two phases will be symmetrical about the step indicated by pct_start). 
            Default: False
    """

    def __init__(self,
                 base_momentum=0.85,
                 max_momentum=0.95,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 three_phase=False,
                 **kwargs):
        # validate by_epoch, currently only support by_epoch=False
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], \
                'currently only support "by_epoch" = False'
        if not isinstance(base_momentum, (float, list, dict)):
            raise ValueError('base_momentum must be a float, '
                             'list or dict.')
        self._base_momentum = base_momentum
        if not isinstance(max_momentum, (float, list, dict)):
            raise ValueError('max_momentum must be a float, '
                             'list or dict.')
        self._max_momentum = max_momentum
        # validate pct_start
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError('Expected float between 0 and 1 pct_start, but '
                             f'got {pct_start}')
        self.pct_start = pct_start
        # validate anneal_strategy
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError('anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        self.three_phase = three_phase
        self.momentum_phases = []  # init momentum_phases
        super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs)

    def before_run(self, runner):
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                if ('momentum' not in optim.defaults
                        and 'betas' not in optim.defaults):
                    raise ValueError('optimizer must support momentum '
                                     'with option enabled')
                self.use_beta1 = 'betas' in optim.defaults
                _base_momentum = format_param(k, optim, self._base_momentum)
                _max_momentum = format_param(k, optim, self._max_momentum)
                for group, b_momentum, m_momentum in zip(
                        optim.param_groups, _base_momentum, _max_momentum):
                    if self.use_beta1:
                        _, beta2 = group['betas']
                        group['betas'] = (m_momentum, beta2)
                    else:
                        group['momentum'] = m_momentum
                    group['base_momentum'] = b_momentum
                    group['max_momentum'] = m_momentum
        else:
            optim = runner.optimizer
            if ('momentum' not in optim.defaults
                    and 'betas' not in optim.defaults):
                raise ValueError('optimizer must support momentum '
                                 'with option enabled')
            self.use_beta1 = 'betas' in optim.defaults
            k = type(optim).__name__
            _base_momentum = format_param(k, optim, self._base_momentum)
            _max_momentum = format_param(k, optim, self._max_momentum)
            for group, b_momentum, m_momentum in zip(optim.param_groups,
                                                     _base_momentum,
                                                     _max_momentum):
                if self.use_beta1:
                    _, beta2 = group['betas']
                    group['betas'] = (m_momentum, beta2)
                else:
                    group['momentum'] = m_momentum
                group['base_momentum'] = b_momentum
                group['max_momentum'] = m_momentum

        if self.three_phase:
            self.momentum_phases.append({
                'end_iter': float(self.pct_start * runner.max_iters) - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter': float(2 * self.pct_start * runner.max_iters) - 2,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'max_momentum'
            })
        else:
            self.momentum_phases.append({
                'end_iter': float(self.pct_start * runner.max_iters) - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'
            })

    def _set_momentum(self, runner, momentum_groups):
        if isinstance(runner.optimizer, dict):
for k, optim in runner.optimizer.items(): for param_group, mom in zip(optim.param_groups, momentum_groups[k]): if 'momentum' in param_group.keys(): param_group['momentum'] = mom elif 'betas' in param_group.keys(): param_group['betas'] = (mom, param_group['betas'][1]) else: for param_group, mom in zip(runner.optimizer.param_groups, momentum_groups): if 'momentum' in param_group.keys(): param_group['momentum'] = mom elif 'betas' in param_group.keys(): param_group['betas'] = (mom, param_group['betas'][1]) def get_momentum(self, runner, param_group): curr_iter = runner.iter start_iter = 0 for i, phase in enumerate(self.momentum_phases): end_iter = phase['end_iter'] if curr_iter <= end_iter or i == len(self.momentum_phases) - 1: pct = (curr_iter - start_iter) / (end_iter - start_iter) momentum = self.anneal_func( param_group[phase['start_momentum']], param_group[phase['end_momentum']], pct) break start_iter = end_iter return momentum def get_regular_momentum(self, runner): if isinstance(runner.optimizer, dict): momentum_groups = {} for k, optim in runner.optimizer.items(): _momentum_group = [ self.get_momentum(runner, param_group) for param_group in optim.param_groups ] momentum_groups.update({k: _momentum_group}) return momentum_groups else: momentum_groups = [] for param_group in runner.optimizer.param_groups: momentum_groups.append(self.get_momentum(runner, param_group)) return momentum_groups
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/momentum_updater.py
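A registration sketch using the cyclic updater's defaults; `runner` is a hypothetical, already-built runner instance, and register_hook is the BaseRunner registration method in upstream mmcv:

from annotator.uniformer.mmcv.runner.hooks.momentum_updater import \
    CyclicMomentumUpdaterHook

hook = CyclicMomentumUpdaterHook(
    by_epoch=False,                 # the only supported mode
    target_ratio=(0.85 / 0.95, 1),  # (lowest, highest) relative to initial momentum
    cyclic_times=1,
    step_ratio_up=0.4)
runner.register_hook(hook)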
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import warnings from annotator.uniformer.mmcv.fileio import FileClient from ..dist_utils import allreduce_params, master_only from .hook import HOOKS, Hook @HOOKS.register_module() class CheckpointHook(Hook): """Save checkpoints periodically. Args: interval (int): The saving period. If ``by_epoch=True``, interval indicates epochs, otherwise it indicates iterations. Default: -1, which means "never". by_epoch (bool): Saving checkpoints by epoch or by iteration. Default: True. save_optimizer (bool): Whether to save optimizer state_dict in the checkpoint. It is usually used for resuming experiments. Default: True. out_dir (str, optional): The root directory to save checkpoints. If not specified, ``runner.work_dir`` will be used by default. If specified, the ``out_dir`` will be the concatenation of ``out_dir`` and the last level directory of ``runner.work_dir``. `Changed in version 1.3.16.` max_keep_ckpts (int, optional): The maximum checkpoints to keep. In some cases we want only the latest few checkpoints and would like to delete old ones to save the disk space. Default: -1, which means unlimited. save_last (bool, optional): Whether to force the last checkpoint to be saved regardless of interval. Default: True. sync_buffer (bool, optional): Whether to synchronize buffers in different gpus. Default: False. file_client_args (dict, optional): Arguments to instantiate a FileClient. See :class:`mmcv.fileio.FileClient` for details. Default: None. `New in version 1.3.16.` .. warning:: Before v1.3.16, the ``out_dir`` argument indicates the path where the checkpoint is stored. However, since v1.3.16, ``out_dir`` indicates the root directory and the final path to save checkpoint is the concatenation of ``out_dir`` and the last level directory of ``runner.work_dir``. Suppose the value of ``out_dir`` is "/path/of/A" and the value of ``runner.work_dir`` is "/path/of/B", then the final path will be "/path/of/A/B". 
""" def __init__(self, interval=-1, by_epoch=True, save_optimizer=True, out_dir=None, max_keep_ckpts=-1, save_last=True, sync_buffer=False, file_client_args=None, **kwargs): self.interval = interval self.by_epoch = by_epoch self.save_optimizer = save_optimizer self.out_dir = out_dir self.max_keep_ckpts = max_keep_ckpts self.save_last = save_last self.args = kwargs self.sync_buffer = sync_buffer self.file_client_args = file_client_args def before_run(self, runner): if not self.out_dir: self.out_dir = runner.work_dir self.file_client = FileClient.infer_client(self.file_client_args, self.out_dir) # if `self.out_dir` is not equal to `runner.work_dir`, it means that # `self.out_dir` is set so the final `self.out_dir` is the # concatenation of `self.out_dir` and the last level directory of # `runner.work_dir` if self.out_dir != runner.work_dir: basename = osp.basename(runner.work_dir.rstrip(osp.sep)) self.out_dir = self.file_client.join_path(self.out_dir, basename) runner.logger.info((f'Checkpoints will be saved to {self.out_dir} by ' f'{self.file_client.name}.')) # disable the create_symlink option because some file backends do not # allow to create a symlink if 'create_symlink' in self.args: if self.args[ 'create_symlink'] and not self.file_client.allow_symlink: self.args['create_symlink'] = False warnings.warn( ('create_symlink is set as True by the user but is changed' 'to be False because creating symbolic link is not ' f'allowed in {self.file_client.name}')) else: self.args['create_symlink'] = self.file_client.allow_symlink def after_train_epoch(self, runner): if not self.by_epoch: return # save checkpoint for following cases: # 1. every ``self.interval`` epochs # 2. reach the last epoch of training if self.every_n_epochs( runner, self.interval) or (self.save_last and self.is_last_epoch(runner)): runner.logger.info( f'Saving checkpoint at {runner.epoch + 1} epochs') if self.sync_buffer: allreduce_params(runner.model.buffers()) self._save_checkpoint(runner) @master_only def _save_checkpoint(self, runner): """Save the current checkpoint and delete unwanted checkpoint.""" runner.save_checkpoint( self.out_dir, save_optimizer=self.save_optimizer, **self.args) if runner.meta is not None: if self.by_epoch: cur_ckpt_filename = self.args.get( 'filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1) else: cur_ckpt_filename = self.args.get( 'filename_tmpl', 'iter_{}.pth').format(runner.iter + 1) runner.meta.setdefault('hook_msgs', dict()) runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path( self.out_dir, cur_ckpt_filename) # remove other checkpoints if self.max_keep_ckpts > 0: if self.by_epoch: name = 'epoch_{}.pth' current_ckpt = runner.epoch + 1 else: name = 'iter_{}.pth' current_ckpt = runner.iter + 1 redundant_ckpts = range( current_ckpt - self.max_keep_ckpts * self.interval, 0, -self.interval) filename_tmpl = self.args.get('filename_tmpl', name) for _step in redundant_ckpts: ckpt_path = self.file_client.join_path( self.out_dir, filename_tmpl.format(_step)) if self.file_client.isfile(ckpt_path): self.file_client.remove(ckpt_path) else: break def after_train_iter(self, runner): if self.by_epoch: return # save checkpoint for following cases: # 1. every ``self.interval`` iterations # 2. reach the last iteration of training if self.every_n_iters( runner, self.interval) or (self.save_last and self.is_last_iter(runner)): runner.logger.info( f'Saving checkpoint at {runner.iter + 1} iterations') if self.sync_buffer: allreduce_params(runner.model.buffers()) self._save_checkpoint(runner)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/checkpoint.py
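A registration sketch; `runner` is again a hypothetical, already-built runner instance:

from annotator.uniformer.mmcv.runner.hooks.checkpoint import CheckpointHook

ckpt_hook = CheckpointHook(
    interval=1,        # save every epoch (by_epoch=True is the default)
    max_keep_ckpts=3,  # keep only the three most recent checkpoints
    save_last=True)
runner.register_hook(ckpt_hook)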
# Copyright (c) OpenMMLab. All rights reserved. import torch from .hook import HOOKS, Hook @HOOKS.register_module() class EmptyCacheHook(Hook): def __init__(self, before_epoch=False, after_epoch=True, after_iter=False): self._before_epoch = before_epoch self._after_epoch = after_epoch self._after_iter = after_iter def after_iter(self, runner): if self._after_iter: torch.cuda.empty_cache() def before_epoch(self, runner): if self._before_epoch: torch.cuda.empty_cache() def after_epoch(self, runner): if self._after_epoch: torch.cuda.empty_cache()
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/memory.py
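A sketch of how EmptyCacheHook might be attached to an existing runner (the `runner` object is assumed):

# Release cached GPU blocks after every epoch and after every iteration.
# Frequent torch.cuda.empty_cache() calls trade speed for a smaller
# reserved-memory footprint, so after_iter=True mainly pays off when
# variable input shapes fragment the CUDA caching allocator.
runner.register_hook(
    EmptyCacheHook(before_epoch=False, after_epoch=True, after_iter=True))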
# Copyright (c) OpenMMLab. All rights reserved. from ..dist_utils import allreduce_params from .hook import HOOKS, Hook @HOOKS.register_module() class SyncBuffersHook(Hook): """Synchronize model buffers such as running_mean and running_var in BN at the end of each epoch. Args: distributed (bool): Whether distributed training is used. It is effective only for distributed training. Defaults to True. """ def __init__(self, distributed=True): self.distributed = distributed def after_epoch(self, runner): """All-reduce model buffers at the end of each epoch.""" if self.distributed: allreduce_params(runner.model.buffers())
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/sync_buffer.py
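A sketch of the effect, assuming two ranks whose BN statistics drifted apart during the epoch; the numbers are illustrative:

# allreduce_params averages each buffer across ranks, so after the hook
# runs, a BN running_mean of 0.2 on rank 0 and 0.4 on rank 1 becomes 0.3
# on both ranks. Registration is a one-liner on an existing runner:
runner.register_hook(SyncBuffersHook(distributed=True))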
# Copyright (c) OpenMMLab. All rights reserved. from ...parallel import is_module_wrapper from ..hooks.hook import HOOKS, Hook @HOOKS.register_module() class EMAHook(Hook): r"""Exponential Moving Average Hook. Use Exponential Moving Average on all parameters of model in training process. Each parameter has an EMA backup, which is updated by the formula below. EMAHook takes priority over EvalHook and CheckpointSaverHook. .. math:: X_{\text{ema},t+1} = (1 - \text{momentum}) \times X_{\text{ema},t} + \text{momentum} \times X_t Args: momentum (float): The momentum used for updating ema parameter. Defaults to 0.0002. interval (int): Update ema parameter every interval iteration. Defaults to 1. warm_up (int): During first warm_up steps, we may use smaller momentum to update ema parameters more slowly. Defaults to 100. resume_from (str): The checkpoint path. Defaults to None. """ def __init__(self, momentum=0.0002, interval=1, warm_up=100, resume_from=None): assert isinstance(interval, int) and interval > 0 self.warm_up = warm_up self.interval = interval assert momentum > 0 and momentum < 1 self.momentum = momentum**interval self.checkpoint = resume_from def before_run(self, runner): """Make resuming the model together with its EMA parameters easier. Register each EMA parameter as a ``named_buffer`` of the model. """ model = runner.model if is_module_wrapper(model): model = model.module self.param_ema_buffer = {} self.model_parameters = dict(model.named_parameters(recurse=True)) for name, value in self.model_parameters.items(): # "." is not allowed in module's buffer name buffer_name = f"ema_{name.replace('.', '_')}" self.param_ema_buffer[name] = buffer_name model.register_buffer(buffer_name, value.data.clone()) self.model_buffers = dict(model.named_buffers(recurse=True)) if self.checkpoint is not None: runner.resume(self.checkpoint) def after_train_iter(self, runner): """Update ema parameter every self.interval iterations.""" curr_step = runner.iter # We warm up the momentum considering the instability at beginning momentum = min(self.momentum, (1 + curr_step) / (self.warm_up + curr_step)) if curr_step % self.interval != 0: return for name, parameter in self.model_parameters.items(): buffer_name = self.param_ema_buffer[name] buffer_parameter = self.model_buffers[buffer_name] # use the keyword `alpha` overload; add_(Number, Tensor) is deprecated buffer_parameter.mul_(1 - momentum).add_(parameter.data, alpha=momentum) def after_train_epoch(self, runner): """We load parameter values from ema backup to model before the EvalHook.""" self._swap_ema_parameters() def before_train_epoch(self, runner): """We recover model's parameter from ema backup after last epoch's EvalHook.""" self._swap_ema_parameters() def _swap_ema_parameters(self): """Swap the parameter of model with parameter in ema_buffer.""" for name, value in self.model_parameters.items(): temp = value.data.clone() ema_buffer = self.model_buffers[self.param_ema_buffer[name]] value.data.copy_(ema_buffer.data) ema_buffer.data.copy_(temp)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/ema.py
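A worked example of the update rule above, with illustrative values:

# One EMA step as in EMAHook.after_train_iter, warm-up included. With
# momentum=0.0002 and warm_up=100, the effective momentum at step 0 is
# min(0.0002, (1 + 0) / (100 + 0)) = 0.0002, so the backup moves slowly.
ema, x = 1.00, 0.50
m = min(0.0002, (1 + 0) / (100 + 0))   # 0.0002
ema = (1 - m) * ema + m * x            # 0.9998 * 1.0 + 0.0002 * 0.5 = 0.9999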
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import warnings from math import inf import torch.distributed as dist from torch.nn.modules.batchnorm import _BatchNorm from torch.utils.data import DataLoader from annotator.uniformer.mmcv.fileio import FileClient from annotator.uniformer.mmcv.utils import is_seq_of from .hook import Hook from .logger import LoggerHook class EvalHook(Hook): """Non-Distributed evaluation hook. This hook will regularly perform evaluation in a given interval when performing in a non-distributed environment. Args: dataloader (DataLoader): A PyTorch dataloader, whose dataset has implemented ``evaluate`` function. start (int | None, optional): Evaluation starting epoch. It enables evaluation before the training starts if ``start`` <= the resuming epoch. If None, whether to evaluate is merely decided by ``interval``. Default: None. interval (int): Evaluation interval. Default: 1. by_epoch (bool): Determine whether to perform evaluation by epoch or by iteration. If set to True, it will perform by epoch. Otherwise, by iteration. Default: True. save_best (str, optional): If a metric is specified, it would measure the best checkpoint during evaluation. The information about best checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep best score value and best checkpoint path, which will be also loaded when resume checkpoint. Options are the evaluation metrics on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox detection and instance segmentation. ``AR@100`` for proposal recall. If ``save_best`` is ``auto``, the first key of the returned ``OrderedDict`` result will be used. Default: None. rule (str | None, optional): Comparison rule for best score. If set to None, it will infer a reasonable rule. Keys such as 'acc', 'top', etc. will be inferred by the 'greater' rule. Keys containing 'loss' will be inferred by the 'less' rule. Options are 'greater', 'less', None. Default: None. test_fn (callable, optional): Test a model with samples from a dataloader, and return the test results. If ``None``, the default test function ``mmcv.engine.single_gpu_test`` will be used. (default: ``None``) greater_keys (List[str] | None, optional): Metric keys that will be inferred by the 'greater' comparison rule. If ``None``, _default_greater_keys will be used. (default: ``None``) less_keys (List[str] | None, optional): Metric keys that will be inferred by the 'less' comparison rule. If ``None``, _default_less_keys will be used. (default: ``None``) out_dir (str, optional): The root directory to save checkpoints. If not specified, `runner.work_dir` will be used by default. If specified, the `out_dir` will be the concatenation of `out_dir` and the last level directory of `runner.work_dir`. `New in version 1.3.16.` file_client_args (dict): Arguments to instantiate a FileClient. See :class:`mmcv.fileio.FileClient` for details. Default: None. `New in version 1.3.16.` **eval_kwargs: Evaluation arguments fed into the evaluate function of the dataset. Notes: If new arguments are added for EvalHook, tools/test.py, tools/eval_metric.py may be affected. """ # Since the key for determining greater or less is related to the # downstream tasks, downstream repos may need to overwrite the following # inner variable accordingly. 
rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y} init_value_map = {'greater': -inf, 'less': inf} _default_greater_keys = [ 'acc', 'top', 'AR@', 'auc', 'precision', 'mAP', 'mDice', 'mIoU', 'mAcc', 'aAcc' ] _default_less_keys = ['loss'] def __init__(self, dataloader, start=None, interval=1, by_epoch=True, save_best=None, rule=None, test_fn=None, greater_keys=None, less_keys=None, out_dir=None, file_client_args=None, **eval_kwargs): if not isinstance(dataloader, DataLoader): raise TypeError(f'dataloader must be a pytorch DataLoader, ' f'but got {type(dataloader)}') if interval <= 0: raise ValueError(f'interval must be a positive number, ' f'but got {interval}') assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean' if start is not None and start < 0: raise ValueError(f'The evaluation start epoch {start} is smaller ' f'than 0') self.dataloader = dataloader self.interval = interval self.start = start self.by_epoch = by_epoch assert isinstance(save_best, str) or save_best is None, \ '"save_best" should be a str or None ' \ f'rather than {type(save_best)}' self.save_best = save_best self.eval_kwargs = eval_kwargs self.initial_flag = True if test_fn is None: from annotator.uniformer.mmcv.engine import single_gpu_test self.test_fn = single_gpu_test else: self.test_fn = test_fn if greater_keys is None: self.greater_keys = self._default_greater_keys else: if not isinstance(greater_keys, (list, tuple)): greater_keys = (greater_keys, ) assert is_seq_of(greater_keys, str) self.greater_keys = greater_keys if less_keys is None: self.less_keys = self._default_less_keys else: if not isinstance(less_keys, (list, tuple)): less_keys = (less_keys, ) assert is_seq_of(less_keys, str) self.less_keys = less_keys if self.save_best is not None: self.best_ckpt_path = None self._init_rule(rule, self.save_best) self.out_dir = out_dir self.file_client_args = file_client_args def _init_rule(self, rule, key_indicator): """Initialize rule, key_indicator, comparison_func, and best score. Here is how the comparison rule is determined for the key indicator when the rule is not specified (note that the key indicator matching is case-insensitive): 1. If the key indicator is in ``self.greater_keys``, the rule will be specified as 'greater'. 2. Or if the key indicator is in ``self.less_keys``, the rule will be specified as 'less'. 3. Or if any one item in ``self.greater_keys`` is a substring of the key indicator, the rule will be specified as 'greater'. 4. Or if any one item in ``self.less_keys`` is a substring of the key indicator, the rule will be specified as 'less'. Args: rule (str | None): Comparison rule for best score. key_indicator (str | None): Key indicator to determine the comparison rule. 
""" if rule not in self.rule_map and rule is not None: raise KeyError(f'rule must be greater, less or None, ' f'but got {rule}.') if rule is None: if key_indicator != 'auto': # `_lc` here means we use the lower case of keys for # case-insensitive matching key_indicator_lc = key_indicator.lower() greater_keys = [key.lower() for key in self.greater_keys] less_keys = [key.lower() for key in self.less_keys] if key_indicator_lc in greater_keys: rule = 'greater' elif key_indicator_lc in less_keys: rule = 'less' elif any(key in key_indicator_lc for key in greater_keys): rule = 'greater' elif any(key in key_indicator_lc for key in less_keys): rule = 'less' else: raise ValueError(f'Cannot infer the rule for key ' f'{key_indicator}, thus a specific rule ' f'must be specified.') self.rule = rule self.key_indicator = key_indicator if self.rule is not None: self.compare_func = self.rule_map[self.rule] def before_run(self, runner): if not self.out_dir: self.out_dir = runner.work_dir self.file_client = FileClient.infer_client(self.file_client_args, self.out_dir) # if `self.out_dir` is not equal to `runner.work_dir`, it means that # `self.out_dir` is set so the final `self.out_dir` is the # concatenation of `self.out_dir` and the last level directory of # `runner.work_dir` if self.out_dir != runner.work_dir: basename = osp.basename(runner.work_dir.rstrip(osp.sep)) self.out_dir = self.file_client.join_path(self.out_dir, basename) runner.logger.info( (f'The best checkpoint will be saved to {self.out_dir} by ' f'{self.file_client.name}')) if self.save_best is not None: if runner.meta is None: warnings.warn('runner.meta is None. Creating an empty one.') runner.meta = dict() runner.meta.setdefault('hook_msgs', dict()) self.best_ckpt_path = runner.meta['hook_msgs'].get( 'best_ckpt', None) def before_train_iter(self, runner): """Evaluate the model only at the start of training by iteration.""" if self.by_epoch or not self.initial_flag: return if self.start is not None and runner.iter >= self.start: self.after_train_iter(runner) self.initial_flag = False def before_train_epoch(self, runner): """Evaluate the model only at the start of training by epoch.""" if not (self.by_epoch and self.initial_flag): return if self.start is not None and runner.epoch >= self.start: self.after_train_epoch(runner) self.initial_flag = False def after_train_iter(self, runner): """Called after every training iter to evaluate the results.""" if not self.by_epoch and self._should_evaluate(runner): # Because the priority of EvalHook is higher than LoggerHook, the # training log and the evaluating log are mixed. Therefore, # we need to dump the training log and clear it before evaluating # log is generated. In addition, this problem will only appear in # `IterBasedRunner` whose `self.by_epoch` is False, because # `EpochBasedRunner` whose `self.by_epoch` is True calls # `_do_evaluate` in `after_train_epoch` stage, and at this stage # the training log has been printed, so it will not cause any # problem. 
more details at # https://github.com/open-mmlab/mmsegmentation/issues/694 for hook in runner._hooks: if isinstance(hook, LoggerHook): hook.after_train_iter(runner) runner.log_buffer.clear() self._do_evaluate(runner) def after_train_epoch(self, runner): """Called after every training epoch to evaluate the results.""" if self.by_epoch and self._should_evaluate(runner): self._do_evaluate(runner) def _do_evaluate(self, runner): """perform evaluation and save ckpt.""" results = self.test_fn(runner.model, self.dataloader) runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) key_score = self.evaluate(runner, results) # the key_score may be `None` so it needs to skip the action to save # the best checkpoint if self.save_best and key_score: self._save_ckpt(runner, key_score) def _should_evaluate(self, runner): """Judge whether to perform evaluation. Here is the rule to judge whether to perform evaluation: 1. It will not perform evaluation during the epoch/iteration interval, which is determined by ``self.interval``. 2. It will not perform evaluation if the start time is larger than the current time. 3. It will not perform evaluation when the current time is larger than the start time but falls in the middle of an epoch/iteration interval. Returns: bool: The flag indicating whether to perform evaluation. """ if self.by_epoch: current = runner.epoch check_time = self.every_n_epochs else: current = runner.iter check_time = self.every_n_iters if self.start is None: if not check_time(runner, self.interval): # No evaluation during the interval. return False elif (current + 1) < self.start: # No evaluation if start is larger than the current time. return False else: # Evaluation only at epochs/iters 3, 5, 7... # if start==3 and interval==2 if (current + 1 - self.start) % self.interval: return False return True def _save_ckpt(self, runner, key_score): """Save the best checkpoint. It will compare the score according to the compare function, write related information (best score, best checkpoint path) and save the best checkpoint into ``work_dir``. """ if self.by_epoch: current = f'epoch_{runner.epoch + 1}' cur_type, cur_time = 'epoch', runner.epoch + 1 else: current = f'iter_{runner.iter + 1}' cur_type, cur_time = 'iter', runner.iter + 1 best_score = runner.meta['hook_msgs'].get( 'best_score', self.init_value_map[self.rule]) if self.compare_func(key_score, best_score): best_score = key_score runner.meta['hook_msgs']['best_score'] = best_score if self.best_ckpt_path and self.file_client.isfile( self.best_ckpt_path): self.file_client.remove(self.best_ckpt_path) runner.logger.info( (f'The previous best checkpoint {self.best_ckpt_path} was ' 'removed')) best_ckpt_name = f'best_{self.key_indicator}_{current}.pth' self.best_ckpt_path = self.file_client.join_path( self.out_dir, best_ckpt_name) runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path runner.save_checkpoint( self.out_dir, best_ckpt_name, create_symlink=False) runner.logger.info( f'Now best checkpoint is saved as {best_ckpt_name}.') runner.logger.info( f'Best {self.key_indicator} is {best_score:0.4f} ' f'at {cur_time} {cur_type}.') def evaluate(self, runner, results): """Evaluate the results. Args: runner (:obj:`mmcv.Runner`): The underlying training runner. results (list): Output results. 
""" eval_res = self.dataloader.dataset.evaluate( results, logger=runner.logger, **self.eval_kwargs) for name, val in eval_res.items(): runner.log_buffer.output[name] = val runner.log_buffer.ready = True if self.save_best is not None: # If the performance of model is pool, the `eval_res` may be an # empty dict and it will raise exception when `self.save_best` is # not None. More details at # https://github.com/open-mmlab/mmdetection/issues/6265. if not eval_res: warnings.warn( 'Since `eval_res` is an empty dict, the behavior to save ' 'the best checkpoint will be skipped in this evaluation.') return None if self.key_indicator == 'auto': # infer from eval_results self._init_rule(self.rule, list(eval_res.keys())[0]) return eval_res[self.key_indicator] return None class DistEvalHook(EvalHook): """Distributed evaluation hook. This hook will regularly perform evaluation in a given interval when performing in distributed environment. Args: dataloader (DataLoader): A PyTorch dataloader, whose dataset has implemented ``evaluate`` function. start (int | None, optional): Evaluation starting epoch. It enables evaluation before the training starts if ``start`` <= the resuming epoch. If None, whether to evaluate is merely decided by ``interval``. Default: None. interval (int): Evaluation interval. Default: 1. by_epoch (bool): Determine perform evaluation by epoch or by iteration. If set to True, it will perform by epoch. Otherwise, by iteration. default: True. save_best (str, optional): If a metric is specified, it would measure the best checkpoint during evaluation. The information about best checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep best score value and best checkpoint path, which will be also loaded when resume checkpoint. Options are the evaluation metrics on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox detection and instance segmentation. ``AR@100`` for proposal recall. If ``save_best`` is ``auto``, the first key of the returned ``OrderedDict`` result will be used. Default: None. rule (str | None, optional): Comparison rule for best score. If set to None, it will infer a reasonable rule. Keys such as 'acc', 'top' .etc will be inferred by 'greater' rule. Keys contain 'loss' will be inferred by 'less' rule. Options are 'greater', 'less', None. Default: None. test_fn (callable, optional): test a model with samples from a dataloader in a multi-gpu manner, and return the test results. If ``None``, the default test function ``mmcv.engine.multi_gpu_test`` will be used. (default: ``None``) tmpdir (str | None): Temporary directory to save the results of all processes. Default: None. gpu_collect (bool): Whether to use gpu or cpu to collect results. Default: False. broadcast_bn_buffer (bool): Whether to broadcast the buffer(running_mean and running_var) of rank 0 to other rank before evaluation. Default: True. out_dir (str, optional): The root directory to save checkpoints. If not specified, `runner.work_dir` will be used by default. If specified, the `out_dir` will be the concatenation of `out_dir` and the last level directory of `runner.work_dir`. file_client_args (dict): Arguments to instantiate a FileClient. See :class:`mmcv.fileio.FileClient` for details. Default: None. **eval_kwargs: Evaluation arguments fed into the evaluate function of the dataset. 
""" def __init__(self, dataloader, start=None, interval=1, by_epoch=True, save_best=None, rule=None, test_fn=None, greater_keys=None, less_keys=None, broadcast_bn_buffer=True, tmpdir=None, gpu_collect=False, out_dir=None, file_client_args=None, **eval_kwargs): if test_fn is None: from annotator.uniformer.mmcv.engine import multi_gpu_test test_fn = multi_gpu_test super().__init__( dataloader, start=start, interval=interval, by_epoch=by_epoch, save_best=save_best, rule=rule, test_fn=test_fn, greater_keys=greater_keys, less_keys=less_keys, out_dir=out_dir, file_client_args=file_client_args, **eval_kwargs) self.broadcast_bn_buffer = broadcast_bn_buffer self.tmpdir = tmpdir self.gpu_collect = gpu_collect def _do_evaluate(self, runner): """perform evaluation and save ckpt.""" # Synchronization of BatchNorm's buffer (running_mean # and running_var) is not supported in the DDP of pytorch, # which may cause the inconsistent performance of models in # different ranks, so we broadcast BatchNorm's buffers # of rank 0 to other ranks to avoid this. if self.broadcast_bn_buffer: model = runner.model for name, module in model.named_modules(): if isinstance(module, _BatchNorm) and module.track_running_stats: dist.broadcast(module.running_var, 0) dist.broadcast(module.running_mean, 0) tmpdir = self.tmpdir if tmpdir is None: tmpdir = osp.join(runner.work_dir, '.eval_hook') results = self.test_fn( runner.model, self.dataloader, tmpdir=tmpdir, gpu_collect=self.gpu_collect) if runner.rank == 0: print('\n') runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) key_score = self.evaluate(runner, results) # the key_score may be `None` so it needs to skip the action to # save the best checkpoint if self.save_best and key_score: self._save_ckpt(runner, key_score)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/evaluation.py
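A configuration sketch for the two hooks above; `val_loader` is a placeholder dataloader whose dataset implements evaluate() returning a dict such as {'mIoU': 0.71}:

# Evaluate every epoch and keep the single best checkpoint by mIoU.
# 'mIoU' is in _default_greater_keys, so the 'greater' rule is inferred,
# and best_score/best_ckpt survive restarts via runner.meta['hook_msgs'].
runner.register_hook(EvalHook(val_loader, interval=1, save_best='mIoU'))
# Distributed variant: gather predictions through a tmpdir on shared disk.
runner.register_hook(
    DistEvalHook(val_loader, interval=1, save_best='mIoU',
                 tmpdir='./.eval_tmp', gpu_collect=False))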
# Copyright (c) OpenMMLab. All rights reserved. from annotator.uniformer.mmcv.utils import Registry, is_method_overridden HOOKS = Registry('hook') class Hook: stages = ('before_run', 'before_train_epoch', 'before_train_iter', 'after_train_iter', 'after_train_epoch', 'before_val_epoch', 'before_val_iter', 'after_val_iter', 'after_val_epoch', 'after_run') def before_run(self, runner): pass def after_run(self, runner): pass def before_epoch(self, runner): pass def after_epoch(self, runner): pass def before_iter(self, runner): pass def after_iter(self, runner): pass def before_train_epoch(self, runner): self.before_epoch(runner) def before_val_epoch(self, runner): self.before_epoch(runner) def after_train_epoch(self, runner): self.after_epoch(runner) def after_val_epoch(self, runner): self.after_epoch(runner) def before_train_iter(self, runner): self.before_iter(runner) def before_val_iter(self, runner): self.before_iter(runner) def after_train_iter(self, runner): self.after_iter(runner) def after_val_iter(self, runner): self.after_iter(runner) def every_n_epochs(self, runner, n): return (runner.epoch + 1) % n == 0 if n > 0 else False def every_n_inner_iters(self, runner, n): return (runner.inner_iter + 1) % n == 0 if n > 0 else False def every_n_iters(self, runner, n): return (runner.iter + 1) % n == 0 if n > 0 else False def end_of_epoch(self, runner): return runner.inner_iter + 1 == len(runner.data_loader) def is_last_epoch(self, runner): return runner.epoch + 1 == runner._max_epochs def is_last_iter(self, runner): return runner.iter + 1 == runner._max_iters def get_triggered_stages(self): trigger_stages = set() for stage in Hook.stages: if is_method_overridden(stage, Hook, self): trigger_stages.add(stage) # some methods will be triggered in multi stages # use this dict to map method to stages. method_stages_map = { 'before_epoch': ['before_train_epoch', 'before_val_epoch'], 'after_epoch': ['after_train_epoch', 'after_val_epoch'], 'before_iter': ['before_train_iter', 'before_val_iter'], 'after_iter': ['after_train_iter', 'after_val_iter'], } for method, map_stages in method_stages_map.items(): if is_method_overridden(method, Hook, self): trigger_stages.update(map_stages) return [stage for stage in Hook.stages if stage in trigger_stages]
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/hook.py
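As a sketch of the intended extension pattern, a hypothetical hook (the class name and behavior are illustrative, not part of mmcv) that overrides only the single stage it cares about:

# Hook's stage methods are all no-ops, so a subclass overrides just one
# stage; get_triggered_stages() then reports exactly that stage.
import math

@HOOKS.register_module()
class NanGuardHook(Hook):  # hypothetical example hook
    def after_train_iter(self, runner):
        if math.isnan(float(runner.outputs['loss'])):
            runner.logger.error(f'NaN loss at iter {runner.iter + 1}')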
# Copyright (c) OpenMMLab. All rights reserved. from .checkpoint import CheckpointHook from .closure import ClosureHook from .ema import EMAHook from .evaluation import DistEvalHook, EvalHook from .hook import HOOKS, Hook from .iter_timer import IterTimerHook from .logger import (DvcliveLoggerHook, LoggerHook, MlflowLoggerHook, NeptuneLoggerHook, PaviLoggerHook, TensorboardLoggerHook, TextLoggerHook, WandbLoggerHook) from .lr_updater import LrUpdaterHook from .memory import EmptyCacheHook from .momentum_updater import MomentumUpdaterHook from .optimizer import (Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook, GradientCumulativeOptimizerHook, OptimizerHook) from .profiler import ProfilerHook from .sampler_seed import DistSamplerSeedHook from .sync_buffer import SyncBuffersHook __all__ = [ 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', 'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook', 'NeptuneLoggerHook', 'WandbLoggerHook', 'DvcliveLoggerHook', 'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook', 'EvalHook', 'DistEvalHook', 'ProfilerHook', 'GradientCumulativeOptimizerHook', 'GradientCumulativeFp16OptimizerHook' ]
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/__init__.py
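Since every class above registers itself in HOOKS, hooks can also be constructed from plain config dicts; a sketch assuming mmcv's standard build_from_cfg helper:

# 'type' selects the registered class; remaining keys become kwargs.
from annotator.uniformer.mmcv.utils import build_from_cfg
from annotator.uniformer.mmcv.runner.hooks import HOOKS

ckpt_hook = build_from_cfg(dict(type='CheckpointHook', interval=1), HOOKS)
ema_hook = build_from_cfg(dict(type='EMAHook', momentum=0.0002), HOOKS)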
# Copyright (c) OpenMMLab. All rights reserved. from .hook import HOOKS, Hook @HOOKS.register_module() class DistSamplerSeedHook(Hook): """Set the epoch of the data-loading sampler for distributed training. In distributed training, it is only useful in conjunction with :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same purpose with :obj:`IterLoader`. """ def before_epoch(self, runner): if hasattr(runner.data_loader.sampler, 'set_epoch'): # in case the data loader uses `SequentialSampler` in PyTorch runner.data_loader.sampler.set_epoch(runner.epoch) elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'): # the batch sampler in PyTorch wraps the sampler as one of its # attributes. runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/sampler_seed.py
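The hook exists because DistributedSampler derives its shuffle order from the epoch number; a sketch of the behavior it relies on (`train_dataset` and `epoch` are placeholders):

# DistributedSampler seeds its permutation with (seed + epoch); without
# set_epoch, every epoch replays the same shuffle on every rank.
from torch.utils.data.distributed import DistributedSampler

sampler = DistributedSampler(train_dataset, shuffle=True)
sampler.set_epoch(epoch)  # what DistSamplerSeedHook.before_epoch boils down to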
# Copyright (c) OpenMMLab. All rights reserved. import copy from collections import defaultdict from itertools import chain from torch.nn.utils import clip_grad from annotator.uniformer.mmcv.utils import TORCH_VERSION, _BatchNorm, digit_version from ..dist_utils import allreduce_grads from ..fp16_utils import LossScaler, wrap_fp16_model from .hook import HOOKS, Hook try: # If PyTorch version >= 1.6.0, torch.cuda.amp.GradScaler would be imported # and used; otherwise, auto fp16 will adopt mmcv's implementation. from torch.cuda.amp import GradScaler except ImportError: pass @HOOKS.register_module() class OptimizerHook(Hook): def __init__(self, grad_clip=None): self.grad_clip = grad_clip def clip_grads(self, params): params = list( filter(lambda p: p.requires_grad and p.grad is not None, params)) if len(params) > 0: return clip_grad.clip_grad_norm_(params, **self.grad_clip) def after_train_iter(self, runner): runner.optimizer.zero_grad() runner.outputs['loss'].backward() if self.grad_clip is not None: grad_norm = self.clip_grads(runner.model.parameters()) if grad_norm is not None: # Add grad norm to the logger runner.log_buffer.update({'grad_norm': float(grad_norm)}, runner.outputs['num_samples']) runner.optimizer.step() @HOOKS.register_module() class GradientCumulativeOptimizerHook(OptimizerHook): """Optimizer Hook implements multi-iters gradient cumulating. Args: cumulative_iters (int, optional): Num of gradient cumulative iters. The optimizer will step every `cumulative_iters` iters. Defaults to 1. Examples: >>> # Use cumulative_iters to simulate a large batch size >>> # It is helpful when the hardware cannot handle a large batch size. >>> loader = DataLoader(data, batch_size=64) >>> optim_hook = GradientCumulativeOptimizerHook(cumulative_iters=4) >>> # almost equals to >>> loader = DataLoader(data, batch_size=256) >>> optim_hook = OptimizerHook() """ def __init__(self, cumulative_iters=1, **kwargs): super(GradientCumulativeOptimizerHook, self).__init__(**kwargs) assert isinstance(cumulative_iters, int) and cumulative_iters > 0, \ f'cumulative_iters only accepts positive int, but got ' \ f'{type(cumulative_iters)} instead.' self.cumulative_iters = cumulative_iters self.divisible_iters = 0 self.remainder_iters = 0 self.initialized = False def has_batch_norm(self, module): if isinstance(module, _BatchNorm): return True for m in module.children(): if self.has_batch_norm(m): return True return False def _init(self, runner): if runner.iter % self.cumulative_iters != 0: runner.logger.warning( 'Resume iter number is not divisible by cumulative_iters in ' 'GradientCumulativeOptimizerHook, which means the gradient of ' 'some iters is lost and the result may be influenced slightly.' 
) if self.has_batch_norm(runner.model) and self.cumulative_iters > 1: runner.logger.warning( 'GradientCumulativeOptimizerHook may slightly decrease ' 'performance if the model has BatchNorm layers.') residual_iters = runner.max_iters - runner.iter self.divisible_iters = ( residual_iters // self.cumulative_iters * self.cumulative_iters) self.remainder_iters = residual_iters - self.divisible_iters self.initialized = True def after_train_iter(self, runner): if not self.initialized: self._init(runner) if runner.iter < self.divisible_iters: loss_factor = self.cumulative_iters else: loss_factor = self.remainder_iters loss = runner.outputs['loss'] loss = loss / loss_factor loss.backward() if (self.every_n_iters(runner, self.cumulative_iters) or self.is_last_iter(runner)): if self.grad_clip is not None: grad_norm = self.clip_grads(runner.model.parameters()) if grad_norm is not None: # Add grad norm to the logger runner.log_buffer.update({'grad_norm': float(grad_norm)}, runner.outputs['num_samples']) runner.optimizer.step() runner.optimizer.zero_grad() if (TORCH_VERSION != 'parrots' and digit_version(TORCH_VERSION) >= digit_version('1.6.0')): @HOOKS.register_module() class Fp16OptimizerHook(OptimizerHook): """FP16 optimizer hook (using PyTorch's implementation). If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, to take care of the optimization procedure. Args: loss_scale (float | str | dict): Scale factor configuration. If loss_scale is a float, static loss scaling will be used with the specified scale. If loss_scale is a string, it must be 'dynamic', then dynamic loss scaling will be used. It can also be a dict containing arguments of GradScaler. Defaults to 512. For PyTorch >= 1.6, mmcv uses the official implementation of GradScaler. If you use a dict version of loss_scale to create GradScaler, please refer to: https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler for the parameters. Examples: >>> loss_scale = dict( ... init_scale=65536.0, ... growth_factor=2.0, ... backoff_factor=0.5, ... growth_interval=2000 ... 
) >>> optimizer_hook = Fp16OptimizerHook(loss_scale=loss_scale) """ def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1, loss_scale=512., distributed=True): self.grad_clip = grad_clip self.coalesce = coalesce self.bucket_size_mb = bucket_size_mb self.distributed = distributed self._scale_update_param = None if loss_scale == 'dynamic': self.loss_scaler = GradScaler() elif isinstance(loss_scale, float): self._scale_update_param = loss_scale self.loss_scaler = GradScaler(init_scale=loss_scale) elif isinstance(loss_scale, dict): self.loss_scaler = GradScaler(**loss_scale) else: raise ValueError('loss_scale must be of type float, dict, or ' f'"dynamic", got {loss_scale}') def before_run(self, runner): """Preparing steps before Mixed Precision Training.""" # wrap model mode to fp16 wrap_fp16_model(runner.model) # resume from state dict if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']: scaler_state_dict = runner.meta['fp16']['loss_scaler'] self.loss_scaler.load_state_dict(scaler_state_dict) def copy_grads_to_fp32(self, fp16_net, fp32_weights): """Copy gradients from fp16 model to fp32 weight copy.""" for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()): if fp16_param.grad is not None: if fp32_param.grad is None: fp32_param.grad = fp32_param.data.new( fp32_param.size()) fp32_param.grad.copy_(fp16_param.grad) def copy_params_to_fp16(self, fp16_net, fp32_weights): """Copy updated params from fp32 weight copy to fp16 model.""" for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights): fp16_param.data.copy_(fp32_param.data) def after_train_iter(self, runner): """Backward optimization steps for Mixed Precision Training. For dynamic loss scaling, please refer to https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler. 1. Scale the loss by a scale factor. 2. Backward the loss to obtain the gradients. 3. Unscale the optimizer’s gradient tensors. 4. Call optimizer.step() and update scale factor. 5. Save loss_scaler state_dict for resume purpose. """ # clear grads of last iteration runner.model.zero_grad() runner.optimizer.zero_grad() self.loss_scaler.scale(runner.outputs['loss']).backward() self.loss_scaler.unscale_(runner.optimizer) # grad clip if self.grad_clip is not None: grad_norm = self.clip_grads(runner.model.parameters()) if grad_norm is not None: # Add grad norm to the logger runner.log_buffer.update({'grad_norm': float(grad_norm)}, runner.outputs['num_samples']) # backward and update scaler self.loss_scaler.step(runner.optimizer) self.loss_scaler.update(self._scale_update_param) # save state_dict of loss_scaler runner.meta.setdefault( 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() @HOOKS.register_module() class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook, Fp16OptimizerHook): """Fp16 optimizer Hook (using PyTorch's implementation) implements multi-iters gradient cumulating. If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, to take care of the optimization procedure. 
""" def __init__(self, *args, **kwargs): super(GradientCumulativeFp16OptimizerHook, self).__init__(*args, **kwargs) def after_train_iter(self, runner): if not self.initialized: self._init(runner) if runner.iter < self.divisible_iters: loss_factor = self.cumulative_iters else: loss_factor = self.remainder_iters loss = runner.outputs['loss'] loss = loss / loss_factor self.loss_scaler.scale(loss).backward() if (self.every_n_iters(runner, self.cumulative_iters) or self.is_last_iter(runner)): # copy fp16 grads in the model to fp32 params in the optimizer self.loss_scaler.unscale_(runner.optimizer) if self.grad_clip is not None: grad_norm = self.clip_grads(runner.model.parameters()) if grad_norm is not None: # Add grad norm to the logger runner.log_buffer.update( {'grad_norm': float(grad_norm)}, runner.outputs['num_samples']) # backward and update scaler self.loss_scaler.step(runner.optimizer) self.loss_scaler.update(self._scale_update_param) # save state_dict of loss_scaler runner.meta.setdefault( 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() # clear grads runner.model.zero_grad() runner.optimizer.zero_grad() else: @HOOKS.register_module() class Fp16OptimizerHook(OptimizerHook): """FP16 optimizer hook (mmcv's implementation). The steps of fp16 optimizer is as follows. 1. Scale the loss value. 2. BP in the fp16 model. 2. Copy gradients from fp16 model to fp32 weights. 3. Update fp32 weights. 4. Copy updated parameters from fp32 weights to fp16 model. Refer to https://arxiv.org/abs/1710.03740 for more details. Args: loss_scale (float | str | dict): Scale factor configuration. If loss_scale is a float, static loss scaling will be used with the specified scale. If loss_scale is a string, it must be 'dynamic', then dynamic loss scaling will be used. It can also be a dict containing arguments of LossScaler. Defaults to 512. """ def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1, loss_scale=512., distributed=True): self.grad_clip = grad_clip self.coalesce = coalesce self.bucket_size_mb = bucket_size_mb self.distributed = distributed if loss_scale == 'dynamic': self.loss_scaler = LossScaler(mode='dynamic') elif isinstance(loss_scale, float): self.loss_scaler = LossScaler( init_scale=loss_scale, mode='static') elif isinstance(loss_scale, dict): self.loss_scaler = LossScaler(**loss_scale) else: raise ValueError('loss_scale must be of type float, dict, or ' f'"dynamic", got {loss_scale}') def before_run(self, runner): """Preparing steps before Mixed Precision Training. 1. Make a master copy of fp32 weights for optimization. 2. Convert the main model from fp32 to fp16. 
""" # keep a copy of fp32 weights old_groups = runner.optimizer.param_groups runner.optimizer.param_groups = copy.deepcopy( runner.optimizer.param_groups) state = defaultdict(dict) p_map = { old_p: p for old_p, p in zip( chain(*(g['params'] for g in old_groups)), chain(*(g['params'] for g in runner.optimizer.param_groups))) } for k, v in runner.optimizer.state.items(): state[p_map[k]] = v runner.optimizer.state = state # convert model to fp16 wrap_fp16_model(runner.model) # resume from state dict if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']: scaler_state_dict = runner.meta['fp16']['loss_scaler'] self.loss_scaler.load_state_dict(scaler_state_dict) def copy_grads_to_fp32(self, fp16_net, fp32_weights): """Copy gradients from fp16 model to fp32 weight copy.""" for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()): if fp16_param.grad is not None: if fp32_param.grad is None: fp32_param.grad = fp32_param.data.new( fp32_param.size()) fp32_param.grad.copy_(fp16_param.grad) def copy_params_to_fp16(self, fp16_net, fp32_weights): """Copy updated params from fp32 weight copy to fp16 model.""" for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights): fp16_param.data.copy_(fp32_param.data) def after_train_iter(self, runner): """Backward optimization steps for Mixed Precision Training. For dynamic loss scaling, please refer `loss_scalar.py` 1. Scale the loss by a scale factor. 2. Backward the loss to obtain the gradients (fp16). 3. Copy gradients from the model to the fp32 weight copy. 4. Scale the gradients back and update the fp32 weight copy. 5. Copy back the params from fp32 weight copy to the fp16 model. 6. Save loss_scaler state_dict for resume purpose. """ # clear grads of last iteration runner.model.zero_grad() runner.optimizer.zero_grad() # scale the loss value scaled_loss = runner.outputs['loss'] * self.loss_scaler.loss_scale scaled_loss.backward() # copy fp16 grads in the model to fp32 params in the optimizer fp32_weights = [] for param_group in runner.optimizer.param_groups: fp32_weights += param_group['params'] self.copy_grads_to_fp32(runner.model, fp32_weights) # allreduce grads if self.distributed: allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb) has_overflow = self.loss_scaler.has_overflow(fp32_weights) # if has overflow, skip this iteration if not has_overflow: # scale the gradients back for param in fp32_weights: if param.grad is not None: param.grad.div_(self.loss_scaler.loss_scale) if self.grad_clip is not None: grad_norm = self.clip_grads(fp32_weights) if grad_norm is not None: # Add grad norm to the logger runner.log_buffer.update( {'grad_norm': float(grad_norm)}, runner.outputs['num_samples']) # update fp32 params runner.optimizer.step() # copy fp32 params to the fp16 model self.copy_params_to_fp16(runner.model, fp32_weights) self.loss_scaler.update_scale(has_overflow) if has_overflow: runner.logger.warning('Check overflow, downscale loss scale ' f'to {self.loss_scaler.cur_scale}') # save state_dict of loss_scaler runner.meta.setdefault( 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() @HOOKS.register_module() class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook, Fp16OptimizerHook): """Fp16 optimizer Hook (using mmcv implementation) implements multi- iters gradient cumulating.""" def __init__(self, *args, **kwargs): super(GradientCumulativeFp16OptimizerHook, self).__init__(*args, **kwargs) def after_train_iter(self, runner): if not self.initialized: self._init(runner) if runner.iter < 
self.divisible_iters: loss_factor = self.cumulative_iters else: loss_factor = self.remainder_iters loss = runner.outputs['loss'] loss = loss / loss_factor # scale the loss value scaled_loss = loss * self.loss_scaler.loss_scale scaled_loss.backward() if (self.every_n_iters(runner, self.cumulative_iters) or self.is_last_iter(runner)): # copy fp16 grads in the model to fp32 params in the optimizer fp32_weights = [] for param_group in runner.optimizer.param_groups: fp32_weights += param_group['params'] self.copy_grads_to_fp32(runner.model, fp32_weights) # allreduce grads if self.distributed: allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb) has_overflow = self.loss_scaler.has_overflow(fp32_weights) # if has overflow, skip this iteration if not has_overflow: # scale the gradients back for param in fp32_weights: if param.grad is not None: param.grad.div_(self.loss_scaler.loss_scale) if self.grad_clip is not None: grad_norm = self.clip_grads(fp32_weights) if grad_norm is not None: # Add grad norm to the logger runner.log_buffer.update( {'grad_norm': float(grad_norm)}, runner.outputs['num_samples']) # update fp32 params runner.optimizer.step() # copy fp32 params to the fp16 model self.copy_params_to_fp16(runner.model, fp32_weights) else: runner.logger.warning( 'Check overflow, downscale loss scale ' f'to {self.loss_scaler.cur_scale}') self.loss_scaler.update_scale(has_overflow) # save state_dict of loss_scaler runner.meta.setdefault( 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() # clear grads runner.model.zero_grad() runner.optimizer.zero_grad()
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/optimizer.py
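Illustrative configurations for the optimizer hooks above; the grad_clip dict is forwarded to torch.nn.utils.clip_grad.clip_grad_norm_, so its keys follow that function's signature:

# Plain optimizer step with gradient clipping at global norm 35.
opt_hook = OptimizerHook(grad_clip=dict(max_norm=35, norm_type=2))
# Accumulate gradients over 4 iterations to emulate a 4x larger batch.
cum_hook = GradientCumulativeOptimizerHook(cumulative_iters=4)
# Mixed precision with dynamic loss scaling (PyTorch >= 1.6 branch above).
fp16_hook = Fp16OptimizerHook(loss_scale='dynamic', distributed=False)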
# Copyright (c) OpenMMLab. All rights reserved. import numbers from math import cos, pi import annotator.uniformer.mmcv as mmcv from .hook import HOOKS, Hook class LrUpdaterHook(Hook): """LR Scheduler in MMCV. Args: by_epoch (bool): LR changes epoch by epoch warmup (string): Type of warmup used. It can be None (use no warmup), 'constant', 'linear' or 'exp' warmup_iters (int): The number of iterations or epochs that warmup lasts warmup_ratio (float): LR used at the beginning of warmup equals to warmup_ratio * initial_lr warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters means the number of epochs that warmup lasts, otherwise means the number of iterations that warmup lasts """ def __init__(self, by_epoch=True, warmup=None, warmup_iters=0, warmup_ratio=0.1, warmup_by_epoch=False): # validate the "warmup" argument if warmup is not None: if warmup not in ['constant', 'linear', 'exp']: raise ValueError( f'"{warmup}" is not a supported type for warming up, valid' ' types are "constant", "linear" and "exp"') if warmup is not None: assert warmup_iters > 0, \ '"warmup_iters" must be a positive integer' assert 0 < warmup_ratio <= 1.0, \ '"warmup_ratio" must be in range (0,1]' self.by_epoch = by_epoch self.warmup = warmup self.warmup_iters = warmup_iters self.warmup_ratio = warmup_ratio self.warmup_by_epoch = warmup_by_epoch if self.warmup_by_epoch: self.warmup_epochs = self.warmup_iters self.warmup_iters = None else: self.warmup_epochs = None self.base_lr = [] # initial lr for all param groups self.regular_lr = [] # expected lr if no warming up is performed def _set_lr(self, runner, lr_groups): if isinstance(runner.optimizer, dict): for k, optim in runner.optimizer.items(): for param_group, lr in zip(optim.param_groups, lr_groups[k]): param_group['lr'] = lr else: for param_group, lr in zip(runner.optimizer.param_groups, lr_groups): param_group['lr'] = lr def get_lr(self, runner, base_lr): raise NotImplementedError def get_regular_lr(self, runner): if isinstance(runner.optimizer, dict): lr_groups = {} for k in runner.optimizer.keys(): _lr_group = [ self.get_lr(runner, _base_lr) for _base_lr in self.base_lr[k] ] lr_groups.update({k: _lr_group}) return lr_groups else: return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr] def get_warmup_lr(self, cur_iters): def _get_warmup_lr(cur_iters, regular_lr): if self.warmup == 'constant': warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr] elif self.warmup == 'linear': k = (1 - cur_iters / self.warmup_iters) * (1 - self.warmup_ratio) warmup_lr = [_lr * (1 - k) for _lr in regular_lr] elif self.warmup == 'exp': k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters) warmup_lr = [_lr * k for _lr in regular_lr] return warmup_lr if isinstance(self.regular_lr, dict): lr_groups = {} for key, regular_lr in self.regular_lr.items(): lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr) return lr_groups else: return _get_warmup_lr(cur_iters, self.regular_lr) def before_run(self, runner): # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved, # it will be set according to the optimizer params if isinstance(runner.optimizer, dict): self.base_lr = {} for k, optim in runner.optimizer.items(): for group in optim.param_groups: group.setdefault('initial_lr', group['lr']) _base_lr = [ group['initial_lr'] for group in optim.param_groups ] self.base_lr.update({k: _base_lr}) else: for group in runner.optimizer.param_groups: group.setdefault('initial_lr', group['lr']) self.base_lr = [ group['initial_lr'] for group in 
runner.optimizer.param_groups ] def before_train_epoch(self, runner): if self.warmup_iters is None: epoch_len = len(runner.data_loader) self.warmup_iters = self.warmup_epochs * epoch_len if not self.by_epoch: return self.regular_lr = self.get_regular_lr(runner) self._set_lr(runner, self.regular_lr) def before_train_iter(self, runner): cur_iter = runner.iter if not self.by_epoch: self.regular_lr = self.get_regular_lr(runner) if self.warmup is None or cur_iter >= self.warmup_iters: self._set_lr(runner, self.regular_lr) else: warmup_lr = self.get_warmup_lr(cur_iter) self._set_lr(runner, warmup_lr) elif self.by_epoch: if self.warmup is None or cur_iter > self.warmup_iters: return elif cur_iter == self.warmup_iters: self._set_lr(runner, self.regular_lr) else: warmup_lr = self.get_warmup_lr(cur_iter) self._set_lr(runner, warmup_lr) @HOOKS.register_module() class FixedLrUpdaterHook(LrUpdaterHook): def __init__(self, **kwargs): super(FixedLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): return base_lr @HOOKS.register_module() class StepLrUpdaterHook(LrUpdaterHook): """Step LR scheduler with min_lr clipping. Args: step (int | list[int]): Step to decay the LR. If an int value is given, regard it as the decay interval. If a list is given, decay LR at these steps. gamma (float, optional): Decay LR ratio. Default: 0.1. min_lr (float, optional): Minimum LR value to keep. If LR after decay is lower than `min_lr`, it will be clipped to this value. If None is given, we don't perform lr clipping. Default: None. """ def __init__(self, step, gamma=0.1, min_lr=None, **kwargs): if isinstance(step, list): assert mmcv.is_list_of(step, int) assert all([s > 0 for s in step]) elif isinstance(step, int): assert step > 0 else: raise TypeError('"step" must be a list or integer') self.step = step self.gamma = gamma self.min_lr = min_lr super(StepLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): progress = runner.epoch if self.by_epoch else runner.iter # calculate exponential term if isinstance(self.step, int): exp = progress // self.step else: exp = len(self.step) for i, s in enumerate(self.step): if progress < s: exp = i break lr = base_lr * (self.gamma**exp) if self.min_lr is not None: # clip to a minimum value lr = max(lr, self.min_lr) return lr @HOOKS.register_module() class ExpLrUpdaterHook(LrUpdaterHook): def __init__(self, gamma, **kwargs): self.gamma = gamma super(ExpLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): progress = runner.epoch if self.by_epoch else runner.iter return base_lr * self.gamma**progress @HOOKS.register_module() class PolyLrUpdaterHook(LrUpdaterHook): def __init__(self, power=1., min_lr=0., **kwargs): self.power = power self.min_lr = min_lr super(PolyLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): if self.by_epoch: progress = runner.epoch max_progress = runner.max_epochs else: progress = runner.iter max_progress = runner.max_iters coeff = (1 - progress / max_progress)**self.power return (base_lr - self.min_lr) * coeff + self.min_lr @HOOKS.register_module() class InvLrUpdaterHook(LrUpdaterHook): def __init__(self, gamma, power=1., **kwargs): self.gamma = gamma self.power = power super(InvLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): progress = runner.epoch if self.by_epoch else runner.iter return base_lr * (1 + self.gamma * progress)**(-self.power) @HOOKS.register_module() class CosineAnnealingLrUpdaterHook(LrUpdaterHook): def __init__(self, min_lr=None, 
min_lr_ratio=None, **kwargs): assert (min_lr is None) ^ (min_lr_ratio is None) self.min_lr = min_lr self.min_lr_ratio = min_lr_ratio super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): if self.by_epoch: progress = runner.epoch max_progress = runner.max_epochs else: progress = runner.iter max_progress = runner.max_iters if self.min_lr_ratio is not None: target_lr = base_lr * self.min_lr_ratio else: target_lr = self.min_lr return annealing_cos(base_lr, target_lr, progress / max_progress) @HOOKS.register_module() class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook): """Flat + Cosine lr schedule. Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501 Args: start_percent (float): The percentage of the total training steps after which the learning rate starts to be annealed. The value should be in range [0, 1). Default: 0.75 min_lr (float, optional): The minimum lr. Default: None. min_lr_ratio (float, optional): The ratio of minimum lr to the base lr. Either `min_lr` or `min_lr_ratio` should be specified. Default: None. """ def __init__(self, start_percent=0.75, min_lr=None, min_lr_ratio=None, **kwargs): assert (min_lr is None) ^ (min_lr_ratio is None) if start_percent < 0 or start_percent > 1 or not isinstance( start_percent, float): raise ValueError( 'expected float between 0 and 1 start_percent, but ' f'got {start_percent}') self.start_percent = start_percent self.min_lr = min_lr self.min_lr_ratio = min_lr_ratio super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): if self.by_epoch: start = round(runner.max_epochs * self.start_percent) progress = runner.epoch - start max_progress = runner.max_epochs - start else: start = round(runner.max_iters * self.start_percent) progress = runner.iter - start max_progress = runner.max_iters - start if self.min_lr_ratio is not None: target_lr = base_lr * self.min_lr_ratio else: target_lr = self.min_lr if progress < 0: return base_lr else: return annealing_cos(base_lr, target_lr, progress / max_progress) @HOOKS.register_module() class CosineRestartLrUpdaterHook(LrUpdaterHook): """Cosine annealing with restarts learning rate scheme. Args: periods (list[int]): Periods for each cosine annealing cycle. restart_weights (list[float], optional): Restart weights at each restart iteration. Default: [1]. min_lr (float, optional): The minimum lr. Default: None. min_lr_ratio (float, optional): The ratio of minimum lr to the base lr. Either `min_lr` or `min_lr_ratio` should be specified. Default: None. """ def __init__(self, periods, restart_weights=[1], min_lr=None, min_lr_ratio=None, **kwargs): assert (min_lr is None) ^ (min_lr_ratio is None) self.periods = periods self.min_lr = min_lr self.min_lr_ratio = min_lr_ratio self.restart_weights = restart_weights assert (len(self.periods) == len(self.restart_weights) ), 'periods and restart_weights should have the same length.' 
super(CosineRestartLrUpdaterHook, self).__init__(**kwargs) self.cumulative_periods = [ sum(self.periods[0:i + 1]) for i in range(0, len(self.periods)) ] def get_lr(self, runner, base_lr): if self.by_epoch: progress = runner.epoch else: progress = runner.iter if self.min_lr_ratio is not None: target_lr = base_lr * self.min_lr_ratio else: target_lr = self.min_lr idx = get_position_from_periods(progress, self.cumulative_periods) current_weight = self.restart_weights[idx] nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1] current_periods = self.periods[idx] alpha = min((progress - nearest_restart) / current_periods, 1) return annealing_cos(base_lr, target_lr, alpha, current_weight) def get_position_from_periods(iteration, cumulative_periods): """Get the position from a period list. It will return the index of the right-closest number in the period list. For example, the cumulative_periods = [100, 200, 300, 400], if iteration == 50, return 0; if iteration == 210, return 2; if iteration == 300, return 3. Args: iteration (int): Current iteration. cumulative_periods (list[int]): Cumulative period list. Returns: int: The position of the right-closest number in the period list. """ for i, period in enumerate(cumulative_periods): if iteration < period: return i raise ValueError(f'Current iteration {iteration} exceeds ' f'cumulative_periods {cumulative_periods}') @HOOKS.register_module() class CyclicLrUpdaterHook(LrUpdaterHook): """Cyclic LR Scheduler. Implement the cyclical learning rate policy (CLR) described in https://arxiv.org/pdf/1506.01186.pdf Different from the original paper, we use cosine annealing rather than triangular policy inside a cycle. This improves the performance in the 3D detection area. Args: by_epoch (bool): Whether to update LR by epoch. target_ratio (tuple[float]): Relative ratio of the highest LR and the lowest LR to the initial LR. cyclic_times (int): Number of cycles during training step_ratio_up (float): The ratio of the increasing process of LR in the total cycle. anneal_strategy (str): {'cos', 'linear'} Specifies the annealing strategy: 'cos' for cosine annealing, 'linear' for linear annealing. Default: 'cos'. 
""" def __init__(self, by_epoch=False, target_ratio=(10, 1e-4), cyclic_times=1, step_ratio_up=0.4, anneal_strategy='cos', **kwargs): if isinstance(target_ratio, float): target_ratio = (target_ratio, target_ratio / 1e5) elif isinstance(target_ratio, tuple): target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \ if len(target_ratio) == 1 else target_ratio else: raise ValueError('target_ratio should be either float ' f'or tuple, got {type(target_ratio)}') assert len(target_ratio) == 2, \ '"target_ratio" must be list or tuple of two floats' assert 0 <= step_ratio_up < 1.0, \ '"step_ratio_up" must be in range [0,1)' self.target_ratio = target_ratio self.cyclic_times = cyclic_times self.step_ratio_up = step_ratio_up self.lr_phases = [] # init lr_phases # validate anneal_strategy if anneal_strategy not in ['cos', 'linear']: raise ValueError('anneal_strategy must be one of "cos" or ' f'"linear", instead got {anneal_strategy}') elif anneal_strategy == 'cos': self.anneal_func = annealing_cos elif anneal_strategy == 'linear': self.anneal_func = annealing_linear assert not by_epoch, \ 'currently only support "by_epoch" = False' super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs) def before_run(self, runner): super(CyclicLrUpdaterHook, self).before_run(runner) # initiate lr_phases # total lr_phases are separated as up and down max_iter_per_phase = runner.max_iters // self.cyclic_times iter_up_phase = int(self.step_ratio_up * max_iter_per_phase) self.lr_phases.append( [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]]) self.lr_phases.append([ iter_up_phase, max_iter_per_phase, max_iter_per_phase, self.target_ratio[0], self.target_ratio[1] ]) def get_lr(self, runner, base_lr): curr_iter = runner.iter for (start_iter, end_iter, max_iter_per_phase, start_ratio, end_ratio) in self.lr_phases: curr_iter %= max_iter_per_phase if start_iter <= curr_iter < end_iter: progress = curr_iter - start_iter return self.anneal_func(base_lr * start_ratio, base_lr * end_ratio, progress / (end_iter - start_iter)) @HOOKS.register_module() class OneCycleLrUpdaterHook(LrUpdaterHook): """One Cycle LR Scheduler. The 1cycle learning rate policy changes the learning rate after every batch. The one cycle learning rate policy is described in https://arxiv.org/pdf/1708.07120.pdf Args: max_lr (float or list): Upper learning rate boundaries in the cycle for each parameter group. total_steps (int, optional): The total number of steps in the cycle. Note that if a value is not provided here, it will be the max_iter of runner. Default: None. pct_start (float): The percentage of the cycle (in number of steps) spent increasing the learning rate. Default: 0.3 anneal_strategy (str): {'cos', 'linear'} Specifies the annealing strategy: 'cos' for cosine annealing, 'linear' for linear annealing. Default: 'cos' div_factor (float): Determines the initial learning rate via initial_lr = max_lr/div_factor Default: 25 final_div_factor (float): Determines the minimum learning rate via min_lr = initial_lr/final_div_factor Default: 1e4 three_phase (bool): If three_phase is True, use a third phase of the schedule to annihilate the learning rate according to final_div_factor instead of modifying the second phase (the first two phases will be symmetrical about the step indicated by pct_start). 
Default: False
    """

    def __init__(self,
                 max_lr,
                 total_steps=None,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 div_factor=25,
                 final_div_factor=1e4,
                 three_phase=False,
                 **kwargs):
        # validate by_epoch, currently only support by_epoch = False
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], \
                'currently only support "by_epoch" = False'
        if not isinstance(max_lr, (numbers.Number, list, dict)):
            raise ValueError('max_lr must be a number, list or dict, '
                             f'but got {type(max_lr)}')
        self._max_lr = max_lr
        if total_steps is not None:
            if not isinstance(total_steps, int):
                raise ValueError('the type of total_steps must be int, but '
                                 f'got {type(total_steps)}')
            self.total_steps = total_steps
        # validate pct_start
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError('expected a float between 0 and 1 for pct_start, '
                             f'but got {pct_start}')
        self.pct_start = pct_start
        # validate anneal_strategy
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError('anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        self.div_factor = div_factor
        self.final_div_factor = final_div_factor
        self.three_phase = three_phase
        self.lr_phases = []  # init lr_phases
        super(OneCycleLrUpdaterHook, self).__init__(**kwargs)

    def before_run(self, runner):
        if hasattr(self, 'total_steps'):
            total_steps = self.total_steps
        else:
            total_steps = runner.max_iters
        if total_steps < runner.max_iters:
            raise ValueError(
                'The total steps must be greater than or equal to max '
                f'iterations {runner.max_iters} of runner, but total steps '
                f'is {total_steps}.')

        if isinstance(runner.optimizer, dict):
            self.base_lr = {}
            for k, optim in runner.optimizer.items():
                _max_lr = format_param(k, optim, self._max_lr)
                self.base_lr[k] = [lr / self.div_factor for lr in _max_lr]
                for group, lr in zip(optim.param_groups, self.base_lr[k]):
                    group.setdefault('initial_lr', lr)
        else:
            k = type(runner.optimizer).__name__
            _max_lr = format_param(k, runner.optimizer, self._max_lr)
            self.base_lr = [lr / self.div_factor for lr in _max_lr]
            for group, lr in zip(runner.optimizer.param_groups, self.base_lr):
                group.setdefault('initial_lr', lr)

        if self.three_phase:
            self.lr_phases.append(
                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
            self.lr_phases.append([
                float(2 * self.pct_start * total_steps) - 2, self.div_factor,
                1
            ])
            self.lr_phases.append(
                [total_steps - 1, 1, 1 / self.final_div_factor])
        else:
            self.lr_phases.append(
                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
            self.lr_phases.append(
                [total_steps - 1, self.div_factor, 1 / self.final_div_factor])

    def get_lr(self, runner, base_lr):
        curr_iter = runner.iter
        start_iter = 0
        for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases):
            if curr_iter <= end_iter:
                pct = (curr_iter - start_iter) / (end_iter - start_iter)
                lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr,
                                      pct)
                break
            start_iter = end_iter
        return lr


def annealing_cos(start, end, factor, weight=1):
    """Calculate annealing cos learning rate.

    Cosine anneal from `weight * start + (1 - weight) * end` to `end` as
    percentage goes from 0.0 to 1.0.

    Args:
        start (float): The starting learning rate of the cosine annealing.
        end (float): The ending learning rate of the cosine annealing.
        factor (float): The coefficient of `pi` when calculating the current
            percentage. Range from 0.0 to 1.0.
        weight (float, optional): The combination factor of `start` and `end`
            when calculating the actual starting learning rate. Default to 1.
    """
    cos_out = cos(pi * factor) + 1
    return end + 0.5 * weight * (start - end) * cos_out


def annealing_linear(start, end, factor):
    """Calculate annealing linear learning rate.

    Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0.

    Args:
        start (float): The starting learning rate of the linear annealing.
        end (float): The ending learning rate of the linear annealing.
        factor (float): The current percentage of the annealing process.
            Range from 0.0 to 1.0.
    """
    return start + (end - start) * factor


def format_param(name, optim, param):
    if isinstance(param, numbers.Number):
        return [param] * len(optim.param_groups)
    elif isinstance(param, (list, tuple)):  # multi param groups
        if len(param) != len(optim.param_groups):
            raise ValueError(f'expected {len(optim.param_groups)} '
                             f'values for {name}, got {len(param)}')
        return param
    else:  # multi optimizers
        if name not in param:
            raise KeyError(f'{name} is not found in {param.keys()}')
        return param[name]
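# --- Illustrative usage (not part of the original file): a minimal, runnable
# sketch of the pure helpers above. The expected values follow directly from
# the formulas; nothing below assumes any mmcv API beyond this module.
if __name__ == '__main__':
    # Cosine annealing: factor=0 returns `start`, factor=1 returns `end`.
    assert abs(annealing_cos(1.0, 0.0, 0.0) - 1.0) < 1e-9
    assert abs(annealing_cos(1.0, 0.0, 1.0) - 0.0) < 1e-9
    assert abs(annealing_cos(1.0, 0.0, 0.5) - 0.5) < 1e-9
    # Linear annealing interpolates directly.
    assert abs(annealing_linear(1.0, 0.0, 0.25) - 0.75) < 1e-9
    # Restart index lookup, matching the docstring example above.
    assert get_position_from_periods(50, [100, 200, 300, 400]) == 0
    assert get_position_from_periods(210, [100, 200, 300, 400]) == 2
    assert get_position_from_periods(300, [100, 200, 300, 400]) == 3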
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/lr_updater.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Callable, List, Optional, Union

import torch

from ..dist_utils import master_only
from .hook import HOOKS, Hook


@HOOKS.register_module()
class ProfilerHook(Hook):
    """Profiler to analyze performance during training.

    PyTorch Profiler is a tool that allows the collection of the performance
    metrics during the training. More details on Profiler can be found at
    https://pytorch.org/docs/1.8.1/profiler.html#torch.profiler.profile

    Args:
        by_epoch (bool): Profile performance by epoch or by iteration.
            Default: True.
        profile_iters (int): Number of iterations for profiling. If
            ``by_epoch=True``, profile_iters indicates the number of epochs
            profiled at the beginning of the training, otherwise it indicates
            the first profile_iters iterations. Default: 1.
        activities (list[str]): List of activity groups (CPU, CUDA) to use in
            profiling. Default: ['cpu', 'cuda'].
        schedule (dict, optional): Config of generating the callable schedule.
            if schedule is None, profiler will not add step markers into the
            trace and table view. Default: None.
        on_trace_ready (callable, dict): Either a handler or a config dict
            used to generate a handler. Default: None.
        record_shapes (bool): Save information about operator's input shapes.
            Default: False.
        profile_memory (bool): Track tensor memory allocation/deallocation.
            Default: False.
        with_stack (bool): Record source information (file and line number)
            for the ops. Default: False.
        with_flops (bool): Use formula to estimate the FLOPS of specific
            operators (matrix multiplication and 2D convolution).
            Default: False.
        json_trace_path (str, optional): Exports the collected trace in Chrome
            JSON format. Default: None.

    Example:
        >>> runner = ... # instantiate a Runner
        >>> # tensorboard trace
        >>> trace_config = dict(type='tb_trace', dir_name='work_dir')
        >>> profiler_config = dict(on_trace_ready=trace_config)
        >>> runner.register_profiler_hook(profiler_config)
        >>> runner.run(data_loaders=[trainloader], workflow=[('train', 1)])
    """

    def __init__(self,
                 by_epoch: bool = True,
                 profile_iters: int = 1,
                 activities: List[str] = ['cpu', 'cuda'],
                 schedule: Optional[dict] = None,
                 on_trace_ready: Optional[Union[Callable, dict]] = None,
                 record_shapes: bool = False,
                 profile_memory: bool = False,
                 with_stack: bool = False,
                 with_flops: bool = False,
                 json_trace_path: Optional[str] = None) -> None:
        try:
            from torch import profiler  # torch version >= 1.8.1
        except ImportError:
            raise ImportError('profiler is a new feature of torch 1.8.1, '
                              f'but your version is {torch.__version__}')

        assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean.'
self.by_epoch = by_epoch if profile_iters < 1: raise ValueError('profile_iters should be greater than 0, but got ' f'{profile_iters}') self.profile_iters = profile_iters if not isinstance(activities, list): raise ValueError( f'activities should be list, but got {type(activities)}') self.activities = [] for activity in activities: activity = activity.lower() if activity == 'cpu': self.activities.append(profiler.ProfilerActivity.CPU) elif activity == 'cuda': self.activities.append(profiler.ProfilerActivity.CUDA) else: raise ValueError( f'activity should be "cpu" or "cuda", but got {activity}') if schedule is not None: self.schedule = profiler.schedule(**schedule) else: self.schedule = None self.on_trace_ready = on_trace_ready self.record_shapes = record_shapes self.profile_memory = profile_memory self.with_stack = with_stack self.with_flops = with_flops self.json_trace_path = json_trace_path @master_only def before_run(self, runner): if self.by_epoch and runner.max_epochs < self.profile_iters: raise ValueError('self.profile_iters should not be greater than ' f'{runner.max_epochs}') if not self.by_epoch and runner.max_iters < self.profile_iters: raise ValueError('self.profile_iters should not be greater than ' f'{runner.max_iters}') if callable(self.on_trace_ready): # handler _on_trace_ready = self.on_trace_ready elif isinstance(self.on_trace_ready, dict): # config of handler trace_cfg = self.on_trace_ready.copy() trace_type = trace_cfg.pop('type') # log_trace handler if trace_type == 'log_trace': def _log_handler(prof): print(prof.key_averages().table(**trace_cfg)) _on_trace_ready = _log_handler elif trace_type == 'tb_trace': # tensorboard_trace handler try: import torch_tb_profiler # noqa: F401 except ImportError: raise ImportError('please run "pip install ' 'torch-tb-profiler" to install ' 'torch_tb_profiler') _on_trace_ready = torch.profiler.tensorboard_trace_handler( **trace_cfg) else: raise ValueError('trace_type should be "log_trace" or ' f'"tb_trace", but got {trace_type}') elif self.on_trace_ready is None: _on_trace_ready = None # type: ignore else: raise ValueError('on_trace_ready should be handler, dict or None, ' f'but got {type(self.on_trace_ready)}') if runner.max_epochs > 1: warnings.warn(f'profiler will profile {runner.max_epochs} epochs ' 'instead of 1 epoch. Since profiler will slow down ' 'the training, it is recommended to train 1 epoch ' 'with ProfilerHook and adjust your setting according' ' to the profiler summary. During normal training ' '(epoch > 1), you may disable the ProfilerHook.') self.profiler = torch.profiler.profile( activities=self.activities, schedule=self.schedule, on_trace_ready=_on_trace_ready, record_shapes=self.record_shapes, profile_memory=self.profile_memory, with_stack=self.with_stack, with_flops=self.with_flops) self.profiler.__enter__() runner.logger.info('profiler is profiling...') @master_only def after_train_epoch(self, runner): if self.by_epoch and runner.epoch == self.profile_iters - 1: runner.logger.info('profiler may take a few minutes...') self.profiler.__exit__(None, None, None) if self.json_trace_path is not None: self.profiler.export_chrome_trace(self.json_trace_path) @master_only def after_train_iter(self, runner): self.profiler.step() if not self.by_epoch and runner.iter == self.profile_iters - 1: runner.logger.info('profiler may take a few minutes...') self.profiler.__exit__(None, None, None) if self.json_trace_path is not None: self.profiler.export_chrome_trace(self.json_trace_path)
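# --- Illustrative config (not part of the original file): a minimal sketch of
# a `log_trace` handler dict, matching the dict branch handled in
# `before_run` above. The inner keys (`sort_by`, `row_limit`) are forwarded
# to `prof.key_averages().table()`, and `schedule` keys are forwarded to
# `torch.profiler.schedule()`; runner construction is assumed elsewhere.
#
# profiler_config = dict(
#     by_epoch=False,
#     profile_iters=10,
#     activities=['cpu', 'cuda'],
#     schedule=dict(wait=1, warmup=1, active=2),
#     on_trace_ready=dict(
#         type='log_trace', sort_by='self_cpu_time_total', row_limit=10))
# runner.register_profiler_hook(profiler_config)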
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/profiler.py
# Copyright (c) OpenMMLab. All rights reserved. from .hook import HOOKS, Hook @HOOKS.register_module() class ClosureHook(Hook): def __init__(self, fn_name, fn): assert hasattr(self, fn_name) assert callable(fn) setattr(self, fn_name, fn)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/closure.py
# Copyright (c) OpenMMLab. All rights reserved. import time from .hook import HOOKS, Hook @HOOKS.register_module() class IterTimerHook(Hook): def before_epoch(self, runner): self.t = time.time() def before_iter(self, runner): runner.log_buffer.update({'data_time': time.time() - self.t}) def after_iter(self, runner): runner.log_buffer.update({'time': time.time() - self.t}) self.t = time.time()
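# --- Illustrative usage (not part of the original file): drive the hook with
# a stub runner to show what it measures. `data_time` is the gap before the
# iteration body (dataloading); `time` is the full step including it.
if __name__ == '__main__':
    import types

    class _StubBuffer:

        def update(self, d):
            print(d)

    runner = types.SimpleNamespace(log_buffer=_StubBuffer())
    hook = IterTimerHook()
    hook.before_epoch(runner)
    time.sleep(0.01)  # pretend dataloading
    hook.before_iter(runner)  # prints {'data_time': ~0.01}
    time.sleep(0.02)  # pretend forward/backward
    hook.after_iter(runner)  # prints {'time': ~0.03}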
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/iter_timer.py
# Copyright (c) OpenMMLab. All rights reserved. from ...dist_utils import master_only from ..hook import HOOKS from .base import LoggerHook @HOOKS.register_module() class MlflowLoggerHook(LoggerHook): def __init__(self, exp_name=None, tags=None, log_model=True, interval=10, ignore_last=True, reset_flag=False, by_epoch=True): """Class to log metrics and (optionally) a trained model to MLflow. It requires `MLflow`_ to be installed. Args: exp_name (str, optional): Name of the experiment to be used. Default None. If not None, set the active experiment. If experiment does not exist, an experiment with provided name will be created. tags (dict of str: str, optional): Tags for the current run. Default None. If not None, set tags for the current run. log_model (bool, optional): Whether to log an MLflow artifact. Default True. If True, log runner.model as an MLflow artifact for the current run. interval (int): Logging interval (every k iterations). ignore_last (bool): Ignore the log of last iterations in each epoch if less than `interval`. reset_flag (bool): Whether to clear the output buffer after logging by_epoch (bool): Whether EpochBasedRunner is used. .. _MLflow: https://www.mlflow.org/docs/latest/index.html """ super(MlflowLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch) self.import_mlflow() self.exp_name = exp_name self.tags = tags self.log_model = log_model def import_mlflow(self): try: import mlflow import mlflow.pytorch as mlflow_pytorch except ImportError: raise ImportError( 'Please run "pip install mlflow" to install mlflow') self.mlflow = mlflow self.mlflow_pytorch = mlflow_pytorch @master_only def before_run(self, runner): super(MlflowLoggerHook, self).before_run(runner) if self.exp_name is not None: self.mlflow.set_experiment(self.exp_name) if self.tags is not None: self.mlflow.set_tags(self.tags) @master_only def log(self, runner): tags = self.get_loggable_tags(runner) if tags: self.mlflow.log_metrics(tags, step=self.get_iter(runner)) @master_only def after_run(self, runner): if self.log_model: self.mlflow_pytorch.log_model(runner.model, 'models')
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/logger/mlflow.py
# Copyright (c) OpenMMLab. All rights reserved. from ...dist_utils import master_only from ..hook import HOOKS from .base import LoggerHook @HOOKS.register_module() class WandbLoggerHook(LoggerHook): def __init__(self, init_kwargs=None, interval=10, ignore_last=True, reset_flag=False, commit=True, by_epoch=True, with_step=True): super(WandbLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch) self.import_wandb() self.init_kwargs = init_kwargs self.commit = commit self.with_step = with_step def import_wandb(self): try: import wandb except ImportError: raise ImportError( 'Please run "pip install wandb" to install wandb') self.wandb = wandb @master_only def before_run(self, runner): super(WandbLoggerHook, self).before_run(runner) if self.wandb is None: self.import_wandb() if self.init_kwargs: self.wandb.init(**self.init_kwargs) else: self.wandb.init() @master_only def log(self, runner): tags = self.get_loggable_tags(runner) if tags: if self.with_step: self.wandb.log( tags, step=self.get_iter(runner), commit=self.commit) else: tags['global_step'] = self.get_iter(runner) self.wandb.log(tags, commit=self.commit) @master_only def after_run(self, runner): self.wandb.join()
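# --- Illustrative config (not part of the original file): a hypothetical
# `log_config` entry wiring this hook up, following the usual mmcv convention
# of registering logger hooks by type name. `project` and `name` are standard
# `wandb.init()` keyword arguments.
#
# log_config = dict(
#     interval=50,
#     hooks=[
#         dict(type='WandbLoggerHook',
#              init_kwargs=dict(project='my-project', name='run-1'),
#              by_epoch=False)
#     ])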
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py
# Copyright (c) OpenMMLab. All rights reserved. from .base import LoggerHook from .dvclive import DvcliveLoggerHook from .mlflow import MlflowLoggerHook from .neptune import NeptuneLoggerHook from .pavi import PaviLoggerHook from .tensorboard import TensorboardLoggerHook from .text import TextLoggerHook from .wandb import WandbLoggerHook __all__ = [ 'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook', 'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook', 'NeptuneLoggerHook', 'DvcliveLoggerHook' ]
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from ...dist_utils import master_only from ..hook import HOOKS from .base import LoggerHook @HOOKS.register_module() class DvcliveLoggerHook(LoggerHook): """Class to log metrics with dvclive. It requires `dvclive`_ to be installed. Args: path (str): Directory where dvclive will write TSV log files. interval (int): Logging interval (every k iterations). Default 10. ignore_last (bool): Ignore the log of last iterations in each epoch if less than `interval`. Default: True. reset_flag (bool): Whether to clear the output buffer after logging. Default: True. by_epoch (bool): Whether EpochBasedRunner is used. Default: True. .. _dvclive: https://dvc.org/doc/dvclive """ def __init__(self, path, interval=10, ignore_last=True, reset_flag=True, by_epoch=True): super(DvcliveLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch) self.path = path self.import_dvclive() def import_dvclive(self): try: import dvclive except ImportError: raise ImportError( 'Please run "pip install dvclive" to install dvclive') self.dvclive = dvclive @master_only def before_run(self, runner): self.dvclive.init(self.path) @master_only def log(self, runner): tags = self.get_loggable_tags(runner) if tags: for k, v in tags.items(): self.dvclive.log(k, v, step=self.get_iter(runner))
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os
import os.path as osp
from collections import OrderedDict

import torch
import torch.distributed as dist

import annotator.uniformer.mmcv as mmcv
from annotator.uniformer.mmcv.fileio.file_client import FileClient
from annotator.uniformer.mmcv.utils import is_tuple_of, scandir
from ..hook import HOOKS
from .base import LoggerHook


@HOOKS.register_module()
class TextLoggerHook(LoggerHook):
    """Logger hook in text.

    In this logger hook, the information will be printed on terminal and
    saved in json file.

    Args:
        by_epoch (bool, optional): Whether EpochBasedRunner is used.
            Default: True.
        interval (int, optional): Logging interval (every k iterations).
            Default: 10.
        ignore_last (bool, optional): Ignore the log of last iterations in
            each epoch if less than :attr:`interval`. Default: True.
        reset_flag (bool, optional): Whether to clear the output buffer after
            logging. Default: False.
        interval_exp_name (int, optional): Logging interval for experiment
            name. This feature is to help users conveniently get the
            experiment information from screen or log file. Default: 1000.
        out_dir (str, optional): Logs are saved in ``runner.work_dir`` by
            default. If ``out_dir`` is specified, logs will be copied to a new
            directory which is the concatenation of ``out_dir`` and the last
            level directory of ``runner.work_dir``. Default: None.
            `New in version 1.3.16.`
        out_suffix (str or tuple[str], optional): Those filenames ending with
            ``out_suffix`` will be copied to ``out_dir``. Default:
            ('.log.json', '.log', '.py').
            `New in version 1.3.16.`
        keep_local (bool, optional): Whether to keep local log when
            :attr:`out_dir` is specified. If False, the local log will be
            removed. Default: True.
            `New in version 1.3.16.`
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`
    """

    def __init__(self,
                 by_epoch=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 interval_exp_name=1000,
                 out_dir=None,
                 out_suffix=('.log.json', '.log', '.py'),
                 keep_local=True,
                 file_client_args=None):
        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
                                             by_epoch)
        self.by_epoch = by_epoch
        self.time_sec_tot = 0
        self.interval_exp_name = interval_exp_name

        if out_dir is None and file_client_args is not None:
            raise ValueError(
                'file_client_args should be "None" when `out_dir` is not '
                'specified.')
        self.out_dir = out_dir

        if not (out_dir is None or isinstance(out_dir, str)
                or is_tuple_of(out_dir, str)):
            raise TypeError('out_dir should be "None" or string or tuple of '
                            f'string, but got {out_dir}')
        self.out_suffix = out_suffix

        self.keep_local = keep_local
        self.file_client_args = file_client_args
        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(file_client_args,
                                                       self.out_dir)

    def before_run(self, runner):
        super(TextLoggerHook, self).before_run(runner)

        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(self.file_client_args,
                                                       self.out_dir)
            # The final `self.out_dir` is the concatenation of `self.out_dir`
            # and the last level directory of `runner.work_dir`
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
            runner.logger.info(
                (f'Text logs will be saved to {self.out_dir} by '
                 f'{self.file_client.name} after the training process.'))

        self.start_iter = runner.iter
        self.json_log_path = osp.join(runner.work_dir,
                                      f'{runner.timestamp}.log.json')
        if runner.meta is not None:
            self._dump_log(runner.meta, runner)

    def _get_max_memory(self, runner):
        device = getattr(runner.model, 'output_device', None)
        mem = torch.cuda.max_memory_allocated(device=device)
        mem_mb = torch.tensor([mem / (1024 * 1024)],
                              dtype=torch.int,
                              device=device)
        if runner.world_size > 1:
            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
        return mem_mb.item()

    def _log_info(self, log_dict, runner):
        # print exp name for users to distinguish experiments
        # at every ``interval_exp_name`` iterations and the end of each epoch
        if runner.meta is not None and 'exp_name' in runner.meta:
            if (self.every_n_iters(runner, self.interval_exp_name)) or (
                    self.by_epoch and self.end_of_epoch(runner)):
                exp_info = f'Exp name: {runner.meta["exp_name"]}'
                runner.logger.info(exp_info)

        if log_dict['mode'] == 'train':
            if isinstance(log_dict['lr'], dict):
                lr_str = []
                for k, val in log_dict['lr'].items():
                    lr_str.append(f'lr_{k}: {val:.3e}')
                lr_str = ' '.join(lr_str)
            else:
                lr_str = f'lr: {log_dict["lr"]:.3e}'

            # by epoch: Epoch [4][100/1000]
            # by iter:  Iter [100/100000]
            if self.by_epoch:
                log_str = f'Epoch [{log_dict["epoch"]}]' \
                    f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
            else:
                log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
            log_str += f'{lr_str}, '

            if 'time' in log_dict.keys():
                self.time_sec_tot += (log_dict['time'] * self.interval)
                time_sec_avg = self.time_sec_tot / (
                    runner.iter - self.start_iter + 1)
                eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                log_str += f'eta: {eta_str}, '
                log_str += f'time: {log_dict["time"]:.3f}, ' \
                    f'data_time: {log_dict["data_time"]:.3f}, '
                # statistic memory
                if torch.cuda.is_available():
                    log_str += f'memory: {log_dict["memory"]}, '
        else:
            # val/test time
            # here 1000 is the length of the val dataloader
            # by epoch: Epoch[val] [4][1000]
            # by iter: Iter[val] [1000]
            if self.by_epoch:
                log_str = 
f'Epoch({log_dict["mode"]}) ' \ f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t' else: log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t' log_items = [] for name, val in log_dict.items(): # TODO: resolve this hack # these items have been in log_str if name in [ 'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time', 'memory', 'epoch' ]: continue if isinstance(val, float): val = f'{val:.4f}' log_items.append(f'{name}: {val}') log_str += ', '.join(log_items) runner.logger.info(log_str) def _dump_log(self, log_dict, runner): # dump log in json format json_log = OrderedDict() for k, v in log_dict.items(): json_log[k] = self._round_float(v) # only append log at last line if runner.rank == 0: with open(self.json_log_path, 'a+') as f: mmcv.dump(json_log, f, file_format='json') f.write('\n') def _round_float(self, items): if isinstance(items, list): return [self._round_float(item) for item in items] elif isinstance(items, float): return round(items, 5) else: return items def log(self, runner): if 'eval_iter_num' in runner.log_buffer.output: # this doesn't modify runner.iter and is regardless of by_epoch cur_iter = runner.log_buffer.output.pop('eval_iter_num') else: cur_iter = self.get_iter(runner, inner_iter=True) log_dict = OrderedDict( mode=self.get_mode(runner), epoch=self.get_epoch(runner), iter=cur_iter) # only record lr of the first param group cur_lr = runner.current_lr() if isinstance(cur_lr, list): log_dict['lr'] = cur_lr[0] else: assert isinstance(cur_lr, dict) log_dict['lr'] = {} for k, lr_ in cur_lr.items(): assert isinstance(lr_, list) log_dict['lr'].update({k: lr_[0]}) if 'time' in runner.log_buffer.output: # statistic memory if torch.cuda.is_available(): log_dict['memory'] = self._get_max_memory(runner) log_dict = dict(log_dict, **runner.log_buffer.output) self._log_info(log_dict, runner) self._dump_log(log_dict, runner) return log_dict def after_run(self, runner): # copy or upload logs to self.out_dir if self.out_dir is not None: for filename in scandir(runner.work_dir, self.out_suffix, True): local_filepath = osp.join(runner.work_dir, filename) out_filepath = self.file_client.join_path( self.out_dir, filename) with open(local_filepath, 'r') as f: self.file_client.put_text(f.read(), out_filepath) runner.logger.info( (f'The file {local_filepath} has been uploaded to ' f'{out_filepath}.')) if not self.keep_local: os.remove(local_filepath) runner.logger.info( (f'{local_filepath} was removed due to the ' '`self.keep_local=False`'))
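# --- Illustrative usage (not part of the original file): `_round_float` uses
# no instance state, so it can be exercised directly through the class with a
# placeholder `self`. It recursively rounds floats to 5 decimals and leaves
# everything else untouched.
if __name__ == '__main__':
    assert TextLoggerHook._round_float(None, 3.14159265) == 3.14159
    assert TextLoggerHook._round_float(None, [1.0000004, 'acc']) == [1.0, 'acc']
    assert TextLoggerHook._round_float(None, 'not-a-float') == 'not-a-float'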
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/logger/text.py
# Copyright (c) OpenMMLab. All rights reserved. import json import os import os.path as osp import torch import yaml import annotator.uniformer.mmcv as mmcv from ....parallel.utils import is_module_wrapper from ...dist_utils import master_only from ..hook import HOOKS from .base import LoggerHook @HOOKS.register_module() class PaviLoggerHook(LoggerHook): def __init__(self, init_kwargs=None, add_graph=False, add_last_ckpt=False, interval=10, ignore_last=True, reset_flag=False, by_epoch=True, img_key='img_info'): super(PaviLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch) self.init_kwargs = init_kwargs self.add_graph = add_graph self.add_last_ckpt = add_last_ckpt self.img_key = img_key @master_only def before_run(self, runner): super(PaviLoggerHook, self).before_run(runner) try: from pavi import SummaryWriter except ImportError: raise ImportError('Please run "pip install pavi" to install pavi.') self.run_name = runner.work_dir.split('/')[-1] if not self.init_kwargs: self.init_kwargs = dict() self.init_kwargs['name'] = self.run_name self.init_kwargs['model'] = runner._model_name if runner.meta is not None: if 'config_dict' in runner.meta: config_dict = runner.meta['config_dict'] assert isinstance( config_dict, dict), ('meta["config_dict"] has to be of a dict, ' f'but got {type(config_dict)}') elif 'config_file' in runner.meta: config_file = runner.meta['config_file'] config_dict = dict(mmcv.Config.fromfile(config_file)) else: config_dict = None if config_dict is not None: # 'max_.*iter' is parsed in pavi sdk as the maximum iterations # to properly set up the progress bar. config_dict = config_dict.copy() config_dict.setdefault('max_iter', runner.max_iters) # non-serializable values are first converted in # mmcv.dump to json config_dict = json.loads( mmcv.dump(config_dict, file_format='json')) session_text = yaml.dump(config_dict) self.init_kwargs['session_text'] = session_text self.writer = SummaryWriter(**self.init_kwargs) def get_step(self, runner): """Get the total training step/epoch.""" if self.get_mode(runner) == 'val' and self.by_epoch: return self.get_epoch(runner) else: return self.get_iter(runner) @master_only def log(self, runner): tags = self.get_loggable_tags(runner, add_mode=False) if tags: self.writer.add_scalars( self.get_mode(runner), tags, self.get_step(runner)) @master_only def after_run(self, runner): if self.add_last_ckpt: ckpt_path = osp.join(runner.work_dir, 'latest.pth') if osp.islink(ckpt_path): ckpt_path = osp.join(runner.work_dir, os.readlink(ckpt_path)) if osp.isfile(ckpt_path): # runner.epoch += 1 has been done before `after_run`. iteration = runner.epoch if self.by_epoch else runner.iter return self.writer.add_snapshot_file( tag=self.run_name, snapshot_file_path=ckpt_path, iteration=iteration) # flush the buffer and send a task ending signal to Pavi self.writer.close() @master_only def before_epoch(self, runner): if runner.epoch == 0 and self.add_graph: if is_module_wrapper(runner.model): _model = runner.model.module else: _model = runner.model device = next(_model.parameters()).device data = next(iter(runner.data_loader)) image = data[self.img_key][0:1].to(device) with torch.no_grad(): self.writer.add_graph(_model, image)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py
# Copyright (c) OpenMMLab. All rights reserved. import numbers from abc import ABCMeta, abstractmethod import numpy as np import torch from ..hook import Hook class LoggerHook(Hook): """Base class for logger hooks. Args: interval (int): Logging interval (every k iterations). ignore_last (bool): Ignore the log of last iterations in each epoch if less than `interval`. reset_flag (bool): Whether to clear the output buffer after logging. by_epoch (bool): Whether EpochBasedRunner is used. """ __metaclass__ = ABCMeta def __init__(self, interval=10, ignore_last=True, reset_flag=False, by_epoch=True): self.interval = interval self.ignore_last = ignore_last self.reset_flag = reset_flag self.by_epoch = by_epoch @abstractmethod def log(self, runner): pass @staticmethod def is_scalar(val, include_np=True, include_torch=True): """Tell the input variable is a scalar or not. Args: val: Input variable. include_np (bool): Whether include 0-d np.ndarray as a scalar. include_torch (bool): Whether include 0-d torch.Tensor as a scalar. Returns: bool: True or False. """ if isinstance(val, numbers.Number): return True elif include_np and isinstance(val, np.ndarray) and val.ndim == 0: return True elif include_torch and isinstance(val, torch.Tensor) and len(val) == 1: return True else: return False def get_mode(self, runner): if runner.mode == 'train': if 'time' in runner.log_buffer.output: mode = 'train' else: mode = 'val' elif runner.mode == 'val': mode = 'val' else: raise ValueError(f"runner mode should be 'train' or 'val', " f'but got {runner.mode}') return mode def get_epoch(self, runner): if runner.mode == 'train': epoch = runner.epoch + 1 elif runner.mode == 'val': # normal val mode # runner.epoch += 1 has been done before val workflow epoch = runner.epoch else: raise ValueError(f"runner mode should be 'train' or 'val', " f'but got {runner.mode}') return epoch def get_iter(self, runner, inner_iter=False): """Get the current training iteration step.""" if self.by_epoch and inner_iter: current_iter = runner.inner_iter + 1 else: current_iter = runner.iter + 1 return current_iter def get_lr_tags(self, runner): tags = {} lrs = runner.current_lr() if isinstance(lrs, dict): for name, value in lrs.items(): tags[f'learning_rate/{name}'] = value[0] else: tags['learning_rate'] = lrs[0] return tags def get_momentum_tags(self, runner): tags = {} momentums = runner.current_momentum() if isinstance(momentums, dict): for name, value in momentums.items(): tags[f'momentum/{name}'] = value[0] else: tags['momentum'] = momentums[0] return tags def get_loggable_tags(self, runner, allow_scalar=True, allow_text=False, add_mode=True, tags_to_skip=('time', 'data_time')): tags = {} for var, val in runner.log_buffer.output.items(): if var in tags_to_skip: continue if self.is_scalar(val) and not allow_scalar: continue if isinstance(val, str) and not allow_text: continue if add_mode: var = f'{self.get_mode(runner)}/{var}' tags[var] = val tags.update(self.get_lr_tags(runner)) tags.update(self.get_momentum_tags(runner)) return tags def before_run(self, runner): for hook in runner.hooks[::-1]: if isinstance(hook, LoggerHook): hook.reset_flag = True break def before_epoch(self, runner): runner.log_buffer.clear() # clear logs of last epoch def after_train_iter(self, runner): if self.by_epoch and self.every_n_inner_iters(runner, self.interval): runner.log_buffer.average(self.interval) elif not self.by_epoch and self.every_n_iters(runner, self.interval): runner.log_buffer.average(self.interval) elif self.end_of_epoch(runner) and not 
self.ignore_last: # not precise but more stable runner.log_buffer.average(self.interval) if runner.log_buffer.ready: self.log(runner) if self.reset_flag: runner.log_buffer.clear_output() def after_train_epoch(self, runner): if runner.log_buffer.ready: self.log(runner) if self.reset_flag: runner.log_buffer.clear_output() def after_val_epoch(self, runner): runner.log_buffer.average() self.log(runner) if self.reset_flag: runner.log_buffer.clear_output()
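# --- Illustrative subclass (not part of the original file): the minimal
# surface a concrete logger needs on top of this base class is `log()`. The
# registry import and the class name below are hypothetical; real hooks in
# this package register themselves with `@HOOKS.register_module()`.
#
# from ..hook import HOOKS
#
# @HOOKS.register_module()
# class PrintLoggerHook(LoggerHook):
#
#     def log(self, runner):
#         tags = self.get_loggable_tags(runner)
#         if tags:
#             print(f'iter {self.get_iter(runner)}: {tags}')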
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/logger/base.py
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version from ...dist_utils import master_only from ..hook import HOOKS from .base import LoggerHook @HOOKS.register_module() class TensorboardLoggerHook(LoggerHook): def __init__(self, log_dir=None, interval=10, ignore_last=True, reset_flag=False, by_epoch=True): super(TensorboardLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch) self.log_dir = log_dir @master_only def before_run(self, runner): super(TensorboardLoggerHook, self).before_run(runner) if (TORCH_VERSION == 'parrots' or digit_version(TORCH_VERSION) < digit_version('1.1')): try: from tensorboardX import SummaryWriter except ImportError: raise ImportError('Please install tensorboardX to use ' 'TensorboardLoggerHook.') else: try: from torch.utils.tensorboard import SummaryWriter except ImportError: raise ImportError( 'Please run "pip install future tensorboard" to install ' 'the dependencies to use torch.utils.tensorboard ' '(applicable to PyTorch 1.1 or higher)') if self.log_dir is None: self.log_dir = osp.join(runner.work_dir, 'tf_logs') self.writer = SummaryWriter(self.log_dir) @master_only def log(self, runner): tags = self.get_loggable_tags(runner, allow_text=True) for tag, val in tags.items(): if isinstance(val, str): self.writer.add_text(tag, val, self.get_iter(runner)) else: self.writer.add_scalar(tag, val, self.get_iter(runner)) @master_only def after_run(self, runner): self.writer.close()
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py
# Copyright (c) OpenMMLab. All rights reserved.
from ...dist_utils import master_only
from ..hook import HOOKS
from .base import LoggerHook


@HOOKS.register_module()
class NeptuneLoggerHook(LoggerHook):
    """Class to log metrics to NeptuneAI.

    It requires `neptune-client` to be installed.

    Args:
        init_kwargs (dict): a dict containing the initialization keys as
            below:

            - project (str): Name of a project in a form of
              namespace/project_name. If None, the value of NEPTUNE_PROJECT
              environment variable will be taken.
            - api_token (str): User’s API token. If None, the value of
              NEPTUNE_API_TOKEN environment variable will be taken. Note: It
              is strongly recommended to use NEPTUNE_API_TOKEN environment
              variable rather than placing your API token in plain text in
              your source code.
            - name (str, optional, default is 'Untitled'): Editable name of
              the run. Name is displayed in the run's Details and in Runs
              table as a column.

            Check https://docs.neptune.ai/api-reference/neptune#init for more
            init arguments.
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
        reset_flag (bool): Whether to clear the output buffer after logging.
        by_epoch (bool): Whether EpochBasedRunner is used.

    .. _NeptuneAI: https://docs.neptune.ai/you-should-know/logging-metadata
    """

    def __init__(self,
                 init_kwargs=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=True,
                 with_step=True,
                 by_epoch=True):

        super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
                                                reset_flag, by_epoch)
        self.import_neptune()
        self.init_kwargs = init_kwargs
        self.with_step = with_step

    def import_neptune(self):
        try:
            import neptune.new as neptune
        except ImportError:
            raise ImportError(
                'Please run "pip install neptune-client" to install neptune')
        self.neptune = neptune
        self.run = None

    @master_only
    def before_run(self, runner):
        if self.init_kwargs:
            self.run = self.neptune.init(**self.init_kwargs)
        else:
            self.run = self.neptune.init()

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if tags:
            if not self.with_step:
                # record the step as an ordinary tag instead of mutating
                # `tags` while iterating over it below
                tags['global_step'] = self.get_iter(runner)
            for tag_name, tag_value in tags.items():
                if self.with_step:
                    self.run[tag_name].log(
                        tag_value, step=self.get_iter(runner))
                else:
                    self.run[tag_name].log(tag_value)

    @master_only
    def after_run(self, runner):
        self.run.stop()
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py
# Copyright (c) OpenMMLab. All rights reserved. import os import subprocess import warnings from packaging.version import parse def digit_version(version_str: str, length: int = 4): """Convert a version string into a tuple of integers. This method is usually used for comparing two versions. For pre-release versions: alpha < beta < rc. Args: version_str (str): The version string. length (int): The maximum number of version levels. Default: 4. Returns: tuple[int]: The version info in digits (integers). """ assert 'parrots' not in version_str version = parse(version_str) assert version.release, f'failed to parse version {version_str}' release = list(version.release) release = release[:length] if len(release) < length: release = release + [0] * (length - len(release)) if version.is_prerelease: mapping = {'a': -3, 'b': -2, 'rc': -1} val = -4 # version.pre can be None if version.pre: if version.pre[0] not in mapping: warnings.warn(f'unknown prerelease version {version.pre[0]}, ' 'version checking may go wrong') else: val = mapping[version.pre[0]] release.extend([val, version.pre[-1]]) else: release.extend([val, 0]) elif version.is_postrelease: release.extend([1, version.post]) else: release.extend([0, 0]) return tuple(release) def _minimal_ext_cmd(cmd): # construct minimal environment env = {} for k in ['SYSTEMROOT', 'PATH', 'HOME']: v = os.environ.get(k) if v is not None: env[k] = v # LANGUAGE is used on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out def get_git_hash(fallback='unknown', digits=None): """Get the git hash of the current repo. Args: fallback (str, optional): The fallback string when git hash is unavailable. Defaults to 'unknown'. digits (int, optional): kept digits of the hash. Defaults to None, meaning all digits are kept. Returns: str: Git commit hash. """ if digits is not None and not isinstance(digits, int): raise TypeError('digits must be None or an integer') try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) sha = out.strip().decode('ascii') if digits is not None: sha = sha[:digits] except OSError: sha = fallback return sha
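# --- Illustrative usage (not part of the original file): expected outputs of
# `digit_version`, following the release/pre-release handling above (a/b/rc
# map to -3/-2/-1, final releases append (0, 0)), so pre-releases compare
# below the corresponding final release as plain tuples.
if __name__ == '__main__':
    assert digit_version('1.8.1') == (1, 8, 1, 0, 0, 0)
    assert digit_version('1.9.0rc1') == (1, 9, 0, 0, -1, 1)
    assert digit_version('1.9.0rc1') < digit_version('1.9.0')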
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/version_utils.py
# Copyright (c) OpenMMLab. All rights reserved. import logging import torch.distributed as dist logger_initialized = {} def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): """Initialize and get a logger by name. If the logger has not been initialized, this method will initialize the logger by adding one or two handlers, otherwise the initialized logger will be directly returned. During initialization, a StreamHandler will always be added. If `log_file` is specified and the process rank is 0, a FileHandler will also be added. Args: name (str): Logger name. log_file (str | None): The log filename. If specified, a FileHandler will be added to the logger. log_level (int): The logger level. Note that only the process of rank 0 is affected, and other processes will set the level to "Error" thus be silent most of the time. file_mode (str): The file mode used in opening log file. Defaults to 'w'. Returns: logging.Logger: The expected logger. """ logger = logging.getLogger(name) if name in logger_initialized: return logger # handle hierarchical names # e.g., logger "a" is initialized, then logger "a.b" will skip the # initialization since it is a child of "a". for logger_name in logger_initialized: if name.startswith(logger_name): return logger # handle duplicate logs to the console # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler <stderr> (NOTSET) # to the root logger. As logger.propagate is True by default, this root # level handler causes logging messages from rank>0 processes to # unexpectedly show up on the console, creating much unwanted clutter. # To fix this issue, we set the root logger's StreamHandler, if any, to log # at the ERROR level. for handler in logger.root.handlers: if type(handler) is logging.StreamHandler: handler.setLevel(logging.ERROR) stream_handler = logging.StreamHandler() handlers = [stream_handler] if dist.is_available() and dist.is_initialized(): rank = dist.get_rank() else: rank = 0 # only rank 0 will add a FileHandler if rank == 0 and log_file is not None: # Here, the default behaviour of the official logger is 'a'. Thus, we # provide an interface to change the file mode to the default # behaviour. file_handler = logging.FileHandler(log_file, file_mode) handlers.append(file_handler) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') for handler in handlers: handler.setFormatter(formatter) handler.setLevel(log_level) logger.addHandler(handler) if rank == 0: logger.setLevel(log_level) else: logger.setLevel(logging.ERROR) logger_initialized[name] = True return logger def print_log(msg, logger=None, level=logging.INFO): """Print a log message. Args: msg (str): The message to be logged. logger (logging.Logger | str | None): The logger to be used. Some special loggers are: - "silent": no message will be printed. - other str: the logger obtained with `get_root_logger(logger)`. - None: The `print()` method will be used to print log messages. level (int): Logging level. Only available when `logger` is a Logger object or "root". """ if logger is None: print(msg) elif isinstance(logger, logging.Logger): logger.log(level, msg) elif logger == 'silent': pass elif isinstance(logger, str): _logger = get_logger(logger) _logger.log(level, msg) else: raise TypeError( 'logger should be either a logging.Logger object, str, ' f'"silent" or None, but got {type(logger)}')
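# --- Illustrative usage (not part of the original file): a minimal sketch;
# the logger name 'mmcv_demo' is arbitrary. Without an initialized process
# group the rank defaults to 0, so the message is printed at INFO level.
if __name__ == '__main__':
    logger = get_logger('mmcv_demo')
    print_log('hello from rank 0', logger=logger)
    print_log('dropped silently', logger='silent')
    print_log('plain print fallback', logger=None)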
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/logging.py
# Copyright (c) OpenMMLab. All rights reserved.
import collections.abc
import functools
import itertools
import subprocess
import warnings
from collections import abc
from importlib import import_module
from inspect import getfullargspec
from itertools import repeat


# From PyTorch internals
def _ntuple(n):

    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))

    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple


def is_str(x):
    """Whether the input is a string instance.

    Note: This method is deprecated since python 2 is no longer supported.
    """
    return isinstance(x, str)


def import_modules_from_strings(imports, allow_failed_imports=False):
    """Import modules from the given list of strings.

    Args:
        imports (list | str | None): The given module names to be imported.
        allow_failed_imports (bool): If True, the failed imports will return
            None. Otherwise, an ImportError is raised. Default: False.

    Returns:
        list[module] | module | None: The imported modules.

    Examples:
        >>> osp, sys = import_modules_from_strings(
        ...     ['os.path', 'sys'])
        >>> import os.path as osp_
        >>> import sys as sys_
        >>> assert osp == osp_
        >>> assert sys == sys_
    """
    if not imports:
        return
    single_import = False
    if isinstance(imports, str):
        single_import = True
        imports = [imports]
    if not isinstance(imports, list):
        raise TypeError(
            f'custom_imports must be a list but got type {type(imports)}')
    imported = []
    for imp in imports:
        if not isinstance(imp, str):
            raise TypeError(
                f'{imp} is of type {type(imp)} and cannot be imported.')
        try:
            imported_tmp = import_module(imp)
        except ImportError:
            if allow_failed_imports:
                warnings.warn(f'{imp} failed to import and is ignored.',
                              UserWarning)
                imported_tmp = None
            else:
                raise ImportError
        imported.append(imported_tmp)
    if single_import:
        imported = imported[0]
    return imported


def iter_cast(inputs, dst_type, return_type=None):
    """Cast elements of an iterable object into some type.

    Args:
        inputs (Iterable): The input object.
        dst_type (type): Destination type.
        return_type (type, optional): If specified, the output object will be
            converted to this type, otherwise an iterator.

    Returns:
        iterator or specified type: The converted object.
    """
    if not isinstance(inputs, abc.Iterable):
        raise TypeError('inputs must be an iterable object')
    if not isinstance(dst_type, type):
        raise TypeError('"dst_type" must be a valid type')

    out_iterable = map(dst_type, inputs)

    if return_type is None:
        return out_iterable
    else:
        return return_type(out_iterable)


def list_cast(inputs, dst_type):
    """Cast elements of an iterable object into a list of some type.

    A partial method of :func:`iter_cast`.
    """
    return iter_cast(inputs, dst_type, return_type=list)


def tuple_cast(inputs, dst_type):
    """Cast elements of an iterable object into a tuple of some type.

    A partial method of :func:`iter_cast`.
    """
    return iter_cast(inputs, dst_type, return_type=tuple)


def is_seq_of(seq, expected_type, seq_type=None):
    """Check whether it is a sequence of some type.

    Args:
        seq (Sequence): The sequence to be checked.
        expected_type (type): Expected type of sequence items.
        seq_type (type, optional): Expected sequence type.

    Returns:
        bool: Whether the sequence is valid.
""" if seq_type is None: exp_seq_type = abc.Sequence else: assert isinstance(seq_type, type) exp_seq_type = seq_type if not isinstance(seq, exp_seq_type): return False for item in seq: if not isinstance(item, expected_type): return False return True def is_list_of(seq, expected_type): """Check whether it is a list of some type. A partial method of :func:`is_seq_of`. """ return is_seq_of(seq, expected_type, seq_type=list) def is_tuple_of(seq, expected_type): """Check whether it is a tuple of some type. A partial method of :func:`is_seq_of`. """ return is_seq_of(seq, expected_type, seq_type=tuple) def slice_list(in_list, lens): """Slice a list into several sub lists by a list of given length. Args: in_list (list): The list to be sliced. lens(int or list): The expected length of each out list. Returns: list: A list of sliced list. """ if isinstance(lens, int): assert len(in_list) % lens == 0 lens = [lens] * int(len(in_list) / lens) if not isinstance(lens, list): raise TypeError('"indices" must be an integer or a list of integers') elif sum(lens) != len(in_list): raise ValueError('sum of lens and list length does not ' f'match: {sum(lens)} != {len(in_list)}') out_list = [] idx = 0 for i in range(len(lens)): out_list.append(in_list[idx:idx + lens[i]]) idx += lens[i] return out_list def concat_list(in_list): """Concatenate a list of list into a single list. Args: in_list (list): The list of list to be merged. Returns: list: The concatenated flat list. """ return list(itertools.chain(*in_list)) def check_prerequisites( prerequisites, checker, msg_tmpl='Prerequisites "{}" are required in method "{}" but not ' 'found, please install them first.'): # yapf: disable """A decorator factory to check if prerequisites are satisfied. Args: prerequisites (str of list[str]): Prerequisites to be checked. checker (callable): The checker method that returns True if a prerequisite is meet, False otherwise. msg_tmpl (str): The message template with two variables. Returns: decorator: A specific decorator. """ def wrap(func): @functools.wraps(func) def wrapped_func(*args, **kwargs): requirements = [prerequisites] if isinstance( prerequisites, str) else prerequisites missing = [] for item in requirements: if not checker(item): missing.append(item) if missing: print(msg_tmpl.format(', '.join(missing), func.__name__)) raise RuntimeError('Prerequisites not meet.') else: return func(*args, **kwargs) return wrapped_func return wrap def _check_py_package(package): try: import_module(package) except ImportError: return False else: return True def _check_executable(cmd): if subprocess.call(f'which {cmd}', shell=True) != 0: return False else: return True def requires_package(prerequisites): """A decorator to check if some python packages are installed. Example: >>> @requires_package('numpy') >>> func(arg1, args): >>> return numpy.zeros(1) array([0.]) >>> @requires_package(['numpy', 'non_package']) >>> func(arg1, args): >>> return numpy.zeros(1) ImportError """ return check_prerequisites(prerequisites, checker=_check_py_package) def requires_executable(prerequisites): """A decorator to check if some executable files are installed. Example: >>> @requires_executable('ffmpeg') >>> func(arg1, args): >>> print(1) 1 """ return check_prerequisites(prerequisites, checker=_check_executable) def deprecated_api_warning(name_dict, cls_name=None): """A decorator to check if some arguments are deprecate and try to replace deprecate src_arg_name to dst_arg_name. Args: name_dict(dict): key (str): Deprecate argument names. 
val (str): Expected argument names. Returns: func: New function. """ def api_warning_wrapper(old_func): @functools.wraps(old_func) def new_func(*args, **kwargs): # get the arg spec of the decorated method args_info = getfullargspec(old_func) # get name of the function func_name = old_func.__name__ if cls_name is not None: func_name = f'{cls_name}.{func_name}' if args: arg_names = args_info.args[:len(args)] for src_arg_name, dst_arg_name in name_dict.items(): if src_arg_name in arg_names: warnings.warn( f'"{src_arg_name}" is deprecated in ' f'`{func_name}`, please use "{dst_arg_name}" ' 'instead') arg_names[arg_names.index(src_arg_name)] = dst_arg_name if kwargs: for src_arg_name, dst_arg_name in name_dict.items(): if src_arg_name in kwargs: assert dst_arg_name not in kwargs, ( f'The expected behavior is to replace ' f'the deprecated key `{src_arg_name}` to ' f'new key `{dst_arg_name}`, but got them ' f'in the arguments at the same time, which ' f'is confusing. `{src_arg_name} will be ' f'deprecated in the future, please ' f'use `{dst_arg_name}` instead.') warnings.warn( f'"{src_arg_name}" is deprecated in ' f'`{func_name}`, please use "{dst_arg_name}" ' 'instead') kwargs[dst_arg_name] = kwargs.pop(src_arg_name) # apply converted arguments to the decorated method output = old_func(*args, **kwargs) return output return new_func return api_warning_wrapper def is_method_overridden(method, base_class, derived_class): """Check if a method of base class is overridden in derived class. Args: method (str): the method name to check. base_class (type): the class of the base class. derived_class (type | Any): the class or instance of the derived class. """ assert isinstance(base_class, type), \ "base_class doesn't accept instance, Please pass class instead." if not isinstance(derived_class, type): derived_class = derived_class.__class__ base_method = getattr(base_class, method) derived_method = getattr(derived_class, method) return derived_method != base_method def has_method(obj: object, method: str) -> bool: """Check whether the object has a method. Args: method (str): The method name to check. obj (object): The object to check. Returns: bool: True if the object has the method else False. """ return hasattr(obj, method) and callable(getattr(obj, method))
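# --- Illustrative usage (not part of the original file): a minimal, runnable
# sketch of three helpers above; the function `f` and its argument names are
# hypothetical.
if __name__ == '__main__':
    # slice_list splits by explicit lengths; concat_list flattens back.
    assert slice_list([1, 2, 3, 4, 5, 6], [2, 4]) == [[1, 2], [3, 4, 5, 6]]
    assert concat_list([[1, 2], [3, 4, 5, 6]]) == [1, 2, 3, 4, 5, 6]

    @deprecated_api_warning({'old_arg': 'new_arg'})
    def f(new_arg=0):
        return new_arg

    assert f(old_arg=3) == 3  # warns and forwards old_arg -> new_arg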
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/misc.py
import warnings import torch from annotator.uniformer.mmcv.utils import digit_version def is_jit_tracing() -> bool: if (torch.__version__ != 'parrots' and digit_version(torch.__version__) >= digit_version('1.6.0')): on_trace = torch.jit.is_tracing() # In PyTorch 1.6, torch.jit.is_tracing has a bug. # Refers to https://github.com/pytorch/pytorch/issues/42448 if isinstance(on_trace, bool): return on_trace else: return torch._C._is_tracing() else: warnings.warn( 'torch.jit.is_tracing is only supported after v1.6.0. ' 'Therefore is_tracing returns False automatically. Please ' 'set on_trace manually if you are using trace.', UserWarning) return False
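# --- Illustrative usage (not part of the original file): outside of an
# active `torch.jit.trace` call this returns False (assumes torch >= 1.6,
# otherwise a warning is emitted and False is returned anyway).
if __name__ == '__main__':
    assert not is_jit_tracing()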
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/trace.py
# Copyright (c) OpenMMLab. All rights reserved. import ast import copy import os import os.path as osp import platform import shutil import sys import tempfile import uuid import warnings from argparse import Action, ArgumentParser from collections import abc from importlib import import_module from addict import Dict from yapf.yapflib.yapf_api import FormatCode from .misc import import_modules_from_strings from .path import check_file_exist if platform.system() == 'Windows': import regex as re else: import re BASE_KEY = '_base_' DELETE_KEY = '_delete_' DEPRECATION_KEY = '_deprecation_' RESERVED_KEYS = ['filename', 'text', 'pretty_text'] class ConfigDict(Dict): def __missing__(self, name): raise KeyError(name) def __getattr__(self, name): try: value = super(ConfigDict, self).__getattr__(name) except KeyError: ex = AttributeError(f"'{self.__class__.__name__}' object has no " f"attribute '{name}'") except Exception as e: ex = e else: return value raise ex def add_args(parser, cfg, prefix=''): for k, v in cfg.items(): if isinstance(v, str): parser.add_argument('--' + prefix + k) elif isinstance(v, int): parser.add_argument('--' + prefix + k, type=int) elif isinstance(v, float): parser.add_argument('--' + prefix + k, type=float) elif isinstance(v, bool): parser.add_argument('--' + prefix + k, action='store_true') elif isinstance(v, dict): add_args(parser, v, prefix + k + '.') elif isinstance(v, abc.Iterable): parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+') else: print(f'cannot parse key {prefix + k} of type {type(v)}') return parser class Config: """A facility for config and config files. It supports common file formats as configs: python/json/yaml. The interface is the same as a dict object and also allows access config values as attributes. 
Example: >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1]))) >>> cfg.a 1 >>> cfg.b {'b1': [0, 1]} >>> cfg.b.b1 [0, 1] >>> cfg = Config.fromfile('tests/data/config/a.py') >>> cfg.filename "/home/kchen/projects/mmcv/tests/data/config/a.py" >>> cfg.item4 'test' >>> cfg "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: " "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}" """ @staticmethod def _validate_py_syntax(filename): with open(filename, 'r', encoding='utf-8') as f: # Setting encoding explicitly to resolve coding issue on windows content = f.read() try: ast.parse(content) except SyntaxError as e: raise SyntaxError('There are syntax errors in config ' f'file {filename}: {e}') @staticmethod def _substitute_predefined_vars(filename, temp_config_name): file_dirname = osp.dirname(filename) file_basename = osp.basename(filename) file_basename_no_extension = osp.splitext(file_basename)[0] file_extname = osp.splitext(filename)[1] support_templates = dict( fileDirname=file_dirname, fileBasename=file_basename, fileBasenameNoExtension=file_basename_no_extension, fileExtname=file_extname) with open(filename, 'r', encoding='utf-8') as f: # Setting encoding explicitly to resolve coding issue on windows config_file = f.read() for key, value in support_templates.items(): regexp = r'\{\{\s*' + str(key) + r'\s*\}\}' value = value.replace('\\', '/') config_file = re.sub(regexp, value, config_file) with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file: tmp_config_file.write(config_file) @staticmethod def _pre_substitute_base_vars(filename, temp_config_name): """Substitute base variable placehoders to string, so that parsing would work.""" with open(filename, 'r', encoding='utf-8') as f: # Setting encoding explicitly to resolve coding issue on windows config_file = f.read() base_var_dict = {} regexp = r'\{\{\s*' + BASE_KEY + r'\.([\w\.]+)\s*\}\}' base_vars = set(re.findall(regexp, config_file)) for base_var in base_vars: randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}' base_var_dict[randstr] = base_var regexp = r'\{\{\s*' + BASE_KEY + r'\.' 
+ base_var + r'\s*\}\}' config_file = re.sub(regexp, f'"{randstr}"', config_file) with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file: tmp_config_file.write(config_file) return base_var_dict @staticmethod def _substitute_base_vars(cfg, base_var_dict, base_cfg): """Substitute variable strings to their actual values.""" cfg = copy.deepcopy(cfg) if isinstance(cfg, dict): for k, v in cfg.items(): if isinstance(v, str) and v in base_var_dict: new_v = base_cfg for new_k in base_var_dict[v].split('.'): new_v = new_v[new_k] cfg[k] = new_v elif isinstance(v, (list, tuple, dict)): cfg[k] = Config._substitute_base_vars( v, base_var_dict, base_cfg) elif isinstance(cfg, tuple): cfg = tuple( Config._substitute_base_vars(c, base_var_dict, base_cfg) for c in cfg) elif isinstance(cfg, list): cfg = [ Config._substitute_base_vars(c, base_var_dict, base_cfg) for c in cfg ] elif isinstance(cfg, str) and cfg in base_var_dict: new_v = base_cfg for new_k in base_var_dict[cfg].split('.'): new_v = new_v[new_k] cfg = new_v return cfg @staticmethod def _file2dict(filename, use_predefined_variables=True): filename = osp.abspath(osp.expanduser(filename)) check_file_exist(filename) fileExtname = osp.splitext(filename)[1] if fileExtname not in ['.py', '.json', '.yaml', '.yml']: raise IOError('Only py/yml/yaml/json type are supported now!') with tempfile.TemporaryDirectory() as temp_config_dir: temp_config_file = tempfile.NamedTemporaryFile( dir=temp_config_dir, suffix=fileExtname) if platform.system() == 'Windows': temp_config_file.close() temp_config_name = osp.basename(temp_config_file.name) # Substitute predefined variables if use_predefined_variables: Config._substitute_predefined_vars(filename, temp_config_file.name) else: shutil.copyfile(filename, temp_config_file.name) # Substitute base variables from placeholders to strings base_var_dict = Config._pre_substitute_base_vars( temp_config_file.name, temp_config_file.name) if filename.endswith('.py'): temp_module_name = osp.splitext(temp_config_name)[0] sys.path.insert(0, temp_config_dir) Config._validate_py_syntax(filename) mod = import_module(temp_module_name) sys.path.pop(0) cfg_dict = { name: value for name, value in mod.__dict__.items() if not name.startswith('__') } # delete imported module del sys.modules[temp_module_name] elif filename.endswith(('.yml', '.yaml', '.json')): import annotator.uniformer.mmcv as mmcv cfg_dict = mmcv.load(temp_config_file.name) # close temp file temp_config_file.close() # check deprecation information if DEPRECATION_KEY in cfg_dict: deprecation_info = cfg_dict.pop(DEPRECATION_KEY) warning_msg = f'The config file {filename} will be deprecated ' \ 'in the future.' if 'expected' in deprecation_info: warning_msg += f' Please use {deprecation_info["expected"]} ' \ 'instead.' 
if 'reference' in deprecation_info: warning_msg += ' More information can be found at ' \ f'{deprecation_info["reference"]}' warnings.warn(warning_msg) cfg_text = filename + '\n' with open(filename, 'r', encoding='utf-8') as f: # Setting encoding explicitly to resolve coding issue on windows cfg_text += f.read() if BASE_KEY in cfg_dict: cfg_dir = osp.dirname(filename) base_filename = cfg_dict.pop(BASE_KEY) base_filename = base_filename if isinstance( base_filename, list) else [base_filename] cfg_dict_list = list() cfg_text_list = list() for f in base_filename: _cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f)) cfg_dict_list.append(_cfg_dict) cfg_text_list.append(_cfg_text) base_cfg_dict = dict() for c in cfg_dict_list: duplicate_keys = base_cfg_dict.keys() & c.keys() if len(duplicate_keys) > 0: raise KeyError('Duplicate key is not allowed among bases. ' f'Duplicate keys: {duplicate_keys}') base_cfg_dict.update(c) # Substitute base variables from strings to their actual values cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, base_cfg_dict) base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict) cfg_dict = base_cfg_dict # merge cfg_text cfg_text_list.append(cfg_text) cfg_text = '\n'.join(cfg_text_list) return cfg_dict, cfg_text @staticmethod def _merge_a_into_b(a, b, allow_list_keys=False): """merge dict ``a`` into dict ``b`` (non-inplace). Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid in-place modifications. Args: a (dict): The source dict to be merged into ``b``. b (dict): The origin dict to be fetch keys from ``a``. allow_list_keys (bool): If True, int string keys (e.g. '0', '1') are allowed in source ``a`` and will replace the element of the corresponding index in b if b is a list. Default: False. Returns: dict: The modified dict of ``b`` using ``a``. Examples: # Normally merge a into b. >>> Config._merge_a_into_b( ... dict(obj=dict(a=2)), dict(obj=dict(a=1))) {'obj': {'a': 2}} # Delete b first and merge a into b. >>> Config._merge_a_into_b( ... dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1))) {'obj': {'a': 2}} # b is a list >>> Config._merge_a_into_b( ... {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True) [{'a': 2}, {'b': 2}] """ b = b.copy() for k, v in a.items(): if allow_list_keys and k.isdigit() and isinstance(b, list): k = int(k) if len(b) <= k: raise KeyError(f'Index {k} exceeds the length of list {b}') b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys) elif isinstance(v, dict) and k in b and not v.pop(DELETE_KEY, False): allowed_types = (dict, list) if allow_list_keys else dict if not isinstance(b[k], allowed_types): raise TypeError( f'{k}={v} in child config cannot inherit from base ' f'because {k} is a dict in the child config but is of ' f'type {type(b[k])} in base config. You may set ' f'`{DELETE_KEY}=True` to ignore the base config') b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys) else: b[k] = v return b @staticmethod def fromfile(filename, use_predefined_variables=True, import_custom_modules=True): cfg_dict, cfg_text = Config._file2dict(filename, use_predefined_variables) if import_custom_modules and cfg_dict.get('custom_imports', None): import_modules_from_strings(**cfg_dict['custom_imports']) return Config(cfg_dict, cfg_text=cfg_text, filename=filename) @staticmethod def fromstring(cfg_str, file_format): """Generate config from config str. Args: cfg_str (str): Config str. file_format (str): Config file format corresponding to the config str. Only py/yml/yaml/json type are supported now! 
Returns: obj:`Config`: Config obj. """ if file_format not in ['.py', '.json', '.yaml', '.yml']: raise IOError('Only py/yml/yaml/json type are supported now!') if file_format != '.py' and 'dict(' in cfg_str: # check if users specify a wrong suffix for python warnings.warn( 'Please check "file_format", the file format may be .py') with tempfile.NamedTemporaryFile( 'w', encoding='utf-8', suffix=file_format, delete=False) as temp_file: temp_file.write(cfg_str) # on windows, previous implementation cause error # see PR 1077 for details cfg = Config.fromfile(temp_file.name) os.remove(temp_file.name) return cfg @staticmethod def auto_argparser(description=None): """Generate argparser from config file automatically (experimental)""" partial_parser = ArgumentParser(description=description) partial_parser.add_argument('config', help='config file path') cfg_file = partial_parser.parse_known_args()[0].config cfg = Config.fromfile(cfg_file) parser = ArgumentParser(description=description) parser.add_argument('config', help='config file path') add_args(parser, cfg) return parser, cfg def __init__(self, cfg_dict=None, cfg_text=None, filename=None): if cfg_dict is None: cfg_dict = dict() elif not isinstance(cfg_dict, dict): raise TypeError('cfg_dict must be a dict, but ' f'got {type(cfg_dict)}') for key in cfg_dict: if key in RESERVED_KEYS: raise KeyError(f'{key} is reserved for config file') super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict)) super(Config, self).__setattr__('_filename', filename) if cfg_text: text = cfg_text elif filename: with open(filename, 'r') as f: text = f.read() else: text = '' super(Config, self).__setattr__('_text', text) @property def filename(self): return self._filename @property def text(self): return self._text @property def pretty_text(self): indent = 4 def _indent(s_, num_spaces): s = s_.split('\n') if len(s) == 1: return s_ first = s.pop(0) s = [(num_spaces * ' ') + line for line in s] s = '\n'.join(s) s = first + '\n' + s return s def _format_basic_types(k, v, use_mapping=False): if isinstance(v, str): v_str = f"'{v}'" else: v_str = str(v) if use_mapping: k_str = f"'{k}'" if isinstance(k, str) else str(k) attr_str = f'{k_str}: {v_str}' else: attr_str = f'{str(k)}={v_str}' attr_str = _indent(attr_str, indent) return attr_str def _format_list(k, v, use_mapping=False): # check if all items in the list are dict if all(isinstance(_, dict) for _ in v): v_str = '[\n' v_str += '\n'.join( f'dict({_indent(_format_dict(v_), indent)}),' for v_ in v).rstrip(',') if use_mapping: k_str = f"'{k}'" if isinstance(k, str) else str(k) attr_str = f'{k_str}: {v_str}' else: attr_str = f'{str(k)}={v_str}' attr_str = _indent(attr_str, indent) + ']' else: attr_str = _format_basic_types(k, v, use_mapping) return attr_str def _contain_invalid_identifier(dict_str): contain_invalid_identifier = False for key_name in dict_str: contain_invalid_identifier |= \ (not str(key_name).isidentifier()) return contain_invalid_identifier def _format_dict(input_dict, outest_level=False): r = '' s = [] use_mapping = _contain_invalid_identifier(input_dict) if use_mapping: r += '{' for idx, (k, v) in enumerate(input_dict.items()): is_last = idx >= len(input_dict) - 1 end = '' if outest_level or is_last else ',' if isinstance(v, dict): v_str = '\n' + _format_dict(v) if use_mapping: k_str = f"'{k}'" if isinstance(k, str) else str(k) attr_str = f'{k_str}: dict({v_str}' else: attr_str = f'{str(k)}=dict({v_str}' attr_str = _indent(attr_str, indent) + ')' + end elif isinstance(v, list): attr_str = 
_format_list(k, v, use_mapping) + end else: attr_str = _format_basic_types(k, v, use_mapping) + end s.append(attr_str) r += '\n'.join(s) if use_mapping: r += '}' return r cfg_dict = self._cfg_dict.to_dict() text = _format_dict(cfg_dict, outest_level=True) # copied from setup.cfg yapf_style = dict( based_on_style='pep8', blank_line_before_nested_class_or_def=True, split_before_expression_after_opening_paren=True) text, _ = FormatCode(text, style_config=yapf_style, verify=True) return text def __repr__(self): return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}' def __len__(self): return len(self._cfg_dict) def __getattr__(self, name): return getattr(self._cfg_dict, name) def __getitem__(self, name): return self._cfg_dict.__getitem__(name) def __setattr__(self, name, value): if isinstance(value, dict): value = ConfigDict(value) self._cfg_dict.__setattr__(name, value) def __setitem__(self, name, value): if isinstance(value, dict): value = ConfigDict(value) self._cfg_dict.__setitem__(name, value) def __iter__(self): return iter(self._cfg_dict) def __getstate__(self): return (self._cfg_dict, self._filename, self._text) def __setstate__(self, state): _cfg_dict, _filename, _text = state super(Config, self).__setattr__('_cfg_dict', _cfg_dict) super(Config, self).__setattr__('_filename', _filename) super(Config, self).__setattr__('_text', _text) def dump(self, file=None): cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict() if self.filename.endswith('.py'): if file is None: return self.pretty_text else: with open(file, 'w', encoding='utf-8') as f: f.write(self.pretty_text) else: import annotator.uniformer.mmcv as mmcv if file is None: file_format = self.filename.split('.')[-1] return mmcv.dump(cfg_dict, file_format=file_format) else: mmcv.dump(cfg_dict, file) def merge_from_dict(self, options, allow_list_keys=True): """Merge list into cfg_dict. Merge the dict parsed by MultipleKVAction into this cfg. Examples: >>> options = {'model.backbone.depth': 50, ... 'model.backbone.with_cp':True} >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet')))) >>> cfg.merge_from_dict(options) >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict') >>> assert cfg_dict == dict( ... model=dict(backbone=dict(depth=50, with_cp=True))) # Merge list element >>> cfg = Config(dict(pipeline=[ ... dict(type='LoadImage'), dict(type='LoadAnnotations')])) >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')}) >>> cfg.merge_from_dict(options, allow_list_keys=True) >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict') >>> assert cfg_dict == dict(pipeline=[ ... dict(type='SelfLoadImage'), dict(type='LoadAnnotations')]) Args: options (dict): dict of configs to merge from. allow_list_keys (bool): If True, int string keys (e.g. '0', '1') are allowed in ``options`` and will replace the element of the corresponding index in the config if the config is a list. Default: True. """ option_cfg_dict = {} for full_key, v in options.items(): d = option_cfg_dict key_list = full_key.split('.') for subkey in key_list[:-1]: d.setdefault(subkey, ConfigDict()) d = d[subkey] subkey = key_list[-1] d[subkey] = v cfg_dict = super(Config, self).__getattribute__('_cfg_dict') super(Config, self).__setattr__( '_cfg_dict', Config._merge_a_into_b( option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys)) class DictAction(Action): """ argparse action to split an argument into KEY=VALUE form on the first = and append to a dictionary. 
    List options can be passed as comma separated values, i.e.
    'KEY=V1,V2,V3', or with explicit brackets, i.e. 'KEY=[V1,V2,V3]'. It
    also supports nested brackets to build list/tuple values, e.g.
    'KEY=[(V1,V2),(V3,V4)]'.
    """

    @staticmethod
    def _parse_int_float_bool(val):
        try:
            return int(val)
        except ValueError:
            pass
        try:
            return float(val)
        except ValueError:
            pass
        if val.lower() in ['true', 'false']:
            return True if val.lower() == 'true' else False
        return val

    @staticmethod
    def _parse_iterable(val):
        """Parse iterable values in the string.

        All elements inside '()' or '[]' are treated as iterable values.

        Args:
            val (str): Value string.

        Returns:
            list | tuple: The expanded list or tuple from the string.

        Examples:
            >>> DictAction._parse_iterable('1,2,3')
            [1, 2, 3]
            >>> DictAction._parse_iterable('[a, b, c]')
            ['a', 'b', 'c']
            >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]')
            [(1, 2, 3), ['a', 'b'], 'c']
        """

        def find_next_comma(string):
            """Find the position of the next comma in the string.

            If no ',' is found in the string, return the string length. All
            chars inside '()' and '[]' are treated as one element and thus
            ',' inside these brackets are ignored.
            """
            assert (string.count('(') == string.count(')')) and (
                    string.count('[') == string.count(']')), \
                f'Imbalanced brackets exist in {string}'
            end = len(string)
            for idx, char in enumerate(string):
                pre = string[:idx]
                # The string before this ',' is balanced
                if ((char == ',') and (pre.count('(') == pre.count(')'))
                        and (pre.count('[') == pre.count(']'))):
                    end = idx
                    break
            return end

        # Strip ' and " characters and replace whitespace.
        val = val.strip('\'\"').replace(' ', '')
        is_tuple = False
        if val.startswith('(') and val.endswith(')'):
            is_tuple = True
            val = val[1:-1]
        elif val.startswith('[') and val.endswith(']'):
            val = val[1:-1]
        elif ',' not in val:
            # val is a single value
            return DictAction._parse_int_float_bool(val)

        values = []
        while len(val) > 0:
            comma_idx = find_next_comma(val)
            element = DictAction._parse_iterable(val[:comma_idx])
            values.append(element)
            val = val[comma_idx + 1:]
        if is_tuple:
            values = tuple(values)
        return values

    def __call__(self, parser, namespace, values, option_string=None):
        options = {}
        for kv in values:
            key, val = kv.split('=', maxsplit=1)
            options[key] = self._parse_iterable(val)
        setattr(namespace, self.dest, options)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/config.py
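A minimal usage sketch for the Config/DictAction machinery above, assuming the vendored annotator.uniformer.mmcv package and its dependencies are importable; the config keys below are illustrative only.

from argparse import ArgumentParser

from annotator.uniformer.mmcv.utils import Config, DictAction

# Build a config programmatically; attribute and item access are equivalent.
cfg = Config(dict(model=dict(backbone=dict(type='ResNet', depth=18))))
assert cfg.model.backbone.depth == cfg['model']['backbone']['depth'] == 18

# Override nested keys with dotted paths, as an mmcv-style CLI would.
cfg.merge_from_dict({'model.backbone.depth': 50})
assert cfg['model']['backbone']['depth'] == 50

# DictAction parses repeated KEY=VALUE pairs with int/float/bool/list coercion.
parser = ArgumentParser()
parser.add_argument('--cfg-options', nargs='+', action=DictAction)
args = parser.parse_args(['--cfg-options', 'lr=0.01', 'gpu_ids=[0,1]'])
print(args.cfg_options)  # {'lr': 0.01, 'gpu_ids': [0, 1]}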
# Copyright (c) OpenMMLab. All rights reserved. """This file holding some environment constant for sharing by other files.""" import os.path as osp import subprocess import sys from collections import defaultdict import cv2 import torch import annotator.uniformer.mmcv as mmcv from .parrots_wrapper import get_build_config def collect_env(): """Collect the information of the running environments. Returns: dict: The environment information. The following fields are contained. - sys.platform: The variable of ``sys.platform``. - Python: Python version. - CUDA available: Bool, indicating if CUDA is available. - GPU devices: Device type of each GPU. - CUDA_HOME (optional): The env var ``CUDA_HOME``. - NVCC (optional): NVCC version. - GCC: GCC version, "n/a" if GCC is not installed. - PyTorch: PyTorch version. - PyTorch compiling details: The output of \ ``torch.__config__.show()``. - TorchVision (optional): TorchVision version. - OpenCV: OpenCV version. - MMCV: MMCV version. - MMCV Compiler: The GCC version for compiling MMCV ops. - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops. """ env_info = {} env_info['sys.platform'] = sys.platform env_info['Python'] = sys.version.replace('\n', '') cuda_available = torch.cuda.is_available() env_info['CUDA available'] = cuda_available if cuda_available: devices = defaultdict(list) for k in range(torch.cuda.device_count()): devices[torch.cuda.get_device_name(k)].append(str(k)) for name, device_ids in devices.items(): env_info['GPU ' + ','.join(device_ids)] = name from annotator.uniformer.mmcv.utils.parrots_wrapper import _get_cuda_home CUDA_HOME = _get_cuda_home() env_info['CUDA_HOME'] = CUDA_HOME if CUDA_HOME is not None and osp.isdir(CUDA_HOME): try: nvcc = osp.join(CUDA_HOME, 'bin/nvcc') nvcc = subprocess.check_output( f'"{nvcc}" -V | tail -n1', shell=True) nvcc = nvcc.decode('utf-8').strip() except subprocess.SubprocessError: nvcc = 'Not Available' env_info['NVCC'] = nvcc try: gcc = subprocess.check_output('gcc --version | head -n1', shell=True) gcc = gcc.decode('utf-8').strip() env_info['GCC'] = gcc except subprocess.CalledProcessError: # gcc is unavailable env_info['GCC'] = 'n/a' env_info['PyTorch'] = torch.__version__ env_info['PyTorch compiling details'] = get_build_config() try: import torchvision env_info['TorchVision'] = torchvision.__version__ except ModuleNotFoundError: pass env_info['OpenCV'] = cv2.__version__ env_info['MMCV'] = mmcv.__version__ try: from annotator.uniformer.mmcv.ops import get_compiler_version, get_compiling_cuda_version except ModuleNotFoundError: env_info['MMCV Compiler'] = 'n/a' env_info['MMCV CUDA Compiler'] = 'n/a' else: env_info['MMCV Compiler'] = get_compiler_version() env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version() return env_info
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/env.py
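A quick smoke test for collect_env() above; it assumes torch, cv2 and the vendored mmcv are all installed, since the function imports all three.

from annotator.uniformer.mmcv.utils.env import collect_env

# Prints one 'name: value' row per collected field; the CUDA rows only
# appear when torch.cuda.is_available() is True.
for name, value in collect_env().items():
    print(f'{name}: {value}')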
# Copyright (c) OpenMMLab. All rights reserved. import inspect import warnings from functools import partial from .misc import is_seq_of def build_from_cfg(cfg, registry, default_args=None): """Build a module from config dict. Args: cfg (dict): Config dict. It should at least contain the key "type". registry (:obj:`Registry`): The registry to search the type from. default_args (dict, optional): Default initialization arguments. Returns: object: The constructed object. """ if not isinstance(cfg, dict): raise TypeError(f'cfg must be a dict, but got {type(cfg)}') if 'type' not in cfg: if default_args is None or 'type' not in default_args: raise KeyError( '`cfg` or `default_args` must contain the key "type", ' f'but got {cfg}\n{default_args}') if not isinstance(registry, Registry): raise TypeError('registry must be an mmcv.Registry object, ' f'but got {type(registry)}') if not (isinstance(default_args, dict) or default_args is None): raise TypeError('default_args must be a dict or None, ' f'but got {type(default_args)}') args = cfg.copy() if default_args is not None: for name, value in default_args.items(): args.setdefault(name, value) obj_type = args.pop('type') if isinstance(obj_type, str): obj_cls = registry.get(obj_type) if obj_cls is None: raise KeyError( f'{obj_type} is not in the {registry.name} registry') elif inspect.isclass(obj_type): obj_cls = obj_type else: raise TypeError( f'type must be a str or valid type, but got {type(obj_type)}') try: return obj_cls(**args) except Exception as e: # Normal TypeError does not print class name. raise type(e)(f'{obj_cls.__name__}: {e}') class Registry: """A registry to map strings to classes. Registered object could be built from registry. Example: >>> MODELS = Registry('models') >>> @MODELS.register_module() >>> class ResNet: >>> pass >>> resnet = MODELS.build(dict(type='ResNet')) Please refer to https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for advanced usage. Args: name (str): Registry name. build_func(func, optional): Build function to construct instance from Registry, func:`build_from_cfg` is used if neither ``parent`` or ``build_func`` is specified. If ``parent`` is specified and ``build_func`` is not given, ``build_func`` will be inherited from ``parent``. Default: None. parent (Registry, optional): Parent registry. The class registered in children registry could be built from parent. Default: None. scope (str, optional): The scope of registry. It is the key to search for children registry. If not specified, scope will be the name of the package where class is defined, e.g. mmdet, mmcls, mmseg. Default: None. """ def __init__(self, name, build_func=None, parent=None, scope=None): self._name = name self._module_dict = dict() self._children = dict() self._scope = self.infer_scope() if scope is None else scope # self.build_func will be set with the following priority: # 1. build_func # 2. parent.build_func # 3. build_from_cfg if build_func is None: if parent is not None: self.build_func = parent.build_func else: self.build_func = build_from_cfg else: self.build_func = build_func if parent is not None: assert isinstance(parent, Registry) parent._add_children(self) self.parent = parent else: self.parent = None def __len__(self): return len(self._module_dict) def __contains__(self, key): return self.get(key) is not None def __repr__(self): format_str = self.__class__.__name__ + \ f'(name={self._name}, ' \ f'items={self._module_dict})' return format_str @staticmethod def infer_scope(): """Infer the scope of registry. 
The name of the package where registry is defined will be returned. Example: # in mmdet/models/backbone/resnet.py >>> MODELS = Registry('models') >>> @MODELS.register_module() >>> class ResNet: >>> pass The scope of ``ResNet`` will be ``mmdet``. Returns: scope (str): The inferred scope name. """ # inspect.stack() trace where this function is called, the index-2 # indicates the frame where `infer_scope()` is called filename = inspect.getmodule(inspect.stack()[2][0]).__name__ split_filename = filename.split('.') return split_filename[0] @staticmethod def split_scope_key(key): """Split scope and key. The first scope will be split from key. Examples: >>> Registry.split_scope_key('mmdet.ResNet') 'mmdet', 'ResNet' >>> Registry.split_scope_key('ResNet') None, 'ResNet' Return: scope (str, None): The first scope. key (str): The remaining key. """ split_index = key.find('.') if split_index != -1: return key[:split_index], key[split_index + 1:] else: return None, key @property def name(self): return self._name @property def scope(self): return self._scope @property def module_dict(self): return self._module_dict @property def children(self): return self._children def get(self, key): """Get the registry record. Args: key (str): The class name in string format. Returns: class: The corresponding class. """ scope, real_key = self.split_scope_key(key) if scope is None or scope == self._scope: # get from self if real_key in self._module_dict: return self._module_dict[real_key] else: # get from self._children if scope in self._children: return self._children[scope].get(real_key) else: # goto root parent = self.parent while parent.parent is not None: parent = parent.parent return parent.get(key) def build(self, *args, **kwargs): return self.build_func(*args, **kwargs, registry=self) def _add_children(self, registry): """Add children for a registry. The ``registry`` will be added as children based on its scope. The parent registry could build objects from children registry. Example: >>> models = Registry('models') >>> mmdet_models = Registry('models', parent=models) >>> @mmdet_models.register_module() >>> class ResNet: >>> pass >>> resnet = models.build(dict(type='mmdet.ResNet')) """ assert isinstance(registry, Registry) assert registry.scope is not None assert registry.scope not in self.children, \ f'scope {registry.scope} exists in {self.name} registry' self.children[registry.scope] = registry def _register_module(self, module_class, module_name=None, force=False): if not inspect.isclass(module_class): raise TypeError('module must be a class, ' f'but got {type(module_class)}') if module_name is None: module_name = module_class.__name__ if isinstance(module_name, str): module_name = [module_name] for name in module_name: if not force and name in self._module_dict: raise KeyError(f'{name} is already registered ' f'in {self.name}') self._module_dict[name] = module_class def deprecated_register_module(self, cls=None, force=False): warnings.warn( 'The old API of register_module(module, force=False) ' 'is deprecated and will be removed, please use the new API ' 'register_module(name=None, force=False, module=None) instead.') if cls is None: return partial(self.deprecated_register_module, force=force) self._register_module(cls, force=force) return cls def register_module(self, name=None, force=False, module=None): """Register a module. A record will be added to `self._module_dict`, whose key is the class name or the specified name, and value is the class itself. It can be used as a decorator or a normal function. 
        Example:
            >>> backbones = Registry('backbone')
            >>> @backbones.register_module()
            >>> class ResNet:
            >>>     pass

            >>> backbones = Registry('backbone')
            >>> @backbones.register_module(name='mnet')
            >>> class MobileNet:
            >>>     pass

            >>> backbones = Registry('backbone')
            >>> class ResNet:
            >>>     pass
            >>> backbones.register_module(ResNet)

        Args:
            name (str | None): The module name to be registered. If not
                specified, the class name will be used.
            force (bool, optional): Whether to override an existing class
                with the same name. Default: False.
            module (type): Module class to be registered.
        """
        if not isinstance(force, bool):
            raise TypeError(f'force must be a boolean, but got {type(force)}')
        # NOTE: This is a workaround to be compatible with the old api,
        # while it may introduce unexpected bugs.
        if isinstance(name, type):
            return self.deprecated_register_module(name, force=force)

        # raise the error ahead of time
        if not (name is None or isinstance(name, str)
                or is_seq_of(name, str)):
            raise TypeError(
                'name must be either of None, an instance of str or a '
                f'sequence of str, but got {type(name)}')

        # use it as a normal method: x.register_module(module=SomeClass)
        if module is not None:
            self._register_module(
                module_class=module, module_name=name, force=force)
            return module

        # use it as a decorator: @x.register_module()
        def _register(cls):
            self._register_module(
                module_class=cls, module_name=name, force=force)
            return cls

        return _register
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/registry.py
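A minimal sketch of the Registry/build_from_cfg workflow defined above; the BACKBONES registry and the ResNet stub are illustrative stand-ins.

from annotator.uniformer.mmcv.utils.registry import Registry

BACKBONES = Registry('backbone')

@BACKBONES.register_module()
class ResNet:

    def __init__(self, depth=50):
        self.depth = depth

# build() dispatches to build_from_cfg: 'type' selects the registered
# class and the remaining keys become constructor arguments.
model = BACKBONES.build(dict(type='ResNet', depth=101))
assert isinstance(model, ResNet) and model.depth == 101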
# Copyright (c) OpenMMLab. All rights reserved. from functools import partial import torch TORCH_VERSION = torch.__version__ def is_rocm_pytorch() -> bool: is_rocm = False if TORCH_VERSION != 'parrots': try: from torch.utils.cpp_extension import ROCM_HOME is_rocm = True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False except ImportError: pass return is_rocm def _get_cuda_home(): if TORCH_VERSION == 'parrots': from parrots.utils.build_extension import CUDA_HOME else: if is_rocm_pytorch(): from torch.utils.cpp_extension import ROCM_HOME CUDA_HOME = ROCM_HOME else: from torch.utils.cpp_extension import CUDA_HOME return CUDA_HOME def get_build_config(): if TORCH_VERSION == 'parrots': from parrots.config import get_build_info return get_build_info() else: return torch.__config__.show() def _get_conv(): if TORCH_VERSION == 'parrots': from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin else: from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin return _ConvNd, _ConvTransposeMixin def _get_dataloader(): if TORCH_VERSION == 'parrots': from torch.utils.data import DataLoader, PoolDataLoader else: from torch.utils.data import DataLoader PoolDataLoader = DataLoader return DataLoader, PoolDataLoader def _get_extension(): if TORCH_VERSION == 'parrots': from parrots.utils.build_extension import BuildExtension, Extension CppExtension = partial(Extension, cuda=False) CUDAExtension = partial(Extension, cuda=True) else: from torch.utils.cpp_extension import (BuildExtension, CppExtension, CUDAExtension) return BuildExtension, CppExtension, CUDAExtension def _get_pool(): if TORCH_VERSION == 'parrots': from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd) else: from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd) return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd def _get_norm(): if TORCH_VERSION == 'parrots': from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm SyncBatchNorm_ = torch.nn.SyncBatchNorm2d else: from torch.nn.modules.instancenorm import _InstanceNorm from torch.nn.modules.batchnorm import _BatchNorm SyncBatchNorm_ = torch.nn.SyncBatchNorm return _BatchNorm, _InstanceNorm, SyncBatchNorm_ _ConvNd, _ConvTransposeMixin = _get_conv() DataLoader, PoolDataLoader = _get_dataloader() BuildExtension, CppExtension, CUDAExtension = _get_extension() _BatchNorm, _InstanceNorm, SyncBatchNorm_ = _get_norm() _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd = _get_pool() class SyncBatchNorm(SyncBatchNorm_): def _check_input_dim(self, input): if TORCH_VERSION == 'parrots': if input.dim() < 2: raise ValueError( f'expected at least 2D input (got {input.dim()}D input)') else: super()._check_input_dim(input)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/parrots_wrapper.py
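The wrapper above lets downstream code stay agnostic to parrots versus vanilla PyTorch; a small sketch, assuming plain PyTorch is installed:

import torch.nn as nn

from annotator.uniformer.mmcv.utils.parrots_wrapper import (
    TORCH_VERSION, _BatchNorm, get_build_config)

print(TORCH_VERSION)  # e.g. '1.13.1', or 'parrots' under parrots
# _BatchNorm matches every BatchNorm*d variant, so norm-layer checks
# reduce to a single isinstance call.
assert isinstance(nn.BatchNorm2d(8), _BatchNorm)
print(get_build_config().splitlines()[0])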
# Copyright (c) OpenMMLab. All rights reserved. from time import time class TimerError(Exception): def __init__(self, message): self.message = message super(TimerError, self).__init__(message) class Timer: """A flexible Timer class. :Example: >>> import time >>> import annotator.uniformer.mmcv as mmcv >>> with mmcv.Timer(): >>> # simulate a code block that will run for 1s >>> time.sleep(1) 1.000 >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'): >>> # simulate a code block that will run for 1s >>> time.sleep(1) it takes 1.0 seconds >>> timer = mmcv.Timer() >>> time.sleep(0.5) >>> print(timer.since_start()) 0.500 >>> time.sleep(0.5) >>> print(timer.since_last_check()) 0.500 >>> print(timer.since_start()) 1.000 """ def __init__(self, start=True, print_tmpl=None): self._is_running = False self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}' if start: self.start() @property def is_running(self): """bool: indicate whether the timer is running""" return self._is_running def __enter__(self): self.start() return self def __exit__(self, type, value, traceback): print(self.print_tmpl.format(self.since_last_check())) self._is_running = False def start(self): """Start the timer.""" if not self._is_running: self._t_start = time() self._is_running = True self._t_last = time() def since_start(self): """Total time since the timer is started. Returns (float): Time in seconds. """ if not self._is_running: raise TimerError('timer is not running') self._t_last = time() return self._t_last - self._t_start def since_last_check(self): """Time since the last checking. Either :func:`since_start` or :func:`since_last_check` is a checking operation. Returns (float): Time in seconds. """ if not self._is_running: raise TimerError('timer is not running') dur = time() - self._t_last self._t_last = time() return dur _g_timers = {} # global timers def check_time(timer_id): """Add check points in a single line. This method is suitable for running a task on a list of items. A timer will be registered when the method is called for the first time. :Example: >>> import time >>> import annotator.uniformer.mmcv as mmcv >>> for i in range(1, 6): >>> # simulate a code block >>> time.sleep(i) >>> mmcv.check_time('task1') 2.000 3.000 4.000 5.000 Args: timer_id (str): Timer identifier. """ if timer_id not in _g_timers: _g_timers[timer_id] = Timer() return 0 else: return _g_timers[timer_id].since_last_check()
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/timer.py
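Beyond the docstring examples above, check_time() is handy inside loops; a short sketch:

import time

from annotator.uniformer.mmcv.utils.timer import Timer, check_time

with Timer(print_tmpl='block took {:.3f}s'):
    time.sleep(0.2)

for _ in range(3):
    time.sleep(0.1)
    # Returns 0 on the first call (timer registration), then the
    # interval since the previous call.
    print(check_time('loop'))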
# flake8: noqa # Copyright (c) OpenMMLab. All rights reserved. from .config import Config, ConfigDict, DictAction from .misc import (check_prerequisites, concat_list, deprecated_api_warning, has_method, import_modules_from_strings, is_list_of, is_method_overridden, is_seq_of, is_str, is_tuple_of, iter_cast, list_cast, requires_executable, requires_package, slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple, tuple_cast) from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist, scandir, symlink) from .progressbar import (ProgressBar, track_iter_progress, track_parallel_progress, track_progress) from .testing import (assert_attrs_equal, assert_dict_contains_subset, assert_dict_has_keys, assert_is_norm_layer, assert_keys_equal, assert_params_all_zeros, check_python_script) from .timer import Timer, TimerError, check_time from .version_utils import digit_version, get_git_hash try: import torch except ImportError: __all__ = [ 'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list', 'check_prerequisites', 'requires_package', 'requires_executable', 'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar', 'track_progress', 'track_iter_progress', 'track_parallel_progress', 'Timer', 'TimerError', 'check_time', 'deprecated_api_warning', 'digit_version', 'get_git_hash', 'import_modules_from_strings', 'assert_dict_contains_subset', 'assert_attrs_equal', 'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script', 'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple', 'is_method_overridden', 'has_method' ] else: from .env import collect_env from .logging import get_logger, print_log from .parrots_jit import jit, skip_no_elena from .parrots_wrapper import ( TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader, PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm, _MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home) from .registry import Registry, build_from_cfg from .trace import is_jit_tracing __all__ = [ 'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger', 'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list', 'check_prerequisites', 'requires_package', 'requires_executable', 'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar', 'track_progress', 'track_iter_progress', 'track_parallel_progress', 'Registry', 'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm', '_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm', '_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd', 'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension', 'DataLoader', 'PoolDataLoader', 'TORCH_VERSION', 'deprecated_api_warning', 'digit_version', 'get_git_hash', 'import_modules_from_strings', 'jit', 'skip_no_elena', 'assert_dict_contains_subset', 'assert_attrs_equal', 'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer', 'assert_params_all_zeros', 'check_python_script', 'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch', '_get_cuda_home', 'has_method' ]
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/__init__.py
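The try/except above is meant to keep the torch-free helpers usable even when PyTorch is absent; a tiny sketch of that subset:

from annotator.uniformer.mmcv.utils import is_list_of, slice_list

assert is_list_of([1, 2, 3], int)
# slice_list splits a flat list into sublists of the given lengths.
assert slice_list([1, 2, 3, 4], [1, 3]) == [[1], [2, 3, 4]]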
# Copyright (c) OpenMMLab. All rights reserved. import importlib import os import pkgutil import warnings from collections import namedtuple import torch if torch.__version__ != 'parrots': def load_ext(name, funcs): ext = importlib.import_module('mmcv.' + name) for fun in funcs: assert hasattr(ext, fun), f'{fun} miss in module {name}' return ext else: from parrots import extension from parrots.base import ParrotsException has_return_value_ops = [ 'nms', 'softnms', 'nms_match', 'nms_rotated', 'top_pool_forward', 'top_pool_backward', 'bottom_pool_forward', 'bottom_pool_backward', 'left_pool_forward', 'left_pool_backward', 'right_pool_forward', 'right_pool_backward', 'fused_bias_leakyrelu', 'upfirdn2d', 'ms_deform_attn_forward', 'pixel_group', 'contour_expand', ] def get_fake_func(name, e): def fake_func(*args, **kwargs): warnings.warn(f'{name} is not supported in parrots now') raise e return fake_func def load_ext(name, funcs): ExtModule = namedtuple('ExtModule', funcs) ext_list = [] lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) for fun in funcs: try: ext_fun = extension.load(fun, name, lib_dir=lib_root) except ParrotsException as e: if 'No element registered' not in e.message: warnings.warn(e.message) ext_fun = get_fake_func(fun, e) ext_list.append(ext_fun) else: if fun in has_return_value_ops: ext_list.append(ext_fun.op) else: ext_list.append(ext_fun.op_) return ExtModule(*ext_list) def check_ops_exist(): ext_loader = pkgutil.find_loader('mmcv._ext') return ext_loader is not None
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/ext_loader.py
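check_ops_exist() and load_ext() above look up the compiled top-level mmcv._ext module rather than this vendored copy, so the sketch below assumes a standalone mmcv with compiled ops is installed:

from annotator.uniformer.mmcv.utils import ext_loader

if ext_loader.check_ops_exist():
    # Request only ops known to be compiled in; 'nms' is one example.
    ext_module = ext_loader.load_ext('_ext', ['nms'])
    print(ext_module.nms)
else:
    print('mmcv._ext is not available; compiled ops cannot be used')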
# Copyright (c) OpenMMLab. All rights reserved. import os from .parrots_wrapper import TORCH_VERSION parrots_jit_option = os.getenv('PARROTS_JIT_OPTION') if TORCH_VERSION == 'parrots' and parrots_jit_option == 'ON': from parrots.jit import pat as jit else: def jit(func=None, check_input=None, full_shape=True, derivate=False, coderize=False, optimize=False): def wrapper(func): def wrapper_inner(*args, **kargs): return func(*args, **kargs) return wrapper_inner if func is None: return wrapper else: return func if TORCH_VERSION == 'parrots': from parrots.utils.tester import skip_no_elena else: def skip_no_elena(func): def wrapper(*args, **kargs): return func(*args, **kargs) return wrapper
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/parrots_jit.py
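Under vanilla PyTorch the jit decorator above is a pass-through, so it can be applied unconditionally; a minimal sketch:

from annotator.uniformer.mmcv.utils.parrots_jit import jit

# A no-op here; it only compiles under parrots with PARROTS_JIT_OPTION=ON.
@jit(coderize=True)
def add(a, b):
    return a + b

assert add(1, 2) == 3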
# Copyright (c) OpenMMLab. All rights reserved. import sys from collections.abc import Iterable from multiprocessing import Pool from shutil import get_terminal_size from .timer import Timer class ProgressBar: """A progress bar which can print the progress.""" def __init__(self, task_num=0, bar_width=50, start=True, file=sys.stdout): self.task_num = task_num self.bar_width = bar_width self.completed = 0 self.file = file if start: self.start() @property def terminal_width(self): width, _ = get_terminal_size() return width def start(self): if self.task_num > 0: self.file.write(f'[{" " * self.bar_width}] 0/{self.task_num}, ' 'elapsed: 0s, ETA:') else: self.file.write('completed: 0, elapsed: 0s') self.file.flush() self.timer = Timer() def update(self, num_tasks=1): assert num_tasks > 0 self.completed += num_tasks elapsed = self.timer.since_start() if elapsed > 0: fps = self.completed / elapsed else: fps = float('inf') if self.task_num > 0: percentage = self.completed / float(self.task_num) eta = int(elapsed * (1 - percentage) / percentage + 0.5) msg = f'\r[{{}}] {self.completed}/{self.task_num}, ' \ f'{fps:.1f} task/s, elapsed: {int(elapsed + 0.5)}s, ' \ f'ETA: {eta:5}s' bar_width = min(self.bar_width, int(self.terminal_width - len(msg)) + 2, int(self.terminal_width * 0.6)) bar_width = max(2, bar_width) mark_width = int(bar_width * percentage) bar_chars = '>' * mark_width + ' ' * (bar_width - mark_width) self.file.write(msg.format(bar_chars)) else: self.file.write( f'completed: {self.completed}, elapsed: {int(elapsed + 0.5)}s,' f' {fps:.1f} tasks/s') self.file.flush() def track_progress(func, tasks, bar_width=50, file=sys.stdout, **kwargs): """Track the progress of tasks execution with a progress bar. Tasks are done with a simple for-loop. Args: func (callable): The function to be applied to each task. tasks (list or tuple[Iterable, int]): A list of tasks or (tasks, total num). bar_width (int): Width of progress bar. Returns: list: The task results. """ if isinstance(tasks, tuple): assert len(tasks) == 2 assert isinstance(tasks[0], Iterable) assert isinstance(tasks[1], int) task_num = tasks[1] tasks = tasks[0] elif isinstance(tasks, Iterable): task_num = len(tasks) else: raise TypeError( '"tasks" must be an iterable object or a (iterator, int) tuple') prog_bar = ProgressBar(task_num, bar_width, file=file) results = [] for task in tasks: results.append(func(task, **kwargs)) prog_bar.update() prog_bar.file.write('\n') return results def init_pool(process_num, initializer=None, initargs=None): if initializer is None: return Pool(process_num) elif initargs is None: return Pool(process_num, initializer) else: if not isinstance(initargs, tuple): raise TypeError('"initargs" must be a tuple') return Pool(process_num, initializer, initargs) def track_parallel_progress(func, tasks, nproc, initializer=None, initargs=None, bar_width=50, chunksize=1, skip_first=False, keep_order=True, file=sys.stdout): """Track the progress of parallel task execution with a progress bar. The built-in :mod:`multiprocessing` module is used for process pools and tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`. Args: func (callable): The function to be applied to each task. tasks (list or tuple[Iterable, int]): A list of tasks or (tasks, total num). nproc (int): Process (worker) number. initializer (None or callable): Refer to :class:`multiprocessing.Pool` for details. initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for details. 
chunksize (int): Refer to :class:`multiprocessing.Pool` for details. bar_width (int): Width of progress bar. skip_first (bool): Whether to skip the first sample for each worker when estimating fps, since the initialization step may takes longer. keep_order (bool): If True, :func:`Pool.imap` is used, otherwise :func:`Pool.imap_unordered` is used. Returns: list: The task results. """ if isinstance(tasks, tuple): assert len(tasks) == 2 assert isinstance(tasks[0], Iterable) assert isinstance(tasks[1], int) task_num = tasks[1] tasks = tasks[0] elif isinstance(tasks, Iterable): task_num = len(tasks) else: raise TypeError( '"tasks" must be an iterable object or a (iterator, int) tuple') pool = init_pool(nproc, initializer, initargs) start = not skip_first task_num -= nproc * chunksize * int(skip_first) prog_bar = ProgressBar(task_num, bar_width, start, file=file) results = [] if keep_order: gen = pool.imap(func, tasks, chunksize) else: gen = pool.imap_unordered(func, tasks, chunksize) for result in gen: results.append(result) if skip_first: if len(results) < nproc * chunksize: continue elif len(results) == nproc * chunksize: prog_bar.start() continue prog_bar.update() prog_bar.file.write('\n') pool.close() pool.join() return results def track_iter_progress(tasks, bar_width=50, file=sys.stdout): """Track the progress of tasks iteration or enumeration with a progress bar. Tasks are yielded with a simple for-loop. Args: tasks (list or tuple[Iterable, int]): A list of tasks or (tasks, total num). bar_width (int): Width of progress bar. Yields: list: The task results. """ if isinstance(tasks, tuple): assert len(tasks) == 2 assert isinstance(tasks[0], Iterable) assert isinstance(tasks[1], int) task_num = tasks[1] tasks = tasks[0] elif isinstance(tasks, Iterable): task_num = len(tasks) else: raise TypeError( '"tasks" must be an iterable object or a (iterator, int) tuple') prog_bar = ProgressBar(task_num, bar_width, file=file) for task in tasks: yield task prog_bar.update() prog_bar.file.write('\n')
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/progressbar.py
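A toy workload for the tracking helpers above; the sleep times are arbitrary. Note that track_parallel_progress additionally needs a picklable function and a __main__ guard, so this sketch sticks to the serial variants:

import time

from annotator.uniformer.mmcv.utils.progressbar import (
    track_iter_progress, track_progress)

def square(x):
    time.sleep(0.01)
    return x * x

results = track_progress(square, list(range(20)))  # simple for-loop variant
assert results[3] == 9

for _ in track_iter_progress(list(range(10))):  # generator variant
    pass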
# Copyright (c) Open-MMLab. import sys from collections.abc import Iterable from runpy import run_path from shlex import split from typing import Any, Dict, List from unittest.mock import patch def check_python_script(cmd): """Run the python cmd script with `__main__`. The difference between `os.system` is that, this function exectues code in the current process, so that it can be tracked by coverage tools. Currently it supports two forms: - ./tests/data/scripts/hello.py zz - python tests/data/scripts/hello.py zz """ args = split(cmd) if args[0] == 'python': args = args[1:] with patch.object(sys, 'argv', args): run_path(args[0], run_name='__main__') def _any(judge_result): """Since built-in ``any`` works only when the element of iterable is not iterable, implement the function.""" if not isinstance(judge_result, Iterable): return judge_result try: for element in judge_result: if _any(element): return True except TypeError: # Maybe encounter the case: torch.tensor(True) | torch.tensor(False) if judge_result: return True return False def assert_dict_contains_subset(dict_obj: Dict[Any, Any], expected_subset: Dict[Any, Any]) -> bool: """Check if the dict_obj contains the expected_subset. Args: dict_obj (Dict[Any, Any]): Dict object to be checked. expected_subset (Dict[Any, Any]): Subset expected to be contained in dict_obj. Returns: bool: Whether the dict_obj contains the expected_subset. """ for key, value in expected_subset.items(): if key not in dict_obj.keys() or _any(dict_obj[key] != value): return False return True def assert_attrs_equal(obj: Any, expected_attrs: Dict[str, Any]) -> bool: """Check if attribute of class object is correct. Args: obj (object): Class object to be checked. expected_attrs (Dict[str, Any]): Dict of the expected attrs. Returns: bool: Whether the attribute of class object is correct. """ for attr, value in expected_attrs.items(): if not hasattr(obj, attr) or _any(getattr(obj, attr) != value): return False return True def assert_dict_has_keys(obj: Dict[str, Any], expected_keys: List[str]) -> bool: """Check if the obj has all the expected_keys. Args: obj (Dict[str, Any]): Object to be checked. expected_keys (List[str]): Keys expected to contained in the keys of the obj. Returns: bool: Whether the obj has the expected keys. """ return set(expected_keys).issubset(set(obj.keys())) def assert_keys_equal(result_keys: List[str], target_keys: List[str]) -> bool: """Check if target_keys is equal to result_keys. Args: result_keys (List[str]): Result keys to be checked. target_keys (List[str]): Target keys to be checked. Returns: bool: Whether target_keys is equal to result_keys. """ return set(result_keys) == set(target_keys) def assert_is_norm_layer(module) -> bool: """Check if the module is a norm layer. Args: module (nn.Module): The module to be checked. Returns: bool: Whether the module is a norm layer. """ from .parrots_wrapper import _BatchNorm, _InstanceNorm from torch.nn import GroupNorm, LayerNorm norm_layer_candidates = (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm) return isinstance(module, norm_layer_candidates) def assert_params_all_zeros(module) -> bool: """Check if the parameters of the module is all zeros. Args: module (nn.Module): The module to be checked. Returns: bool: Whether the parameters of the module is all zeros. 
""" weight_data = module.weight.data is_weight_zero = weight_data.allclose( weight_data.new_zeros(weight_data.size())) if hasattr(module, 'bias') and module.bias is not None: bias_data = module.bias.data is_bias_zero = bias_data.allclose( bias_data.new_zeros(bias_data.size())) else: is_bias_zero = True return is_weight_zero and is_bias_zero
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/testing.py
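The helpers above return booleans instead of raising, so they compose with plain assert statements; a minimal sketch:

from annotator.uniformer.mmcv.utils.testing import (
    assert_dict_contains_subset, assert_keys_equal)

result = dict(loss=0.5, acc=0.9, step=10)
assert assert_dict_contains_subset(result, dict(acc=0.9))
assert assert_keys_equal(list(result.keys()), ['loss', 'acc', 'step'])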
# Copyright (c) OpenMMLab. All rights reserved. import os import os.path as osp from pathlib import Path from .misc import is_str def is_filepath(x): return is_str(x) or isinstance(x, Path) def fopen(filepath, *args, **kwargs): if is_str(filepath): return open(filepath, *args, **kwargs) elif isinstance(filepath, Path): return filepath.open(*args, **kwargs) raise ValueError('`filepath` should be a string or a Path') def check_file_exist(filename, msg_tmpl='file "{}" does not exist'): if not osp.isfile(filename): raise FileNotFoundError(msg_tmpl.format(filename)) def mkdir_or_exist(dir_name, mode=0o777): if dir_name == '': return dir_name = osp.expanduser(dir_name) os.makedirs(dir_name, mode=mode, exist_ok=True) def symlink(src, dst, overwrite=True, **kwargs): if os.path.lexists(dst) and overwrite: os.remove(dst) os.symlink(src, dst, **kwargs) def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True): """Scan a directory to find the interested files. Args: dir_path (str | obj:`Path`): Path of the directory. suffix (str | tuple(str), optional): File suffix that we are interested in. Default: None. recursive (bool, optional): If set to True, recursively scan the directory. Default: False. case_sensitive (bool, optional) : If set to False, ignore the case of suffix. Default: True. Returns: A generator for all the interested files with relative paths. """ if isinstance(dir_path, (str, Path)): dir_path = str(dir_path) else: raise TypeError('"dir_path" must be a string or Path object') if (suffix is not None) and not isinstance(suffix, (str, tuple)): raise TypeError('"suffix" must be a string or tuple of strings') if suffix is not None and not case_sensitive: suffix = suffix.lower() if isinstance(suffix, str) else tuple( item.lower() for item in suffix) root = dir_path def _scandir(dir_path, suffix, recursive, case_sensitive): for entry in os.scandir(dir_path): if not entry.name.startswith('.') and entry.is_file(): rel_path = osp.relpath(entry.path, root) _rel_path = rel_path if case_sensitive else rel_path.lower() if suffix is None or _rel_path.endswith(suffix): yield rel_path elif recursive and os.path.isdir(entry.path): # scan recursively if entry.path is a directory yield from _scandir(entry.path, suffix, recursive, case_sensitive) return _scandir(dir_path, suffix, recursive, case_sensitive) def find_vcs_root(path, markers=('.git', )): """Finds the root directory (including itself) of specified markers. Args: path (str): Path of directory or file. markers (list[str], optional): List of file or directory names. Returns: The directory contained one of the markers or None if not found. """ if osp.isfile(path): path = osp.dirname(path) prev, cur = None, osp.abspath(osp.expanduser(path)) while cur != prev: if any(osp.exists(osp.join(cur, marker)) for marker in markers): return cur prev, cur = cur, osp.split(cur)[0] return None
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/utils/path.py
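A short sketch of the path helpers above, writing into a throwaway temporary directory:

import os.path as osp
import tempfile

from annotator.uniformer.mmcv.utils.path import mkdir_or_exist, scandir

root = tempfile.mkdtemp()
mkdir_or_exist(osp.join(root, 'sub'))  # idempotent makedirs
open(osp.join(root, 'sub', 'a.json'), 'w').close()

# scandir yields paths relative to root; posix separators shown.
print(list(scandir(root, suffix='.json', recursive=True)))  # ['sub/a.json']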
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import annotator.uniformer.mmcv as mmcv try: import torch except ImportError: torch = None def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True): """Convert tensor to 3-channel images. Args: tensor (torch.Tensor): Tensor that contains multiple images, shape ( N, C, H, W). mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0). std (tuple[float], optional): Standard deviation of images. Defaults to (1, 1, 1). to_rgb (bool, optional): Whether the tensor was converted to RGB format in the first place. If so, convert it back to BGR. Defaults to True. Returns: list[np.ndarray]: A list that contains multiple images. """ if torch is None: raise RuntimeError('pytorch is not installed') assert torch.is_tensor(tensor) and tensor.ndim == 4 assert len(mean) == 3 assert len(std) == 3 num_imgs = tensor.size(0) mean = np.array(mean, dtype=np.float32) std = np.array(std, dtype=np.float32) imgs = [] for img_id in range(num_imgs): img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0) img = mmcv.imdenormalize( img, mean, std, to_bgr=to_rgb).astype(np.uint8) imgs.append(np.ascontiguousarray(img)) return imgs
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/image/misc.py
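A minimal sketch for tensor2imgs() above on a fake NCHW batch; the mean/std values are illustrative:

import torch

from annotator.uniformer.mmcv.image.misc import tensor2imgs

batch = torch.randn(2, 3, 32, 32)  # two fake normalized RGB images
imgs = tensor2imgs(batch, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
assert len(imgs) == 2 and imgs[0].shape == (32, 32, 3)  # HWC uint8 arrays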
# Copyright (c) OpenMMLab. All rights reserved. import numbers import cv2 import numpy as np from ..utils import to_2tuple from .io import imread_backend try: from PIL import Image except ImportError: Image = None def _scale_size(size, scale): """Rescale a size by a ratio. Args: size (tuple[int]): (w, h). scale (float | tuple(float)): Scaling factor. Returns: tuple[int]: scaled size. """ if isinstance(scale, (float, int)): scale = (scale, scale) w, h = size return int(w * float(scale[0]) + 0.5), int(h * float(scale[1]) + 0.5) cv2_interp_codes = { 'nearest': cv2.INTER_NEAREST, 'bilinear': cv2.INTER_LINEAR, 'bicubic': cv2.INTER_CUBIC, 'area': cv2.INTER_AREA, 'lanczos': cv2.INTER_LANCZOS4 } if Image is not None: pillow_interp_codes = { 'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC, 'box': Image.BOX, 'lanczos': Image.LANCZOS, 'hamming': Image.HAMMING } def imresize(img, size, return_scale=False, interpolation='bilinear', out=None, backend=None): """Resize image to a given size. Args: img (ndarray): The input image. size (tuple[int]): Target size (w, h). return_scale (bool): Whether to return `w_scale` and `h_scale`. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. out (ndarray): The output destination. backend (str | None): The image resize backend type. Options are `cv2`, `pillow`, `None`. If backend is None, the global imread_backend specified by ``mmcv.use_backend()`` will be used. Default: None. Returns: tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or `resized_img`. """ h, w = img.shape[:2] if backend is None: backend = imread_backend if backend not in ['cv2', 'pillow']: raise ValueError(f'backend: {backend} is not supported for resize.' f"Supported backends are 'cv2', 'pillow'") if backend == 'pillow': assert img.dtype == np.uint8, 'Pillow backend only support uint8 type' pil_image = Image.fromarray(img) pil_image = pil_image.resize(size, pillow_interp_codes[interpolation]) resized_img = np.array(pil_image) else: resized_img = cv2.resize( img, size, dst=out, interpolation=cv2_interp_codes[interpolation]) if not return_scale: return resized_img else: w_scale = size[0] / w h_scale = size[1] / h return resized_img, w_scale, h_scale def imresize_to_multiple(img, divisor, size=None, scale_factor=None, keep_ratio=False, return_scale=False, interpolation='bilinear', out=None, backend=None): """Resize image according to a given size or scale factor and then rounds up the the resized or rescaled image size to the nearest value that can be divided by the divisor. Args: img (ndarray): The input image. divisor (int | tuple): Resized image size will be a multiple of divisor. If divisor is a tuple, divisor should be (w_divisor, h_divisor). size (None | int | tuple[int]): Target size (w, h). Default: None. scale_factor (None | float | tuple[float]): Multiplier for spatial size. Should match input size if it is a tuple and the 2D style is (w_scale_factor, h_scale_factor). Default: None. keep_ratio (bool): Whether to keep the aspect ratio when resizing the image. Default: False. return_scale (bool): Whether to return `w_scale` and `h_scale`. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. out (ndarray): The output destination. backend (str | None): The image resize backend type. 
Options are `cv2`, `pillow`, `None`. If backend is None, the global imread_backend specified by ``mmcv.use_backend()`` will be used. Default: None. Returns: tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or `resized_img`. """ h, w = img.shape[:2] if size is not None and scale_factor is not None: raise ValueError('only one of size or scale_factor should be defined') elif size is None and scale_factor is None: raise ValueError('one of size or scale_factor should be defined') elif size is not None: size = to_2tuple(size) if keep_ratio: size = rescale_size((w, h), size, return_scale=False) else: size = _scale_size((w, h), scale_factor) divisor = to_2tuple(divisor) size = tuple([int(np.ceil(s / d)) * d for s, d in zip(size, divisor)]) resized_img, w_scale, h_scale = imresize( img, size, return_scale=True, interpolation=interpolation, out=out, backend=backend) if return_scale: return resized_img, w_scale, h_scale else: return resized_img def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear', backend=None): """Resize image to the same size of a given image. Args: img (ndarray): The input image. dst_img (ndarray): The target image. return_scale (bool): Whether to return `w_scale` and `h_scale`. interpolation (str): Same as :func:`resize`. backend (str | None): Same as :func:`resize`. Returns: tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or `resized_img`. """ h, w = dst_img.shape[:2] return imresize(img, (w, h), return_scale, interpolation, backend=backend) def rescale_size(old_size, scale, return_scale=False): """Calculate the new size to be rescaled to. Args: old_size (tuple[int]): The old size (w, h) of image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image size. Returns: tuple[int]: The new rescaled image size. """ w, h = old_size if isinstance(scale, (float, int)): if scale <= 0: raise ValueError(f'Invalid scale {scale}, must be positive.') scale_factor = scale elif isinstance(scale, tuple): max_long_edge = max(scale) max_short_edge = min(scale) scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w)) else: raise TypeError( f'Scale must be a number or tuple of int, but got {type(scale)}') new_size = _scale_size((w, h), scale_factor) if return_scale: return new_size, scale_factor else: return new_size def imrescale(img, scale, return_scale=False, interpolation='bilinear', backend=None): """Resize image while keeping the aspect ratio. Args: img (ndarray): The input image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image. interpolation (str): Same as :func:`resize`. backend (str | None): Same as :func:`resize`. Returns: ndarray: The rescaled image. 
""" h, w = img.shape[:2] new_size, scale_factor = rescale_size((w, h), scale, return_scale=True) rescaled_img = imresize( img, new_size, interpolation=interpolation, backend=backend) if return_scale: return rescaled_img, scale_factor else: return rescaled_img def imflip(img, direction='horizontal'): """Flip an image horizontally or vertically. Args: img (ndarray): Image to be flipped. direction (str): The flip direction, either "horizontal" or "vertical" or "diagonal". Returns: ndarray: The flipped image. """ assert direction in ['horizontal', 'vertical', 'diagonal'] if direction == 'horizontal': return np.flip(img, axis=1) elif direction == 'vertical': return np.flip(img, axis=0) else: return np.flip(img, axis=(0, 1)) def imflip_(img, direction='horizontal'): """Inplace flip an image horizontally or vertically. Args: img (ndarray): Image to be flipped. direction (str): The flip direction, either "horizontal" or "vertical" or "diagonal". Returns: ndarray: The flipped image (inplace). """ assert direction in ['horizontal', 'vertical', 'diagonal'] if direction == 'horizontal': return cv2.flip(img, 1, img) elif direction == 'vertical': return cv2.flip(img, 0, img) else: return cv2.flip(img, -1, img) def imrotate(img, angle, center=None, scale=1.0, border_value=0, interpolation='bilinear', auto_bound=False): """Rotate an image. Args: img (ndarray): Image to be rotated. angle (float): Rotation angle in degrees, positive values mean clockwise rotation. center (tuple[float], optional): Center point (w, h) of the rotation in the source image. If not specified, the center of the image will be used. scale (float): Isotropic scale factor. border_value (int): Border value. interpolation (str): Same as :func:`resize`. auto_bound (bool): Whether to adjust the image size to cover the whole rotated image. Returns: ndarray: The rotated image. """ if center is not None and auto_bound: raise ValueError('`auto_bound` conflicts with `center`') h, w = img.shape[:2] if center is None: center = ((w - 1) * 0.5, (h - 1) * 0.5) assert isinstance(center, tuple) matrix = cv2.getRotationMatrix2D(center, -angle, scale) if auto_bound: cos = np.abs(matrix[0, 0]) sin = np.abs(matrix[0, 1]) new_w = h * sin + w * cos new_h = h * cos + w * sin matrix[0, 2] += (new_w - w) * 0.5 matrix[1, 2] += (new_h - h) * 0.5 w = int(np.round(new_w)) h = int(np.round(new_h)) rotated = cv2.warpAffine( img, matrix, (w, h), flags=cv2_interp_codes[interpolation], borderValue=border_value) return rotated def bbox_clip(bboxes, img_shape): """Clip bboxes to fit the image shape. Args: bboxes (ndarray): Shape (..., 4*k) img_shape (tuple[int]): (height, width) of the image. Returns: ndarray: Clipped bboxes. """ assert bboxes.shape[-1] % 4 == 0 cmin = np.empty(bboxes.shape[-1], dtype=bboxes.dtype) cmin[0::2] = img_shape[1] - 1 cmin[1::2] = img_shape[0] - 1 clipped_bboxes = np.maximum(np.minimum(bboxes, cmin), 0) return clipped_bboxes def bbox_scaling(bboxes, scale, clip_shape=None): """Scaling bboxes w.r.t the box center. Args: bboxes (ndarray): Shape(..., 4). scale (float): Scaling factor. clip_shape (tuple[int], optional): If specified, bboxes that exceed the boundary will be clipped according to the given shape (h, w). Returns: ndarray: Scaled bboxes. 
""" if float(scale) == 1.0: scaled_bboxes = bboxes.copy() else: w = bboxes[..., 2] - bboxes[..., 0] + 1 h = bboxes[..., 3] - bboxes[..., 1] + 1 dw = (w * (scale - 1)) * 0.5 dh = (h * (scale - 1)) * 0.5 scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1) if clip_shape is not None: return bbox_clip(scaled_bboxes, clip_shape) else: return scaled_bboxes def imcrop(img, bboxes, scale=1.0, pad_fill=None): """Crop image patches. 3 steps: scale the bboxes -> clip bboxes -> crop and pad. Args: img (ndarray): Image to be cropped. bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes. scale (float, optional): Scale ratio of bboxes, the default value 1.0 means no padding. pad_fill (Number | list[Number]): Value to be filled for padding. Default: None, which means no padding. Returns: list[ndarray] | ndarray: The cropped image patches. """ chn = 1 if img.ndim == 2 else img.shape[2] if pad_fill is not None: if isinstance(pad_fill, (int, float)): pad_fill = [pad_fill for _ in range(chn)] assert len(pad_fill) == chn _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32) clipped_bbox = bbox_clip(scaled_bboxes, img.shape) patches = [] for i in range(clipped_bbox.shape[0]): x1, y1, x2, y2 = tuple(clipped_bbox[i, :]) if pad_fill is None: patch = img[y1:y2 + 1, x1:x2 + 1, ...] else: _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :]) if chn == 1: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1) else: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn) patch = np.array( pad_fill, dtype=img.dtype) * np.ones( patch_shape, dtype=img.dtype) x_start = 0 if _x1 >= 0 else -_x1 y_start = 0 if _y1 >= 0 else -_y1 w = x2 - x1 + 1 h = y2 - y1 + 1 patch[y_start:y_start + h, x_start:x_start + w, ...] = img[y1:y1 + h, x1:x1 + w, ...] patches.append(patch) if bboxes.ndim == 1: return patches[0] else: return patches def impad(img, *, shape=None, padding=None, pad_val=0, padding_mode='constant'): """Pad the given image to a certain shape or pad on all sides with specified padding mode and padding value. Args: img (ndarray): Image to be padded. shape (tuple[int]): Expected padding shape (h, w). Default: None. padding (int or tuple[int]): Padding on each border. If a single int is provided this is used to pad all borders. If tuple of length 2 is provided this is the padding on left/right and top/bottom respectively. If a tuple of length 4 is provided this is the padding for the left, top, right and bottom borders respectively. Default: None. Note that `shape` and `padding` can not be both set. pad_val (Number | Sequence[Number]): Values to be filled in padding areas when padding_mode is 'constant'. Default: 0. padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Default: constant. - constant: pads with a constant value, this value is specified with pad_val. - edge: pads with the last value at the edge of the image. - reflect: pads with reflection of image without repeating the last value on the edge. For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, 3, 4, 3, 2]. - symmetric: pads with reflection of image repeating the last value on the edge. For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, 2, 3, 4, 4, 3] Returns: ndarray: The padded image. 
""" assert (shape is not None) ^ (padding is not None) if shape is not None: padding = (0, 0, shape[1] - img.shape[1], shape[0] - img.shape[0]) # check pad_val if isinstance(pad_val, tuple): assert len(pad_val) == img.shape[-1] elif not isinstance(pad_val, numbers.Number): raise TypeError('pad_val must be a int or a tuple. ' f'But received {type(pad_val)}') # check padding if isinstance(padding, tuple) and len(padding) in [2, 4]: if len(padding) == 2: padding = (padding[0], padding[1], padding[0], padding[1]) elif isinstance(padding, numbers.Number): padding = (padding, padding, padding, padding) else: raise ValueError('Padding must be a int or a 2, or 4 element tuple.' f'But received {padding}') # check padding mode assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'] border_type = { 'constant': cv2.BORDER_CONSTANT, 'edge': cv2.BORDER_REPLICATE, 'reflect': cv2.BORDER_REFLECT_101, 'symmetric': cv2.BORDER_REFLECT } img = cv2.copyMakeBorder( img, padding[1], padding[3], padding[0], padding[2], border_type[padding_mode], value=pad_val) return img def impad_to_multiple(img, divisor, pad_val=0): """Pad an image to ensure each edge to be multiple to some number. Args: img (ndarray): Image to be padded. divisor (int): Padded image edges will be multiple to divisor. pad_val (Number | Sequence[Number]): Same as :func:`impad`. Returns: ndarray: The padded image. """ pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor return impad(img, shape=(pad_h, pad_w), pad_val=pad_val) def cutout(img, shape, pad_val=0): """Randomly cut out a rectangle from the original img. Args: img (ndarray): Image to be cutout. shape (int | tuple[int]): Expected cutout shape (h, w). If given as a int, the value will be used for both h and w. pad_val (int | float | tuple[int | float]): Values to be filled in the cut area. Defaults to 0. Returns: ndarray: The cutout image. """ channels = 1 if img.ndim == 2 else img.shape[2] if isinstance(shape, int): cut_h, cut_w = shape, shape else: assert isinstance(shape, tuple) and len(shape) == 2, \ f'shape must be a int or a tuple with length 2, but got type ' \ f'{type(shape)} instead.' cut_h, cut_w = shape if isinstance(pad_val, (int, float)): pad_val = tuple([pad_val] * channels) elif isinstance(pad_val, tuple): assert len(pad_val) == channels, \ 'Expected the num of elements in tuple equals the channels' \ 'of input image. Found {} vs {}'.format( len(pad_val), channels) else: raise TypeError(f'Invalid type {type(pad_val)} for `pad_val`') img_h, img_w = img.shape[:2] y0 = np.random.uniform(img_h) x0 = np.random.uniform(img_w) y1 = int(max(0, y0 - cut_h / 2.)) x1 = int(max(0, x0 - cut_w / 2.)) y2 = min(img_h, y1 + cut_h) x2 = min(img_w, x1 + cut_w) if img.ndim == 2: patch_shape = (y2 - y1, x2 - x1) else: patch_shape = (y2 - y1, x2 - x1, channels) img_cutout = img.copy() patch = np.array( pad_val, dtype=img.dtype) * np.ones( patch_shape, dtype=img.dtype) img_cutout[y1:y2, x1:x2, ...] = patch return img_cutout def _get_shear_matrix(magnitude, direction='horizontal'): """Generate the shear matrix for transformation. Args: magnitude (int | float): The magnitude used for shear. direction (str): The flip direction, either "horizontal" or "vertical". Returns: ndarray: The shear matrix with dtype float32. 
""" if direction == 'horizontal': shear_matrix = np.float32([[1, magnitude, 0], [0, 1, 0]]) elif direction == 'vertical': shear_matrix = np.float32([[1, 0, 0], [magnitude, 1, 0]]) return shear_matrix def imshear(img, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """Shear an image. Args: img (ndarray): Image to be sheared with format (h, w) or (h, w, c). magnitude (int | float): The magnitude used for shear. direction (str): The flip direction, either "horizontal" or "vertical". border_value (int | tuple[int]): Value used in case of a constant border. interpolation (str): Same as :func:`resize`. Returns: ndarray: The sheared image. """ assert direction in ['horizontal', 'vertical'], f'Invalid direction: {direction}' height, width = img.shape[:2] if img.ndim == 2: channels = 1 elif img.ndim == 3: channels = img.shape[-1] if isinstance(border_value, int): border_value = tuple([border_value] * channels) elif isinstance(border_value, tuple): assert len(border_value) == channels, \ 'Expected the num of elements in tuple equals the channels' \ 'of input image. Found {} vs {}'.format( len(border_value), channels) else: raise ValueError( f'Invalid type {type(border_value)} for `border_value`') shear_matrix = _get_shear_matrix(magnitude, direction) sheared = cv2.warpAffine( img, shear_matrix, (width, height), # Note case when the number elements in `border_value` # greater than 3 (e.g. shearing masks whose channels large # than 3) will raise TypeError in `cv2.warpAffine`. # Here simply slice the first 3 values in `border_value`. borderValue=border_value[:3], flags=cv2_interp_codes[interpolation]) return sheared def _get_translate_matrix(offset, direction='horizontal'): """Generate the translate matrix. Args: offset (int | float): The offset used for translate. direction (str): The translate direction, either "horizontal" or "vertical". Returns: ndarray: The translate matrix with dtype float32. """ if direction == 'horizontal': translate_matrix = np.float32([[1, 0, offset], [0, 1, 0]]) elif direction == 'vertical': translate_matrix = np.float32([[1, 0, 0], [0, 1, offset]]) return translate_matrix def imtranslate(img, offset, direction='horizontal', border_value=0, interpolation='bilinear'): """Translate an image. Args: img (ndarray): Image to be translated with format (h, w) or (h, w, c). offset (int | float): The offset used for translate. direction (str): The translate direction, either "horizontal" or "vertical". border_value (int | tuple[int]): Value used in case of a constant border. interpolation (str): Same as :func:`resize`. Returns: ndarray: The translated image. """ assert direction in ['horizontal', 'vertical'], f'Invalid direction: {direction}' height, width = img.shape[:2] if img.ndim == 2: channels = 1 elif img.ndim == 3: channels = img.shape[-1] if isinstance(border_value, int): border_value = tuple([border_value] * channels) elif isinstance(border_value, tuple): assert len(border_value) == channels, \ 'Expected the num of elements in tuple equals the channels' \ 'of input image. Found {} vs {}'.format( len(border_value), channels) else: raise ValueError( f'Invalid type {type(border_value)} for `border_value`.') translate_matrix = _get_translate_matrix(offset, direction) translated = cv2.warpAffine( img, translate_matrix, (width, height), # Note case when the number elements in `border_value` # greater than 3 (e.g. translating masks whose channels # large than 3) will raise TypeError in `cv2.warpAffine`. 
# Here simply slice the first 3 values in `border_value`. borderValue=border_value[:3], flags=cv2_interp_codes[interpolation]) return translated
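# ---------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the upstream module): a quick smoke
# test of the geometric helpers defined above on a synthetic image. Assumes
# numpy is available; shapes follow the (w, h) conventions in the docstrings.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    img = np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8)

    # Resize to an exact (w, h) and also fetch the scale factors.
    resized, w_scale, h_scale = imresize(img, (160, 120), return_scale=True)
    assert resized.shape[:2] == (120, 160)

    # Rescale by a factor while keeping the aspect ratio.
    rescaled = imrescale(img, 0.5)
    assert rescaled.shape[:2] == (120, 160)

    # Flip, rotate with auto_bound, and pad each edge to a multiple of 32.
    flipped = imflip(img, direction='horizontal')
    rotated = imrotate(img, angle=30, auto_bound=True)
    padded = impad_to_multiple(img, divisor=32)
    assert padded.shape[0] % 32 == 0 and padded.shape[1] % 32 == 0

    # Crop a single bbox (x1, y1, x2, y2); a (k, 4) array returns a list.
    patch = imcrop(img, np.array([10, 10, 109, 109]))
    assert patch.shape[:2] == (100, 100)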
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/image/geometric.py
# Copyright (c) OpenMMLab. All rights reserved. import cv2 import numpy as np def imconvert(img, src, dst): """Convert an image from the src colorspace to dst colorspace. Args: img (ndarray): The input image. src (str): The source colorspace, e.g., 'rgb', 'hsv'. dst (str): The destination colorspace, e.g., 'rgb', 'hsv'. Returns: ndarray: The converted image. """ code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}') out_img = cv2.cvtColor(img, code) return out_img def bgr2gray(img, keepdim=False): """Convert a BGR image to grayscale image. Args: img (ndarray): The input image. keepdim (bool): If False (by default), then return the grayscale image with 2 dims, otherwise 3 dims. Returns: ndarray: The converted grayscale image. """ out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if keepdim: out_img = out_img[..., None] return out_img def rgb2gray(img, keepdim=False): """Convert a RGB image to grayscale image. Args: img (ndarray): The input image. keepdim (bool): If False (by default), then return the grayscale image with 2 dims, otherwise 3 dims. Returns: ndarray: The converted grayscale image. """ out_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) if keepdim: out_img = out_img[..., None] return out_img def gray2bgr(img): """Convert a grayscale image to BGR image. Args: img (ndarray): The input image. Returns: ndarray: The converted BGR image. """ img = img[..., None] if img.ndim == 2 else img out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) return out_img def gray2rgb(img): """Convert a grayscale image to RGB image. Args: img (ndarray): The input image. Returns: ndarray: The converted RGB image. """ img = img[..., None] if img.ndim == 2 else img out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) return out_img def _convert_input_type_range(img): """Convert the type and range of the input image. It converts the input image to np.float32 type and range of [0, 1]. It is mainly used for pre-processing the input image in colorspace conversion functions such as rgb2ycbcr and ycbcr2rgb. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. Returns: (ndarray): The converted image with type of np.float32 and range of [0, 1]. """ img_type = img.dtype img = img.astype(np.float32) if img_type == np.float32: pass elif img_type == np.uint8: img /= 255. else: raise TypeError('The img type should be np.float32 or np.uint8, ' f'but got {img_type}') return img def _convert_output_type_range(img, dst_type): """Convert the type and range of the image according to dst_type. It converts the image to desired type and range. If `dst_type` is np.uint8, images will be converted to np.uint8 type with range [0, 255]. If `dst_type` is np.float32, it converts the image to np.float32 type with range [0, 1]. It is mainly used for post-processing images in colorspace conversion functions such as rgb2ycbcr and ycbcr2rgb. Args: img (ndarray): The image to be converted with np.float32 type and range [0, 255]. dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it converts the image to np.uint8 type with range [0, 255]. If dst_type is np.float32, it converts the image to np.float32 type with range [0, 1]. Returns: (ndarray): The converted image with desired type and range. """ if dst_type not in (np.uint8, np.float32): raise TypeError('The dst_type should be np.float32 or np.uint8, ' f'but got {dst_type}') if dst_type == np.uint8: img = img.round() else: img /= 255. 
return img.astype(dst_type) def rgb2ycbcr(img, y_only=False): """Convert a RGB image to YCbCr image. This function produces the same results as Matlab's `rgb2ycbcr` function. It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`. In OpenCV, it implements a JPEG conversion. See more details in https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. y_only (bool): Whether to only return Y channel. Default: False. Returns: ndarray: The converted YCbCr image. The output image has the same type and range as input image. """ img_type = img.dtype img = _convert_input_type_range(img) if y_only: out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0 else: out_img = np.matmul( img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], [24.966, 112.0, -18.214]]) + [16, 128, 128] out_img = _convert_output_type_range(out_img, img_type) return out_img def bgr2ycbcr(img, y_only=False): """Convert a BGR image to YCbCr image. The bgr version of rgb2ycbcr. It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`. In OpenCV, it implements a JPEG conversion. See more details in https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. y_only (bool): Whether to only return Y channel. Default: False. Returns: ndarray: The converted YCbCr image. The output image has the same type and range as input image. """ img_type = img.dtype img = _convert_input_type_range(img) if y_only: out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0 else: out_img = np.matmul( img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128] out_img = _convert_output_type_range(out_img, img_type) return out_img def ycbcr2rgb(img): """Convert a YCbCr image to RGB image. This function produces the same results as Matlab's ycbcr2rgb function. It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`. In OpenCV, it implements a JPEG conversion. See more details in https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. Returns: ndarray: The converted RGB image. The output image has the same type and range as input image. """ img_type = img.dtype img = _convert_input_type_range(img) * 255 out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071], [0.00625893, -0.00318811, 0]]) * 255.0 + [ -222.921, 135.576, -276.836 ] out_img = _convert_output_type_range(out_img, img_type) return out_img def ycbcr2bgr(img): """Convert a YCbCr image to BGR image. The bgr version of ycbcr2rgb. It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`. 
In OpenCV, it implements a JPEG conversion. See more details in https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. Returns: ndarray: The converted BGR image. The output image has the same type and range as input image. """ img_type = img.dtype img = _convert_input_type_range(img) * 255 out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0.00791071, -0.00153632, 0], [0, -0.00318811, 0.00625893]]) * 255.0 + [ -276.836, 135.576, -222.921 ] out_img = _convert_output_type_range(out_img, img_type) return out_img def convert_color_factory(src, dst): code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}') def convert_color(img): out_img = cv2.cvtColor(img, code) return out_img convert_color.__doc__ = f"""Convert a {src.upper()} image to {dst.upper()} image. Args: img (ndarray or str): The input image. Returns: ndarray: The converted {dst.upper()} image. """ return convert_color bgr2rgb = convert_color_factory('bgr', 'rgb') rgb2bgr = convert_color_factory('rgb', 'bgr') bgr2hsv = convert_color_factory('bgr', 'hsv') hsv2bgr = convert_color_factory('hsv', 'bgr') bgr2hls = convert_color_factory('bgr', 'hls') hls2bgr = convert_color_factory('hls', 'bgr')
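# ---------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the upstream module): round-trips a
# random uint8 image through the BT.601 YCbCr conversions above. The value
# range [16, 240) and the tolerance of 3 are assumptions chosen only to keep
# the demo clear of uint8 saturation/rounding artifacts.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    img = np.random.randint(16, 240, (32, 32, 3), dtype=np.uint8)

    # BGR <-> RGB via the factory-generated converters is an exact identity.
    assert np.array_equal(rgb2bgr(bgr2rgb(img)), img)

    # RGB -> YCbCr -> RGB is only close to the identity because each stage
    # rounds to uint8, so compare with a small tolerance.
    restored = ycbcr2rgb(rgb2ycbcr(img))
    diff = np.abs(restored.astype(np.int16) - img.astype(np.int16))
    assert diff.max() <= 3

    # y_only=True returns just the luma plane.
    assert rgb2ycbcr(img, y_only=True).shape == (32, 32)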
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/image/colorspace.py
# Copyright (c) OpenMMLab. All rights reserved. import io import os.path as osp from pathlib import Path import cv2 import numpy as np from cv2 import (IMREAD_COLOR, IMREAD_GRAYSCALE, IMREAD_IGNORE_ORIENTATION, IMREAD_UNCHANGED) from annotator.uniformer.mmcv.utils import check_file_exist, is_str, mkdir_or_exist try: from turbojpeg import TJCS_RGB, TJPF_BGR, TJPF_GRAY, TurboJPEG except ImportError: TJCS_RGB = TJPF_GRAY = TJPF_BGR = TurboJPEG = None try: from PIL import Image, ImageOps except ImportError: Image = None try: import tifffile except ImportError: tifffile = None jpeg = None supported_backends = ['cv2', 'turbojpeg', 'pillow', 'tifffile'] imread_flags = { 'color': IMREAD_COLOR, 'grayscale': IMREAD_GRAYSCALE, 'unchanged': IMREAD_UNCHANGED, 'color_ignore_orientation': IMREAD_IGNORE_ORIENTATION | IMREAD_COLOR, 'grayscale_ignore_orientation': IMREAD_IGNORE_ORIENTATION | IMREAD_GRAYSCALE } imread_backend = 'cv2' def use_backend(backend): """Select a backend for image decoding. Args: backend (str): The image decoding backend type. Options are `cv2`, `pillow`, `turbojpeg` (see https://github.com/lilohuang/PyTurboJPEG) and `tifffile`. `turbojpeg` is faster but it only supports `.jpeg` file format. """ assert backend in supported_backends global imread_backend imread_backend = backend if imread_backend == 'turbojpeg': if TurboJPEG is None: raise ImportError('`PyTurboJPEG` is not installed') global jpeg if jpeg is None: jpeg = TurboJPEG() elif imread_backend == 'pillow': if Image is None: raise ImportError('`Pillow` is not installed') elif imread_backend == 'tifffile': if tifffile is None: raise ImportError('`tifffile` is not installed') def _jpegflag(flag='color', channel_order='bgr'): channel_order = channel_order.lower() if channel_order not in ['rgb', 'bgr']: raise ValueError('channel order must be either "rgb" or "bgr"') if flag == 'color': if channel_order == 'bgr': return TJPF_BGR elif channel_order == 'rgb': return TJCS_RGB elif flag == 'grayscale': return TJPF_GRAY else: raise ValueError('flag must be "color" or "grayscale"') def _pillow2array(img, flag='color', channel_order='bgr'): """Convert a pillow image to numpy array. Args: img (:obj:`PIL.Image.Image`): The image loaded using PIL flag (str): Flags specifying the color type of a loaded image, candidates are 'color', 'grayscale' and 'unchanged'. Default to 'color'. channel_order (str): The channel order of the output image array, candidates are 'bgr' and 'rgb'. Default to 'bgr'. Returns: np.ndarray: The converted numpy array """ channel_order = channel_order.lower() if channel_order not in ['rgb', 'bgr']: raise ValueError('channel order must be either "rgb" or "bgr"') if flag == 'unchanged': array = np.array(img) if array.ndim >= 3 and array.shape[2] >= 3: # color image array[:, :, :3] = array[:, :, (2, 1, 0)] # RGB to BGR else: # Handle exif orientation tag if flag in ['color', 'grayscale']: img = ImageOps.exif_transpose(img) # If the image mode is not 'RGB', convert it to 'RGB' first. if img.mode != 'RGB': if img.mode != 'LA': # Most formats except 'LA' can be directly converted to RGB img = img.convert('RGB') else: # When the mode is 'LA', the default conversion will fill in # the canvas with black, which sometimes shadows black objects # in the foreground. 
# # Therefore, a random color (124, 117, 104) is used for canvas img_rgba = img.convert('RGBA') img = Image.new('RGB', img_rgba.size, (124, 117, 104)) img.paste(img_rgba, mask=img_rgba.split()[3]) # 3 is alpha if flag in ['color', 'color_ignore_orientation']: array = np.array(img) if channel_order != 'rgb': array = array[:, :, ::-1] # RGB to BGR elif flag in ['grayscale', 'grayscale_ignore_orientation']: img = img.convert('L') array = np.array(img) else: raise ValueError( 'flag must be "color", "grayscale", "unchanged", ' f'"color_ignore_orientation" or "grayscale_ignore_orientation"' f' but got {flag}') return array def imread(img_or_path, flag='color', channel_order='bgr', backend=None): """Read an image. Args: img_or_path (ndarray or str or Path): Either a numpy array or str or pathlib.Path. If it is a numpy array (loaded image), then it will be returned as is. flag (str): Flags specifying the color type of a loaded image, candidates are `color`, `grayscale`, `unchanged`, `color_ignore_orientation` and `grayscale_ignore_orientation`. By default, `cv2` and `pillow` backend would rotate the image according to its EXIF info unless called with `unchanged` or `*_ignore_orientation` flags. `turbojpeg` and `tifffile` backend always ignore image's EXIF info regardless of the flag. The `turbojpeg` backend only supports `color` and `grayscale`. channel_order (str): Order of channel, candidates are `bgr` and `rgb`. backend (str | None): The image decoding backend type. Options are `cv2`, `pillow`, `turbojpeg`, `tifffile`, `None`. If backend is None, the global imread_backend specified by ``mmcv.use_backend()`` will be used. Default: None. Returns: ndarray: Loaded image array. """ if backend is None: backend = imread_backend if backend not in supported_backends: raise ValueError(f'backend: {backend} is not supported. Supported ' "backends are 'cv2', 'turbojpeg', 'pillow', 'tifffile'") if isinstance(img_or_path, Path): img_or_path = str(img_or_path) if isinstance(img_or_path, np.ndarray): return img_or_path elif is_str(img_or_path): check_file_exist(img_or_path, f'img file does not exist: {img_or_path}') if backend == 'turbojpeg': with open(img_or_path, 'rb') as in_file: img = jpeg.decode(in_file.read(), _jpegflag(flag, channel_order)) if img.shape[-1] == 1: img = img[:, :, 0] return img elif backend == 'pillow': img = Image.open(img_or_path) img = _pillow2array(img, flag, channel_order) return img elif backend == 'tifffile': img = tifffile.imread(img_or_path) return img else: flag = imread_flags[flag] if is_str(flag) else flag img = cv2.imread(img_or_path, flag) if flag == IMREAD_COLOR and channel_order == 'rgb': cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) return img else: raise TypeError('"img" must be a numpy array or a str or ' 'a pathlib.Path object') def imfrombytes(content, flag='color', channel_order='bgr', backend=None): """Read an image from bytes. Args: content (bytes): Image bytes got from files or other streams. flag (str): Same as :func:`imread`. backend (str | None): The image decoding backend type. Options are `cv2`, `pillow`, `turbojpeg`, `None`. If backend is None, the global imread_backend specified by ``mmcv.use_backend()`` will be used. Default: None. Returns: ndarray: Loaded image array. """ if backend is None: backend = imread_backend if backend not in supported_backends: raise ValueError(f'backend: {backend} is not supported. Supported ' "backends are 'cv2', 'turbojpeg', 'pillow'") if backend == 'turbojpeg': img = jpeg.decode(content, _jpegflag(flag, channel_order)) if img.shape[-1] == 1: img = img[:, :, 0] return img elif backend == 'pillow': buff = io.BytesIO(content) img = Image.open(buff) img = _pillow2array(img, flag, channel_order) return img else: img_np = np.frombuffer(content, np.uint8) flag = imread_flags[flag] if is_str(flag) else flag img = cv2.imdecode(img_np, flag) if flag == IMREAD_COLOR and channel_order == 'rgb': cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) return img def imwrite(img, file_path, params=None, auto_mkdir=True): """Write image to file. Args: img (ndarray): Image array to be written. file_path (str): Image file path. params (None or list): Same as opencv :func:`imwrite` interface. auto_mkdir (bool): If the parent folder of `file_path` does not exist, whether to create it automatically. Returns: bool: Successful or not. """ if auto_mkdir: dir_name = osp.abspath(osp.dirname(file_path)) mkdir_or_exist(dir_name) return cv2.imwrite(file_path, img, params)
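# ---------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the upstream module): writes a
# random image to a temporary PNG and reads it back with the default cv2
# backend. Only stdlib tempfile is assumed on top of the imports above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import tempfile
    img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = osp.join(tmp_dir, 'demo.png')
        assert imwrite(img, path)

        # PNG is lossless, so the BGR round-trip is exact.
        assert np.array_equal(imread(path), img)

        # Decode straight from raw bytes, asking for RGB channel order.
        with open(path, 'rb') as f:
            rgb = imfrombytes(f.read(), channel_order='rgb')
        assert np.array_equal(rgb[..., ::-1], img)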
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/image/io.py
# Copyright (c) OpenMMLab. All rights reserved. from .colorspace import (bgr2gray, bgr2hls, bgr2hsv, bgr2rgb, bgr2ycbcr, gray2bgr, gray2rgb, hls2bgr, hsv2bgr, imconvert, rgb2bgr, rgb2gray, rgb2ycbcr, ycbcr2bgr, ycbcr2rgb) from .geometric import (cutout, imcrop, imflip, imflip_, impad, impad_to_multiple, imrescale, imresize, imresize_like, imresize_to_multiple, imrotate, imshear, imtranslate, rescale_size) from .io import imfrombytes, imread, imwrite, supported_backends, use_backend from .misc import tensor2imgs from .photometric import (adjust_brightness, adjust_color, adjust_contrast, adjust_lighting, adjust_sharpness, auto_contrast, clahe, imdenormalize, imequalize, iminvert, imnormalize, imnormalize_, lut_transform, posterize, solarize) __all__ = [ 'bgr2gray', 'bgr2hls', 'bgr2hsv', 'bgr2rgb', 'gray2bgr', 'gray2rgb', 'hls2bgr', 'hsv2bgr', 'imconvert', 'rgb2bgr', 'rgb2gray', 'imrescale', 'imresize', 'imresize_like', 'imresize_to_multiple', 'rescale_size', 'imcrop', 'imflip', 'imflip_', 'impad', 'impad_to_multiple', 'imrotate', 'imfrombytes', 'imread', 'imwrite', 'supported_backends', 'use_backend', 'imdenormalize', 'imnormalize', 'imnormalize_', 'iminvert', 'posterize', 'solarize', 'rgb2ycbcr', 'bgr2ycbcr', 'ycbcr2rgb', 'ycbcr2bgr', 'tensor2imgs', 'imshear', 'imtranslate', 'adjust_color', 'imequalize', 'adjust_brightness', 'adjust_contrast', 'lut_transform', 'clahe', 'adjust_sharpness', 'auto_contrast', 'cutout', 'adjust_lighting' ]
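# Illustrative usage (not part of the upstream package): with the package on
# the import path, the public API re-exported above is used directly, e.g.
#
#   from annotator.uniformer.mmcv.image import imread, imrescale, bgr2rgb
#   img = imread('demo.jpg')   # 'demo.jpg' is a hypothetical path
#   img = bgr2rgb(imrescale(img, 0.5))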
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/image/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. import cv2 import numpy as np from ..utils import is_tuple_of from .colorspace import bgr2gray, gray2bgr def imnormalize(img, mean, std, to_rgb=True): """Normalize an image with mean and std. Args: img (ndarray): Image to be normalized. mean (ndarray): The mean to be used for normalize. std (ndarray): The std to be used for normalize. to_rgb (bool): Whether to convert to rgb. Returns: ndarray: The normalized image. """ img = img.copy().astype(np.float32) return imnormalize_(img, mean, std, to_rgb) def imnormalize_(img, mean, std, to_rgb=True): """Inplace normalize an image with mean and std. Args: img (ndarray): Image to be normalized. mean (ndarray): The mean to be used for normalize. std (ndarray): The std to be used for normalize. to_rgb (bool): Whether to convert to rgb. Returns: ndarray: The normalized image. """ # cv2 inplace normalization does not accept uint8 assert img.dtype != np.uint8 mean = np.float64(mean.reshape(1, -1)) stdinv = 1 / np.float64(std.reshape(1, -1)) if to_rgb: cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace cv2.subtract(img, mean, img) # inplace cv2.multiply(img, stdinv, img) # inplace return img def imdenormalize(img, mean, std, to_bgr=True): assert img.dtype != np.uint8 mean = mean.reshape(1, -1).astype(np.float64) std = std.reshape(1, -1).astype(np.float64) img = cv2.multiply(img, std) # make a copy cv2.add(img, mean, img) # inplace if to_bgr: cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace return img def iminvert(img): """Invert (negate) an image. Args: img (ndarray): Image to be inverted. Returns: ndarray: The inverted image. """ return np.full_like(img, 255) - img def solarize(img, thr=128): """Solarize an image (invert all pixel values above a threshold) Args: img (ndarray): Image to be solarized. thr (int): Threshold for solarizing (0 - 255). Returns: ndarray: The solarized image. """ img = np.where(img < thr, img, 255 - img) return img def posterize(img, bits): """Posterize an image (reduce the number of bits for each color channel) Args: img (ndarray): Image to be posterized. bits (int): Number of bits (1 to 8) to use for posterizing. Returns: ndarray: The posterized image. """ shift = 8 - bits img = np.left_shift(np.right_shift(img, shift), shift) return img def adjust_color(img, alpha=1, beta=None, gamma=0): r"""It blends the source image and its gray image: .. math:: output = img * alpha + gray\_img * beta + gamma Args: img (ndarray): The input source image. alpha (int | float): Weight for the source image. Default 1. beta (int | float): Weight for the converted gray image. If None, it's assigned the value (1 - `alpha`). gamma (int | float): Scalar added to each sum. Same as :func:`cv2.addWeighted`. Default 0. Returns: ndarray: Colored image which has the same size and dtype as input. """ gray_img = bgr2gray(img) gray_img = np.tile(gray_img[..., None], [1, 1, 3]) if beta is None: beta = 1 - alpha colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma) if not colored_img.dtype == np.uint8: # Note when the dtype of `img` is not the default `np.uint8` # (e.g. np.float32), the value in `colored_img` got from cv2 # is not guaranteed to be in range [0, 255], so here clip # is needed. colored_img = np.clip(colored_img, 0, 255) return colored_img def imequalize(img): """Equalize the image histogram. This function applies a non-linear mapping to the input image, in order to create a uniform distribution of grayscale values in the output image. Args: img (ndarray): Image to be equalized. 
Returns: ndarray: The equalized image. """ def _scale_channel(im, c): """Scale the data in the corresponding channel.""" im = im[:, :, c] # Compute the histogram of the image channel. histo = np.histogram(im, 256, (0, 255))[0] # For computing the step, filter out the nonzeros. nonzero_histo = histo[histo > 0] step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255 if not step: lut = np.array(range(256)) else: # Compute the cumulative sum, shifted by step // 2 # and then normalized by step. lut = (np.cumsum(histo) + (step // 2)) // step # Shift lut, prepending with 0. lut = np.concatenate([[0], lut[:-1]], 0) # handle potential integer overflow lut[lut > 255] = 255 # If step is zero, return the original image. # Otherwise, index from lut. return np.where(np.equal(step, 0), im, lut[im]) # Scales each channel independently and then stacks # the result. s1 = _scale_channel(img, 0) s2 = _scale_channel(img, 1) s3 = _scale_channel(img, 2) equalized_img = np.stack([s1, s2, s3], axis=-1) return equalized_img.astype(img.dtype) def adjust_brightness(img, factor=1.): """Adjust image brightness. This function controls the brightness of an image. An enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the original image. This function blends the source image and the degenerated black image: .. math:: output = img * factor + degenerated * (1 - factor) Args: img (ndarray): Image to be brightened. factor (float): A value controls the enhancement. Factor 1.0 returns the original image, lower factors mean less color (brightness, contrast, etc), and higher values more. Default 1. Returns: ndarray: The brightened image. """ degenerated = np.zeros_like(img) # Note manually convert the dtype to np.float32, to # achieve as close results as PIL.ImageEnhance.Brightness. # Set beta=1-factor, and gamma=0 brightened_img = cv2.addWeighted( img.astype(np.float32), factor, degenerated.astype(np.float32), 1 - factor, 0) brightened_img = np.clip(brightened_img, 0, 255) return brightened_img.astype(img.dtype) def adjust_contrast(img, factor=1.): """Adjust image contrast. This function controls the contrast of an image. An enhancement factor of 0.0 gives a solid grey image. A factor of 1.0 gives the original image. It blends the source image and the degenerated mean image: .. math:: output = img * factor + degenerated * (1 - factor) Args: img (ndarray): Image to be contrasted. BGR order. factor (float): Same as :func:`mmcv.adjust_brightness`. Returns: ndarray: The contrasted image. """ gray_img = bgr2gray(img) hist = np.histogram(gray_img, 256, (0, 255))[0] mean = round(np.sum(gray_img) / np.sum(hist)) degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype) degenerated = gray2bgr(degenerated) contrasted_img = cv2.addWeighted( img.astype(np.float32), factor, degenerated.astype(np.float32), 1 - factor, 0) contrasted_img = np.clip(contrasted_img, 0, 255) return contrasted_img.astype(img.dtype) def auto_contrast(img, cutoff=0): """Auto adjust image contrast. This function maximize (normalize) image contrast by first removing cutoff percent of the lightest and darkest pixels from the histogram and remapping the image so that the darkest pixel becomes black (0), and the lightest becomes white (255). Args: img (ndarray): Image to be contrasted. BGR order. cutoff (int | float | tuple): The cutoff percent of the lightest and darkest pixels to be removed. If given as tuple, it shall be (low, high). Otherwise, the single value will be used for both. Defaults to 0. Returns: ndarray: The contrasted image. 
""" def _auto_contrast_channel(im, c, cutoff): im = im[:, :, c] # Compute the histogram of the image channel. histo = np.histogram(im, 256, (0, 255))[0] # Remove cut-off percent pixels from histo histo_sum = np.cumsum(histo) cut_low = histo_sum[-1] * cutoff[0] // 100 cut_high = histo_sum[-1] - histo_sum[-1] * cutoff[1] // 100 histo_sum = np.clip(histo_sum, cut_low, cut_high) - cut_low histo = np.concatenate([[histo_sum[0]], np.diff(histo_sum)], 0) # Compute mapping low, high = np.nonzero(histo)[0][0], np.nonzero(histo)[0][-1] # If all the values have been cut off, return the origin img if low >= high: return im scale = 255.0 / (high - low) offset = -low * scale lut = np.array(range(256)) lut = lut * scale + offset lut = np.clip(lut, 0, 255) return lut[im] if isinstance(cutoff, (int, float)): cutoff = (cutoff, cutoff) else: assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \ f'float or tuple, but got {type(cutoff)} instead.' # Auto adjusts contrast for each channel independently and then stacks # the result. s1 = _auto_contrast_channel(img, 0, cutoff) s2 = _auto_contrast_channel(img, 1, cutoff) s3 = _auto_contrast_channel(img, 2, cutoff) contrasted_img = np.stack([s1, s2, s3], axis=-1) return contrasted_img.astype(img.dtype) def adjust_sharpness(img, factor=1., kernel=None): """Adjust image sharpness. This function controls the sharpness of an image. An enhancement factor of 0.0 gives a blurred image. A factor of 1.0 gives the original image. And a factor of 2.0 gives a sharpened image. It blends the source image and the degenerated mean image: .. math:: output = img * factor + degenerated * (1 - factor) Args: img (ndarray): Image to be sharpened. BGR order. factor (float): Same as :func:`mmcv.adjust_brightness`. kernel (np.ndarray, optional): Filter kernel to be applied on the img to obtain the degenerated img. Defaults to None. Note: No value sanity check is enforced on the kernel set by users. So with an inappropriate kernel, the ``adjust_sharpness`` may fail to perform the function its name indicates but end up performing whatever transform determined by the kernel. Returns: ndarray: The sharpened image. """ if kernel is None: # adopted from PIL.ImageFilter.SMOOTH kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13 assert isinstance(kernel, np.ndarray), \ f'kernel must be of type np.ndarray, but got {type(kernel)} instead.' assert kernel.ndim == 2, \ f'kernel must have a dimension of 2, but got {kernel.ndim} instead.' degenerated = cv2.filter2D(img, -1, kernel) sharpened_img = cv2.addWeighted( img.astype(np.float32), factor, degenerated.astype(np.float32), 1 - factor, 0) sharpened_img = np.clip(sharpened_img, 0, 255) return sharpened_img.astype(img.dtype) def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True): """AlexNet-style PCA jitter. This data augmentation is proposed in `ImageNet Classification with Deep Convolutional Neural Networks <https://dl.acm.org/doi/pdf/10.1145/3065386>`_. Args: img (ndarray): Image to be adjusted lighting. BGR order. eigval (ndarray): the eigenvalue of the convariance matrix of pixel values, respectively. eigvec (ndarray): the eigenvector of the convariance matrix of pixel values, respectively. alphastd (float): The standard deviation for distribution of alpha. Defaults to 0.1 to_rgb (bool): Whether to convert img to rgb. Returns: ndarray: The adjusted image. 
""" assert isinstance(eigval, np.ndarray) and isinstance(eigvec, np.ndarray), \ f'eigval and eigvec should both be of type np.ndarray, got ' \ f'{type(eigval)} and {type(eigvec)} instead.' assert eigval.ndim == 1 and eigvec.ndim == 2 assert eigvec.shape == (3, eigval.shape[0]) n_eigval = eigval.shape[0] assert isinstance(alphastd, float), 'alphastd should be of type float, ' \ f'got {type(alphastd)} instead.' img = img.copy().astype(np.float32) if to_rgb: cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace alpha = np.random.normal(0, alphastd, n_eigval) alter = eigvec \ * np.broadcast_to(alpha.reshape(1, n_eigval), (3, n_eigval)) \ * np.broadcast_to(eigval.reshape(1, n_eigval), (3, n_eigval)) alter = np.broadcast_to(alter.sum(axis=1).reshape(1, 1, 3), img.shape) img_adjusted = img + alter return img_adjusted def lut_transform(img, lut_table): """Transform array by look-up table. The function lut_transform fills the output array with values from the look-up table. Indices of the entries are taken from the input array. Args: img (ndarray): Image to be transformed. lut_table (ndarray): look-up table of 256 elements; in case of multi-channel input array, the table should either have a single channel (in this case the same table is used for all channels) or the same number of channels as in the input array. Returns: ndarray: The transformed image. """ assert isinstance(img, np.ndarray) assert 0 <= np.min(img) and np.max(img) <= 255 assert isinstance(lut_table, np.ndarray) assert lut_table.shape == (256, ) return cv2.LUT(np.array(img, dtype=np.uint8), lut_table) def clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)): """Use CLAHE method to process the image. See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J]. Graphics Gems, 1994:474-485.` for more information. Args: img (ndarray): Image to be processed. clip_limit (float): Threshold for contrast limiting. Default: 40.0. tile_grid_size (tuple[int]): Size of grid for histogram equalization. Input image will be divided into equally sized rectangular tiles. It defines the number of tiles in row and column. Default: (8, 8). Returns: ndarray: The processed image. """ assert isinstance(img, np.ndarray) assert img.ndim == 2 assert isinstance(clip_limit, (float, int)) assert is_tuple_of(tile_grid_size, int) assert len(tile_grid_size) == 2 clahe = cv2.createCLAHE(clip_limit, tile_grid_size) return clahe.apply(np.array(img, dtype=np.uint8))
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/image/photometric.py
# Copyright (c) OpenMMLab. All rights reserved. import logging import torch.nn as nn from .utils import constant_init, kaiming_init, normal_init def conv3x3(in_planes, out_planes, dilation=1): """3x3 convolution with padding.""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, padding=dilation, dilation=dilation) def make_vgg_layer(inplanes, planes, num_blocks, dilation=1, with_bn=False, ceil_mode=False): layers = [] for _ in range(num_blocks): layers.append(conv3x3(inplanes, planes, dilation)) if with_bn: layers.append(nn.BatchNorm2d(planes)) layers.append(nn.ReLU(inplace=True)) inplanes = planes layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) return layers class VGG(nn.Module): """VGG backbone. Args: depth (int): Depth of vgg, from {11, 13, 16, 19}. with_bn (bool): Use BatchNorm or not. num_classes (int): number of classes for classification. num_stages (int): VGG stages, normally 5. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. frozen_stages (int): Stages to be frozen (all param fixed). -1 means not freezing any parameters. bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze running stats (mean and var). bn_frozen (bool): Whether to freeze weight and bias of BN layers. """ arch_settings = { 11: (1, 1, 2, 2, 2), 13: (2, 2, 2, 2, 2), 16: (2, 2, 3, 3, 3), 19: (2, 2, 4, 4, 4) } def __init__(self, depth, with_bn=False, num_classes=-1, num_stages=5, dilations=(1, 1, 1, 1, 1), out_indices=(0, 1, 2, 3, 4), frozen_stages=-1, bn_eval=True, bn_frozen=False, ceil_mode=False, with_last_pool=True): super(VGG, self).__init__() if depth not in self.arch_settings: raise KeyError(f'invalid depth {depth} for vgg') assert num_stages >= 1 and num_stages <= 5 stage_blocks = self.arch_settings[depth] self.stage_blocks = stage_blocks[:num_stages] assert len(dilations) == num_stages assert max(out_indices) <= num_stages self.num_classes = num_classes self.out_indices = out_indices self.frozen_stages = frozen_stages self.bn_eval = bn_eval self.bn_frozen = bn_frozen self.inplanes = 3 start_idx = 0 vgg_layers = [] self.range_sub_modules = [] for i, num_blocks in enumerate(self.stage_blocks): num_modules = num_blocks * (2 + with_bn) + 1 end_idx = start_idx + num_modules dilation = dilations[i] planes = 64 * 2**i if i < 4 else 512 vgg_layer = make_vgg_layer( self.inplanes, planes, num_blocks, dilation=dilation, with_bn=with_bn, ceil_mode=ceil_mode) vgg_layers.extend(vgg_layer) self.inplanes = planes self.range_sub_modules.append([start_idx, end_idx]) start_idx = end_idx if not with_last_pool: vgg_layers.pop(-1) self.range_sub_modules[-1][1] -= 1 self.module_name = 'features' self.add_module(self.module_name, nn.Sequential(*vgg_layers)) if self.num_classes > 0: self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) def init_weights(self, pretrained=None): if isinstance(pretrained, str): logger = logging.getLogger() from ..runner import load_checkpoint load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, nn.BatchNorm2d): constant_init(m, 1) elif isinstance(m, nn.Linear): normal_init(m, std=0.01) else: raise TypeError('pretrained must be a str or None') def forward(self, x): outs = [] vgg_layers = getattr(self, self.module_name) for i in range(len(self.stage_blocks)): 
for j in range(*self.range_sub_modules[i]): vgg_layer = vgg_layers[j] x = vgg_layer(x) if i in self.out_indices: outs.append(x) if self.num_classes > 0: x = x.view(x.size(0), -1) x = self.classifier(x) outs.append(x) if len(outs) == 1: return outs[0] else: return tuple(outs) def train(self, mode=True): super(VGG, self).train(mode) if self.bn_eval: for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() if self.bn_frozen: for params in m.parameters(): params.requires_grad = False vgg_layers = getattr(self, self.module_name) if mode and self.frozen_stages >= 0: for i in range(self.frozen_stages): for j in range(*self.range_sub_modules[i]): mod = vgg_layers[j] mod.eval() for param in mod.parameters(): param.requires_grad = False
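# ---------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the upstream module): builds a
# VGG-16 feature extractor and checks the output shape. torch is assumed to
# be installed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch

    # No classifier head (num_classes=-1 by default); return the last stage.
    model = VGG(depth=16, out_indices=(4, ))
    model.init_weights()
    model.eval()
    with torch.no_grad():
        feat = model(torch.randn(1, 3, 224, 224))
    # Five stride-2 max-pools reduce 224 -> 7; the last stage has 512 planes.
    assert feat.shape == (1, 512, 7, 7)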
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/vgg.py
# Copyright (c) OpenMMLab. All rights reserved. from .alexnet import AlexNet # yapf: disable from .bricks import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS, PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS, ContextBlock, Conv2d, Conv3d, ConvAWS2d, ConvModule, ConvTranspose2d, ConvTranspose3d, ConvWS2d, DepthwiseSeparableConvModule, GeneralizedAttention, HSigmoid, HSwish, Linear, MaxPool2d, MaxPool3d, NonLocal1d, NonLocal2d, NonLocal3d, Scale, Swish, build_activation_layer, build_conv_layer, build_norm_layer, build_padding_layer, build_plugin_layer, build_upsample_layer, conv_ws_2d, is_norm) from .builder import MODELS, build_model_from_cfg # yapf: enable from .resnet import ResNet, make_res_layer from .utils import (INITIALIZERS, Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, fuse_conv_bn, get_model_complexity_info, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init) from .vgg import VGG, make_vgg_layer __all__ = [ 'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer', 'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init', 'uniform_init', 'kaiming_init', 'caffe2_xavier_init', 'bias_init_with_prob', 'ConvModule', 'build_activation_layer', 'build_conv_layer', 'build_norm_layer', 'build_padding_layer', 'build_upsample_layer', 'build_plugin_layer', 'is_norm', 'NonLocal1d', 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'HSigmoid', 'Swish', 'HSwish', 'GeneralizedAttention', 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS', 'PADDING_LAYERS', 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale', 'get_model_complexity_info', 'conv_ws_2d', 'ConvAWS2d', 'ConvWS2d', 'fuse_conv_bn', 'DepthwiseSeparableConvModule', 'Linear', 'Conv2d', 'ConvTranspose2d', 'MaxPool2d', 'ConvTranspose3d', 'MaxPool3d', 'Conv3d', 'initialize', 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit', 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit', 'Caffe2XavierInit', 'MODELS', 'build_model_from_cfg' ]
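# Illustrative usage (not part of the upstream package): typical entry points
# re-exported above, following the upstream mmcv signatures (treat this as a
# sketch, not a spec):
#
#   from annotator.uniformer.mmcv.cnn import ConvModule, build_norm_layer
#   conv = ConvModule(3, 16, 3, padding=1, norm_cfg=dict(type='BN'))
#   name, bn = build_norm_layer(dict(type='BN'), 16)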
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from ..runner import Sequential from ..utils import Registry, build_from_cfg def build_model_from_cfg(cfg, registry, default_args=None): """Build a PyTorch model from config dict(s). Different from ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built. Args: cfg (dict, list[dict]): The config of modules, it is either a config dict or a list of config dicts. If cfg is a list, the built modules will be wrapped with ``nn.Sequential``. registry (:obj:`Registry`): A registry the module belongs to. default_args (dict, optional): Default arguments to build the module. Defaults to None. Returns: nn.Module: A built nn module. """ if isinstance(cfg, list): modules = [ build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg ] return Sequential(*modules) else: return build_from_cfg(cfg, registry, default_args) MODELS = Registry('model', build_func=build_model_from_cfg)
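# ---------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the upstream module): `ToyBlock` is
# a hypothetical module defined only for this demo. It assumes the mmcv-style
# `Registry.build`, which delegates to the registered build_func.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch.nn as nn

    @MODELS.register_module()
    class ToyBlock(nn.Module):

        def __init__(self, channels):
            super().__init__()
            self.conv = nn.Conv2d(channels, channels, 1)

        def forward(self, x):
            return self.conv(x)

    # A single dict builds one module; a list is wrapped in a Sequential.
    single = MODELS.build(dict(type='ToyBlock', channels=8))
    seq = build_model_from_cfg(
        [dict(type='ToyBlock', channels=8),
         dict(type='ToyBlock', channels=8)], MODELS)
    assert isinstance(single, ToyBlock)
    assert isinstance(seq, nn.Sequential) and len(seq) == 2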
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/builder.py
# Copyright (c) OpenMMLab. All rights reserved. import logging import torch.nn as nn import torch.utils.checkpoint as cp from .utils import constant_init, kaiming_init def conv3x3(in_planes, out_planes, stride=1, dilation=1): """3x3 convolution with padding.""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False): super(BasicBlock, self).__init__() assert style in ['pytorch', 'caffe'] self.conv1 = conv3x3(inplanes, planes, stride, dilation) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.dilation = dilation assert not with_cp def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False): """Bottleneck block. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. """ super(Bottleneck, self).__init__() assert style in ['pytorch', 'caffe'] if style == 'pytorch': conv1_stride = 1 conv2_stride = stride else: conv1_stride = stride conv2_stride = 1 self.conv1 = nn.Conv2d( inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False) self.conv2 = nn.Conv2d( planes, planes, kernel_size=3, stride=conv2_stride, padding=dilation, dilation=dilation, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d( planes, planes * self.expansion, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.with_cp = with_cp def forward(self, x): def _inner_forward(x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1, style='pytorch', with_cp=False): downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append( block( inplanes, planes, stride, dilation, downsample, style=style, with_cp=with_cp)) inplanes = planes * block.expansion for _ in range(1, blocks): layers.append( block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp)) return nn.Sequential(*layers) class ResNet(nn.Module): """ResNet backbone. Args: depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. num_stages (int): Resnet stages, normally 4. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. 
out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. frozen_stages (int): Stages to be frozen (all param fixed). -1 means not freezing any parameters. bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze running stats (mean and var). bn_frozen (bool): Whether to freeze weight and bias of BN layers. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. """ arch_settings = { 18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, depth, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', frozen_stages=-1, bn_eval=True, bn_frozen=False, with_cp=False): super(ResNet, self).__init__() if depth not in self.arch_settings: raise KeyError(f'invalid depth {depth} for resnet') assert num_stages >= 1 and num_stages <= 4 block, stage_blocks = self.arch_settings[depth] stage_blocks = stage_blocks[:num_stages] assert len(strides) == len(dilations) == num_stages assert max(out_indices) < num_stages self.out_indices = out_indices self.style = style self.frozen_stages = frozen_stages self.bn_eval = bn_eval self.bn_frozen = bn_frozen self.with_cp = with_cp self.inplanes = 64 self.conv1 = nn.Conv2d( 3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.res_layers = [] for i, num_blocks in enumerate(stage_blocks): stride = strides[i] dilation = dilations[i] planes = 64 * 2**i res_layer = make_res_layer( block, self.inplanes, planes, num_blocks, stride=stride, dilation=dilation, style=self.style, with_cp=with_cp) self.inplanes = planes * block.expansion layer_name = f'layer{i + 1}' self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1) def init_weights(self, pretrained=None): if isinstance(pretrained, str): logger = logging.getLogger() from ..runner import load_checkpoint load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, nn.BatchNorm2d): constant_init(m, 1) else: raise TypeError('pretrained must be a str or None') def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) x = res_layer(x) if i in self.out_indices: outs.append(x) if len(outs) == 1: return outs[0] else: return tuple(outs) def train(self, mode=True): super(ResNet, self).train(mode) if self.bn_eval: for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() if self.bn_frozen: for params in m.parameters(): params.requires_grad = False if mode and self.frozen_stages >= 0: for param in self.conv1.parameters(): param.requires_grad = False for param in self.bn1.parameters(): param.requires_grad = False self.bn1.eval() self.bn1.weight.requires_grad = False self.bn1.bias.requires_grad = False for i in range(1, self.frozen_stages + 1): mod = getattr(self, f'layer{i}') mod.eval() for param in mod.parameters(): param.requires_grad = False
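# ---------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the upstream module): builds a
# ResNet-50 and checks the per-stage feature shapes. torch is assumed to be
# installed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch

    model = ResNet(depth=50, out_indices=(0, 1, 2, 3))
    model.init_weights()
    model.eval()
    with torch.no_grad():
        feats = model(torch.randn(1, 3, 224, 224))
    # Bottleneck expansion=4 gives 256/512/1024/2048 channels at 56..7 px.
    assert [f.shape[1] for f in feats] == [256, 512, 1024, 2048]
    assert feats[0].shape[-1] == 56 and feats[-1].shape[-1] == 7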
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/resnet.py
# Copyright (c) OpenMMLab. All rights reserved. import logging import torch.nn as nn class AlexNet(nn.Module): """AlexNet backbone. Args: num_classes (int): number of classes for classification. """ def __init__(self, num_classes=-1): super(AlexNet, self).__init__() self.num_classes = num_classes self.features = nn.Sequential( nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(64, 192, kernel_size=5, padding=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(192, 384, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), ) if self.num_classes > 0: self.classifier = nn.Sequential( nn.Dropout(), nn.Linear(256 * 6 * 6, 4096), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(inplace=True), nn.Linear(4096, num_classes), ) def init_weights(self, pretrained=None): if isinstance(pretrained, str): logger = logging.getLogger() from ..runner import load_checkpoint load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: # use default initializer pass else: raise TypeError('pretrained must be a str or None') def forward(self, x): x = self.features(x) if self.num_classes > 0: x = x.view(x.size(0), 256 * 6 * 6) x = self.classifier(x) return x
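# ---------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the upstream module): with
# num_classes > 0 the classifier head is attached, and a 224x224 input
# flattens to exactly 256 * 6 * 6 features. torch is assumed to be installed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch

    model = AlexNet(num_classes=10)
    model.init_weights()
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    assert logits.shape == (1, 10)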
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/alexnet.py
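A quick sanity check for the AlexNet above (hypothetical, assuming the class is importable from its module path). Note the hard-coded `256 * 6 * 6` flatten only matches 224x224 inputs:

import torch
from annotator.uniformer.mmcv.cnn.alexnet import AlexNet  # assumed import path

model = AlexNet(num_classes=10)
model.init_weights()
logits = model(torch.randn(2, 3, 224, 224))
print(logits.shape)  # torch.Size([2, 10])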
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch import nn

from ..utils import constant_init, kaiming_init
from .registry import PLUGIN_LAYERS


def last_zero_init(m):
    if isinstance(m, nn.Sequential):
        constant_init(m[-1], val=0)
    else:
        constant_init(m, val=0)


@PLUGIN_LAYERS.register_module()
class ContextBlock(nn.Module):
    """ContextBlock module in GCNet.

    See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and
    Beyond' (https://arxiv.org/abs/1904.11492) for details.

    Args:
        in_channels (int): Channels of the input feature map.
        ratio (float): Ratio of channels of transform bottleneck.
        pooling_type (str): Pooling method for context modeling.
            Options are 'att' and 'avg', stand for attention pooling and
            average pooling respectively. Default: 'att'.
        fusion_types (Sequence[str]): Fusion method for feature fusion.
            Options are 'channel_add', 'channel_mul', stand for channelwise
            addition and multiplication respectively.
            Default: ('channel_add',)
    """

    _abbr_ = 'context_block'

    def __init__(self,
                 in_channels,
                 ratio,
                 pooling_type='att',
                 fusion_types=('channel_add', )):
        super(ContextBlock, self).__init__()
        assert pooling_type in ['avg', 'att']
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all([f in valid_fusion_types for f in fusion_types])
        assert len(fusion_types) > 0, 'at least one fusion should be used'
        self.in_channels = in_channels
        self.ratio = ratio
        self.planes = int(in_channels * ratio)
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        if pooling_type == 'att':
            self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        if 'channel_add' in fusion_types:
            self.channel_add_conv = nn.Sequential(
                nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),  # yapf: disable
                nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
        else:
            self.channel_add_conv = None
        if 'channel_mul' in fusion_types:
            self.channel_mul_conv = nn.Sequential(
                nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),  # yapf: disable
                nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
        else:
            self.channel_mul_conv = None
        self.reset_parameters()

    def reset_parameters(self):
        if self.pooling_type == 'att':
            kaiming_init(self.conv_mask, mode='fan_in')
            self.conv_mask.inited = True

        if self.channel_add_conv is not None:
            last_zero_init(self.channel_add_conv)
        if self.channel_mul_conv is not None:
            last_zero_init(self.channel_mul_conv)

    def spatial_pool(self, x):
        batch, channel, height, width = x.size()
        if self.pooling_type == 'att':
            input_x = x
            # [N, C, H * W]
            input_x = input_x.view(batch, channel, height * width)
            # [N, 1, C, H * W]
            input_x = input_x.unsqueeze(1)
            # [N, 1, H, W]
            context_mask = self.conv_mask(x)
            # [N, 1, H * W]
            context_mask = context_mask.view(batch, 1, height * width)
            # [N, 1, H * W]
            context_mask = self.softmax(context_mask)
            # [N, 1, H * W, 1]
            context_mask = context_mask.unsqueeze(-1)
            # [N, 1, C, 1]
            context = torch.matmul(input_x, context_mask)
            # [N, C, 1, 1]
            context = context.view(batch, channel, 1, 1)
        else:
            # [N, C, 1, 1]
            context = self.avg_pool(x)
        return context

    def forward(self, x):
        # [N, C, 1, 1]
        context = self.spatial_pool(x)
        out = x
        if self.channel_mul_conv is not None:
            # [N, C, 1, 1]
            channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
            out = out * channel_mul_term
        if self.channel_add_conv is not None:
            # [N, C, 1, 1]
            channel_add_term = self.channel_add_conv(context)
            out = out + channel_add_term
        return out
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/context_block.py
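A small sketch of ContextBlock behaviour (assumes the class above is importable). Because `last_zero_init` zeroes the final conv of the fusion branch, the block is an exact identity mapping right after construction:

import torch
from annotator.uniformer.mmcv.cnn.bricks.context_block import ContextBlock  # assumed path

block = ContextBlock(in_channels=64, ratio=1. / 16)
x = torch.randn(2, 64, 32, 32)
out = block(x)
print(out.shape)               # torch.Size([2, 64, 32, 32])
print(torch.allclose(out, x))  # True at init: the channel_add term starts at zero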
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn

from .conv_module import ConvModule


class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise separable convolution module.

    See https://arxiv.org/pdf/1704.04861.pdf for details.

    This module can replace a ConvModule with the conv block replaced by two
    conv block: depthwise conv block and pointwise conv block. The depthwise
    conv block contains depthwise-conv/norm/activation layers. The pointwise
    conv block contains pointwise-conv/norm/activation layers. It should be
    noted that there will be norm/activation layer in the depthwise conv
    block if `norm_cfg` and `act_cfg` are specified.

    Args:
        in_channels (int): Number of channels in the input feature map.
            Same as that in ``nn._ConvNd``.
        out_channels (int): Number of channels produced by the convolution.
            Same as that in ``nn._ConvNd``.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
            Same as that in ``nn._ConvNd``.
        stride (int | tuple[int]): Stride of the convolution.
            Same as that in ``nn._ConvNd``. Default: 1.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Same as that in ``nn._ConvNd``. Default: 0.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Same as that in ``nn._ConvNd``. Default: 1.
        norm_cfg (dict): Default norm config for both depthwise ConvModule
            and pointwise ConvModule. Default: None.
        act_cfg (dict): Default activation config for both depthwise
            ConvModule and pointwise ConvModule. Default: dict(type='ReLU').
        dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is
            'default', it will be the same as `norm_cfg`. Default: 'default'.
        dw_act_cfg (dict): Activation config of depthwise ConvModule. If it
            is 'default', it will be the same as `act_cfg`.
            Default: 'default'.
        pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is
            'default', it will be the same as `norm_cfg`. Default: 'default'.
        pw_act_cfg (dict): Activation config of pointwise ConvModule. If it
            is 'default', it will be the same as `act_cfg`.
            Default: 'default'.
        kwargs (optional): Other shared arguments for depthwise and pointwise
            ConvModule. See ConvModule for ref.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 dw_norm_cfg='default',
                 dw_act_cfg='default',
                 pw_norm_cfg='default',
                 pw_act_cfg='default',
                 **kwargs):
        super(DepthwiseSeparableConvModule, self).__init__()
        assert 'groups' not in kwargs, 'groups should not be specified'

        # if norm/activation config of depthwise/pointwise ConvModule is not
        # specified, use default config.
        dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg
        dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg
        pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg
        pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg

        # depthwise convolution
        self.depthwise_conv = ConvModule(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            norm_cfg=dw_norm_cfg,
            act_cfg=dw_act_cfg,
            **kwargs)

        self.pointwise_conv = ConvModule(
            in_channels,
            out_channels,
            1,
            norm_cfg=pw_norm_cfg,
            act_cfg=pw_act_cfg,
            **kwargs)

    def forward(self, x):
        x = self.depthwise_conv(x)
        x = self.pointwise_conv(x)
        return x
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py
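The point of the factorization above is the parameter saving; a hedged comparison sketch (assuming the module is importable and that `ConvModule` reduces to a plain conv when no norm/activation is configured):

import torch.nn as nn
from annotator.uniformer.mmcv.cnn import DepthwiseSeparableConvModule  # assumed path

sep = DepthwiseSeparableConvModule(64, 128, 3, padding=1, act_cfg=None)
dense = nn.Conv2d(64, 128, 3, padding=1)
count = lambda m: sum(p.numel() for p in m.parameters())
# depthwise 64*1*3*3 + pointwise 64*128 weights vs. dense 64*128*3*3
print(count(sep), count(dense))  # roughly 8.9k vs. 73.9k parameters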
# Copyright (c) OpenMMLab. All rights reserved.
from annotator.uniformer.mmcv.utils import Registry

CONV_LAYERS = Registry('conv layer')
NORM_LAYERS = Registry('norm layer')
ACTIVATION_LAYERS = Registry('activation layer')
PADDING_LAYERS = Registry('padding layer')
UPSAMPLE_LAYERS = Registry('upsample layer')
PLUGIN_LAYERS = Registry('plugin layer')

DROPOUT_LAYERS = Registry('drop out layers')
POSITIONAL_ENCODING = Registry('position encoding')
ATTENTION = Registry('attention')
FEEDFORWARD_NETWORK = Registry('feed-forward Network')
TRANSFORMER_LAYER = Registry('transformerLayer')
TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence')
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/registry.py
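These are plain `Registry` instances, so downstream code can register its own bricks; a hypothetical example (the `Squared` class is illustrative, not part of mmcv):

import torch.nn as nn
from annotator.uniformer.mmcv.cnn.bricks.registry import ACTIVATION_LAYERS  # assumed path

@ACTIVATION_LAYERS.register_module()
class Squared(nn.Module):  # hypothetical custom activation
    def forward(self, x):
        return x * x

# build_activation_layer(dict(type='Squared')) would now resolve this class.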
# Copyright (c) OpenMMLab. All rights reserved.
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..utils import kaiming_init
from .registry import PLUGIN_LAYERS


@PLUGIN_LAYERS.register_module()
class GeneralizedAttention(nn.Module):
    """GeneralizedAttention module.

    See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
    (https://arxiv.org/abs/1904.05873) for details.

    Args:
        in_channels (int): Channels of the input feature map.
        spatial_range (int): The spatial range. -1 indicates no spatial range
            constraint. Default: -1.
        num_heads (int): The head number of empirical_attention module.
            Default: 9.
        position_embedding_dim (int): The position embedding dimension.
            Default: -1.
        position_magnitude (int): A multiplier acting on coord difference.
            Default: 1.
        kv_stride (int): The feature stride acting on key/value feature map.
            Default: 2.
        q_stride (int): The feature stride acting on query feature map.
            Default: 1.
        attention_type (str): A binary indicator string for indicating which
            items in generalized empirical_attention module are used.
            Default: '1111'.

            - '1000' indicates 'query and key content' (appr - appr) item,
            - '0100' indicates 'query content and relative position'
              (appr - position) item,
            - '0010' indicates 'key content only' (bias - appr) item,
            - '0001' indicates 'relative position only' (bias - position)
              item.
    """

    _abbr_ = 'gen_attention_block'

    def __init__(self,
                 in_channels,
                 spatial_range=-1,
                 num_heads=9,
                 position_embedding_dim=-1,
                 position_magnitude=1,
                 kv_stride=2,
                 q_stride=1,
                 attention_type='1111'):

        super(GeneralizedAttention, self).__init__()

        # hard range means local range for non-local operation
        self.position_embedding_dim = (
            position_embedding_dim
            if position_embedding_dim > 0 else in_channels)

        self.position_magnitude = position_magnitude
        self.num_heads = num_heads
        self.in_channels = in_channels
        self.spatial_range = spatial_range
        self.kv_stride = kv_stride
        self.q_stride = q_stride
        self.attention_type = [bool(int(_)) for _ in attention_type]
        self.qk_embed_dim = in_channels // num_heads
        out_c = self.qk_embed_dim * num_heads

        if self.attention_type[0] or self.attention_type[1]:
            self.query_conv = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_c,
                kernel_size=1,
                bias=False)
            self.query_conv.kaiming_init = True

        if self.attention_type[0] or self.attention_type[2]:
            self.key_conv = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_c,
                kernel_size=1,
                bias=False)
            self.key_conv.kaiming_init = True

        self.v_dim = in_channels // num_heads
        self.value_conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=self.v_dim * num_heads,
            kernel_size=1,
            bias=False)
        self.value_conv.kaiming_init = True

        if self.attention_type[1] or self.attention_type[3]:
            self.appr_geom_fc_x = nn.Linear(
                self.position_embedding_dim // 2, out_c, bias=False)
            self.appr_geom_fc_x.kaiming_init = True

            self.appr_geom_fc_y = nn.Linear(
                self.position_embedding_dim // 2, out_c, bias=False)
            self.appr_geom_fc_y.kaiming_init = True

        if self.attention_type[2]:
            stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
            appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv
            self.appr_bias = nn.Parameter(appr_bias_value)

        if self.attention_type[3]:
            stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
            geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv
            self.geom_bias = nn.Parameter(geom_bias_value)

        self.proj_conv = nn.Conv2d(
            in_channels=self.v_dim * num_heads,
            out_channels=in_channels,
            kernel_size=1,
            bias=True)
        self.proj_conv.kaiming_init = True
        self.gamma = nn.Parameter(torch.zeros(1))

        if self.spatial_range >= 0:
            # only works when non local is after 3*3 conv
            if in_channels == 256:
                max_len = 84
            elif in_channels == 512:
                max_len = 42

            max_len_kv = int((max_len - 1.0) / self.kv_stride + 1)
            # np.int was removed in recent NumPy; the builtin int preserves
            # the original dtype behaviour.
            local_constraint_map = np.ones(
                (max_len, max_len, max_len_kv, max_len_kv), dtype=int)
            for iy in range(max_len):
                for ix in range(max_len):
                    local_constraint_map[
                        iy, ix,
                        max((iy - self.spatial_range) // self.kv_stride, 0):
                        min((iy + self.spatial_range + 1) // self.kv_stride +
                            1, max_len),
                        max((ix - self.spatial_range) // self.kv_stride, 0):
                        min((ix + self.spatial_range + 1) // self.kv_stride +
                            1, max_len)] = 0

            self.local_constraint_map = nn.Parameter(
                torch.from_numpy(local_constraint_map).byte(),
                requires_grad=False)

        if self.q_stride > 1:
            self.q_downsample = nn.AvgPool2d(
                kernel_size=1, stride=self.q_stride)
        else:
            self.q_downsample = None

        if self.kv_stride > 1:
            self.kv_downsample = nn.AvgPool2d(
                kernel_size=1, stride=self.kv_stride)
        else:
            self.kv_downsample = None

        self.init_weights()

    def get_position_embedding(self,
                               h,
                               w,
                               h_kv,
                               w_kv,
                               q_stride,
                               kv_stride,
                               device,
                               dtype,
                               feat_dim,
                               wave_length=1000):
        # the default type of Tensor is float32, leading to type mismatch
        # in fp16 mode. Cast it to support fp16 mode.
        h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype)
        h_idxs = h_idxs.view((h, 1)) * q_stride

        w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype)
        w_idxs = w_idxs.view((w, 1)) * q_stride

        h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to(
            device=device, dtype=dtype)
        h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride

        w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to(
            device=device, dtype=dtype)
        w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride

        # (h, h_kv, 1)
        h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0)
        h_diff *= self.position_magnitude

        # (w, w_kv, 1)
        w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0)
        w_diff *= self.position_magnitude

        feat_range = torch.arange(0, feat_dim / 4).to(
            device=device, dtype=dtype)

        dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype)
        dim_mat = dim_mat**((4. / feat_dim) * feat_range)
        dim_mat = dim_mat.view((1, 1, -1))

        embedding_x = torch.cat(
            ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2)

        embedding_y = torch.cat(
            ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2)

        return embedding_x, embedding_y

    def forward(self, x_input):
        num_heads = self.num_heads

        # use empirical_attention
        if self.q_downsample is not None:
            x_q = self.q_downsample(x_input)
        else:
            x_q = x_input
        n, _, h, w = x_q.shape

        if self.kv_downsample is not None:
            x_kv = self.kv_downsample(x_input)
        else:
            x_kv = x_input
        _, _, h_kv, w_kv = x_kv.shape

        if self.attention_type[0] or self.attention_type[1]:
            proj_query = self.query_conv(x_q).view(
                (n, num_heads, self.qk_embed_dim, h * w))
            proj_query = proj_query.permute(0, 1, 3, 2)

        if self.attention_type[0] or self.attention_type[2]:
            proj_key = self.key_conv(x_kv).view(
                (n, num_heads, self.qk_embed_dim, h_kv * w_kv))

        if self.attention_type[1] or self.attention_type[3]:
            position_embed_x, position_embed_y = self.get_position_embedding(
                h, w, h_kv, w_kv, self.q_stride, self.kv_stride,
                x_input.device, x_input.dtype, self.position_embedding_dim)
            # (n, num_heads, w, w_kv, dim)
            position_feat_x = self.appr_geom_fc_x(position_embed_x).\
                view(1, w, w_kv, num_heads, self.qk_embed_dim).\
                permute(0, 3, 1, 2, 4).\
                repeat(n, 1, 1, 1, 1)

            # (n, num_heads, h, h_kv, dim)
            position_feat_y = self.appr_geom_fc_y(position_embed_y).\
                view(1, h, h_kv, num_heads, self.qk_embed_dim).\
                permute(0, 3, 1, 2, 4).\
                repeat(n, 1, 1, 1, 1)

            position_feat_x /= math.sqrt(2)
            position_feat_y /= math.sqrt(2)

        # accelerate for saliency only
        if (np.sum(self.attention_type) == 1) and self.attention_type[2]:
            appr_bias = self.appr_bias.\
                view(1, num_heads, 1, self.qk_embed_dim).\
                repeat(n, 1, 1, 1)

            energy = torch.matmul(appr_bias, proj_key).\
                view(n, num_heads, 1, h_kv * w_kv)

            h = 1
            w = 1
        else:
            # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for
            if not self.attention_type[0]:
                energy = torch.zeros(
                    n,
                    num_heads,
                    h,
                    w,
                    h_kv,
                    w_kv,
                    dtype=x_input.dtype,
                    device=x_input.device)

            # attention_type[0]: appr - appr
            # attention_type[1]: appr - position
            # attention_type[2]: bias - appr
            # attention_type[3]: bias - position
            if self.attention_type[0] or self.attention_type[2]:
                if self.attention_type[0] and self.attention_type[2]:
                    appr_bias = self.appr_bias.\
                        view(1, num_heads, 1, self.qk_embed_dim)
                    energy = torch.matmul(proj_query + appr_bias, proj_key).\
                        view(n, num_heads, h, w, h_kv, w_kv)

                elif self.attention_type[0]:
                    energy = torch.matmul(proj_query, proj_key).\
                        view(n, num_heads, h, w, h_kv, w_kv)

                elif self.attention_type[2]:
                    appr_bias = self.appr_bias.\
                        view(1, num_heads, 1, self.qk_embed_dim).\
                        repeat(n, 1, 1, 1)

                    energy += torch.matmul(appr_bias, proj_key).\
                        view(n, num_heads, 1, 1, h_kv, w_kv)

            if self.attention_type[1] or self.attention_type[3]:
                if self.attention_type[1] and self.attention_type[3]:
                    geom_bias = self.geom_bias.\
                        view(1, num_heads, 1, self.qk_embed_dim)

                    proj_query_reshape = (proj_query + geom_bias).\
                        view(n, num_heads, h, w, self.qk_embed_dim)

                    energy_x = torch.matmul(
                        proj_query_reshape.permute(0, 1, 3, 2, 4),
                        position_feat_x.permute(0, 1, 2, 4, 3))
                    energy_x = energy_x.\
                        permute(0, 1, 3, 2, 4).unsqueeze(4)

                    energy_y = torch.matmul(
                        proj_query_reshape,
                        position_feat_y.permute(0, 1, 2, 4, 3))
                    energy_y = energy_y.unsqueeze(5)

                    energy += energy_x + energy_y

                elif self.attention_type[1]:
                    proj_query_reshape = proj_query.\
                        view(n, num_heads, h, w, self.qk_embed_dim)
                    proj_query_reshape = proj_query_reshape.\
                        permute(0, 1, 3, 2, 4)
                    position_feat_x_reshape = position_feat_x.\
                        permute(0, 1, 2, 4, 3)
                    position_feat_y_reshape = position_feat_y.\
                        permute(0, 1, 2, 4, 3)

                    energy_x = torch.matmul(proj_query_reshape,
                                            position_feat_x_reshape)
                    energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4)

                    energy_y = torch.matmul(proj_query_reshape,
                                            position_feat_y_reshape)
                    energy_y = energy_y.unsqueeze(5)

                    energy += energy_x + energy_y

                elif self.attention_type[3]:
                    geom_bias = self.geom_bias.\
                        view(1, num_heads, self.qk_embed_dim, 1).\
                        repeat(n, 1, 1, 1)

                    position_feat_x_reshape = position_feat_x.\
                        view(n, num_heads, w * w_kv, self.qk_embed_dim)

                    position_feat_y_reshape = position_feat_y.\
                        view(n, num_heads, h * h_kv, self.qk_embed_dim)

                    energy_x = torch.matmul(position_feat_x_reshape,
                                            geom_bias)
                    energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv)

                    energy_y = torch.matmul(position_feat_y_reshape,
                                            geom_bias)
                    energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1)

                    energy += energy_x + energy_y

            energy = energy.view(n, num_heads, h * w, h_kv * w_kv)

        if self.spatial_range >= 0:
            cur_local_constraint_map = \
                self.local_constraint_map[:h, :w, :h_kv, :w_kv].\
                contiguous().\
                view(1, 1, h * w, h_kv * w_kv)

            energy = energy.masked_fill_(cur_local_constraint_map,
                                         float('-inf'))

        attention = F.softmax(energy, 3)

        proj_value = self.value_conv(x_kv)
        proj_value_reshape = proj_value.\
            view((n, num_heads, self.v_dim, h_kv * w_kv)).\
            permute(0, 1, 3, 2)

        out = torch.matmul(attention, proj_value_reshape).\
            permute(0, 1, 3, 2).\
            contiguous().\
            view(n, self.v_dim * self.num_heads, h, w)

        out = self.proj_conv(out)

        # output is downsampled, upsample back to input size
        if self.q_downsample is not None:
            out = F.interpolate(
                out,
                size=x_input.shape[2:],
                mode='bilinear',
                align_corners=False)

        out = self.gamma * out + x_input
        return out

    def init_weights(self):
        for m in self.modules():
            if hasattr(m, 'kaiming_init') and m.kaiming_init:
                kaiming_init(
                    m,
                    mode='fan_in',
                    nonlinearity='leaky_relu',
                    bias=0,
                    distribution='uniform',
                    a=1)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py
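A sketch of the saliency-only fast path ('0010' keeps just the key-content term). Since `gamma` starts at zero, the module is also an identity at init (assumes the class above is importable):

import torch
from annotator.uniformer.mmcv.cnn.bricks.generalized_attention import \
    GeneralizedAttention  # assumed path

attn = GeneralizedAttention(in_channels=16, num_heads=8, attention_type='0010')
x = torch.randn(1, 16, 20, 20)
out = attn(x)
print(out.shape, torch.allclose(out, x))  # torch.Size([1, 16, 20, 20]) True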
# Copyright (c) OpenMMLab. All rights reserved.
import inspect

import torch.nn as nn

from annotator.uniformer.mmcv.utils import is_tuple_of
from annotator.uniformer.mmcv.utils.parrots_wrapper import (
    SyncBatchNorm, _BatchNorm, _InstanceNorm)
from .registry import NORM_LAYERS

NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d)
NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d)
NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d)
NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d)
NORM_LAYERS.register_module('SyncBN', module=SyncBatchNorm)
NORM_LAYERS.register_module('GN', module=nn.GroupNorm)
NORM_LAYERS.register_module('LN', module=nn.LayerNorm)
NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d)
NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d)
NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d)
NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d)


def infer_abbr(class_type):
    """Infer abbreviation from the class name.

    When we build a norm layer with `build_norm_layer()`, we want to preserve
    the norm type in variable names, e.g, self.bn1, self.gn. This method will
    infer the abbreviation to map class types to abbreviations.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
        InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln"
        and "in" respectively.
    Rule 3: If the class name contains "batch", "group", "layer" or
        "instance", the abbreviation of this layer will be "bn", "gn", "ln"
        and "in" respectively.
    Rule 4: Otherwise, the abbreviation falls back to "norm_layer".

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """
    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    if issubclass(class_type, _InstanceNorm):  # IN is a subclass of BN
        return 'in'
    elif issubclass(class_type, _BatchNorm):
        return 'bn'
    elif issubclass(class_type, nn.GroupNorm):
        return 'gn'
    elif issubclass(class_type, nn.LayerNorm):
        return 'ln'
    else:
        class_name = class_type.__name__.lower()
        if 'batch' in class_name:
            return 'bn'
        elif 'group' in class_name:
            return 'gn'
        elif 'layer' in class_name:
            return 'ln'
        elif 'instance' in class_name:
            return 'in'
        else:
            return 'norm_layer'


def build_norm_layer(cfg, num_features, postfix=''):
    """Build normalization layer.

    Args:
        cfg (dict): The norm layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate a norm layer.
            - requires_grad (bool, optional): Whether stop gradient updates.
        num_features (int): Number of input channels.
        postfix (int | str): The postfix to be appended into norm
            abbreviation to create named layer.

    Returns:
        (str, nn.Module): The first element is the layer name consisting of
        abbreviation and postfix, e.g., bn1, gn. The second element is the
        created norm layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in NORM_LAYERS:
        raise KeyError(f'Unrecognized norm type {layer_type}')

    norm_layer = NORM_LAYERS.get(layer_type)
    abbr = infer_abbr(norm_layer)

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-5)
    if layer_type != 'GN':
        layer = norm_layer(num_features, **cfg_)
        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
            layer._specify_ddp_gpu_num(1)
    else:
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)

    for param in layer.parameters():
        param.requires_grad = requires_grad

    return name, layer


def is_norm(layer, exclude=None):
    """Check if a layer is a normalization layer.

    Args:
        layer (nn.Module): The layer to be checked.
        exclude (type | tuple[type]): Types to be excluded.

    Returns:
        bool: Whether the layer is a norm layer.
    """
    if exclude is not None:
        if not isinstance(exclude, tuple):
            exclude = (exclude, )
        if not is_tuple_of(exclude, type):
            raise TypeError(
                f'"exclude" must be either None or type or a tuple of types, '
                f'but got {type(exclude)}: {exclude}')

    if exclude and isinstance(layer, exclude):
        return False

    all_norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm)
    return isinstance(layer, all_norm_bases)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/norm.py
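Example of what `build_norm_layer` returns (assuming the builder above is importable). The postfix lands in the returned name so callers can do `self.add_module(name, layer)`:

from annotator.uniformer.mmcv.cnn import build_norm_layer  # assumed path

name, bn = build_norm_layer(dict(type='BN', requires_grad=False), 64, postfix=1)
print(name, all(not p.requires_grad for p in bn.parameters()))  # bn1 True

name, gn = build_norm_layer(dict(type='GN', num_groups=8), 64)
print(name, type(gn).__name__)  # gn GroupNorm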
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn

from .registry import ACTIVATION_LAYERS


@ACTIVATION_LAYERS.register_module()
class HSwish(nn.Module):
    """Hard Swish Module.

    This module applies the hard swish function:

    .. math::
        Hswish(x) = x * ReLU6(x + 3) / 6

    Args:
        inplace (bool): can optionally do the operation in-place.
            Default: False.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        self.act = nn.ReLU6(inplace)

    def forward(self, x):
        return x * self.act(x + 3) / 6
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/hswish.py
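A numeric spot check of the hard swish above: ReLU6(x + 3) / 6 saturates to 0 below -3 and to 1 above +3, so the function is linear for large positive inputs:

import torch
act = HSwish()  # defined above
print(act(torch.tensor([-4., 0., 3., 6.])))  # tensor([-0., 0., 3., 6.])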
# Copyright (c) OpenMMLab. All rights reserved.
from .activation import build_activation_layer
from .context_block import ContextBlock
from .conv import build_conv_layer
from .conv2d_adaptive_padding import Conv2dAdaptivePadding
from .conv_module import ConvModule
from .conv_ws import ConvAWS2d, ConvWS2d, conv_ws_2d
from .depthwise_separable_conv_module import DepthwiseSeparableConvModule
from .drop import Dropout, DropPath
from .generalized_attention import GeneralizedAttention
from .hsigmoid import HSigmoid
from .hswish import HSwish
from .non_local import NonLocal1d, NonLocal2d, NonLocal3d
from .norm import build_norm_layer, is_norm
from .padding import build_padding_layer
from .plugin import build_plugin_layer
from .registry import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
                       PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS)
from .scale import Scale
from .swish import Swish
from .upsample import build_upsample_layer
from .wrappers import (Conv2d, Conv3d, ConvTranspose2d, ConvTranspose3d,
                       Linear, MaxPool2d, MaxPool3d)

__all__ = [
    'ConvModule', 'build_activation_layer', 'build_conv_layer',
    'build_norm_layer', 'build_padding_layer', 'build_upsample_layer',
    'build_plugin_layer', 'is_norm', 'HSigmoid', 'HSwish', 'NonLocal1d',
    'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'GeneralizedAttention',
    'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS', 'PADDING_LAYERS',
    'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale', 'ConvAWS2d', 'ConvWS2d',
    'conv_ws_2d', 'DepthwiseSeparableConvModule', 'Swish', 'Linear',
    'Conv2dAdaptivePadding', 'Conv2d', 'ConvTranspose2d', 'MaxPool2d',
    'ConvTranspose3d', 'MaxPool3d', 'Conv3d', 'Dropout', 'DropPath'
]
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F

from ..utils import xavier_init
from .registry import UPSAMPLE_LAYERS

UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample)
UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample)


@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
class PixelShufflePack(nn.Module):
    """Pixel Shuffle upsample layer.

    This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to
    achieve a simple upsampling with pixel shuffle.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Upsample ratio.
        upsample_kernel (int): Kernel size of the conv layer to expand the
            channels.
    """

    def __init__(self, in_channels, out_channels, scale_factor,
                 upsample_kernel):
        super(PixelShufflePack, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        self.upsample_conv = nn.Conv2d(
            self.in_channels,
            self.out_channels * scale_factor * scale_factor,
            self.upsample_kernel,
            padding=(self.upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        xavier_init(self.upsample_conv, distribution='uniform')

    def forward(self, x):
        x = self.upsample_conv(x)
        x = F.pixel_shuffle(x, self.scale_factor)
        return x


def build_upsample_layer(cfg, *args, **kwargs):
    """Build upsample layer.

    Args:
        cfg (dict): The upsample layer config, which should contain:

            - type (str): Layer type.
            - scale_factor (int): Upsample ratio, which is not applicable to
              deconv.
            - layer args: Args needed to instantiate a upsample layer.
        args (argument list): Arguments passed to the ``__init__``
            method of the corresponding conv layer.
        kwargs (keyword arguments): Keyword arguments passed to the
            ``__init__`` method of the corresponding conv layer.

    Returns:
        nn.Module: Created upsample layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    if 'type' not in cfg:
        raise KeyError(
            f'the cfg dict must contain the key "type", but got {cfg}')
    cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in UPSAMPLE_LAYERS:
        raise KeyError(f'Unrecognized upsample type {layer_type}')
    else:
        upsample = UPSAMPLE_LAYERS.get(layer_type)

    if upsample is nn.Upsample:
        cfg_['mode'] = layer_type
    layer = upsample(*args, **kwargs, **cfg_)
    return layer
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/upsample.py
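Both registered routes produce a 2x upsample; a hedged sketch assuming `build_upsample_layer` above is importable (note how the 'bilinear' registry key doubles as the interpolation mode):

import torch
x = torch.randn(1, 16, 8, 8)

interp = build_upsample_layer(
    dict(type='bilinear', scale_factor=2, align_corners=False))
shuffle = build_upsample_layer(
    dict(type='pixel_shuffle', in_channels=16, out_channels=16,
         scale_factor=2, upsample_kernel=3))
print(interp(x).shape, shuffle(x).shape)  # both torch.Size([1, 16, 16, 16])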
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta

import torch
import torch.nn as nn

from ..utils import constant_init, normal_init
from .conv_module import ConvModule
from .registry import PLUGIN_LAYERS


class _NonLocalNd(nn.Module, metaclass=ABCMeta):
    """Basic Non-local module.

    This module is proposed in
    "Non-local Neural Networks"
    Paper reference: https://arxiv.org/abs/1711.07971
    Code reference: https://github.com/AlexHex7/Non-local_pytorch

    Args:
        in_channels (int): Channels of the input feature map.
        reduction (int): Channel reduction ratio. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`.
            Default: True.
        conv_cfg (None | dict): The config dict for convolution layers.
            If not specified, it will use `nn.Conv2d` for convolution layers.
            Default: None.
        norm_cfg (None | dict): The config dict for normalization layers.
            Default: None. (This parameter is only applicable to conv_out.)
        mode (str): Options are `gaussian`, `concatenation`,
            `embedded_gaussian` and `dot_product`.
            Default: embedded_gaussian.
    """

    def __init__(self,
                 in_channels,
                 reduction=2,
                 use_scale=True,
                 conv_cfg=None,
                 norm_cfg=None,
                 mode='embedded_gaussian',
                 **kwargs):
        super(_NonLocalNd, self).__init__()
        self.in_channels = in_channels
        self.reduction = reduction
        self.use_scale = use_scale
        self.inter_channels = max(in_channels // reduction, 1)
        self.mode = mode

        if mode not in [
                'gaussian', 'embedded_gaussian', 'dot_product',
                'concatenation'
        ]:
            raise ValueError("Mode should be in 'gaussian', 'concatenation', "
                             f"'embedded_gaussian' or 'dot_product', but got "
                             f'{mode} instead.')

        # g, theta, phi are defaulted as `nn.ConvNd`.
        # Here we use ConvModule for potential usage.
        self.g = ConvModule(
            self.in_channels,
            self.inter_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            act_cfg=None)
        self.conv_out = ConvModule(
            self.inter_channels,
            self.in_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

        if self.mode != 'gaussian':
            self.theta = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)
            self.phi = ConvModule(
                self.in_channels,
                self.inter_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                act_cfg=None)

        if self.mode == 'concatenation':
            self.concat_project = ConvModule(
                self.inter_channels * 2,
                1,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
                act_cfg=dict(type='ReLU'))

        self.init_weights(**kwargs)

    def init_weights(self, std=0.01, zeros_init=True):
        if self.mode != 'gaussian':
            for m in [self.g, self.theta, self.phi]:
                normal_init(m.conv, std=std)
        else:
            normal_init(self.g.conv, std=std)
        if zeros_init:
            if self.conv_out.norm_cfg is None:
                constant_init(self.conv_out.conv, 0)
            else:
                constant_init(self.conv_out.norm, 0)
        else:
            if self.conv_out.norm_cfg is None:
                normal_init(self.conv_out.conv, std=std)
            else:
                normal_init(self.conv_out.norm, std=std)

    def gaussian(self, theta_x, phi_x):
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def embedded_gaussian(self, theta_x, phi_x):
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # theta_x.shape[-1] is `self.inter_channels`
            pairwise_weight /= theta_x.shape[-1]**0.5
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def dot_product(self, theta_x, phi_x):
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight /= pairwise_weight.shape[-1]
        return pairwise_weight

    def concatenation(self, theta_x, phi_x):
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        h = theta_x.size(2)
        w = phi_x.size(3)
        theta_x = theta_x.repeat(1, 1, 1, w)
        phi_x = phi_x.repeat(1, 1, h, 1)

        concat_feature = torch.cat([theta_x, phi_x], dim=1)
        pairwise_weight = self.concat_project(concat_feature)
        n, _, h, w = pairwise_weight.size()
        pairwise_weight = pairwise_weight.view(n, h, w)
        pairwise_weight /= pairwise_weight.shape[-1]
        return pairwise_weight

    def forward(self, x):
        # Assume `reduction = 1`, then `inter_channels = C`
        # or `inter_channels = C` when `mode="gaussian"`

        # NonLocal1d x: [N, C, H]
        # NonLocal2d x: [N, C, H, W]
        # NonLocal3d x: [N, C, T, H, W]
        n = x.size(0)

        # NonLocal1d g_x: [N, H, C]
        # NonLocal2d g_x: [N, HxW, C]
        # NonLocal3d g_x: [N, TxHxW, C]
        g_x = self.g(x).view(n, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        # NonLocal1d theta_x: [N, H, C], phi_x: [N, C, H]
        # NonLocal2d theta_x: [N, HxW, C], phi_x: [N, C, HxW]
        # NonLocal3d theta_x: [N, TxHxW, C], phi_x: [N, C, TxHxW]
        if self.mode == 'gaussian':
            theta_x = x.view(n, self.in_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            if self.sub_sample:
                phi_x = self.phi(x).view(n, self.in_channels, -1)
            else:
                phi_x = x.view(n, self.in_channels, -1)
        elif self.mode == 'concatenation':
            theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
        else:
            theta_x = self.theta(x).view(n, self.inter_channels, -1)
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, -1)

        pairwise_func = getattr(self, self.mode)
        # NonLocal1d pairwise_weight: [N, H, H]
        # NonLocal2d pairwise_weight: [N, HxW, HxW]
        # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
        pairwise_weight = pairwise_func(theta_x, phi_x)

        # NonLocal1d y: [N, H, C]
        # NonLocal2d y: [N, HxW, C]
        # NonLocal3d y: [N, TxHxW, C]
        y = torch.matmul(pairwise_weight, g_x)
        # NonLocal1d y: [N, C, H]
        # NonLocal2d y: [N, C, H, W]
        # NonLocal3d y: [N, C, T, H, W]
        y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
                                                    *x.size()[2:])

        output = x + self.conv_out(y)

        return output


class NonLocal1d(_NonLocalNd):
    """1D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv1d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv1d'),
                 **kwargs):
        super(NonLocal1d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)

        self.sub_sample = sub_sample

        if sub_sample:
            max_pool_layer = nn.MaxPool1d(kernel_size=2)
            self.g = nn.Sequential(self.g, max_pool_layer)
            if self.mode != 'gaussian':
                self.phi = nn.Sequential(self.phi, max_pool_layer)
            else:
                self.phi = max_pool_layer


@PLUGIN_LAYERS.register_module()
class NonLocal2d(_NonLocalNd):
    """2D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv2d').
    """

    _abbr_ = 'nonlocal_block'

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv2d'),
                 **kwargs):
        super(NonLocal2d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)

        self.sub_sample = sub_sample

        if sub_sample:
            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            self.g = nn.Sequential(self.g, max_pool_layer)
            if self.mode != 'gaussian':
                self.phi = nn.Sequential(self.phi, max_pool_layer)
            else:
                self.phi = max_pool_layer


class NonLocal3d(_NonLocalNd):
    """3D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv3d').
    """

    def __init__(self,
                 in_channels,
                 sub_sample=False,
                 conv_cfg=dict(type='Conv3d'),
                 **kwargs):
        super(NonLocal3d, self).__init__(
            in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample

        if sub_sample:
            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            self.g = nn.Sequential(self.g, max_pool_layer)
            if self.mode != 'gaussian':
                self.phi = nn.Sequential(self.phi, max_pool_layer)
            else:
                self.phi = max_pool_layer
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/non_local.py
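Usage sketch for the 2D variant (assumes the classes above are importable). With the default `zeros_init=True` and no norm on `conv_out`, the residual output equals the input at init:

import torch
block = NonLocal2d(in_channels=32, mode='embedded_gaussian', sub_sample=True)
x = torch.randn(2, 32, 20, 20)
out = block(x)
print(out.shape, torch.allclose(out, x))  # torch.Size([2, 32, 20, 20]) True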
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F

from .registry import CONV_LAYERS


def conv_ws_2d(input,
               weight,
               bias=None,
               stride=1,
               padding=0,
               dilation=1,
               groups=1,
               eps=1e-5):
    c_in = weight.size(0)
    weight_flat = weight.view(c_in, -1)
    mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)
    std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)
    weight = (weight - mean) / (std + eps)
    return F.conv2d(input, weight, bias, stride, padding, dilation, groups)


@CONV_LAYERS.register_module('ConvWS')
class ConvWS2d(nn.Conv2d):

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 eps=1e-5):
        super(ConvWS2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.eps = eps

    def forward(self, x):
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups, self.eps)


@CONV_LAYERS.register_module(name='ConvAWS')
class ConvAWS2d(nn.Conv2d):
    """AWS (Adaptive Weight Standardization)

    This is a variant of Weight Standardization
    (https://arxiv.org/pdf/1903.10520.pdf)
    It is used in DetectoRS to avoid NaN
    (https://arxiv.org/pdf/2006.02334.pdf)

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the conv kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If set True, adds a learnable bias to the
            output. Default: True
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.register_buffer('weight_gamma',
                             torch.ones(self.out_channels, 1, 1, 1))
        self.register_buffer('weight_beta',
                             torch.zeros(self.out_channels, 1, 1, 1))

    def _get_weight(self, weight):
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
        weight = (weight - mean) / std
        weight = self.weight_gamma * weight + self.weight_beta
        return weight

    def forward(self, x):
        weight = self._get_weight(self.weight)
        return F.conv2d(x, weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                              strict, missing_keys, unexpected_keys,
                              error_msgs):
        """Override default load function.

        AWS overrides the function _load_from_state_dict to recover
        weight_gamma and weight_beta if they are missing. If weight_gamma and
        weight_beta are found in the checkpoint, this function will return
        after super()._load_from_state_dict. Otherwise, it will compute the
        mean and std of the pretrained weights and store them in weight_beta
        and weight_gamma.
        """
        self.weight_gamma.data.fill_(-1)
        local_missing_keys = []
        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, local_missing_keys,
                                      unexpected_keys, error_msgs)
        if self.weight_gamma.data.mean() > 0:
            for k in local_missing_keys:
                missing_keys.append(k)
            return
        weight = self.weight.data
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
        self.weight_beta.data.copy_(mean)
        self.weight_gamma.data.copy_(std)
        missing_gamma_beta = [
            k for k in local_missing_keys
            if k.endswith('weight_gamma') or k.endswith('weight_beta')
        ]
        for k in missing_gamma_beta:
            local_missing_keys.remove(k)
        for k in local_missing_keys:
            missing_keys.append(k)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py
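A quick check of the weight-standardized conv above. The standardization happens on the fly inside `forward`; the stored parameter keeps its raw statistics:

import torch
conv = ConvWS2d(3, 8, 3, padding=1)
y = conv(torch.randn(1, 3, 16, 16))
print(y.shape)  # torch.Size([1, 8, 16, 16])
# conv.weight is untouched; conv_ws_2d standardizes a per-output-channel
# copy of it (zero mean, unit std) on every call.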
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn

from .registry import ACTIVATION_LAYERS


@ACTIVATION_LAYERS.register_module()
class HSigmoid(nn.Module):
    """Hard Sigmoid Module.

    Apply the hard sigmoid function:
    Hsigmoid(x) = min(max((x + bias) / divisor, min_value), max_value)
    Default: Hsigmoid(x) = min(max((x + 1) / 2, 0), 1)

    Args:
        bias (float): Bias of the input feature map. Default: 1.0.
        divisor (float): Divisor of the input feature map. Default: 2.0.
        min_value (float): Lower bound value. Default: 0.0.
        max_value (float): Upper bound value. Default: 1.0.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0):
        super(HSigmoid, self).__init__()
        self.bias = bias
        self.divisor = divisor
        assert self.divisor != 0
        self.min_value = min_value
        self.max_value = max_value

    def forward(self, x):
        x = (x + self.bias) / self.divisor
        return x.clamp_(self.min_value, self.max_value)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py
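Spot check of the default Hsigmoid(x) = min(max((x + 1) / 2, 0), 1) defined above:

import torch
act = HSigmoid()  # defined above
print(act(torch.tensor([-3., 0., 1.])))  # tensor([0.0000, 0.5000, 1.0000])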
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F

from annotator.uniformer.mmcv.utils import (TORCH_VERSION, build_from_cfg,
                                            digit_version)
from .registry import ACTIVATION_LAYERS

for module in [
        nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU,
        nn.Sigmoid, nn.Tanh
]:
    ACTIVATION_LAYERS.register_module(module=module)


@ACTIVATION_LAYERS.register_module(name='Clip')
@ACTIVATION_LAYERS.register_module()
class Clamp(nn.Module):
    """Clamp activation layer.

    This activation function is to clamp the feature map value within
    :math:`[min, max]`. More details can be found in ``torch.clamp()``.

    Args:
        min (Number | optional): Lower-bound of the range to be clamped to.
            Default to -1.
        max (Number | optional): Upper-bound of the range to be clamped to.
            Default to 1.
    """

    def __init__(self, min=-1., max=1.):
        super(Clamp, self).__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        """Forward function.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: Clamped tensor.
        """
        return torch.clamp(x, min=self.min, max=self.max)


class GELU(nn.Module):
    r"""Applies the Gaussian Error Linear Units function:

    .. math::
        \text{GELU}(x) = x * \Phi(x)

    where :math:`\Phi(x)` is the Cumulative Distribution Function for
    Gaussian Distribution.

    Shape:
        - Input: :math:`(N, *)` where `*` means, any number of additional
          dimensions
        - Output: :math:`(N, *)`, same shape as the input

    .. image:: scripts/activation_images/GELU.png

    Examples::

        >>> m = nn.GELU()
        >>> input = torch.randn(2)
        >>> output = m(input)
    """

    def forward(self, input):
        return F.gelu(input)


if (TORCH_VERSION == 'parrots'
        or digit_version(TORCH_VERSION) < digit_version('1.4')):
    ACTIVATION_LAYERS.register_module(module=GELU)
else:
    ACTIVATION_LAYERS.register_module(module=nn.GELU)


def build_activation_layer(cfg):
    """Build activation layer.

    Args:
        cfg (dict): The activation layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate an activation layer.

    Returns:
        nn.Module: Created activation layer.
    """
    return build_from_cfg(cfg, ACTIVATION_LAYERS)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/activation.py
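Config-driven construction, as used throughout the bricks; a sketch assuming `build_activation_layer` above is importable. Note that 'Clip' is just an alias registered for the Clamp class:

leaky = build_activation_layer(dict(type='LeakyReLU', negative_slope=0.1))
clip = build_activation_layer(dict(type='Clip', min=0., max=6.))
print(type(leaky).__name__, type(clip).__name__)  # LeakyReLU Clamp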
# Copyright (c) OpenMMLab. All rights reserved.
r"""Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/wrappers.py  # noqa: E501

Wrap some nn modules to support empty tensor input. Currently, these wrappers
are mainly used in mask heads like fcn_mask_head and maskiou_heads since mask
heads are trained on only positive RoIs.
"""
import math

import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair, _triple

from .registry import CONV_LAYERS, UPSAMPLE_LAYERS

if torch.__version__ == 'parrots':
    TORCH_VERSION = torch.__version__
else:
    # torch.__version__ could be 1.3.1+cu92, we only need the first two
    # for comparison
    TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])


def obsolete_torch_version(torch_version, version_threshold):
    return torch_version == 'parrots' or torch_version <= version_threshold


class NewEmptyTensorOp(torch.autograd.Function):

    @staticmethod
    def forward(ctx, x, new_shape):
        ctx.shape = x.shape
        return x.new_empty(new_shape)

    @staticmethod
    def backward(ctx, grad):
        shape = ctx.shape
        return NewEmptyTensorOp.apply(grad, shape), None


@CONV_LAYERS.register_module('Conv', force=True)
class Conv2d(nn.Conv2d):

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
                                     self.padding, self.stride,
                                     self.dilation):
                o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x)


@CONV_LAYERS.register_module('Conv3d', force=True)
class Conv3d(nn.Conv3d):

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d in zip(x.shape[-3:], self.kernel_size,
                                     self.padding, self.stride,
                                     self.dilation):
                o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x)


@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv')
@UPSAMPLE_LAYERS.register_module('deconv', force=True)
class ConvTranspose2d(nn.ConvTranspose2d):

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size,
                                         self.padding, self.stride,
                                         self.dilation, self.output_padding):
                out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x)


@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv3d')
@UPSAMPLE_LAYERS.register_module('deconv3d', force=True)
class ConvTranspose3d(nn.ConvTranspose3d):

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d, op in zip(x.shape[-3:], self.kernel_size,
                                         self.padding, self.stride,
                                         self.dilation, self.output_padding):
                out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x)


class MaxPool2d(nn.MaxPool2d):

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            out_shape = list(x.shape[:2])
            for i, k, p, s, d in zip(x.shape[-2:], _pair(self.kernel_size),
                                     _pair(self.padding), _pair(self.stride),
                                     _pair(self.dilation)):
                o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
                o = math.ceil(o) if self.ceil_mode else math.floor(o)
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            return empty

        return super().forward(x)


class MaxPool3d(nn.MaxPool3d):

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            out_shape = list(x.shape[:2])
            for i, k, p, s, d in zip(x.shape[-3:], _triple(self.kernel_size),
                                     _triple(self.padding),
                                     _triple(self.stride),
                                     _triple(self.dilation)):
                o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
                o = math.ceil(o) if self.ceil_mode else math.floor(o)
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            return empty

        return super().forward(x)


class Linear(torch.nn.Linear):

    def forward(self, x):
        # empty tensor forward of Linear layer is supported in Pytorch 1.6
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 5)):
            out_shape = [x.shape[0], self.out_features]
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/wrappers.py
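A hedged sketch of the empty-tensor behaviour these wrappers add. On torch <= 1.4 the wrapper short-circuits with the usual conv output-shape arithmetic; on newer torch the call falls through to the stock layer, which handles empty batches itself, so the result is the same either way:

import torch
conv = Conv2d(3, 8, kernel_size=3, padding=1)  # wrapper defined above
x = torch.randn(0, 3, 16, 16)                  # zero-sized batch
out = conv(x)
print(out.shape)  # torch.Size([0, 8, 16, 16])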
# Copyright (c) OpenMMLab. All rights reserved. import copy import warnings import torch import torch.nn as nn from annotator.uniformer.mmcv import ConfigDict, deprecated_api_warning from annotator.uniformer.mmcv.cnn import Linear, build_activation_layer, build_norm_layer from annotator.uniformer.mmcv.runner.base_module import BaseModule, ModuleList, Sequential from annotator.uniformer.mmcv.utils import build_from_cfg from .drop import build_dropout from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING, TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE) # Avoid BC-breaking of importing MultiScaleDeformableAttention from this file try: from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention # noqa F401 warnings.warn( ImportWarning( '``MultiScaleDeformableAttention`` has been moved to ' '``mmcv.ops.multi_scale_deform_attn``, please change original path ' # noqa E501 '``from annotator.uniformer.mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention`` ' # noqa E501 'to ``from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention`` ' # noqa E501 )) except ImportError: warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from ' '``mmcv.ops.multi_scale_deform_attn``, ' 'You should install ``mmcv-full`` if you need this module. ') def build_positional_encoding(cfg, default_args=None): """Builder for Position Encoding.""" return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args) def build_attention(cfg, default_args=None): """Builder for attention.""" return build_from_cfg(cfg, ATTENTION, default_args) def build_feedforward_network(cfg, default_args=None): """Builder for feed-forward network (FFN).""" return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args) def build_transformer_layer(cfg, default_args=None): """Builder for transformer layer.""" return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args) def build_transformer_layer_sequence(cfg, default_args=None): """Builder for transformer encoder and transformer decoder.""" return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args) @ATTENTION.register_module() class MultiheadAttention(BaseModule): """A wrapper for ``torch.nn.MultiheadAttention``. This module implements MultiheadAttention with identity connection, and positional encoding is also passed as input. Args: embed_dims (int): The embedding dimension. num_heads (int): Parallel attention heads. attn_drop (float): A Dropout layer on attn_output_weights. Default: 0.0. proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. Default: 0.0. dropout_layer (obj:`ConfigDict`): The dropout_layer used when adding the shortcut. init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. Default: None. batch_first (bool): When it is True, Key, Query and Value are shape of (batch, n, embed_dim), otherwise (n, batch, embed_dim). Default to False. 
""" def __init__(self, embed_dims, num_heads, attn_drop=0., proj_drop=0., dropout_layer=dict(type='Dropout', drop_prob=0.), init_cfg=None, batch_first=False, **kwargs): super(MultiheadAttention, self).__init__(init_cfg) if 'dropout' in kwargs: warnings.warn('The arguments `dropout` in MultiheadAttention ' 'has been deprecated, now you can separately ' 'set `attn_drop`(float), proj_drop(float), ' 'and `dropout_layer`(dict) ') attn_drop = kwargs['dropout'] dropout_layer['drop_prob'] = kwargs.pop('dropout') self.embed_dims = embed_dims self.num_heads = num_heads self.batch_first = batch_first self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop, **kwargs) self.proj_drop = nn.Dropout(proj_drop) self.dropout_layer = build_dropout( dropout_layer) if dropout_layer else nn.Identity() @deprecated_api_warning({'residual': 'identity'}, cls_name='MultiheadAttention') def forward(self, query, key=None, value=None, identity=None, query_pos=None, key_pos=None, attn_mask=None, key_padding_mask=None, **kwargs): """Forward function for `MultiheadAttention`. **kwargs allow passing a more general data flow when combining with other operations in `transformerlayer`. Args: query (Tensor): The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, else [bs, num_queries embed_dims]. key (Tensor): The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, else [bs, num_keys, embed_dims] . If None, the ``query`` will be used. Defaults to None. value (Tensor): The value tensor with same shape as `key`. Same in `nn.MultiheadAttention.forward`. Defaults to None. If None, the `key` will be used. identity (Tensor): This tensor, with the same shape as x, will be used for the identity link. If None, `x` will be used. Defaults to None. query_pos (Tensor): The positional encoding for query, with the same shape as `x`. If not None, it will be added to `x` before forward function. Defaults to None. key_pos (Tensor): The positional encoding for `key`, with the same shape as `key`. Defaults to None. If not None, it will be added to `key` before forward function. If None, and `query_pos` has the same shape as `key`, then `query_pos` will be used for `key_pos`. Defaults to None. attn_mask (Tensor): ByteTensor mask with shape [num_queries, num_keys]. Same in `nn.MultiheadAttention.forward`. Defaults to None. key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. Defaults to None. Returns: Tensor: forwarded results with shape [num_queries, bs, embed_dims] if self.batch_first is False, else [bs, num_queries embed_dims]. """ if key is None: key = query if value is None: value = key if identity is None: identity = query if key_pos is None: if query_pos is not None: # use query_pos if key_pos is not available if query_pos.shape == key.shape: key_pos = query_pos else: warnings.warn(f'position encoding of key is' f'missing in {self.__class__.__name__}.') if query_pos is not None: query = query + query_pos if key_pos is not None: key = key + key_pos # Because the dataflow('key', 'query', 'value') of # ``torch.nn.MultiheadAttention`` is (num_query, batch, # embed_dims), We should adjust the shape of dataflow from # batch_first (batch, num_query, embed_dims) to num_query_first # (num_query ,batch, embed_dims), and recover ``attn_output`` # from num_query_first to batch_first. 
if self.batch_first: query = query.transpose(0, 1) key = key.transpose(0, 1) value = value.transpose(0, 1) out = self.attn( query=query, key=key, value=value, attn_mask=attn_mask, key_padding_mask=key_padding_mask)[0] if self.batch_first: out = out.transpose(0, 1) return identity + self.dropout_layer(self.proj_drop(out)) @FEEDFORWARD_NETWORK.register_module() class FFN(BaseModule): """Implements feed-forward networks (FFNs) with identity connection. Args: embed_dims (int): The feature dimension. Same as `MultiheadAttention`. Defaults: 256. feedforward_channels (int): The hidden dimension of FFNs. Defaults: 1024. num_fcs (int, optional): The number of fully-connected layers in FFNs. Default: 2. act_cfg (dict, optional): The activation config for FFNs. Default: dict(type='ReLU') ffn_drop (float, optional): Probability of an element to be zeroed in FFN. Default 0.0. add_identity (bool, optional): Whether to add the identity connection. Default: `True`. dropout_layer (obj:`ConfigDict`): The dropout_layer used when adding the shortcut. init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. Default: None. """ @deprecated_api_warning( { 'dropout': 'ffn_drop', 'add_residual': 'add_identity' }, cls_name='FFN') def __init__(self, embed_dims=256, feedforward_channels=1024, num_fcs=2, act_cfg=dict(type='ReLU', inplace=True), ffn_drop=0., dropout_layer=None, add_identity=True, init_cfg=None, **kwargs): super(FFN, self).__init__(init_cfg) assert num_fcs >= 2, 'num_fcs should be no less ' \ f'than 2. got {num_fcs}.' self.embed_dims = embed_dims self.feedforward_channels = feedforward_channels self.num_fcs = num_fcs self.act_cfg = act_cfg self.activate = build_activation_layer(act_cfg) layers = [] in_channels = embed_dims for _ in range(num_fcs - 1): layers.append( Sequential( Linear(in_channels, feedforward_channels), self.activate, nn.Dropout(ffn_drop))) in_channels = feedforward_channels layers.append(Linear(feedforward_channels, embed_dims)) layers.append(nn.Dropout(ffn_drop)) self.layers = Sequential(*layers) self.dropout_layer = build_dropout( dropout_layer) if dropout_layer else torch.nn.Identity() self.add_identity = add_identity @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN') def forward(self, x, identity=None): """Forward function for `FFN`. The function would add x to the output tensor if residue is None. """ out = self.layers(x) if not self.add_identity: return self.dropout_layer(out) if identity is None: identity = x return identity + self.dropout_layer(out) @TRANSFORMER_LAYER.register_module() class BaseTransformerLayer(BaseModule): """Base `TransformerLayer` for vision transformer. It can be built from `mmcv.ConfigDict` and support more flexible customization, for example, using any number of `FFN or LN ` and use different kinds of `attention` by specifying a list of `ConfigDict` named `attn_cfgs`. It is worth mentioning that it supports `prenorm` when you specifying `norm` as the first element of `operation_order`. More details about the `prenorm`: `On Layer Normalization in the Transformer Architecture <https://arxiv.org/abs/2002.04745>`_ . Args: attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): Configs for `self_attention` or `cross_attention` modules, The order of the configs in the list should be consistent with corresponding attentions in operation_order. If it is a dict, all of the attention modules in operation_order will be built with this config. Default: None. 
        ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None):
            Configs for FFN. The order of the configs in the list should be
            consistent with corresponding ffn in operation_order. If it is
            a dict, all of the FFN modules in operation_order will be built
            with this config.
        operation_order (tuple[str]): The execution order of operation in
            transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').
            `prenorm` is supported when you specify the first element as
            `norm`. Default: None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='LN').
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        batch_first (bool): Key, Query and Value are shape
            of (batch, n, embed_dim)
            or (n, batch, embed_dim). Default to False.
    """

    def __init__(self,
                 attn_cfgs=None,
                 ffn_cfgs=dict(
                     type='FFN',
                     embed_dims=256,
                     feedforward_channels=1024,
                     num_fcs=2,
                     ffn_drop=0.,
                     act_cfg=dict(type='ReLU', inplace=True),
                 ),
                 operation_order=None,
                 norm_cfg=dict(type='LN'),
                 init_cfg=None,
                 batch_first=False,
                 **kwargs):

        deprecated_args = dict(
            feedforward_channels='feedforward_channels',
            ffn_dropout='ffn_drop',
            ffn_num_fcs='num_fcs')
        for ori_name, new_name in deprecated_args.items():
            if ori_name in kwargs:
                warnings.warn(
                    f'The argument `{ori_name}` in BaseTransformerLayer '
                    f'has been deprecated; now you should set `{new_name}` '
                    f'and other FFN related arguments '
                    f'in a dict named `ffn_cfgs`.')
                ffn_cfgs[new_name] = kwargs[ori_name]

        super(BaseTransformerLayer, self).__init__(init_cfg)

        self.batch_first = batch_first

        assert set(operation_order) & set(
            ['self_attn', 'norm', 'ffn', 'cross_attn']) == \
            set(operation_order), f'The operation_order of' \
            f' {self.__class__.__name__} should ' \
            f'contain only the four operation types ' \
            f"{['self_attn', 'norm', 'ffn', 'cross_attn']}"

        num_attn = operation_order.count('self_attn') + operation_order.count(
            'cross_attn')
        if isinstance(attn_cfgs, dict):
            attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
        else:
            assert num_attn == len(attn_cfgs), f'The length ' \
                f'of attn_cfgs {len(attn_cfgs)} is ' \
                f'not consistent with the number of attentions ' \
                f'in operation_order {operation_order}.'

        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        self.pre_norm = operation_order[0] == 'norm'
        self.attentions = ModuleList()

        index = 0
        for operation_name in operation_order:
            if operation_name in ['self_attn', 'cross_attn']:
                if 'batch_first' in attn_cfgs[index]:
                    assert self.batch_first == attn_cfgs[index]['batch_first']
                else:
                    attn_cfgs[index]['batch_first'] = self.batch_first
                attention = build_attention(attn_cfgs[index])
                # Some custom attentions used as `self_attn`
                # or `cross_attn` can have different behavior.
                attention.operation_name = operation_name
                self.attentions.append(attention)
                index += 1

        self.embed_dims = self.attentions[0].embed_dims

        self.ffns = ModuleList()
        num_ffns = operation_order.count('ffn')
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = ConfigDict(ffn_cfgs)
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert len(ffn_cfgs) == num_ffns
        for ffn_index in range(num_ffns):
            if 'embed_dims' not in ffn_cfgs[ffn_index]:
                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
            else:
                assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims
            self.ffns.append(
                build_feedforward_network(ffn_cfgs[ffn_index],
                                          dict(type='FFN')))

        self.norms = ModuleList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])

    def forward(self,
                query,
                key=None,
                value=None,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerDecoderLayer`.

        **kwargs contains some specific arguments of attentions.

        Args:
            query (Tensor): The input query with shape
                [num_queries, bs, embed_dims] if
                self.batch_first is False, else
                [bs, num_queries, embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims].
            value (Tensor): The value tensor with same shape as `key`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor] | None): 2D Tensors used in the
                calculation of the corresponding attention. The length of
                the list should equal the number of `attention` entries in
                `operation_order`. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in `self_attn` layer.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor for `key`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: forwarded results with shape
            [num_queries, bs, embed_dims].
""" norm_index = 0 attn_index = 0 ffn_index = 0 identity = query if attn_masks is None: attn_masks = [None for _ in range(self.num_attn)] elif isinstance(attn_masks, torch.Tensor): attn_masks = [ copy.deepcopy(attn_masks) for _ in range(self.num_attn) ] warnings.warn(f'Use same attn_mask in all attentions in ' f'{self.__class__.__name__} ') else: assert len(attn_masks) == self.num_attn, f'The length of ' \ f'attn_masks {len(attn_masks)} must be equal ' \ f'to the number of attention in ' \ f'operation_order {self.num_attn}' for layer in self.operation_order: if layer == 'self_attn': temp_key = temp_value = query query = self.attentions[attn_index]( query, temp_key, temp_value, identity if self.pre_norm else None, query_pos=query_pos, key_pos=query_pos, attn_mask=attn_masks[attn_index], key_padding_mask=query_key_padding_mask, **kwargs) attn_index += 1 identity = query elif layer == 'norm': query = self.norms[norm_index](query) norm_index += 1 elif layer == 'cross_attn': query = self.attentions[attn_index]( query, key, value, identity if self.pre_norm else None, query_pos=query_pos, key_pos=key_pos, attn_mask=attn_masks[attn_index], key_padding_mask=key_padding_mask, **kwargs) attn_index += 1 identity = query elif layer == 'ffn': query = self.ffns[ffn_index]( query, identity if self.pre_norm else None) ffn_index += 1 return query @TRANSFORMER_LAYER_SEQUENCE.register_module() class TransformerLayerSequence(BaseModule): """Base class for TransformerEncoder and TransformerDecoder in vision transformer. As base-class of Encoder and Decoder in vision transformer. Support customization such as specifying different kind of `transformer_layer` in `transformer_coder`. Args: transformerlayer (list[obj:`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict`): Config of transformerlayer in TransformerCoder. If it is obj:`mmcv.ConfigDict`, it would be repeated `num_layer` times to a list[`mmcv.ConfigDict`]. Default: None. num_layers (int): The number of `TransformerLayer`. Default: None. init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. Default: None. """ def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None): super(TransformerLayerSequence, self).__init__(init_cfg) if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers) ] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.embed_dims = self.layers[0].embed_dims self.pre_norm = self.layers[0].pre_norm def forward(self, query, key, value, query_pos=None, key_pos=None, attn_masks=None, query_key_padding_mask=None, key_padding_mask=None, **kwargs): """Forward function for `TransformerCoder`. Args: query (Tensor): Input query with shape `(num_queries, bs, embed_dims)`. key (Tensor): The key tensor with shape `(num_keys, bs, embed_dims)`. value (Tensor): The value tensor with shape `(num_keys, bs, embed_dims)`. query_pos (Tensor): The positional encoding for `query`. Default: None. key_pos (Tensor): The positional encoding for `key`. Default: None. attn_masks (List[Tensor], optional): Each element is 2D Tensor which is used in calculation of corresponding attention in operation_order. Default: None. query_key_padding_mask (Tensor): ByteTensor for `query`, with shape [bs, num_queries]. Only used in self-attention Default: None. 
            key_padding_mask (Tensor): ByteTensor for `key`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: results with shape [num_queries, bs, embed_dims].
        """
        for layer in self.layers:
            query = layer(
                query,
                key,
                value,
                query_pos=query_pos,
                key_pos=key_pos,
                attn_masks=attn_masks,
                query_key_padding_mask=query_key_padding_mask,
                key_padding_mask=key_padding_mask,
                **kwargs)
        return query
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/transformer.py
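# Illustrative usage sketch for the transformer bricks above (a minimal
# example, not part of the original file; it assumes the
# `annotator.uniformer.mmcv` package in this dump is importable and that its
# registries are wired up on import). With batch_first=False, tensors are
# laid out as (num_queries, batch, embed_dims), matching the docstrings.
import torch

from annotator.uniformer.mmcv.cnn.bricks.transformer import \
    BaseTransformerLayer

layer = BaseTransformerLayer(
    attn_cfgs=dict(type='MultiheadAttention', embed_dims=256, num_heads=8),
    ffn_cfgs=dict(type='FFN', embed_dims=256, feedforward_channels=1024),
    operation_order=('self_attn', 'norm', 'ffn', 'norm'))

query = torch.rand(100, 2, 256)  # (num_queries, bs, embed_dims)
out = layer(query)  # key/value default to query inside 'self_attn'
assert out.shape == (100, 2, 256)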
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from .registry import ACTIVATION_LAYERS @ACTIVATION_LAYERS.register_module() class Swish(nn.Module): """Swish Module. This module applies the swish function: .. math:: Swish(x) = x * Sigmoid(x) Returns: Tensor: The output tensor. """ def __init__(self): super(Swish, self).__init__() def forward(self, x): return x * torch.sigmoid(x)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/swish.py
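# Illustrative check for Swish above (not part of the original file):
# Swish(x) = x * sigmoid(x), which matches torch.nn.SiLU on recent PyTorch.
import torch

from annotator.uniformer.mmcv.cnn.bricks.swish import Swish

x = torch.randn(4)
swish = Swish()
assert torch.allclose(swish(x), x * torch.sigmoid(x))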
import inspect
import platform

from .registry import PLUGIN_LAYERS

if platform.system() == 'Windows':
    import regex as re
else:
    import re


def infer_abbr(class_type):
    """Infer abbreviation from the class name.

    This method will infer the abbreviation to map class types to
    abbreviations.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: Otherwise, the abbreviation falls back to snake case of class
    name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``.

    Args:
        class_type (type): The plugin layer type.

    Returns:
        str: The inferred abbreviation.
    """

    def camel2snake(word):
        """Convert camel case word into snake case.

        Modified from `inflection lib
        <https://inflection.readthedocs.io/en/latest/#inflection.underscore>`_.

        Example::

            >>> camel2snake("FancyBlock")
            'fancy_block'
        """

        word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
        word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word)
        word = word.replace('-', '_')
        return word.lower()

    if not inspect.isclass(class_type):
        raise TypeError(
            f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    else:
        return camel2snake(class_type.__name__)


def build_plugin_layer(cfg, postfix='', **kwargs):
    """Build plugin layer.

    Args:
        cfg (None or dict): cfg should contain:

            - type (str): identify plugin layer type.
            - layer args: args needed to instantiate a plugin layer.
        postfix (int, str): appended into the abbreviation to
            create a named layer. Default: ''.

    Returns:
        tuple[str, nn.Module]:
            name (str): abbreviation + postfix
            layer (nn.Module): created plugin layer
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')

    cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in PLUGIN_LAYERS:
        raise KeyError(f'Unrecognized plugin type {layer_type}')

    plugin_layer = PLUGIN_LAYERS.get(layer_type)
    abbr = infer_abbr(plugin_layer)

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    layer = plugin_layer(**kwargs, **cfg_)

    return name, layer
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/plugin.py
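# Illustrative usage sketch for build_plugin_layer above (not part of the
# original file; it assumes the package in this dump is importable and that
# the bricks package registers its layers on import). ConvModule, defined
# later in this dump, registers itself in PLUGIN_LAYERS with
# ``_abbr_ = 'conv_block'``, so Rule 1 of infer_abbr applies.
from annotator.uniformer.mmcv.cnn.bricks.plugin import build_plugin_layer

name, layer = build_plugin_layer(
    dict(type='ConvModule', in_channels=3, out_channels=8, kernel_size=3),
    postfix=1)
assert name == 'conv_block1'  # abbreviation + postfix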
# Copyright (c) OpenMMLab. All rights reserved.
from torch import nn

from .registry import CONV_LAYERS

CONV_LAYERS.register_module('Conv1d', module=nn.Conv1d)
CONV_LAYERS.register_module('Conv2d', module=nn.Conv2d)
CONV_LAYERS.register_module('Conv3d', module=nn.Conv3d)
CONV_LAYERS.register_module('Conv', module=nn.Conv2d)


def build_conv_layer(cfg, *args, **kwargs):
    """Build convolution layer.

    Args:
        cfg (None or dict): The conv layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate a conv layer.
        args (argument list): Arguments passed to the `__init__`
            method of the corresponding conv layer.
        kwargs (keyword arguments): Keyword arguments passed to the
            `__init__` method of the corresponding conv layer.

    Returns:
        nn.Module: Created conv layer.
    """
    if cfg is None:
        cfg_ = dict(type='Conv2d')
    else:
        if not isinstance(cfg, dict):
            raise TypeError('cfg must be a dict')
        if 'type' not in cfg:
            raise KeyError('the cfg dict must contain the key "type"')
        cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in CONV_LAYERS:
        raise KeyError(f'Unrecognized conv type {layer_type}')
    else:
        conv_layer = CONV_LAYERS.get(layer_type)

    layer = conv_layer(*args, **kwargs, **cfg_)

    return layer
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/conv.py
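# Illustrative usage sketch for build_conv_layer above (not part of the
# original file; assumes the package in this dump is importable). A cfg of
# None falls back to plain Conv2d, as documented.
from annotator.uniformer.mmcv.cnn.bricks.conv import build_conv_layer

conv = build_conv_layer(dict(type='Conv2d'), 3, 16, kernel_size=3, padding=1)
default_conv = build_conv_layer(None, 3, 16, kernel_size=3)
assert type(conv) is type(default_conv)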
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from annotator.uniformer.mmcv.utils import _BatchNorm, _InstanceNorm from ..utils import constant_init, kaiming_init from .activation import build_activation_layer from .conv import build_conv_layer from .norm import build_norm_layer from .padding import build_padding_layer from .registry import PLUGIN_LAYERS @PLUGIN_LAYERS.register_module() class ConvModule(nn.Module): """A conv block that bundles conv/norm/activation layers. This block simplifies the usage of convolution layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). It is based upon three build methods: `build_conv_layer()`, `build_norm_layer()` and `build_activation_layer()`. Besides, we add some additional features in this module. 1. Automatically set `bias` of the conv layer. 2. Spectral norm is supported. 3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only supports zero and circular padding, and we add "reflect" padding mode. Args: in_channels (int): Number of channels in the input feature map. Same as that in ``nn._ConvNd``. out_channels (int): Number of channels produced by the convolution. Same as that in ``nn._ConvNd``. kernel_size (int | tuple[int]): Size of the convolving kernel. Same as that in ``nn._ConvNd``. stride (int | tuple[int]): Stride of the convolution. Same as that in ``nn._ConvNd``. padding (int | tuple[int]): Zero-padding added to both sides of the input. Same as that in ``nn._ConvNd``. dilation (int | tuple[int]): Spacing between kernel elements. Same as that in ``nn._ConvNd``. groups (int): Number of blocked connections from input channels to output channels. Same as that in ``nn._ConvNd``. bias (bool | str): If specified as `auto`, it will be decided by the norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise False. Default: "auto". conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: None. act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU'). inplace (bool): Whether to use inplace mode for activation. Default: True. with_spectral_norm (bool): Whether use spectral norm in conv module. Default: False. padding_mode (str): If the `padding_mode` has not been supported by current `Conv2d` in PyTorch, we will use our own padding layer instead. Currently, we support ['zeros', 'circular'] with official implementation and ['reflect'] with our own implementation. Default: 'zeros'. order (tuple[str]): The order of conv/norm/activation layers. It is a sequence of "conv", "norm" and "act". Common examples are ("conv", "norm", "act") and ("act", "conv", "norm"). Default: ('conv', 'norm', 'act'). 
""" _abbr_ = 'conv_block' def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias='auto', conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), inplace=True, with_spectral_norm=False, padding_mode='zeros', order=('conv', 'norm', 'act')): super(ConvModule, self).__init__() assert conv_cfg is None or isinstance(conv_cfg, dict) assert norm_cfg is None or isinstance(norm_cfg, dict) assert act_cfg is None or isinstance(act_cfg, dict) official_padding_mode = ['zeros', 'circular'] self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.inplace = inplace self.with_spectral_norm = with_spectral_norm self.with_explicit_padding = padding_mode not in official_padding_mode self.order = order assert isinstance(self.order, tuple) and len(self.order) == 3 assert set(order) == set(['conv', 'norm', 'act']) self.with_norm = norm_cfg is not None self.with_activation = act_cfg is not None # if the conv layer is before a norm layer, bias is unnecessary. if bias == 'auto': bias = not self.with_norm self.with_bias = bias if self.with_explicit_padding: pad_cfg = dict(type=padding_mode) self.padding_layer = build_padding_layer(pad_cfg, padding) # reset padding to 0 for conv module conv_padding = 0 if self.with_explicit_padding else padding # build convolution layer self.conv = build_conv_layer( conv_cfg, in_channels, out_channels, kernel_size, stride=stride, padding=conv_padding, dilation=dilation, groups=groups, bias=bias) # export the attributes of self.conv to a higher level for convenience self.in_channels = self.conv.in_channels self.out_channels = self.conv.out_channels self.kernel_size = self.conv.kernel_size self.stride = self.conv.stride self.padding = padding self.dilation = self.conv.dilation self.transposed = self.conv.transposed self.output_padding = self.conv.output_padding self.groups = self.conv.groups if self.with_spectral_norm: self.conv = nn.utils.spectral_norm(self.conv) # build normalization layers if self.with_norm: # norm layer is after conv layer if order.index('norm') > order.index('conv'): norm_channels = out_channels else: norm_channels = in_channels self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels) self.add_module(self.norm_name, norm) if self.with_bias: if isinstance(norm, (_BatchNorm, _InstanceNorm)): warnings.warn( 'Unnecessary conv bias before batch/instance norm') else: self.norm_name = None # build activation layer if self.with_activation: act_cfg_ = act_cfg.copy() # nn.Tanh has no 'inplace' argument if act_cfg_['type'] not in [ 'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish' ]: act_cfg_.setdefault('inplace', inplace) self.activate = build_activation_layer(act_cfg_) # Use msra init by default self.init_weights() @property def norm(self): if self.norm_name: return getattr(self, self.norm_name) else: return None def init_weights(self): # 1. It is mainly for customized conv layers with their own # initialization manners by calling their own ``init_weights()``, # and we do not want ConvModule to override the initialization. # 2. For customized conv layers without their own initialization # manners (that is, they don't have their own ``init_weights()``) # and PyTorch's conv layers, they will be initialized by # this method with default ``kaiming_init``. # Note: For PyTorch's conv layers, they will be overwritten by our # initialization implementation using default ``kaiming_init``. 
if not hasattr(self.conv, 'init_weights'): if self.with_activation and self.act_cfg['type'] == 'LeakyReLU': nonlinearity = 'leaky_relu' a = self.act_cfg.get('negative_slope', 0.01) else: nonlinearity = 'relu' a = 0 kaiming_init(self.conv, a=a, nonlinearity=nonlinearity) if self.with_norm: constant_init(self.norm, 1, bias=0) def forward(self, x, activate=True, norm=True): for layer in self.order: if layer == 'conv': if self.with_explicit_padding: x = self.padding_layer(x) x = self.conv(x) elif layer == 'norm' and norm and self.with_norm: x = self.norm(x) elif layer == 'act' and activate and self.with_activation: x = self.activate(x) return x
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/conv_module.py
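# Illustrative usage sketch for ConvModule above (not part of the original
# file; assumes the package in this dump is importable): a conv bundled
# with BN and ReLU in the default ('conv', 'norm', 'act') order.
import torch

from annotator.uniformer.mmcv.cnn.bricks.conv_module import ConvModule

block = ConvModule(3, 16, 3, padding=1, norm_cfg=dict(type='BN'))
# bias='auto' resolves to False here because a norm layer follows the conv.
assert block.conv.bias is None
y = block(torch.rand(1, 3, 32, 32))
assert y.shape == (1, 16, 32, 32)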
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from .registry import PADDING_LAYERS PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d) PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d) PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d) def build_padding_layer(cfg, *args, **kwargs): """Build padding layer. Args: cfg (None or dict): The padding layer config, which should contain: - type (str): Layer type. - layer args: Args needed to instantiate a padding layer. Returns: nn.Module: Created padding layer. """ if not isinstance(cfg, dict): raise TypeError('cfg must be a dict') if 'type' not in cfg: raise KeyError('the cfg dict must contain the key "type"') cfg_ = cfg.copy() padding_type = cfg_.pop('type') if padding_type not in PADDING_LAYERS: raise KeyError(f'Unrecognized padding type {padding_type}.') else: padding_layer = PADDING_LAYERS.get(padding_type) layer = padding_layer(*args, **kwargs, **cfg_) return layer
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/padding.py
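# Illustrative usage sketch for build_padding_layer above (not part of the
# original file): the 'reflect' type maps to nn.ReflectionPad2d, and the
# positional args are forwarded to its constructor.
import torch

from annotator.uniformer.mmcv.cnn.bricks.padding import build_padding_layer

pad = build_padding_layer(dict(type='reflect'), 2)
x = torch.rand(1, 3, 8, 8)
assert pad(x).shape == (1, 3, 12, 12)  # 2 pixels added on every side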
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn class Scale(nn.Module): """A learnable scale parameter. This layer scales the input by a learnable factor. It multiplies a learnable scale parameter of shape (1,) with input of any shape. Args: scale (float): Initial value of scale factor. Default: 1.0 """ def __init__(self, scale=1.0): super(Scale, self).__init__() self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) def forward(self, x): return x * self.scale
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/scale.py
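# Illustrative check for Scale above (not part of the original file): the
# factor is a learnable nn.Parameter, so it receives gradients like any
# other weight.
import torch

from annotator.uniformer.mmcv.cnn.bricks.scale import Scale

s = Scale(scale=0.5)
x = torch.ones(2, 3)
assert torch.allclose(s(x), x * 0.5)
assert s.scale.requires_grad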
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from annotator.uniformer.mmcv import build_from_cfg from .registry import DROPOUT_LAYERS def drop_path(x, drop_prob=0., training=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). We follow the implementation https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501 """ if drop_prob == 0. or not training: return x keep_prob = 1 - drop_prob # handle tensors with different dimensions, not just 4D tensors. shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) random_tensor = keep_prob + torch.rand( shape, dtype=x.dtype, device=x.device) output = x.div(keep_prob) * random_tensor.floor() return output @DROPOUT_LAYERS.register_module() class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). We follow the implementation https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501 Args: drop_prob (float): Probability of the path to be zeroed. Default: 0.1 """ def __init__(self, drop_prob=0.1): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) @DROPOUT_LAYERS.register_module() class Dropout(nn.Dropout): """A wrapper for ``torch.nn.Dropout``, We rename the ``p`` of ``torch.nn.Dropout`` to ``drop_prob`` so as to be consistent with ``DropPath`` Args: drop_prob (float): Probability of the elements to be zeroed. Default: 0.5. inplace (bool): Do the operation inplace or not. Default: False. """ def __init__(self, drop_prob=0.5, inplace=False): super().__init__(p=drop_prob, inplace=inplace) def build_dropout(cfg, default_args=None): """Builder for drop out layers.""" return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/drop.py
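# Illustrative check for DropPath above (not part of the original file): at
# inference it is the identity; in training, whole samples are zeroed and
# survivors are scaled by 1 / keep_prob so the expected value is preserved.
import torch

from annotator.uniformer.mmcv.cnn.bricks.drop import DropPath

dp = DropPath(drop_prob=0.2)
dp.eval()
x = torch.rand(4, 16)
assert torch.equal(dp(x), x)  # identity at inference time

dp.train()
out = dp(x)  # each row is either all zeros or x_row / 0.8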
# Copyright (c) OpenMMLab. All rights reserved. import math from torch import nn from torch.nn import functional as F from .registry import CONV_LAYERS @CONV_LAYERS.register_module() class Conv2dAdaptivePadding(nn.Conv2d): """Implementation of 2D convolution in tensorflow with `padding` as "same", which applies padding to input (if needed) so that input image gets fully covered by filter and stride you specified. For stride 1, this will ensure that output image size is same as input. For stride of 2, output dimensions will be half, for example. Args: in_channels (int): Number of channels in the input image out_channels (int): Number of channels produced by the convolution kernel_size (int or tuple): Size of the convolving kernel stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True`` """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) def forward(self, x): img_h, img_w = x.size()[-2:] kernel_h, kernel_w = self.weight.size()[-2:] stride_h, stride_w = self.stride output_h = math.ceil(img_h / stride_h) output_w = math.ceil(img_w / stride_w) pad_h = ( max((output_h - 1) * self.stride[0] + (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0)) pad_w = ( max((output_w - 1) * self.stride[1] + (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0)) if pad_h > 0 or pad_w > 0: x = F.pad(x, [ pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2 ]) return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py
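# Illustrative shape check for Conv2dAdaptivePadding above (not part of the
# original file): with TensorFlow-style "same" padding, the output spatial
# size is ceil(input / stride), regardless of the kernel size.
import torch

from annotator.uniformer.mmcv.cnn.bricks.conv2d_adaptive_padding import \
    Conv2dAdaptivePadding

conv = Conv2dAdaptivePadding(3, 8, kernel_size=3, stride=2)
x = torch.rand(1, 3, 15, 15)
assert conv(x).shape[-2:] == (8, 8)  # ceil(15 / 2) == 8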
# Copyright (c) OpenMMLab. All rights reserved. from .flops_counter import get_model_complexity_info from .fuse_conv_bn import fuse_conv_bn from .sync_bn import revert_sync_batchnorm from .weight_init import (INITIALIZERS, Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init) __all__ = [ 'get_model_complexity_info', 'bias_init_with_prob', 'caffe2_xavier_init', 'constant_init', 'kaiming_init', 'normal_init', 'trunc_normal_init', 'uniform_init', 'xavier_init', 'fuse_conv_bn', 'initialize', 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit', 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit', 'Caffe2XavierInit', 'revert_sync_batchnorm' ]
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/utils/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn


def _fuse_conv_bn(conv, bn):
    """Fuse conv and bn into one module.

    Args:
        conv (nn.Module): Conv to be fused.
        bn (nn.Module): BN to be fused.

    Returns:
        nn.Module: Fused module.
    """
    conv_w = conv.weight
    conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
        bn.running_mean)

    factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    conv.weight = nn.Parameter(conv_w *
                               factor.reshape([conv.out_channels, 1, 1, 1]))
    conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
    return conv


def fuse_conv_bn(module):
    """Recursively fuse conv and bn in a module.

    During inference, the functionality of batch norm layers is turned off
    and only the per-channel running mean and variance are used, which
    exposes the chance to fuse it with the preceding conv layers to save
    computations and simplify network structures.

    Args:
        module (nn.Module): Module to be fused.

    Returns:
        nn.Module: Fused module.
    """
    last_conv = None
    last_conv_name = None

    for name, child in module.named_children():
        if isinstance(child,
                      (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
            if last_conv is None:  # only fuse BN that is after Conv
                continue
            fused_conv = _fuse_conv_bn(last_conv, child)
            module._modules[last_conv_name] = fused_conv
            # To reduce changes, set BN as Identity instead of deleting it.
            module._modules[name] = nn.Identity()
            last_conv = None
        elif isinstance(child, nn.Conv2d):
            last_conv = child
            last_conv_name = name
        else:
            fuse_conv_bn(child)
    return module
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py
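# Illustrative equivalence check for fuse_conv_bn above (not part of the
# original file): in eval mode the fused model reproduces the original
# conv+BN outputs, since fusion folds the BN running statistics into the
# conv weight and bias.
import torch
from torch import nn

from annotator.uniformer.mmcv.cnn.utils.fuse_conv_bn import fuse_conv_bn

model = nn.Sequential(nn.Conv2d(3, 8, 3, bias=False),
                      nn.BatchNorm2d(8)).eval()
x = torch.rand(1, 3, 16, 16)
ref = model(x)
fused = fuse_conv_bn(model)  # the BN module is replaced by nn.Identity
assert torch.allclose(fused(x), ref, atol=1e-5)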
# Modified from flops-counter.pytorch by Vladislav Sovrasov # original repo: https://github.com/sovrasov/flops-counter.pytorch # MIT License # Copyright (c) 2018 Vladislav Sovrasov # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys from functools import partial import numpy as np import torch import torch.nn as nn import annotator.uniformer.mmcv as mmcv def get_model_complexity_info(model, input_shape, print_per_layer_stat=True, as_strings=True, input_constructor=None, flush=False, ost=sys.stdout): """Get complexity information of a model. This method can calculate FLOPs and parameter counts of a model with corresponding input shape. It can also print complexity information for each layer in a model. Supported layers are listed as below: - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``. - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``, ``nn.ReLU6``. - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``, ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``, ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``, ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``, ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``. - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``, ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``, ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``. - Linear: ``nn.Linear``. - Deconvolution: ``nn.ConvTranspose2d``. - Upsample: ``nn.Upsample``. Args: model (nn.Module): The model for complexity calculation. input_shape (tuple): Input shape used for calculation. print_per_layer_stat (bool): Whether to print complexity information for each layer in a model. Default: True. as_strings (bool): Output FLOPs and params counts in a string form. Default: True. input_constructor (None | callable): If specified, it takes a callable method that generates input. otherwise, it will generate a random tensor with input shape to calculate FLOPs. Default: None. flush (bool): same as that in :func:`print`. Default: False. ost (stream): same as ``file`` param in :func:`print`. Default: sys.stdout. Returns: tuple[float | str]: If ``as_strings`` is set to True, it will return FLOPs and parameter counts in a string format. otherwise, it will return those in a float number format. 
""" assert type(input_shape) is tuple assert len(input_shape) >= 1 assert isinstance(model, nn.Module) flops_model = add_flops_counting_methods(model) flops_model.eval() flops_model.start_flops_count() if input_constructor: input = input_constructor(input_shape) _ = flops_model(**input) else: try: batch = torch.ones(()).new_empty( (1, *input_shape), dtype=next(flops_model.parameters()).dtype, device=next(flops_model.parameters()).device) except StopIteration: # Avoid StopIteration for models which have no parameters, # like `nn.Relu()`, `nn.AvgPool2d`, etc. batch = torch.ones(()).new_empty((1, *input_shape)) _ = flops_model(batch) flops_count, params_count = flops_model.compute_average_flops_cost() if print_per_layer_stat: print_model_with_flops( flops_model, flops_count, params_count, ost=ost, flush=flush) flops_model.stop_flops_count() if as_strings: return flops_to_string(flops_count), params_to_string(params_count) return flops_count, params_count def flops_to_string(flops, units='GFLOPs', precision=2): """Convert FLOPs number into a string. Note that Here we take a multiply-add counts as one FLOP. Args: flops (float): FLOPs number to be converted. units (str | None): Converted FLOPs units. Options are None, 'GFLOPs', 'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically choose the most suitable unit for FLOPs. Default: 'GFLOPs'. precision (int): Digit number after the decimal point. Default: 2. Returns: str: The converted FLOPs number with units. Examples: >>> flops_to_string(1e9) '1.0 GFLOPs' >>> flops_to_string(2e5, 'MFLOPs') '0.2 MFLOPs' >>> flops_to_string(3e-9, None) '3e-09 FLOPs' """ if units is None: if flops // 10**9 > 0: return str(round(flops / 10.**9, precision)) + ' GFLOPs' elif flops // 10**6 > 0: return str(round(flops / 10.**6, precision)) + ' MFLOPs' elif flops // 10**3 > 0: return str(round(flops / 10.**3, precision)) + ' KFLOPs' else: return str(flops) + ' FLOPs' else: if units == 'GFLOPs': return str(round(flops / 10.**9, precision)) + ' ' + units elif units == 'MFLOPs': return str(round(flops / 10.**6, precision)) + ' ' + units elif units == 'KFLOPs': return str(round(flops / 10.**3, precision)) + ' ' + units else: return str(flops) + ' FLOPs' def params_to_string(num_params, units=None, precision=2): """Convert parameter number into a string. Args: num_params (float): Parameter number to be converted. units (str | None): Converted FLOPs units. Options are None, 'M', 'K' and ''. If set to None, it will automatically choose the most suitable unit for Parameter number. Default: None. precision (int): Digit number after the decimal point. Default: 2. Returns: str: The converted parameter number with units. Examples: >>> params_to_string(1e9) '1000.0 M' >>> params_to_string(2e5) '200.0 k' >>> params_to_string(3e-9) '3e-09' """ if units is None: if num_params // 10**6 > 0: return str(round(num_params / 10**6, precision)) + ' M' elif num_params // 10**3: return str(round(num_params / 10**3, precision)) + ' k' else: return str(num_params) else: if units == 'M': return str(round(num_params / 10.**6, precision)) + ' ' + units elif units == 'K': return str(round(num_params / 10.**3, precision)) + ' ' + units else: return str(num_params) def print_model_with_flops(model, total_flops, total_params, units='GFLOPs', precision=3, ost=sys.stdout, flush=False): """Print a model with FLOPs for each layer. Args: model (nn.Module): The model to be printed. total_flops (float): Total FLOPs of the model. total_params (float): Total parameter counts of the model. 
units (str | None): Converted FLOPs units. Default: 'GFLOPs'. precision (int): Digit number after the decimal point. Default: 3. ost (stream): same as `file` param in :func:`print`. Default: sys.stdout. flush (bool): same as that in :func:`print`. Default: False. Example: >>> class ExampleModel(nn.Module): >>> def __init__(self): >>> super().__init__() >>> self.conv1 = nn.Conv2d(3, 8, 3) >>> self.conv2 = nn.Conv2d(8, 256, 3) >>> self.conv3 = nn.Conv2d(256, 8, 3) >>> self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) >>> self.flatten = nn.Flatten() >>> self.fc = nn.Linear(8, 1) >>> def forward(self, x): >>> x = self.conv1(x) >>> x = self.conv2(x) >>> x = self.conv3(x) >>> x = self.avg_pool(x) >>> x = self.flatten(x) >>> x = self.fc(x) >>> return x >>> model = ExampleModel() >>> x = (3, 16, 16) to print the complexity information state for each layer, you can use >>> get_model_complexity_info(model, x) or directly use >>> print_model_with_flops(model, 4579784.0, 37361) ExampleModel( 0.037 M, 100.000% Params, 0.005 GFLOPs, 100.000% FLOPs, (conv1): Conv2d(0.0 M, 0.600% Params, 0.0 GFLOPs, 0.959% FLOPs, 3, 8, kernel_size=(3, 3), stride=(1, 1)) # noqa: E501 (conv2): Conv2d(0.019 M, 50.020% Params, 0.003 GFLOPs, 58.760% FLOPs, 8, 256, kernel_size=(3, 3), stride=(1, 1)) (conv3): Conv2d(0.018 M, 49.356% Params, 0.002 GFLOPs, 40.264% FLOPs, 256, 8, kernel_size=(3, 3), stride=(1, 1)) (avg_pool): AdaptiveAvgPool2d(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.017% FLOPs, output_size=(1, 1)) (flatten): Flatten(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.000% FLOPs, ) (fc): Linear(0.0 M, 0.024% Params, 0.0 GFLOPs, 0.000% FLOPs, in_features=8, out_features=1, bias=True) ) """ def accumulate_params(self): if is_supported_instance(self): return self.__params__ else: sum = 0 for m in self.children(): sum += m.accumulate_params() return sum def accumulate_flops(self): if is_supported_instance(self): return self.__flops__ / model.__batch_counter__ else: sum = 0 for m in self.children(): sum += m.accumulate_flops() return sum def flops_repr(self): accumulated_num_params = self.accumulate_params() accumulated_flops_cost = self.accumulate_flops() return ', '.join([ params_to_string( accumulated_num_params, units='M', precision=precision), '{:.3%} Params'.format(accumulated_num_params / total_params), flops_to_string( accumulated_flops_cost, units=units, precision=precision), '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops), self.original_extra_repr() ]) def add_extra_repr(m): m.accumulate_flops = accumulate_flops.__get__(m) m.accumulate_params = accumulate_params.__get__(m) flops_extra_repr = flops_repr.__get__(m) if m.extra_repr != flops_extra_repr: m.original_extra_repr = m.extra_repr m.extra_repr = flops_extra_repr assert m.extra_repr != m.original_extra_repr def del_extra_repr(m): if hasattr(m, 'original_extra_repr'): m.extra_repr = m.original_extra_repr del m.original_extra_repr if hasattr(m, 'accumulate_flops'): del m.accumulate_flops model.apply(add_extra_repr) print(model, file=ost, flush=flush) model.apply(del_extra_repr) def get_model_parameters_number(model): """Calculate parameter number of a model. Args: model (nn.module): The model for parameter number calculation. Returns: float: Parameter number of the model. 
""" num_params = sum(p.numel() for p in model.parameters() if p.requires_grad) return num_params def add_flops_counting_methods(net_main_module): # adding additional methods to the existing module object, # this is done this way so that each function has access to self object net_main_module.start_flops_count = start_flops_count.__get__( net_main_module) net_main_module.stop_flops_count = stop_flops_count.__get__( net_main_module) net_main_module.reset_flops_count = reset_flops_count.__get__( net_main_module) net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__( # noqa: E501 net_main_module) net_main_module.reset_flops_count() return net_main_module def compute_average_flops_cost(self): """Compute average FLOPs cost. A method to compute average FLOPs cost, which will be available after `add_flops_counting_methods()` is called on a desired net object. Returns: float: Current mean flops consumption per image. """ batches_count = self.__batch_counter__ flops_sum = 0 for module in self.modules(): if is_supported_instance(module): flops_sum += module.__flops__ params_sum = get_model_parameters_number(self) return flops_sum / batches_count, params_sum def start_flops_count(self): """Activate the computation of mean flops consumption per image. A method to activate the computation of mean flops consumption per image. which will be available after ``add_flops_counting_methods()`` is called on a desired net object. It should be called before running the network. """ add_batch_counter_hook_function(self) def add_flops_counter_hook_function(module): if is_supported_instance(module): if hasattr(module, '__flops_handle__'): return else: handle = module.register_forward_hook( get_modules_mapping()[type(module)]) module.__flops_handle__ = handle self.apply(partial(add_flops_counter_hook_function)) def stop_flops_count(self): """Stop computing the mean flops consumption per image. A method to stop computing the mean flops consumption per image, which will be available after ``add_flops_counting_methods()`` is called on a desired net object. It can be called to pause the computation whenever. """ remove_batch_counter_hook_function(self) self.apply(remove_flops_counter_hook_function) def reset_flops_count(self): """Reset statistics computed so far. A method to Reset computed statistics, which will be available after `add_flops_counting_methods()` is called on a desired net object. 
""" add_batch_counter_variables_or_reset(self) self.apply(add_flops_counter_variable_or_reset) # ---- Internal functions def empty_flops_counter_hook(module, input, output): module.__flops__ += 0 def upsample_flops_counter_hook(module, input, output): output_size = output[0] batch_size = output_size.shape[0] output_elements_count = batch_size for val in output_size.shape[1:]: output_elements_count *= val module.__flops__ += int(output_elements_count) def relu_flops_counter_hook(module, input, output): active_elements_count = output.numel() module.__flops__ += int(active_elements_count) def linear_flops_counter_hook(module, input, output): input = input[0] output_last_dim = output.shape[ -1] # pytorch checks dimensions, so here we don't care much module.__flops__ += int(np.prod(input.shape) * output_last_dim) def pool_flops_counter_hook(module, input, output): input = input[0] module.__flops__ += int(np.prod(input.shape)) def norm_flops_counter_hook(module, input, output): input = input[0] batch_flops = np.prod(input.shape) if (getattr(module, 'affine', False) or getattr(module, 'elementwise_affine', False)): batch_flops *= 2 module.__flops__ += int(batch_flops) def deconv_flops_counter_hook(conv_module, input, output): # Can have multiple inputs, getting the first one input = input[0] batch_size = input.shape[0] input_height, input_width = input.shape[2:] kernel_height, kernel_width = conv_module.kernel_size in_channels = conv_module.in_channels out_channels = conv_module.out_channels groups = conv_module.groups filters_per_channel = out_channels // groups conv_per_position_flops = ( kernel_height * kernel_width * in_channels * filters_per_channel) active_elements_count = batch_size * input_height * input_width overall_conv_flops = conv_per_position_flops * active_elements_count bias_flops = 0 if conv_module.bias is not None: output_height, output_width = output.shape[2:] bias_flops = out_channels * batch_size * output_height * output_height overall_flops = overall_conv_flops + bias_flops conv_module.__flops__ += int(overall_flops) def conv_flops_counter_hook(conv_module, input, output): # Can have multiple inputs, getting the first one input = input[0] batch_size = input.shape[0] output_dims = list(output.shape[2:]) kernel_dims = list(conv_module.kernel_size) in_channels = conv_module.in_channels out_channels = conv_module.out_channels groups = conv_module.groups filters_per_channel = out_channels // groups conv_per_position_flops = int( np.prod(kernel_dims)) * in_channels * filters_per_channel active_elements_count = batch_size * int(np.prod(output_dims)) overall_conv_flops = conv_per_position_flops * active_elements_count bias_flops = 0 if conv_module.bias is not None: bias_flops = out_channels * active_elements_count overall_flops = overall_conv_flops + bias_flops conv_module.__flops__ += int(overall_flops) def batch_counter_hook(module, input, output): batch_size = 1 if len(input) > 0: # Can have multiple inputs, getting the first one input = input[0] batch_size = len(input) else: pass print('Warning! 
No positional inputs found for a module, ' 'assuming batch size is 1.') module.__batch_counter__ += batch_size def add_batch_counter_variables_or_reset(module): module.__batch_counter__ = 0 def add_batch_counter_hook_function(module): if hasattr(module, '__batch_counter_handle__'): return handle = module.register_forward_hook(batch_counter_hook) module.__batch_counter_handle__ = handle def remove_batch_counter_hook_function(module): if hasattr(module, '__batch_counter_handle__'): module.__batch_counter_handle__.remove() del module.__batch_counter_handle__ def add_flops_counter_variable_or_reset(module): if is_supported_instance(module): if hasattr(module, '__flops__') or hasattr(module, '__params__'): print('Warning: variables __flops__ or __params__ are already ' 'defined for the module' + type(module).__name__ + ' ptflops can affect your code!') module.__flops__ = 0 module.__params__ = get_model_parameters_number(module) def is_supported_instance(module): if type(module) in get_modules_mapping(): return True return False def remove_flops_counter_hook_function(module): if is_supported_instance(module): if hasattr(module, '__flops_handle__'): module.__flops_handle__.remove() del module.__flops_handle__ def get_modules_mapping(): return { # convolutions nn.Conv1d: conv_flops_counter_hook, nn.Conv2d: conv_flops_counter_hook, mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook, nn.Conv3d: conv_flops_counter_hook, mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook, # activations nn.ReLU: relu_flops_counter_hook, nn.PReLU: relu_flops_counter_hook, nn.ELU: relu_flops_counter_hook, nn.LeakyReLU: relu_flops_counter_hook, nn.ReLU6: relu_flops_counter_hook, # poolings nn.MaxPool1d: pool_flops_counter_hook, nn.AvgPool1d: pool_flops_counter_hook, nn.AvgPool2d: pool_flops_counter_hook, nn.MaxPool2d: pool_flops_counter_hook, mmcv.cnn.bricks.MaxPool2d: pool_flops_counter_hook, nn.MaxPool3d: pool_flops_counter_hook, mmcv.cnn.bricks.MaxPool3d: pool_flops_counter_hook, nn.AvgPool3d: pool_flops_counter_hook, nn.AdaptiveMaxPool1d: pool_flops_counter_hook, nn.AdaptiveAvgPool1d: pool_flops_counter_hook, nn.AdaptiveMaxPool2d: pool_flops_counter_hook, nn.AdaptiveAvgPool2d: pool_flops_counter_hook, nn.AdaptiveMaxPool3d: pool_flops_counter_hook, nn.AdaptiveAvgPool3d: pool_flops_counter_hook, # normalizations nn.BatchNorm1d: norm_flops_counter_hook, nn.BatchNorm2d: norm_flops_counter_hook, nn.BatchNorm3d: norm_flops_counter_hook, nn.GroupNorm: norm_flops_counter_hook, nn.InstanceNorm1d: norm_flops_counter_hook, nn.InstanceNorm2d: norm_flops_counter_hook, nn.InstanceNorm3d: norm_flops_counter_hook, nn.LayerNorm: norm_flops_counter_hook, # FC nn.Linear: linear_flops_counter_hook, mmcv.cnn.bricks.Linear: linear_flops_counter_hook, # Upscale nn.Upsample: upsample_flops_counter_hook, # Deconvolution nn.ConvTranspose2d: deconv_flops_counter_hook, mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook, }
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/utils/flops_counter.py
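# Illustrative usage sketch for get_model_complexity_info above (not part of
# the original file; assumes the package in this dump is importable). Note
# the convention stated in flops_to_string: one multiply-add is counted as a
# single FLOP.
import torch.nn as nn

from annotator.uniformer.mmcv.cnn.utils.flops_counter import \
    get_model_complexity_info

model = nn.Sequential(
    nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(), nn.Flatten(),
    nn.Linear(8 * 32 * 32, 10))
flops, params = get_model_complexity_info(
    model, (3, 32, 32), as_strings=True, print_per_layer_stat=False)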
import torch import annotator.uniformer.mmcv as mmcv class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm): """A general BatchNorm layer without input dimension check. Reproduced from @kapily's work: (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547) The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc is `_check_input_dim` that is designed for tensor sanity checks. The check has been bypassed in this class for the convenience of converting SyncBatchNorm. """ def _check_input_dim(self, input): return def revert_sync_batchnorm(module): """Helper function to convert all `SyncBatchNorm` (SyncBN) and `mmcv.ops.sync_bn.SyncBatchNorm`(MMSyncBN) layers in the model to `BatchNormXd` layers. Adapted from @kapily's work: (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547) Args: module (nn.Module): The module containing `SyncBatchNorm` layers. Returns: module_output: The converted module with `BatchNormXd` layers. """ module_output = module module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm] if hasattr(mmcv, 'ops'): module_checklist.append(mmcv.ops.SyncBatchNorm) if isinstance(module, tuple(module_checklist)): module_output = _BatchNormXd(module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats) if module.affine: # no_grad() may not be needed here but # just to be consistent with `convert_sync_batchnorm()` with torch.no_grad(): module_output.weight = module.weight module_output.bias = module.bias module_output.running_mean = module.running_mean module_output.running_var = module.running_var module_output.num_batches_tracked = module.num_batches_tracked module_output.training = module.training # qconfig exists in quantized models if hasattr(module, 'qconfig'): module_output.qconfig = module.qconfig for name, child in module.named_children(): module_output.add_module(name, revert_sync_batchnorm(child)) del module return module_output
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/utils/sync_bn.py
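# Illustrative usage sketch for revert_sync_batchnorm above (not part of the
# original file): converting SyncBN to the shape-agnostic _BatchNormXd lets
# a model built with SyncBN run on CPU or a single GPU without an
# initialized process group.
import torch
from torch import nn

from annotator.uniformer.mmcv.cnn.utils.sync_bn import revert_sync_batchnorm

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.SyncBatchNorm(8))
model = revert_sync_batchnorm(model)
y = model(torch.rand(1, 3, 8, 8))  # plain SyncBN would fail here on CPU
assert y.shape == (1, 8, 6, 6)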
# Copyright (c) OpenMMLab. All rights reserved. import copy import math import warnings import numpy as np import torch import torch.nn as nn from torch import Tensor from annotator.uniformer.mmcv.utils import Registry, build_from_cfg, get_logger, print_log INITIALIZERS = Registry('initializer') def update_init_info(module, init_info): """Update the `_params_init_info` in the module if the value of parameters are changed. Args: module (obj:`nn.Module`): The module of PyTorch with a user-defined attribute `_params_init_info` which records the initialization information. init_info (str): The string that describes the initialization. """ assert hasattr( module, '_params_init_info'), f'Can not find `_params_init_info` in {module}' for name, param in module.named_parameters(): assert param in module._params_init_info, ( f'Find a new :obj:`Parameter` ' f'named `{name}` during executing the ' f'`init_weights` of ' f'`{module.__class__.__name__}`. ' f'Please do not add or ' f'replace parameters during executing ' f'the `init_weights`. ') # The parameter has been changed during executing the # `init_weights` of module mean_value = param.data.mean() if module._params_init_info[param]['tmp_mean_value'] != mean_value: module._params_init_info[param]['init_info'] = init_info module._params_init_info[param]['tmp_mean_value'] = mean_value def constant_init(module, val, bias=0): if hasattr(module, 'weight') and module.weight is not None: nn.init.constant_(module.weight, val) if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, bias) def xavier_init(module, gain=1, bias=0, distribution='normal'): assert distribution in ['uniform', 'normal'] if hasattr(module, 'weight') and module.weight is not None: if distribution == 'uniform': nn.init.xavier_uniform_(module.weight, gain=gain) else: nn.init.xavier_normal_(module.weight, gain=gain) if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, bias) def normal_init(module, mean=0, std=1, bias=0): if hasattr(module, 'weight') and module.weight is not None: nn.init.normal_(module.weight, mean, std) if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, bias) def trunc_normal_init(module: nn.Module, mean: float = 0, std: float = 1, a: float = -2, b: float = 2, bias: float = 0) -> None: if hasattr(module, 'weight') and module.weight is not None: trunc_normal_(module.weight, mean, std, a, b) # type: ignore if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, bias) # type: ignore def uniform_init(module, a=0, b=1, bias=0): if hasattr(module, 'weight') and module.weight is not None: nn.init.uniform_(module.weight, a, b) if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, bias) def kaiming_init(module, a=0, mode='fan_out', nonlinearity='relu', bias=0, distribution='normal'): assert distribution in ['uniform', 'normal'] if hasattr(module, 'weight') and module.weight is not None: if distribution == 'uniform': nn.init.kaiming_uniform_( module.weight, a=a, mode=mode, nonlinearity=nonlinearity) else: nn.init.kaiming_normal_( module.weight, a=a, mode=mode, nonlinearity=nonlinearity) if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, bias) def caffe2_xavier_init(module, bias=0): # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch # Acknowledgment to FAIR's internal code kaiming_init( module, a=1, mode='fan_in', nonlinearity='leaky_relu', bias=bias, 
distribution='uniform') def bias_init_with_prob(prior_prob): """initialize conv/fc bias value according to a given probability value.""" bias_init = float(-np.log((1 - prior_prob) / prior_prob)) return bias_init def _get_bases_name(m): return [b.__name__ for b in m.__class__.__bases__] class BaseInit(object): def __init__(self, *, bias=0, bias_prob=None, layer=None): self.wholemodule = False if not isinstance(bias, (int, float)): raise TypeError(f'bias must be a number, but got a {type(bias)}') if bias_prob is not None: if not isinstance(bias_prob, float): raise TypeError(f'bias_prob type must be float, \ but got {type(bias_prob)}') if layer is not None: if not isinstance(layer, (str, list)): raise TypeError(f'layer must be a str or a list of str, \ but got a {type(layer)}') else: layer = [] if bias_prob is not None: self.bias = bias_init_with_prob(bias_prob) else: self.bias = bias self.layer = [layer] if isinstance(layer, str) else layer def _get_init_info(self): info = f'{self.__class__.__name__}, bias={self.bias}' return info @INITIALIZERS.register_module(name='Constant') class ConstantInit(BaseInit): """Initialize module parameters with constant values. Args: val (int | float): the value to fill the weights in the module with bias (int | float): the value to fill the bias. Defaults to 0. bias_prob (float, optional): the probability for bias initialization. Defaults to None. layer (str | list[str], optional): the layer will be initialized. Defaults to None. """ def __init__(self, val, **kwargs): super().__init__(**kwargs) self.val = val def __call__(self, module): def init(m): if self.wholemodule: constant_init(m, self.val, self.bias) else: layername = m.__class__.__name__ basesname = _get_bases_name(m) if len(set(self.layer) & set([layername] + basesname)): constant_init(m, self.val, self.bias) module.apply(init) if hasattr(module, '_params_init_info'): update_init_info(module, init_info=self._get_init_info()) def _get_init_info(self): info = f'{self.__class__.__name__}: val={self.val}, bias={self.bias}' return info @INITIALIZERS.register_module(name='Xavier') class XavierInit(BaseInit): r"""Initialize module parameters with values according to the method described in `Understanding the difficulty of training deep feedforward neural networks - Glorot, X. & Bengio, Y. (2010). <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_ Args: gain (int | float): an optional scaling factor. Defaults to 1. bias (int | float): the value to fill the bias. Defaults to 0. bias_prob (float, optional): the probability for bias initialization. Defaults to None. distribution (str): distribution either be ``'normal'`` or ``'uniform'``. Defaults to ``'normal'``. layer (str | list[str], optional): the layer will be initialized. Defaults to None. 
""" def __init__(self, gain=1, distribution='normal', **kwargs): super().__init__(**kwargs) self.gain = gain self.distribution = distribution def __call__(self, module): def init(m): if self.wholemodule: xavier_init(m, self.gain, self.bias, self.distribution) else: layername = m.__class__.__name__ basesname = _get_bases_name(m) if len(set(self.layer) & set([layername] + basesname)): xavier_init(m, self.gain, self.bias, self.distribution) module.apply(init) if hasattr(module, '_params_init_info'): update_init_info(module, init_info=self._get_init_info()) def _get_init_info(self): info = f'{self.__class__.__name__}: gain={self.gain}, ' \ f'distribution={self.distribution}, bias={self.bias}' return info @INITIALIZERS.register_module(name='Normal') class NormalInit(BaseInit): r"""Initialize module parameters with the values drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`. Args: mean (int | float):the mean of the normal distribution. Defaults to 0. std (int | float): the standard deviation of the normal distribution. Defaults to 1. bias (int | float): the value to fill the bias. Defaults to 0. bias_prob (float, optional): the probability for bias initialization. Defaults to None. layer (str | list[str], optional): the layer will be initialized. Defaults to None. """ def __init__(self, mean=0, std=1, **kwargs): super().__init__(**kwargs) self.mean = mean self.std = std def __call__(self, module): def init(m): if self.wholemodule: normal_init(m, self.mean, self.std, self.bias) else: layername = m.__class__.__name__ basesname = _get_bases_name(m) if len(set(self.layer) & set([layername] + basesname)): normal_init(m, self.mean, self.std, self.bias) module.apply(init) if hasattr(module, '_params_init_info'): update_init_info(module, init_info=self._get_init_info()) def _get_init_info(self): info = f'{self.__class__.__name__}: mean={self.mean},' \ f' std={self.std}, bias={self.bias}' return info @INITIALIZERS.register_module(name='TruncNormal') class TruncNormalInit(BaseInit): r"""Initialize module parameters with the values drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]`. Args: mean (float): the mean of the normal distribution. Defaults to 0. std (float): the standard deviation of the normal distribution. Defaults to 1. a (float): The minimum cutoff value. b ( float): The maximum cutoff value. bias (float): the value to fill the bias. Defaults to 0. bias_prob (float, optional): the probability for bias initialization. Defaults to None. layer (str | list[str], optional): the layer will be initialized. Defaults to None. 
""" def __init__(self, mean: float = 0, std: float = 1, a: float = -2, b: float = 2, **kwargs) -> None: super().__init__(**kwargs) self.mean = mean self.std = std self.a = a self.b = b def __call__(self, module: nn.Module) -> None: def init(m): if self.wholemodule: trunc_normal_init(m, self.mean, self.std, self.a, self.b, self.bias) else: layername = m.__class__.__name__ basesname = _get_bases_name(m) if len(set(self.layer) & set([layername] + basesname)): trunc_normal_init(m, self.mean, self.std, self.a, self.b, self.bias) module.apply(init) if hasattr(module, '_params_init_info'): update_init_info(module, init_info=self._get_init_info()) def _get_init_info(self): info = f'{self.__class__.__name__}: a={self.a}, b={self.b},' \ f' mean={self.mean}, std={self.std}, bias={self.bias}' return info @INITIALIZERS.register_module(name='Uniform') class UniformInit(BaseInit): r"""Initialize module parameters with values drawn from the uniform distribution :math:`\mathcal{U}(a, b)`. Args: a (int | float): the lower bound of the uniform distribution. Defaults to 0. b (int | float): the upper bound of the uniform distribution. Defaults to 1. bias (int | float): the value to fill the bias. Defaults to 0. bias_prob (float, optional): the probability for bias initialization. Defaults to None. layer (str | list[str], optional): the layer will be initialized. Defaults to None. """ def __init__(self, a=0, b=1, **kwargs): super().__init__(**kwargs) self.a = a self.b = b def __call__(self, module): def init(m): if self.wholemodule: uniform_init(m, self.a, self.b, self.bias) else: layername = m.__class__.__name__ basesname = _get_bases_name(m) if len(set(self.layer) & set([layername] + basesname)): uniform_init(m, self.a, self.b, self.bias) module.apply(init) if hasattr(module, '_params_init_info'): update_init_info(module, init_info=self._get_init_info()) def _get_init_info(self): info = f'{self.__class__.__name__}: a={self.a},' \ f' b={self.b}, bias={self.bias}' return info @INITIALIZERS.register_module(name='Kaiming') class KaimingInit(BaseInit): r"""Initialize module parameters with the values according to the method described in `Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification - He, K. et al. (2015). <https://www.cv-foundation.org/openaccess/content_iccv_2015/ papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_ Args: a (int | float): the negative slope of the rectifier used after this layer (only used with ``'leaky_relu'``). Defaults to 0. mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing ``'fan_in'`` preserves the magnitude of the variance of the weights in the forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the backwards pass. Defaults to ``'fan_out'``. nonlinearity (str): the non-linear function (`nn.functional` name), recommended to use only with ``'relu'`` or ``'leaky_relu'`` . Defaults to 'relu'. bias (int | float): the value to fill the bias. Defaults to 0. bias_prob (float, optional): the probability for bias initialization. Defaults to None. distribution (str): distribution either be ``'normal'`` or ``'uniform'``. Defaults to ``'normal'``. layer (str | list[str], optional): the layer will be initialized. Defaults to None. 
""" def __init__(self, a=0, mode='fan_out', nonlinearity='relu', distribution='normal', **kwargs): super().__init__(**kwargs) self.a = a self.mode = mode self.nonlinearity = nonlinearity self.distribution = distribution def __call__(self, module): def init(m): if self.wholemodule: kaiming_init(m, self.a, self.mode, self.nonlinearity, self.bias, self.distribution) else: layername = m.__class__.__name__ basesname = _get_bases_name(m) if len(set(self.layer) & set([layername] + basesname)): kaiming_init(m, self.a, self.mode, self.nonlinearity, self.bias, self.distribution) module.apply(init) if hasattr(module, '_params_init_info'): update_init_info(module, init_info=self._get_init_info()) def _get_init_info(self): info = f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, ' \ f'nonlinearity={self.nonlinearity}, ' \ f'distribution ={self.distribution}, bias={self.bias}' return info @INITIALIZERS.register_module(name='Caffe2Xavier') class Caffe2XavierInit(KaimingInit): # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch # Acknowledgment to FAIR's internal code def __init__(self, **kwargs): super().__init__( a=1, mode='fan_in', nonlinearity='leaky_relu', distribution='uniform', **kwargs) def __call__(self, module): super().__call__(module) @INITIALIZERS.register_module(name='Pretrained') class PretrainedInit(object): """Initialize module by loading a pretrained model. Args: checkpoint (str): the checkpoint file of the pretrained model should be load. prefix (str, optional): the prefix of a sub-module in the pretrained model. it is for loading a part of the pretrained model to initialize. For example, if we would like to only load the backbone of a detector model, we can set ``prefix='backbone.'``. Defaults to None. map_location (str): map tensors into proper locations. """ def __init__(self, checkpoint, prefix=None, map_location=None): self.checkpoint = checkpoint self.prefix = prefix self.map_location = map_location def __call__(self, module): from annotator.uniformer.mmcv.runner import (_load_checkpoint_with_prefix, load_checkpoint, load_state_dict) logger = get_logger('mmcv') if self.prefix is None: print_log(f'load model from: {self.checkpoint}', logger=logger) load_checkpoint( module, self.checkpoint, map_location=self.map_location, strict=False, logger=logger) else: print_log( f'load {self.prefix} in model from: {self.checkpoint}', logger=logger) state_dict = _load_checkpoint_with_prefix( self.prefix, self.checkpoint, map_location=self.map_location) load_state_dict(module, state_dict, strict=False, logger=logger) if hasattr(module, '_params_init_info'): update_init_info(module, init_info=self._get_init_info()) def _get_init_info(self): info = f'{self.__class__.__name__}: load from {self.checkpoint}' return info def _initialize(module, cfg, wholemodule=False): func = build_from_cfg(cfg, INITIALIZERS) # wholemodule flag is for override mode, there is no layer key in override # and initializer will give init values for the whole module with the name # in override. 
    func.wholemodule = wholemodule
    func(module)


def _initialize_override(module, override, cfg):
    if not isinstance(override, (dict, list)):
        raise TypeError('override must be a dict or a list of dict, '
                        f'but got {type(override)}')

    override = [override] if isinstance(override, dict) else override

    for override_ in override:

        cp_override = copy.deepcopy(override_)
        name = cp_override.pop('name', None)
        if name is None:
            raise ValueError('`override` must contain the key "name", '
                             f'but got {cp_override}')
        # if override only has the name key, it means to use the
        # arguments from init_cfg
        if not cp_override:
            cp_override.update(cfg)
        # if override has the name key and other arguments except the
        # type key, it will raise an error
        elif 'type' not in cp_override.keys():
            raise ValueError(
                f'`override` needs a "type" key, but got {cp_override}')

        if hasattr(module, name):
            _initialize(getattr(module, name), cp_override, wholemodule=True)
        else:
            raise RuntimeError(f'module has no attribute {name}, '
                               f'but init_cfg is {cp_override}.')


def initialize(module, init_cfg):
    """Initialize a module.

    Args:
        module (``torch.nn.Module``): the module will be initialized.
        init_cfg (dict | list[dict]): initialization configuration dict to
            define initializer. OpenMMLab has implemented 6 initializers
            including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
            ``Kaiming``, and ``Pretrained``.

    Example:
        >>> module = nn.Linear(2, 3, bias=True)
        >>> init_cfg = dict(type='Constant', layer='Linear', val=1, bias=2)
        >>> initialize(module, init_cfg)

        >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1, 2))
        >>> # define key ``'layer'`` for initializing layers with different
        >>> # configurations
        >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
                dict(type='Constant', layer='Linear', val=2)]
        >>> initialize(module, init_cfg)

        >>> # define key ``'override'`` to initialize some specific part in
        >>> # module
        >>> class FooNet(nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.feat = nn.Conv2d(3, 16, 3)
        >>>         self.reg = nn.Conv2d(16, 10, 3)
        >>>         self.cls = nn.Conv2d(16, 5, 3)
        >>> model = FooNet()
        >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
        >>>     override=dict(type='Constant', name='reg', val=3, bias=4))
        >>> initialize(model, init_cfg)

        >>> model = ResNet(depth=50)
        >>> # Initialize weights with the pretrained model.
        >>> init_cfg = dict(type='Pretrained',
                checkpoint='torchvision://resnet50')
        >>> initialize(model, init_cfg)

        >>> # Initialize weights of a sub-module with the specific part of
        >>> # a pretrained model by using "prefix".
        >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
        >>>     'retinanet_r50_fpn_1x_coco/'\
        >>>     'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
        >>> init_cfg = dict(type='Pretrained',
                checkpoint=url, prefix='backbone.')
    """
    if not isinstance(init_cfg, (dict, list)):
        raise TypeError('init_cfg must be a dict or a list of dict, '
                        f'but got {type(init_cfg)}')

    if isinstance(init_cfg, dict):
        init_cfg = [init_cfg]

    for cfg in init_cfg:
        # Deeply copy the original config: cfg may be shared by other
        # modules (e.g., one init_cfg shared by multiple bottleneck blocks),
        # and popping keys in place would change the initialization
        # behavior of those modules.
        cp_cfg = copy.deepcopy(cfg)
        override = cp_cfg.pop('override', None)
        _initialize(module, cp_cfg)

        if override is not None:
            cp_cfg.pop('layer', None)
            _initialize_override(module, override, cp_cfg)
        else:
            # All attributes of the module share the same initialization.
pass def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor: # Method based on # https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf # Modified from # https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py def norm_cdf(x): # Computes standard normal cumulative distribution function return (1. + math.erf(x / math.sqrt(2.))) / 2. if (mean < a - 2 * std) or (mean > b + 2 * std): warnings.warn( 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. ' 'The distribution of values may be incorrect.', stacklevel=2) with torch.no_grad(): # Values are generated by using a truncated uniform distribution and # then using the inverse CDF for the normal distribution. # Get upper and lower cdf values lower = norm_cdf((a - mean) / std) upper = norm_cdf((b - mean) / std) # Uniformly fill tensor with values from [lower, upper], then translate # to [2lower-1, 2upper-1]. tensor.uniform_(2 * lower - 1, 2 * upper - 1) # Use inverse cdf transform for normal distribution to get truncated # standard normal tensor.erfinv_() # Transform to proper mean, std tensor.mul_(std * math.sqrt(2.)) tensor.add_(mean) # Clamp to ensure it's in the proper range tensor.clamp_(min=a, max=b) return tensor def trunc_normal_(tensor: Tensor, mean: float = 0., std: float = 1., a: float = -2., b: float = 2.) -> Tensor: r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. Modified from https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py Args: tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`. mean (float): the mean of the normal distribution. std (float): the standard deviation of the normal distribution. a (float): the minimum cutoff value. b (float): the maximum cutoff value. """ return _no_grad_trunc_normal_(tensor, mean, std, a, b)
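# --- Usage sketch (editor's addition, not part of the upstream file). ---
# A minimal, CPU-only example of `trunc_normal_` and the config-based
# `initialize` entry point; the module and config below are illustrative.
if __name__ == '__main__':
    fc = nn.Linear(4, 4)
    # draw weights from N(0, 0.02^2), truncated to [-0.04, 0.04] (+/- 2 std)
    trunc_normal_(fc.weight, mean=0., std=0.02, a=-0.04, b=0.04)
    # then re-initialize the same layer through the registry-driven API
    initialize(fc, dict(type='Constant', layer='Linear', val=1, bias=0))
    assert torch.all(fc.weight == 1) and torch.all(fc.bias == 0)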
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/cnn/utils/weight_init.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair, _single

from annotator.uniformer.mmcv.utils import deprecated_api_warning
from ..cnn import CONV_LAYERS
from ..utils import ext_loader, print_log

ext_module = ext_loader.load_ext('_ext', [
    'deform_conv_forward', 'deform_conv_backward_input',
    'deform_conv_backward_parameters'
])


class DeformConv2dFunction(Function):

    @staticmethod
    def symbolic(g,
                 input,
                 offset,
                 weight,
                 stride,
                 padding,
                 dilation,
                 groups,
                 deform_groups,
                 bias=False,
                 im2col_step=32):
        return g.op(
            'mmcv::MMCVDeformConv2d',
            input,
            offset,
            weight,
            stride_i=stride,
            padding_i=padding,
            dilation_i=dilation,
            groups_i=groups,
            deform_groups_i=deform_groups,
            bias_i=bias,
            im2col_step_i=im2col_step)

    @staticmethod
    def forward(ctx,
                input,
                offset,
                weight,
                stride=1,
                padding=0,
                dilation=1,
                groups=1,
                deform_groups=1,
                bias=False,
                im2col_step=32):
        if input is not None and input.dim() != 4:
            raise ValueError(
                f'Expected 4D tensor as input, got {input.dim()}D tensor '
                'instead.')
        assert bias is False, 'Only bias=False is supported.'

        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deform_groups = deform_groups
        ctx.im2col_step = im2col_step

        # When the PyTorch version >= 1.6.0, amp is adopted for fp16 mode;
        # amp won't cast the type of the model (float32), but "offset" is
        # cast to float16 by nn.Conv2d automatically, leading to a type
        # mismatch with the input (when it is float32) or the weight.
        # The flag for whether to use fp16 or amp is the type of "offset",
        # so we cast the weight and input to temporarily support fp16 and
        # amp whatever the PyTorch version is.
input = input.type_as(offset) weight = weight.type_as(input) ctx.save_for_backward(input, offset, weight) output = input.new_empty( DeformConv2dFunction._output_size(ctx, input, weight)) ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones cur_im2col_step = min(ctx.im2col_step, input.size(0)) assert (input.size(0) % cur_im2col_step) == 0, 'im2col step must divide batchsize' ext_module.deform_conv_forward( input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1], kW=weight.size(3), kH=weight.size(2), dW=ctx.stride[1], dH=ctx.stride[0], padW=ctx.padding[1], padH=ctx.padding[0], dilationW=ctx.dilation[1], dilationH=ctx.dilation[0], group=ctx.groups, deformable_group=ctx.deform_groups, im2col_step=cur_im2col_step) return output @staticmethod @once_differentiable def backward(ctx, grad_output): input, offset, weight = ctx.saved_tensors grad_input = grad_offset = grad_weight = None cur_im2col_step = min(ctx.im2col_step, input.size(0)) assert (input.size(0) % cur_im2col_step ) == 0, 'batch size must be divisible by im2col_step' grad_output = grad_output.contiguous() if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: grad_input = torch.zeros_like(input) grad_offset = torch.zeros_like(offset) ext_module.deform_conv_backward_input( input, offset, grad_output, grad_input, grad_offset, weight, ctx.bufs_[0], kW=weight.size(3), kH=weight.size(2), dW=ctx.stride[1], dH=ctx.stride[0], padW=ctx.padding[1], padH=ctx.padding[0], dilationW=ctx.dilation[1], dilationH=ctx.dilation[0], group=ctx.groups, deformable_group=ctx.deform_groups, im2col_step=cur_im2col_step) if ctx.needs_input_grad[2]: grad_weight = torch.zeros_like(weight) ext_module.deform_conv_backward_parameters( input, offset, grad_output, grad_weight, ctx.bufs_[0], ctx.bufs_[1], kW=weight.size(3), kH=weight.size(2), dW=ctx.stride[1], dH=ctx.stride[0], padW=ctx.padding[1], padH=ctx.padding[0], dilationW=ctx.dilation[1], dilationH=ctx.dilation[0], group=ctx.groups, deformable_group=ctx.deform_groups, scale=1, im2col_step=cur_im2col_step) return grad_input, grad_offset, grad_weight, \ None, None, None, None, None, None, None @staticmethod def _output_size(ctx, input, weight): channels = weight.size(0) output_size = (input.size(0), channels) for d in range(input.dim() - 2): in_size = input.size(d + 2) pad = ctx.padding[d] kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1 stride_ = ctx.stride[d] output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) if not all(map(lambda s: s > 0, output_size)): raise ValueError( 'convolution input is too small (output would be ' + 'x'.join(map(str, output_size)) + ')') return output_size deform_conv2d = DeformConv2dFunction.apply class DeformConv2d(nn.Module): r"""Deformable 2D convolution. Applies a deformable 2D convolution over an input signal composed of several input planes. DeformConv2d was described in the paper `Deformable Convolutional Networks <https://arxiv.org/pdf/1703.06211.pdf>`_ Note: The argument ``im2col_step`` was added in version 1.3.17, which means number of samples processed by the ``im2col_cuda_kernel`` per call. It enables users to define ``batch_size`` and ``im2col_step`` more flexibly and solved `issue mmcv#1440 <https://github.com/open-mmlab/mmcv/issues/1440>`_. Args: in_channels (int): Number of channels in the input image. out_channels (int): Number of channels produced by the convolution. kernel_size(int, tuple): Size of the convolving kernel. stride(int, tuple): Stride of the convolution. Default: 1. 
        padding (int or tuple): Zero-padding added to both sides of the
            input. Default: 0.
        dilation (int or tuple): Spacing between kernel elements.
            Default: 1.
        groups (int): Number of blocked connections from input channels to
            output channels. Default: 1.
        deform_groups (int): Number of deformable group partitions.
            Default: 1.
        bias (bool): If True, adds a learnable bias to the output.
            Default: False.
        im2col_step (int): Number of samples processed by im2col_cuda_kernel
            per call. It will work when ``batch_size`` > ``im2col_step``, but
            ``batch_size`` must be divisible by ``im2col_step``. Default: 32.
            `New in version 1.3.17.`
    """

    @deprecated_api_warning({'deformable_groups': 'deform_groups'},
                            cls_name='DeformConv2d')
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Tuple[int, ...]],
                 stride: Union[int, Tuple[int, ...]] = 1,
                 padding: Union[int, Tuple[int, ...]] = 0,
                 dilation: Union[int, Tuple[int, ...]] = 1,
                 groups: int = 1,
                 deform_groups: int = 1,
                 bias: bool = False,
                 im2col_step: int = 32) -> None:
        super(DeformConv2d, self).__init__()

        assert not bias, \
            f'bias={bias} is not supported in DeformConv2d.'
        assert in_channels % groups == 0, \
            f'in_channels {in_channels} is not divisible by groups {groups}'
        assert out_channels % groups == 0, \
            f'out_channels {out_channels} is not divisible by groups {groups}'

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deform_groups = deform_groups
        self.im2col_step = im2col_step
        # enable compatibility with nn.Conv2d
        self.transposed = False
        self.output_padding = _single(0)

        # only weight, no bias
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // self.groups,
                         *self.kernel_size))

        self.reset_parameters()

    def reset_parameters(self):
        # switch the initialization of `self.weight` to the standard kaiming
        # method described in `Delving deep into rectifiers: Surpassing
        # human-level performance on ImageNet classification` - He, K. et al.
        # (2015), using a uniform distribution
        nn.init.kaiming_uniform_(self.weight, nonlinearity='relu')

    def forward(self, x: Tensor, offset: Tensor) -> Tensor:
        """Deformable Convolutional forward function.

        Args:
            x (Tensor): Input feature, shape (B, C_in, H_in, W_in)
            offset (Tensor): Offset for deformable convolution, shape
                (B, deform_groups*kernel_size[0]*kernel_size[1]*2,
                H_out, W_out), H_out, W_out are equal to the output's.

                An offset is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`.
                The spatial arrangement is like:

                .. code:: text

                    (x0, y0) (x1, y1) (x2, y2)
                    (x3, y3) (x4, y4) (x5, y5)
                    (x6, y6) (x7, y7) (x8, y8)

        Returns:
            Tensor: Output of the layer.
""" # To fix an assert error in deform_conv_cuda.cpp:128 # input image is smaller than kernel input_pad = (x.size(2) < self.kernel_size[0]) or (x.size(3) < self.kernel_size[1]) if input_pad: pad_h = max(self.kernel_size[0] - x.size(2), 0) pad_w = max(self.kernel_size[1] - x.size(3), 0) x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous() offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant', 0) offset = offset.contiguous() out = deform_conv2d(x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, self.deform_groups, False, self.im2col_step) if input_pad: out = out[:, :, :out.size(2) - pad_h, :out.size(3) - pad_w].contiguous() return out def __repr__(self): s = self.__class__.__name__ s += f'(in_channels={self.in_channels},\n' s += f'out_channels={self.out_channels},\n' s += f'kernel_size={self.kernel_size},\n' s += f'stride={self.stride},\n' s += f'padding={self.padding},\n' s += f'dilation={self.dilation},\n' s += f'groups={self.groups},\n' s += f'deform_groups={self.deform_groups},\n' # bias is not supported in DeformConv2d. s += 'bias=False)' return s @CONV_LAYERS.register_module('DCN') class DeformConv2dPack(DeformConv2d): """A Deformable Conv Encapsulation that acts as normal Conv layers. The offset tensor is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`. The spatial arrangement is like: .. code:: text (x0, y0) (x1, y1) (x2, y2) (x3, y3) (x4, y4) (x5, y5) (x6, y6) (x7, y7) (x8, y8) Args: in_channels (int): Same as nn.Conv2d. out_channels (int): Same as nn.Conv2d. kernel_size (int or tuple[int]): Same as nn.Conv2d. stride (int or tuple[int]): Same as nn.Conv2d. padding (int or tuple[int]): Same as nn.Conv2d. dilation (int or tuple[int]): Same as nn.Conv2d. groups (int): Same as nn.Conv2d. bias (bool or str): If specified as `auto`, it will be decided by the norm_cfg. Bias will be set as True if norm_cfg is None, otherwise False. """ _version = 2 def __init__(self, *args, **kwargs): super(DeformConv2dPack, self).__init__(*args, **kwargs) self.conv_offset = nn.Conv2d( self.in_channels, self.deform_groups * 2 * self.kernel_size[0] * self.kernel_size[1], kernel_size=self.kernel_size, stride=_pair(self.stride), padding=_pair(self.padding), dilation=_pair(self.dilation), bias=True) self.init_offset() def init_offset(self): self.conv_offset.weight.data.zero_() self.conv_offset.bias.data.zero_() def forward(self, x): offset = self.conv_offset(x) return deform_conv2d(x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, self.deform_groups, False, self.im2col_step) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): version = local_metadata.get('version', None) if version is None or version < 2: # the key is different in early versions # In version < 2, DeformConvPack loads previous benchmark models. if (prefix + 'conv_offset.weight' not in state_dict and prefix[:-1] + '_offset.weight' in state_dict): state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( prefix[:-1] + '_offset.weight') if (prefix + 'conv_offset.bias' not in state_dict and prefix[:-1] + '_offset.bias' in state_dict): state_dict[prefix + 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + '_offset.bias') if version is not None and version > 1: print_log( f'DeformConv2dPack {prefix.rstrip(".")} is upgraded to ' 'version 2.', logger='root') super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/ops/deform_conv.py
import torch from ..utils import ext_loader ext_module = ext_loader.load_ext('_ext', [ 'points_in_boxes_part_forward', 'points_in_boxes_cpu_forward', 'points_in_boxes_all_forward' ]) def points_in_boxes_part(points, boxes): """Find the box in which each point is (CUDA). Args: points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate boxes (torch.Tensor): [B, T, 7], num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz] in LiDAR/DEPTH coordinate, (x, y, z) is the bottom center Returns: box_idxs_of_pts (torch.Tensor): (B, M), default background = -1 """ assert points.shape[0] == boxes.shape[0], \ 'Points and boxes should have the same batch size, ' \ f'but got {points.shape[0]} and {boxes.shape[0]}' assert boxes.shape[2] == 7, \ 'boxes dimension should be 7, ' \ f'but got unexpected shape {boxes.shape[2]}' assert points.shape[2] == 3, \ 'points dimension should be 3, ' \ f'but got unexpected shape {points.shape[2]}' batch_size, num_points, _ = points.shape box_idxs_of_pts = points.new_zeros((batch_size, num_points), dtype=torch.int).fill_(-1) # If manually put the tensor 'points' or 'boxes' on a device # which is not the current device, some temporary variables # will be created on the current device in the cuda op, # and the output will be incorrect. # Therefore, we force the current device to be the same # as the device of the tensors if it was not. # Please refer to https://github.com/open-mmlab/mmdetection3d/issues/305 # for the incorrect output before the fix. points_device = points.get_device() assert points_device == boxes.get_device(), \ 'Points and boxes should be put on the same device' if torch.cuda.current_device() != points_device: torch.cuda.set_device(points_device) ext_module.points_in_boxes_part_forward(boxes.contiguous(), points.contiguous(), box_idxs_of_pts) return box_idxs_of_pts def points_in_boxes_cpu(points, boxes): """Find all boxes in which each point is (CPU). The CPU version of :meth:`points_in_boxes_all`. Args: points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate boxes (torch.Tensor): [B, T, 7], num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz], (x, y, z) is the bottom center. Returns: box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0. """ assert points.shape[0] == boxes.shape[0], \ 'Points and boxes should have the same batch size, ' \ f'but got {points.shape[0]} and {boxes.shape[0]}' assert boxes.shape[2] == 7, \ 'boxes dimension should be 7, ' \ f'but got unexpected shape {boxes.shape[2]}' assert points.shape[2] == 3, \ 'points dimension should be 3, ' \ f'but got unexpected shape {points.shape[2]}' batch_size, num_points, _ = points.shape num_boxes = boxes.shape[1] point_indices = points.new_zeros((batch_size, num_boxes, num_points), dtype=torch.int) for b in range(batch_size): ext_module.points_in_boxes_cpu_forward(boxes[b].float().contiguous(), points[b].float().contiguous(), point_indices[b]) point_indices = point_indices.transpose(1, 2) return point_indices def points_in_boxes_all(points, boxes): """Find all boxes in which each point is (CUDA). Args: points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate boxes (torch.Tensor): [B, T, 7], num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz], (x, y, z) is the bottom center. Returns: box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0. 
""" assert boxes.shape[0] == points.shape[0], \ 'Points and boxes should have the same batch size, ' \ f'but got {boxes.shape[0]} and {boxes.shape[0]}' assert boxes.shape[2] == 7, \ 'boxes dimension should be 7, ' \ f'but got unexpected shape {boxes.shape[2]}' assert points.shape[2] == 3, \ 'points dimension should be 3, ' \ f'but got unexpected shape {points.shape[2]}' batch_size, num_points, _ = points.shape num_boxes = boxes.shape[1] box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes), dtype=torch.int).fill_(0) # Same reason as line 25-32 points_device = points.get_device() assert points_device == boxes.get_device(), \ 'Points and boxes should be put on the same device' if torch.cuda.current_device() != points_device: torch.cuda.set_device(points_device) ext_module.points_in_boxes_all_forward(boxes.contiguous(), points.contiguous(), box_idxs_of_pts) return box_idxs_of_pts
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/ops/points_in_boxes.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from torch import nn from torch.autograd import Function from ..utils import ext_loader ext_module = ext_loader.load_ext( '_ext', ['dynamic_point_to_voxel_forward', 'dynamic_point_to_voxel_backward']) class _DynamicScatter(Function): @staticmethod def forward(ctx, feats, coors, reduce_type='max'): """convert kitti points(N, >=3) to voxels. Args: feats (torch.Tensor): [N, C]. Points features to be reduced into voxels. coors (torch.Tensor): [N, ndim]. Corresponding voxel coordinates (specifically multi-dim voxel index) of each points. reduce_type (str, optional): Reduce op. support 'max', 'sum' and 'mean'. Default: 'max'. Returns: voxel_feats (torch.Tensor): [M, C]. Reduced features, input features that shares the same voxel coordinates are reduced to one row. voxel_coors (torch.Tensor): [M, ndim]. Voxel coordinates. """ results = ext_module.dynamic_point_to_voxel_forward( feats, coors, reduce_type) (voxel_feats, voxel_coors, point2voxel_map, voxel_points_count) = results ctx.reduce_type = reduce_type ctx.save_for_backward(feats, voxel_feats, point2voxel_map, voxel_points_count) ctx.mark_non_differentiable(voxel_coors) return voxel_feats, voxel_coors @staticmethod def backward(ctx, grad_voxel_feats, grad_voxel_coors=None): (feats, voxel_feats, point2voxel_map, voxel_points_count) = ctx.saved_tensors grad_feats = torch.zeros_like(feats) # TODO: whether to use index put or use cuda_backward # To use index put, need point to voxel index ext_module.dynamic_point_to_voxel_backward( grad_feats, grad_voxel_feats.contiguous(), feats, voxel_feats, point2voxel_map, voxel_points_count, ctx.reduce_type) return grad_feats, None, None dynamic_scatter = _DynamicScatter.apply class DynamicScatter(nn.Module): """Scatters points into voxels, used in the voxel encoder with dynamic voxelization. Note: The CPU and GPU implementation get the same output, but have numerical difference after summation and division (e.g., 5e-7). Args: voxel_size (list): list [x, y, z] size of three dimension. point_cloud_range (list): The coordinate range of points, [x_min, y_min, z_min, x_max, y_max, z_max]. average_points (bool): whether to use avg pooling to scatter points into voxel. """ def __init__(self, voxel_size, point_cloud_range, average_points: bool): super().__init__() self.voxel_size = voxel_size self.point_cloud_range = point_cloud_range self.average_points = average_points def forward_single(self, points, coors): """Scatters points into voxels. Args: points (torch.Tensor): Points to be reduced into voxels. coors (torch.Tensor): Corresponding voxel coordinates (specifically multi-dim voxel index) of each points. Returns: voxel_feats (torch.Tensor): Reduced features, input features that shares the same voxel coordinates are reduced to one row. voxel_coors (torch.Tensor): Voxel coordinates. """ reduce = 'mean' if self.average_points else 'max' return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce) def forward(self, points, coors): """Scatters points/features into voxels. Args: points (torch.Tensor): Points to be reduced into voxels. coors (torch.Tensor): Corresponding voxel coordinates (specifically multi-dim voxel index) of each points. Returns: voxel_feats (torch.Tensor): Reduced features, input features that shares the same voxel coordinates are reduced to one row. voxel_coors (torch.Tensor): Voxel coordinates. 
""" if coors.size(-1) == 3: return self.forward_single(points, coors) else: batch_size = coors[-1, 0] + 1 voxels, voxel_coors = [], [] for i in range(batch_size): inds = torch.where(coors[:, 0] == i) voxel, voxel_coor = self.forward_single( points[inds], coors[inds][:, 1:]) coor_pad = nn.functional.pad( voxel_coor, (1, 0), mode='constant', value=i) voxel_coors.append(coor_pad) voxels.append(voxel) features = torch.cat(voxels, dim=0) feature_coors = torch.cat(voxel_coors, dim=0) return features, feature_coors def __repr__(self): s = self.__class__.__name__ + '(' s += 'voxel_size=' + str(self.voxel_size) s += ', point_cloud_range=' + str(self.point_cloud_range) s += ', average_points=' + str(self.average_points) s += ')' return s
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/ops/scatter_points.py
# Copyright (c) OpenMMLab. All rights reserved. # Code reference from "Temporal Interlacing Network" # https://github.com/deepcs233/TIN/blob/master/cuda_shift/rtc_wrap.py # Hao Shao, Shengju Qian, Yu Liu # [email protected], [email protected], [email protected] import torch import torch.nn as nn from torch.autograd import Function from ..utils import ext_loader ext_module = ext_loader.load_ext('_ext', ['tin_shift_forward', 'tin_shift_backward']) class TINShiftFunction(Function): @staticmethod def forward(ctx, input, shift): C = input.size(2) num_segments = shift.size(1) if C // num_segments <= 0 or C % num_segments != 0: raise ValueError('C should be a multiple of num_segments, ' f'but got C={C} and num_segments={num_segments}.') ctx.save_for_backward(shift) out = torch.zeros_like(input) ext_module.tin_shift_forward(input, shift, out) return out @staticmethod def backward(ctx, grad_output): shift = ctx.saved_tensors[0] data_grad_input = grad_output.new(*grad_output.size()).zero_() shift_grad_input = shift.new(*shift.size()).zero_() ext_module.tin_shift_backward(grad_output, shift, data_grad_input) return data_grad_input, shift_grad_input tin_shift = TINShiftFunction.apply class TINShift(nn.Module): """Temporal Interlace Shift. Temporal Interlace shift is a differentiable temporal-wise frame shifting which is proposed in "Temporal Interlacing Network" Please refer to https://arxiv.org/abs/2001.06499 for more details. Code is modified from https://github.com/mit-han-lab/temporal-shift-module """ def forward(self, input, shift): """Perform temporal interlace shift. Args: input (Tensor): Feature map with shape [N, num_segments, C, H * W]. shift (Tensor): Shift tensor with shape [N, num_segments]. Returns: Feature map after temporal interlace shift. """ return tin_shift(input, shift)
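# --- Usage sketch (editor's addition, not part of the upstream file). ---
# Shift each group of channels along the temporal axis; assumes a CUDA
# device and the compiled `_ext` extension. Shapes are illustrative:
# input is (N, num_segments, C, H*W) and shift is (N, num_segments).
if __name__ == '__main__':
    x = torch.rand(1, 4, 8, 49).cuda()
    shift = torch.randint(-2, 2, (1, 4)).int().cuda()
    out = TINShift()(x, shift)  # same shape as the input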
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/ops/tin_shift.py
import torch from torch.autograd import Function from ..utils import ext_loader ext_module = ext_loader.load_ext( '_ext', ['gather_points_forward', 'gather_points_backward']) class GatherPoints(Function): """Gather points with given index.""" @staticmethod def forward(ctx, features: torch.Tensor, indices: torch.Tensor) -> torch.Tensor: """ Args: features (Tensor): (B, C, N) features to gather. indices (Tensor): (B, M) where M is the number of points. Returns: Tensor: (B, C, M) where M is the number of points. """ assert features.is_contiguous() assert indices.is_contiguous() B, npoint = indices.size() _, C, N = features.size() output = torch.cuda.FloatTensor(B, C, npoint) ext_module.gather_points_forward( features, indices, output, b=B, c=C, n=N, npoints=npoint) ctx.for_backwards = (indices, C, N) if torch.__version__ != 'parrots': ctx.mark_non_differentiable(indices) return output @staticmethod def backward(ctx, grad_out): idx, C, N = ctx.for_backwards B, npoint = idx.size() grad_features = torch.cuda.FloatTensor(B, C, N).zero_() grad_out_data = grad_out.data.contiguous() ext_module.gather_points_backward( grad_out_data, idx, grad_features.data, b=B, c=C, n=N, npoints=npoint) return grad_features, None gather_points = GatherPoints.apply
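# --- Usage sketch (editor's addition, not part of the upstream file). ---
# Gather a subset of point features by index; assumes a CUDA device and
# the compiled `_ext` extension. Sizes are illustrative.
if __name__ == '__main__':
    feats = torch.rand(2, 16, 128).cuda()              # (B, C, N)
    idx = torch.randint(0, 128, (2, 32)).int().cuda()  # (B, M)
    sampled = gather_points(feats, idx)                # (B, C, M)
    assert sampled.shape == (2, 16, 32)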
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/ops/gather_points.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from torch.autograd import Function from ..utils import ext_loader ext_module = ext_loader.load_ext('_ext', ['ball_query_forward']) class BallQuery(Function): """Find nearby points in spherical space.""" @staticmethod def forward(ctx, min_radius: float, max_radius: float, sample_num: int, xyz: torch.Tensor, center_xyz: torch.Tensor) -> torch.Tensor: """ Args: min_radius (float): minimum radius of the balls. max_radius (float): maximum radius of the balls. sample_num (int): maximum number of features in the balls. xyz (Tensor): (B, N, 3) xyz coordinates of the features. center_xyz (Tensor): (B, npoint, 3) centers of the ball query. Returns: Tensor: (B, npoint, nsample) tensor with the indices of the features that form the query balls. """ assert center_xyz.is_contiguous() assert xyz.is_contiguous() assert min_radius < max_radius B, N, _ = xyz.size() npoint = center_xyz.size(1) idx = xyz.new_zeros(B, npoint, sample_num, dtype=torch.int) ext_module.ball_query_forward( center_xyz, xyz, idx, b=B, n=N, m=npoint, min_radius=min_radius, max_radius=max_radius, nsample=sample_num) if torch.__version__ != 'parrots': ctx.mark_non_differentiable(idx) return idx @staticmethod def backward(ctx, a=None): return None, None, None, None ball_query = BallQuery.apply
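# --- Usage sketch (editor's addition, not part of the upstream file). ---
# Query up to `sample_num` neighbors inside a radius shell around each
# center; assumes a CUDA device and the compiled `_ext` extension.
if __name__ == '__main__':
    xyz = torch.rand(1, 256, 3).cuda()           # (B, N, 3)
    centers = xyz[:, :16, :].contiguous()        # (B, npoint, 3)
    idx = ball_query(0., 0.4, 32, xyz, centers)  # (B, npoint, 32) indices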
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/ops/ball_query.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair, _single

from annotator.uniformer.mmcv.utils import deprecated_api_warning
from ..cnn import CONV_LAYERS
from ..utils import ext_loader, print_log

ext_module = ext_loader.load_ext(
    '_ext',
    ['modulated_deform_conv_forward', 'modulated_deform_conv_backward'])


class ModulatedDeformConv2dFunction(Function):

    @staticmethod
    def symbolic(g, input, offset, mask, weight, bias, stride, padding,
                 dilation, groups, deform_groups):
        input_tensors = [input, offset, mask, weight]
        if bias is not None:
            input_tensors.append(bias)
        return g.op(
            'mmcv::MMCVModulatedDeformConv2d',
            *input_tensors,
            stride_i=stride,
            padding_i=padding,
            dilation_i=dilation,
            groups_i=groups,
            deform_groups_i=deform_groups)

    @staticmethod
    def forward(ctx,
                input,
                offset,
                mask,
                weight,
                bias=None,
                stride=1,
                padding=0,
                dilation=1,
                groups=1,
                deform_groups=1):
        if input is not None and input.dim() != 4:
            raise ValueError(
                f'Expected 4D tensor as input, got {input.dim()}D tensor '
                'instead.')
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deform_groups = deform_groups
        ctx.with_bias = bias is not None
        if not ctx.with_bias:
            bias = input.new_empty(0)  # fake tensor
        # When the PyTorch version >= 1.6.0, amp is adopted for fp16 mode;
        # amp won't cast the type of the model (float32), but "offset" is
        # cast to float16 by nn.Conv2d automatically, leading to a type
        # mismatch with the input (when it is float32) or the weight.
        # The flag for whether to use fp16 or amp is the type of "offset",
        # so we cast the weight and input to temporarily support fp16 and
        # amp whatever the PyTorch version is.
input = input.type_as(offset) weight = weight.type_as(input) ctx.save_for_backward(input, offset, mask, weight, bias) output = input.new_empty( ModulatedDeformConv2dFunction._output_size(ctx, input, weight)) ctx._bufs = [input.new_empty(0), input.new_empty(0)] ext_module.modulated_deform_conv_forward( input, weight, bias, ctx._bufs[0], offset, mask, output, ctx._bufs[1], kernel_h=weight.size(2), kernel_w=weight.size(3), stride_h=ctx.stride[0], stride_w=ctx.stride[1], pad_h=ctx.padding[0], pad_w=ctx.padding[1], dilation_h=ctx.dilation[0], dilation_w=ctx.dilation[1], group=ctx.groups, deformable_group=ctx.deform_groups, with_bias=ctx.with_bias) return output @staticmethod @once_differentiable def backward(ctx, grad_output): input, offset, mask, weight, bias = ctx.saved_tensors grad_input = torch.zeros_like(input) grad_offset = torch.zeros_like(offset) grad_mask = torch.zeros_like(mask) grad_weight = torch.zeros_like(weight) grad_bias = torch.zeros_like(bias) grad_output = grad_output.contiguous() ext_module.modulated_deform_conv_backward( input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], grad_input, grad_weight, grad_bias, grad_offset, grad_mask, grad_output, kernel_h=weight.size(2), kernel_w=weight.size(3), stride_h=ctx.stride[0], stride_w=ctx.stride[1], pad_h=ctx.padding[0], pad_w=ctx.padding[1], dilation_h=ctx.dilation[0], dilation_w=ctx.dilation[1], group=ctx.groups, deformable_group=ctx.deform_groups, with_bias=ctx.with_bias) if not ctx.with_bias: grad_bias = None return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, None, None, None, None, None) @staticmethod def _output_size(ctx, input, weight): channels = weight.size(0) output_size = (input.size(0), channels) for d in range(input.dim() - 2): in_size = input.size(d + 2) pad = ctx.padding[d] kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1 stride_ = ctx.stride[d] output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) if not all(map(lambda s: s > 0, output_size)): raise ValueError( 'convolution input is too small (output would be ' + 'x'.join(map(str, output_size)) + ')') return output_size modulated_deform_conv2d = ModulatedDeformConv2dFunction.apply class ModulatedDeformConv2d(nn.Module): @deprecated_api_warning({'deformable_groups': 'deform_groups'}, cls_name='ModulatedDeformConv2d') def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deform_groups=1, bias=True): super(ModulatedDeformConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.groups = groups self.deform_groups = deform_groups # enable compatibility with nn.Conv2d self.transposed = False self.output_padding = _single(0) self.weight = nn.Parameter( torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.init_weights() def init_weights(self): n = self.in_channels for k in self.kernel_size: n *= k stdv = 1. 
/ math.sqrt(n) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.zero_() def forward(self, x, offset, mask): return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deform_groups) @CONV_LAYERS.register_module('DCNv2') class ModulatedDeformConv2dPack(ModulatedDeformConv2d): """A ModulatedDeformable Conv Encapsulation that acts as normal Conv layers. Args: in_channels (int): Same as nn.Conv2d. out_channels (int): Same as nn.Conv2d. kernel_size (int or tuple[int]): Same as nn.Conv2d. stride (int): Same as nn.Conv2d, while tuple is not supported. padding (int): Same as nn.Conv2d, while tuple is not supported. dilation (int): Same as nn.Conv2d, while tuple is not supported. groups (int): Same as nn.Conv2d. bias (bool or str): If specified as `auto`, it will be decided by the norm_cfg. Bias will be set as True if norm_cfg is None, otherwise False. """ _version = 2 def __init__(self, *args, **kwargs): super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs) self.conv_offset = nn.Conv2d( self.in_channels, self.deform_groups * 3 * self.kernel_size[0] * self.kernel_size[1], kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, dilation=self.dilation, bias=True) self.init_weights() def init_weights(self): super(ModulatedDeformConv2dPack, self).init_weights() if hasattr(self, 'conv_offset'): self.conv_offset.weight.data.zero_() self.conv_offset.bias.data.zero_() def forward(self, x): out = self.conv_offset(x) o1, o2, mask = torch.chunk(out, 3, dim=1) offset = torch.cat((o1, o2), dim=1) mask = torch.sigmoid(mask) return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deform_groups) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): version = local_metadata.get('version', None) if version is None or version < 2: # the key is different in early versions # In version < 2, ModulatedDeformConvPack # loads previous benchmark models. if (prefix + 'conv_offset.weight' not in state_dict and prefix[:-1] + '_offset.weight' in state_dict): state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( prefix[:-1] + '_offset.weight') if (prefix + 'conv_offset.bias' not in state_dict and prefix[:-1] + '_offset.bias' in state_dict): state_dict[prefix + 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + '_offset.bias') if version is not None and version > 1: print_log( f'ModulatedDeformConvPack {prefix.rstrip(".")} is upgraded to ' 'version 2.', logger='root') super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
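# --- Usage sketch (editor's addition, not part of the upstream file). ---
# ModulatedDeformConv2dPack (DCNv2) as a drop-in nn.Conv2d replacement;
# assumes a CUDA device and the compiled `_ext` extension.
if __name__ == '__main__':
    x = torch.randn(2, 3, 32, 32).cuda()
    dcn = ModulatedDeformConv2dPack(3, 8, kernel_size=3, padding=1).cuda()
    out = dcn(x)  # offsets and modulation masks come from `conv_offset`
    assert out.shape == (2, 8, 32, 32)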
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/ops/modulated_deform_conv.py
# Copyright (c) OpenMMLab. All rights reserved.
from abc import abstractmethod

import torch
import torch.nn as nn
import torch.nn.functional as F

from ..cnn import ConvModule


class BaseMergeCell(nn.Module):
    """The basic class for cells used in NAS-FPN and NAS-FCOS.

    BaseMergeCell takes 2 inputs. After applying convolution on them, they
    are resized to the target size. Then, they go through binary_op, which
    depends on the type of cell. If with_out_conv is True, the result of
    binary_op will go through another convolution layer.

    Args:
        fused_channels (int): number of input channels in out_conv layer.
        out_channels (int): number of output channels in out_conv layer.
        with_out_conv (bool): Whether to use out_conv layer
        out_conv_cfg (dict): Config dict for convolution layer, which should
            contain "groups", "kernel_size", "padding", "bias" to build
            out_conv layer.
        out_norm_cfg (dict): Config dict for normalization layer in out_conv.
        out_conv_order (tuple): The order of conv/norm/activation layers in
            out_conv.
        with_input1_conv (bool): Whether to use convolution on input1.
        with_input2_conv (bool): Whether to use convolution on input2.
        input_conv_cfg (dict): Config dict for building input1_conv layer
            and input2_conv layer, which is expected to contain the type of
            convolution. Default: None, which means using conv2d.
        input_norm_cfg (dict): Config dict for normalization layer in
            input1_conv and input2_conv layer. Default: None.
        upsample_mode (str): Interpolation method used to resize the output
            of input1_conv and input2_conv to target size. Currently, we
            support ['nearest', 'bilinear']. Default: 'nearest'.
    """

    def __init__(self,
                 fused_channels=256,
                 out_channels=256,
                 with_out_conv=True,
                 out_conv_cfg=dict(
                     groups=1, kernel_size=3, padding=1, bias=True),
                 out_norm_cfg=None,
                 out_conv_order=('act', 'conv', 'norm'),
                 with_input1_conv=False,
                 with_input2_conv=False,
                 input_conv_cfg=None,
                 input_norm_cfg=None,
                 upsample_mode='nearest'):
        super(BaseMergeCell, self).__init__()
        assert upsample_mode in ['nearest', 'bilinear']
        self.with_out_conv = with_out_conv
        self.with_input1_conv = with_input1_conv
        self.with_input2_conv = with_input2_conv
        self.upsample_mode = upsample_mode

        if self.with_out_conv:
            self.out_conv = ConvModule(
                fused_channels,
                out_channels,
                **out_conv_cfg,
                norm_cfg=out_norm_cfg,
                order=out_conv_order)

        self.input1_conv = self._build_input_conv(
            out_channels, input_conv_cfg,
            input_norm_cfg) if with_input1_conv else nn.Sequential()
        self.input2_conv = self._build_input_conv(
            out_channels, input_conv_cfg,
            input_norm_cfg) if with_input2_conv else nn.Sequential()

    def _build_input_conv(self, channel, conv_cfg, norm_cfg):
        return ConvModule(
            channel,
            channel,
            3,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            bias=True)

    @abstractmethod
    def _binary_op(self, x1, x2):
        pass

    def _resize(self, x, size):
        if x.shape[-2:] == size:
            return x
        elif x.shape[-2:] < size:
            return F.interpolate(x, size=size, mode=self.upsample_mode)
        else:
            assert x.shape[-2] % size[-2] == 0 and x.shape[-1] % size[-1] == 0
            kernel_size = x.shape[-1] // size[-1]
            x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size)
            return x

    def forward(self, x1, x2, out_size=None):
        assert x1.shape[:2] == x2.shape[:2]
        assert out_size is None or len(out_size) == 2
        if out_size is None:  # resize to the larger one
            out_size = max(x1.size()[2:], x2.size()[2:])

        x1 = self.input1_conv(x1)
        x2 = self.input2_conv(x2)

        x1 = self._resize(x1, out_size)
        x2 = self._resize(x2, out_size)

        x = self._binary_op(x1, x2)
        if self.with_out_conv:
            x = self.out_conv(x)
        return x


class SumCell(BaseMergeCell):
def __init__(self, in_channels, out_channels, **kwargs): super(SumCell, self).__init__(in_channels, out_channels, **kwargs) def _binary_op(self, x1, x2): return x1 + x2 class ConcatCell(BaseMergeCell): def __init__(self, in_channels, out_channels, **kwargs): super(ConcatCell, self).__init__(in_channels * 2, out_channels, **kwargs) def _binary_op(self, x1, x2): ret = torch.cat([x1, x2], dim=1) return ret class GlobalPoolingCell(BaseMergeCell): def __init__(self, in_channels=None, out_channels=None, **kwargs): super().__init__(in_channels, out_channels, **kwargs) self.global_pool = nn.AdaptiveAvgPool2d((1, 1)) def _binary_op(self, x1, x2): x2_att = self.global_pool(x2).sigmoid() return x2 + x2_att * x1
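# --- Usage sketch (editor's addition, not part of the upstream file). ---
# A CPU-only example of SumCell: the smaller input is upsampled to the
# larger one before the element-wise sum. Channel counts are illustrative.
if __name__ == '__main__':
    cell = SumCell(256, 256)
    x1 = torch.rand(1, 256, 16, 16)
    x2 = torch.rand(1, 256, 32, 32)
    out = cell(x1, x2)  # resized to the larger input
    assert out.shape == (1, 256, 32, 32)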
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/ops/merge_cells.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple

import torch
from torch import nn as nn
from torch.autograd import Function

from ..utils import ext_loader
from .ball_query import ball_query
from .knn import knn

ext_module = ext_loader.load_ext(
    '_ext', ['group_points_forward', 'group_points_backward'])


class QueryAndGroup(nn.Module):
    """Groups points with a ball query of radius.

    Args:
        max_radius (float): The maximum radius of the balls.
            If None is given, we will use kNN sampling instead of ball query.
        sample_num (int): Maximum number of features to gather in the ball.
        min_radius (float, optional): The minimum radius of the balls.
            Default: 0.
        use_xyz (bool, optional): Whether to use xyz.
            Default: True.
        return_grouped_xyz (bool, optional): Whether to return grouped xyz.
            Default: False.
        normalize_xyz (bool, optional): Whether to normalize xyz.
            Default: False.
        uniform_sample (bool, optional): Whether to sample uniformly.
            Default: False
        return_unique_cnt (bool, optional): Whether to return the count of
            unique samples. Default: False.
        return_grouped_idx (bool, optional): Whether to return grouped idx.
            Default: False.
    """

    def __init__(self,
                 max_radius,
                 sample_num,
                 min_radius=0,
                 use_xyz=True,
                 return_grouped_xyz=False,
                 normalize_xyz=False,
                 uniform_sample=False,
                 return_unique_cnt=False,
                 return_grouped_idx=False):
        super().__init__()
        self.max_radius = max_radius
        self.min_radius = min_radius
        self.sample_num = sample_num
        self.use_xyz = use_xyz
        self.return_grouped_xyz = return_grouped_xyz
        self.normalize_xyz = normalize_xyz
        self.uniform_sample = uniform_sample
        self.return_unique_cnt = return_unique_cnt
        self.return_grouped_idx = return_grouped_idx
        if self.return_unique_cnt:
            assert self.uniform_sample, \
                'uniform_sample should be True when ' \
                'returning the count of unique samples'
        if self.max_radius is None:
            assert not self.normalize_xyz, \
                'can not normalize grouped xyz when max_radius is None'

    def forward(self, points_xyz, center_xyz, features=None):
        """
        Args:
            points_xyz (Tensor): (B, N, 3) xyz coordinates of the features.
            center_xyz (Tensor): (B, npoint, 3) coordinates of the centroids.
            features (Tensor): (B, C, N) Descriptors of the features.

        Returns:
            Tensor: (B, 3 + C, npoint, sample_num) Grouped feature.
""" # if self.max_radius is None, we will perform kNN instead of ball query # idx is of shape [B, npoint, sample_num] if self.max_radius is None: idx = knn(self.sample_num, points_xyz, center_xyz, False) idx = idx.transpose(1, 2).contiguous() else: idx = ball_query(self.min_radius, self.max_radius, self.sample_num, points_xyz, center_xyz) if self.uniform_sample: unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) for i_batch in range(idx.shape[0]): for i_region in range(idx.shape[1]): unique_ind = torch.unique(idx[i_batch, i_region, :]) num_unique = unique_ind.shape[0] unique_cnt[i_batch, i_region] = num_unique sample_ind = torch.randint( 0, num_unique, (self.sample_num - num_unique, ), dtype=torch.long) all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) idx[i_batch, i_region, :] = all_ind xyz_trans = points_xyz.transpose(1, 2).contiguous() # (B, 3, npoint, sample_num) grouped_xyz = grouping_operation(xyz_trans, idx) grouped_xyz_diff = grouped_xyz - \ center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets if self.normalize_xyz: grouped_xyz_diff /= self.max_radius if features is not None: grouped_features = grouping_operation(features, idx) if self.use_xyz: # (B, C + 3, npoint, sample_num) new_features = torch.cat([grouped_xyz_diff, grouped_features], dim=1) else: new_features = grouped_features else: assert (self.use_xyz ), 'Cannot have not features and not use xyz as a feature!' new_features = grouped_xyz_diff ret = [new_features] if self.return_grouped_xyz: ret.append(grouped_xyz) if self.return_unique_cnt: ret.append(unique_cnt) if self.return_grouped_idx: ret.append(idx) if len(ret) == 1: return ret[0] else: return tuple(ret) class GroupAll(nn.Module): """Group xyz with feature. Args: use_xyz (bool): Whether to use xyz. """ def __init__(self, use_xyz: bool = True): super().__init__() self.use_xyz = use_xyz def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None): """ Args: xyz (Tensor): (B, N, 3) xyz coordinates of the features. new_xyz (Tensor): new xyz coordinates of the features. features (Tensor): (B, C, N) features to group. Returns: Tensor: (B, C + 3, 1, N) Grouped feature. """ grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) if features is not None: grouped_features = features.unsqueeze(2) if self.use_xyz: # (B, 3 + C, 1, N) new_features = torch.cat([grouped_xyz, grouped_features], dim=1) else: new_features = grouped_features else: new_features = grouped_xyz return new_features class GroupingOperation(Function): """Group feature with given index.""" @staticmethod def forward(ctx, features: torch.Tensor, indices: torch.Tensor) -> torch.Tensor: """ Args: features (Tensor): (B, C, N) tensor of features to group. indices (Tensor): (B, npoint, nsample) the indices of features to group with. Returns: Tensor: (B, C, npoint, nsample) Grouped features. """ features = features.contiguous() indices = indices.contiguous() B, nfeatures, nsample = indices.size() _, C, N = features.size() output = torch.cuda.FloatTensor(B, C, nfeatures, nsample) ext_module.group_points_forward(B, C, N, nfeatures, nsample, features, indices, output) ctx.for_backwards = (indices, N) return output @staticmethod def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients of the output from forward. Returns: Tensor: (B, C, N) gradient of the features. 
""" idx, N = ctx.for_backwards B, C, npoint, nsample = grad_out.size() grad_features = torch.cuda.FloatTensor(B, C, N).zero_() grad_out_data = grad_out.data.contiguous() ext_module.group_points_backward(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data) return grad_features, None grouping_operation = GroupingOperation.apply
trt-samples-for-hackathon-cn-master
Hackathon2023/controlnet/annotator/uniformer/mmcv/ops/group_points.py