'''by lyuwenyu
'''

import torch
import torch.nn as nn


class ConvNormLayer(nn.Module):
    '''Conv2d + BatchNorm2d + optional activation, with same-style padding by default.'''
    def __init__(self, ch_in, ch_out, kernel_size, stride, padding=None, bias=False, act=None):
        super().__init__()
        self.conv = nn.Conv2d(
            ch_in,
            ch_out,
            kernel_size,
            stride,
            padding=(kernel_size - 1) // 2 if padding is None else padding,
            bias=bias)
        self.norm = nn.BatchNorm2d(ch_out)
        self.act = nn.Identity() if act is None else get_activation(act)

    def forward(self, x):
        return self.act(self.norm(self.conv(x)))
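
# A minimal usage sketch (illustrative, not part of the original file): with the
# default padding of (kernel_size - 1) // 2, a stride-1 layer preserves spatial
# size for odd kernel sizes.
#
#   layer = ConvNormLayer(3, 64, kernel_size=3, stride=1, act='silu')
#   y = layer(torch.randn(1, 3, 224, 224))  # y.shape == (1, 64, 224, 224)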


class FrozenBatchNorm2d(nn.Module): |
|
"""copy and modified from https://github.com/facebookresearch/detr/blob/master/models/backbone.py |
|
BatchNorm2d where the batch statistics and the affine parameters are fixed. |
|
Copy-paste from torchvision.misc.ops with added eps before rqsrt, |
|
without which any other models than torchvision.models.resnet[18,34,50,101] |
|
produce nans. |
|
""" |
|
def __init__(self, num_features, eps=1e-5): |
|
super(FrozenBatchNorm2d, self).__init__() |
|
n = num_features |
|
self.register_buffer("weight", torch.ones(n)) |
|
self.register_buffer("bias", torch.zeros(n)) |
|
self.register_buffer("running_mean", torch.zeros(n)) |
|
self.register_buffer("running_var", torch.ones(n)) |
|
self.eps = eps |
|
self.num_features = n |
|
|
|
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, |
|
missing_keys, unexpected_keys, error_msgs): |
|
num_batches_tracked_key = prefix + 'num_batches_tracked' |
|
if num_batches_tracked_key in state_dict: |
|
del state_dict[num_batches_tracked_key] |
|
|
|
super(FrozenBatchNorm2d, self)._load_from_state_dict( |
|
state_dict, prefix, local_metadata, strict, |
|
missing_keys, unexpected_keys, error_msgs) |
|
|
|
def forward(self, x): |
|
|
|
|
|
w = self.weight.reshape(1, -1, 1, 1) |
|
b = self.bias.reshape(1, -1, 1, 1) |
|
rv = self.running_var.reshape(1, -1, 1, 1) |
|
rm = self.running_mean.reshape(1, -1, 1, 1) |
|
scale = w * (rv + self.eps).rsqrt() |
|
bias = b - rm * scale |
|
return x * scale + bias |
|
|
|
def extra_repr(self): |
|
return ( |
|
"{num_features}, eps={eps}".format(**self.__dict__) |
|
) |
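
# A hedged sketch (not from the original file) of the typical use: freezing the
# BN layers of a pretrained backbone by swapping each nn.BatchNorm2d for a
# FrozenBatchNorm2d that inherits its statistics and affine parameters. The
# num_batches_tracked entry is dropped automatically by _load_from_state_dict.
#
#   def freeze_batch_norm(module: nn.Module) -> nn.Module:
#       if isinstance(module, nn.BatchNorm2d):
#           frozen = FrozenBatchNorm2d(module.num_features, eps=module.eps)
#           frozen.load_state_dict(module.state_dict())
#           return frozen
#       for name, child in module.named_children():
#           setattr(module, name, freeze_batch_norm(child))
#       return module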


def get_activation(act, inplace: bool = True):
    '''get activation

    `act` may be a string name, an nn.Module instance, or None.
    '''
    if act is None:
        m = nn.Identity()

    elif isinstance(act, nn.Module):
        m = act

    else:
        # Only lower-case once we know `act` is a string.
        act = act.lower()

        if act == 'silu':
            m = nn.SiLU()

        elif act == 'relu':
            m = nn.ReLU()

        elif act == 'leaky_relu':
            m = nn.LeakyReLU()

        elif act == 'gelu':
            m = nn.GELU()

        else:
            raise RuntimeError(f'Unsupported activation: {act}')

    if hasattr(m, 'inplace'):
        m.inplace = inplace

    return m
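

# A small, hedged smoke test (not part of the original module): exercises
# ConvNormLayer, FrozenBatchNorm2d, and get_activation together.
if __name__ == '__main__':
    layer = ConvNormLayer(3, 64, kernel_size=3, stride=2, act='relu')
    x = torch.randn(2, 3, 224, 224)
    print(layer(x).shape)  # -> torch.Size([2, 64, 112, 112])

    fbn = FrozenBatchNorm2d(64)
    print(fbn)  # -> FrozenBatchNorm2d(64, eps=1e-05)

    print(get_activation('gelu'))  # -> GELU(...)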
|
|