"""
functions.py

Helper functions to 1) build a learning rate scheduler given an option object
and 2) initialize network weights.
"""
import torch
from torch.nn import init
from torch.optim import lr_scheduler


def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler.

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                              opt.lr_policy is the name of the learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.static_iters> iterations
    and linearly decay the rate to zero over the next <opt.decay_iters> iterations.
    For the other policies (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(iteration):
            # LambdaLR passes in the current step count; keep the base learning
            # rate for the first opt.static_iters steps, then decay linearly to
            # zero over the next opt.decay_iters steps.
            lr_l = 1.0 - max(0, iteration - opt.static_iters) / float(opt.decay_iters + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.decay_iters_step, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
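
# Usage sketch (hedged): `SimpleNamespace` below is a hypothetical stand-in for
# the real options object, and the flag values are illustrative only.
#
#     from types import SimpleNamespace
#     optimizer = torch.optim.Adam(net.parameters(), lr=2e-4)
#     scheduler = get_scheduler(optimizer, SimpleNamespace(lr_policy='plateau'))
#     ...
#     scheduler.step(val_loss)  # 'plateau' monitors a metric; the other
#                               # policies are stepped with no arguments.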


def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    'normal' was used in the original pix2pix and CycleGAN paper, but xavier and
    kaiming might work better for some applications. Feel free to experiment.
    """
    def init_func(m):  # the initialization function applied to every submodule
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            # BatchNorm's affine weight is not a matrix, so only normal
            # initialization applies; its bias is zeroed as above.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    net.apply(init_func)  # apply init_func recursively to every submodule
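
# Usage sketch (hedged; the layer shapes are arbitrary):
#
#     net = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))
#     init_weights(net, init_type='kaiming')  # Conv weight -> He-normal;
#                                             # BatchNorm2d weight -> N(1.0, init_gain)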


def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Return an initialized network.
    """
    if len(gpu_ids) > 0:
        # Register the device as the docstring promises (the standard
        # pix2pix/CycleGAN pattern): move the network to the first GPU,
        # then wrap it for multi-GPU data parallelism.
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type, init_gain=init_gain)
    return net
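

if __name__ == '__main__':
    # Minimal smoke test, a sketch rather than part of any training pipeline:
    # SimpleNamespace is a hypothetical stand-in for the options object, and
    # the flag values are illustrative only.
    from types import SimpleNamespace

    net = init_net(torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3),
                                       torch.nn.BatchNorm2d(8)),
                   init_type='xavier')
    optimizer = torch.optim.Adam(net.parameters(), lr=2e-4)
    opt = SimpleNamespace(lr_policy='linear', static_iters=5, decay_iters=5)
    scheduler = get_scheduler(optimizer, opt)
    for _ in range(10):
        optimizer.step()   # no gradients here, so effectively a no-op
        scheduler.step()   # advance the linear schedule by one iteration
        print(optimizer.param_groups[0]['lr'])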