---
# Transformer Settings
# Architecture hyperparameters for the (masked-autoencoder-style) transformer.
Model:
  # - transformer-
  in_chans: 1  # input channels — presumably grayscale images; confirm against loader
  encoder_depth: 24  # 24
  embed_dim: 1024  # 1024
  decoder_depth: 24  # 8
  decoder_embed_dim: 1024
  num_heads: 16
  drop_rate: 0.01
  # canonical lowercase boolean (was `True`; yamllint `truthy`)
  norm_before: true
# Optimizer, schedule, and checkpointing settings.
Train:
  lr: 0.0005  # 0.0005
  beta1: 0.9  # adam optimizer beta1
  beta2: 0.95  # adam optimizer beta2
  weight_decay: 0.01
  gamma: 0.5  # LR decay factor — presumably applied every `drop_steps`; verify in scheduler
  decay_type: 'linear'  # 'constant', 'milestone', 'linear'
  max_iters: 600000  # 300000
  drop_steps: 120000
  num_warmup: 20000  # 10000 # 10000
  batch_size: 384
  accum_grad: 1  # gradient accumulation steps
  grad_clip: 0.1
  label_smooth: 0  # 0.1
  # canonical lowercase boolean (was `False`; yamllint `truthy`)
  restore: false  # whether to continue learning
  pretrain_weight: ''  # set '' as None
  log_interval: 1  # step
  sample_interval: 100
  save_interval: 5000  # 40000
# DATA Settings
Data:
  path: './DATACENTER/UKB/2_0_hdf5'
  # canonical lowercase booleans below (were `False`; yamllint `truthy`)
  flip: false  # random horizontal flip augmentation — confirm against dataset code
  center_crop: false
  image_size: [256, 256]  # [h, w]
  patch_size: 16
  norm: false  # norm to [-1, 1] otherwise [0, 1]
  val_ratio: 0.01