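# Training configuration (Hydra-style): values below override base_config,
# because _self_ comes after it in the defaults list.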
defaults:
  - base_config
  - _self_
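
# Core run settings; ${devices} and ${epochs} are re-used in the trainer
# section below via interpolation.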
devices: [0,1,2,3]
epochs: 500
batch_size: 8
monitor_metric: "val_loss"
monitor_metric_mood: "val_loss_mood"
monitor_metric_va: "val_loss_va"
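
# Three checkpoint sections, one per monitored validation loss. Assumption:
# the training script feeds each one to a PyTorch Lightning ModelCheckpoint
# callback (the keys below match that callback's arguments). save_top_k: -1
# keeps a checkpoint for every epoch; checkpoint_va keeps only the best 5.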
checkpoint:
  monitor: "${monitor_metric}"
  filename: "{epoch:02d}-{${monitor_metric}:.4f}"
  save_top_k: -1
  mode: "min"
  auto_insert_metric_name: False
  save_last: True

checkpoint_mood:
  monitor: "${monitor_metric_mood}"
  filename: "mood-{epoch:02d}-{${monitor_metric_mood}:.4f}"
  save_top_k: -1
  mode: "min"
  auto_insert_metric_name: False
  save_last: True

checkpoint_va:
  monitor: "${monitor_metric_va}"
  filename: "va-{epoch:02d}-{${monitor_metric_va}:.4f}"
  save_top_k: 5
  mode: "min"
  auto_insert_metric_name: False
  save_last: True
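
# Stop training once the mood validation loss has not improved by at least
# 1e-4 for 10 consecutive checks.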
earlystopping:
  monitor: "${monitor_metric_mood}"
  patience: 10
  min_delta: 0.0001
  mode: "min"
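
# Trainer settings, filled from the top-level values via ${...} interpolation.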
trainer:
  devices: ${devices}
  max_epochs: ${epochs}
  accelerator: 'gpu'
  # strategy: 'ddp_find_unused_parameters_true'
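
# Example Hydra CLI overrides (a sketch; the entry-point script name is an
# assumption, not part of this file):
#   python train.py epochs=100 devices=[0] monitor_metric=val_loss_va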
# optimizer:
#   _target_: torch.optim.AdamW
#   _partial_: true
#   lr: 1e-4
#   weight_decay: 0.01
# scheduler:
#   _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
#   _partial_: true
#   cooldown: 5
#   mode: max
#   factor: 0.2
#   patience: 10
#   min_lr: 1.6e-7
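
# The _target_/_partial_ keys above suggest the optimizer and scheduler are
# built via hydra.utils.instantiate as partials, with the model parameters
# (and, for the scheduler, the optimizer instance) supplied later in code.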
# monitor_metric: "val_loss"
# # val_loss
# # val_loss_mood
# # val_loss_va
# checkpoint:
#   monitor: "${monitor_metric}"
#   filename: "{epoch:02d}-{${monitor_metric}:.4f}"
#   save_top_k: 2
#   mode: "min"
#   auto_insert_metric_name: False
#   save_last: True
# checkpoint:
#   monitor: "val_loss_mood"
#   filename: "{epoch:02d}-{val_loss_mood:.4f}"
#   save_top_k: 2
#   mode: "min"
#   auto_insert_metric_name: False
#   save_last: True
# earlystopping:
#   monitor: 'val_loss_mood'
#   patience: 10
#   min_delta: 0.0001
#   mode: "min"
# datasets:
#   - jamendo
#   - dmdd
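
# A minimal sketch of how the active sections are presumably consumed
# (assumption: `cfg`, the import path, and the wiring are illustrative,
# not taken from this repo):
#
#   from lightning.pytorch import Trainer
#   from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint
#
#   callbacks = [
#       ModelCheckpoint(**cfg.checkpoint),
#       ModelCheckpoint(**cfg.checkpoint_mood),
#       ModelCheckpoint(**cfg.checkpoint_va),
#       EarlyStopping(**cfg.earlystopping),
#   ]
#   trainer = Trainer(callbacks=callbacks, **cfg.trainer)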