masatochi committed (verified)
Commit 32bfaf6 · 1 Parent(s): a622ea9

Training in progress, step 85, checkpoint
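The changed files follow the usual layout of a Hugging Face Trainer checkpoint with a PEFT adapter: adapter_model.safetensors holds the adapter weights, while optimizer.pt, scheduler.pt, rng_state.pth, and trainer_state.json carry the optimizer, LR scheduler, RNG, and bookkeeping state needed to resume. As an illustrative sketch only (the model and dataset are supplied by the caller and are not part of this commit), resuming from such a folder typically looks like:

```python
# Illustrative sketch: resuming a Trainer run from a folder like "last-checkpoint/".
# The model and train_dataset are assumptions passed in by the caller, not part of this commit.
from transformers import Trainer, TrainingArguments

def resume_from_last_checkpoint(model, train_dataset, checkpoint_dir="last-checkpoint"):
    args = TrainingArguments(
        output_dir="outputs",
        per_device_train_batch_size=3,  # matches "train_batch_size": 3 in trainer_state.json
        logging_steps=1,                # matches "logging_steps": 1
    )
    trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
    # resume_from_checkpoint reloads optimizer.pt, scheduler.pt, rng_state.pth, and
    # trainer_state.json, so training continues from global_step 85.
    return trainer.train(resume_from_checkpoint=checkpoint_dir)
```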
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5f5d7b7c33ee6f990b99d2369cecf44560f2299c45d0cc4e6243426213d7ca5e
+oid sha256:8621b618b301f865cadc91959441872f5434678c1ac6424423e50c3e9e10cf52
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:786fe5cf7031fc54610c6ff1e3d066ffdeac9fedc533d1d2955573ddbe5c0477
+oid sha256:56c287583a4d39a4a8f3665ea1201526cd6abc65cda2be60f1f30c4992b325d2
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:01c1ee15529a526a9fe4c19508572c414c6ef23f4c5c37a9bdfac25ac8b91945
+oid sha256:826e666b3f1881e003df1e799293232a6d3ede1a55e213829eb507201e9190b8
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4bc75fc1c14b28d29d31fa9d4252536c919fc25a390fac3a1e8c09d6575b4029
+oid sha256:8d9707e13ab424365dde92daf30c033711d2caa0fada77309f65d41532581807
 size 1064
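The four files above are Git LFS pointers, so each diff only swaps the object hash (oid); the binaries themselves live in LFS storage and their sizes are unchanged. As a hedged, illustrative check, a locally pulled file can be verified against its pointer like this (the path is an assumption, relative to the repo root):

```python
# Sketch: verify a downloaded LFS object against the sha256 oid and size from its pointer.
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex sha256 digest."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

path = Path("last-checkpoint/adapter_model.safetensors")  # assumed local checkout with LFS pulled
expected_oid = "8621b618b301f865cadc91959441872f5434678c1ac6424423e50c3e9e10cf52"  # new pointer above
expected_size = 83945296

assert path.stat().st_size == expected_size, "size mismatch"
assert sha256_of(path) == expected_oid, "sha256 mismatch"
print("pointer and file agree")
```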
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.039122195733235526,
+  "epoch": 0.041567332966562745,
   "eval_steps": 34,
-  "global_step": 80,
+  "global_step": 85,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -591,6 +591,41 @@
       "learning_rate": 0.00016026346363792567,
       "loss": 1.042,
       "step": 80
+    },
+    {
+      "epoch": 0.03961122317990097,
+      "grad_norm": 1.3361854553222656,
+      "learning_rate": 0.00015877852522924732,
+      "loss": 1.1104,
+      "step": 81
+    },
+    {
+      "epoch": 0.040100250626566414,
+      "grad_norm": 1.0672283172607422,
+      "learning_rate": 0.00015727351400805052,
+      "loss": 0.7879,
+      "step": 82
+    },
+    {
+      "epoch": 0.04058927807323186,
+      "grad_norm": 1.6033365726470947,
+      "learning_rate": 0.00015574894393428855,
+      "loss": 1.0356,
+      "step": 83
+    },
+    {
+      "epoch": 0.0410783055198973,
+      "grad_norm": 1.579293131828308,
+      "learning_rate": 0.00015420533564724495,
+      "loss": 1.3671,
+      "step": 84
+    },
+    {
+      "epoch": 0.041567332966562745,
+      "grad_norm": 3.389050006866455,
+      "learning_rate": 0.0001526432162877356,
+      "loss": 1.0248,
+      "step": 85
     }
   ],
   "logging_steps": 1,
@@ -610,7 +645,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.5511633564401664e+17,
+  "total_flos": 3.773111066217677e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null