masatochi committed
Commit 0d74cc8 · verified · 1 parent: aaddaf6

Training in progress, step 90, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8621b618b301f865cadc91959441872f5434678c1ac6424423e50c3e9e10cf52
+oid sha256:5787ba009af86f1a7239b3beb2ab60528788a4de3b198109ebb7db1b5aab0ca5
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:56c287583a4d39a4a8f3665ea1201526cd6abc65cda2be60f1f30c4992b325d2
+oid sha256:e54e74ade3010c0ee6372f151aaa049a2a128e05c4b5bc14a43045214753657d
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:826e666b3f1881e003df1e799293232a6d3ede1a55e213829eb507201e9190b8
+oid sha256:fdf18d6697609a27a0b488fa23bfd024570519524cd30dfeaa261a878e17c189
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d9707e13ab424365dde92daf30c033711d2caa0fada77309f65d41532581807
+oid sha256:dfdc0543e9ce40f0d0b0ee9752d10d130598c759cc5a2bd973736f6096894d17
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.041567332966562745,
+  "epoch": 0.04401247019988997,
   "eval_steps": 34,
-  "global_step": 85,
+  "global_step": 90,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -626,6 +626,41 @@
       "learning_rate": 0.0001526432162877356,
       "loss": 1.0248,
       "step": 85
+    },
+    {
+      "epoch": 0.042056360413228196,
+      "grad_norm": 1.3093312978744507,
+      "learning_rate": 0.0001510631193180907,
+      "loss": 1.017,
+      "step": 86
+    },
+    {
+      "epoch": 0.04254538785989364,
+      "grad_norm": 2.124237537384033,
+      "learning_rate": 0.0001494655843399779,
+      "loss": 0.9459,
+      "step": 87
+    },
+    {
+      "epoch": 0.04303441530655908,
+      "grad_norm": 1.416883111000061,
+      "learning_rate": 0.00014785115691012864,
+      "loss": 1.0393,
+      "step": 88
+    },
+    {
+      "epoch": 0.04352344275322453,
+      "grad_norm": 1.1954100131988525,
+      "learning_rate": 0.00014622038835403133,
+      "loss": 1.4326,
+      "step": 89
+    },
+    {
+      "epoch": 0.04401247019988997,
+      "grad_norm": 1.1562423706054688,
+      "learning_rate": 0.00014457383557765386,
+      "loss": 0.9829,
+      "step": 90
     }
   ],
   "logging_steps": 1,
@@ -645,7 +680,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.773111066217677e+17,
+  "total_flos": 3.995058775995187e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null