leixa committed
Commit c25111d · verified · 1 Parent(s): 953b982

Training in progress, step 238, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1c931d5dcab4934d664c86b7b9f1cc7cd35a706b9856206ace9d697eb010c61d
+oid sha256:2197374a33c735028aa536e5fe3af5bc9dc788ab33b4fdcd1ffde4b50ce69bbb
 size 132164608
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29b3f55f7dbe9de2ff2648d336ff782d60d2eaf477c10452e4200763918493d8
+oid sha256:0c2cf7495a13cc47c2d3a8e0b35e97a8a7aa07a0004cdc17b247f3a74ecf175a
 size 67487892
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a6936f48172630f3b195d633d05bbdd084bbd64378cb9f0296e98ae7438be100
+oid sha256:0d67ee95f12db6e88f171eb80bca0d2075b72de13453defea2104a6a62511134
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5f9839d107756d9c8815de9164f2ebf92c05b3536704a349ca5892084df7663e
+oid sha256:bbcef9424696e41c7961bd91f0570d39d59ef33af28ed19a0eb9e4f50ed1b09a
 size 1064
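
Each pointer file above stores only a `version` line, an `oid sha256:` digest, and a `size`; the checkpoint binaries themselves live in Git LFS storage. As an illustrative sketch (not part of the commit), the digest can be recomputed after `git lfs pull` and compared against the pointer; the local path and the choice of `scheduler.pt` here are assumptions:

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large checkpoint blobs need not fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# Values copied from the scheduler.pt pointer in the diff above.
expected_oid = "bbcef9424696e41c7961bd91f0570d39d59ef33af28ed19a0eb9e4f50ed1b09a"
expected_size = 1064

# Assumed local clone layout; adjust to wherever the repository was checked out.
path = Path("last-checkpoint/scheduler.pt")
assert path.stat().st_size == expected_size, "size does not match the LFS pointer"
assert sha256_of(path) == expected_oid, "sha256 does not match the LFS pointer"
print("scheduler.pt matches its LFS pointer")
```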
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.11336482356210058,
+  "epoch": 0.13225896082245067,
   "eval_steps": 34,
-  "global_step": 204,
+  "global_step": 238,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -539,6 +539,91 @@
       "eval_samples_per_second": 7.822,
       "eval_steps_per_second": 0.978,
       "step": 204
+    },
+    {
+      "epoch": 0.11503195332036677,
+      "grad_norm": 2.1243629455566406,
+      "learning_rate": 2.459724913431772e-05,
+      "loss": 5.1268,
+      "step": 207
+    },
+    {
+      "epoch": 0.11669908307863296,
+      "grad_norm": 1.8287479877471924,
+      "learning_rate": 2.399335149726463e-05,
+      "loss": 4.8911,
+      "step": 210
+    },
+    {
+      "epoch": 0.11836621283689913,
+      "grad_norm": 1.827951431274414,
+      "learning_rate": 2.3390041714589514e-05,
+      "loss": 5.0788,
+      "step": 213
+    },
+    {
+      "epoch": 0.12003334259516532,
+      "grad_norm": 1.9181324243545532,
+      "learning_rate": 2.2787672102216042e-05,
+      "loss": 5.2716,
+      "step": 216
+    },
+    {
+      "epoch": 0.12170047235343151,
+      "grad_norm": 2.334996461868286,
+      "learning_rate": 2.2186594427034864e-05,
+      "loss": 5.4529,
+      "step": 219
+    },
+    {
+      "epoch": 0.1233676021116977,
+      "grad_norm": 2.042280673980713,
+      "learning_rate": 2.1587159701481716e-05,
+      "loss": 4.8902,
+      "step": 222
+    },
+    {
+      "epoch": 0.1250347318699639,
+      "grad_norm": 1.9080718755722046,
+      "learning_rate": 2.098971797855599e-05,
+      "loss": 4.933,
+      "step": 225
+    },
+    {
+      "epoch": 0.12670186162823005,
+      "grad_norm": 1.7101670503616333,
+      "learning_rate": 2.0394618147399713e-05,
+      "loss": 5.0186,
+      "step": 228
+    },
+    {
+      "epoch": 0.12836899138649624,
+      "grad_norm": 2.011359453201294,
+      "learning_rate": 1.980220772955602e-05,
+      "loss": 4.7936,
+      "step": 231
+    },
+    {
+      "epoch": 0.13003612114476243,
+      "grad_norm": 2.302273750305176,
+      "learning_rate": 1.921283267602643e-05,
+      "loss": 5.1487,
+      "step": 234
+    },
+    {
+      "epoch": 0.13170325090302862,
+      "grad_norm": 2.4797189235687256,
+      "learning_rate": 1.8626837165245165e-05,
+      "loss": 5.2862,
+      "step": 237
+    },
+    {
+      "epoch": 0.13225896082245067,
+      "eval_loss": 1.303543210029602,
+      "eval_runtime": 387.3037,
+      "eval_samples_per_second": 7.826,
+      "eval_steps_per_second": 0.979,
+      "step": 238
     }
   ],
   "logging_steps": 3,
@@ -558,7 +643,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.07623549249536e+17,
+  "total_flos": 4.75512659152896e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null