leixa committed
Commit bf308ef · verified · 1 Parent(s): 15e398a

Training in progress, step 340, checkpoint
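The files below are the standard Hugging Face Transformers Trainer checkpoint layout: adapter weights, optimizer and scheduler state, RNG state, and trainer_state.json. A minimal sketch (not part of this commit) of inspecting the committed state, assuming the repository has been cloned so the relative path exists; "log_history" is the standard Trainer field that holds the per-step entries shown in the trainer_state.json diff further down:

```python
import json

# Minimal sketch: inspect the checkpoint state this commit updates.
# Assumes the repo has been cloned so the relative path below exists.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])        # 340 after this commit
print(state["epoch"])              # 0.18894137260350097
print(state["log_history"][-1])    # the step-340 eval record added in this diff
```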

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7e58f921024f7d4a3f5be499e38ac1d140150c729cf63f5886172b49d191c20e
+ oid sha256:395f189336ee88587c08a855a6366bce7e35606805660d672365db26c72fd926
  size 132164608
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5067e99c0251a2e383bd50725f2634c2eb1ccbd90a93ba4cc16d409825011bcc
+ oid sha256:fb1e33a31bab7e692ff019d655243137d119d1d215b418632c4a4c1baba39958
  size 67488212
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ab4ceaa83c98da3eb4ec0aef1f69381cfa8e51d85f3e4d300d824d547bfebb9f
+ oid sha256:55795c4e5c41785c92dfd03c9be6dc5bae6b52fb124698ad8721a4a33a04ef8f
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:542eeb761eff9bd2c88163850a5018d7ed947bdab57ea917e6e376b6cb0c0259
+ oid sha256:e6999f9aad8d44fbf7db1d80d56ad86630abb8e28a7187e80ed24f8546462146
  size 1064
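Each of the four pointer updates above follows the three-line Git LFS pointer format (version / oid sha256:&lt;digest&gt; / size &lt;bytes&gt;). A minimal verification sketch, with placeholder local paths, for checking a downloaded blob against its pointer:

```python
import hashlib

# Minimal sketch (paths are placeholders): check a downloaded blob against the
# three-line Git LFS pointer format shown above.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify_blob(pointer_path, blob_path):
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].partition(":")[2]   # strip the "sha256:" prefix
    expected_size = int(fields["size"])
    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Example (hypothetical local paths):
# verify_blob("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```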
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.1700472353431509,
+ "epoch": 0.18894137260350097,
  "eval_steps": 34,
- "global_step": 306,
+ "global_step": 340,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -801,6 +801,91 @@
  "eval_samples_per_second": 7.828,
  "eval_steps_per_second": 0.979,
  "step": 306
+ },
+ {
+ "epoch": 0.17171436510141705,
+ "grad_norm": 2.220491886138916,
+ "learning_rate": 6.421379363065142e-06,
+ "loss": 5.3908,
+ "step": 309
+ },
+ {
+ "epoch": 0.17338149485968324,
+ "grad_norm": 2.2047910690307617,
+ "learning_rate": 6.022586521156715e-06,
+ "loss": 5.2721,
+ "step": 312
+ },
+ {
+ "epoch": 0.17504862461794943,
+ "grad_norm": 2.1623401641845703,
+ "learning_rate": 5.634875954308638e-06,
+ "loss": 5.5073,
+ "step": 315
+ },
+ {
+ "epoch": 0.17671575437621562,
+ "grad_norm": 1.9954192638397217,
+ "learning_rate": 5.258474074573877e-06,
+ "loss": 5.1791,
+ "step": 318
+ },
+ {
+ "epoch": 0.1783828841344818,
+ "grad_norm": 2.24808669090271,
+ "learning_rate": 4.893600690050579e-06,
+ "loss": 5.0704,
+ "step": 321
+ },
+ {
+ "epoch": 0.180050013892748,
+ "grad_norm": 2.2592039108276367,
+ "learning_rate": 4.540468876520323e-06,
+ "loss": 5.0177,
+ "step": 324
+ },
+ {
+ "epoch": 0.18171714365101416,
+ "grad_norm": 1.9192252159118652,
+ "learning_rate": 4.199284853017896e-06,
+ "loss": 5.2738,
+ "step": 327
+ },
+ {
+ "epoch": 0.18338427340928035,
+ "grad_norm": 2.021440267562866,
+ "learning_rate": 3.8702478614051355e-06,
+ "loss": 4.6439,
+ "step": 330
+ },
+ {
+ "epoch": 0.18505140316754654,
+ "grad_norm": 2.309406042098999,
+ "learning_rate": 3.5535500500193357e-06,
+ "loss": 5.4409,
+ "step": 333
+ },
+ {
+ "epoch": 0.18671853292581272,
+ "grad_norm": 2.2390878200531006,
+ "learning_rate": 3.249376361464021e-06,
+ "loss": 5.1074,
+ "step": 336
+ },
+ {
+ "epoch": 0.18838566268407891,
+ "grad_norm": 2.540015697479248,
+ "learning_rate": 2.957904424607652e-06,
+ "loss": 5.2675,
+ "step": 339
+ },
+ {
+ "epoch": 0.18894137260350097,
+ "eval_loss": 1.2895426750183105,
+ "eval_runtime": 387.2989,
+ "eval_samples_per_second": 7.826,
+ "eval_steps_per_second": 0.979,
+ "step": 340
  }
  ],
  "logging_steps": 3,
@@ -820,7 +905,7 @@
  "attributes": {}
  }
  },
- "total_flos": 6.10135319642112e+17,
+ "total_flos": 6.76579980398592e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null