rakhman-llm committed on
Commit 40cfc49 · verified · 1 Parent(s): 9a311a9

Training in progress, step 41500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a43c2a0afd3692b4457c78e060bd95a6ca8422aa3f2bfb48776d966c67148820
+oid sha256:ddeba81fa20a347b7f4a8bf56911f542931a9c7a9a28e5b17d8415cffe0f05fc
 size 891558696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cb247ef281f39dbf138924be64b77fed104e544ac60daad36b17f01b15edabf4
+oid sha256:c534f106883dfc1dc4725defbacef030c7dba5f5e0888cc081c7906d9d0b8453
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83988fc1574bdb56177858ad2504e5d5e90771242080ec94288ec2934ea05174
+oid sha256:3b282c8a79340622eb958db3d6110bd8f623dfad615139665f1498d8b00a6bed
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a519ef81b846f8a03a3bd8bed22cf3df79611baad6f40e402df6bfc345f1aa61
+oid sha256:6abecd6b93d057c006d3ff371902f2a70b673a5fc3d8d5773b1c045cd3372a7b
 size 1064
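
The four pointer updates above follow the standard Git LFS stub format: `oid sha256:` is the SHA-256 digest of the stored file and `size` is its length in bytes. As a minimal sketch (assuming the repository has been cloned and `git lfs pull` has materialised the real files rather than the stubs), a local copy can be checked against the pointer committed here:

```python
import hashlib

# Values copied from the new model.safetensors pointer in this commit.
EXPECTED_OID = "ddeba81fa20a347b7f4a8bf56911f542931a9c7a9a28e5b17d8415cffe0f05fc"
EXPECTED_SIZE = 891558696

path = "last-checkpoint/model.safetensors"  # assumes a git-lfs checkout of this repo

digest = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
        size += len(chunk)

assert digest.hexdigest() == EXPECTED_OID, "content does not match the LFS oid"
assert size == EXPECTED_SIZE, "size does not match the LFS pointer"
```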
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.7333333333333334,
+  "epoch": 2.7666666666666666,
   "eval_steps": 500,
-  "global_step": 41000,
+  "global_step": 41500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2893,6 +2893,41 @@
       "learning_rate": 1.784888888888889e-06,
       "loss": 0.046,
       "step": 41000
+    },
+    {
+      "epoch": 2.74,
+      "grad_norm": 0.176757350564003,
+      "learning_rate": 1.7404444444444445e-06,
+      "loss": 0.0512,
+      "step": 41100
+    },
+    {
+      "epoch": 2.7466666666666666,
+      "grad_norm": 0.28724828362464905,
+      "learning_rate": 1.6960000000000002e-06,
+      "loss": 0.0514,
+      "step": 41200
+    },
+    {
+      "epoch": 2.7533333333333334,
+      "grad_norm": 0.654242753982544,
+      "learning_rate": 1.6515555555555556e-06,
+      "loss": 0.0497,
+      "step": 41300
+    },
+    {
+      "epoch": 2.76,
+      "grad_norm": 0.2873416543006897,
+      "learning_rate": 1.6071111111111112e-06,
+      "loss": 0.0492,
+      "step": 41400
+    },
+    {
+      "epoch": 2.7666666666666666,
+      "grad_norm": 0.28858011960983276,
+      "learning_rate": 1.5626666666666667e-06,
+      "loss": 0.0485,
+      "step": 41500
     }
   ],
   "logging_steps": 100,
@@ -2912,7 +2947,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9.986909405184e+16,
+  "total_flos": 1.0108700983296e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null