Commit 4db7e97 (verified) · committed by rakhman-llm · 1 parent: 6e4ce5e

Training in progress, step 42500, checkpoint

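Only the last-checkpoint/ directory changes in this commit. A minimal sketch for pulling exactly this revision with huggingface_hub; the repository id is not shown on this page, so "rakhman-llm/REPO_NAME" below is a placeholder:

from huggingface_hub import snapshot_download

# Hypothetical repo id -- replace with the repository this commit belongs to.
local_dir = snapshot_download(
    repo_id="rakhman-llm/REPO_NAME",
    revision="4db7e97",                    # commit shown above (use the full hash if the short form is not accepted)
    allow_patterns=["last-checkpoint/*"],  # restrict the download to the checkpoint files in this diff
)
print("checkpoint downloaded to", local_dir)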
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:745b8b1d7e5bd21d472de4928ff7acd2ffa83a677380359019a9c4799912b8d9
+oid sha256:6a9ad8913737016dba8650f3d7e7a145a4ce8913d458d7301634297efd00278c
 size 891558696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff26510f9fa790ef0a5e5f59b48491824122de83388e6b8dafee62d5541a9a5f
+oid sha256:674d8858e4474544083140dd69e34c045fc6b88c296cb2ac0d991075b6a9ebd1
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1538e2c63d940ef0e1b73a9e8d73309e15231b1a12e77d8cac6d2db2463579dc
+oid sha256:07401c37c6a1bb4042ae60703b7c654f256d7be062ccd8d53ae2bf95d64aab34
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:89058407f5b943be63f8348b693549eede8380db14dc40de0b317aff021fa0d0
+oid sha256:03af6009394f4a2fb32c0cb40ddd5c5c4f50ce08dbb2d5a4f3d15eba8d0516e9
 size 1064
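The four files above are Git LFS pointers: each diff only swaps the sha256 oid while the stored size stays the same, as expected when a checkpoint of fixed shape is overwritten in place. A minimal sketch, assuming the LFS payloads have been pulled locally, to confirm each downloaded file matches the oid recorded in this commit:

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file through sha256 so large checkpoints never load fully into RAM."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digests taken from the new (+) pointer lines in this commit.
expected = {
    "last-checkpoint/model.safetensors": "6a9ad8913737016dba8650f3d7e7a145a4ce8913d458d7301634297efd00278c",
    "last-checkpoint/optimizer.pt":      "674d8858e4474544083140dd69e34c045fc6b88c296cb2ac0d991075b6a9ebd1",
    "last-checkpoint/rng_state.pth":     "07401c37c6a1bb4042ae60703b7c654f256d7be062ccd8d53ae2bf95d64aab34",
    "last-checkpoint/scheduler.pt":      "03af6009394f4a2fb32c0cb40ddd5c5c4f50ce08dbb2d5a4f3d15eba8d0516e9",
}

for name, oid in expected.items():
    print(name, "OK" if sha256_of(Path(name)) == oid else "MISMATCH")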
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.8,
+  "epoch": 2.8333333333333335,
   "eval_steps": 500,
-  "global_step": 42000,
+  "global_step": 42500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2963,6 +2963,41 @@
       "learning_rate": 1.3404444444444445e-06,
       "loss": 0.0504,
       "step": 42000
+    },
+    {
+      "epoch": 2.8066666666666666,
+      "grad_norm": 0.984207272529602,
+      "learning_rate": 1.296e-06,
+      "loss": 0.0487,
+      "step": 42100
+    },
+    {
+      "epoch": 2.8133333333333335,
+      "grad_norm": 0.0887194350361824,
+      "learning_rate": 1.2515555555555556e-06,
+      "loss": 0.0543,
+      "step": 42200
+    },
+    {
+      "epoch": 2.82,
+      "grad_norm": 0.17428313195705414,
+      "learning_rate": 1.2071111111111113e-06,
+      "loss": 0.0543,
+      "step": 42300
+    },
+    {
+      "epoch": 2.8266666666666667,
+      "grad_norm": 0.05723314359784126,
+      "learning_rate": 1.1626666666666667e-06,
+      "loss": 0.0496,
+      "step": 42400
+    },
+    {
+      "epoch": 2.8333333333333335,
+      "grad_norm": 0.13802002370357513,
+      "learning_rate": 1.1182222222222224e-06,
+      "loss": 0.0549,
+      "step": 42500
     }
   ],
   "logging_steps": 100,
@@ -2982,7 +3017,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.0230492561408e+17,
+  "total_flos": 1.035228413952e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null