rakhman-llm committed
Commit f783eea · verified · 1 Parent(s): 4053c4b

Training in progress, step 6000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8322b5283d76576b6243d188fe146f52a61201fd3c25ceb62074d59997bad9c2
+oid sha256:b8da66a9292141d41a6c48606ed339406a31277522edcf0302aa38aa658f2e9e
 size 891558696
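
The pointer files above and below follow the git-lfs pointer format: each records only the blob's sha256 oid and its byte size. A minimal sketch for checking a locally downloaded last-checkpoint/model.safetensors against the new pointer in this commit (expected values copied from the diff above; the same check applies to optimizer.pt, rng_state.pth, and scheduler.pt below):

import hashlib
import os

# Expected values copied from the updated LFS pointer in this commit.
EXPECTED_OID = "b8da66a9292141d41a6c48606ed339406a31277522edcf0302aa38aa658f2e9e"
EXPECTED_SIZE = 891558696
PATH = "last-checkpoint/model.safetensors"  # assumes a local checkout with LFS files pulled

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file through sha256 so the ~890 MB blob never sits in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch"
assert sha256_of(PATH) == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches its LFS pointer")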
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5a4cbaa339a503815e8f93b8496d408a46cf594a349bfa6e55b537a7341aed97
+oid sha256:d6e16c15bc4b1df3cf9e6fe1a2e38058b03a4f63c25e10991bd06af6c213d0c8
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:17e30f2d5aa88fa3a0cef0484b7fc130d9829c04f56c17d8644266ef1a1405ee
+oid sha256:34caa8055744af1af91499eaae75425a5f72db08ebfbc7b0e342b4b8e68e6533
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e13ecaded8f565c1efc7f8c1f8e87f707a0c1e13fed3f11510caa036a76b90cf
+oid sha256:a00f8130c8c43ba3b86ef986c4fb4241ebc0a44808b15806e2c7f948b3aa8ef4
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.142975893599335,
+  "epoch": 1.2468827930174564,
   "eval_steps": 500,
-  "global_step": 5500,
+  "global_step": 6000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -92,6 +92,13 @@
       "learning_rate": 1.2384316985314493e-05,
       "loss": 0.3599,
       "step": 5500
+    },
+    {
+      "epoch": 1.2468827930174564,
+      "grad_norm": 0.49429360032081604,
+      "learning_rate": 1.1692989747852592e-05,
+      "loss": 0.3737,
+      "step": 6000
     }
   ],
   "logging_steps": 500,
@@ -111,7 +118,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6697927838269440.0,
+  "total_flos": 7306885728829440.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null