rakhman-llm committed on
Commit d509308 · verified · 1 Parent(s): 9b8ad7f

Training in progress, step 2500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:92a0e91accf4228b98caf6fb1d285323d5a1184a6a49e46d0af865a8d6eb057c
+ oid sha256:8c1bb94b8a77419efd218015aa078f96d962dddb1d7d803a4037f71edb25081b
  size 891558696
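The pointer above tracks a roughly 850 MB safetensors shard via Git LFS. If you have a local clone with the LFS files pulled, a quick sanity check is to list a few tensors straight from the file; the snippet below is a minimal sketch, and the relative path assumes it is run from the repository root.

```python
# Minimal sketch: peek at the checkpointed weights without loading the full
# model. The path assumes a local clone of this repo with LFS files pulled.
from safetensors import safe_open

with safe_open("last-checkpoint/model.safetensors", framework="pt", device="cpu") as f:
    for name in list(f.keys())[:5]:                  # first few tensor names only
        print(name, tuple(f.get_tensor(name).shape))
```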
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1ba08a854f6c958770b50e1d0470ae9e0d583109948b4cdfe1d08bd223264cfd
+ oid sha256:c397e65bf99adf9a6e0565defce89f241ee1e376a6e7d7145c230f7427e8d116
  size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4e0045739043a1f47643b813cbabc473141dd79658dbb66a9080e37f01080f2a
+ oid sha256:36fe4f7b36a5388221274c24d05ee4e9199f494788d121bb8791d2cd878af911
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9866a2fe0d11157ea657698adf310adebc8af3b7e4ad7c6e0bfe9124b0e7f879
+ oid sha256:36df38c42a3a82df40d06ca73d44edf3651e608b1093fdf04dfb5064b7258c83
  size 1064
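Each of the checkpoint files above is stored as a Git LFS pointer: the `version`, `oid sha256:…`, and `size` fields fully describe the blob. A downloaded file can be checked against its pointer with plain hashlib; the sketch below uses the new scheduler.pt values from this diff and assumes it is run from the repository root of a local clone.

```python
import hashlib
import os

# Verify a downloaded file against its Git LFS pointer. The expected oid and
# size are copied from the new scheduler.pt pointer above.
path = "last-checkpoint/scheduler.pt"
expected_oid = "36df38c42a3a82df40d06ca73d44edf3651e608b1093fdf04dfb5064b7258c83"
expected_size = 1064

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("scheduler.pt matches its LFS pointer")
```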
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.13333333333333333,
+ "epoch": 0.16666666666666666,
  "eval_steps": 500,
- "global_step": 2000,
+ "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -147,6 +147,41 @@
  "learning_rate": 1.9112000000000003e-05,
  "loss": 0.0756,
  "step": 2000
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.3241085708141327,
+ "learning_rate": 1.9067555555555558e-05,
+ "loss": 0.0787,
+ "step": 2100
+ },
+ {
+ "epoch": 0.14666666666666667,
+ "grad_norm": 0.27297067642211914,
+ "learning_rate": 1.9023111111111113e-05,
+ "loss": 0.0788,
+ "step": 2200
+ },
+ {
+ "epoch": 0.15333333333333332,
+ "grad_norm": 0.08663206547498703,
+ "learning_rate": 1.897866666666667e-05,
+ "loss": 0.0775,
+ "step": 2300
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.29489508271217346,
+ "learning_rate": 1.8934222222222224e-05,
+ "loss": 0.0693,
+ "step": 2400
+ },
+ {
+ "epoch": 0.16666666666666666,
+ "grad_norm": 0.27022987604141235,
+ "learning_rate": 1.888977777777778e-05,
+ "loss": 0.0737,
+ "step": 2500
  }
  ],
  "logging_steps": 100,
@@ -166,7 +201,7 @@
  "attributes": {}
  }
  },
- "total_flos": 4871663124480000.0,
+ "total_flos": 6089578905600000.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5da3872b72c31dc28c756543e89c64090a6cb637a3a5de22e6dfe587939acf42
+ oid sha256:20d7bfd722c356d3138649856f039df4c593cfd472f7e9aa290a4571f1e6327f
  size 5432
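Unlike the other files, training_args.bin is a pickled TrainingArguments object rather than a tensor file, so it can be inspected with `torch.load` (with `weights_only=False` on recent PyTorch, and with `transformers` installed so the class can be unpickled). A minimal sketch, assuming a local clone:

```python
import torch

# training_args.bin stores a pickled transformers.TrainingArguments object,
# not tensors, so weights_only=False is required on recent PyTorch versions.
args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```

The same `last-checkpoint` folder can be passed to `Trainer.train(resume_from_checkpoint="last-checkpoint")` to continue the run from step 2500, restoring the optimizer, scheduler, and RNG state saved alongside the weights.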