gavrilstep committed · commit e687936 (verified) · 1 parent: 9911e78

Training in progress, step 95, checkpoint

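This commit updates the last-checkpoint/ folder that the Hugging Face Trainer pushes when it saves a checkpoint: the adapter weights, optimizer and scheduler state, RNG state, and trainer_state.json. Below is a minimal sketch of fetching just that folder from the Hub; the repository id is a placeholder (the repo name is not visible on this page), and the short revision may need to be replaced with the full commit SHA.

```python
# Sketch only: pull just the checkpoint folder touched by this commit.
# "gavrilstep/<repo-name>" is a placeholder -- the repository name is not
# shown in this diff -- and the short revision may need the full SHA.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="gavrilstep/<repo-name>",      # placeholder, not a real repo id
    revision="e687936",                    # the commit shown on this page
    allow_patterns=["last-checkpoint/*"],  # skip everything else in the repo
)
print("checkpoint downloaded to", local_dir)
```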
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f637121ce35d5914bb4d4da56f0273cb33d6a87e092ad743032eb48d171a679e
+oid sha256:310684c461415e0f62ae581c5b75592546654a93bc7bb5bd0acd595778734a4b
 size 191968
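The file above (like the three below) is stored as a Git LFS pointer: the repository only tracks the blob's sha256 (oid) and byte size, which is what changes in this diff. The ~192 KB size of adapter_model.safetensors is consistent with a small PEFT/LoRA adapter rather than full model weights, though the training setup itself is not part of this commit. A minimal sketch of checking a locally downloaded copy against its new pointer follows; the local path is an assumption.

```python
# Sketch: verify a downloaded checkpoint file against its Git LFS pointer
# (the "oid sha256:..." and "size ..." lines shown in this diff).
# The local path is an assumption about where the file was downloaded.
import hashlib
import os

path = "last-checkpoint/adapter_model.safetensors"  # assumed local path
expected_oid = "310684c461415e0f62ae581c5b75592546654a93bc7bb5bd0acd595778734a4b"
expected_size = 191968

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch with LFS pointer"
assert sha256.hexdigest() == expected_oid, "sha256 mismatch with LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")
```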
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dba34b5c73d351a314fcc2d8ad1b544b2c4b4e98182c2cc8497d9a0f851e9866
+oid sha256:05279a72b8a85f85a81b4dca093186f30ace4416bff59d98d82e3d80607cc8a1
 size 253144
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:912547e3439c087a494b30d9f86b186e0f5dd1b1bc4e41596aeb4a26e2e169e9
+oid sha256:a09ac20c395c9ed5d9434cf9abe75b4c8e2dafd97fa0cdf706b7b28e5d097bad
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f6423aaf07b0a3e5bef1b21c59ae6d997dd59505ca758247471609a32b152cd
+oid sha256:60ca561a785d3802440b426c58aafe0f1cf10dc4bab5c0b5dbec38821026a8aa
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.16927634363097757,
+  "epoch": 0.20101565806178587,
   "eval_steps": 55,
-  "global_step": 80,
+  "global_step": 95,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -205,6 +205,41 @@
       "learning_rate": 3.12696703292044e-05,
       "loss": 10.2394,
       "step": 78
+    },
+    {
+      "epoch": 0.1713922979263648,
+      "grad_norm": 0.2242937535047531,
+      "learning_rate": 2.2040354826462668e-05,
+      "loss": 10.2176,
+      "step": 81
+    },
+    {
+      "epoch": 0.17774016081252644,
+      "grad_norm": 0.3547563850879669,
+      "learning_rate": 1.4033009983067452e-05,
+      "loss": 10.1373,
+      "step": 84
+    },
+    {
+      "epoch": 0.1840880236986881,
+      "grad_norm": 0.16841305792331696,
+      "learning_rate": 7.597595192178702e-06,
+      "loss": 10.2393,
+      "step": 87
+    },
+    {
+      "epoch": 0.19043588658484978,
+      "grad_norm": 0.223463773727417,
+      "learning_rate": 3.0153689607045845e-06,
+      "loss": 10.2272,
+      "step": 90
+    },
+    {
+      "epoch": 0.19678374947101143,
+      "grad_norm": 0.21468529105186462,
+      "learning_rate": 4.865965629214819e-07,
+      "loss": 10.2198,
+      "step": 93
     }
   ],
   "logging_steps": 3,
@@ -219,12 +254,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 2788766515200.0,
+  "total_flos": 3311660236800.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null