rakhman-llm committed (verified)
Commit dec7bd6 · 1 Parent(s): e906792

Training in progress, step 4500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75a0187432d05655fa95e9e4af04e889f2b84a4214d4995b5dcbc3829f6fa995
+oid sha256:8fdbb9af804d15f5110973d90186ec84defa2a665a8482fd83c11a269501602d
 size 891558696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7eb7dc038fae5860bd92fb49fcb23e213f7099623915ac6963ae71d0e53c7159
+oid sha256:6e8eed0cff93a66dd9f7dc35bf13769c84be7bd70a4333fe001f86cc9ee212ba
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e2f6e8ef6ff0cccff7f602f9c9f7831d1edbb8062fe6f8d796324be0b7257b7f
+oid sha256:2ca0d52bc1b71ba08e7156c2b45d758a196d38988c6eda67b4df0e9d832fe78c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6be5c125c7805acd88ceb3e79bd43e0a5bfe5ffe9cebeb5d64fd8beda77909cd
+oid sha256:574db7da084c9454e4a06ea3487f7cdcb4dd46faa4038c9a6b4a085fb43bd4b6
 size 1064
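
Each of the four checkpoint files above is tracked with Git LFS, so the repository only stores a small pointer (version, sha256 oid, byte size) while the actual blob lives in LFS storage. After pulling the checkpoint, the pointer can be compared against the local file as a quick sanity check; the sketch below is illustrative and assumes the blobs sit under a local last-checkpoint/ directory (paths are hypothetical, not part of this commit).

import hashlib
from pathlib import Path

def lfs_pointer_matches(pointer_text: str, blob_path: Path, chunk_size: int = 1 << 20) -> bool:
    """Compare a Git LFS pointer (version/oid/size lines) with a local blob."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    # Cheap check first: the byte size must match the pointer.
    if blob_path.stat().st_size != expected_size:
        return False

    # Then hash the blob in chunks and compare against the recorded oid.
    digest = hashlib.sha256()
    with blob_path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Example using the new model.safetensors pointer from this commit
# (local path is an assumption):
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:8fdbb9af804d15f5110973d90186ec84defa2a665a8482fd83c11a269501602d
size 891558696
"""
print(lfs_pointer_matches(pointer, Path("last-checkpoint/model.safetensors")))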
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.26666666666666666,
+  "epoch": 0.3,
   "eval_steps": 500,
-  "global_step": 4000,
+  "global_step": 4500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -287,6 +287,41 @@
       "learning_rate": 1.8223111111111114e-05,
       "loss": 0.0621,
       "step": 4000
+    },
+    {
+      "epoch": 0.2733333333333333,
+      "grad_norm": 0.2718330919742584,
+      "learning_rate": 1.817866666666667e-05,
+      "loss": 0.0736,
+      "step": 4100
+    },
+    {
+      "epoch": 0.28,
+      "grad_norm": 0.25370103120803833,
+      "learning_rate": 1.8134222222222224e-05,
+      "loss": 0.0758,
+      "step": 4200
+    },
+    {
+      "epoch": 0.2866666666666667,
+      "grad_norm": 0.1705348938703537,
+      "learning_rate": 1.808977777777778e-05,
+      "loss": 0.0723,
+      "step": 4300
+    },
+    {
+      "epoch": 0.29333333333333333,
+      "grad_norm": 0.152600958943367,
+      "learning_rate": 1.8045333333333335e-05,
+      "loss": 0.0671,
+      "step": 4400
+    },
+    {
+      "epoch": 0.3,
+      "grad_norm": 0.24814434349536896,
+      "learning_rate": 1.800088888888889e-05,
+      "loss": 0.0752,
+      "step": 4500
     }
   ],
   "logging_steps": 100,
@@ -306,7 +341,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9743326248960000.0,
+  "total_flos": 1.096124203008e+16,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null