masatochi committed on
Commit 84f2117 · verified · 1 Parent(s): 2e7ce8b

Training in progress, step 60, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:23bdcdb819742c70bdf0df723db060d4d04d4c363558fc0104da041cc2899307
+oid sha256:7c31cd0c71db08607f32996491b6438bb92e616866984646085e7d8af2abd1e1
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:588827317948950102b9d55acdc57ce6c029fc8335233d8c5538f8928cb09ab6
+oid sha256:ef66b44ddce7a3b65d72c15fad996a5a16867811836823139c8eb5e5ea890c7f
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:501917edc59cb38fa8ba673663fd0069ec90987aba1d4647ea54c9fb6fe18e47
+oid sha256:1e2701e6433cbd351cde5d73f1b3cbd9cefb4bc8bfbd165ddcbca91b8495404c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:61487aeef4449a4dec50f3ae9ec76bca52908878863009a40746a21c237f51ad
+oid sha256:78248a64468e8e03af894427063f3f9a858b670b67d13949fb12f06211d294f4
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.026896509566599426,
+  "epoch": 0.029341646799926645,
   "eval_steps": 34,
-  "global_step": 55,
+  "global_step": 60,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -408,6 +408,41 @@
       "learning_rate": 0.00018951632913550626,
       "loss": 0.963,
       "step": 55
+    },
+    {
+      "epoch": 0.02738553701326487,
+      "grad_norm": 1.348286747932434,
+      "learning_rate": 0.0001886773685920062,
+      "loss": 1.0553,
+      "step": 56
+    },
+    {
+      "epoch": 0.027874564459930314,
+      "grad_norm": 1.2073681354522705,
+      "learning_rate": 0.0001878081248083698,
+      "loss": 1.0966,
+      "step": 57
+    },
+    {
+      "epoch": 0.028363591906595757,
+      "grad_norm": 1.29014253616333,
+      "learning_rate": 0.00018690889463055283,
+      "loss": 1.2225,
+      "step": 58
+    },
+    {
+      "epoch": 0.0288526193532612,
+      "grad_norm": 1.265650987625122,
+      "learning_rate": 0.00018597998514483725,
+      "loss": 0.9056,
+      "step": 59
+    },
+    {
+      "epoch": 0.029341646799926645,
+      "grad_norm": 1.264709711074829,
+      "learning_rate": 0.00018502171357296144,
+      "loss": 1.1566,
+      "step": 60
     }
   ],
   "logging_steps": 1,
@@ -427,7 +462,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.4414248075526144e+17,
+  "total_flos": 2.6633725173301248e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null