brixeus committed
Commit 059d424 · verified · 1 Parent(s): 60d4a64

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:743a17f9cb51b4373285054192f47b3d4cc01d30189d607632beb36868530e76
+oid sha256:6c2820bef08450717e312c546e4176785f4c6142863d3abd355111c7614db415
 size 37762064
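
The pointer above only swaps the SHA-256 object id; the size stays at 37762064 bytes, as expected when adapter weights change in value but not in shape. As a minimal sketch, a locally downloaded file can be checked against such a Git LFS pointer by hashing it with SHA-256 (the local path below is an assumption; the oid is taken from the new pointer above):

# Sketch: verify a downloaded file against its Git LFS pointer (path is hypothetical).
import hashlib

def sha256_of(path: str) -> str:
    # Stream the file so large checkpoints do not have to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected_oid = "6c2820bef08450717e312c546e4176785f4c6142863d3abd355111c7614db415"
actual_oid = sha256_of("last-checkpoint/adapter_model.safetensors")
print("match:", actual_oid == expected_oid)
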
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:051a2e2ecf48177a06afd5f75bf025458e1ea2dcf4c1e3232a10f4afb05824b2
+oid sha256:49aefdcc0d247db8f6d6a3dba5aceff2873f607081003c67843c8b094bbfe786
 size 19283770
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:74a6fed904b71ac961f57839a0cafc9ce692b4390791ad7d64834529ffdcd7fa
+oid sha256:3230f1bf402a17b12cbef0c7bdf70dd52d6c6e666f3e88ceffac3602f0ad66bd
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8ce05761f46e7cf72fb17a02e3a0ca15c9d25ce3babf590eeb40568923b8bac
+oid sha256:d2d754412c61116546142914503e7369d0cc35d3c380a07e5218f595d76b6d96
 size 1064
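
Together, the four updated files (adapter weights, optimizer state, RNG state, LR scheduler state) are what the Trainer writes per checkpoint so a run can be resumed deterministically. A minimal sketch for peeking at the smaller state files, assuming they have been downloaded locally (the paths and the exact key layout are assumptions):

# Sketch: inspect the optimizer/scheduler/RNG state saved alongside the adapter.
import torch

sched_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu")
print(sched_state)  # LR scheduler state, e.g. last_epoch and the last learning rate

rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu")
print(sorted(rng_state.keys()))  # typically snapshots of the python/numpy/cpu/cuda RNGs
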
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.8705639243125916,
-  "best_model_checkpoint": "miner_id_24/checkpoint-150",
-  "epoch": 0.364741641337386,
+  "best_metric": 0.8201571106910706,
+  "best_model_checkpoint": "miner_id_24/checkpoint-200",
+  "epoch": 0.48632218844984804,
   "eval_steps": 50,
-  "global_step": 150,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -389,6 +389,126 @@
       "eval_samples_per_second": 148.561,
       "eval_steps_per_second": 37.301,
       "step": 150
+    },
+    {
+      "epoch": 0.3720364741641337,
+      "grad_norm": 1461.7186279296875,
+      "learning_rate": 1.435357758543015e-05,
+      "loss": 5.1148,
+      "step": 153
+    },
+    {
+      "epoch": 0.37933130699088147,
+      "grad_norm": 1224.365478515625,
+      "learning_rate": 1.2658926150792322e-05,
+      "loss": 3.9842,
+      "step": 156
+    },
+    {
+      "epoch": 0.38662613981762917,
+      "grad_norm": 635.9539794921875,
+      "learning_rate": 1.1056136061894384e-05,
+      "loss": 3.4588,
+      "step": 159
+    },
+    {
+      "epoch": 0.3939209726443769,
+      "grad_norm": 797.1043701171875,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 4.1315,
+      "step": 162
+    },
+    {
+      "epoch": 0.4012158054711246,
+      "grad_norm": 592.1983032226562,
+      "learning_rate": 8.141676086873572e-06,
+      "loss": 3.3113,
+      "step": 165
+    },
+    {
+      "epoch": 0.4085106382978723,
+      "grad_norm": 751.4596557617188,
+      "learning_rate": 6.837175952121306e-06,
+      "loss": 3.6361,
+      "step": 168
+    },
+    {
+      "epoch": 0.4158054711246201,
+      "grad_norm": 1344.918701171875,
+      "learning_rate": 5.6388590278194096e-06,
+      "loss": 3.3631,
+      "step": 171
+    },
+    {
+      "epoch": 0.4231003039513678,
+      "grad_norm": 934.5332641601562,
+      "learning_rate": 4.549673247541875e-06,
+      "loss": 3.4403,
+      "step": 174
+    },
+    {
+      "epoch": 0.43039513677811553,
+      "grad_norm": 732.6083374023438,
+      "learning_rate": 3.5722980755146517e-06,
+      "loss": 4.0096,
+      "step": 177
+    },
+    {
+      "epoch": 0.4376899696048632,
+      "grad_norm": 591.6499633789062,
+      "learning_rate": 2.7091379149682685e-06,
+      "loss": 3.5339,
+      "step": 180
+    },
+    {
+      "epoch": 0.4449848024316109,
+      "grad_norm": 957.0849609375,
+      "learning_rate": 1.962316193157593e-06,
+      "loss": 3.0528,
+      "step": 183
+    },
+    {
+      "epoch": 0.4522796352583587,
+      "grad_norm": 605.1629638671875,
+      "learning_rate": 1.333670137599713e-06,
+      "loss": 3.1099,
+      "step": 186
+    },
+    {
+      "epoch": 0.4595744680851064,
+      "grad_norm": 631.7711181640625,
+      "learning_rate": 8.247462563808817e-07,
+      "loss": 3.2638,
+      "step": 189
+    },
+    {
+      "epoch": 0.4668693009118541,
+      "grad_norm": 571.2025146484375,
+      "learning_rate": 4.367965336512403e-07,
+      "loss": 3.2275,
+      "step": 192
+    },
+    {
+      "epoch": 0.47416413373860183,
+      "grad_norm": 574.7122192382812,
+      "learning_rate": 1.7077534966650766e-07,
+      "loss": 3.141,
+      "step": 195
+    },
+    {
+      "epoch": 0.48145896656534953,
+      "grad_norm": 524.1749267578125,
+      "learning_rate": 2.7337132953697554e-08,
+      "loss": 2.3163,
+      "step": 198
+    },
+    {
+      "epoch": 0.48632218844984804,
+      "eval_loss": 0.8201571106910706,
+      "eval_runtime": 4.6233,
+      "eval_samples_per_second": 149.894,
+      "eval_steps_per_second": 37.636,
+      "step": 200
     }
   ],
   "logging_steps": 3,
@@ -412,12 +532,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
      "attributes": {}
     }
   },
-  "total_flos": 4940298253762560.0,
+  "total_flos": 6608875809669120.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null