leixa committed (verified)
Commit 455f629 · 1 Parent(s): 164bf5d

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:76fcf8d306caeb043a3cb5d43f6e62efae99a94cce586c3b5974df9a6d484fad
+oid sha256:63110badfdf85f0bb2550baf34aab56827181068f2ceaca717cc3f89eaf5b58c
 size 147770496
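
adapter_model.safetensors holds only the PEFT adapter weights saved at step 600; the base model is not part of this checkpoint. As a hedged sketch (the base model ID below is a placeholder, assuming a causal-LM base and a standard PEFT adapter directory), the checkpoint could be loaded roughly like this:

    # Sketch only: "BASE_MODEL_ID" is a placeholder; the base model is not recorded in this diff.
    from transformers import AutoModelForCausalLM
    from peft import PeftModel

    base = AutoModelForCausalLM.from_pretrained("BASE_MODEL_ID")
    # Reads adapter_model.safetensors (plus adapter_config.json) from the checkpoint directory.
    model = PeftModel.from_pretrained(base, "last-checkpoint")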
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a3bd42150e008a1bbddcb58353eb4002de8774a2318a9ec4f2b223079302a3f0
+oid sha256:8bc9044bf49f52ee3c858c16851b8e52412f1f2fd7397346a15167b588d87cf8
 size 75472244
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ecf4964b094cde4682dffbc3ccc495d200cbfb994a6a61f566d42b43aed8900d
+oid sha256:466b3954c2395356a44c2295ce049c71f33cd801d70afc6e9a63811d0adcbcc6
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:70f0f789b56065211b8c0b1a5e2a97dd0b5b08a816bbbe288fb6f9c677282af9
+oid sha256:5ad54995b081fae25638228c5d9c8f38ca277e5c5ad00bc3e49897b543f84405
 size 1064
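
All four files above are stored through Git LFS, so each diff only touches the pointer text: the sha256 oid changes with every checkpoint while the byte size stays the same. A minimal verification sketch, assuming the pointer file and the downloaded payload both sit on local disk (both paths below are placeholders):

    import hashlib
    import os

    POINTER_PATH = "last-checkpoint/adapter_model.safetensors"  # placeholder: the small LFS pointer file
    PAYLOAD_PATH = "adapter_model.safetensors"                  # placeholder: the downloaded 147 MB blob

    def parse_pointer(path):
        # Pointer format: "version ...", "oid sha256:<hex>", "size <bytes>"
        fields = dict(line.split(" ", 1) for line in open(path).read().splitlines() if line)
        return fields["oid"].removeprefix("sha256:"), int(fields["size"])

    def sha256_of(path, chunk_size=1 << 20):
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            while chunk := f.read(chunk_size):
                digest.update(chunk)
        return digest.hexdigest()

    oid, size = parse_pointer(POINTER_PATH)
    assert os.path.getsize(PAYLOAD_PATH) == size, "size mismatch"
    assert sha256_of(PAYLOAD_PATH) == oid, "checksum mismatch"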
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-"best_metric": 0.34923774003982544,
-"best_model_checkpoint": "miner_id_24/checkpoint-450",
-"epoch": 0.6764374295377678,
+"best_metric": 0.3471680283546448,
+"best_model_checkpoint": "miner_id_24/checkpoint-600",
+"epoch": 0.9019165727170236,
 "eval_steps": 50,
-"global_step": 450,
+"global_step": 600,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -402,6 +402,135 @@
 "eval_samples_per_second": 31.637,
 "eval_steps_per_second": 7.931,
 "step": 450
+},
+{
+"epoch": 0.6914693724163848,
+"grad_norm": 0.1071515679359436,
+"learning_rate": 2.6522584913693294e-05,
+"loss": 0.3868,
+"step": 460
+},
+{
+"epoch": 0.7065013152950019,
+"grad_norm": 0.12611959874629974,
+"learning_rate": 2.301660165700936e-05,
+"loss": 0.3711,
+"step": 470
+},
+{
+"epoch": 0.7215332581736189,
+"grad_norm": 0.12682585418224335,
+"learning_rate": 1.9728836206903656e-05,
+"loss": 0.3507,
+"step": 480
+},
+{
+"epoch": 0.736565201052236,
+"grad_norm": 0.1506437361240387,
+"learning_rate": 1.6668608091748495e-05,
+"loss": 0.342,
+"step": 490
+},
+{
+"epoch": 0.7515971439308531,
+"grad_norm": 0.20785292983055115,
+"learning_rate": 1.3844591860619383e-05,
+"loss": 0.2928,
+"step": 500
+},
+{
+"epoch": 0.7515971439308531,
+"eval_loss": 0.34627777338027954,
+"eval_runtime": 35.2593,
+"eval_samples_per_second": 31.793,
+"eval_steps_per_second": 7.97,
+"step": 500
+},
+{
+"epoch": 0.7666290868094702,
+"grad_norm": 0.11490298062562943,
+"learning_rate": 1.1264792494342857e-05,
+"loss": 0.3752,
+"step": 510
+},
+{
+"epoch": 0.7816610296880872,
+"grad_norm": 0.12073410302400589,
+"learning_rate": 8.936522714508678e-06,
+"loss": 0.3795,
+"step": 520
+},
+{
+"epoch": 0.7966929725667042,
+"grad_norm": 0.11344490945339203,
+"learning_rate": 6.866382254766157e-06,
+"loss": 0.3594,
+"step": 530
+},
+{
+"epoch": 0.8117249154453213,
+"grad_norm": 0.14574235677719116,
+"learning_rate": 5.060239153161872e-06,
+"loss": 0.3429,
+"step": 540
+},
+{
+"epoch": 0.8267568583239384,
+"grad_norm": 0.2706868648529053,
+"learning_rate": 3.5232131185484076e-06,
+"loss": 0.3157,
+"step": 550
+},
+{
+"epoch": 0.8267568583239384,
+"eval_loss": 0.3458581268787384,
+"eval_runtime": 35.1975,
+"eval_samples_per_second": 31.849,
+"eval_steps_per_second": 7.984,
+"step": 550
+},
+{
+"epoch": 0.8417888012025554,
+"grad_norm": 0.1057230532169342,
+"learning_rate": 2.259661018213333e-06,
+"loss": 0.3975,
+"step": 560
+},
+{
+"epoch": 0.8568207440811725,
+"grad_norm": 0.11743483692407608,
+"learning_rate": 1.2731645278655445e-06,
+"loss": 0.3814,
+"step": 570
+},
+{
+"epoch": 0.8718526869597896,
+"grad_norm": 0.12397727370262146,
+"learning_rate": 5.665199789862907e-07,
+"loss": 0.3591,
+"step": 580
+},
+{
+"epoch": 0.8868846298384067,
+"grad_norm": 0.14038552343845367,
+"learning_rate": 1.4173043232380557e-07,
+"loss": 0.3337,
+"step": 590
+},
+{
+"epoch": 0.9019165727170236,
+"grad_norm": 0.2670811116695404,
+"learning_rate": 0.0,
+"loss": 0.321,
+"step": 600
+},
+{
+"epoch": 0.9019165727170236,
+"eval_loss": 0.3471680283546448,
+"eval_runtime": 35.4222,
+"eval_samples_per_second": 31.647,
+"eval_steps_per_second": 7.933,
+"step": 600
 }
 ],
 "logging_steps": 10,
@@ -425,12 +554,12 @@
 "should_evaluate": false,
 "should_log": false,
 "should_save": true,
-"should_training_stop": false
+"should_training_stop": true
 },
 "attributes": {}
 }
 },
-"total_flos": 1.24760405670101e+17,
+"total_flos": 1.6628098653801677e+17,
 "train_batch_size": 8,
 "trial_name": null,
 "trial_params": null