{
"best_metric": 0.39362508058547974,
"best_model_checkpoint": "limb_classification_person_crop_seq/t4_8heads_2layers_1e-4lr/checkpoint-1332",
"epoch": 20.0,
"eval_steps": 500,
"global_step": 2960,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16891891891891891,
"grad_norm": 447213.59375,
"learning_rate": 1e-05,
"loss": 1.3965,
"step": 25
},
{
"epoch": 0.33783783783783783,
"grad_norm": 325825.0,
"learning_rate": 2e-05,
"loss": 1.3324,
"step": 50
},
{
"epoch": 0.5067567567567568,
"grad_norm": 362796.53125,
"learning_rate": 3e-05,
"loss": 1.2166,
"step": 75
},
{
"epoch": 0.6756756756756757,
"grad_norm": 503376.46875,
"learning_rate": 4e-05,
"loss": 1.0505,
"step": 100
},
{
"epoch": 0.8445945945945946,
"grad_norm": 327388.3125,
"learning_rate": 5e-05,
"loss": 0.7398,
"step": 125
},
{
"epoch": 1.0,
"eval_accuracy": 0.8778443113772455,
"eval_loss": 0.5189827084541321,
"eval_runtime": 59.8716,
"eval_samples_per_second": 13.947,
"eval_steps_per_second": 0.451,
"step": 148
},
{
"epoch": 1.0135135135135136,
"grad_norm": 384283.0,
"learning_rate": 6e-05,
"loss": 0.5161,
"step": 150
},
{
"epoch": 1.1824324324324325,
"grad_norm": 207891.984375,
"learning_rate": 7e-05,
"loss": 0.5215,
"step": 175
},
{
"epoch": 1.3513513513513513,
"grad_norm": 174695.015625,
"learning_rate": 8e-05,
"loss": 0.5073,
"step": 200
},
{
"epoch": 1.5202702702702702,
"grad_norm": 208182.0,
"learning_rate": 9e-05,
"loss": 0.4609,
"step": 225
},
{
"epoch": 1.689189189189189,
"grad_norm": 80421.375,
"learning_rate": 0.0001,
"loss": 0.4272,
"step": 250
},
{
"epoch": 1.8581081081081081,
"grad_norm": 202293.359375,
"learning_rate": 9.907749077490776e-05,
"loss": 0.4307,
"step": 275
},
{
"epoch": 2.0,
"eval_accuracy": 0.9005988023952096,
"eval_loss": 0.43251675367355347,
"eval_runtime": 59.7558,
"eval_samples_per_second": 13.974,
"eval_steps_per_second": 0.452,
"step": 296
},
{
"epoch": 2.027027027027027,
"grad_norm": 489905.125,
"learning_rate": 9.81549815498155e-05,
"loss": 0.488,
"step": 300
},
{
"epoch": 2.195945945945946,
"grad_norm": 87838.421875,
"learning_rate": 9.723247232472326e-05,
"loss": 0.4634,
"step": 325
},
{
"epoch": 2.364864864864865,
"grad_norm": 105458.578125,
"learning_rate": 9.6309963099631e-05,
"loss": 0.4715,
"step": 350
},
{
"epoch": 2.5337837837837838,
"grad_norm": 149638.75,
"learning_rate": 9.538745387453874e-05,
"loss": 0.4219,
"step": 375
},
{
"epoch": 2.7027027027027026,
"grad_norm": 82806.1640625,
"learning_rate": 9.44649446494465e-05,
"loss": 0.4692,
"step": 400
},
{
"epoch": 2.8716216216216215,
"grad_norm": 350947.53125,
"learning_rate": 9.354243542435425e-05,
"loss": 0.5113,
"step": 425
},
{
"epoch": 3.0,
"eval_accuracy": 0.9101796407185628,
"eval_loss": 0.4380628168582916,
"eval_runtime": 59.354,
"eval_samples_per_second": 14.068,
"eval_steps_per_second": 0.455,
"step": 444
},
{
"epoch": 3.0405405405405403,
"grad_norm": 122694.7734375,
"learning_rate": 9.2619926199262e-05,
"loss": 0.4317,
"step": 450
},
{
"epoch": 3.2094594594594597,
"grad_norm": 140175.5625,
"learning_rate": 9.169741697416975e-05,
"loss": 0.444,
"step": 475
},
{
"epoch": 3.3783783783783785,
"grad_norm": 155567.78125,
"learning_rate": 9.077490774907749e-05,
"loss": 0.4217,
"step": 500
},
{
"epoch": 3.5472972972972974,
"grad_norm": 86427.9140625,
"learning_rate": 8.985239852398525e-05,
"loss": 0.4161,
"step": 525
},
{
"epoch": 3.7162162162162162,
"grad_norm": 133361.25,
"learning_rate": 8.892988929889299e-05,
"loss": 0.4602,
"step": 550
},
{
"epoch": 3.885135135135135,
"grad_norm": 98116.40625,
"learning_rate": 8.800738007380073e-05,
"loss": 0.4097,
"step": 575
},
{
"epoch": 4.0,
"eval_accuracy": 0.9005988023952096,
"eval_loss": 0.42309147119522095,
"eval_runtime": 59.5951,
"eval_samples_per_second": 14.011,
"eval_steps_per_second": 0.453,
"step": 592
},
{
"epoch": 4.054054054054054,
"grad_norm": 97693.5234375,
"learning_rate": 8.708487084870849e-05,
"loss": 0.4732,
"step": 600
},
{
"epoch": 4.222972972972973,
"grad_norm": 30350.802734375,
"learning_rate": 8.616236162361624e-05,
"loss": 0.401,
"step": 625
},
{
"epoch": 4.391891891891892,
"grad_norm": 105219.0625,
"learning_rate": 8.523985239852399e-05,
"loss": 0.4209,
"step": 650
},
{
"epoch": 4.5608108108108105,
"grad_norm": 102363.4609375,
"learning_rate": 8.431734317343174e-05,
"loss": 0.4507,
"step": 675
},
{
"epoch": 4.72972972972973,
"grad_norm": 154839.265625,
"learning_rate": 8.339483394833948e-05,
"loss": 0.4452,
"step": 700
},
{
"epoch": 4.898648648648649,
"grad_norm": 108308.875,
"learning_rate": 8.247232472324724e-05,
"loss": 0.4298,
"step": 725
},
{
"epoch": 5.0,
"eval_accuracy": 0.9197604790419162,
"eval_loss": 0.39416760206222534,
"eval_runtime": 59.4472,
"eval_samples_per_second": 14.046,
"eval_steps_per_second": 0.454,
"step": 740
},
{
"epoch": 5.0675675675675675,
"grad_norm": 237972.3125,
"learning_rate": 8.154981549815498e-05,
"loss": 0.4162,
"step": 750
},
{
"epoch": 5.236486486486487,
"grad_norm": 137521.484375,
"learning_rate": 8.062730627306274e-05,
"loss": 0.4072,
"step": 775
},
{
"epoch": 5.405405405405405,
"grad_norm": 103783.578125,
"learning_rate": 7.970479704797048e-05,
"loss": 0.4159,
"step": 800
},
{
"epoch": 5.574324324324325,
"grad_norm": 258164.625,
"learning_rate": 7.878228782287823e-05,
"loss": 0.4053,
"step": 825
},
{
"epoch": 5.743243243243243,
"grad_norm": 159781.71875,
"learning_rate": 7.785977859778598e-05,
"loss": 0.4128,
"step": 850
},
{
"epoch": 5.912162162162162,
"grad_norm": 118142.734375,
"learning_rate": 7.693726937269373e-05,
"loss": 0.4204,
"step": 875
},
{
"epoch": 6.0,
"eval_accuracy": 0.9041916167664671,
"eval_loss": 0.42130208015441895,
"eval_runtime": 58.3486,
"eval_samples_per_second": 14.311,
"eval_steps_per_second": 0.463,
"step": 888
},
{
"epoch": 6.081081081081081,
"grad_norm": 116189.3671875,
"learning_rate": 7.601476014760149e-05,
"loss": 0.4436,
"step": 900
},
{
"epoch": 6.25,
"grad_norm": 188357.125,
"learning_rate": 7.509225092250923e-05,
"loss": 0.4119,
"step": 925
},
{
"epoch": 6.418918918918919,
"grad_norm": 126994.734375,
"learning_rate": 7.416974169741697e-05,
"loss": 0.3887,
"step": 950
},
{
"epoch": 6.587837837837838,
"grad_norm": 214453.125,
"learning_rate": 7.324723247232473e-05,
"loss": 0.4206,
"step": 975
},
{
"epoch": 6.756756756756757,
"grad_norm": 147634.59375,
"learning_rate": 7.232472324723247e-05,
"loss": 0.3975,
"step": 1000
},
{
"epoch": 6.925675675675675,
"grad_norm": 94356.3046875,
"learning_rate": 7.140221402214023e-05,
"loss": 0.4149,
"step": 1025
},
{
"epoch": 7.0,
"eval_accuracy": 0.9101796407185628,
"eval_loss": 0.4032335579395294,
"eval_runtime": 59.4488,
"eval_samples_per_second": 14.046,
"eval_steps_per_second": 0.454,
"step": 1036
},
{
"epoch": 7.094594594594595,
"grad_norm": 83387.5234375,
"learning_rate": 7.047970479704797e-05,
"loss": 0.4331,
"step": 1050
},
{
"epoch": 7.263513513513513,
"grad_norm": 104310.2734375,
"learning_rate": 6.955719557195572e-05,
"loss": 0.4088,
"step": 1075
},
{
"epoch": 7.4324324324324325,
"grad_norm": 106913.7734375,
"learning_rate": 6.863468634686348e-05,
"loss": 0.4304,
"step": 1100
},
{
"epoch": 7.601351351351351,
"grad_norm": 113361.7421875,
"learning_rate": 6.771217712177122e-05,
"loss": 0.3817,
"step": 1125
},
{
"epoch": 7.77027027027027,
"grad_norm": 111687.25,
"learning_rate": 6.678966789667896e-05,
"loss": 0.402,
"step": 1150
},
{
"epoch": 7.9391891891891895,
"grad_norm": 147454.6875,
"learning_rate": 6.586715867158672e-05,
"loss": 0.4028,
"step": 1175
},
{
"epoch": 8.0,
"eval_accuracy": 0.9161676646706587,
"eval_loss": 0.3973223567008972,
"eval_runtime": 58.6874,
"eval_samples_per_second": 14.228,
"eval_steps_per_second": 0.46,
"step": 1184
},
{
"epoch": 8.108108108108109,
"grad_norm": 128852.5390625,
"learning_rate": 6.494464944649446e-05,
"loss": 0.4045,
"step": 1200
},
{
"epoch": 8.277027027027026,
"grad_norm": 72024.125,
"learning_rate": 6.402214022140222e-05,
"loss": 0.3936,
"step": 1225
},
{
"epoch": 8.445945945945946,
"grad_norm": 95322.5078125,
"learning_rate": 6.309963099630997e-05,
"loss": 0.4119,
"step": 1250
},
{
"epoch": 8.614864864864865,
"grad_norm": 147554.515625,
"learning_rate": 6.217712177121771e-05,
"loss": 0.364,
"step": 1275
},
{
"epoch": 8.783783783783784,
"grad_norm": 113870.6328125,
"learning_rate": 6.125461254612547e-05,
"loss": 0.4181,
"step": 1300
},
{
"epoch": 8.952702702702704,
"grad_norm": 227272.03125,
"learning_rate": 6.033210332103322e-05,
"loss": 0.4611,
"step": 1325
},
{
"epoch": 9.0,
"eval_accuracy": 0.918562874251497,
"eval_loss": 0.39362508058547974,
"eval_runtime": 58.3177,
"eval_samples_per_second": 14.318,
"eval_steps_per_second": 0.463,
"step": 1332
},
{
"epoch": 9.121621621621621,
"grad_norm": 170105.28125,
"learning_rate": 5.940959409594096e-05,
"loss": 0.3968,
"step": 1350
},
{
"epoch": 9.29054054054054,
"grad_norm": 153303.203125,
"learning_rate": 5.848708487084871e-05,
"loss": 0.3987,
"step": 1375
},
{
"epoch": 9.45945945945946,
"grad_norm": 107218.5546875,
"learning_rate": 5.756457564575646e-05,
"loss": 0.3905,
"step": 1400
},
{
"epoch": 9.628378378378379,
"grad_norm": 68339.8046875,
"learning_rate": 5.664206642066421e-05,
"loss": 0.37,
"step": 1425
},
{
"epoch": 9.797297297297296,
"grad_norm": 50506.203125,
"learning_rate": 5.5719557195571956e-05,
"loss": 0.4076,
"step": 1450
},
{
"epoch": 9.966216216216216,
"grad_norm": 99174.765625,
"learning_rate": 5.479704797047971e-05,
"loss": 0.3807,
"step": 1475
},
{
"epoch": 10.0,
"eval_accuracy": 0.918562874251497,
"eval_loss": 0.3945906162261963,
"eval_runtime": 58.4341,
"eval_samples_per_second": 14.29,
"eval_steps_per_second": 0.462,
"step": 1480
},
{
"epoch": 10.135135135135135,
"grad_norm": 48273.4453125,
"learning_rate": 5.387453874538746e-05,
"loss": 0.3946,
"step": 1500
},
{
"epoch": 10.304054054054054,
"grad_norm": 112905.578125,
"learning_rate": 5.295202952029521e-05,
"loss": 0.4089,
"step": 1525
},
{
"epoch": 10.472972972972974,
"grad_norm": 165136.125,
"learning_rate": 5.202952029520295e-05,
"loss": 0.3522,
"step": 1550
},
{
"epoch": 10.641891891891891,
"grad_norm": 97660.359375,
"learning_rate": 5.11070110701107e-05,
"loss": 0.371,
"step": 1575
},
{
"epoch": 10.81081081081081,
"grad_norm": 106295.6875,
"learning_rate": 5.018450184501845e-05,
"loss": 0.3951,
"step": 1600
},
{
"epoch": 10.97972972972973,
"grad_norm": 95395.078125,
"learning_rate": 4.92619926199262e-05,
"loss": 0.3918,
"step": 1625
},
{
"epoch": 11.0,
"eval_accuracy": 0.9197604790419162,
"eval_loss": 0.3982699513435364,
"eval_runtime": 58.4565,
"eval_samples_per_second": 14.284,
"eval_steps_per_second": 0.462,
"step": 1628
},
{
"epoch": 11.14864864864865,
"grad_norm": 115477.0234375,
"learning_rate": 4.833948339483395e-05,
"loss": 0.4004,
"step": 1650
},
{
"epoch": 11.317567567567568,
"grad_norm": 48284.51171875,
"learning_rate": 4.74169741697417e-05,
"loss": 0.3843,
"step": 1675
},
{
"epoch": 11.486486486486486,
"grad_norm": 130136.9765625,
"learning_rate": 4.6494464944649444e-05,
"loss": 0.3695,
"step": 1700
},
{
"epoch": 11.655405405405405,
"grad_norm": 142826.734375,
"learning_rate": 4.55719557195572e-05,
"loss": 0.3857,
"step": 1725
},
{
"epoch": 11.824324324324325,
"grad_norm": 251026.734375,
"learning_rate": 4.464944649446495e-05,
"loss": 0.3496,
"step": 1750
},
{
"epoch": 11.993243243243244,
"grad_norm": 106497.9296875,
"learning_rate": 4.37269372693727e-05,
"loss": 0.3929,
"step": 1775
},
{
"epoch": 12.0,
"eval_accuracy": 0.9149700598802395,
"eval_loss": 0.40130630135536194,
"eval_runtime": 59.6764,
"eval_samples_per_second": 13.992,
"eval_steps_per_second": 0.452,
"step": 1776
},
{
"epoch": 12.162162162162161,
"grad_norm": 179013.296875,
"learning_rate": 4.280442804428044e-05,
"loss": 0.3705,
"step": 1800
},
{
"epoch": 12.33108108108108,
"grad_norm": 223030.25,
"learning_rate": 4.1881918819188195e-05,
"loss": 0.4044,
"step": 1825
},
{
"epoch": 12.5,
"grad_norm": 68311.984375,
"learning_rate": 4.0959409594095944e-05,
"loss": 0.384,
"step": 1850
},
{
"epoch": 12.66891891891892,
"grad_norm": 132574.109375,
"learning_rate": 4.003690036900369e-05,
"loss": 0.4299,
"step": 1875
},
{
"epoch": 12.837837837837839,
"grad_norm": 97282.0859375,
"learning_rate": 3.911439114391144e-05,
"loss": 0.368,
"step": 1900
},
{
"epoch": 13.0,
"eval_accuracy": 0.9089820359281438,
"eval_loss": 0.41854628920555115,
"eval_runtime": 58.5016,
"eval_samples_per_second": 14.273,
"eval_steps_per_second": 0.462,
"step": 1924
},
{
"epoch": 13.006756756756756,
"grad_norm": 106621.609375,
"learning_rate": 3.819188191881919e-05,
"loss": 0.3395,
"step": 1925
},
{
"epoch": 13.175675675675675,
"grad_norm": 130611.234375,
"learning_rate": 3.726937269372694e-05,
"loss": 0.3914,
"step": 1950
},
{
"epoch": 13.344594594594595,
"grad_norm": 170264.09375,
"learning_rate": 3.634686346863469e-05,
"loss": 0.3472,
"step": 1975
},
{
"epoch": 13.513513513513514,
"grad_norm": 89837.6640625,
"learning_rate": 3.542435424354244e-05,
"loss": 0.3658,
"step": 2000
},
{
"epoch": 13.682432432432432,
"grad_norm": 143319.875,
"learning_rate": 3.4501845018450186e-05,
"loss": 0.3627,
"step": 2025
},
{
"epoch": 13.85135135135135,
"grad_norm": 89946.25,
"learning_rate": 3.3579335793357934e-05,
"loss": 0.3921,
"step": 2050
},
{
"epoch": 14.0,
"eval_accuracy": 0.9101796407185628,
"eval_loss": 0.41929543018341064,
"eval_runtime": 59.7333,
"eval_samples_per_second": 13.979,
"eval_steps_per_second": 0.452,
"step": 2072
},
{
"epoch": 14.02027027027027,
"grad_norm": 88344.1328125,
"learning_rate": 3.265682656826568e-05,
"loss": 0.3775,
"step": 2075
},
{
"epoch": 14.18918918918919,
"grad_norm": 32583.9375,
"learning_rate": 3.173431734317343e-05,
"loss": 0.3376,
"step": 2100
},
{
"epoch": 14.358108108108109,
"grad_norm": 142550.1875,
"learning_rate": 3.081180811808118e-05,
"loss": 0.3615,
"step": 2125
},
{
"epoch": 14.527027027027026,
"grad_norm": 97721.6640625,
"learning_rate": 2.9889298892988933e-05,
"loss": 0.3521,
"step": 2150
},
{
"epoch": 14.695945945945946,
"grad_norm": 144481.015625,
"learning_rate": 2.8966789667896682e-05,
"loss": 0.3748,
"step": 2175
},
{
"epoch": 14.864864864864865,
"grad_norm": 45907.69140625,
"learning_rate": 2.8044280442804427e-05,
"loss": 0.3706,
"step": 2200
},
{
"epoch": 15.0,
"eval_accuracy": 0.9137724550898204,
"eval_loss": 0.4133796989917755,
"eval_runtime": 59.0815,
"eval_samples_per_second": 14.133,
"eval_steps_per_second": 0.457,
"step": 2220
},
{
"epoch": 15.033783783783784,
"grad_norm": 151263.125,
"learning_rate": 2.7121771217712176e-05,
"loss": 0.3777,
"step": 2225
},
{
"epoch": 15.202702702702704,
"grad_norm": 129226.9609375,
"learning_rate": 2.619926199261993e-05,
"loss": 0.38,
"step": 2250
},
{
"epoch": 15.371621621621621,
"grad_norm": 86875.140625,
"learning_rate": 2.5276752767527677e-05,
"loss": 0.348,
"step": 2275
},
{
"epoch": 15.54054054054054,
"grad_norm": 105283.8984375,
"learning_rate": 2.4354243542435426e-05,
"loss": 0.3425,
"step": 2300
},
{
"epoch": 15.70945945945946,
"grad_norm": 142772.0,
"learning_rate": 2.3431734317343175e-05,
"loss": 0.3457,
"step": 2325
},
{
"epoch": 15.878378378378379,
"grad_norm": 149936.21875,
"learning_rate": 2.2509225092250924e-05,
"loss": 0.3639,
"step": 2350
},
{
"epoch": 16.0,
"eval_accuracy": 0.9197604790419162,
"eval_loss": 0.3938332796096802,
"eval_runtime": 59.6694,
"eval_samples_per_second": 13.994,
"eval_steps_per_second": 0.452,
"step": 2368
},
{
"epoch": 16.0472972972973,
"grad_norm": 107560.296875,
"learning_rate": 2.1586715867158673e-05,
"loss": 0.3527,
"step": 2375
},
{
"epoch": 16.216216216216218,
"grad_norm": 121598.125,
"learning_rate": 2.066420664206642e-05,
"loss": 0.327,
"step": 2400
},
{
"epoch": 16.385135135135137,
"grad_norm": 64995.89453125,
"learning_rate": 1.974169741697417e-05,
"loss": 0.3418,
"step": 2425
},
{
"epoch": 16.554054054054053,
"grad_norm": 131447.609375,
"learning_rate": 1.881918819188192e-05,
"loss": 0.3559,
"step": 2450
},
{
"epoch": 16.722972972972972,
"grad_norm": 91204.625,
"learning_rate": 1.7896678966789668e-05,
"loss": 0.3425,
"step": 2475
},
{
"epoch": 16.89189189189189,
"grad_norm": 60530.859375,
"learning_rate": 1.6974169741697417e-05,
"loss": 0.3335,
"step": 2500
},
{
"epoch": 17.0,
"eval_accuracy": 0.9137724550898204,
"eval_loss": 0.40573227405548096,
"eval_runtime": 59.6821,
"eval_samples_per_second": 13.991,
"eval_steps_per_second": 0.452,
"step": 2516
},
{
"epoch": 17.06081081081081,
"grad_norm": 105083.375,
"learning_rate": 1.6051660516605166e-05,
"loss": 0.3749,
"step": 2525
},
{
"epoch": 17.22972972972973,
"grad_norm": 70010.59375,
"learning_rate": 1.5129151291512916e-05,
"loss": 0.3461,
"step": 2550
},
{
"epoch": 17.39864864864865,
"grad_norm": 50292.09375,
"learning_rate": 1.4206642066420663e-05,
"loss": 0.3462,
"step": 2575
},
{
"epoch": 17.56756756756757,
"grad_norm": 73966.28125,
"learning_rate": 1.3284132841328414e-05,
"loss": 0.3496,
"step": 2600
},
{
"epoch": 17.736486486486488,
"grad_norm": 132157.171875,
"learning_rate": 1.2361623616236164e-05,
"loss": 0.3271,
"step": 2625
},
{
"epoch": 17.905405405405407,
"grad_norm": 51997.98828125,
"learning_rate": 1.1439114391143913e-05,
"loss": 0.374,
"step": 2650
},
{
"epoch": 18.0,
"eval_accuracy": 0.9173652694610779,
"eval_loss": 0.40893492102622986,
"eval_runtime": 59.9794,
"eval_samples_per_second": 13.921,
"eval_steps_per_second": 0.45,
"step": 2664
},
{
"epoch": 18.074324324324323,
"grad_norm": 156177.09375,
"learning_rate": 1.0516605166051662e-05,
"loss": 0.3418,
"step": 2675
},
{
"epoch": 18.243243243243242,
"grad_norm": 78743.9609375,
"learning_rate": 9.59409594095941e-06,
"loss": 0.3126,
"step": 2700
},
{
"epoch": 18.41216216216216,
"grad_norm": 154911.28125,
"learning_rate": 8.67158671586716e-06,
"loss": 0.3551,
"step": 2725
},
{
"epoch": 18.58108108108108,
"grad_norm": 51651.5859375,
"learning_rate": 7.749077490774908e-06,
"loss": 0.3116,
"step": 2750
},
{
"epoch": 18.75,
"grad_norm": 175163.84375,
"learning_rate": 6.826568265682657e-06,
"loss": 0.3238,
"step": 2775
},
{
"epoch": 18.91891891891892,
"grad_norm": 86829.046875,
"learning_rate": 5.904059040590406e-06,
"loss": 0.3723,
"step": 2800
},
{
"epoch": 19.0,
"eval_accuracy": 0.9173652694610779,
"eval_loss": 0.39987924695014954,
"eval_runtime": 59.8763,
"eval_samples_per_second": 13.945,
"eval_steps_per_second": 0.451,
"step": 2812
},
{
"epoch": 19.08783783783784,
"grad_norm": 111032.2734375,
"learning_rate": 4.981549815498155e-06,
"loss": 0.3502,
"step": 2825
},
{
"epoch": 19.256756756756758,
"grad_norm": 65844.7265625,
"learning_rate": 4.059040590405904e-06,
"loss": 0.329,
"step": 2850
},
{
"epoch": 19.425675675675677,
"grad_norm": 142658.359375,
"learning_rate": 3.136531365313653e-06,
"loss": 0.359,
"step": 2875
},
{
"epoch": 19.594594594594593,
"grad_norm": 107528.9296875,
"learning_rate": 2.2140221402214023e-06,
"loss": 0.3349,
"step": 2900
},
{
"epoch": 19.763513513513512,
"grad_norm": 143320.5,
"learning_rate": 1.2915129151291513e-06,
"loss": 0.3424,
"step": 2925
},
{
"epoch": 19.93243243243243,
"grad_norm": 128712.2109375,
"learning_rate": 3.690036900369004e-07,
"loss": 0.3055,
"step": 2950
},
{
"epoch": 20.0,
"eval_accuracy": 0.9173652694610779,
"eval_loss": 0.4020386338233948,
"eval_runtime": 60.1235,
"eval_samples_per_second": 13.888,
"eval_steps_per_second": 0.449,
"step": 2960
},
{
"epoch": 20.0,
"step": 2960,
"total_flos": 0.0,
"train_loss": 0.42456487417221067,
"train_runtime": 11160.5196,
"train_samples_per_second": 8.475,
"train_steps_per_second": 0.265
}
],
"logging_steps": 25,
"max_steps": 2960,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}