CTMAE-P2-V2-S3 / trainer_state.json
{
"best_metric": 0.782608695652174,
"best_model_checkpoint": "CTMAE-P2-V2-S3/checkpoint-2925",
"epoch": 49.02,
"eval_steps": 500,
"global_step": 3250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003076923076923077,
"grad_norm": 2.4690194129943848,
"learning_rate": 3.0769230769230774e-07,
"loss": 0.6769,
"step": 10
},
{
"epoch": 0.006153846153846154,
"grad_norm": 2.5120630264282227,
"learning_rate": 6.153846153846155e-07,
"loss": 0.6635,
"step": 20
},
{
"epoch": 0.009230769230769232,
"grad_norm": 5.427048206329346,
"learning_rate": 9.230769230769232e-07,
"loss": 0.6582,
"step": 30
},
{
"epoch": 0.012307692307692308,
"grad_norm": 5.626338958740234,
"learning_rate": 1.230769230769231e-06,
"loss": 0.6186,
"step": 40
},
{
"epoch": 0.015384615384615385,
"grad_norm": 5.170588493347168,
"learning_rate": 1.5384615384615387e-06,
"loss": 0.5612,
"step": 50
},
{
"epoch": 0.018461538461538463,
"grad_norm": 5.933913707733154,
"learning_rate": 1.8461538461538465e-06,
"loss": 0.6138,
"step": 60
},
{
"epoch": 0.02,
"eval_accuracy": 0.5434782608695652,
"eval_loss": 0.7341543436050415,
"eval_runtime": 16.5125,
"eval_samples_per_second": 2.786,
"eval_steps_per_second": 0.727,
"step": 65
},
{
"epoch": 1.0015384615384615,
"grad_norm": 5.487543106079102,
"learning_rate": 2.153846153846154e-06,
"loss": 0.5948,
"step": 70
},
{
"epoch": 1.0046153846153847,
"grad_norm": 5.932001113891602,
"learning_rate": 2.461538461538462e-06,
"loss": 0.6058,
"step": 80
},
{
"epoch": 1.0076923076923077,
"grad_norm": 6.421907901763916,
"learning_rate": 2.7692307692307697e-06,
"loss": 0.6592,
"step": 90
},
{
"epoch": 1.0107692307692309,
"grad_norm": 6.242369651794434,
"learning_rate": 3.0769230769230774e-06,
"loss": 0.5283,
"step": 100
},
{
"epoch": 1.0138461538461538,
"grad_norm": 6.2263007164001465,
"learning_rate": 3.384615384615385e-06,
"loss": 0.4516,
"step": 110
},
{
"epoch": 1.0169230769230768,
"grad_norm": 21.957347869873047,
"learning_rate": 3.692307692307693e-06,
"loss": 0.5932,
"step": 120
},
{
"epoch": 1.02,
"grad_norm": 6.019967079162598,
"learning_rate": 4.000000000000001e-06,
"loss": 0.5607,
"step": 130
},
{
"epoch": 1.02,
"eval_accuracy": 0.5434782608695652,
"eval_loss": 0.7364471554756165,
"eval_runtime": 15.5184,
"eval_samples_per_second": 2.964,
"eval_steps_per_second": 0.773,
"step": 130
},
{
"epoch": 2.003076923076923,
"grad_norm": 10.679378509521484,
"learning_rate": 4.307692307692308e-06,
"loss": 0.4795,
"step": 140
},
{
"epoch": 2.006153846153846,
"grad_norm": 12.046272277832031,
"learning_rate": 4.615384615384616e-06,
"loss": 0.5978,
"step": 150
},
{
"epoch": 2.0092307692307694,
"grad_norm": 14.933501243591309,
"learning_rate": 4.923076923076924e-06,
"loss": 0.6278,
"step": 160
},
{
"epoch": 2.0123076923076924,
"grad_norm": 5.484428405761719,
"learning_rate": 5.230769230769232e-06,
"loss": 0.6293,
"step": 170
},
{
"epoch": 2.0153846153846153,
"grad_norm": 5.703970909118652,
"learning_rate": 5.538461538461539e-06,
"loss": 0.5672,
"step": 180
},
{
"epoch": 2.0184615384615383,
"grad_norm": 7.624868392944336,
"learning_rate": 5.846153846153847e-06,
"loss": 0.5525,
"step": 190
},
{
"epoch": 2.02,
"eval_accuracy": 0.5434782608695652,
"eval_loss": 0.9435462951660156,
"eval_runtime": 15.4928,
"eval_samples_per_second": 2.969,
"eval_steps_per_second": 0.775,
"step": 195
},
{
"epoch": 3.0015384615384617,
"grad_norm": 5.229469299316406,
"learning_rate": 6.153846153846155e-06,
"loss": 0.5915,
"step": 200
},
{
"epoch": 3.0046153846153847,
"grad_norm": 5.959716320037842,
"learning_rate": 6.461538461538463e-06,
"loss": 0.5174,
"step": 210
},
{
"epoch": 3.0076923076923077,
"grad_norm": 5.900456428527832,
"learning_rate": 6.76923076923077e-06,
"loss": 0.5029,
"step": 220
},
{
"epoch": 3.0107692307692306,
"grad_norm": 5.498166084289551,
"learning_rate": 7.076923076923078e-06,
"loss": 0.5725,
"step": 230
},
{
"epoch": 3.013846153846154,
"grad_norm": 6.51685094833374,
"learning_rate": 7.384615384615386e-06,
"loss": 0.5766,
"step": 240
},
{
"epoch": 3.016923076923077,
"grad_norm": 22.756736755371094,
"learning_rate": 7.692307692307694e-06,
"loss": 0.4172,
"step": 250
},
{
"epoch": 3.02,
"grad_norm": 5.767360687255859,
"learning_rate": 8.000000000000001e-06,
"loss": 0.6208,
"step": 260
},
{
"epoch": 3.02,
"eval_accuracy": 0.5869565217391305,
"eval_loss": 0.7286421656608582,
"eval_runtime": 14.2421,
"eval_samples_per_second": 3.23,
"eval_steps_per_second": 0.843,
"step": 260
},
{
"epoch": 4.003076923076923,
"grad_norm": 9.422948837280273,
"learning_rate": 8.307692307692309e-06,
"loss": 0.4313,
"step": 270
},
{
"epoch": 4.006153846153846,
"grad_norm": 6.0982985496521,
"learning_rate": 8.615384615384617e-06,
"loss": 0.4412,
"step": 280
},
{
"epoch": 4.009230769230769,
"grad_norm": 37.13871765136719,
"learning_rate": 8.923076923076925e-06,
"loss": 0.5237,
"step": 290
},
{
"epoch": 4.012307692307692,
"grad_norm": 0.8876226544380188,
"learning_rate": 9.230769230769232e-06,
"loss": 0.417,
"step": 300
},
{
"epoch": 4.015384615384615,
"grad_norm": 11.075803756713867,
"learning_rate": 9.53846153846154e-06,
"loss": 1.0414,
"step": 310
},
{
"epoch": 4.018461538461539,
"grad_norm": 2.8011114597320557,
"learning_rate": 9.846153846153848e-06,
"loss": 0.6099,
"step": 320
},
{
"epoch": 4.02,
"eval_accuracy": 0.5434782608695652,
"eval_loss": 0.7308201193809509,
"eval_runtime": 14.1793,
"eval_samples_per_second": 3.244,
"eval_steps_per_second": 0.846,
"step": 325
},
{
"epoch": 5.001538461538462,
"grad_norm": 1.8966983556747437,
"learning_rate": 9.982905982905984e-06,
"loss": 0.6785,
"step": 330
},
{
"epoch": 5.004615384615384,
"grad_norm": 1.9870883226394653,
"learning_rate": 9.94871794871795e-06,
"loss": 0.6534,
"step": 340
},
{
"epoch": 5.007692307692308,
"grad_norm": 3.71256160736084,
"learning_rate": 9.914529914529915e-06,
"loss": 0.5627,
"step": 350
},
{
"epoch": 5.010769230769231,
"grad_norm": 7.984810829162598,
"learning_rate": 9.880341880341882e-06,
"loss": 0.4855,
"step": 360
},
{
"epoch": 5.013846153846154,
"grad_norm": 7.3561177253723145,
"learning_rate": 9.846153846153848e-06,
"loss": 0.5532,
"step": 370
},
{
"epoch": 5.016923076923077,
"grad_norm": 9.686047554016113,
"learning_rate": 9.811965811965812e-06,
"loss": 0.5581,
"step": 380
},
{
"epoch": 5.02,
"grad_norm": 11.757854461669922,
"learning_rate": 9.777777777777779e-06,
"loss": 0.4065,
"step": 390
},
{
"epoch": 5.02,
"eval_accuracy": 0.5434782608695652,
"eval_loss": 1.057287573814392,
"eval_runtime": 14.2388,
"eval_samples_per_second": 3.231,
"eval_steps_per_second": 0.843,
"step": 390
},
{
"epoch": 6.003076923076923,
"grad_norm": 5.493303298950195,
"learning_rate": 9.743589743589744e-06,
"loss": 0.7007,
"step": 400
},
{
"epoch": 6.006153846153846,
"grad_norm": 11.533708572387695,
"learning_rate": 9.70940170940171e-06,
"loss": 0.5369,
"step": 410
},
{
"epoch": 6.009230769230769,
"grad_norm": 7.627615928649902,
"learning_rate": 9.675213675213677e-06,
"loss": 0.5327,
"step": 420
},
{
"epoch": 6.012307692307692,
"grad_norm": 16.237632751464844,
"learning_rate": 9.641025641025642e-06,
"loss": 0.4611,
"step": 430
},
{
"epoch": 6.015384615384615,
"grad_norm": 25.719934463500977,
"learning_rate": 9.606837606837607e-06,
"loss": 0.5421,
"step": 440
},
{
"epoch": 6.018461538461539,
"grad_norm": 14.450688362121582,
"learning_rate": 9.572649572649575e-06,
"loss": 0.4338,
"step": 450
},
{
"epoch": 6.02,
"eval_accuracy": 0.5869565217391305,
"eval_loss": 0.6411370635032654,
"eval_runtime": 14.1729,
"eval_samples_per_second": 3.246,
"eval_steps_per_second": 0.847,
"step": 455
},
{
"epoch": 7.001538461538462,
"grad_norm": 3.6783626079559326,
"learning_rate": 9.53846153846154e-06,
"loss": 0.61,
"step": 460
},
{
"epoch": 7.004615384615384,
"grad_norm": 11.61288070678711,
"learning_rate": 9.504273504273504e-06,
"loss": 0.6009,
"step": 470
},
{
"epoch": 7.007692307692308,
"grad_norm": 9.435476303100586,
"learning_rate": 9.470085470085471e-06,
"loss": 0.7558,
"step": 480
},
{
"epoch": 7.010769230769231,
"grad_norm": 9.540996551513672,
"learning_rate": 9.435897435897436e-06,
"loss": 0.3977,
"step": 490
},
{
"epoch": 7.013846153846154,
"grad_norm": 4.846683025360107,
"learning_rate": 9.401709401709402e-06,
"loss": 0.6812,
"step": 500
},
{
"epoch": 7.016923076923077,
"grad_norm": 8.068705558776855,
"learning_rate": 9.367521367521369e-06,
"loss": 0.558,
"step": 510
},
{
"epoch": 7.02,
"grad_norm": 0.9865846037864685,
"learning_rate": 9.333333333333334e-06,
"loss": 0.4706,
"step": 520
},
{
"epoch": 7.02,
"eval_accuracy": 0.6956521739130435,
"eval_loss": 0.5360137820243835,
"eval_runtime": 14.2447,
"eval_samples_per_second": 3.229,
"eval_steps_per_second": 0.842,
"step": 520
},
{
"epoch": 8.003076923076923,
"grad_norm": 2.319526433944702,
"learning_rate": 9.2991452991453e-06,
"loss": 0.7282,
"step": 530
},
{
"epoch": 8.006153846153847,
"grad_norm": 5.820730686187744,
"learning_rate": 9.264957264957267e-06,
"loss": 0.5329,
"step": 540
},
{
"epoch": 8.009230769230768,
"grad_norm": 0.8161588311195374,
"learning_rate": 9.230769230769232e-06,
"loss": 0.4894,
"step": 550
},
{
"epoch": 8.012307692307692,
"grad_norm": 8.13700008392334,
"learning_rate": 9.196581196581196e-06,
"loss": 0.5198,
"step": 560
},
{
"epoch": 8.015384615384615,
"grad_norm": 4.7142815589904785,
"learning_rate": 9.162393162393163e-06,
"loss": 0.3848,
"step": 570
},
{
"epoch": 8.018461538461539,
"grad_norm": 0.2144533395767212,
"learning_rate": 9.128205128205129e-06,
"loss": 0.4147,
"step": 580
},
{
"epoch": 8.02,
"eval_accuracy": 0.5869565217391305,
"eval_loss": 1.0753138065338135,
"eval_runtime": 14.2289,
"eval_samples_per_second": 3.233,
"eval_steps_per_second": 0.843,
"step": 585
},
{
"epoch": 9.00153846153846,
"grad_norm": 9.62121868133545,
"learning_rate": 9.094017094017094e-06,
"loss": 1.0112,
"step": 590
},
{
"epoch": 9.004615384615384,
"grad_norm": 33.65559005737305,
"learning_rate": 9.059829059829061e-06,
"loss": 0.4409,
"step": 600
},
{
"epoch": 9.007692307692308,
"grad_norm": 23.247821807861328,
"learning_rate": 9.025641025641027e-06,
"loss": 0.3565,
"step": 610
},
{
"epoch": 9.010769230769231,
"grad_norm": 68.15277099609375,
"learning_rate": 8.991452991452992e-06,
"loss": 0.7108,
"step": 620
},
{
"epoch": 9.013846153846155,
"grad_norm": 5.1095476150512695,
"learning_rate": 8.957264957264959e-06,
"loss": 0.5591,
"step": 630
},
{
"epoch": 9.016923076923076,
"grad_norm": 14.021625518798828,
"learning_rate": 8.923076923076925e-06,
"loss": 0.3854,
"step": 640
},
{
"epoch": 9.02,
"grad_norm": 8.44845199584961,
"learning_rate": 8.888888888888888e-06,
"loss": 0.6016,
"step": 650
},
{
"epoch": 9.02,
"eval_accuracy": 0.5652173913043478,
"eval_loss": 1.1058659553527832,
"eval_runtime": 15.3529,
"eval_samples_per_second": 2.996,
"eval_steps_per_second": 0.782,
"step": 650
},
{
"epoch": 10.003076923076923,
"grad_norm": 6.827932834625244,
"learning_rate": 8.854700854700855e-06,
"loss": 0.7955,
"step": 660
},
{
"epoch": 10.006153846153847,
"grad_norm": 3.688110589981079,
"learning_rate": 8.820512820512821e-06,
"loss": 0.3492,
"step": 670
},
{
"epoch": 10.009230769230768,
"grad_norm": 13.171318054199219,
"learning_rate": 8.786324786324786e-06,
"loss": 0.4298,
"step": 680
},
{
"epoch": 10.012307692307692,
"grad_norm": 12.102375984191895,
"learning_rate": 8.752136752136753e-06,
"loss": 0.6888,
"step": 690
},
{
"epoch": 10.015384615384615,
"grad_norm": 6.251157760620117,
"learning_rate": 8.717948717948719e-06,
"loss": 0.3471,
"step": 700
},
{
"epoch": 10.018461538461539,
"grad_norm": 13.216636657714844,
"learning_rate": 8.683760683760684e-06,
"loss": 0.8252,
"step": 710
},
{
"epoch": 10.02,
"eval_accuracy": 0.6739130434782609,
"eval_loss": 0.5254927277565002,
"eval_runtime": 14.3522,
"eval_samples_per_second": 3.205,
"eval_steps_per_second": 0.836,
"step": 715
},
{
"epoch": 11.00153846153846,
"grad_norm": 4.716592788696289,
"learning_rate": 8.649572649572651e-06,
"loss": 0.3548,
"step": 720
},
{
"epoch": 11.004615384615384,
"grad_norm": 1.752955675125122,
"learning_rate": 8.615384615384617e-06,
"loss": 0.2913,
"step": 730
},
{
"epoch": 11.007692307692308,
"grad_norm": 11.685894966125488,
"learning_rate": 8.58119658119658e-06,
"loss": 0.6061,
"step": 740
},
{
"epoch": 11.010769230769231,
"grad_norm": 14.7794189453125,
"learning_rate": 8.547008547008548e-06,
"loss": 0.4836,
"step": 750
},
{
"epoch": 11.013846153846155,
"grad_norm": 20.174833297729492,
"learning_rate": 8.512820512820513e-06,
"loss": 0.5189,
"step": 760
},
{
"epoch": 11.016923076923076,
"grad_norm": 8.418707847595215,
"learning_rate": 8.478632478632479e-06,
"loss": 0.4754,
"step": 770
},
{
"epoch": 11.02,
"grad_norm": 10.69355583190918,
"learning_rate": 8.444444444444446e-06,
"loss": 0.3944,
"step": 780
},
{
"epoch": 11.02,
"eval_accuracy": 0.5869565217391305,
"eval_loss": 0.8291409611701965,
"eval_runtime": 14.4575,
"eval_samples_per_second": 3.182,
"eval_steps_per_second": 0.83,
"step": 780
},
{
"epoch": 12.003076923076923,
"grad_norm": 23.261892318725586,
"learning_rate": 8.410256410256411e-06,
"loss": 0.5586,
"step": 790
},
{
"epoch": 12.006153846153847,
"grad_norm": 57.039825439453125,
"learning_rate": 8.376068376068377e-06,
"loss": 0.8555,
"step": 800
},
{
"epoch": 12.009230769230768,
"grad_norm": 3.4197776317596436,
"learning_rate": 8.341880341880344e-06,
"loss": 0.4604,
"step": 810
},
{
"epoch": 12.012307692307692,
"grad_norm": 9.3665189743042,
"learning_rate": 8.307692307692309e-06,
"loss": 0.477,
"step": 820
},
{
"epoch": 12.015384615384615,
"grad_norm": 14.621077537536621,
"learning_rate": 8.273504273504273e-06,
"loss": 0.6194,
"step": 830
},
{
"epoch": 12.018461538461539,
"grad_norm": 18.631038665771484,
"learning_rate": 8.23931623931624e-06,
"loss": 0.8652,
"step": 840
},
{
"epoch": 12.02,
"eval_accuracy": 0.5652173913043478,
"eval_loss": 0.726616382598877,
"eval_runtime": 15.1578,
"eval_samples_per_second": 3.035,
"eval_steps_per_second": 0.792,
"step": 845
},
{
"epoch": 13.00153846153846,
"grad_norm": 3.469635009765625,
"learning_rate": 8.205128205128205e-06,
"loss": 0.689,
"step": 850
},
{
"epoch": 13.004615384615384,
"grad_norm": 5.77168083190918,
"learning_rate": 8.17094017094017e-06,
"loss": 0.3198,
"step": 860
},
{
"epoch": 13.007692307692308,
"grad_norm": 12.537999153137207,
"learning_rate": 8.136752136752138e-06,
"loss": 0.4305,
"step": 870
},
{
"epoch": 13.010769230769231,
"grad_norm": 0.7631092667579651,
"learning_rate": 8.102564102564103e-06,
"loss": 0.6715,
"step": 880
},
{
"epoch": 13.013846153846155,
"grad_norm": 13.839919090270996,
"learning_rate": 8.068376068376069e-06,
"loss": 0.4264,
"step": 890
},
{
"epoch": 13.016923076923076,
"grad_norm": 11.262467384338379,
"learning_rate": 8.034188034188036e-06,
"loss": 0.3711,
"step": 900
},
{
"epoch": 13.02,
"grad_norm": 15.98154354095459,
"learning_rate": 8.000000000000001e-06,
"loss": 0.4453,
"step": 910
},
{
"epoch": 13.02,
"eval_accuracy": 0.6304347826086957,
"eval_loss": 0.8913701772689819,
"eval_runtime": 14.2586,
"eval_samples_per_second": 3.226,
"eval_steps_per_second": 0.842,
"step": 910
},
{
"epoch": 14.003076923076923,
"grad_norm": 22.157976150512695,
"learning_rate": 7.965811965811967e-06,
"loss": 0.3566,
"step": 920
},
{
"epoch": 14.006153846153847,
"grad_norm": 4.313268661499023,
"learning_rate": 7.931623931623932e-06,
"loss": 0.5692,
"step": 930
},
{
"epoch": 14.009230769230768,
"grad_norm": 11.119792938232422,
"learning_rate": 7.897435897435898e-06,
"loss": 0.5033,
"step": 940
},
{
"epoch": 14.012307692307692,
"grad_norm": 14.92096996307373,
"learning_rate": 7.863247863247863e-06,
"loss": 0.3935,
"step": 950
},
{
"epoch": 14.015384615384615,
"grad_norm": 8.508167266845703,
"learning_rate": 7.82905982905983e-06,
"loss": 0.2881,
"step": 960
},
{
"epoch": 14.018461538461539,
"grad_norm": 31.820133209228516,
"learning_rate": 7.794871794871796e-06,
"loss": 0.3375,
"step": 970
},
{
"epoch": 14.02,
"eval_accuracy": 0.5869565217391305,
"eval_loss": 1.4724668264389038,
"eval_runtime": 14.2065,
"eval_samples_per_second": 3.238,
"eval_steps_per_second": 0.845,
"step": 975
},
{
"epoch": 15.00153846153846,
"grad_norm": 1.2260445356369019,
"learning_rate": 7.760683760683761e-06,
"loss": 0.2684,
"step": 980
},
{
"epoch": 15.004615384615384,
"grad_norm": 24.864896774291992,
"learning_rate": 7.726495726495728e-06,
"loss": 0.2463,
"step": 990
},
{
"epoch": 15.007692307692308,
"grad_norm": 8.236894607543945,
"learning_rate": 7.692307692307694e-06,
"loss": 0.4154,
"step": 1000
},
{
"epoch": 15.010769230769231,
"grad_norm": 72.18476867675781,
"learning_rate": 7.658119658119659e-06,
"loss": 0.7205,
"step": 1010
},
{
"epoch": 15.013846153846155,
"grad_norm": 3.46252703666687,
"learning_rate": 7.6239316239316244e-06,
"loss": 0.7416,
"step": 1020
},
{
"epoch": 15.016923076923076,
"grad_norm": 23.594797134399414,
"learning_rate": 7.58974358974359e-06,
"loss": 0.3084,
"step": 1030
},
{
"epoch": 15.02,
"grad_norm": 23.40220069885254,
"learning_rate": 7.555555555555556e-06,
"loss": 0.5423,
"step": 1040
},
{
"epoch": 15.02,
"eval_accuracy": 0.6739130434782609,
"eval_loss": 0.6293339729309082,
"eval_runtime": 14.542,
"eval_samples_per_second": 3.163,
"eval_steps_per_second": 0.825,
"step": 1040
},
{
"epoch": 16.00307692307692,
"grad_norm": 6.501556873321533,
"learning_rate": 7.521367521367522e-06,
"loss": 0.1966,
"step": 1050
},
{
"epoch": 16.006153846153847,
"grad_norm": 1.277785062789917,
"learning_rate": 7.487179487179488e-06,
"loss": 0.3867,
"step": 1060
},
{
"epoch": 16.00923076923077,
"grad_norm": 0.18731409311294556,
"learning_rate": 7.452991452991454e-06,
"loss": 0.3975,
"step": 1070
},
{
"epoch": 16.012307692307694,
"grad_norm": 25.202768325805664,
"learning_rate": 7.4188034188034196e-06,
"loss": 0.6083,
"step": 1080
},
{
"epoch": 16.015384615384615,
"grad_norm": 3.939324378967285,
"learning_rate": 7.384615384615386e-06,
"loss": 0.2747,
"step": 1090
},
{
"epoch": 16.018461538461537,
"grad_norm": 3.0128068923950195,
"learning_rate": 7.350427350427351e-06,
"loss": 0.4718,
"step": 1100
},
{
"epoch": 16.02,
"eval_accuracy": 0.5869565217391305,
"eval_loss": 0.9326255917549133,
"eval_runtime": 15.4385,
"eval_samples_per_second": 2.98,
"eval_steps_per_second": 0.777,
"step": 1105
},
{
"epoch": 17.001538461538463,
"grad_norm": 29.254545211791992,
"learning_rate": 7.316239316239317e-06,
"loss": 0.4704,
"step": 1110
},
{
"epoch": 17.004615384615384,
"grad_norm": 1.9285635948181152,
"learning_rate": 7.282051282051282e-06,
"loss": 0.356,
"step": 1120
},
{
"epoch": 17.00769230769231,
"grad_norm": 35.04645919799805,
"learning_rate": 7.247863247863248e-06,
"loss": 0.5782,
"step": 1130
},
{
"epoch": 17.01076923076923,
"grad_norm": 5.088606834411621,
"learning_rate": 7.213675213675214e-06,
"loss": 0.4747,
"step": 1140
},
{
"epoch": 17.013846153846153,
"grad_norm": 14.807530403137207,
"learning_rate": 7.17948717948718e-06,
"loss": 0.3342,
"step": 1150
},
{
"epoch": 17.016923076923078,
"grad_norm": 5.486337661743164,
"learning_rate": 7.145299145299146e-06,
"loss": 0.6439,
"step": 1160
},
{
"epoch": 17.02,
"grad_norm": 16.447872161865234,
"learning_rate": 7.111111111111112e-06,
"loss": 0.4305,
"step": 1170
},
{
"epoch": 17.02,
"eval_accuracy": 0.6956521739130435,
"eval_loss": 0.6471044421195984,
"eval_runtime": 14.2174,
"eval_samples_per_second": 3.235,
"eval_steps_per_second": 0.844,
"step": 1170
},
{
"epoch": 18.00307692307692,
"grad_norm": 22.18661880493164,
"learning_rate": 7.076923076923078e-06,
"loss": 0.387,
"step": 1180
},
{
"epoch": 18.006153846153847,
"grad_norm": 3.238065004348755,
"learning_rate": 7.0427350427350435e-06,
"loss": 0.1337,
"step": 1190
},
{
"epoch": 18.00923076923077,
"grad_norm": 9.553592681884766,
"learning_rate": 7.008547008547009e-06,
"loss": 0.5108,
"step": 1200
},
{
"epoch": 18.012307692307694,
"grad_norm": 20.022808074951172,
"learning_rate": 6.974358974358974e-06,
"loss": 0.4185,
"step": 1210
},
{
"epoch": 18.015384615384615,
"grad_norm": 32.78887939453125,
"learning_rate": 6.940170940170941e-06,
"loss": 0.5136,
"step": 1220
},
{
"epoch": 18.018461538461537,
"grad_norm": 25.71335792541504,
"learning_rate": 6.905982905982906e-06,
"loss": 0.1434,
"step": 1230
},
{
"epoch": 18.02,
"eval_accuracy": 0.6739130434782609,
"eval_loss": 1.1058884859085083,
"eval_runtime": 14.2564,
"eval_samples_per_second": 3.227,
"eval_steps_per_second": 0.842,
"step": 1235
},
{
"epoch": 19.001538461538463,
"grad_norm": 55.081085205078125,
"learning_rate": 6.871794871794872e-06,
"loss": 0.1972,
"step": 1240
},
{
"epoch": 19.004615384615384,
"grad_norm": 30.474956512451172,
"learning_rate": 6.837606837606839e-06,
"loss": 0.4069,
"step": 1250
},
{
"epoch": 19.00769230769231,
"grad_norm": 34.68502426147461,
"learning_rate": 6.803418803418804e-06,
"loss": 0.5577,
"step": 1260
},
{
"epoch": 19.01076923076923,
"grad_norm": 3.7890563011169434,
"learning_rate": 6.76923076923077e-06,
"loss": 0.4651,
"step": 1270
},
{
"epoch": 19.013846153846153,
"grad_norm": 2.22894024848938,
"learning_rate": 6.735042735042736e-06,
"loss": 0.4381,
"step": 1280
},
{
"epoch": 19.016923076923078,
"grad_norm": 1.855634093284607,
"learning_rate": 6.700854700854701e-06,
"loss": 0.4127,
"step": 1290
},
{
"epoch": 19.02,
"grad_norm": 21.70796012878418,
"learning_rate": 6.666666666666667e-06,
"loss": 0.3129,
"step": 1300
},
{
"epoch": 19.02,
"eval_accuracy": 0.7391304347826086,
"eval_loss": 0.7987100481987,
"eval_runtime": 14.9435,
"eval_samples_per_second": 3.078,
"eval_steps_per_second": 0.803,
"step": 1300
},
{
"epoch": 20.00307692307692,
"grad_norm": 37.435733795166016,
"learning_rate": 6.632478632478633e-06,
"loss": 0.4383,
"step": 1310
},
{
"epoch": 20.006153846153847,
"grad_norm": 0.6071020364761353,
"learning_rate": 6.598290598290598e-06,
"loss": 0.2316,
"step": 1320
},
{
"epoch": 20.00923076923077,
"grad_norm": 8.772833824157715,
"learning_rate": 6.564102564102565e-06,
"loss": 0.8647,
"step": 1330
},
{
"epoch": 20.012307692307694,
"grad_norm": 1.2445635795593262,
"learning_rate": 6.529914529914531e-06,
"loss": 0.256,
"step": 1340
},
{
"epoch": 20.015384615384615,
"grad_norm": 12.750873565673828,
"learning_rate": 6.495726495726496e-06,
"loss": 0.3799,
"step": 1350
},
{
"epoch": 20.018461538461537,
"grad_norm": 0.15409302711486816,
"learning_rate": 6.461538461538463e-06,
"loss": 0.2712,
"step": 1360
},
{
"epoch": 20.02,
"eval_accuracy": 0.6956521739130435,
"eval_loss": 0.9583348631858826,
"eval_runtime": 14.2561,
"eval_samples_per_second": 3.227,
"eval_steps_per_second": 0.842,
"step": 1365
},
{
"epoch": 21.001538461538463,
"grad_norm": 0.9794425368309021,
"learning_rate": 6.427350427350428e-06,
"loss": 0.2697,
"step": 1370
},
{
"epoch": 21.004615384615384,
"grad_norm": 41.89208221435547,
"learning_rate": 6.3931623931623935e-06,
"loss": 0.1116,
"step": 1380
},
{
"epoch": 21.00769230769231,
"grad_norm": 0.219721257686615,
"learning_rate": 6.358974358974359e-06,
"loss": 0.3221,
"step": 1390
},
{
"epoch": 21.01076923076923,
"grad_norm": 51.84693908691406,
"learning_rate": 6.324786324786325e-06,
"loss": 0.5398,
"step": 1400
},
{
"epoch": 21.013846153846153,
"grad_norm": 0.23835326731204987,
"learning_rate": 6.290598290598291e-06,
"loss": 0.3207,
"step": 1410
},
{
"epoch": 21.016923076923078,
"grad_norm": 33.43702697753906,
"learning_rate": 6.256410256410257e-06,
"loss": 0.2401,
"step": 1420
},
{
"epoch": 21.02,
"grad_norm": 39.07565689086914,
"learning_rate": 6.222222222222223e-06,
"loss": 0.5669,
"step": 1430
},
{
"epoch": 21.02,
"eval_accuracy": 0.5652173913043478,
"eval_loss": 1.9777171611785889,
"eval_runtime": 14.2455,
"eval_samples_per_second": 3.229,
"eval_steps_per_second": 0.842,
"step": 1430
},
{
"epoch": 22.00307692307692,
"grad_norm": 19.675718307495117,
"learning_rate": 6.188034188034189e-06,
"loss": 0.2085,
"step": 1440
},
{
"epoch": 22.006153846153847,
"grad_norm": 47.32659912109375,
"learning_rate": 6.153846153846155e-06,
"loss": 0.2847,
"step": 1450
},
{
"epoch": 22.00923076923077,
"grad_norm": 20.55663299560547,
"learning_rate": 6.11965811965812e-06,
"loss": 0.6073,
"step": 1460
},
{
"epoch": 22.012307692307694,
"grad_norm": 16.39328956604004,
"learning_rate": 6.085470085470086e-06,
"loss": 0.2029,
"step": 1470
},
{
"epoch": 22.015384615384615,
"grad_norm": 0.12228210270404816,
"learning_rate": 6.051282051282051e-06,
"loss": 0.3122,
"step": 1480
},
{
"epoch": 22.018461538461537,
"grad_norm": 0.8447695970535278,
"learning_rate": 6.0170940170940174e-06,
"loss": 0.3252,
"step": 1490
},
{
"epoch": 22.02,
"eval_accuracy": 0.6739130434782609,
"eval_loss": 0.9683187007904053,
"eval_runtime": 14.2253,
"eval_samples_per_second": 3.234,
"eval_steps_per_second": 0.844,
"step": 1495
},
{
"epoch": 23.001538461538463,
"grad_norm": 0.7028045058250427,
"learning_rate": 5.982905982905983e-06,
"loss": 0.5694,
"step": 1500
},
{
"epoch": 23.004615384615384,
"grad_norm": 0.02848893031477928,
"learning_rate": 5.948717948717949e-06,
"loss": 0.1348,
"step": 1510
},
{
"epoch": 23.00769230769231,
"grad_norm": 16.87662696838379,
"learning_rate": 5.914529914529915e-06,
"loss": 0.243,
"step": 1520
},
{
"epoch": 23.01076923076923,
"grad_norm": 90.88231658935547,
"learning_rate": 5.880341880341881e-06,
"loss": 0.1983,
"step": 1530
},
{
"epoch": 23.013846153846153,
"grad_norm": 6.538637638092041,
"learning_rate": 5.846153846153847e-06,
"loss": 0.2995,
"step": 1540
},
{
"epoch": 23.016923076923078,
"grad_norm": 29.127273559570312,
"learning_rate": 5.8119658119658126e-06,
"loss": 0.4176,
"step": 1550
},
{
"epoch": 23.02,
"grad_norm": 62.22664260864258,
"learning_rate": 5.777777777777778e-06,
"loss": 0.1086,
"step": 1560
},
{
"epoch": 23.02,
"eval_accuracy": 0.7391304347826086,
"eval_loss": 1.0588734149932861,
"eval_runtime": 14.7624,
"eval_samples_per_second": 3.116,
"eval_steps_per_second": 0.813,
"step": 1560
},
{
"epoch": 24.00307692307692,
"grad_norm": 0.369827002286911,
"learning_rate": 5.743589743589743e-06,
"loss": 0.1041,
"step": 1570
},
{
"epoch": 24.006153846153847,
"grad_norm": 0.0885409414768219,
"learning_rate": 5.70940170940171e-06,
"loss": 0.1651,
"step": 1580
},
{
"epoch": 24.00923076923077,
"grad_norm": 38.52853012084961,
"learning_rate": 5.675213675213675e-06,
"loss": 0.4727,
"step": 1590
},
{
"epoch": 24.012307692307694,
"grad_norm": 62.92534255981445,
"learning_rate": 5.641025641025641e-06,
"loss": 0.5216,
"step": 1600
},
{
"epoch": 24.015384615384615,
"grad_norm": 162.53988647460938,
"learning_rate": 5.606837606837608e-06,
"loss": 0.2298,
"step": 1610
},
{
"epoch": 24.018461538461537,
"grad_norm": 11.33154582977295,
"learning_rate": 5.572649572649573e-06,
"loss": 0.2289,
"step": 1620
},
{
"epoch": 24.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.0725319385528564,
"eval_runtime": 14.2373,
"eval_samples_per_second": 3.231,
"eval_steps_per_second": 0.843,
"step": 1625
},
{
"epoch": 25.001538461538463,
"grad_norm": 0.03378036618232727,
"learning_rate": 5.538461538461539e-06,
"loss": 0.107,
"step": 1630
},
{
"epoch": 25.004615384615384,
"grad_norm": 15.620013236999512,
"learning_rate": 5.504273504273505e-06,
"loss": 0.1683,
"step": 1640
},
{
"epoch": 25.00769230769231,
"grad_norm": 35.80552673339844,
"learning_rate": 5.470085470085471e-06,
"loss": 0.5356,
"step": 1650
},
{
"epoch": 25.01076923076923,
"grad_norm": 119.33697509765625,
"learning_rate": 5.435897435897436e-06,
"loss": 0.3483,
"step": 1660
},
{
"epoch": 25.013846153846153,
"grad_norm": 0.039877910166978836,
"learning_rate": 5.401709401709402e-06,
"loss": 0.3975,
"step": 1670
},
{
"epoch": 25.016923076923078,
"grad_norm": 122.86112213134766,
"learning_rate": 5.367521367521367e-06,
"loss": 0.0873,
"step": 1680
},
{
"epoch": 25.02,
"grad_norm": 0.03186870366334915,
"learning_rate": 5.333333333333334e-06,
"loss": 0.2527,
"step": 1690
},
{
"epoch": 25.02,
"eval_accuracy": 0.7608695652173914,
"eval_loss": 1.0045281648635864,
"eval_runtime": 14.274,
"eval_samples_per_second": 3.223,
"eval_steps_per_second": 0.841,
"step": 1690
},
{
"epoch": 26.00307692307692,
"grad_norm": 0.012652536854147911,
"learning_rate": 5.2991452991453e-06,
"loss": 0.0624,
"step": 1700
},
{
"epoch": 26.006153846153847,
"grad_norm": 8.27054500579834,
"learning_rate": 5.264957264957265e-06,
"loss": 0.2865,
"step": 1710
},
{
"epoch": 26.00923076923077,
"grad_norm": 0.5706243515014648,
"learning_rate": 5.230769230769232e-06,
"loss": 0.283,
"step": 1720
},
{
"epoch": 26.012307692307694,
"grad_norm": 0.05628874897956848,
"learning_rate": 5.196581196581197e-06,
"loss": 0.2724,
"step": 1730
},
{
"epoch": 26.015384615384615,
"grad_norm": 3.2168116569519043,
"learning_rate": 5.162393162393163e-06,
"loss": 0.2868,
"step": 1740
},
{
"epoch": 26.018461538461537,
"grad_norm": 1.1663625240325928,
"learning_rate": 5.128205128205128e-06,
"loss": 0.433,
"step": 1750
},
{
"epoch": 26.02,
"eval_accuracy": 0.6304347826086957,
"eval_loss": 1.157365322113037,
"eval_runtime": 15.047,
"eval_samples_per_second": 3.057,
"eval_steps_per_second": 0.798,
"step": 1755
},
{
"epoch": 27.001538461538463,
"grad_norm": 0.0997692197561264,
"learning_rate": 5.094017094017094e-06,
"loss": 0.4229,
"step": 1760
},
{
"epoch": 27.004615384615384,
"grad_norm": 0.09240197390317917,
"learning_rate": 5.05982905982906e-06,
"loss": 0.3206,
"step": 1770
},
{
"epoch": 27.00769230769231,
"grad_norm": 0.2623593509197235,
"learning_rate": 5.025641025641026e-06,
"loss": 0.1986,
"step": 1780
},
{
"epoch": 27.01076923076923,
"grad_norm": 0.24803876876831055,
"learning_rate": 4.991452991452992e-06,
"loss": 0.4088,
"step": 1790
},
{
"epoch": 27.013846153846153,
"grad_norm": 0.5555140972137451,
"learning_rate": 4.957264957264958e-06,
"loss": 0.155,
"step": 1800
},
{
"epoch": 27.016923076923078,
"grad_norm": 0.22051659226417542,
"learning_rate": 4.923076923076924e-06,
"loss": 0.2226,
"step": 1810
},
{
"epoch": 27.02,
"grad_norm": 142.2720489501953,
"learning_rate": 4.888888888888889e-06,
"loss": 0.3203,
"step": 1820
},
{
"epoch": 27.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.0994583368301392,
"eval_runtime": 14.2217,
"eval_samples_per_second": 3.234,
"eval_steps_per_second": 0.844,
"step": 1820
},
{
"epoch": 28.00307692307692,
"grad_norm": 1.9806488752365112,
"learning_rate": 4.854700854700855e-06,
"loss": 0.0561,
"step": 1830
},
{
"epoch": 28.006153846153847,
"grad_norm": 0.4285343289375305,
"learning_rate": 4.820512820512821e-06,
"loss": 0.0691,
"step": 1840
},
{
"epoch": 28.00923076923077,
"grad_norm": 0.16625083982944489,
"learning_rate": 4.786324786324787e-06,
"loss": 0.2259,
"step": 1850
},
{
"epoch": 28.012307692307694,
"grad_norm": 1.8851540088653564,
"learning_rate": 4.752136752136752e-06,
"loss": 0.0057,
"step": 1860
},
{
"epoch": 28.015384615384615,
"grad_norm": 23.864330291748047,
"learning_rate": 4.717948717948718e-06,
"loss": 0.0304,
"step": 1870
},
{
"epoch": 28.018461538461537,
"grad_norm": 36.58037567138672,
"learning_rate": 4.6837606837606844e-06,
"loss": 0.65,
"step": 1880
},
{
"epoch": 28.02,
"eval_accuracy": 0.6956521739130435,
"eval_loss": 1.4325636625289917,
"eval_runtime": 14.2225,
"eval_samples_per_second": 3.234,
"eval_steps_per_second": 0.844,
"step": 1885
},
{
"epoch": 29.001538461538463,
"grad_norm": 97.73675537109375,
"learning_rate": 4.64957264957265e-06,
"loss": 0.1698,
"step": 1890
},
{
"epoch": 29.004615384615384,
"grad_norm": 72.0632095336914,
"learning_rate": 4.615384615384616e-06,
"loss": 0.5534,
"step": 1900
},
{
"epoch": 29.00769230769231,
"grad_norm": 36.123287200927734,
"learning_rate": 4.581196581196582e-06,
"loss": 0.6991,
"step": 1910
},
{
"epoch": 29.01076923076923,
"grad_norm": 0.9478359222412109,
"learning_rate": 4.547008547008547e-06,
"loss": 0.3699,
"step": 1920
},
{
"epoch": 29.013846153846153,
"grad_norm": 13.153339385986328,
"learning_rate": 4.512820512820513e-06,
"loss": 0.0125,
"step": 1930
},
{
"epoch": 29.016923076923078,
"grad_norm": 0.0068915472365915775,
"learning_rate": 4.4786324786324796e-06,
"loss": 0.3134,
"step": 1940
},
{
"epoch": 29.02,
"grad_norm": 0.5487728714942932,
"learning_rate": 4.444444444444444e-06,
"loss": 0.1041,
"step": 1950
},
{
"epoch": 29.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.217459797859192,
"eval_runtime": 14.1868,
"eval_samples_per_second": 3.242,
"eval_steps_per_second": 0.846,
"step": 1950
},
{
"epoch": 30.00307692307692,
"grad_norm": 0.011832280084490776,
"learning_rate": 4.4102564102564104e-06,
"loss": 0.1152,
"step": 1960
},
{
"epoch": 30.006153846153847,
"grad_norm": 0.02865392342209816,
"learning_rate": 4.376068376068377e-06,
"loss": 0.0966,
"step": 1970
},
{
"epoch": 30.00923076923077,
"grad_norm": 8.51937484741211,
"learning_rate": 4.341880341880342e-06,
"loss": 0.1849,
"step": 1980
},
{
"epoch": 30.012307692307694,
"grad_norm": 0.006231443490833044,
"learning_rate": 4.307692307692308e-06,
"loss": 0.4387,
"step": 1990
},
{
"epoch": 30.015384615384615,
"grad_norm": 1.779493808746338,
"learning_rate": 4.273504273504274e-06,
"loss": 0.2136,
"step": 2000
},
{
"epoch": 30.018461538461537,
"grad_norm": 119.08480072021484,
"learning_rate": 4.239316239316239e-06,
"loss": 0.0569,
"step": 2010
},
{
"epoch": 30.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.4498846530914307,
"eval_runtime": 14.237,
"eval_samples_per_second": 3.231,
"eval_steps_per_second": 0.843,
"step": 2015
},
{
"epoch": 31.001538461538463,
"grad_norm": 1.9587482213974,
"learning_rate": 4.2051282051282055e-06,
"loss": 0.0864,
"step": 2020
},
{
"epoch": 31.004615384615384,
"grad_norm": 193.9834442138672,
"learning_rate": 4.170940170940172e-06,
"loss": 0.328,
"step": 2030
},
{
"epoch": 31.00769230769231,
"grad_norm": 4.229808807373047,
"learning_rate": 4.136752136752136e-06,
"loss": 0.0106,
"step": 2040
},
{
"epoch": 31.01076923076923,
"grad_norm": 0.17659415304660797,
"learning_rate": 4.102564102564103e-06,
"loss": 0.0987,
"step": 2050
},
{
"epoch": 31.013846153846153,
"grad_norm": 185.70263671875,
"learning_rate": 4.068376068376069e-06,
"loss": 0.5216,
"step": 2060
},
{
"epoch": 31.016923076923078,
"grad_norm": 151.5210418701172,
"learning_rate": 4.034188034188034e-06,
"loss": 0.3362,
"step": 2070
},
{
"epoch": 31.02,
"grad_norm": 0.8233569860458374,
"learning_rate": 4.000000000000001e-06,
"loss": 0.2142,
"step": 2080
},
{
"epoch": 31.02,
"eval_accuracy": 0.7608695652173914,
"eval_loss": 1.3655930757522583,
"eval_runtime": 14.1707,
"eval_samples_per_second": 3.246,
"eval_steps_per_second": 0.847,
"step": 2080
},
{
"epoch": 32.003076923076925,
"grad_norm": 3.8791003227233887,
"learning_rate": 3.965811965811966e-06,
"loss": 0.0783,
"step": 2090
},
{
"epoch": 32.00615384615384,
"grad_norm": 5.642085552215576,
"learning_rate": 3.9316239316239315e-06,
"loss": 0.0175,
"step": 2100
},
{
"epoch": 32.00923076923077,
"grad_norm": 58.637088775634766,
"learning_rate": 3.897435897435898e-06,
"loss": 0.2936,
"step": 2110
},
{
"epoch": 32.012307692307694,
"grad_norm": 0.4610566198825836,
"learning_rate": 3.863247863247864e-06,
"loss": 0.1162,
"step": 2120
},
{
"epoch": 32.01538461538462,
"grad_norm": 0.08907013386487961,
"learning_rate": 3.8290598290598295e-06,
"loss": 0.2103,
"step": 2130
},
{
"epoch": 32.01846153846154,
"grad_norm": 0.04717539623379707,
"learning_rate": 3.794871794871795e-06,
"loss": 0.343,
"step": 2140
},
{
"epoch": 32.02,
"eval_accuracy": 0.7608695652173914,
"eval_loss": 1.3127285242080688,
"eval_runtime": 14.1293,
"eval_samples_per_second": 3.256,
"eval_steps_per_second": 0.849,
"step": 2145
},
{
"epoch": 33.00153846153846,
"grad_norm": 0.09433780610561371,
"learning_rate": 3.760683760683761e-06,
"loss": 0.2631,
"step": 2150
},
{
"epoch": 33.004615384615384,
"grad_norm": 0.08145506680011749,
"learning_rate": 3.726495726495727e-06,
"loss": 0.0053,
"step": 2160
},
{
"epoch": 33.00769230769231,
"grad_norm": 31.62529945373535,
"learning_rate": 3.692307692307693e-06,
"loss": 0.3741,
"step": 2170
},
{
"epoch": 33.01076923076923,
"grad_norm": 198.175048828125,
"learning_rate": 3.6581196581196584e-06,
"loss": 0.2364,
"step": 2180
},
{
"epoch": 33.01384615384615,
"grad_norm": 0.010738876648247242,
"learning_rate": 3.623931623931624e-06,
"loss": 0.2583,
"step": 2190
},
{
"epoch": 33.01692307692308,
"grad_norm": 0.07434792816638947,
"learning_rate": 3.58974358974359e-06,
"loss": 0.1219,
"step": 2200
},
{
"epoch": 33.02,
"grad_norm": 0.04365354776382446,
"learning_rate": 3.555555555555556e-06,
"loss": 0.0331,
"step": 2210
},
{
"epoch": 33.02,
"eval_accuracy": 0.6956521739130435,
"eval_loss": 1.5137012004852295,
"eval_runtime": 14.1663,
"eval_samples_per_second": 3.247,
"eval_steps_per_second": 0.847,
"step": 2210
},
{
"epoch": 34.003076923076925,
"grad_norm": 0.04650627076625824,
"learning_rate": 3.5213675213675218e-06,
"loss": 0.2369,
"step": 2220
},
{
"epoch": 34.00615384615384,
"grad_norm": 0.016520565375685692,
"learning_rate": 3.487179487179487e-06,
"loss": 0.0013,
"step": 2230
},
{
"epoch": 34.00923076923077,
"grad_norm": 0.010590762831270695,
"learning_rate": 3.452991452991453e-06,
"loss": 0.1856,
"step": 2240
},
{
"epoch": 34.012307692307694,
"grad_norm": 0.21154338121414185,
"learning_rate": 3.4188034188034193e-06,
"loss": 0.3057,
"step": 2250
},
{
"epoch": 34.01538461538462,
"grad_norm": 282.3055419921875,
"learning_rate": 3.384615384615385e-06,
"loss": 0.1637,
"step": 2260
},
{
"epoch": 34.01846153846154,
"grad_norm": 263.7860412597656,
"learning_rate": 3.3504273504273506e-06,
"loss": 0.1634,
"step": 2270
},
{
"epoch": 34.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.4773932695388794,
"eval_runtime": 14.2162,
"eval_samples_per_second": 3.236,
"eval_steps_per_second": 0.844,
"step": 2275
},
{
"epoch": 35.00153846153846,
"grad_norm": 0.02735510654747486,
"learning_rate": 3.3162393162393165e-06,
"loss": 0.0023,
"step": 2280
},
{
"epoch": 35.004615384615384,
"grad_norm": 0.04332771152257919,
"learning_rate": 3.2820512820512823e-06,
"loss": 0.2221,
"step": 2290
},
{
"epoch": 35.00769230769231,
"grad_norm": 0.016110800206661224,
"learning_rate": 3.247863247863248e-06,
"loss": 0.2181,
"step": 2300
},
{
"epoch": 35.01076923076923,
"grad_norm": 0.12393196672201157,
"learning_rate": 3.213675213675214e-06,
"loss": 0.4696,
"step": 2310
},
{
"epoch": 35.01384615384615,
"grad_norm": 0.06598012894392014,
"learning_rate": 3.1794871794871795e-06,
"loss": 0.1672,
"step": 2320
},
{
"epoch": 35.01692307692308,
"grad_norm": 0.004367148969322443,
"learning_rate": 3.1452991452991453e-06,
"loss": 0.0012,
"step": 2330
},
{
"epoch": 35.02,
"grad_norm": 0.01092283334583044,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.5358,
"step": 2340
},
{
"epoch": 35.02,
"eval_accuracy": 0.6739130434782609,
"eval_loss": 1.5174051523208618,
"eval_runtime": 14.2031,
"eval_samples_per_second": 3.239,
"eval_steps_per_second": 0.845,
"step": 2340
},
{
"epoch": 36.003076923076925,
"grad_norm": 0.026499304920434952,
"learning_rate": 3.0769230769230774e-06,
"loss": 0.1287,
"step": 2350
},
{
"epoch": 36.00615384615384,
"grad_norm": 2.692544460296631,
"learning_rate": 3.042735042735043e-06,
"loss": 0.2211,
"step": 2360
},
{
"epoch": 36.00923076923077,
"grad_norm": 0.026944167912006378,
"learning_rate": 3.0085470085470087e-06,
"loss": 0.01,
"step": 2370
},
{
"epoch": 36.012307692307694,
"grad_norm": 0.16730260848999023,
"learning_rate": 2.9743589743589746e-06,
"loss": 0.3199,
"step": 2380
},
{
"epoch": 36.01538461538462,
"grad_norm": 11.841240882873535,
"learning_rate": 2.9401709401709404e-06,
"loss": 0.0015,
"step": 2390
},
{
"epoch": 36.01846153846154,
"grad_norm": 206.77235412597656,
"learning_rate": 2.9059829059829063e-06,
"loss": 0.0396,
"step": 2400
},
{
"epoch": 36.02,
"eval_accuracy": 0.6956521739130435,
"eval_loss": 1.447475552558899,
"eval_runtime": 14.233,
"eval_samples_per_second": 3.232,
"eval_steps_per_second": 0.843,
"step": 2405
},
{
"epoch": 37.00153846153846,
"grad_norm": 0.008494521491229534,
"learning_rate": 2.8717948717948717e-06,
"loss": 0.0187,
"step": 2410
},
{
"epoch": 37.004615384615384,
"grad_norm": 0.12653498351573944,
"learning_rate": 2.8376068376068376e-06,
"loss": 0.188,
"step": 2420
},
{
"epoch": 37.00769230769231,
"grad_norm": 0.06878158450126648,
"learning_rate": 2.803418803418804e-06,
"loss": 0.1975,
"step": 2430
},
{
"epoch": 37.01076923076923,
"grad_norm": 0.012811361812055111,
"learning_rate": 2.7692307692307697e-06,
"loss": 0.0616,
"step": 2440
},
{
"epoch": 37.01384615384615,
"grad_norm": 0.012951454147696495,
"learning_rate": 2.7350427350427355e-06,
"loss": 0.0182,
"step": 2450
},
{
"epoch": 37.01692307692308,
"grad_norm": 0.015709415078163147,
"learning_rate": 2.700854700854701e-06,
"loss": 0.1402,
"step": 2460
},
{
"epoch": 37.02,
"grad_norm": 111.22488403320312,
"learning_rate": 2.666666666666667e-06,
"loss": 0.7272,
"step": 2470
},
{
"epoch": 37.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.6262131929397583,
"eval_runtime": 14.2867,
"eval_samples_per_second": 3.22,
"eval_steps_per_second": 0.84,
"step": 2470
},
{
"epoch": 38.003076923076925,
"grad_norm": 0.6310192346572876,
"learning_rate": 2.6324786324786327e-06,
"loss": 0.069,
"step": 2480
},
{
"epoch": 38.00615384615384,
"grad_norm": 0.005475493147969246,
"learning_rate": 2.5982905982905985e-06,
"loss": 0.454,
"step": 2490
},
{
"epoch": 38.00923076923077,
"grad_norm": 0.031099509447813034,
"learning_rate": 2.564102564102564e-06,
"loss": 0.2161,
"step": 2500
},
{
"epoch": 38.012307692307694,
"grad_norm": 0.025538576766848564,
"learning_rate": 2.52991452991453e-06,
"loss": 0.0924,
"step": 2510
},
{
"epoch": 38.01538461538462,
"grad_norm": 0.26595520973205566,
"learning_rate": 2.495726495726496e-06,
"loss": 0.4355,
"step": 2520
},
{
"epoch": 38.01846153846154,
"grad_norm": 0.00920882262289524,
"learning_rate": 2.461538461538462e-06,
"loss": 0.3445,
"step": 2530
},
{
"epoch": 38.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.787401795387268,
"eval_runtime": 14.2035,
"eval_samples_per_second": 3.239,
"eval_steps_per_second": 0.845,
"step": 2535
},
{
"epoch": 39.00153846153846,
"grad_norm": 0.216685950756073,
"learning_rate": 2.4273504273504274e-06,
"loss": 0.1452,
"step": 2540
},
{
"epoch": 39.004615384615384,
"grad_norm": 228.7432098388672,
"learning_rate": 2.3931623931623937e-06,
"loss": 0.1403,
"step": 2550
},
{
"epoch": 39.00769230769231,
"grad_norm": 0.0731094554066658,
"learning_rate": 2.358974358974359e-06,
"loss": 0.0189,
"step": 2560
},
{
"epoch": 39.01076923076923,
"grad_norm": 0.01636696793138981,
"learning_rate": 2.324786324786325e-06,
"loss": 0.1849,
"step": 2570
},
{
"epoch": 39.01384615384615,
"grad_norm": 0.005581328645348549,
"learning_rate": 2.290598290598291e-06,
"loss": 0.3136,
"step": 2580
},
{
"epoch": 39.01692307692308,
"grad_norm": 0.13789384067058563,
"learning_rate": 2.2564102564102566e-06,
"loss": 0.0827,
"step": 2590
},
{
"epoch": 39.02,
"grad_norm": 0.01795981265604496,
"learning_rate": 2.222222222222222e-06,
"loss": 0.2743,
"step": 2600
},
{
"epoch": 39.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.5739110708236694,
"eval_runtime": 14.2448,
"eval_samples_per_second": 3.229,
"eval_steps_per_second": 0.842,
"step": 2600
},
{
"epoch": 40.003076923076925,
"grad_norm": 77.0948715209961,
"learning_rate": 2.1880341880341884e-06,
"loss": 0.1258,
"step": 2610
},
{
"epoch": 40.00615384615384,
"grad_norm": 0.06270722299814224,
"learning_rate": 2.153846153846154e-06,
"loss": 0.1022,
"step": 2620
},
{
"epoch": 40.00923076923077,
"grad_norm": 0.011491877026855946,
"learning_rate": 2.1196581196581196e-06,
"loss": 0.0004,
"step": 2630
},
{
"epoch": 40.012307692307694,
"grad_norm": 204.40928649902344,
"learning_rate": 2.085470085470086e-06,
"loss": 0.0763,
"step": 2640
},
{
"epoch": 40.01538461538462,
"grad_norm": 0.006064881104975939,
"learning_rate": 2.0512820512820513e-06,
"loss": 0.2972,
"step": 2650
},
{
"epoch": 40.01846153846154,
"grad_norm": 0.01428136881440878,
"learning_rate": 2.017094017094017e-06,
"loss": 0.2592,
"step": 2660
},
{
"epoch": 40.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.6092571020126343,
"eval_runtime": 14.2231,
"eval_samples_per_second": 3.234,
"eval_steps_per_second": 0.844,
"step": 2665
},
{
"epoch": 41.00153846153846,
"grad_norm": 237.1355743408203,
"learning_rate": 1.982905982905983e-06,
"loss": 0.2178,
"step": 2670
},
{
"epoch": 41.004615384615384,
"grad_norm": 0.01185480784624815,
"learning_rate": 1.948717948717949e-06,
"loss": 0.0087,
"step": 2680
},
{
"epoch": 41.00769230769231,
"grad_norm": 0.01697443798184395,
"learning_rate": 1.9145299145299148e-06,
"loss": 0.4878,
"step": 2690
},
{
"epoch": 41.01076923076923,
"grad_norm": 275.4456481933594,
"learning_rate": 1.8803418803418804e-06,
"loss": 0.0663,
"step": 2700
},
{
"epoch": 41.01384615384615,
"grad_norm": 0.5372545719146729,
"learning_rate": 1.8461538461538465e-06,
"loss": 0.0006,
"step": 2710
},
{
"epoch": 41.01692307692308,
"grad_norm": 0.0075569795444607735,
"learning_rate": 1.811965811965812e-06,
"loss": 0.0014,
"step": 2720
},
{
"epoch": 41.02,
"grad_norm": 0.09308155626058578,
"learning_rate": 1.777777777777778e-06,
"loss": 0.1367,
"step": 2730
},
{
"epoch": 41.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.5614510774612427,
"eval_runtime": 14.1903,
"eval_samples_per_second": 3.242,
"eval_steps_per_second": 0.846,
"step": 2730
},
{
"epoch": 42.003076923076925,
"grad_norm": 0.07777335494756699,
"learning_rate": 1.7435897435897436e-06,
"loss": 0.0557,
"step": 2740
},
{
"epoch": 42.00615384615384,
"grad_norm": 0.005379654001444578,
"learning_rate": 1.7094017094017097e-06,
"loss": 0.0125,
"step": 2750
},
{
"epoch": 42.00923076923077,
"grad_norm": 278.01739501953125,
"learning_rate": 1.6752136752136753e-06,
"loss": 0.1841,
"step": 2760
},
{
"epoch": 42.012307692307694,
"grad_norm": 0.15389478206634521,
"learning_rate": 1.6410256410256412e-06,
"loss": 0.0351,
"step": 2770
},
{
"epoch": 42.01538461538462,
"grad_norm": 70.6353530883789,
"learning_rate": 1.606837606837607e-06,
"loss": 0.3138,
"step": 2780
},
{
"epoch": 42.01846153846154,
"grad_norm": 0.006937114987522364,
"learning_rate": 1.5726495726495727e-06,
"loss": 0.0459,
"step": 2790
},
{
"epoch": 42.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.5237913131713867,
"eval_runtime": 13.2102,
"eval_samples_per_second": 3.482,
"eval_steps_per_second": 0.908,
"step": 2795
},
{
"epoch": 43.00153846153846,
"grad_norm": 0.016063353046774864,
"learning_rate": 1.5384615384615387e-06,
"loss": 0.0381,
"step": 2800
},
{
"epoch": 43.004615384615384,
"grad_norm": 0.12228815257549286,
"learning_rate": 1.5042735042735044e-06,
"loss": 0.001,
"step": 2810
},
{
"epoch": 43.00769230769231,
"grad_norm": 25.483070373535156,
"learning_rate": 1.4700854700854702e-06,
"loss": 0.1634,
"step": 2820
},
{
"epoch": 43.01076923076923,
"grad_norm": 9.318077087402344,
"learning_rate": 1.4358974358974359e-06,
"loss": 0.317,
"step": 2830
},
{
"epoch": 43.01384615384615,
"grad_norm": 0.2631111443042755,
"learning_rate": 1.401709401709402e-06,
"loss": 0.018,
"step": 2840
},
{
"epoch": 43.01692307692308,
"grad_norm": 0.2109161764383316,
"learning_rate": 1.3675213675213678e-06,
"loss": 0.0432,
"step": 2850
},
{
"epoch": 43.02,
"grad_norm": 0.0032740840688347816,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.0102,
"step": 2860
},
{
"epoch": 43.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.4446630477905273,
"eval_runtime": 13.2357,
"eval_samples_per_second": 3.475,
"eval_steps_per_second": 0.907,
"step": 2860
},
{
"epoch": 44.003076923076925,
"grad_norm": 0.050007205456495285,
"learning_rate": 1.2991452991452993e-06,
"loss": 0.0036,
"step": 2870
},
{
"epoch": 44.00615384615384,
"grad_norm": 0.02491617575287819,
"learning_rate": 1.264957264957265e-06,
"loss": 0.2155,
"step": 2880
},
{
"epoch": 44.00923076923077,
"grad_norm": 0.01646566204726696,
"learning_rate": 1.230769230769231e-06,
"loss": 0.0006,
"step": 2890
},
{
"epoch": 44.012307692307694,
"grad_norm": 0.6604272723197937,
"learning_rate": 1.1965811965811968e-06,
"loss": 0.3125,
"step": 2900
},
{
"epoch": 44.01538461538462,
"grad_norm": 0.008244900032877922,
"learning_rate": 1.1623931623931625e-06,
"loss": 0.1206,
"step": 2910
},
{
"epoch": 44.01846153846154,
"grad_norm": 4.3739237785339355,
"learning_rate": 1.1282051282051283e-06,
"loss": 0.0084,
"step": 2920
},
{
"epoch": 44.02,
"eval_accuracy": 0.782608695652174,
"eval_loss": 1.3307474851608276,
"eval_runtime": 14.3847,
"eval_samples_per_second": 3.198,
"eval_steps_per_second": 0.834,
"step": 2925
},
{
"epoch": 45.00153846153846,
"grad_norm": 0.011739898473024368,
"learning_rate": 1.0940170940170942e-06,
"loss": 0.004,
"step": 2930
},
{
"epoch": 45.004615384615384,
"grad_norm": 0.011195595376193523,
"learning_rate": 1.0598290598290598e-06,
"loss": 0.0017,
"step": 2940
},
{
"epoch": 45.00769230769231,
"grad_norm": 0.007319163531064987,
"learning_rate": 1.0256410256410257e-06,
"loss": 0.1595,
"step": 2950
},
{
"epoch": 45.01076923076923,
"grad_norm": 193.5129852294922,
"learning_rate": 9.914529914529915e-07,
"loss": 0.1625,
"step": 2960
},
{
"epoch": 45.01384615384615,
"grad_norm": 0.01686985418200493,
"learning_rate": 9.572649572649574e-07,
"loss": 0.0014,
"step": 2970
},
{
"epoch": 45.01692307692308,
"grad_norm": 0.09226860105991364,
"learning_rate": 9.230769230769232e-07,
"loss": 0.5208,
"step": 2980
},
{
"epoch": 45.02,
"grad_norm": 0.21285171806812286,
"learning_rate": 8.88888888888889e-07,
"loss": 0.106,
"step": 2990
},
{
"epoch": 45.02,
"eval_accuracy": 0.7608695652173914,
"eval_loss": 1.3326114416122437,
"eval_runtime": 14.1718,
"eval_samples_per_second": 3.246,
"eval_steps_per_second": 0.847,
"step": 2990
},
{
"epoch": 46.003076923076925,
"grad_norm": 0.07202818244695663,
"learning_rate": 8.547008547008548e-07,
"loss": 0.0018,
"step": 3000
},
{
"epoch": 46.00615384615384,
"grad_norm": 61.24966812133789,
"learning_rate": 8.205128205128206e-07,
"loss": 0.1323,
"step": 3010
},
{
"epoch": 46.00923076923077,
"grad_norm": 0.006612909492105246,
"learning_rate": 7.863247863247863e-07,
"loss": 0.0181,
"step": 3020
},
{
"epoch": 46.012307692307694,
"grad_norm": 0.028757771477103233,
"learning_rate": 7.521367521367522e-07,
"loss": 0.0946,
"step": 3030
},
{
"epoch": 46.01538461538462,
"grad_norm": 0.12236111611127853,
"learning_rate": 7.179487179487179e-07,
"loss": 0.0006,
"step": 3040
},
{
"epoch": 46.01846153846154,
"grad_norm": 0.01944558136165142,
"learning_rate": 6.837606837606839e-07,
"loss": 0.0796,
"step": 3050
},
{
"epoch": 46.02,
"eval_accuracy": 0.7391304347826086,
"eval_loss": 1.3509420156478882,
"eval_runtime": 14.2379,
"eval_samples_per_second": 3.231,
"eval_steps_per_second": 0.843,
"step": 3055
},
{
"epoch": 47.00153846153846,
"grad_norm": 109.69795989990234,
"learning_rate": 6.495726495726496e-07,
"loss": 0.0994,
"step": 3060
},
{
"epoch": 47.004615384615384,
"grad_norm": 0.015416641719639301,
"learning_rate": 6.153846153846155e-07,
"loss": 0.114,
"step": 3070
},
{
"epoch": 47.00769230769231,
"grad_norm": 0.023845411837100983,
"learning_rate": 5.811965811965812e-07,
"loss": 0.0067,
"step": 3080
},
{
"epoch": 47.01076923076923,
"grad_norm": 0.007653037551790476,
"learning_rate": 5.470085470085471e-07,
"loss": 0.0013,
"step": 3090
},
{
"epoch": 47.01384615384615,
"grad_norm": 0.6111039519309998,
"learning_rate": 5.128205128205128e-07,
"loss": 0.0334,
"step": 3100
},
{
"epoch": 47.01692307692308,
"grad_norm": 0.005491567775607109,
"learning_rate": 4.786324786324787e-07,
"loss": 0.0007,
"step": 3110
},
{
"epoch": 47.02,
"grad_norm": 0.012984632514417171,
"learning_rate": 4.444444444444445e-07,
"loss": 0.0004,
"step": 3120
},
{
"epoch": 47.02,
"eval_accuracy": 0.6521739130434783,
"eval_loss": 1.5340851545333862,
"eval_runtime": 14.5799,
"eval_samples_per_second": 3.155,
"eval_steps_per_second": 0.823,
"step": 3120
},
{
"epoch": 48.003076923076925,
"grad_norm": 0.008926596492528915,
"learning_rate": 4.102564102564103e-07,
"loss": 0.0007,
"step": 3130
},
{
"epoch": 48.00615384615384,
"grad_norm": 0.0157285425812006,
"learning_rate": 3.760683760683761e-07,
"loss": 0.157,
"step": 3140
},
{
"epoch": 48.00923076923077,
"grad_norm": 0.0048833079636096954,
"learning_rate": 3.4188034188034194e-07,
"loss": 0.0005,
"step": 3150
},
{
"epoch": 48.012307692307694,
"grad_norm": 0.10273938626050949,
"learning_rate": 3.0769230769230774e-07,
"loss": 0.137,
"step": 3160
},
{
"epoch": 48.01538461538462,
"grad_norm": 0.011431020684540272,
"learning_rate": 2.7350427350427354e-07,
"loss": 0.1131,
"step": 3170
},
{
"epoch": 48.01846153846154,
"grad_norm": 0.01966054178774357,
"learning_rate": 2.3931623931623934e-07,
"loss": 0.0004,
"step": 3180
},
{
"epoch": 48.02,
"eval_accuracy": 0.7391304347826086,
"eval_loss": 1.392978310585022,
"eval_runtime": 14.2391,
"eval_samples_per_second": 3.231,
"eval_steps_per_second": 0.843,
"step": 3185
},
{
"epoch": 49.00153846153846,
"grad_norm": 0.005121205002069473,
"learning_rate": 2.0512820512820514e-07,
"loss": 0.0005,
"step": 3190
},
{
"epoch": 49.004615384615384,
"grad_norm": 0.007318329997360706,
"learning_rate": 1.7094017094017097e-07,
"loss": 0.0162,
"step": 3200
},
{
"epoch": 49.00769230769231,
"grad_norm": 0.0550755113363266,
"learning_rate": 1.3675213675213677e-07,
"loss": 0.0344,
"step": 3210
},
{
"epoch": 49.01076923076923,
"grad_norm": 0.004471645224839449,
"learning_rate": 1.0256410256410257e-07,
"loss": 0.3291,
"step": 3220
},
{
"epoch": 49.01384615384615,
"grad_norm": 0.10918418318033218,
"learning_rate": 6.837606837606839e-08,
"loss": 0.0152,
"step": 3230
},
{
"epoch": 49.01692307692308,
"grad_norm": 0.005762889981269836,
"learning_rate": 3.418803418803419e-08,
"loss": 0.0006,
"step": 3240
},
{
"epoch": 49.02,
"grad_norm": 50.610591888427734,
"learning_rate": 0.0,
"loss": 0.0033,
"step": 3250
},
{
"epoch": 49.02,
"eval_accuracy": 0.717391304347826,
"eval_loss": 1.4388333559036255,
"eval_runtime": 16.7736,
"eval_samples_per_second": 2.742,
"eval_steps_per_second": 0.715,
"step": 3250
},
{
"epoch": 49.02,
"step": 3250,
"total_flos": 5.708334209885798e+19,
"train_loss": 0.32200761378957676,
"train_runtime": 8569.5008,
"train_samples_per_second": 1.517,
"train_steps_per_second": 0.379
},
{
"epoch": 49.02,
"eval_accuracy": 0.782608695652174,
"eval_loss": 1.330747127532959,
"eval_runtime": 14.1891,
"eval_samples_per_second": 3.242,
"eval_steps_per_second": 0.846,
"step": 3250
},
{
"epoch": 49.02,
"eval_accuracy": 0.782608695652174,
"eval_loss": 1.3307472467422485,
"eval_runtime": 14.1739,
"eval_samples_per_second": 3.245,
"eval_steps_per_second": 0.847,
"step": 3250
}
],
"logging_steps": 10,
"max_steps": 3250,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.708334209885798e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
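
A minimal sketch (Python standard library only; the file path is assumed from the checkpoint prefix recorded in "best_model_checkpoint" above) of how this state file can be read back to find the best checkpoint and split the log history:

import json

# Minimal sketch, not part of the training run: load the Trainer state
# and report the best checkpoint. The path below is an assumption based
# on the repository layout implied by "best_model_checkpoint".
with open("CTMAE-P2-V2-S3/trainer_state.json") as f:
    state = json.load(f)

# best_metric here tracks eval_accuracy (it matches the step-2925 eval entry above).
print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])

# log_history interleaves training entries (keyed by "loss") and evaluation
# entries (keyed by "eval_accuracy"); split them to inspect the two curves.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]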