{
"best_metric": 1.2237151861190796,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.27155465037338766,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0013577732518669382,
"grad_norm": 0.2727883458137512,
"learning_rate": 6e-06,
"loss": 1.3028,
"step": 1
},
{
"epoch": 0.0013577732518669382,
"eval_loss": 1.8691651821136475,
"eval_runtime": 20.576,
"eval_samples_per_second": 60.313,
"eval_steps_per_second": 15.115,
"step": 1
},
{
"epoch": 0.0027155465037338763,
"grad_norm": 0.291520893573761,
"learning_rate": 1.2e-05,
"loss": 1.515,
"step": 2
},
{
"epoch": 0.004073319755600814,
"grad_norm": 0.3538097143173218,
"learning_rate": 1.8e-05,
"loss": 1.3986,
"step": 3
},
{
"epoch": 0.005431093007467753,
"grad_norm": 0.34487512707710266,
"learning_rate": 2.4e-05,
"loss": 1.4966,
"step": 4
},
{
"epoch": 0.006788866259334691,
"grad_norm": 0.38797444105148315,
"learning_rate": 3e-05,
"loss": 1.4055,
"step": 5
},
{
"epoch": 0.008146639511201629,
"grad_norm": 0.4047252833843231,
"learning_rate": 3.6e-05,
"loss": 1.4603,
"step": 6
},
{
"epoch": 0.009504412763068567,
"grad_norm": 0.3205205798149109,
"learning_rate": 4.2e-05,
"loss": 1.4481,
"step": 7
},
{
"epoch": 0.010862186014935505,
"grad_norm": 0.48629483580589294,
"learning_rate": 4.8e-05,
"loss": 1.4678,
"step": 8
},
{
"epoch": 0.012219959266802444,
"grad_norm": 0.4313899278640747,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.5263,
"step": 9
},
{
"epoch": 0.013577732518669382,
"grad_norm": 0.404436856508255,
"learning_rate": 6e-05,
"loss": 1.5395,
"step": 10
},
{
"epoch": 0.01493550577053632,
"grad_norm": 0.35745665431022644,
"learning_rate": 5.999589914977407e-05,
"loss": 1.547,
"step": 11
},
{
"epoch": 0.016293279022403257,
"grad_norm": 0.3784649968147278,
"learning_rate": 5.998359772022778e-05,
"loss": 1.3765,
"step": 12
},
{
"epoch": 0.017651052274270197,
"grad_norm": 0.38371866941452026,
"learning_rate": 5.996309907444915e-05,
"loss": 1.4725,
"step": 13
},
{
"epoch": 0.019008825526137134,
"grad_norm": 0.4023575782775879,
"learning_rate": 5.9934408816563236e-05,
"loss": 1.5672,
"step": 14
},
{
"epoch": 0.020366598778004074,
"grad_norm": 0.4079117774963379,
"learning_rate": 5.98975347902001e-05,
"loss": 1.4821,
"step": 15
},
{
"epoch": 0.02172437202987101,
"grad_norm": 0.3644375801086426,
"learning_rate": 5.9852487076350345e-05,
"loss": 1.4558,
"step": 16
},
{
"epoch": 0.02308214528173795,
"grad_norm": 0.3798045516014099,
"learning_rate": 5.979927799060915e-05,
"loss": 1.3945,
"step": 17
},
{
"epoch": 0.024439918533604887,
"grad_norm": 0.3799417018890381,
"learning_rate": 5.9737922079809257e-05,
"loss": 1.5371,
"step": 18
},
{
"epoch": 0.025797691785471828,
"grad_norm": 0.45709484815597534,
"learning_rate": 5.9668436118044054e-05,
"loss": 1.5058,
"step": 19
},
{
"epoch": 0.027155465037338764,
"grad_norm": 0.4341858923435211,
"learning_rate": 5.959083910208167e-05,
"loss": 1.4192,
"step": 20
},
{
"epoch": 0.028513238289205704,
"grad_norm": 0.37348461151123047,
"learning_rate": 5.9505152246171474e-05,
"loss": 1.3679,
"step": 21
},
{
"epoch": 0.02987101154107264,
"grad_norm": 0.4123372435569763,
"learning_rate": 5.941139897624428e-05,
"loss": 1.4606,
"step": 22
},
{
"epoch": 0.031228784792939578,
"grad_norm": 0.41863560676574707,
"learning_rate": 5.9309604923507984e-05,
"loss": 1.5268,
"step": 23
},
{
"epoch": 0.032586558044806514,
"grad_norm": 0.3617679476737976,
"learning_rate": 5.9199797917440176e-05,
"loss": 1.3856,
"step": 24
},
{
"epoch": 0.03394433129667346,
"grad_norm": 0.4142565131187439,
"learning_rate": 5.908200797817991e-05,
"loss": 1.5046,
"step": 25
},
{
"epoch": 0.035302104548540394,
"grad_norm": 0.45500728487968445,
"learning_rate": 5.895626730832046e-05,
"loss": 1.5162,
"step": 26
},
{
"epoch": 0.03665987780040733,
"grad_norm": 0.3940930664539337,
"learning_rate": 5.882261028410545e-05,
"loss": 1.4056,
"step": 27
},
{
"epoch": 0.03801765105227427,
"grad_norm": 0.37967368960380554,
"learning_rate": 5.8681073446030734e-05,
"loss": 1.4119,
"step": 28
},
{
"epoch": 0.03937542430414121,
"grad_norm": 0.4179513454437256,
"learning_rate": 5.853169548885461e-05,
"loss": 1.3789,
"step": 29
},
{
"epoch": 0.04073319755600815,
"grad_norm": 0.4345241189002991,
"learning_rate": 5.8374517251019035e-05,
"loss": 1.3115,
"step": 30
},
{
"epoch": 0.042090970807875085,
"grad_norm": 0.44251716136932373,
"learning_rate": 5.820958170348484e-05,
"loss": 1.343,
"step": 31
},
{
"epoch": 0.04344874405974202,
"grad_norm": 0.44365394115448,
"learning_rate": 5.8036933937983825e-05,
"loss": 1.4196,
"step": 32
},
{
"epoch": 0.04480651731160896,
"grad_norm": 0.4314393401145935,
"learning_rate": 5.7856621154691217e-05,
"loss": 1.3913,
"step": 33
},
{
"epoch": 0.0461642905634759,
"grad_norm": 0.4089583456516266,
"learning_rate": 5.766869264932154e-05,
"loss": 1.4025,
"step": 34
},
{
"epoch": 0.04752206381534284,
"grad_norm": 0.40149539709091187,
"learning_rate": 5.747319979965172e-05,
"loss": 1.406,
"step": 35
},
{
"epoch": 0.048879837067209775,
"grad_norm": 0.4526827931404114,
"learning_rate": 5.727019605147488e-05,
"loss": 1.4015,
"step": 36
},
{
"epoch": 0.05023761031907671,
"grad_norm": 0.46217799186706543,
"learning_rate": 5.7059736903988775e-05,
"loss": 1.4696,
"step": 37
},
{
"epoch": 0.051595383570943655,
"grad_norm": 0.43967458605766296,
"learning_rate": 5.684187989462291e-05,
"loss": 1.348,
"step": 38
},
{
"epoch": 0.05295315682281059,
"grad_norm": 0.44179558753967285,
"learning_rate": 5.661668458330836e-05,
"loss": 1.3125,
"step": 39
},
{
"epoch": 0.05431093007467753,
"grad_norm": 0.5129258632659912,
"learning_rate": 5.638421253619467e-05,
"loss": 1.3217,
"step": 40
},
{
"epoch": 0.055668703326544465,
"grad_norm": 0.5877251625061035,
"learning_rate": 5.614452730881832e-05,
"loss": 1.4882,
"step": 41
},
{
"epoch": 0.05702647657841141,
"grad_norm": 0.48830196261405945,
"learning_rate": 5.589769442872722e-05,
"loss": 1.3058,
"step": 42
},
{
"epoch": 0.058384249830278345,
"grad_norm": 0.5610761046409607,
"learning_rate": 5.5643781377566175e-05,
"loss": 1.2949,
"step": 43
},
{
"epoch": 0.05974202308214528,
"grad_norm": 0.5211129188537598,
"learning_rate": 5.538285757262806e-05,
"loss": 1.2745,
"step": 44
},
{
"epoch": 0.06109979633401222,
"grad_norm": 0.625769317150116,
"learning_rate": 5.5114994347875856e-05,
"loss": 1.3896,
"step": 45
},
{
"epoch": 0.062457569585879155,
"grad_norm": 0.607923686504364,
"learning_rate": 5.48402649344406e-05,
"loss": 1.2187,
"step": 46
},
{
"epoch": 0.06381534283774609,
"grad_norm": 0.8620524406433105,
"learning_rate": 5.455874444060078e-05,
"loss": 1.5842,
"step": 47
},
{
"epoch": 0.06517311608961303,
"grad_norm": 0.8019170165061951,
"learning_rate": 5.427050983124843e-05,
"loss": 1.5473,
"step": 48
},
{
"epoch": 0.06653088934147998,
"grad_norm": 1.2089159488677979,
"learning_rate": 5.397563990684774e-05,
"loss": 1.5974,
"step": 49
},
{
"epoch": 0.06788866259334692,
"grad_norm": 1.9010008573532104,
"learning_rate": 5.367421528189181e-05,
"loss": 1.7379,
"step": 50
},
{
"epoch": 0.06788866259334692,
"eval_loss": 1.3871045112609863,
"eval_runtime": 20.6029,
"eval_samples_per_second": 60.234,
"eval_steps_per_second": 15.095,
"step": 50
},
{
"epoch": 0.06924643584521385,
"grad_norm": 0.6502735018730164,
"learning_rate": 5.336631836286338e-05,
"loss": 1.15,
"step": 51
},
{
"epoch": 0.07060420909708079,
"grad_norm": 0.7638380527496338,
"learning_rate": 5.3052033325705774e-05,
"loss": 1.3236,
"step": 52
},
{
"epoch": 0.07196198234894773,
"grad_norm": 0.8170233964920044,
"learning_rate": 5.2731446092810044e-05,
"loss": 1.4022,
"step": 53
},
{
"epoch": 0.07331975560081466,
"grad_norm": 0.8679696917533875,
"learning_rate": 5.240464430952462e-05,
"loss": 1.4187,
"step": 54
},
{
"epoch": 0.0746775288526816,
"grad_norm": 0.7302316427230835,
"learning_rate": 5.207171732019395e-05,
"loss": 1.4995,
"step": 55
},
{
"epoch": 0.07603530210454854,
"grad_norm": 0.6195611953735352,
"learning_rate": 5.1732756143732675e-05,
"loss": 1.386,
"step": 56
},
{
"epoch": 0.07739307535641547,
"grad_norm": 0.6950957775115967,
"learning_rate": 5.1387853448741916e-05,
"loss": 1.3669,
"step": 57
},
{
"epoch": 0.07875084860828242,
"grad_norm": 0.6076563000679016,
"learning_rate": 5.103710352817465e-05,
"loss": 1.3245,
"step": 58
},
{
"epoch": 0.08010862186014936,
"grad_norm": 0.5559670329093933,
"learning_rate": 5.068060227355698e-05,
"loss": 1.3563,
"step": 59
},
{
"epoch": 0.0814663951120163,
"grad_norm": 0.43826499581336975,
"learning_rate": 5.0318447148772234e-05,
"loss": 1.2848,
"step": 60
},
{
"epoch": 0.08282416836388323,
"grad_norm": 0.4111815094947815,
"learning_rate": 4.995073716341545e-05,
"loss": 1.2948,
"step": 61
},
{
"epoch": 0.08418194161575017,
"grad_norm": 0.38872504234313965,
"learning_rate": 4.957757284572506e-05,
"loss": 1.2717,
"step": 62
},
{
"epoch": 0.0855397148676171,
"grad_norm": 0.3297795057296753,
"learning_rate": 4.91990562150995e-05,
"loss": 1.2787,
"step": 63
},
{
"epoch": 0.08689748811948404,
"grad_norm": 0.3442164659500122,
"learning_rate": 4.881529075420611e-05,
"loss": 1.2691,
"step": 64
},
{
"epoch": 0.08825526137135098,
"grad_norm": 0.337697833776474,
"learning_rate": 4.8426381380690036e-05,
"loss": 1.2559,
"step": 65
},
{
"epoch": 0.08961303462321792,
"grad_norm": 0.377407968044281,
"learning_rate": 4.8032434418490753e-05,
"loss": 1.1306,
"step": 66
},
{
"epoch": 0.09097080787508487,
"grad_norm": 0.37138831615448,
"learning_rate": 4.7633557568774194e-05,
"loss": 1.3517,
"step": 67
},
{
"epoch": 0.0923285811269518,
"grad_norm": 0.3734515309333801,
"learning_rate": 4.722985988048831e-05,
"loss": 1.241,
"step": 68
},
{
"epoch": 0.09368635437881874,
"grad_norm": 0.40681424736976624,
"learning_rate": 4.6821451720550184e-05,
"loss": 1.3452,
"step": 69
},
{
"epoch": 0.09504412763068568,
"grad_norm": 0.3799170255661011,
"learning_rate": 4.640844474367282e-05,
"loss": 1.2281,
"step": 70
},
{
"epoch": 0.09640190088255261,
"grad_norm": 0.40699976682662964,
"learning_rate": 4.5990951861839815e-05,
"loss": 1.2165,
"step": 71
},
{
"epoch": 0.09775967413441955,
"grad_norm": 0.4101755917072296,
"learning_rate": 4.5569087213436455e-05,
"loss": 1.2346,
"step": 72
},
{
"epoch": 0.09911744738628649,
"grad_norm": 0.41790157556533813,
"learning_rate": 4.514296613204532e-05,
"loss": 1.3324,
"step": 73
},
{
"epoch": 0.10047522063815342,
"grad_norm": 0.4398515224456787,
"learning_rate": 4.471270511491525e-05,
"loss": 1.2324,
"step": 74
},
{
"epoch": 0.10183299389002037,
"grad_norm": 0.41198304295539856,
"learning_rate": 4.427842179111221e-05,
"loss": 1.1749,
"step": 75
},
{
"epoch": 0.10319076714188731,
"grad_norm": 0.40906915068626404,
"learning_rate": 4.3840234889360634e-05,
"loss": 1.21,
"step": 76
},
{
"epoch": 0.10454854039375425,
"grad_norm": 0.38414862751960754,
"learning_rate": 4.33982642055842e-05,
"loss": 1.2519,
"step": 77
},
{
"epoch": 0.10590631364562118,
"grad_norm": 0.3740120828151703,
"learning_rate": 4.2952630570154785e-05,
"loss": 1.2096,
"step": 78
},
{
"epoch": 0.10726408689748812,
"grad_norm": 0.3862381875514984,
"learning_rate": 4.250345581485871e-05,
"loss": 1.2626,
"step": 79
},
{
"epoch": 0.10862186014935506,
"grad_norm": 0.3637017011642456,
"learning_rate": 4.205086273958909e-05,
"loss": 1.2496,
"step": 80
},
{
"epoch": 0.109979633401222,
"grad_norm": 0.38009828329086304,
"learning_rate": 4.1594975078773565e-05,
"loss": 1.2312,
"step": 81
},
{
"epoch": 0.11133740665308893,
"grad_norm": 0.353546142578125,
"learning_rate": 4.113591746754662e-05,
"loss": 1.2196,
"step": 82
},
{
"epoch": 0.11269517990495587,
"grad_norm": 0.4014517068862915,
"learning_rate": 4.06738154076755e-05,
"loss": 1.2508,
"step": 83
},
{
"epoch": 0.11405295315682282,
"grad_norm": 0.4051145315170288,
"learning_rate": 4.020879523324929e-05,
"loss": 1.2438,
"step": 84
},
{
"epoch": 0.11541072640868975,
"grad_norm": 0.39561352133750916,
"learning_rate": 3.974098407614051e-05,
"loss": 1.2708,
"step": 85
},
{
"epoch": 0.11676849966055669,
"grad_norm": 0.4071887135505676,
"learning_rate": 3.927050983124842e-05,
"loss": 1.2017,
"step": 86
},
{
"epoch": 0.11812627291242363,
"grad_norm": 0.4768187403678894,
"learning_rate": 3.8797501121533946e-05,
"loss": 1.2136,
"step": 87
},
{
"epoch": 0.11948404616429056,
"grad_norm": 0.4878613352775574,
"learning_rate": 3.832208726285534e-05,
"loss": 1.1696,
"step": 88
},
{
"epoch": 0.1208418194161575,
"grad_norm": 0.4963112473487854,
"learning_rate": 3.784439822861459e-05,
"loss": 1.1474,
"step": 89
},
{
"epoch": 0.12219959266802444,
"grad_norm": 0.5566866993904114,
"learning_rate": 3.7364564614223976e-05,
"loss": 1.1513,
"step": 90
},
{
"epoch": 0.12355736591989137,
"grad_norm": 0.49696850776672363,
"learning_rate": 3.688271760140255e-05,
"loss": 1.0694,
"step": 91
},
{
"epoch": 0.12491513917175831,
"grad_norm": 0.4537845551967621,
"learning_rate": 3.6398988922312406e-05,
"loss": 1.2268,
"step": 92
},
{
"epoch": 0.12627291242362526,
"grad_norm": 0.5615867376327515,
"learning_rate": 3.591351082354441e-05,
"loss": 1.209,
"step": 93
},
{
"epoch": 0.12763068567549218,
"grad_norm": 0.5191481113433838,
"learning_rate": 3.54264160299633e-05,
"loss": 0.9851,
"step": 94
},
{
"epoch": 0.12898845892735913,
"grad_norm": 0.5748217701911926,
"learning_rate": 3.493783770842202e-05,
"loss": 1.1675,
"step": 95
},
{
"epoch": 0.13034623217922606,
"grad_norm": 0.5830937623977661,
"learning_rate": 3.444790943135526e-05,
"loss": 1.1482,
"step": 96
},
{
"epoch": 0.131704005431093,
"grad_norm": 0.7683277130126953,
"learning_rate": 3.3956765140262074e-05,
"loss": 1.1567,
"step": 97
},
{
"epoch": 0.13306177868295996,
"grad_norm": 0.9153833985328674,
"learning_rate": 3.346453910908759e-05,
"loss": 1.0613,
"step": 98
},
{
"epoch": 0.13441955193482688,
"grad_norm": 1.457146167755127,
"learning_rate": 3.297136590751389e-05,
"loss": 1.4215,
"step": 99
},
{
"epoch": 0.13577732518669383,
"grad_norm": 2.4654388427734375,
"learning_rate": 3.247738036416998e-05,
"loss": 1.3325,
"step": 100
},
{
"epoch": 0.13577732518669383,
"eval_loss": 1.2617239952087402,
"eval_runtime": 20.5894,
"eval_samples_per_second": 60.274,
"eval_steps_per_second": 15.105,
"step": 100
},
{
"epoch": 0.13713509843856075,
"grad_norm": 0.3299471437931061,
"learning_rate": 3.1982717529770985e-05,
"loss": 1.043,
"step": 101
},
{
"epoch": 0.1384928716904277,
"grad_norm": 0.5103358626365662,
"learning_rate": 3.148751264019667e-05,
"loss": 1.0821,
"step": 102
},
{
"epoch": 0.13985064494229463,
"grad_norm": 0.38934123516082764,
"learning_rate": 3.099190107951924e-05,
"loss": 1.1851,
"step": 103
},
{
"epoch": 0.14120841819416158,
"grad_norm": 0.4618616998195648,
"learning_rate": 3.049601834299076e-05,
"loss": 1.1477,
"step": 104
},
{
"epoch": 0.1425661914460285,
"grad_norm": 0.4667770564556122,
"learning_rate": 3e-05,
"loss": 1.3379,
"step": 105
},
{
"epoch": 0.14392396469789545,
"grad_norm": 0.5612397789955139,
"learning_rate": 2.9503981657009246e-05,
"loss": 1.2474,
"step": 106
},
{
"epoch": 0.1452817379497624,
"grad_norm": 0.4959563612937927,
"learning_rate": 2.9008098920480752e-05,
"loss": 1.2996,
"step": 107
},
{
"epoch": 0.14663951120162932,
"grad_norm": 0.46022874116897583,
"learning_rate": 2.851248735980333e-05,
"loss": 1.2365,
"step": 108
},
{
"epoch": 0.14799728445349628,
"grad_norm": 0.4637479782104492,
"learning_rate": 2.801728247022902e-05,
"loss": 1.1274,
"step": 109
},
{
"epoch": 0.1493550577053632,
"grad_norm": 0.46539270877838135,
"learning_rate": 2.7522619635830034e-05,
"loss": 1.2835,
"step": 110
},
{
"epoch": 0.15071283095723015,
"grad_norm": 0.44558578729629517,
"learning_rate": 2.702863409248612e-05,
"loss": 1.1671,
"step": 111
},
{
"epoch": 0.15207060420909707,
"grad_norm": 0.4058595299720764,
"learning_rate": 2.6535460890912416e-05,
"loss": 1.3161,
"step": 112
},
{
"epoch": 0.15342837746096402,
"grad_norm": 0.41494250297546387,
"learning_rate": 2.604323485973793e-05,
"loss": 1.2028,
"step": 113
},
{
"epoch": 0.15478615071283094,
"grad_norm": 0.3981316387653351,
"learning_rate": 2.555209056864474e-05,
"loss": 1.3,
"step": 114
},
{
"epoch": 0.1561439239646979,
"grad_norm": 0.3824096620082855,
"learning_rate": 2.5062162291577978e-05,
"loss": 1.3645,
"step": 115
},
{
"epoch": 0.15750169721656485,
"grad_norm": 0.37386175990104675,
"learning_rate": 2.4573583970036712e-05,
"loss": 1.1997,
"step": 116
},
{
"epoch": 0.15885947046843177,
"grad_norm": 0.3558681607246399,
"learning_rate": 2.4086489176455595e-05,
"loss": 1.1774,
"step": 117
},
{
"epoch": 0.16021724372029872,
"grad_norm": 0.3079434037208557,
"learning_rate": 2.36010110776876e-05,
"loss": 1.1493,
"step": 118
},
{
"epoch": 0.16157501697216564,
"grad_norm": 0.3555084764957428,
"learning_rate": 2.3117282398597456e-05,
"loss": 1.2321,
"step": 119
},
{
"epoch": 0.1629327902240326,
"grad_norm": 0.3722546398639679,
"learning_rate": 2.263543538577603e-05,
"loss": 1.2395,
"step": 120
},
{
"epoch": 0.16429056347589951,
"grad_norm": 0.34470927715301514,
"learning_rate": 2.215560177138541e-05,
"loss": 1.3016,
"step": 121
},
{
"epoch": 0.16564833672776647,
"grad_norm": 0.36612775921821594,
"learning_rate": 2.167791273714467e-05,
"loss": 1.2412,
"step": 122
},
{
"epoch": 0.1670061099796334,
"grad_norm": 0.4002249538898468,
"learning_rate": 2.1202498878466062e-05,
"loss": 1.2357,
"step": 123
},
{
"epoch": 0.16836388323150034,
"grad_norm": 0.34691083431243896,
"learning_rate": 2.072949016875158e-05,
"loss": 1.2007,
"step": 124
},
{
"epoch": 0.1697216564833673,
"grad_norm": 0.36753562092781067,
"learning_rate": 2.0259015923859498e-05,
"loss": 1.2795,
"step": 125
},
{
"epoch": 0.1710794297352342,
"grad_norm": 0.35715538263320923,
"learning_rate": 1.979120476675071e-05,
"loss": 1.2341,
"step": 126
},
{
"epoch": 0.17243720298710116,
"grad_norm": 0.3618859350681305,
"learning_rate": 1.9326184592324503e-05,
"loss": 1.3268,
"step": 127
},
{
"epoch": 0.17379497623896809,
"grad_norm": 0.3898789584636688,
"learning_rate": 1.8864082532453373e-05,
"loss": 1.2401,
"step": 128
},
{
"epoch": 0.17515274949083504,
"grad_norm": 0.38298866152763367,
"learning_rate": 1.840502492122644e-05,
"loss": 1.1924,
"step": 129
},
{
"epoch": 0.17651052274270196,
"grad_norm": 0.41282638907432556,
"learning_rate": 1.7949137260410924e-05,
"loss": 1.299,
"step": 130
},
{
"epoch": 0.1778682959945689,
"grad_norm": 0.40525734424591064,
"learning_rate": 1.7496544185141295e-05,
"loss": 1.1921,
"step": 131
},
{
"epoch": 0.17922606924643583,
"grad_norm": 0.3740069568157196,
"learning_rate": 1.7047369429845216e-05,
"loss": 1.1422,
"step": 132
},
{
"epoch": 0.18058384249830278,
"grad_norm": 0.4477040469646454,
"learning_rate": 1.6601735794415806e-05,
"loss": 1.273,
"step": 133
},
{
"epoch": 0.18194161575016973,
"grad_norm": 0.4326741695404053,
"learning_rate": 1.615976511063937e-05,
"loss": 1.1538,
"step": 134
},
{
"epoch": 0.18329938900203666,
"grad_norm": 0.40829864144325256,
"learning_rate": 1.5721578208887793e-05,
"loss": 1.1364,
"step": 135
},
{
"epoch": 0.1846571622539036,
"grad_norm": 0.48507779836654663,
"learning_rate": 1.5287294885084766e-05,
"loss": 1.1943,
"step": 136
},
{
"epoch": 0.18601493550577053,
"grad_norm": 0.4629991054534912,
"learning_rate": 1.4857033867954697e-05,
"loss": 1.3347,
"step": 137
},
{
"epoch": 0.18737270875763748,
"grad_norm": 0.448877215385437,
"learning_rate": 1.4430912786563554e-05,
"loss": 1.2268,
"step": 138
},
{
"epoch": 0.1887304820095044,
"grad_norm": 0.4393776059150696,
"learning_rate": 1.4009048138160195e-05,
"loss": 1.1314,
"step": 139
},
{
"epoch": 0.19008825526137135,
"grad_norm": 0.4872407913208008,
"learning_rate": 1.3591555256327199e-05,
"loss": 1.1743,
"step": 140
},
{
"epoch": 0.19144602851323828,
"grad_norm": 0.5232310891151428,
"learning_rate": 1.3178548279449822e-05,
"loss": 1.1972,
"step": 141
},
{
"epoch": 0.19280380176510523,
"grad_norm": 0.5275864005088806,
"learning_rate": 1.2770140119511693e-05,
"loss": 1.2252,
"step": 142
},
{
"epoch": 0.19416157501697218,
"grad_norm": 0.6005786657333374,
"learning_rate": 1.2366442431225809e-05,
"loss": 1.2634,
"step": 143
},
{
"epoch": 0.1955193482688391,
"grad_norm": 0.6898880004882812,
"learning_rate": 1.1967565581509248e-05,
"loss": 1.2417,
"step": 144
},
{
"epoch": 0.19687712152070605,
"grad_norm": 0.711155116558075,
"learning_rate": 1.1573618619309965e-05,
"loss": 1.1956,
"step": 145
},
{
"epoch": 0.19823489477257297,
"grad_norm": 0.6882777810096741,
"learning_rate": 1.1184709245793889e-05,
"loss": 1.0373,
"step": 146
},
{
"epoch": 0.19959266802443992,
"grad_norm": 0.9063619375228882,
"learning_rate": 1.0800943784900502e-05,
"loss": 1.1443,
"step": 147
},
{
"epoch": 0.20095044127630685,
"grad_norm": 1.1058382987976074,
"learning_rate": 1.042242715427494e-05,
"loss": 1.2277,
"step": 148
},
{
"epoch": 0.2023082145281738,
"grad_norm": 1.3979732990264893,
"learning_rate": 1.004926283658455e-05,
"loss": 1.072,
"step": 149
},
{
"epoch": 0.20366598778004075,
"grad_norm": 2.6124637126922607,
"learning_rate": 9.681552851227774e-06,
"loss": 1.6226,
"step": 150
},
{
"epoch": 0.20366598778004075,
"eval_loss": 1.230950117111206,
"eval_runtime": 20.5956,
"eval_samples_per_second": 60.256,
"eval_steps_per_second": 15.1,
"step": 150
},
{
"epoch": 0.20502376103190767,
"grad_norm": 0.30644547939300537,
"learning_rate": 9.319397726443026e-06,
"loss": 1.0185,
"step": 151
},
{
"epoch": 0.20638153428377462,
"grad_norm": 0.3141520023345947,
"learning_rate": 8.962896471825342e-06,
"loss": 1.205,
"step": 152
},
{
"epoch": 0.20773930753564154,
"grad_norm": 0.3089138865470886,
"learning_rate": 8.61214655125809e-06,
"loss": 1.2672,
"step": 153
},
{
"epoch": 0.2090970807875085,
"grad_norm": 0.3366032838821411,
"learning_rate": 8.267243856267331e-06,
"loss": 1.2912,
"step": 154
},
{
"epoch": 0.21045485403937542,
"grad_norm": 0.3448525071144104,
"learning_rate": 7.928282679806052e-06,
"loss": 1.162,
"step": 155
},
{
"epoch": 0.21181262729124237,
"grad_norm": 0.33545318245887756,
"learning_rate": 7.595355690475393e-06,
"loss": 1.1831,
"step": 156
},
{
"epoch": 0.2131704005431093,
"grad_norm": 0.37803956866264343,
"learning_rate": 7.268553907189964e-06,
"loss": 1.2012,
"step": 157
},
{
"epoch": 0.21452817379497624,
"grad_norm": 0.387054979801178,
"learning_rate": 6.947966674294236e-06,
"loss": 1.1716,
"step": 158
},
{
"epoch": 0.2158859470468432,
"grad_norm": 0.3598926365375519,
"learning_rate": 6.6336816371366305e-06,
"loss": 1.3053,
"step": 159
},
{
"epoch": 0.2172437202987101,
"grad_norm": 0.3465365171432495,
"learning_rate": 6.325784718108196e-06,
"loss": 1.2507,
"step": 160
},
{
"epoch": 0.21860149355057706,
"grad_norm": 0.34380465745925903,
"learning_rate": 6.0243600931522595e-06,
"loss": 1.1837,
"step": 161
},
{
"epoch": 0.219959266802444,
"grad_norm": 0.38984525203704834,
"learning_rate": 5.72949016875158e-06,
"loss": 1.1786,
"step": 162
},
{
"epoch": 0.22131704005431094,
"grad_norm": 0.32796481251716614,
"learning_rate": 5.44125555939923e-06,
"loss": 1.1675,
"step": 163
},
{
"epoch": 0.22267481330617786,
"grad_norm": 0.3847575783729553,
"learning_rate": 5.159735065559399e-06,
"loss": 1.1848,
"step": 164
},
{
"epoch": 0.2240325865580448,
"grad_norm": 0.3389010429382324,
"learning_rate": 4.885005652124144e-06,
"loss": 1.2208,
"step": 165
},
{
"epoch": 0.22539035980991173,
"grad_norm": 0.3421236574649811,
"learning_rate": 4.617142427371934e-06,
"loss": 1.2395,
"step": 166
},
{
"epoch": 0.22674813306177868,
"grad_norm": 0.40454038977622986,
"learning_rate": 4.3562186224338265e-06,
"loss": 1.1845,
"step": 167
},
{
"epoch": 0.22810590631364563,
"grad_norm": 0.38095664978027344,
"learning_rate": 4.102305571272783e-06,
"loss": 1.2256,
"step": 168
},
{
"epoch": 0.22946367956551256,
"grad_norm": 0.39606228470802307,
"learning_rate": 3.855472691181678e-06,
"loss": 1.2515,
"step": 169
},
{
"epoch": 0.2308214528173795,
"grad_norm": 0.40278103947639465,
"learning_rate": 3.615787463805331e-06,
"loss": 1.1997,
"step": 170
},
{
"epoch": 0.23217922606924643,
"grad_norm": 0.38342180848121643,
"learning_rate": 3.383315416691646e-06,
"loss": 1.2968,
"step": 171
},
{
"epoch": 0.23353699932111338,
"grad_norm": 0.3293931484222412,
"learning_rate": 3.158120105377096e-06,
"loss": 1.105,
"step": 172
},
{
"epoch": 0.2348947725729803,
"grad_norm": 0.3917769491672516,
"learning_rate": 2.940263096011233e-06,
"loss": 1.2725,
"step": 173
},
{
"epoch": 0.23625254582484725,
"grad_norm": 0.40385058522224426,
"learning_rate": 2.729803948525125e-06,
"loss": 1.268,
"step": 174
},
{
"epoch": 0.23761031907671418,
"grad_norm": 0.3761354088783264,
"learning_rate": 2.526800200348275e-06,
"loss": 1.2043,
"step": 175
},
{
"epoch": 0.23896809232858113,
"grad_norm": 0.35785752534866333,
"learning_rate": 2.3313073506784575e-06,
"loss": 1.2083,
"step": 176
},
{
"epoch": 0.24032586558044808,
"grad_norm": 0.3641122281551361,
"learning_rate": 2.143378845308791e-06,
"loss": 1.1328,
"step": 177
},
{
"epoch": 0.241683638832315,
"grad_norm": 0.3801313042640686,
"learning_rate": 1.9630660620161777e-06,
"loss": 1.196,
"step": 178
},
{
"epoch": 0.24304141208418195,
"grad_norm": 0.3835360109806061,
"learning_rate": 1.790418296515165e-06,
"loss": 1.1675,
"step": 179
},
{
"epoch": 0.24439918533604887,
"grad_norm": 0.39598432183265686,
"learning_rate": 1.625482748980961e-06,
"loss": 1.1781,
"step": 180
},
{
"epoch": 0.24575695858791582,
"grad_norm": 0.43931299448013306,
"learning_rate": 1.4683045111453942e-06,
"loss": 1.3284,
"step": 181
},
{
"epoch": 0.24711473183978275,
"grad_norm": 0.38031482696533203,
"learning_rate": 1.3189265539692707e-06,
"loss": 1.1546,
"step": 182
},
{
"epoch": 0.2484725050916497,
"grad_norm": 0.43264663219451904,
"learning_rate": 1.1773897158945557e-06,
"loss": 1.1991,
"step": 183
},
{
"epoch": 0.24983027834351662,
"grad_norm": 0.39598092436790466,
"learning_rate": 1.0437326916795432e-06,
"loss": 1.217,
"step": 184
},
{
"epoch": 0.25118805159538354,
"grad_norm": 0.434064120054245,
"learning_rate": 9.179920218200888e-07,
"loss": 1.1195,
"step": 185
},
{
"epoch": 0.2525458248472505,
"grad_norm": 0.4763726592063904,
"learning_rate": 8.002020825598277e-07,
"loss": 1.2145,
"step": 186
},
{
"epoch": 0.25390359809911744,
"grad_norm": 0.45298972725868225,
"learning_rate": 6.90395076492022e-07,
"loss": 1.2858,
"step": 187
},
{
"epoch": 0.25526137135098437,
"grad_norm": 0.5360013842582703,
"learning_rate": 5.886010237557194e-07,
"loss": 1.1961,
"step": 188
},
{
"epoch": 0.25661914460285135,
"grad_norm": 0.44087594747543335,
"learning_rate": 4.94847753828529e-07,
"loss": 1.0941,
"step": 189
},
{
"epoch": 0.25797691785471827,
"grad_norm": 0.5056028962135315,
"learning_rate": 4.091608979183303e-07,
"loss": 1.1411,
"step": 190
},
{
"epoch": 0.2593346911065852,
"grad_norm": 0.5390208959579468,
"learning_rate": 3.315638819559452e-07,
"loss": 1.0857,
"step": 191
},
{
"epoch": 0.2606924643584521,
"grad_norm": 0.5694705843925476,
"learning_rate": 2.6207792019074414e-07,
"loss": 1.1992,
"step": 192
},
{
"epoch": 0.2620502376103191,
"grad_norm": 0.5326172709465027,
"learning_rate": 2.0072200939085573e-07,
"loss": 1.2654,
"step": 193
},
{
"epoch": 0.263408010862186,
"grad_norm": 0.6019105315208435,
"learning_rate": 1.475129236496575e-07,
"loss": 1.2256,
"step": 194
},
{
"epoch": 0.26476578411405294,
"grad_norm": 0.6420196890830994,
"learning_rate": 1.0246520979990459e-07,
"loss": 1.1443,
"step": 195
},
{
"epoch": 0.2661235573659199,
"grad_norm": 0.6914029121398926,
"learning_rate": 6.559118343676396e-08,
"loss": 1.1849,
"step": 196
},
{
"epoch": 0.26748133061778684,
"grad_norm": 0.8857573866844177,
"learning_rate": 3.690092555085789e-08,
"loss": 1.239,
"step": 197
},
{
"epoch": 0.26883910386965376,
"grad_norm": 1.1338528394699097,
"learning_rate": 1.640227977221853e-08,
"loss": 1.3823,
"step": 198
},
{
"epoch": 0.2701968771215207,
"grad_norm": 1.0841175317764282,
"learning_rate": 4.1008502259298755e-09,
"loss": 1.2757,
"step": 199
},
{
"epoch": 0.27155465037338766,
"grad_norm": 2.116804599761963,
"learning_rate": 0.0,
"loss": 1.257,
"step": 200
},
{
"epoch": 0.27155465037338766,
"eval_loss": 1.2237151861190796,
"eval_runtime": 20.6081,
"eval_samples_per_second": 60.219,
"eval_steps_per_second": 15.091,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 4,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.375493127929856e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}