{
"best_metric": 1.203278660774231,
"best_model_checkpoint": "miner_id_24/checkpoint-800",
"epoch": 0.05339477970680545,
"eval_steps": 100,
"global_step": 840,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 6.356521393667315e-05,
"grad_norm": 0.9428938031196594,
"learning_rate": 2e-05,
"loss": 1.7738,
"step": 1
},
{
"epoch": 6.356521393667315e-05,
"eval_loss": 1.7346340417861938,
"eval_runtime": 1229.3502,
"eval_samples_per_second": 4.067,
"eval_steps_per_second": 1.017,
"step": 1
},
{
"epoch": 0.0001271304278733463,
"grad_norm": 0.9990512728691101,
"learning_rate": 4e-05,
"loss": 1.7963,
"step": 2
},
{
"epoch": 0.00019069564181001947,
"grad_norm": 0.9891267418861389,
"learning_rate": 6e-05,
"loss": 1.7598,
"step": 3
},
{
"epoch": 0.0002542608557466926,
"grad_norm": 0.9866920709609985,
"learning_rate": 8e-05,
"loss": 1.7408,
"step": 4
},
{
"epoch": 0.0003178260696833658,
"grad_norm": 1.0441697835922241,
"learning_rate": 0.0001,
"loss": 1.7145,
"step": 5
},
{
"epoch": 0.00038139128362003893,
"grad_norm": 0.6852884888648987,
"learning_rate": 0.00012,
"loss": 1.5726,
"step": 6
},
{
"epoch": 0.00044495649755671207,
"grad_norm": 0.3806685507297516,
"learning_rate": 0.00014,
"loss": 1.4647,
"step": 7
},
{
"epoch": 0.0005085217114933852,
"grad_norm": 1.17180597782135,
"learning_rate": 0.00016,
"loss": 1.6533,
"step": 8
},
{
"epoch": 0.0005720869254300584,
"grad_norm": 1.060241937637329,
"learning_rate": 0.00018,
"loss": 1.6696,
"step": 9
},
{
"epoch": 0.0006356521393667316,
"grad_norm": 0.5335236191749573,
"learning_rate": 0.0002,
"loss": 1.5761,
"step": 10
},
{
"epoch": 0.0006992173533034047,
"grad_norm": 0.2798483967781067,
"learning_rate": 0.00019999928367015404,
"loss": 1.5146,
"step": 11
},
{
"epoch": 0.0007627825672400779,
"grad_norm": 0.3353223502635956,
"learning_rate": 0.00019999713469087867,
"loss": 1.6184,
"step": 12
},
{
"epoch": 0.0008263477811767511,
"grad_norm": 0.3838560879230499,
"learning_rate": 0.00019999355309296144,
"loss": 1.4522,
"step": 13
},
{
"epoch": 0.0008899129951134241,
"grad_norm": 0.39443036913871765,
"learning_rate": 0.00019998853892771453,
"loss": 1.517,
"step": 14
},
{
"epoch": 0.0009534782090500973,
"grad_norm": 0.2620809078216553,
"learning_rate": 0.00019998209226697376,
"loss": 1.3812,
"step": 15
},
{
"epoch": 0.0010170434229867704,
"grad_norm": 0.23137860000133514,
"learning_rate": 0.00019997421320309795,
"loss": 1.3922,
"step": 16
},
{
"epoch": 0.0010806086369234437,
"grad_norm": 0.2614266276359558,
"learning_rate": 0.00019996490184896723,
"loss": 1.508,
"step": 17
},
{
"epoch": 0.0011441738508601168,
"grad_norm": 0.325157105922699,
"learning_rate": 0.00019995415833798158,
"loss": 1.4778,
"step": 18
},
{
"epoch": 0.0012077390647967899,
"grad_norm": 0.3683066666126251,
"learning_rate": 0.000199941982824059,
"loss": 1.5108,
"step": 19
},
{
"epoch": 0.0012713042787334632,
"grad_norm": 0.24171724915504456,
"learning_rate": 0.00019992837548163316,
"loss": 1.4124,
"step": 20
},
{
"epoch": 0.0013348694926701363,
"grad_norm": 0.2298036813735962,
"learning_rate": 0.00019991333650565095,
"loss": 1.4449,
"step": 21
},
{
"epoch": 0.0013984347066068094,
"grad_norm": 0.26465296745300293,
"learning_rate": 0.00019989686611156972,
"loss": 1.4632,
"step": 22
},
{
"epoch": 0.0014619999205434827,
"grad_norm": 0.26514774560928345,
"learning_rate": 0.0001998789645353542,
"loss": 1.3014,
"step": 23
},
{
"epoch": 0.0015255651344801557,
"grad_norm": 0.21595242619514465,
"learning_rate": 0.000199859632033473,
"loss": 1.3242,
"step": 24
},
{
"epoch": 0.0015891303484168288,
"grad_norm": 0.21354669332504272,
"learning_rate": 0.00019983886888289514,
"loss": 1.234,
"step": 25
},
{
"epoch": 0.0016526955623535021,
"grad_norm": 0.2628548741340637,
"learning_rate": 0.00019981667538108587,
"loss": 1.4919,
"step": 26
},
{
"epoch": 0.0017162607762901752,
"grad_norm": 0.26308178901672363,
"learning_rate": 0.00019979305184600256,
"loss": 1.462,
"step": 27
},
{
"epoch": 0.0017798259902268483,
"grad_norm": 0.2193090170621872,
"learning_rate": 0.00019976799861609008,
"loss": 1.4595,
"step": 28
},
{
"epoch": 0.0018433912041635216,
"grad_norm": 0.22884628176689148,
"learning_rate": 0.00019974151605027594,
"loss": 1.4373,
"step": 29
},
{
"epoch": 0.0019069564181001947,
"grad_norm": 0.23094788193702698,
"learning_rate": 0.00019971360452796522,
"loss": 1.4242,
"step": 30
},
{
"epoch": 0.001970521632036868,
"grad_norm": 0.2305305153131485,
"learning_rate": 0.000199684264449035,
"loss": 1.4417,
"step": 31
},
{
"epoch": 0.002034086845973541,
"grad_norm": 0.23320302367210388,
"learning_rate": 0.0001996534962338288,
"loss": 1.4594,
"step": 32
},
{
"epoch": 0.002097652059910214,
"grad_norm": 0.2571035623550415,
"learning_rate": 0.00019962130032315044,
"loss": 1.4992,
"step": 33
},
{
"epoch": 0.0021612172738468874,
"grad_norm": 0.23477134108543396,
"learning_rate": 0.0001995876771782577,
"loss": 1.4658,
"step": 34
},
{
"epoch": 0.0022247824877835603,
"grad_norm": 0.2341969609260559,
"learning_rate": 0.0001995526272808559,
"loss": 1.4237,
"step": 35
},
{
"epoch": 0.0022883477017202336,
"grad_norm": 0.22168391942977905,
"learning_rate": 0.00019951615113309075,
"loss": 1.4189,
"step": 36
},
{
"epoch": 0.002351912915656907,
"grad_norm": 0.21319416165351868,
"learning_rate": 0.00019947824925754131,
"loss": 1.3227,
"step": 37
},
{
"epoch": 0.0024154781295935798,
"grad_norm": 0.21283291280269623,
"learning_rate": 0.00019943892219721253,
"loss": 1.2823,
"step": 38
},
{
"epoch": 0.002479043343530253,
"grad_norm": 0.2249862402677536,
"learning_rate": 0.00019939817051552728,
"loss": 1.4165,
"step": 39
},
{
"epoch": 0.0025426085574669264,
"grad_norm": 0.23104599118232727,
"learning_rate": 0.0001993559947963185,
"loss": 1.345,
"step": 40
},
{
"epoch": 0.0026061737714035992,
"grad_norm": 0.21223703026771545,
"learning_rate": 0.00019931239564382073,
"loss": 1.3816,
"step": 41
},
{
"epoch": 0.0026697389853402725,
"grad_norm": 0.22133688628673553,
"learning_rate": 0.00019926737368266144,
"loss": 1.4486,
"step": 42
},
{
"epoch": 0.002733304199276946,
"grad_norm": 0.2195105105638504,
"learning_rate": 0.00019922092955785217,
"loss": 1.4167,
"step": 43
},
{
"epoch": 0.0027968694132136187,
"grad_norm": 0.21872290968894958,
"learning_rate": 0.00019917306393477907,
"loss": 1.2899,
"step": 44
},
{
"epoch": 0.002860434627150292,
"grad_norm": 0.22723184525966644,
"learning_rate": 0.00019912377749919374,
"loss": 1.3379,
"step": 45
},
{
"epoch": 0.0029239998410869653,
"grad_norm": 0.20976956188678741,
"learning_rate": 0.00019907307095720303,
"loss": 1.4117,
"step": 46
},
{
"epoch": 0.002987565055023638,
"grad_norm": 0.23163513839244843,
"learning_rate": 0.0001990209450352591,
"loss": 1.1854,
"step": 47
},
{
"epoch": 0.0030511302689603115,
"grad_norm": 0.24127621948719025,
"learning_rate": 0.00019896740048014908,
"loss": 1.3795,
"step": 48
},
{
"epoch": 0.0031146954828969848,
"grad_norm": 0.2310689091682434,
"learning_rate": 0.0001989124380589842,
"loss": 1.386,
"step": 49
},
{
"epoch": 0.0031782606968336576,
"grad_norm": 0.21882858872413635,
"learning_rate": 0.00019885605855918885,
"loss": 1.3369,
"step": 50
},
{
"epoch": 0.003241825910770331,
"grad_norm": 0.2317679077386856,
"learning_rate": 0.0001987982627884895,
"loss": 1.3222,
"step": 51
},
{
"epoch": 0.0033053911247070042,
"grad_norm": 0.22262057662010193,
"learning_rate": 0.00019873905157490285,
"loss": 1.2872,
"step": 52
},
{
"epoch": 0.003368956338643677,
"grad_norm": 0.2331005483865738,
"learning_rate": 0.00019867842576672403,
"loss": 1.3113,
"step": 53
},
{
"epoch": 0.0034325215525803504,
"grad_norm": 0.22421784698963165,
"learning_rate": 0.0001986163862325146,
"loss": 1.3245,
"step": 54
},
{
"epoch": 0.0034960867665170237,
"grad_norm": 0.23669274151325226,
"learning_rate": 0.00019855293386108992,
"loss": 1.2904,
"step": 55
},
{
"epoch": 0.0035596519804536966,
"grad_norm": 0.2372964769601822,
"learning_rate": 0.0001984880695615066,
"loss": 1.4022,
"step": 56
},
{
"epoch": 0.00362321719439037,
"grad_norm": 0.2388596534729004,
"learning_rate": 0.00019842179426304924,
"loss": 1.3641,
"step": 57
},
{
"epoch": 0.003686782408327043,
"grad_norm": 0.2330758273601532,
"learning_rate": 0.0001983541089152174,
"loss": 1.3179,
"step": 58
},
{
"epoch": 0.003750347622263716,
"grad_norm": 0.236953005194664,
"learning_rate": 0.00019828501448771176,
"loss": 1.4528,
"step": 59
},
{
"epoch": 0.0038139128362003893,
"grad_norm": 0.22548601031303406,
"learning_rate": 0.00019821451197042026,
"loss": 1.4551,
"step": 60
},
{
"epoch": 0.0038774780501370626,
"grad_norm": 0.2316240519285202,
"learning_rate": 0.00019814260237340414,
"loss": 1.3653,
"step": 61
},
{
"epoch": 0.003941043264073736,
"grad_norm": 0.2395096868276596,
"learning_rate": 0.0001980692867268832,
"loss": 1.2773,
"step": 62
},
{
"epoch": 0.004004608478010409,
"grad_norm": 0.23329375684261322,
"learning_rate": 0.00019799456608122107,
"loss": 1.3483,
"step": 63
},
{
"epoch": 0.004068173691947082,
"grad_norm": 0.22131139039993286,
"learning_rate": 0.0001979184415069104,
"loss": 1.2442,
"step": 64
},
{
"epoch": 0.004131738905883755,
"grad_norm": 0.22645598649978638,
"learning_rate": 0.00019784091409455728,
"loss": 1.3506,
"step": 65
},
{
"epoch": 0.004195304119820428,
"grad_norm": 0.2391359955072403,
"learning_rate": 0.00019776198495486565,
"loss": 1.3665,
"step": 66
},
{
"epoch": 0.004258869333757101,
"grad_norm": 0.2379651963710785,
"learning_rate": 0.00019768165521862155,
"loss": 1.3642,
"step": 67
},
{
"epoch": 0.004322434547693775,
"grad_norm": 0.2432878315448761,
"learning_rate": 0.00019759992603667667,
"loss": 1.3705,
"step": 68
},
{
"epoch": 0.004385999761630448,
"grad_norm": 0.22903011739253998,
"learning_rate": 0.0001975167985799321,
"loss": 1.4265,
"step": 69
},
{
"epoch": 0.004449564975567121,
"grad_norm": 0.2231925129890442,
"learning_rate": 0.00019743227403932134,
"loss": 1.2986,
"step": 70
},
{
"epoch": 0.004513130189503794,
"grad_norm": 0.21844634413719177,
"learning_rate": 0.0001973463536257935,
"loss": 1.3026,
"step": 71
},
{
"epoch": 0.004576695403440467,
"grad_norm": 0.22515372931957245,
"learning_rate": 0.00019725903857029564,
"loss": 1.4038,
"step": 72
},
{
"epoch": 0.00464026061737714,
"grad_norm": 0.22920459508895874,
"learning_rate": 0.00019717033012375538,
"loss": 1.3418,
"step": 73
},
{
"epoch": 0.004703825831313814,
"grad_norm": 0.23658373951911926,
"learning_rate": 0.00019708022955706292,
"loss": 1.3967,
"step": 74
},
{
"epoch": 0.004767391045250487,
"grad_norm": 0.2569814622402191,
"learning_rate": 0.00019698873816105273,
"loss": 1.2474,
"step": 75
},
{
"epoch": 0.0048309562591871595,
"grad_norm": 0.24887211620807648,
"learning_rate": 0.00019689585724648516,
"loss": 1.3784,
"step": 76
},
{
"epoch": 0.004894521473123833,
"grad_norm": 0.24335400760173798,
"learning_rate": 0.00019680158814402762,
"loss": 1.2964,
"step": 77
},
{
"epoch": 0.004958086687060506,
"grad_norm": 0.233675017952919,
"learning_rate": 0.00019670593220423558,
"loss": 1.2317,
"step": 78
},
{
"epoch": 0.005021651900997179,
"grad_norm": 0.24147622287273407,
"learning_rate": 0.0001966088907975331,
"loss": 1.1799,
"step": 79
},
{
"epoch": 0.005085217114933853,
"grad_norm": 0.24836859107017517,
"learning_rate": 0.00019651046531419332,
"loss": 1.2793,
"step": 80
},
{
"epoch": 0.005148782328870526,
"grad_norm": 0.24832896888256073,
"learning_rate": 0.00019641065716431849,
"loss": 1.2897,
"step": 81
},
{
"epoch": 0.0052123475428071985,
"grad_norm": 0.22504734992980957,
"learning_rate": 0.00019630946777781966,
"loss": 1.2926,
"step": 82
},
{
"epoch": 0.005275912756743872,
"grad_norm": 0.23750871419906616,
"learning_rate": 0.00019620689860439647,
"loss": 1.3263,
"step": 83
},
{
"epoch": 0.005339477970680545,
"grad_norm": 0.2326141744852066,
"learning_rate": 0.0001961029511135161,
"loss": 1.3168,
"step": 84
},
{
"epoch": 0.005403043184617218,
"grad_norm": 0.24577590823173523,
"learning_rate": 0.0001959976267943923,
"loss": 1.3602,
"step": 85
},
{
"epoch": 0.005466608398553892,
"grad_norm": 0.24200314283370972,
"learning_rate": 0.00019589092715596417,
"loss": 1.3106,
"step": 86
},
{
"epoch": 0.0055301736124905645,
"grad_norm": 0.24798905849456787,
"learning_rate": 0.00019578285372687446,
"loss": 1.3652,
"step": 87
},
{
"epoch": 0.005593738826427237,
"grad_norm": 0.24428631365299225,
"learning_rate": 0.00019567340805544758,
"loss": 1.3525,
"step": 88
},
{
"epoch": 0.005657304040363911,
"grad_norm": 0.22070927917957306,
"learning_rate": 0.00019556259170966755,
"loss": 1.2287,
"step": 89
},
{
"epoch": 0.005720869254300584,
"grad_norm": 0.22392472624778748,
"learning_rate": 0.0001954504062771555,
"loss": 1.2536,
"step": 90
},
{
"epoch": 0.005784434468237257,
"grad_norm": 0.23670928180217743,
"learning_rate": 0.00019533685336514697,
"loss": 1.2817,
"step": 91
},
{
"epoch": 0.005847999682173931,
"grad_norm": 0.24242345988750458,
"learning_rate": 0.00019522193460046864,
"loss": 1.4427,
"step": 92
},
{
"epoch": 0.0059115648961106035,
"grad_norm": 0.23915454745292664,
"learning_rate": 0.00019510565162951537,
"loss": 1.3832,
"step": 93
},
{
"epoch": 0.005975130110047276,
"grad_norm": 0.2182474136352539,
"learning_rate": 0.00019498800611822645,
"loss": 1.2558,
"step": 94
},
{
"epoch": 0.00603869532398395,
"grad_norm": 0.2170976847410202,
"learning_rate": 0.00019486899975206166,
"loss": 1.2063,
"step": 95
},
{
"epoch": 0.006102260537920623,
"grad_norm": 0.23279117047786713,
"learning_rate": 0.00019474863423597728,
"loss": 1.2725,
"step": 96
},
{
"epoch": 0.006165825751857296,
"grad_norm": 0.2265056073665619,
"learning_rate": 0.00019462691129440147,
"loss": 1.3748,
"step": 97
},
{
"epoch": 0.0062293909657939696,
"grad_norm": 0.24922017753124237,
"learning_rate": 0.00019450383267120982,
"loss": 1.2609,
"step": 98
},
{
"epoch": 0.006292956179730642,
"grad_norm": 0.2250678539276123,
"learning_rate": 0.00019437940012970013,
"loss": 1.318,
"step": 99
},
{
"epoch": 0.006356521393667315,
"grad_norm": 0.2318231761455536,
"learning_rate": 0.00019425361545256727,
"loss": 1.3742,
"step": 100
},
{
"epoch": 0.006356521393667315,
"eval_loss": 1.2972837686538696,
"eval_runtime": 1239.6451,
"eval_samples_per_second": 4.033,
"eval_steps_per_second": 1.008,
"step": 100
},
{
"epoch": 0.006420086607603989,
"grad_norm": 0.2393624186515808,
"learning_rate": 0.0001941264804418776,
"loss": 1.3151,
"step": 101
},
{
"epoch": 0.006483651821540662,
"grad_norm": 0.22296006977558136,
"learning_rate": 0.0001939979969190432,
"loss": 1.3039,
"step": 102
},
{
"epoch": 0.006547217035477335,
"grad_norm": 0.23289115726947784,
"learning_rate": 0.00019386816672479565,
"loss": 1.2941,
"step": 103
},
{
"epoch": 0.0066107822494140085,
"grad_norm": 0.23189423978328705,
"learning_rate": 0.00019373699171915988,
"loss": 1.383,
"step": 104
},
{
"epoch": 0.006674347463350681,
"grad_norm": 0.23659420013427734,
"learning_rate": 0.00019360447378142728,
"loss": 1.3502,
"step": 105
},
{
"epoch": 0.006737912677287354,
"grad_norm": 0.2206616997718811,
"learning_rate": 0.00019347061481012894,
"loss": 1.2325,
"step": 106
},
{
"epoch": 0.006801477891224028,
"grad_norm": 0.23390865325927734,
"learning_rate": 0.00019333541672300841,
"loss": 1.3818,
"step": 107
},
{
"epoch": 0.006865043105160701,
"grad_norm": 0.22923798859119415,
"learning_rate": 0.00019319888145699415,
"loss": 1.3863,
"step": 108
},
{
"epoch": 0.006928608319097374,
"grad_norm": 0.2286650538444519,
"learning_rate": 0.00019306101096817186,
"loss": 1.3434,
"step": 109
},
{
"epoch": 0.006992173533034047,
"grad_norm": 0.22913207113742828,
"learning_rate": 0.00019292180723175654,
"loss": 1.3104,
"step": 110
},
{
"epoch": 0.00705573874697072,
"grad_norm": 0.22382238507270813,
"learning_rate": 0.00019278127224206396,
"loss": 1.3462,
"step": 111
},
{
"epoch": 0.007119303960907393,
"grad_norm": 0.23973888158798218,
"learning_rate": 0.00019263940801248226,
"loss": 1.2197,
"step": 112
},
{
"epoch": 0.007182869174844067,
"grad_norm": 0.22928211092948914,
"learning_rate": 0.0001924962165754431,
"loss": 1.2881,
"step": 113
},
{
"epoch": 0.00724643438878074,
"grad_norm": 0.23706093430519104,
"learning_rate": 0.0001923516999823925,
"loss": 1.3918,
"step": 114
},
{
"epoch": 0.007309999602717413,
"grad_norm": 0.23154719173908234,
"learning_rate": 0.00019220586030376134,
"loss": 1.2693,
"step": 115
},
{
"epoch": 0.007373564816654086,
"grad_norm": 0.22728873789310455,
"learning_rate": 0.00019205869962893605,
"loss": 1.3066,
"step": 116
},
{
"epoch": 0.007437130030590759,
"grad_norm": 0.23983193933963776,
"learning_rate": 0.0001919102200662282,
"loss": 1.2305,
"step": 117
},
{
"epoch": 0.007500695244527432,
"grad_norm": 0.2278611958026886,
"learning_rate": 0.0001917604237428447,
"loss": 1.3355,
"step": 118
},
{
"epoch": 0.007564260458464106,
"grad_norm": 0.22559694945812225,
"learning_rate": 0.00019160931280485702,
"loss": 1.2477,
"step": 119
},
{
"epoch": 0.007627825672400779,
"grad_norm": 0.22317031025886536,
"learning_rate": 0.00019145688941717075,
"loss": 1.2892,
"step": 120
},
{
"epoch": 0.0076913908863374516,
"grad_norm": 0.24438677728176117,
"learning_rate": 0.00019130315576349423,
"loss": 1.3355,
"step": 121
},
{
"epoch": 0.007754956100274125,
"grad_norm": 0.22496148943901062,
"learning_rate": 0.00019114811404630762,
"loss": 1.2373,
"step": 122
},
{
"epoch": 0.007818521314210798,
"grad_norm": 0.23284892737865448,
"learning_rate": 0.0001909917664868311,
"loss": 1.2912,
"step": 123
},
{
"epoch": 0.007882086528147472,
"grad_norm": 0.23059885203838348,
"learning_rate": 0.0001908341153249931,
"loss": 1.2765,
"step": 124
},
{
"epoch": 0.007945651742084144,
"grad_norm": 0.23237423598766327,
"learning_rate": 0.00019067516281939825,
"loss": 1.2671,
"step": 125
},
{
"epoch": 0.008009216956020818,
"grad_norm": 0.24287502467632294,
"learning_rate": 0.00019051491124729512,
"loss": 1.3046,
"step": 126
},
{
"epoch": 0.008072782169957491,
"grad_norm": 0.23740199208259583,
"learning_rate": 0.00019035336290454334,
"loss": 1.3117,
"step": 127
},
{
"epoch": 0.008136347383894163,
"grad_norm": 0.2472347617149353,
"learning_rate": 0.00019019052010558088,
"loss": 1.213,
"step": 128
},
{
"epoch": 0.008199912597830837,
"grad_norm": 0.23237191140651703,
"learning_rate": 0.00019002638518339087,
"loss": 1.3313,
"step": 129
},
{
"epoch": 0.00826347781176751,
"grad_norm": 0.23392349481582642,
"learning_rate": 0.00018986096048946824,
"loss": 1.2891,
"step": 130
},
{
"epoch": 0.008327043025704183,
"grad_norm": 0.23264287412166595,
"learning_rate": 0.00018969424839378584,
"loss": 1.2695,
"step": 131
},
{
"epoch": 0.008390608239640857,
"grad_norm": 0.24009764194488525,
"learning_rate": 0.0001895262512847607,
"loss": 1.3448,
"step": 132
},
{
"epoch": 0.00845417345357753,
"grad_norm": 0.2373800426721573,
"learning_rate": 0.0001893569715692197,
"loss": 1.2892,
"step": 133
},
{
"epoch": 0.008517738667514202,
"grad_norm": 0.24795696139335632,
"learning_rate": 0.00018918641167236505,
"loss": 1.2628,
"step": 134
},
{
"epoch": 0.008581303881450876,
"grad_norm": 0.2304483950138092,
"learning_rate": 0.00018901457403773967,
"loss": 1.3338,
"step": 135
},
{
"epoch": 0.00864486909538755,
"grad_norm": 0.2331048846244812,
"learning_rate": 0.00018884146112719207,
"loss": 1.4057,
"step": 136
},
{
"epoch": 0.008708434309324222,
"grad_norm": 0.23537760972976685,
"learning_rate": 0.00018866707542084118,
"loss": 1.211,
"step": 137
},
{
"epoch": 0.008771999523260895,
"grad_norm": 0.24523571133613586,
"learning_rate": 0.00018849141941704067,
"loss": 1.3498,
"step": 138
},
{
"epoch": 0.00883556473719757,
"grad_norm": 0.23081225156784058,
"learning_rate": 0.0001883144956323433,
"loss": 1.289,
"step": 139
},
{
"epoch": 0.008899129951134241,
"grad_norm": 0.24642105400562286,
"learning_rate": 0.00018813630660146488,
"loss": 1.2903,
"step": 140
},
{
"epoch": 0.008962695165070915,
"grad_norm": 0.24798071384429932,
"learning_rate": 0.00018795685487724782,
"loss": 1.2623,
"step": 141
},
{
"epoch": 0.009026260379007589,
"grad_norm": 0.23816098272800446,
"learning_rate": 0.00018777614303062457,
"loss": 1.247,
"step": 142
},
{
"epoch": 0.00908982559294426,
"grad_norm": 0.25761744379997253,
"learning_rate": 0.000187594173650581,
"loss": 1.2847,
"step": 143
},
{
"epoch": 0.009153390806880934,
"grad_norm": 0.2335919439792633,
"learning_rate": 0.000187410949344119,
"loss": 1.3036,
"step": 144
},
{
"epoch": 0.009216956020817608,
"grad_norm": 0.23194169998168945,
"learning_rate": 0.0001872264727362194,
"loss": 1.1859,
"step": 145
},
{
"epoch": 0.00928052123475428,
"grad_norm": 0.23245130479335785,
"learning_rate": 0.00018704074646980415,
"loss": 1.271,
"step": 146
},
{
"epoch": 0.009344086448690954,
"grad_norm": 0.2260020226240158,
"learning_rate": 0.0001868537732056987,
"loss": 1.3987,
"step": 147
},
{
"epoch": 0.009407651662627628,
"grad_norm": 0.2418159693479538,
"learning_rate": 0.00018666555562259356,
"loss": 1.3091,
"step": 148
},
{
"epoch": 0.0094712168765643,
"grad_norm": 0.24677526950836182,
"learning_rate": 0.0001864760964170062,
"loss": 1.296,
"step": 149
},
{
"epoch": 0.009534782090500973,
"grad_norm": 0.25532832741737366,
"learning_rate": 0.00018628539830324229,
"loss": 1.4065,
"step": 150
},
{
"epoch": 0.009598347304437647,
"grad_norm": 0.2427254617214203,
"learning_rate": 0.00018609346401335684,
"loss": 1.3204,
"step": 151
},
{
"epoch": 0.009661912518374319,
"grad_norm": 0.24365244805812836,
"learning_rate": 0.00018590029629711506,
"loss": 1.3882,
"step": 152
},
{
"epoch": 0.009725477732310993,
"grad_norm": 0.22638386487960815,
"learning_rate": 0.00018570589792195298,
"loss": 1.2492,
"step": 153
},
{
"epoch": 0.009789042946247667,
"grad_norm": 0.2262774556875229,
"learning_rate": 0.00018551027167293768,
"loss": 1.2209,
"step": 154
},
{
"epoch": 0.009852608160184339,
"grad_norm": 0.21981820464134216,
"learning_rate": 0.00018531342035272766,
"loss": 1.1872,
"step": 155
},
{
"epoch": 0.009916173374121012,
"grad_norm": 0.23878952860832214,
"learning_rate": 0.00018511534678153244,
"loss": 1.3344,
"step": 156
},
{
"epoch": 0.009979738588057686,
"grad_norm": 0.2407320737838745,
"learning_rate": 0.0001849160537970722,
"loss": 1.259,
"step": 157
},
{
"epoch": 0.010043303801994358,
"grad_norm": 0.23277470469474792,
"learning_rate": 0.0001847155442545372,
"loss": 1.2743,
"step": 158
},
{
"epoch": 0.010106869015931032,
"grad_norm": 0.23986446857452393,
"learning_rate": 0.00018451382102654683,
"loss": 1.2853,
"step": 159
},
{
"epoch": 0.010170434229867705,
"grad_norm": 0.23248420655727386,
"learning_rate": 0.00018431088700310844,
"loss": 1.2478,
"step": 160
},
{
"epoch": 0.010233999443804377,
"grad_norm": 0.2354133576154709,
"learning_rate": 0.00018410674509157607,
"loss": 1.2036,
"step": 161
},
{
"epoch": 0.010297564657741051,
"grad_norm": 0.2392132580280304,
"learning_rate": 0.00018390139821660855,
"loss": 1.2546,
"step": 162
},
{
"epoch": 0.010361129871677725,
"grad_norm": 0.2282291203737259,
"learning_rate": 0.00018369484932012777,
"loss": 1.2038,
"step": 163
},
{
"epoch": 0.010424695085614397,
"grad_norm": 0.24312043190002441,
"learning_rate": 0.00018348710136127655,
"loss": 1.3012,
"step": 164
},
{
"epoch": 0.01048826029955107,
"grad_norm": 0.2509647309780121,
"learning_rate": 0.00018327815731637612,
"loss": 1.2312,
"step": 165
},
{
"epoch": 0.010551825513487744,
"grad_norm": 0.2409852147102356,
"learning_rate": 0.0001830680201788836,
"loss": 1.2188,
"step": 166
},
{
"epoch": 0.010615390727424416,
"grad_norm": 0.23334528505802155,
"learning_rate": 0.0001828566929593491,
"loss": 1.3472,
"step": 167
},
{
"epoch": 0.01067895594136109,
"grad_norm": 0.23858241736888885,
"learning_rate": 0.00018264417868537244,
"loss": 1.3308,
"step": 168
},
{
"epoch": 0.010742521155297764,
"grad_norm": 0.22705812752246857,
"learning_rate": 0.00018243048040156003,
"loss": 1.2548,
"step": 169
},
{
"epoch": 0.010806086369234436,
"grad_norm": 0.24830320477485657,
"learning_rate": 0.00018221560116948103,
"loss": 1.308,
"step": 170
},
{
"epoch": 0.01086965158317111,
"grad_norm": 0.23871473968029022,
"learning_rate": 0.00018199954406762352,
"loss": 1.2678,
"step": 171
},
{
"epoch": 0.010933216797107783,
"grad_norm": 0.23958680033683777,
"learning_rate": 0.0001817823121913506,
"loss": 1.2768,
"step": 172
},
{
"epoch": 0.010996782011044455,
"grad_norm": 0.2274036854505539,
"learning_rate": 0.00018156390865285574,
"loss": 1.239,
"step": 173
},
{
"epoch": 0.011060347224981129,
"grad_norm": 0.24076679348945618,
"learning_rate": 0.00018134433658111845,
"loss": 1.3382,
"step": 174
},
{
"epoch": 0.011123912438917803,
"grad_norm": 0.23195293545722961,
"learning_rate": 0.00018112359912185924,
"loss": 1.2813,
"step": 175
},
{
"epoch": 0.011187477652854475,
"grad_norm": 0.24076253175735474,
"learning_rate": 0.00018090169943749476,
"loss": 1.2844,
"step": 176
},
{
"epoch": 0.011251042866791149,
"grad_norm": 0.228972926735878,
"learning_rate": 0.00018067864070709231,
"loss": 1.3109,
"step": 177
},
{
"epoch": 0.011314608080727822,
"grad_norm": 0.23846612870693207,
"learning_rate": 0.00018045442612632444,
"loss": 1.2099,
"step": 178
},
{
"epoch": 0.011378173294664494,
"grad_norm": 0.22861884534358978,
"learning_rate": 0.00018022905890742306,
"loss": 1.3219,
"step": 179
},
{
"epoch": 0.011441738508601168,
"grad_norm": 0.23232094943523407,
"learning_rate": 0.00018000254227913348,
"loss": 1.1867,
"step": 180
},
{
"epoch": 0.011505303722537842,
"grad_norm": 0.24195295572280884,
"learning_rate": 0.0001797748794866681,
"loss": 1.3789,
"step": 181
},
{
"epoch": 0.011568868936474514,
"grad_norm": 0.23893454670906067,
"learning_rate": 0.00017954607379166,
"loss": 1.2436,
"step": 182
},
{
"epoch": 0.011632434150411188,
"grad_norm": 0.23289944231510162,
"learning_rate": 0.00017931612847211614,
"loss": 1.2716,
"step": 183
},
{
"epoch": 0.011695999364347861,
"grad_norm": 0.23664525151252747,
"learning_rate": 0.00017908504682237047,
"loss": 1.2751,
"step": 184
},
{
"epoch": 0.011759564578284533,
"grad_norm": 0.22646844387054443,
"learning_rate": 0.0001788528321530366,
"loss": 1.2692,
"step": 185
},
{
"epoch": 0.011823129792221207,
"grad_norm": 0.23897014558315277,
"learning_rate": 0.00017861948779096046,
"loss": 1.2104,
"step": 186
},
{
"epoch": 0.01188669500615788,
"grad_norm": 0.2374998778104782,
"learning_rate": 0.00017838501707917277,
"loss": 1.3326,
"step": 187
},
{
"epoch": 0.011950260220094553,
"grad_norm": 0.23287327587604523,
"learning_rate": 0.0001781494233768408,
"loss": 1.2739,
"step": 188
},
{
"epoch": 0.012013825434031226,
"grad_norm": 0.25094935297966003,
"learning_rate": 0.00017791271005922066,
"loss": 1.3138,
"step": 189
},
{
"epoch": 0.0120773906479679,
"grad_norm": 0.24589940905570984,
"learning_rate": 0.00017767488051760857,
"loss": 1.2747,
"step": 190
},
{
"epoch": 0.012140955861904572,
"grad_norm": 0.22110210359096527,
"learning_rate": 0.0001774359381592925,
"loss": 1.2751,
"step": 191
},
{
"epoch": 0.012204521075841246,
"grad_norm": 0.25095194578170776,
"learning_rate": 0.00017719588640750336,
"loss": 1.25,
"step": 192
},
{
"epoch": 0.01226808628977792,
"grad_norm": 0.2439023107290268,
"learning_rate": 0.00017695472870136577,
"loss": 1.1904,
"step": 193
},
{
"epoch": 0.012331651503714592,
"grad_norm": 0.2421332150697708,
"learning_rate": 0.00017671246849584903,
"loss": 1.2793,
"step": 194
},
{
"epoch": 0.012395216717651265,
"grad_norm": 0.23399034142494202,
"learning_rate": 0.00017646910926171747,
"loss": 1.2824,
"step": 195
},
{
"epoch": 0.012458781931587939,
"grad_norm": 0.22972531616687775,
"learning_rate": 0.0001762246544854807,
"loss": 1.1848,
"step": 196
},
{
"epoch": 0.012522347145524611,
"grad_norm": 0.22563257813453674,
"learning_rate": 0.00017597910766934378,
"loss": 1.2338,
"step": 197
},
{
"epoch": 0.012585912359461285,
"grad_norm": 0.24203820526599884,
"learning_rate": 0.00017573247233115694,
"loss": 1.2555,
"step": 198
},
{
"epoch": 0.012649477573397959,
"grad_norm": 0.2555934488773346,
"learning_rate": 0.00017548475200436533,
"loss": 1.3626,
"step": 199
},
{
"epoch": 0.01271304278733463,
"grad_norm": 0.2504107356071472,
"learning_rate": 0.00017523595023795813,
"loss": 1.3926,
"step": 200
},
{
"epoch": 0.01271304278733463,
"eval_loss": 1.2624019384384155,
"eval_runtime": 1240.3941,
"eval_samples_per_second": 4.031,
"eval_steps_per_second": 1.008,
"step": 200
},
{
"epoch": 0.012776608001271304,
"grad_norm": 0.24385319650173187,
"learning_rate": 0.00017498607059641806,
"loss": 1.3571,
"step": 201
},
{
"epoch": 0.012840173215207978,
"grad_norm": 0.22875139117240906,
"learning_rate": 0.00017473511665966993,
"loss": 1.1978,
"step": 202
},
{
"epoch": 0.01290373842914465,
"grad_norm": 0.23931051790714264,
"learning_rate": 0.00017448309202302968,
"loss": 1.2109,
"step": 203
},
{
"epoch": 0.012967303643081324,
"grad_norm": 0.24745705723762512,
"learning_rate": 0.00017423000029715267,
"loss": 1.2498,
"step": 204
},
{
"epoch": 0.013030868857017998,
"grad_norm": 0.2564117908477783,
"learning_rate": 0.0001739758451079821,
"loss": 1.3109,
"step": 205
},
{
"epoch": 0.01309443407095467,
"grad_norm": 0.23329947888851166,
"learning_rate": 0.00017372063009669686,
"loss": 1.2075,
"step": 206
},
{
"epoch": 0.013157999284891343,
"grad_norm": 0.25186190009117126,
"learning_rate": 0.00017346435891965956,
"loss": 1.3316,
"step": 207
},
{
"epoch": 0.013221564498828017,
"grad_norm": 0.24167706072330475,
"learning_rate": 0.00017320703524836405,
"loss": 1.2435,
"step": 208
},
{
"epoch": 0.013285129712764689,
"grad_norm": 0.24145130813121796,
"learning_rate": 0.00017294866276938288,
"loss": 1.2089,
"step": 209
},
{
"epoch": 0.013348694926701363,
"grad_norm": 0.23003919422626495,
"learning_rate": 0.00017268924518431438,
"loss": 1.2861,
"step": 210
},
{
"epoch": 0.013412260140638036,
"grad_norm": 0.2578867971897125,
"learning_rate": 0.00017242878620972972,
"loss": 1.2853,
"step": 211
},
{
"epoch": 0.013475825354574708,
"grad_norm": 0.2372393161058426,
"learning_rate": 0.00017216728957711967,
"loss": 1.2119,
"step": 212
},
{
"epoch": 0.013539390568511382,
"grad_norm": 0.2317056953907013,
"learning_rate": 0.0001719047590328411,
"loss": 1.2229,
"step": 213
},
{
"epoch": 0.013602955782448056,
"grad_norm": 0.24721965193748474,
"learning_rate": 0.0001716411983380632,
"loss": 1.3942,
"step": 214
},
{
"epoch": 0.013666520996384728,
"grad_norm": 0.2334171086549759,
"learning_rate": 0.0001713766112687139,
"loss": 1.2808,
"step": 215
},
{
"epoch": 0.013730086210321402,
"grad_norm": 0.23908108472824097,
"learning_rate": 0.00017111100161542545,
"loss": 1.2997,
"step": 216
},
{
"epoch": 0.013793651424258075,
"grad_norm": 0.23297281563282013,
"learning_rate": 0.00017084437318348034,
"loss": 1.2322,
"step": 217
},
{
"epoch": 0.013857216638194747,
"grad_norm": 0.23927763104438782,
"learning_rate": 0.00017057672979275656,
"loss": 1.2686,
"step": 218
},
{
"epoch": 0.013920781852131421,
"grad_norm": 0.24085508286952972,
"learning_rate": 0.00017030807527767318,
"loss": 1.279,
"step": 219
},
{
"epoch": 0.013984347066068095,
"grad_norm": 0.24235960841178894,
"learning_rate": 0.0001700384134871351,
"loss": 1.2663,
"step": 220
},
{
"epoch": 0.014047912280004767,
"grad_norm": 0.23297156393527985,
"learning_rate": 0.00016976774828447808,
"loss": 1.3186,
"step": 221
},
{
"epoch": 0.01411147749394144,
"grad_norm": 0.23962871730327606,
"learning_rate": 0.0001694960835474134,
"loss": 1.3278,
"step": 222
},
{
"epoch": 0.014175042707878114,
"grad_norm": 0.23609745502471924,
"learning_rate": 0.00016922342316797224,
"loss": 1.2402,
"step": 223
},
{
"epoch": 0.014238607921814786,
"grad_norm": 0.23808908462524414,
"learning_rate": 0.00016894977105244997,
"loss": 1.179,
"step": 224
},
{
"epoch": 0.01430217313575146,
"grad_norm": 0.253446102142334,
"learning_rate": 0.00016867513112135013,
"loss": 1.1718,
"step": 225
},
{
"epoch": 0.014365738349688134,
"grad_norm": 0.2394704669713974,
"learning_rate": 0.0001683995073093283,
"loss": 1.3013,
"step": 226
},
{
"epoch": 0.014429303563624806,
"grad_norm": 0.2568953335285187,
"learning_rate": 0.00016812290356513576,
"loss": 1.3447,
"step": 227
},
{
"epoch": 0.01449286877756148,
"grad_norm": 0.24280501902103424,
"learning_rate": 0.00016784532385156285,
"loss": 1.1629,
"step": 228
},
{
"epoch": 0.014556433991498153,
"grad_norm": 0.24458537995815277,
"learning_rate": 0.0001675667721453822,
"loss": 1.3059,
"step": 229
},
{
"epoch": 0.014619999205434825,
"grad_norm": 0.2558242082595825,
"learning_rate": 0.0001672872524372919,
"loss": 1.2409,
"step": 230
},
{
"epoch": 0.014683564419371499,
"grad_norm": 0.2413797229528427,
"learning_rate": 0.0001670067687318581,
"loss": 1.1722,
"step": 231
},
{
"epoch": 0.014747129633308173,
"grad_norm": 0.23881018161773682,
"learning_rate": 0.00016672532504745778,
"loss": 1.2605,
"step": 232
},
{
"epoch": 0.014810694847244845,
"grad_norm": 0.2394268959760666,
"learning_rate": 0.00016644292541622118,
"loss": 1.3001,
"step": 233
},
{
"epoch": 0.014874260061181518,
"grad_norm": 0.2380480170249939,
"learning_rate": 0.00016615957388397399,
"loss": 1.2341,
"step": 234
},
{
"epoch": 0.014937825275118192,
"grad_norm": 0.25071921944618225,
"learning_rate": 0.00016587527451017938,
"loss": 1.2613,
"step": 235
},
{
"epoch": 0.015001390489054864,
"grad_norm": 0.24680428206920624,
"learning_rate": 0.00016559003136787988,
"loss": 1.1784,
"step": 236
},
{
"epoch": 0.015064955702991538,
"grad_norm": 0.23283323645591736,
"learning_rate": 0.00016530384854363908,
"loss": 1.2108,
"step": 237
},
{
"epoch": 0.015128520916928212,
"grad_norm": 0.25436681509017944,
"learning_rate": 0.00016501673013748284,
"loss": 1.2712,
"step": 238
},
{
"epoch": 0.015192086130864884,
"grad_norm": 0.24940147995948792,
"learning_rate": 0.0001647286802628409,
"loss": 1.3073,
"step": 239
},
{
"epoch": 0.015255651344801557,
"grad_norm": 0.24033071100711823,
"learning_rate": 0.0001644397030464877,
"loss": 1.2888,
"step": 240
},
{
"epoch": 0.015319216558738231,
"grad_norm": 0.2522200047969818,
"learning_rate": 0.00016414980262848333,
"loss": 1.3287,
"step": 241
},
{
"epoch": 0.015382781772674903,
"grad_norm": 0.24863417446613312,
"learning_rate": 0.00016385898316211426,
"loss": 1.3074,
"step": 242
},
{
"epoch": 0.015446346986611577,
"grad_norm": 0.2419770359992981,
"learning_rate": 0.0001635672488138337,
"loss": 1.22,
"step": 243
},
{
"epoch": 0.01550991220054825,
"grad_norm": 0.24712932109832764,
"learning_rate": 0.0001632746037632021,
"loss": 1.1722,
"step": 244
},
{
"epoch": 0.015573477414484923,
"grad_norm": 0.2783099412918091,
"learning_rate": 0.00016298105220282713,
"loss": 1.2831,
"step": 245
},
{
"epoch": 0.015637042628421596,
"grad_norm": 0.24741223454475403,
"learning_rate": 0.00016268659833830367,
"loss": 1.2515,
"step": 246
},
{
"epoch": 0.01570060784235827,
"grad_norm": 0.23735453188419342,
"learning_rate": 0.00016239124638815357,
"loss": 1.3524,
"step": 247
},
{
"epoch": 0.015764173056294944,
"grad_norm": 0.24380360543727875,
"learning_rate": 0.00016209500058376515,
"loss": 1.3111,
"step": 248
},
{
"epoch": 0.015827738270231616,
"grad_norm": 0.2466178685426712,
"learning_rate": 0.00016179786516933264,
"loss": 1.212,
"step": 249
},
{
"epoch": 0.015891303484168288,
"grad_norm": 0.2434193640947342,
"learning_rate": 0.00016149984440179537,
"loss": 1.271,
"step": 250
},
{
"epoch": 0.015954868698104963,
"grad_norm": 0.23572176694869995,
"learning_rate": 0.0001612009425507767,
"loss": 1.2519,
"step": 251
},
{
"epoch": 0.016018433912041635,
"grad_norm": 0.25842970609664917,
"learning_rate": 0.00016090116389852306,
"loss": 1.2756,
"step": 252
},
{
"epoch": 0.016081999125978307,
"grad_norm": 0.24664999544620514,
"learning_rate": 0.00016060051273984226,
"loss": 1.3055,
"step": 253
},
{
"epoch": 0.016145564339914983,
"grad_norm": 0.2308342307806015,
"learning_rate": 0.00016029899338204233,
"loss": 1.2021,
"step": 254
},
{
"epoch": 0.016209129553851655,
"grad_norm": 0.2438189834356308,
"learning_rate": 0.00015999661014486956,
"loss": 1.2889,
"step": 255
},
{
"epoch": 0.016272694767788327,
"grad_norm": 0.25176259875297546,
"learning_rate": 0.0001596933673604467,
"loss": 1.2601,
"step": 256
},
{
"epoch": 0.016336259981725002,
"grad_norm": 0.2458602339029312,
"learning_rate": 0.0001593892693732109,
"loss": 1.2662,
"step": 257
},
{
"epoch": 0.016399825195661674,
"grad_norm": 0.2469761073589325,
"learning_rate": 0.00015908432053985143,
"loss": 1.2756,
"step": 258
},
{
"epoch": 0.016463390409598346,
"grad_norm": 0.24446940422058105,
"learning_rate": 0.00015877852522924732,
"loss": 1.2768,
"step": 259
},
{
"epoch": 0.01652695562353502,
"grad_norm": 0.23439179360866547,
"learning_rate": 0.0001584718878224047,
"loss": 1.2613,
"step": 260
},
{
"epoch": 0.016590520837471694,
"grad_norm": 0.2435050904750824,
"learning_rate": 0.00015816441271239415,
"loss": 1.1588,
"step": 261
},
{
"epoch": 0.016654086051408366,
"grad_norm": 0.24683699011802673,
"learning_rate": 0.00015785610430428762,
"loss": 1.2775,
"step": 262
},
{
"epoch": 0.01671765126534504,
"grad_norm": 0.24520638585090637,
"learning_rate": 0.00015754696701509533,
"loss": 1.2367,
"step": 263
},
{
"epoch": 0.016781216479281713,
"grad_norm": 0.24515977501869202,
"learning_rate": 0.00015723700527370268,
"loss": 1.3044,
"step": 264
},
{
"epoch": 0.016844781693218385,
"grad_norm": 0.23757795989513397,
"learning_rate": 0.00015692622352080662,
"loss": 1.2438,
"step": 265
},
{
"epoch": 0.01690834690715506,
"grad_norm": 0.23741991817951202,
"learning_rate": 0.00015661462620885199,
"loss": 1.2141,
"step": 266
},
{
"epoch": 0.016971912121091733,
"grad_norm": 0.2564014494419098,
"learning_rate": 0.00015630221780196793,
"loss": 1.2862,
"step": 267
},
{
"epoch": 0.017035477335028405,
"grad_norm": 0.24640651047229767,
"learning_rate": 0.0001559890027759037,
"loss": 1.3254,
"step": 268
},
{
"epoch": 0.01709904254896508,
"grad_norm": 0.24048472940921783,
"learning_rate": 0.0001556749856179648,
"loss": 1.2562,
"step": 269
},
{
"epoch": 0.017162607762901752,
"grad_norm": 0.24159260094165802,
"learning_rate": 0.00015536017082694846,
"loss": 1.1149,
"step": 270
},
{
"epoch": 0.017226172976838424,
"grad_norm": 0.2385244071483612,
"learning_rate": 0.00015504456291307928,
"loss": 1.1802,
"step": 271
},
{
"epoch": 0.0172897381907751,
"grad_norm": 0.2397131323814392,
"learning_rate": 0.0001547281663979446,
"loss": 1.1722,
"step": 272
},
{
"epoch": 0.01735330340471177,
"grad_norm": 0.24814891815185547,
"learning_rate": 0.0001544109858144298,
"loss": 1.2578,
"step": 273
},
{
"epoch": 0.017416868618648444,
"grad_norm": 0.2575724124908447,
"learning_rate": 0.00015409302570665325,
"loss": 1.2823,
"step": 274
},
{
"epoch": 0.01748043383258512,
"grad_norm": 0.2540719211101532,
"learning_rate": 0.00015377429062990122,
"loss": 1.2979,
"step": 275
},
{
"epoch": 0.01754399904652179,
"grad_norm": 0.24557684361934662,
"learning_rate": 0.00015345478515056267,
"loss": 1.3124,
"step": 276
},
{
"epoch": 0.017607564260458463,
"grad_norm": 0.24520032107830048,
"learning_rate": 0.0001531345138460639,
"loss": 1.2435,
"step": 277
},
{
"epoch": 0.01767112947439514,
"grad_norm": 0.24698208272457123,
"learning_rate": 0.00015281348130480272,
"loss": 1.1623,
"step": 278
},
{
"epoch": 0.01773469468833181,
"grad_norm": 0.2547725439071655,
"learning_rate": 0.00015249169212608294,
"loss": 1.2966,
"step": 279
},
{
"epoch": 0.017798259902268482,
"grad_norm": 0.24341244995594025,
"learning_rate": 0.00015216915092004847,
"loss": 1.3736,
"step": 280
},
{
"epoch": 0.017861825116205158,
"grad_norm": 0.2434585839509964,
"learning_rate": 0.0001518458623076171,
"loss": 1.285,
"step": 281
},
{
"epoch": 0.01792539033014183,
"grad_norm": 0.2521164119243622,
"learning_rate": 0.0001515218309204145,
"loss": 1.2666,
"step": 282
},
{
"epoch": 0.017988955544078502,
"grad_norm": 0.23752371966838837,
"learning_rate": 0.00015119706140070778,
"loss": 1.1804,
"step": 283
},
{
"epoch": 0.018052520758015177,
"grad_norm": 0.2338380366563797,
"learning_rate": 0.00015087155840133888,
"loss": 1.2849,
"step": 284
},
{
"epoch": 0.01811608597195185,
"grad_norm": 0.24568547308444977,
"learning_rate": 0.0001505453265856581,
"loss": 1.3278,
"step": 285
},
{
"epoch": 0.01817965118588852,
"grad_norm": 0.24116171896457672,
"learning_rate": 0.00015021837062745714,
"loss": 1.1844,
"step": 286
},
{
"epoch": 0.018243216399825197,
"grad_norm": 0.23728054761886597,
"learning_rate": 0.00014989069521090226,
"loss": 1.1546,
"step": 287
},
{
"epoch": 0.01830678161376187,
"grad_norm": 0.24119961261749268,
"learning_rate": 0.00014956230503046703,
"loss": 1.1987,
"step": 288
},
{
"epoch": 0.01837034682769854,
"grad_norm": 0.24379144608974457,
"learning_rate": 0.00014923320479086523,
"loss": 1.2073,
"step": 289
},
{
"epoch": 0.018433912041635216,
"grad_norm": 0.2407054901123047,
"learning_rate": 0.00014890339920698334,
"loss": 1.2317,
"step": 290
},
{
"epoch": 0.01849747725557189,
"grad_norm": 0.22647134959697723,
"learning_rate": 0.000148572893003813,
"loss": 1.1869,
"step": 291
},
{
"epoch": 0.01856104246950856,
"grad_norm": 0.24462617933750153,
"learning_rate": 0.00014824169091638337,
"loss": 1.1895,
"step": 292
},
{
"epoch": 0.018624607683445236,
"grad_norm": 0.24606405198574066,
"learning_rate": 0.0001479097976896933,
"loss": 1.3085,
"step": 293
},
{
"epoch": 0.018688172897381908,
"grad_norm": 0.24407516419887543,
"learning_rate": 0.00014757721807864317,
"loss": 1.2148,
"step": 294
},
{
"epoch": 0.01875173811131858,
"grad_norm": 0.24692152440547943,
"learning_rate": 0.0001472439568479671,
"loss": 1.2923,
"step": 295
},
{
"epoch": 0.018815303325255255,
"grad_norm": 0.2552792429924011,
"learning_rate": 0.0001469100187721644,
"loss": 1.3185,
"step": 296
},
{
"epoch": 0.018878868539191927,
"grad_norm": 0.24867340922355652,
"learning_rate": 0.0001465754086354312,
"loss": 1.2277,
"step": 297
},
{
"epoch": 0.0189424337531286,
"grad_norm": 0.2612234354019165,
"learning_rate": 0.0001462401312315922,
"loss": 1.1123,
"step": 298
},
{
"epoch": 0.019005998967065275,
"grad_norm": 0.24172838032245636,
"learning_rate": 0.00014590419136403148,
"loss": 1.384,
"step": 299
},
{
"epoch": 0.019069564181001947,
"grad_norm": 0.25088098645210266,
"learning_rate": 0.00014556759384562416,
"loss": 1.2406,
"step": 300
},
{
"epoch": 0.019069564181001947,
"eval_loss": 1.2426228523254395,
"eval_runtime": 1240.5903,
"eval_samples_per_second": 4.03,
"eval_steps_per_second": 1.008,
"step": 300
},
{
"epoch": 0.01913312939493862,
"grad_norm": 0.24052245914936066,
"learning_rate": 0.00014523034349866724,
"loss": 1.1181,
"step": 301
},
{
"epoch": 0.019196694608875294,
"grad_norm": 0.24944396317005157,
"learning_rate": 0.00014489244515481046,
"loss": 1.2483,
"step": 302
},
{
"epoch": 0.019260259822811966,
"grad_norm": 0.24091075360774994,
"learning_rate": 0.00014455390365498722,
"loss": 1.3159,
"step": 303
},
{
"epoch": 0.019323825036748638,
"grad_norm": 0.23631052672863007,
"learning_rate": 0.0001442147238493451,
"loss": 1.2644,
"step": 304
},
{
"epoch": 0.019387390250685314,
"grad_norm": 0.25283417105674744,
"learning_rate": 0.00014387491059717652,
"loss": 1.1845,
"step": 305
},
{
"epoch": 0.019450955464621986,
"grad_norm": 0.24758540093898773,
"learning_rate": 0.00014353446876684892,
"loss": 1.1757,
"step": 306
},
{
"epoch": 0.019514520678558658,
"grad_norm": 0.2573185861110687,
"learning_rate": 0.00014319340323573518,
"loss": 1.2536,
"step": 307
},
{
"epoch": 0.019578085892495333,
"grad_norm": 0.25676408410072327,
"learning_rate": 0.0001428517188901437,
"loss": 1.2669,
"step": 308
},
{
"epoch": 0.019641651106432005,
"grad_norm": 0.2364342361688614,
"learning_rate": 0.00014250942062524834,
"loss": 1.2699,
"step": 309
},
{
"epoch": 0.019705216320368677,
"grad_norm": 0.24659450352191925,
"learning_rate": 0.0001421665133450184,
"loss": 1.2379,
"step": 310
},
{
"epoch": 0.019768781534305353,
"grad_norm": 0.24497289955615997,
"learning_rate": 0.00014182300196214829,
"loss": 1.2435,
"step": 311
},
{
"epoch": 0.019832346748242025,
"grad_norm": 0.2553839683532715,
"learning_rate": 0.00014147889139798708,
"loss": 1.264,
"step": 312
},
{
"epoch": 0.019895911962178697,
"grad_norm": 0.2326495200395584,
"learning_rate": 0.00014113418658246815,
"loss": 1.2118,
"step": 313
},
{
"epoch": 0.019959477176115372,
"grad_norm": 0.2362123280763626,
"learning_rate": 0.00014078889245403844,
"loss": 1.2604,
"step": 314
},
{
"epoch": 0.020023042390052044,
"grad_norm": 0.24172857403755188,
"learning_rate": 0.0001404430139595877,
"loss": 1.2177,
"step": 315
},
{
"epoch": 0.020086607603988716,
"grad_norm": 0.2595900595188141,
"learning_rate": 0.0001400965560543778,
"loss": 1.1845,
"step": 316
},
{
"epoch": 0.02015017281792539,
"grad_norm": 0.2511995732784271,
"learning_rate": 0.0001397495237019714,
"loss": 1.2778,
"step": 317
},
{
"epoch": 0.020213738031862064,
"grad_norm": 0.256143182516098,
"learning_rate": 0.0001394019218741612,
"loss": 1.1939,
"step": 318
},
{
"epoch": 0.020277303245798736,
"grad_norm": 0.2604413330554962,
"learning_rate": 0.00013905375555089844,
"loss": 1.2582,
"step": 319
},
{
"epoch": 0.02034086845973541,
"grad_norm": 0.24835054576396942,
"learning_rate": 0.00013870502972022173,
"loss": 1.2737,
"step": 320
},
{
"epoch": 0.020404433673672083,
"grad_norm": 0.24885138869285583,
"learning_rate": 0.00013835574937818543,
"loss": 1.1771,
"step": 321
},
{
"epoch": 0.020467998887608755,
"grad_norm": 0.24037449061870575,
"learning_rate": 0.00013800591952878825,
"loss": 1.11,
"step": 322
},
{
"epoch": 0.02053156410154543,
"grad_norm": 0.24274124205112457,
"learning_rate": 0.00013765554518390142,
"loss": 1.2627,
"step": 323
},
{
"epoch": 0.020595129315482102,
"grad_norm": 0.23798492550849915,
"learning_rate": 0.00013730463136319692,
"loss": 1.2339,
"step": 324
},
{
"epoch": 0.020658694529418774,
"grad_norm": 0.24593910574913025,
"learning_rate": 0.0001369531830940757,
"loss": 1.2493,
"step": 325
},
{
"epoch": 0.02072225974335545,
"grad_norm": 0.25841817259788513,
"learning_rate": 0.00013660120541159537,
"loss": 1.234,
"step": 326
},
{
"epoch": 0.020785824957292122,
"grad_norm": 0.24712218344211578,
"learning_rate": 0.00013624870335839835,
"loss": 1.3844,
"step": 327
},
{
"epoch": 0.020849390171228794,
"grad_norm": 0.25257477164268494,
"learning_rate": 0.00013589568198463944,
"loss": 1.2407,
"step": 328
},
{
"epoch": 0.02091295538516547,
"grad_norm": 0.24789343774318695,
"learning_rate": 0.00013554214634791358,
"loss": 1.2008,
"step": 329
},
{
"epoch": 0.02097652059910214,
"grad_norm": 0.2543022930622101,
"learning_rate": 0.0001351881015131833,
"loss": 1.2352,
"step": 330
},
{
"epoch": 0.021040085813038813,
"grad_norm": 0.2460203766822815,
"learning_rate": 0.00013483355255270634,
"loss": 1.2062,
"step": 331
},
{
"epoch": 0.02110365102697549,
"grad_norm": 0.25283366441726685,
"learning_rate": 0.00013447850454596265,
"loss": 1.2771,
"step": 332
},
{
"epoch": 0.02116721624091216,
"grad_norm": 0.24133743345737457,
"learning_rate": 0.0001341229625795819,
"loss": 1.1678,
"step": 333
},
{
"epoch": 0.021230781454848833,
"grad_norm": 0.2596588432788849,
"learning_rate": 0.00013376693174727065,
"loss": 1.293,
"step": 334
},
{
"epoch": 0.02129434666878551,
"grad_norm": 0.23952282965183258,
"learning_rate": 0.000133410417149739,
"loss": 1.235,
"step": 335
},
{
"epoch": 0.02135791188272218,
"grad_norm": 0.24762655794620514,
"learning_rate": 0.00013305342389462792,
"loss": 1.2907,
"step": 336
},
{
"epoch": 0.021421477096658852,
"grad_norm": 0.2540108859539032,
"learning_rate": 0.0001326959570964359,
"loss": 1.2545,
"step": 337
},
{
"epoch": 0.021485042310595528,
"grad_norm": 0.24914731085300446,
"learning_rate": 0.00013233802187644566,
"loss": 1.2428,
"step": 338
},
{
"epoch": 0.0215486075245322,
"grad_norm": 0.24028116464614868,
"learning_rate": 0.00013197962336265078,
"loss": 1.2802,
"step": 339
},
{
"epoch": 0.021612172738468872,
"grad_norm": 0.2514800727367401,
"learning_rate": 0.0001316207666896824,
"loss": 1.2193,
"step": 340
},
{
"epoch": 0.021675737952405547,
"grad_norm": 0.24528062343597412,
"learning_rate": 0.00013126145699873532,
"loss": 1.3053,
"step": 341
},
{
"epoch": 0.02173930316634222,
"grad_norm": 0.2537722587585449,
"learning_rate": 0.00013090169943749476,
"loss": 1.0966,
"step": 342
},
{
"epoch": 0.02180286838027889,
"grad_norm": 0.25322455167770386,
"learning_rate": 0.00013054149916006218,
"loss": 1.1693,
"step": 343
},
{
"epoch": 0.021866433594215567,
"grad_norm": 0.2454894334077835,
"learning_rate": 0.00013018086132688184,
"loss": 1.1239,
"step": 344
},
{
"epoch": 0.02192999880815224,
"grad_norm": 0.25263822078704834,
"learning_rate": 0.00012981979110466654,
"loss": 1.3322,
"step": 345
},
{
"epoch": 0.02199356402208891,
"grad_norm": 0.2569487988948822,
"learning_rate": 0.0001294582936663239,
"loss": 1.2597,
"step": 346
},
{
"epoch": 0.022057129236025586,
"grad_norm": 0.26036956906318665,
"learning_rate": 0.00012909637419088193,
"loss": 1.2215,
"step": 347
},
{
"epoch": 0.022120694449962258,
"grad_norm": 0.24927780032157898,
"learning_rate": 0.00012873403786341513,
"loss": 1.2706,
"step": 348
},
{
"epoch": 0.02218425966389893,
"grad_norm": 0.2510969340801239,
"learning_rate": 0.00012837128987497,
"loss": 1.2803,
"step": 349
},
{
"epoch": 0.022247824877835606,
"grad_norm": 0.25289925932884216,
"learning_rate": 0.00012800813542249072,
"loss": 1.279,
"step": 350
},
{
"epoch": 0.022311390091772278,
"grad_norm": 0.27076616883277893,
"learning_rate": 0.00012764457970874474,
"loss": 1.2417,
"step": 351
},
{
"epoch": 0.02237495530570895,
"grad_norm": 0.25417760014533997,
"learning_rate": 0.00012728062794224832,
"loss": 1.2225,
"step": 352
},
{
"epoch": 0.022438520519645625,
"grad_norm": 0.2544157803058624,
"learning_rate": 0.00012691628533719162,
"loss": 1.2485,
"step": 353
},
{
"epoch": 0.022502085733582297,
"grad_norm": 0.24504055082798004,
"learning_rate": 0.0001265515571133643,
"loss": 1.213,
"step": 354
},
{
"epoch": 0.02256565094751897,
"grad_norm": 0.24301055073738098,
"learning_rate": 0.0001261864484960807,
"loss": 1.3125,
"step": 355
},
{
"epoch": 0.022629216161455645,
"grad_norm": 0.25097525119781494,
"learning_rate": 0.00012582096471610467,
"loss": 1.1494,
"step": 356
},
{
"epoch": 0.022692781375392317,
"grad_norm": 0.24074026942253113,
"learning_rate": 0.00012545511100957513,
"loss": 1.2536,
"step": 357
},
{
"epoch": 0.02275634658932899,
"grad_norm": 0.26268255710601807,
"learning_rate": 0.00012508889261793059,
"loss": 1.276,
"step": 358
},
{
"epoch": 0.022819911803265664,
"grad_norm": 0.2453855574131012,
"learning_rate": 0.00012472231478783432,
"loss": 1.1745,
"step": 359
},
{
"epoch": 0.022883477017202336,
"grad_norm": 0.25187695026397705,
"learning_rate": 0.0001243553827710992,
"loss": 1.2707,
"step": 360
},
{
"epoch": 0.022947042231139008,
"grad_norm": 0.2632068991661072,
"learning_rate": 0.00012398810182461228,
"loss": 1.3185,
"step": 361
},
{
"epoch": 0.023010607445075684,
"grad_norm": 0.24560274183750153,
"learning_rate": 0.00012362047721025968,
"loss": 1.1637,
"step": 362
},
{
"epoch": 0.023074172659012356,
"grad_norm": 0.2577439546585083,
"learning_rate": 0.00012325251419485102,
"loss": 1.2254,
"step": 363
},
{
"epoch": 0.023137737872949028,
"grad_norm": 0.24503523111343384,
"learning_rate": 0.00012288421805004414,
"loss": 1.2327,
"step": 364
},
{
"epoch": 0.023201303086885703,
"grad_norm": 0.24148108065128326,
"learning_rate": 0.00012251559405226941,
"loss": 1.2588,
"step": 365
},
{
"epoch": 0.023264868300822375,
"grad_norm": 0.24820099771022797,
"learning_rate": 0.0001221466474826543,
"loss": 1.1931,
"step": 366
},
{
"epoch": 0.023328433514759047,
"grad_norm": 0.24834313988685608,
"learning_rate": 0.00012177738362694757,
"loss": 1.2426,
"step": 367
},
{
"epoch": 0.023391998728695722,
"grad_norm": 0.24435977637767792,
"learning_rate": 0.00012140780777544367,
"loss": 1.3185,
"step": 368
},
{
"epoch": 0.023455563942632394,
"grad_norm": 0.2529852092266083,
"learning_rate": 0.00012103792522290682,
"loss": 1.2934,
"step": 369
},
{
"epoch": 0.023519129156569066,
"grad_norm": 0.2517044246196747,
"learning_rate": 0.00012066774126849529,
"loss": 1.1421,
"step": 370
},
{
"epoch": 0.023582694370505742,
"grad_norm": 0.24829211831092834,
"learning_rate": 0.00012029726121568541,
"loss": 1.2096,
"step": 371
},
{
"epoch": 0.023646259584442414,
"grad_norm": 0.25763294100761414,
"learning_rate": 0.00011992649037219545,
"loss": 1.253,
"step": 372
},
{
"epoch": 0.023709824798379086,
"grad_norm": 0.2500714659690857,
"learning_rate": 0.00011955543404990994,
"loss": 1.2382,
"step": 373
},
{
"epoch": 0.02377339001231576,
"grad_norm": 0.2516911029815674,
"learning_rate": 0.0001191840975648032,
"loss": 1.2695,
"step": 374
},
{
"epoch": 0.023836955226252433,
"grad_norm": 0.2422354817390442,
"learning_rate": 0.00011881248623686338,
"loss": 1.1835,
"step": 375
},
{
"epoch": 0.023900520440189105,
"grad_norm": 0.24256454408168793,
"learning_rate": 0.00011844060539001618,
"loss": 1.1847,
"step": 376
},
{
"epoch": 0.02396408565412578,
"grad_norm": 0.24635834991931915,
"learning_rate": 0.00011806846035204863,
"loss": 1.1659,
"step": 377
},
{
"epoch": 0.024027650868062453,
"grad_norm": 0.24487607181072235,
"learning_rate": 0.00011769605645453265,
"loss": 1.1742,
"step": 378
},
{
"epoch": 0.024091216081999125,
"grad_norm": 0.24941600859165192,
"learning_rate": 0.00011732339903274878,
"loss": 1.272,
"step": 379
},
{
"epoch": 0.0241547812959358,
"grad_norm": 0.251520037651062,
"learning_rate": 0.00011695049342560968,
"loss": 1.2567,
"step": 380
},
{
"epoch": 0.024218346509872472,
"grad_norm": 0.24927428364753723,
"learning_rate": 0.00011657734497558371,
"loss": 1.2295,
"step": 381
},
{
"epoch": 0.024281911723809144,
"grad_norm": 0.266940712928772,
"learning_rate": 0.00011620395902861822,
"loss": 1.2919,
"step": 382
},
{
"epoch": 0.02434547693774582,
"grad_norm": 0.2506616711616516,
"learning_rate": 0.00011583034093406327,
"loss": 1.2859,
"step": 383
},
{
"epoch": 0.024409042151682492,
"grad_norm": 0.24484044313430786,
"learning_rate": 0.00011545649604459466,
"loss": 1.2118,
"step": 384
},
{
"epoch": 0.024472607365619164,
"grad_norm": 0.2639765441417694,
"learning_rate": 0.00011508242971613741,
"loss": 1.3253,
"step": 385
},
{
"epoch": 0.02453617257955584,
"grad_norm": 0.257828950881958,
"learning_rate": 0.00011470814730778905,
"loss": 1.2582,
"step": 386
},
{
"epoch": 0.02459973779349251,
"grad_norm": 0.26992151141166687,
"learning_rate": 0.00011433365418174278,
"loss": 1.3236,
"step": 387
},
{
"epoch": 0.024663303007429183,
"grad_norm": 0.2564980387687683,
"learning_rate": 0.00011395895570321064,
"loss": 1.2659,
"step": 388
},
{
"epoch": 0.02472686822136586,
"grad_norm": 0.2378605455160141,
"learning_rate": 0.00011358405724034676,
"loss": 1.2018,
"step": 389
},
{
"epoch": 0.02479043343530253,
"grad_norm": 0.2514176070690155,
"learning_rate": 0.00011320896416417026,
"loss": 1.1897,
"step": 390
},
{
"epoch": 0.024853998649239203,
"grad_norm": 0.24492673575878143,
"learning_rate": 0.00011283368184848842,
"loss": 1.2224,
"step": 391
},
{
"epoch": 0.024917563863175878,
"grad_norm": 0.23529104888439178,
"learning_rate": 0.00011245821566981976,
"loss": 1.0588,
"step": 392
},
{
"epoch": 0.02498112907711255,
"grad_norm": 0.25407955050468445,
"learning_rate": 0.0001120825710073169,
"loss": 1.3508,
"step": 393
},
{
"epoch": 0.025044694291049222,
"grad_norm": 0.25098952651023865,
"learning_rate": 0.00011170675324268942,
"loss": 1.3079,
"step": 394
},
{
"epoch": 0.025108259504985898,
"grad_norm": 0.2655404806137085,
"learning_rate": 0.000111330767760127,
"loss": 1.2383,
"step": 395
},
{
"epoch": 0.02517182471892257,
"grad_norm": 0.2586209774017334,
"learning_rate": 0.00011095461994622209,
"loss": 1.2128,
"step": 396
},
{
"epoch": 0.02523538993285924,
"grad_norm": 0.24864241480827332,
"learning_rate": 0.0001105783151898928,
"loss": 1.3113,
"step": 397
},
{
"epoch": 0.025298955146795917,
"grad_norm": 0.24880805611610413,
"learning_rate": 0.00011020185888230571,
"loss": 1.263,
"step": 398
},
{
"epoch": 0.02536252036073259,
"grad_norm": 0.2573794424533844,
"learning_rate": 0.00010982525641679859,
"loss": 1.2119,
"step": 399
},
{
"epoch": 0.02542608557466926,
"grad_norm": 0.24325676262378693,
"learning_rate": 0.00010944851318880314,
"loss": 1.3042,
"step": 400
},
{
"epoch": 0.02542608557466926,
"eval_loss": 1.2277675867080688,
"eval_runtime": 1239.5038,
"eval_samples_per_second": 4.034,
"eval_steps_per_second": 1.008,
"step": 400
},
{
"epoch": 0.025489650788605937,
"grad_norm": 0.2504250407218933,
"learning_rate": 0.00010907163459576775,
"loss": 1.1943,
"step": 401
},
{
"epoch": 0.02555321600254261,
"grad_norm": 0.23995766043663025,
"learning_rate": 0.00010869462603708011,
"loss": 1.1737,
"step": 402
},
{
"epoch": 0.02561678121647928,
"grad_norm": 0.2422688603401184,
"learning_rate": 0.00010831749291398984,
"loss": 1.1162,
"step": 403
},
{
"epoch": 0.025680346430415956,
"grad_norm": 0.2618953287601471,
"learning_rate": 0.00010794024062953123,
"loss": 1.2612,
"step": 404
},
{
"epoch": 0.025743911644352628,
"grad_norm": 0.2553127110004425,
"learning_rate": 0.00010756287458844569,
"loss": 1.2236,
"step": 405
},
{
"epoch": 0.0258074768582893,
"grad_norm": 0.24291488528251648,
"learning_rate": 0.00010718540019710432,
"loss": 1.1694,
"step": 406
},
{
"epoch": 0.025871042072225976,
"grad_norm": 0.25561729073524475,
"learning_rate": 0.0001068078228634307,
"loss": 1.224,
"step": 407
},
{
"epoch": 0.025934607286162648,
"grad_norm": 0.25068506598472595,
"learning_rate": 0.00010643014799682296,
"loss": 1.2409,
"step": 408
},
{
"epoch": 0.02599817250009932,
"grad_norm": 0.2648126482963562,
"learning_rate": 0.00010605238100807673,
"loss": 1.1836,
"step": 409
},
{
"epoch": 0.026061737714035995,
"grad_norm": 0.2548812925815582,
"learning_rate": 0.00010567452730930743,
"loss": 1.2615,
"step": 410
},
{
"epoch": 0.026125302927972667,
"grad_norm": 0.24808640778064728,
"learning_rate": 0.00010529659231387263,
"loss": 1.2308,
"step": 411
},
{
"epoch": 0.02618886814190934,
"grad_norm": 0.25301888585090637,
"learning_rate": 0.00010491858143629469,
"loss": 1.2277,
"step": 412
},
{
"epoch": 0.026252433355846014,
"grad_norm": 0.2523147761821747,
"learning_rate": 0.00010454050009218313,
"loss": 1.2519,
"step": 413
},
{
"epoch": 0.026315998569782686,
"grad_norm": 0.2423991709947586,
"learning_rate": 0.00010416235369815693,
"loss": 1.211,
"step": 414
},
{
"epoch": 0.02637956378371936,
"grad_norm": 0.24331238865852356,
"learning_rate": 0.00010378414767176705,
"loss": 1.1111,
"step": 415
},
{
"epoch": 0.026443128997656034,
"grad_norm": 0.2526540756225586,
"learning_rate": 0.00010340588743141879,
"loss": 1.2619,
"step": 416
},
{
"epoch": 0.026506694211592706,
"grad_norm": 0.23379378020763397,
"learning_rate": 0.00010302757839629416,
"loss": 1.2193,
"step": 417
},
{
"epoch": 0.026570259425529378,
"grad_norm": 0.2350548952817917,
"learning_rate": 0.00010264922598627418,
"loss": 1.266,
"step": 418
},
{
"epoch": 0.026633824639466053,
"grad_norm": 0.25372815132141113,
"learning_rate": 0.00010227083562186135,
"loss": 1.1881,
"step": 419
},
{
"epoch": 0.026697389853402725,
"grad_norm": 0.26083245873451233,
"learning_rate": 0.0001018924127241019,
"loss": 1.1722,
"step": 420
},
{
"epoch": 0.026760955067339397,
"grad_norm": 0.2449260652065277,
"learning_rate": 0.00010151396271450817,
"loss": 1.2495,
"step": 421
},
{
"epoch": 0.026824520281276073,
"grad_norm": 0.2520759105682373,
"learning_rate": 0.00010113549101498086,
"loss": 1.0433,
"step": 422
},
{
"epoch": 0.026888085495212745,
"grad_norm": 0.2503208518028259,
"learning_rate": 0.00010075700304773148,
"loss": 1.2593,
"step": 423
},
{
"epoch": 0.026951650709149417,
"grad_norm": 0.25252294540405273,
"learning_rate": 0.00010037850423520454,
"loss": 1.2165,
"step": 424
},
{
"epoch": 0.027015215923086092,
"grad_norm": 0.25652945041656494,
"learning_rate": 0.0001,
"loss": 1.3014,
"step": 425
},
{
"epoch": 0.027078781137022764,
"grad_norm": 0.2584279775619507,
"learning_rate": 9.962149576479545e-05,
"loss": 1.2832,
"step": 426
},
{
"epoch": 0.027142346350959436,
"grad_norm": 0.2617090344429016,
"learning_rate": 9.924299695226857e-05,
"loss": 1.2049,
"step": 427
},
{
"epoch": 0.027205911564896112,
"grad_norm": 0.2533385157585144,
"learning_rate": 9.886450898501917e-05,
"loss": 1.2449,
"step": 428
},
{
"epoch": 0.027269476778832784,
"grad_norm": 0.251128226518631,
"learning_rate": 9.848603728549185e-05,
"loss": 1.1209,
"step": 429
},
{
"epoch": 0.027333041992769456,
"grad_norm": 0.256508469581604,
"learning_rate": 9.810758727589813e-05,
"loss": 1.2408,
"step": 430
},
{
"epoch": 0.02739660720670613,
"grad_norm": 0.24774308502674103,
"learning_rate": 9.772916437813868e-05,
"loss": 1.2901,
"step": 431
},
{
"epoch": 0.027460172420642803,
"grad_norm": 0.24494284391403198,
"learning_rate": 9.735077401372583e-05,
"loss": 1.2019,
"step": 432
},
{
"epoch": 0.027523737634579475,
"grad_norm": 0.24128341674804688,
"learning_rate": 9.697242160370589e-05,
"loss": 1.196,
"step": 433
},
{
"epoch": 0.02758730284851615,
"grad_norm": 0.24446694552898407,
"learning_rate": 9.659411256858122e-05,
"loss": 1.3256,
"step": 434
},
{
"epoch": 0.027650868062452823,
"grad_norm": 0.22289952635765076,
"learning_rate": 9.621585232823298e-05,
"loss": 1.0862,
"step": 435
},
{
"epoch": 0.027714433276389495,
"grad_norm": 0.25129738450050354,
"learning_rate": 9.583764630184311e-05,
"loss": 1.2579,
"step": 436
},
{
"epoch": 0.02777799849032617,
"grad_norm": 0.25697946548461914,
"learning_rate": 9.54594999078169e-05,
"loss": 1.2646,
"step": 437
},
{
"epoch": 0.027841563704262842,
"grad_norm": 0.2540769577026367,
"learning_rate": 9.508141856370532e-05,
"loss": 1.3544,
"step": 438
},
{
"epoch": 0.027905128918199514,
"grad_norm": 0.24931739270687103,
"learning_rate": 9.470340768612742e-05,
"loss": 1.2314,
"step": 439
},
{
"epoch": 0.02796869413213619,
"grad_norm": 0.25047457218170166,
"learning_rate": 9.432547269069261e-05,
"loss": 1.2619,
"step": 440
},
{
"epoch": 0.02803225934607286,
"grad_norm": 0.24546508491039276,
"learning_rate": 9.394761899192327e-05,
"loss": 1.1973,
"step": 441
},
{
"epoch": 0.028095824560009534,
"grad_norm": 0.25607752799987793,
"learning_rate": 9.356985200317709e-05,
"loss": 1.2323,
"step": 442
},
{
"epoch": 0.02815938977394621,
"grad_norm": 0.2613637149333954,
"learning_rate": 9.319217713656934e-05,
"loss": 1.1984,
"step": 443
},
{
"epoch": 0.02822295498788288,
"grad_norm": 0.24950894713401794,
"learning_rate": 9.281459980289567e-05,
"loss": 1.1618,
"step": 444
},
{
"epoch": 0.028286520201819553,
"grad_norm": 0.24787862598896027,
"learning_rate": 9.243712541155436e-05,
"loss": 1.2381,
"step": 445
},
{
"epoch": 0.02835008541575623,
"grad_norm": 0.2636643946170807,
"learning_rate": 9.205975937046879e-05,
"loss": 1.1979,
"step": 446
},
{
"epoch": 0.0284136506296929,
"grad_norm": 0.25856512784957886,
"learning_rate": 9.168250708601017e-05,
"loss": 1.2338,
"step": 447
},
{
"epoch": 0.028477215843629573,
"grad_norm": 0.2551221251487732,
"learning_rate": 9.130537396291994e-05,
"loss": 1.2167,
"step": 448
},
{
"epoch": 0.028540781057566248,
"grad_norm": 0.26933833956718445,
"learning_rate": 9.092836540423227e-05,
"loss": 1.0898,
"step": 449
},
{
"epoch": 0.02860434627150292,
"grad_norm": 0.2525901198387146,
"learning_rate": 9.055148681119688e-05,
"loss": 1.2945,
"step": 450
},
{
"epoch": 0.028667911485439592,
"grad_norm": 0.25787627696990967,
"learning_rate": 9.017474358320144e-05,
"loss": 1.2626,
"step": 451
},
{
"epoch": 0.028731476699376268,
"grad_norm": 0.25059428811073303,
"learning_rate": 8.979814111769431e-05,
"loss": 1.1074,
"step": 452
},
{
"epoch": 0.02879504191331294,
"grad_norm": 0.2509251832962036,
"learning_rate": 8.94216848101072e-05,
"loss": 1.2628,
"step": 453
},
{
"epoch": 0.02885860712724961,
"grad_norm": 0.26459380984306335,
"learning_rate": 8.904538005377794e-05,
"loss": 1.2641,
"step": 454
},
{
"epoch": 0.028922172341186287,
"grad_norm": 0.2545078694820404,
"learning_rate": 8.866923223987302e-05,
"loss": 1.2532,
"step": 455
},
{
"epoch": 0.02898573755512296,
"grad_norm": 0.269462525844574,
"learning_rate": 8.829324675731059e-05,
"loss": 1.1667,
"step": 456
},
{
"epoch": 0.02904930276905963,
"grad_norm": 0.2679223418235779,
"learning_rate": 8.791742899268315e-05,
"loss": 1.3168,
"step": 457
},
{
"epoch": 0.029112867982996306,
"grad_norm": 0.25728389620780945,
"learning_rate": 8.754178433018025e-05,
"loss": 1.1545,
"step": 458
},
{
"epoch": 0.02917643319693298,
"grad_norm": 0.2632697820663452,
"learning_rate": 8.716631815151158e-05,
"loss": 1.2874,
"step": 459
},
{
"epoch": 0.02923999841086965,
"grad_norm": 0.24366050958633423,
"learning_rate": 8.679103583582979e-05,
"loss": 1.2196,
"step": 460
},
{
"epoch": 0.029303563624806326,
"grad_norm": 0.24694758653640747,
"learning_rate": 8.641594275965327e-05,
"loss": 1.1856,
"step": 461
},
{
"epoch": 0.029367128838742998,
"grad_norm": 0.26368018984794617,
"learning_rate": 8.604104429678935e-05,
"loss": 1.2568,
"step": 462
},
{
"epoch": 0.02943069405267967,
"grad_norm": 0.2524515390396118,
"learning_rate": 8.566634581825727e-05,
"loss": 1.2232,
"step": 463
},
{
"epoch": 0.029494259266616345,
"grad_norm": 0.26754358410835266,
"learning_rate": 8.529185269221097e-05,
"loss": 1.1847,
"step": 464
},
{
"epoch": 0.029557824480553017,
"grad_norm": 0.2522285282611847,
"learning_rate": 8.491757028386263e-05,
"loss": 1.1285,
"step": 465
},
{
"epoch": 0.02962138969448969,
"grad_norm": 0.25777748227119446,
"learning_rate": 8.45435039554054e-05,
"loss": 1.2401,
"step": 466
},
{
"epoch": 0.029684954908426365,
"grad_norm": 0.23897330462932587,
"learning_rate": 8.416965906593675e-05,
"loss": 1.1599,
"step": 467
},
{
"epoch": 0.029748520122363037,
"grad_norm": 0.25694599747657776,
"learning_rate": 8.379604097138179e-05,
"loss": 1.1386,
"step": 468
},
{
"epoch": 0.02981208533629971,
"grad_norm": 0.2588801980018616,
"learning_rate": 8.342265502441636e-05,
"loss": 1.2106,
"step": 469
},
{
"epoch": 0.029875650550236384,
"grad_norm": 0.2556898593902588,
"learning_rate": 8.304950657439033e-05,
"loss": 1.2324,
"step": 470
},
{
"epoch": 0.029939215764173056,
"grad_norm": 0.2549362778663635,
"learning_rate": 8.267660096725123e-05,
"loss": 1.253,
"step": 471
},
{
"epoch": 0.03000278097810973,
"grad_norm": 0.24882453680038452,
"learning_rate": 8.230394354546737e-05,
"loss": 1.1547,
"step": 472
},
{
"epoch": 0.030066346192046404,
"grad_norm": 0.25553610920906067,
"learning_rate": 8.193153964795139e-05,
"loss": 1.1983,
"step": 473
},
{
"epoch": 0.030129911405983076,
"grad_norm": 0.2545006275177002,
"learning_rate": 8.15593946099838e-05,
"loss": 1.211,
"step": 474
},
{
"epoch": 0.030193476619919748,
"grad_norm": 0.261358380317688,
"learning_rate": 8.118751376313664e-05,
"loss": 1.2255,
"step": 475
},
{
"epoch": 0.030257041833856423,
"grad_norm": 0.24736320972442627,
"learning_rate": 8.08159024351968e-05,
"loss": 1.1618,
"step": 476
},
{
"epoch": 0.030320607047793095,
"grad_norm": 0.2506306767463684,
"learning_rate": 8.044456595009006e-05,
"loss": 1.2803,
"step": 477
},
{
"epoch": 0.030384172261729767,
"grad_norm": 0.25885093212127686,
"learning_rate": 8.007350962780456e-05,
"loss": 1.2624,
"step": 478
},
{
"epoch": 0.030447737475666443,
"grad_norm": 0.26026806235313416,
"learning_rate": 7.970273878431463e-05,
"loss": 1.1407,
"step": 479
},
{
"epoch": 0.030511302689603115,
"grad_norm": 0.24689649045467377,
"learning_rate": 7.93322587315047e-05,
"loss": 1.2858,
"step": 480
},
{
"epoch": 0.030574867903539787,
"grad_norm": 0.24269208312034607,
"learning_rate": 7.89620747770932e-05,
"loss": 1.2273,
"step": 481
},
{
"epoch": 0.030638433117476462,
"grad_norm": 0.2571215331554413,
"learning_rate": 7.859219222455634e-05,
"loss": 1.1898,
"step": 482
},
{
"epoch": 0.030701998331413134,
"grad_norm": 0.24754567444324493,
"learning_rate": 7.822261637305242e-05,
"loss": 1.2594,
"step": 483
},
{
"epoch": 0.030765563545349806,
"grad_norm": 0.2660878300666809,
"learning_rate": 7.785335251734573e-05,
"loss": 1.314,
"step": 484
},
{
"epoch": 0.03082912875928648,
"grad_norm": 0.2501409351825714,
"learning_rate": 7.74844059477306e-05,
"loss": 1.1022,
"step": 485
},
{
"epoch": 0.030892693973223154,
"grad_norm": 0.2525741755962372,
"learning_rate": 7.711578194995589e-05,
"loss": 1.27,
"step": 486
},
{
"epoch": 0.030956259187159826,
"grad_norm": 0.25781622529029846,
"learning_rate": 7.674748580514899e-05,
"loss": 1.1116,
"step": 487
},
{
"epoch": 0.0310198244010965,
"grad_norm": 0.2542114555835724,
"learning_rate": 7.637952278974034e-05,
"loss": 1.2904,
"step": 488
},
{
"epoch": 0.031083389615033173,
"grad_norm": 0.25267302989959717,
"learning_rate": 7.60118981753877e-05,
"loss": 1.3022,
"step": 489
},
{
"epoch": 0.031146954828969845,
"grad_norm": 0.27163398265838623,
"learning_rate": 7.564461722890081e-05,
"loss": 1.2724,
"step": 490
},
{
"epoch": 0.03121052004290652,
"grad_norm": 0.25189656019210815,
"learning_rate": 7.527768521216568e-05,
"loss": 1.1929,
"step": 491
},
{
"epoch": 0.03127408525684319,
"grad_norm": 0.24594004452228546,
"learning_rate": 7.491110738206942e-05,
"loss": 1.063,
"step": 492
},
{
"epoch": 0.03133765047077987,
"grad_norm": 0.25397610664367676,
"learning_rate": 7.45448889904249e-05,
"loss": 1.2254,
"step": 493
},
{
"epoch": 0.03140121568471654,
"grad_norm": 0.2588002383708954,
"learning_rate": 7.417903528389534e-05,
"loss": 1.187,
"step": 494
},
{
"epoch": 0.03146478089865321,
"grad_norm": 0.2593894898891449,
"learning_rate": 7.381355150391933e-05,
"loss": 1.2026,
"step": 495
},
{
"epoch": 0.03152834611258989,
"grad_norm": 0.2586647868156433,
"learning_rate": 7.344844288663571e-05,
"loss": 1.2351,
"step": 496
},
{
"epoch": 0.031591911326526556,
"grad_norm": 0.25638607144355774,
"learning_rate": 7.30837146628084e-05,
"loss": 1.2891,
"step": 497
},
{
"epoch": 0.03165547654046323,
"grad_norm": 0.26439526677131653,
"learning_rate": 7.27193720577517e-05,
"loss": 1.2405,
"step": 498
},
{
"epoch": 0.03171904175439991,
"grad_norm": 0.26030415296554565,
"learning_rate": 7.235542029125527e-05,
"loss": 1.3782,
"step": 499
},
{
"epoch": 0.031782606968336576,
"grad_norm": 0.26349353790283203,
"learning_rate": 7.19918645775093e-05,
"loss": 1.1462,
"step": 500
},
{
"epoch": 0.031782606968336576,
"eval_loss": 1.2162970304489136,
"eval_runtime": 1239.1136,
"eval_samples_per_second": 4.035,
"eval_steps_per_second": 1.009,
"step": 500
},
{
"epoch": 0.03184617218227325,
"grad_norm": 0.26652681827545166,
"learning_rate": 7.162871012503003e-05,
"loss": 1.2841,
"step": 501
},
{
"epoch": 0.031909737396209926,
"grad_norm": 0.258722722530365,
"learning_rate": 7.126596213658488e-05,
"loss": 1.3076,
"step": 502
},
{
"epoch": 0.031973302610146595,
"grad_norm": 0.2519145607948303,
"learning_rate": 7.090362580911808e-05,
"loss": 1.3191,
"step": 503
},
{
"epoch": 0.03203686782408327,
"grad_norm": 0.24408216774463654,
"learning_rate": 7.05417063336761e-05,
"loss": 1.2056,
"step": 504
},
{
"epoch": 0.032100433038019946,
"grad_norm": 0.25859251618385315,
"learning_rate": 7.018020889533348e-05,
"loss": 1.2509,
"step": 505
},
{
"epoch": 0.032163998251956614,
"grad_norm": 0.274295836687088,
"learning_rate": 6.981913867311819e-05,
"loss": 1.1457,
"step": 506
},
{
"epoch": 0.03222756346589329,
"grad_norm": 0.25392991304397583,
"learning_rate": 6.945850083993781e-05,
"loss": 1.1389,
"step": 507
},
{
"epoch": 0.032291128679829965,
"grad_norm": 0.2518613338470459,
"learning_rate": 6.909830056250527e-05,
"loss": 1.1288,
"step": 508
},
{
"epoch": 0.032354693893766634,
"grad_norm": 0.25153687596321106,
"learning_rate": 6.873854300126467e-05,
"loss": 1.1106,
"step": 509
},
{
"epoch": 0.03241825910770331,
"grad_norm": 0.2594717741012573,
"learning_rate": 6.83792333103176e-05,
"loss": 1.2556,
"step": 510
},
{
"epoch": 0.032481824321639985,
"grad_norm": 0.2567812204360962,
"learning_rate": 6.802037663734923e-05,
"loss": 1.1773,
"step": 511
},
{
"epoch": 0.03254538953557665,
"grad_norm": 0.2533416748046875,
"learning_rate": 6.766197812355438e-05,
"loss": 1.2053,
"step": 512
},
{
"epoch": 0.03260895474951333,
"grad_norm": 0.2625131607055664,
"learning_rate": 6.73040429035641e-05,
"loss": 1.2467,
"step": 513
},
{
"epoch": 0.032672519963450004,
"grad_norm": 0.2665756344795227,
"learning_rate": 6.69465761053721e-05,
"loss": 1.2737,
"step": 514
},
{
"epoch": 0.03273608517738667,
"grad_norm": 0.2558302581310272,
"learning_rate": 6.658958285026102e-05,
"loss": 1.2684,
"step": 515
},
{
"epoch": 0.03279965039132335,
"grad_norm": 0.24979649484157562,
"learning_rate": 6.623306825272937e-05,
"loss": 1.2144,
"step": 516
},
{
"epoch": 0.032863215605260024,
"grad_norm": 0.25010988116264343,
"learning_rate": 6.58770374204181e-05,
"loss": 1.2679,
"step": 517
},
{
"epoch": 0.03292678081919669,
"grad_norm": 0.2494756430387497,
"learning_rate": 6.552149545403739e-05,
"loss": 1.2291,
"step": 518
},
{
"epoch": 0.03299034603313337,
"grad_norm": 0.2528434097766876,
"learning_rate": 6.516644744729367e-05,
"loss": 1.2519,
"step": 519
},
{
"epoch": 0.03305391124707004,
"grad_norm": 0.2631024718284607,
"learning_rate": 6.48118984868167e-05,
"loss": 1.2086,
"step": 520
},
{
"epoch": 0.03311747646100671,
"grad_norm": 0.2514025866985321,
"learning_rate": 6.445785365208645e-05,
"loss": 1.2098,
"step": 521
},
{
"epoch": 0.03318104167494339,
"grad_norm": 0.25096845626831055,
"learning_rate": 6.410431801536058e-05,
"loss": 1.2062,
"step": 522
},
{
"epoch": 0.03324460688888006,
"grad_norm": 0.24552412331104279,
"learning_rate": 6.375129664160168e-05,
"loss": 1.139,
"step": 523
},
{
"epoch": 0.03330817210281673,
"grad_norm": 0.257235586643219,
"learning_rate": 6.339879458840465e-05,
"loss": 1.3102,
"step": 524
},
{
"epoch": 0.03337173731675341,
"grad_norm": 0.24831882119178772,
"learning_rate": 6.304681690592431e-05,
"loss": 1.1848,
"step": 525
},
{
"epoch": 0.03343530253069008,
"grad_norm": 0.2522595524787903,
"learning_rate": 6.269536863680307e-05,
"loss": 1.228,
"step": 526
},
{
"epoch": 0.03349886774462675,
"grad_norm": 0.25006258487701416,
"learning_rate": 6.23444548160986e-05,
"loss": 1.1059,
"step": 527
},
{
"epoch": 0.033562432958563426,
"grad_norm": 0.2498662769794464,
"learning_rate": 6.199408047121174e-05,
"loss": 1.2354,
"step": 528
},
{
"epoch": 0.0336259981725001,
"grad_norm": 0.25638020038604736,
"learning_rate": 6.16442506218146e-05,
"loss": 1.2332,
"step": 529
},
{
"epoch": 0.03368956338643677,
"grad_norm": 0.26172903180122375,
"learning_rate": 6.129497027977829e-05,
"loss": 1.2062,
"step": 530
},
{
"epoch": 0.033753128600373446,
"grad_norm": 0.2451334148645401,
"learning_rate": 6.0946244449101574e-05,
"loss": 1.2513,
"step": 531
},
{
"epoch": 0.03381669381431012,
"grad_norm": 0.2550620436668396,
"learning_rate": 6.059807812583883e-05,
"loss": 1.08,
"step": 532
},
{
"epoch": 0.03388025902824679,
"grad_norm": 0.2467305064201355,
"learning_rate": 6.02504762980286e-05,
"loss": 1.1822,
"step": 533
},
{
"epoch": 0.033943824242183465,
"grad_norm": 0.2539975345134735,
"learning_rate": 5.990344394562226e-05,
"loss": 1.2334,
"step": 534
},
{
"epoch": 0.03400738945612014,
"grad_norm": 0.26874592900276184,
"learning_rate": 5.955698604041231e-05,
"loss": 1.2736,
"step": 535
},
{
"epoch": 0.03407095467005681,
"grad_norm": 0.25802844762802124,
"learning_rate": 5.92111075459616e-05,
"loss": 1.1959,
"step": 536
},
{
"epoch": 0.034134519883993485,
"grad_norm": 0.2507290542125702,
"learning_rate": 5.88658134175319e-05,
"loss": 1.2045,
"step": 537
},
{
"epoch": 0.03419808509793016,
"grad_norm": 0.25168654322624207,
"learning_rate": 5.852110860201294e-05,
"loss": 1.259,
"step": 538
},
{
"epoch": 0.03426165031186683,
"grad_norm": 0.25015729665756226,
"learning_rate": 5.817699803785174e-05,
"loss": 1.0865,
"step": 539
},
{
"epoch": 0.034325215525803504,
"grad_norm": 0.24665945768356323,
"learning_rate": 5.7833486654981606e-05,
"loss": 1.1366,
"step": 540
},
{
"epoch": 0.03438878073974018,
"grad_norm": 0.2521713972091675,
"learning_rate": 5.7490579374751686e-05,
"loss": 1.2052,
"step": 541
},
{
"epoch": 0.03445234595367685,
"grad_norm": 0.24369041621685028,
"learning_rate": 5.714828110985635e-05,
"loss": 1.1564,
"step": 542
},
{
"epoch": 0.034515911167613524,
"grad_norm": 0.2600708603858948,
"learning_rate": 5.6806596764264874e-05,
"loss": 1.1852,
"step": 543
},
{
"epoch": 0.0345794763815502,
"grad_norm": 0.25693967938423157,
"learning_rate": 5.6465531233151126e-05,
"loss": 1.1887,
"step": 544
},
{
"epoch": 0.03464304159548687,
"grad_norm": 0.24953435361385345,
"learning_rate": 5.6125089402823485e-05,
"loss": 1.0897,
"step": 545
},
{
"epoch": 0.03470660680942354,
"grad_norm": 0.2587718665599823,
"learning_rate": 5.578527615065492e-05,
"loss": 1.1345,
"step": 546
},
{
"epoch": 0.03477017202336022,
"grad_norm": 0.2567615807056427,
"learning_rate": 5.544609634501279e-05,
"loss": 1.277,
"step": 547
},
{
"epoch": 0.03483373723729689,
"grad_norm": 0.25938692688941956,
"learning_rate": 5.510755484518955e-05,
"loss": 1.2087,
"step": 548
},
{
"epoch": 0.03489730245123356,
"grad_norm": 0.25982430577278137,
"learning_rate": 5.476965650133279e-05,
"loss": 1.2554,
"step": 549
},
{
"epoch": 0.03496086766517024,
"grad_norm": 0.25441205501556396,
"learning_rate": 5.443240615437586e-05,
"loss": 1.1437,
"step": 550
},
{
"epoch": 0.035024432879106907,
"grad_norm": 0.2553006410598755,
"learning_rate": 5.4095808635968546e-05,
"loss": 1.2544,
"step": 551
},
{
"epoch": 0.03508799809304358,
"grad_norm": 0.2520417273044586,
"learning_rate": 5.375986876840784e-05,
"loss": 1.0661,
"step": 552
},
{
"epoch": 0.03515156330698026,
"grad_norm": 0.2597469091415405,
"learning_rate": 5.342459136456881e-05,
"loss": 1.1732,
"step": 553
},
{
"epoch": 0.035215128520916926,
"grad_norm": 0.2597144544124603,
"learning_rate": 5.30899812278356e-05,
"loss": 1.2128,
"step": 554
},
{
"epoch": 0.0352786937348536,
"grad_norm": 0.2565470039844513,
"learning_rate": 5.275604315203293e-05,
"loss": 1.1585,
"step": 555
},
{
"epoch": 0.03534225894879028,
"grad_norm": 0.24314232170581818,
"learning_rate": 5.2422781921356826e-05,
"loss": 1.0955,
"step": 556
},
{
"epoch": 0.035405824162726945,
"grad_norm": 0.24443262815475464,
"learning_rate": 5.209020231030672e-05,
"loss": 1.1649,
"step": 557
},
{
"epoch": 0.03546938937666362,
"grad_norm": 0.26083680987358093,
"learning_rate": 5.1758309083616673e-05,
"loss": 1.2195,
"step": 558
},
{
"epoch": 0.035532954590600296,
"grad_norm": 0.25181084871292114,
"learning_rate": 5.142710699618701e-05,
"loss": 1.2411,
"step": 559
},
{
"epoch": 0.035596519804536965,
"grad_norm": 0.24913780391216278,
"learning_rate": 5.109660079301668e-05,
"loss": 1.2251,
"step": 560
},
{
"epoch": 0.03566008501847364,
"grad_norm": 0.2578226625919342,
"learning_rate": 5.076679520913479e-05,
"loss": 1.1685,
"step": 561
},
{
"epoch": 0.035723650232410316,
"grad_norm": 0.25115758180618286,
"learning_rate": 5.043769496953299e-05,
"loss": 1.0909,
"step": 562
},
{
"epoch": 0.035787215446346984,
"grad_norm": 0.24539780616760254,
"learning_rate": 5.010930478909779e-05,
"loss": 1.221,
"step": 563
},
{
"epoch": 0.03585078066028366,
"grad_norm": 0.26085364818573,
"learning_rate": 4.9781629372542895e-05,
"loss": 1.2141,
"step": 564
},
{
"epoch": 0.035914345874220335,
"grad_norm": 0.25367042422294617,
"learning_rate": 4.945467341434195e-05,
"loss": 1.2999,
"step": 565
},
{
"epoch": 0.035977911088157004,
"grad_norm": 0.27679672837257385,
"learning_rate": 4.912844159866112e-05,
"loss": 1.2494,
"step": 566
},
{
"epoch": 0.03604147630209368,
"grad_norm": 0.2423914223909378,
"learning_rate": 4.880293859929227e-05,
"loss": 1.1681,
"step": 567
},
{
"epoch": 0.036105041516030355,
"grad_norm": 0.24873730540275574,
"learning_rate": 4.847816907958549e-05,
"loss": 1.2964,
"step": 568
},
{
"epoch": 0.03616860672996702,
"grad_norm": 0.2618268132209778,
"learning_rate": 4.8154137692382907e-05,
"loss": 1.2184,
"step": 569
},
{
"epoch": 0.0362321719439037,
"grad_norm": 0.2559436857700348,
"learning_rate": 4.783084907995156e-05,
"loss": 1.216,
"step": 570
},
{
"epoch": 0.036295737157840374,
"grad_norm": 0.2557474970817566,
"learning_rate": 4.750830787391708e-05,
"loss": 1.2993,
"step": 571
},
{
"epoch": 0.03635930237177704,
"grad_norm": 0.25075972080230713,
"learning_rate": 4.718651869519731e-05,
"loss": 1.1178,
"step": 572
},
{
"epoch": 0.03642286758571372,
"grad_norm": 0.2534898817539215,
"learning_rate": 4.686548615393613e-05,
"loss": 1.1891,
"step": 573
},
{
"epoch": 0.036486432799650394,
"grad_norm": 0.26913774013519287,
"learning_rate": 4.654521484943735e-05,
"loss": 1.2552,
"step": 574
},
{
"epoch": 0.03654999801358706,
"grad_norm": 0.2566869556903839,
"learning_rate": 4.622570937009879e-05,
"loss": 1.3527,
"step": 575
},
{
"epoch": 0.03661356322752374,
"grad_norm": 0.26296791434288025,
"learning_rate": 4.59069742933468e-05,
"loss": 1.2464,
"step": 576
},
{
"epoch": 0.03667712844146041,
"grad_norm": 0.261929988861084,
"learning_rate": 4.558901418557021e-05,
"loss": 1.2744,
"step": 577
},
{
"epoch": 0.03674069365539708,
"grad_norm": 0.252572238445282,
"learning_rate": 4.527183360205541e-05,
"loss": 1.112,
"step": 578
},
{
"epoch": 0.03680425886933376,
"grad_norm": 0.27232858538627625,
"learning_rate": 4.495543708692075e-05,
"loss": 1.2743,
"step": 579
},
{
"epoch": 0.03686782408327043,
"grad_norm": 0.2609007656574249,
"learning_rate": 4.4639829173051554e-05,
"loss": 1.3188,
"step": 580
},
{
"epoch": 0.0369313892972071,
"grad_norm": 0.2632231116294861,
"learning_rate": 4.43250143820352e-05,
"loss": 1.1243,
"step": 581
},
{
"epoch": 0.03699495451114378,
"grad_norm": 0.2551668882369995,
"learning_rate": 4.401099722409631e-05,
"loss": 1.1864,
"step": 582
},
{
"epoch": 0.03705851972508045,
"grad_norm": 0.2566946744918823,
"learning_rate": 4.369778219803211e-05,
"loss": 1.2117,
"step": 583
},
{
"epoch": 0.03712208493901712,
"grad_norm": 0.2648017704486847,
"learning_rate": 4.338537379114801e-05,
"loss": 1.2357,
"step": 584
},
{
"epoch": 0.037185650152953796,
"grad_norm": 0.24553461372852325,
"learning_rate": 4.307377647919343e-05,
"loss": 1.1774,
"step": 585
},
{
"epoch": 0.03724921536689047,
"grad_norm": 0.24831490218639374,
"learning_rate": 4.2762994726297346e-05,
"loss": 1.1336,
"step": 586
},
{
"epoch": 0.03731278058082714,
"grad_norm": 0.2644321024417877,
"learning_rate": 4.245303298490467e-05,
"loss": 1.1865,
"step": 587
},
{
"epoch": 0.037376345794763816,
"grad_norm": 0.2654249370098114,
"learning_rate": 4.2143895695712444e-05,
"loss": 1.1872,
"step": 588
},
{
"epoch": 0.03743991100870049,
"grad_norm": 0.24722780287265778,
"learning_rate": 4.183558728760586e-05,
"loss": 1.1609,
"step": 589
},
{
"epoch": 0.03750347622263716,
"grad_norm": 0.2560960054397583,
"learning_rate": 4.152811217759529e-05,
"loss": 1.1897,
"step": 590
},
{
"epoch": 0.037567041436573835,
"grad_norm": 0.24647943675518036,
"learning_rate": 4.12214747707527e-05,
"loss": 1.1098,
"step": 591
},
{
"epoch": 0.03763060665051051,
"grad_norm": 0.2763408422470093,
"learning_rate": 4.091567946014858e-05,
"loss": 1.2313,
"step": 592
},
{
"epoch": 0.03769417186444718,
"grad_norm": 0.254727303981781,
"learning_rate": 4.061073062678912e-05,
"loss": 1.2363,
"step": 593
},
{
"epoch": 0.037757737078383855,
"grad_norm": 0.2722640037536621,
"learning_rate": 4.0306632639553323e-05,
"loss": 1.1633,
"step": 594
},
{
"epoch": 0.03782130229232053,
"grad_norm": 0.2594556212425232,
"learning_rate": 4.000338985513046e-05,
"loss": 1.224,
"step": 595
},
{
"epoch": 0.0378848675062572,
"grad_norm": 0.25068074464797974,
"learning_rate": 3.970100661795766e-05,
"loss": 1.1809,
"step": 596
},
{
"epoch": 0.037948432720193874,
"grad_norm": 0.2550009787082672,
"learning_rate": 3.9399487260157766e-05,
"loss": 1.2022,
"step": 597
},
{
"epoch": 0.03801199793413055,
"grad_norm": 0.2593897879123688,
"learning_rate": 3.909883610147696e-05,
"loss": 1.3124,
"step": 598
},
{
"epoch": 0.03807556314806722,
"grad_norm": 0.2558402717113495,
"learning_rate": 3.879905744922329e-05,
"loss": 1.1618,
"step": 599
},
{
"epoch": 0.03813912836200389,
"grad_norm": 0.25934746861457825,
"learning_rate": 3.8500155598204644e-05,
"loss": 1.0083,
"step": 600
},
{
"epoch": 0.03813912836200389,
"eval_loss": 1.2089511156082153,
"eval_runtime": 1239.249,
"eval_samples_per_second": 4.035,
"eval_steps_per_second": 1.009,
"step": 600
},
{
"epoch": 0.03820269357594057,
"grad_norm": 0.2633255422115326,
"learning_rate": 3.820213483066737e-05,
"loss": 1.1605,
"step": 601
},
{
"epoch": 0.03826625878987724,
"grad_norm": 0.24185825884342194,
"learning_rate": 3.7904999416234864e-05,
"loss": 1.2412,
"step": 602
},
{
"epoch": 0.03832982400381391,
"grad_norm": 0.26332393288612366,
"learning_rate": 3.7608753611846446e-05,
"loss": 1.222,
"step": 603
},
{
"epoch": 0.03839338921775059,
"grad_norm": 0.258789986371994,
"learning_rate": 3.731340166169635e-05,
"loss": 1.105,
"step": 604
},
{
"epoch": 0.03845695443168726,
"grad_norm": 0.2542060315608978,
"learning_rate": 3.701894779717286e-05,
"loss": 1.149,
"step": 605
},
{
"epoch": 0.03852051964562393,
"grad_norm": 0.2608526647090912,
"learning_rate": 3.6725396236797935e-05,
"loss": 1.1996,
"step": 606
},
{
"epoch": 0.03858408485956061,
"grad_norm": 0.24727903306484222,
"learning_rate": 3.64327511861663e-05,
"loss": 1.1608,
"step": 607
},
{
"epoch": 0.038647650073497276,
"grad_norm": 0.2504411041736603,
"learning_rate": 3.614101683788575e-05,
"loss": 1.1245,
"step": 608
},
{
"epoch": 0.03871121528743395,
"grad_norm": 0.26074525713920593,
"learning_rate": 3.585019737151669e-05,
"loss": 1.1894,
"step": 609
},
{
"epoch": 0.03877478050137063,
"grad_norm": 0.2569214105606079,
"learning_rate": 3.5560296953512295e-05,
"loss": 1.2079,
"step": 610
},
{
"epoch": 0.038838345715307296,
"grad_norm": 0.2611664831638336,
"learning_rate": 3.52713197371591e-05,
"loss": 1.1224,
"step": 611
},
{
"epoch": 0.03890191092924397,
"grad_norm": 0.25584879517555237,
"learning_rate": 3.498326986251717e-05,
"loss": 1.3047,
"step": 612
},
{
"epoch": 0.03896547614318065,
"grad_norm": 0.24464906752109528,
"learning_rate": 3.4696151456360956e-05,
"loss": 1.0912,
"step": 613
},
{
"epoch": 0.039029041357117315,
"grad_norm": 0.25548434257507324,
"learning_rate": 3.4409968632120126e-05,
"loss": 1.2166,
"step": 614
},
{
"epoch": 0.03909260657105399,
"grad_norm": 0.25605612993240356,
"learning_rate": 3.4124725489820645e-05,
"loss": 1.2629,
"step": 615
},
{
"epoch": 0.039156171784990666,
"grad_norm": 0.24373793601989746,
"learning_rate": 3.3840426116026044e-05,
"loss": 1.1917,
"step": 616
},
{
"epoch": 0.039219736998927335,
"grad_norm": 0.24588941037654877,
"learning_rate": 3.3557074583778814e-05,
"loss": 1.286,
"step": 617
},
{
"epoch": 0.03928330221286401,
"grad_norm": 0.25556549429893494,
"learning_rate": 3.327467495254225e-05,
"loss": 1.2295,
"step": 618
},
{
"epoch": 0.039346867426800686,
"grad_norm": 0.2570589780807495,
"learning_rate": 3.299323126814191e-05,
"loss": 1.2417,
"step": 619
},
{
"epoch": 0.039410432640737354,
"grad_norm": 0.24832259118556976,
"learning_rate": 3.2712747562708115e-05,
"loss": 1.2996,
"step": 620
},
{
"epoch": 0.03947399785467403,
"grad_norm": 0.2418624311685562,
"learning_rate": 3.243322785461781e-05,
"loss": 1.2418,
"step": 621
},
{
"epoch": 0.039537563068610705,
"grad_norm": 0.2648262083530426,
"learning_rate": 3.215467614843719e-05,
"loss": 1.2913,
"step": 622
},
{
"epoch": 0.039601128282547374,
"grad_norm": 0.2682283818721771,
"learning_rate": 3.187709643486427e-05,
"loss": 1.2148,
"step": 623
},
{
"epoch": 0.03966469349648405,
"grad_norm": 0.26762086153030396,
"learning_rate": 3.160049269067174e-05,
"loss": 1.2949,
"step": 624
},
{
"epoch": 0.039728258710420725,
"grad_norm": 0.25577932596206665,
"learning_rate": 3.132486887864992e-05,
"loss": 1.12,
"step": 625
},
{
"epoch": 0.03979182392435739,
"grad_norm": 0.2690037190914154,
"learning_rate": 3.105022894755003e-05,
"loss": 1.1813,
"step": 626
},
{
"epoch": 0.03985538913829407,
"grad_norm": 0.25239890813827515,
"learning_rate": 3.077657683202779e-05,
"loss": 1.2678,
"step": 627
},
{
"epoch": 0.039918954352230744,
"grad_norm": 0.2665114104747772,
"learning_rate": 3.0503916452586612e-05,
"loss": 1.0682,
"step": 628
},
{
"epoch": 0.03998251956616741,
"grad_norm": 0.2506917715072632,
"learning_rate": 3.0232251715521932e-05,
"loss": 1.2247,
"step": 629
},
{
"epoch": 0.04004608478010409,
"grad_norm": 0.24417519569396973,
"learning_rate": 2.9961586512864947e-05,
"loss": 1.2097,
"step": 630
},
{
"epoch": 0.040109649994040764,
"grad_norm": 0.2546679973602295,
"learning_rate": 2.9691924722326826e-05,
"loss": 1.1986,
"step": 631
},
{
"epoch": 0.04017321520797743,
"grad_norm": 0.25688743591308594,
"learning_rate": 2.9423270207243437e-05,
"loss": 1.2543,
"step": 632
},
{
"epoch": 0.04023678042191411,
"grad_norm": 0.23989447951316833,
"learning_rate": 2.9155626816519677e-05,
"loss": 1.0773,
"step": 633
},
{
"epoch": 0.04030034563585078,
"grad_norm": 0.25679922103881836,
"learning_rate": 2.888899838457455e-05,
"loss": 1.2843,
"step": 634
},
{
"epoch": 0.04036391084978745,
"grad_norm": 0.2580190896987915,
"learning_rate": 2.8623388731286093e-05,
"loss": 1.1911,
"step": 635
},
{
"epoch": 0.04042747606372413,
"grad_norm": 0.24526208639144897,
"learning_rate": 2.835880166193683e-05,
"loss": 1.2574,
"step": 636
},
{
"epoch": 0.0404910412776608,
"grad_norm": 0.25860583782196045,
"learning_rate": 2.8095240967158954e-05,
"loss": 1.2943,
"step": 637
},
{
"epoch": 0.04055460649159747,
"grad_norm": 0.26202407479286194,
"learning_rate": 2.7832710422880328e-05,
"loss": 1.1769,
"step": 638
},
{
"epoch": 0.040618171705534147,
"grad_norm": 0.2458542138338089,
"learning_rate": 2.75712137902703e-05,
"loss": 1.1669,
"step": 639
},
{
"epoch": 0.04068173691947082,
"grad_norm": 0.25534749031066895,
"learning_rate": 2.7310754815685624e-05,
"loss": 1.2057,
"step": 640
},
{
"epoch": 0.04074530213340749,
"grad_norm": 0.2514583170413971,
"learning_rate": 2.7051337230617125e-05,
"loss": 1.2483,
"step": 641
},
{
"epoch": 0.040808867347344166,
"grad_norm": 0.25142601132392883,
"learning_rate": 2.679296475163595e-05,
"loss": 1.1685,
"step": 642
},
{
"epoch": 0.04087243256128084,
"grad_norm": 0.2746109962463379,
"learning_rate": 2.6535641080340458e-05,
"loss": 1.2658,
"step": 643
},
{
"epoch": 0.04093599777521751,
"grad_norm": 0.26082682609558105,
"learning_rate": 2.6279369903303175e-05,
"loss": 1.2184,
"step": 644
},
{
"epoch": 0.040999562989154185,
"grad_norm": 0.27172860503196716,
"learning_rate": 2.6024154892017937e-05,
"loss": 1.2417,
"step": 645
},
{
"epoch": 0.04106312820309086,
"grad_norm": 0.26511403918266296,
"learning_rate": 2.5769999702847346e-05,
"loss": 1.2099,
"step": 646
},
{
"epoch": 0.04112669341702753,
"grad_norm": 0.26414263248443604,
"learning_rate": 2.5516907976970328e-05,
"loss": 1.2562,
"step": 647
},
{
"epoch": 0.041190258630964205,
"grad_norm": 0.25787821412086487,
"learning_rate": 2.5264883340330113e-05,
"loss": 1.2202,
"step": 648
},
{
"epoch": 0.04125382384490088,
"grad_norm": 0.25424811244010925,
"learning_rate": 2.501392940358197e-05,
"loss": 1.2154,
"step": 649
},
{
"epoch": 0.04131738905883755,
"grad_norm": 0.26234978437423706,
"learning_rate": 2.4764049762041874e-05,
"loss": 1.242,
"step": 650
},
{
"epoch": 0.041380954272774224,
"grad_norm": 0.25528523325920105,
"learning_rate": 2.4515247995634694e-05,
"loss": 1.1873,
"step": 651
},
{
"epoch": 0.0414445194867109,
"grad_norm": 0.2629062235355377,
"learning_rate": 2.426752766884306e-05,
"loss": 1.1596,
"step": 652
},
{
"epoch": 0.04150808470064757,
"grad_norm": 0.24369929730892181,
"learning_rate": 2.4020892330656252e-05,
"loss": 1.069,
"step": 653
},
{
"epoch": 0.041571649914584244,
"grad_norm": 0.2602699100971222,
"learning_rate": 2.377534551451932e-05,
"loss": 1.2132,
"step": 654
},
{
"epoch": 0.04163521512852092,
"grad_norm": 0.24992002546787262,
"learning_rate": 2.353089073828255e-05,
"loss": 1.1259,
"step": 655
},
{
"epoch": 0.04169878034245759,
"grad_norm": 0.2784167528152466,
"learning_rate": 2.328753150415094e-05,
"loss": 1.1997,
"step": 656
},
{
"epoch": 0.04176234555639426,
"grad_norm": 0.2581193745136261,
"learning_rate": 2.304527129863424e-05,
"loss": 1.1832,
"step": 657
},
{
"epoch": 0.04182591077033094,
"grad_norm": 0.25155678391456604,
"learning_rate": 2.280411359249668e-05,
"loss": 1.147,
"step": 658
},
{
"epoch": 0.04188947598426761,
"grad_norm": 0.2618091106414795,
"learning_rate": 2.2564061840707495e-05,
"loss": 1.303,
"step": 659
},
{
"epoch": 0.04195304119820428,
"grad_norm": 0.2630173861980438,
"learning_rate": 2.2325119482391467e-05,
"loss": 1.2555,
"step": 660
},
{
"epoch": 0.04201660641214096,
"grad_norm": 0.25127795338630676,
"learning_rate": 2.2087289940779343e-05,
"loss": 1.1694,
"step": 661
},
{
"epoch": 0.04208017162607763,
"grad_norm": 0.2526141107082367,
"learning_rate": 2.185057662315918e-05,
"loss": 1.0997,
"step": 662
},
{
"epoch": 0.0421437368400143,
"grad_norm": 0.2466498613357544,
"learning_rate": 2.1614982920827243e-05,
"loss": 1.2093,
"step": 663
},
{
"epoch": 0.04220730205395098,
"grad_norm": 0.2559715211391449,
"learning_rate": 2.1380512209039528e-05,
"loss": 1.239,
"step": 664
},
{
"epoch": 0.042270867267887646,
"grad_norm": 0.24562884867191315,
"learning_rate": 2.1147167846963422e-05,
"loss": 1.1716,
"step": 665
},
{
"epoch": 0.04233443248182432,
"grad_norm": 0.25966036319732666,
"learning_rate": 2.0914953177629548e-05,
"loss": 1.2553,
"step": 666
},
{
"epoch": 0.042397997695761,
"grad_norm": 0.25772759318351746,
"learning_rate": 2.068387152788387e-05,
"loss": 1.1341,
"step": 667
},
{
"epoch": 0.042461562909697666,
"grad_norm": 0.24900874495506287,
"learning_rate": 2.0453926208340003e-05,
"loss": 1.1742,
"step": 668
},
{
"epoch": 0.04252512812363434,
"grad_norm": 0.2540144622325897,
"learning_rate": 2.022512051333194e-05,
"loss": 1.1856,
"step": 669
},
{
"epoch": 0.04258869333757102,
"grad_norm": 0.26840710639953613,
"learning_rate": 1.999745772086655e-05,
"loss": 1.2104,
"step": 670
},
{
"epoch": 0.042652258551507685,
"grad_norm": 0.2511826753616333,
"learning_rate": 1.9770941092576957e-05,
"loss": 1.2477,
"step": 671
},
{
"epoch": 0.04271582376544436,
"grad_norm": 0.26480165123939514,
"learning_rate": 1.954557387367557e-05,
"loss": 1.1991,
"step": 672
},
{
"epoch": 0.042779388979381036,
"grad_norm": 0.2562330663204193,
"learning_rate": 1.9321359292907702e-05,
"loss": 1.2336,
"step": 673
},
{
"epoch": 0.042842954193317705,
"grad_norm": 0.25312507152557373,
"learning_rate": 1.9098300562505266e-05,
"loss": 1.1593,
"step": 674
},
{
"epoch": 0.04290651940725438,
"grad_norm": 0.2678249180316925,
"learning_rate": 1.8876400878140775e-05,
"loss": 1.1886,
"step": 675
},
{
"epoch": 0.042970084621191056,
"grad_norm": 0.25428783893585205,
"learning_rate": 1.8655663418881584e-05,
"loss": 1.2123,
"step": 676
},
{
"epoch": 0.043033649835127724,
"grad_norm": 0.2611987292766571,
"learning_rate": 1.8436091347144246e-05,
"loss": 1.2407,
"step": 677
},
{
"epoch": 0.0430972150490644,
"grad_norm": 0.2611881196498871,
"learning_rate": 1.821768780864943e-05,
"loss": 1.1918,
"step": 678
},
{
"epoch": 0.043160780263001075,
"grad_norm": 0.2661250829696655,
"learning_rate": 1.800045593237647e-05,
"loss": 1.2046,
"step": 679
},
{
"epoch": 0.043224345476937744,
"grad_norm": 0.2643533945083618,
"learning_rate": 1.7784398830519e-05,
"loss": 1.1827,
"step": 680
},
{
"epoch": 0.04328791069087442,
"grad_norm": 0.25061362981796265,
"learning_rate": 1.756951959844e-05,
"loss": 1.2051,
"step": 681
},
{
"epoch": 0.043351475904811095,
"grad_norm": 0.24832050502300262,
"learning_rate": 1.7355821314627564e-05,
"loss": 1.1704,
"step": 682
},
{
"epoch": 0.04341504111874776,
"grad_norm": 0.26712068915367126,
"learning_rate": 1.7143307040650925e-05,
"loss": 1.2655,
"step": 683
},
{
"epoch": 0.04347860633268444,
"grad_norm": 0.26257115602493286,
"learning_rate": 1.6931979821116418e-05,
"loss": 1.183,
"step": 684
},
{
"epoch": 0.043542171546621114,
"grad_norm": 0.2578732371330261,
"learning_rate": 1.672184268362391e-05,
"loss": 1.1036,
"step": 685
},
{
"epoch": 0.04360573676055778,
"grad_norm": 0.25747859477996826,
"learning_rate": 1.6512898638723497e-05,
"loss": 1.2769,
"step": 686
},
{
"epoch": 0.04366930197449446,
"grad_norm": 0.26593005657196045,
"learning_rate": 1.630515067987226e-05,
"loss": 1.2707,
"step": 687
},
{
"epoch": 0.04373286718843113,
"grad_norm": 0.2610760033130646,
"learning_rate": 1.6098601783391487e-05,
"loss": 1.2226,
"step": 688
},
{
"epoch": 0.0437964324023678,
"grad_norm": 0.2636644244194031,
"learning_rate": 1.5893254908423937e-05,
"loss": 1.194,
"step": 689
},
{
"epoch": 0.04385999761630448,
"grad_norm": 0.25099021196365356,
"learning_rate": 1.5689112996891576e-05,
"loss": 1.1853,
"step": 690
},
{
"epoch": 0.04392356283024115,
"grad_norm": 0.26002123951911926,
"learning_rate": 1.54861789734532e-05,
"loss": 1.1705,
"step": 691
},
{
"epoch": 0.04398712804417782,
"grad_norm": 0.25610899925231934,
"learning_rate": 1.5284455745462834e-05,
"loss": 1.173,
"step": 692
},
{
"epoch": 0.0440506932581145,
"grad_norm": 0.2630417048931122,
"learning_rate": 1.5083946202927824e-05,
"loss": 1.183,
"step": 693
},
{
"epoch": 0.04411425847205117,
"grad_norm": 0.26131799817085266,
"learning_rate": 1.4884653218467571e-05,
"loss": 1.2147,
"step": 694
},
{
"epoch": 0.04417782368598784,
"grad_norm": 0.2511073052883148,
"learning_rate": 1.4686579647272336e-05,
"loss": 1.1362,
"step": 695
},
{
"epoch": 0.044241388899924516,
"grad_norm": 0.2500525414943695,
"learning_rate": 1.4489728327062324e-05,
"loss": 1.1264,
"step": 696
},
{
"epoch": 0.04430495411386119,
"grad_norm": 0.2648208439350128,
"learning_rate": 1.4294102078047055e-05,
"loss": 1.2098,
"step": 697
},
{
"epoch": 0.04436851932779786,
"grad_norm": 0.2602032721042633,
"learning_rate": 1.4099703702884936e-05,
"loss": 1.2527,
"step": 698
},
{
"epoch": 0.044432084541734536,
"grad_norm": 0.26263752579689026,
"learning_rate": 1.3906535986643176e-05,
"loss": 1.218,
"step": 699
},
{
"epoch": 0.04449564975567121,
"grad_norm": 0.2635667622089386,
"learning_rate": 1.3714601696757712e-05,
"loss": 1.2896,
"step": 700
},
{
"epoch": 0.04449564975567121,
"eval_loss": 1.2046868801116943,
"eval_runtime": 1238.8537,
"eval_samples_per_second": 4.036,
"eval_steps_per_second": 1.009,
"step": 700
},
{
"epoch": 0.04455921496960788,
"grad_norm": 0.258548766374588,
"learning_rate": 1.3523903582993802e-05,
"loss": 1.1745,
"step": 701
},
{
"epoch": 0.044622780183544555,
"grad_norm": 0.26299649477005005,
"learning_rate": 1.3334444377406452e-05,
"loss": 1.2185,
"step": 702
},
{
"epoch": 0.04468634539748123,
"grad_norm": 0.26299095153808594,
"learning_rate": 1.3146226794301308e-05,
"loss": 1.2282,
"step": 703
},
{
"epoch": 0.0447499106114179,
"grad_norm": 0.26419302821159363,
"learning_rate": 1.2959253530195836e-05,
"loss": 1.2207,
"step": 704
},
{
"epoch": 0.044813475825354575,
"grad_norm": 0.2554692029953003,
"learning_rate": 1.2773527263780626e-05,
"loss": 1.2711,
"step": 705
},
{
"epoch": 0.04487704103929125,
"grad_norm": 0.2452605962753296,
"learning_rate": 1.258905065588103e-05,
"loss": 1.1732,
"step": 706
},
{
"epoch": 0.04494060625322792,
"grad_norm": 0.2570919394493103,
"learning_rate": 1.2405826349419014e-05,
"loss": 1.2534,
"step": 707
},
{
"epoch": 0.045004171467164594,
"grad_norm": 0.26748916506767273,
"learning_rate": 1.2223856969375447e-05,
"loss": 1.2603,
"step": 708
},
{
"epoch": 0.04506773668110127,
"grad_norm": 0.24398306012153625,
"learning_rate": 1.204314512275222e-05,
"loss": 1.1563,
"step": 709
},
{
"epoch": 0.04513130189503794,
"grad_norm": 0.2574535012245178,
"learning_rate": 1.1863693398535114e-05,
"loss": 1.2212,
"step": 710
},
{
"epoch": 0.045194867108974614,
"grad_norm": 0.2488526552915573,
"learning_rate": 1.1685504367656708e-05,
"loss": 1.191,
"step": 711
},
{
"epoch": 0.04525843232291129,
"grad_norm": 0.2656523585319519,
"learning_rate": 1.1508580582959349e-05,
"loss": 1.158,
"step": 712
},
{
"epoch": 0.04532199753684796,
"grad_norm": 0.2593711316585541,
"learning_rate": 1.1332924579158844e-05,
"loss": 1.1769,
"step": 713
},
{
"epoch": 0.04538556275078463,
"grad_norm": 0.25598251819610596,
"learning_rate": 1.1158538872807933e-05,
"loss": 1.1678,
"step": 714
},
{
"epoch": 0.04544912796472131,
"grad_norm": 0.26309624314308167,
"learning_rate": 1.0985425962260343e-05,
"loss": 1.1714,
"step": 715
},
{
"epoch": 0.04551269317865798,
"grad_norm": 0.2591480016708374,
"learning_rate": 1.0813588327634961e-05,
"loss": 1.1854,
"step": 716
},
{
"epoch": 0.04557625839259465,
"grad_norm": 0.2530069947242737,
"learning_rate": 1.0643028430780321e-05,
"loss": 1.1974,
"step": 717
},
{
"epoch": 0.04563982360653133,
"grad_norm": 0.26100844144821167,
"learning_rate": 1.0473748715239307e-05,
"loss": 1.2274,
"step": 718
},
{
"epoch": 0.045703388820468,
"grad_norm": 0.25156131386756897,
"learning_rate": 1.0305751606214165e-05,
"loss": 1.1218,
"step": 719
},
{
"epoch": 0.04576695403440467,
"grad_norm": 0.26160189509391785,
"learning_rate": 1.01390395105318e-05,
"loss": 1.209,
"step": 720
},
{
"epoch": 0.04583051924834135,
"grad_norm": 0.2658187448978424,
"learning_rate": 9.973614816609134e-06,
"loss": 1.2046,
"step": 721
},
{
"epoch": 0.045894084462278016,
"grad_norm": 0.24954035878181458,
"learning_rate": 9.809479894419149e-06,
"loss": 1.2043,
"step": 722
},
{
"epoch": 0.04595764967621469,
"grad_norm": 0.25436848402023315,
"learning_rate": 9.64663709545668e-06,
"loss": 1.2672,
"step": 723
},
{
"epoch": 0.04602121489015137,
"grad_norm": 0.2657119929790497,
"learning_rate": 9.485088752704885e-06,
"loss": 1.2123,
"step": 724
},
{
"epoch": 0.046084780104088036,
"grad_norm": 0.2558092474937439,
"learning_rate": 9.324837180601741e-06,
"loss": 1.1356,
"step": 725
},
{
"epoch": 0.04614834531802471,
"grad_norm": 0.2469986379146576,
"learning_rate": 9.16588467500693e-06,
"loss": 1.1135,
"step": 726
},
{
"epoch": 0.04621191053196139,
"grad_norm": 0.25156447291374207,
"learning_rate": 9.00823351316893e-06,
"loss": 1.234,
"step": 727
},
{
"epoch": 0.046275475745898055,
"grad_norm": 0.27851831912994385,
"learning_rate": 8.851885953692374e-06,
"loss": 1.1979,
"step": 728
},
{
"epoch": 0.04633904095983473,
"grad_norm": 0.26087456941604614,
"learning_rate": 8.696844236505785e-06,
"loss": 1.1554,
"step": 729
},
{
"epoch": 0.046402606173771406,
"grad_norm": 0.24540308117866516,
"learning_rate": 8.543110582829272e-06,
"loss": 1.081,
"step": 730
},
{
"epoch": 0.046466171387708075,
"grad_norm": 0.2543467581272125,
"learning_rate": 8.390687195142976e-06,
"loss": 1.1653,
"step": 731
},
{
"epoch": 0.04652973660164475,
"grad_norm": 0.2682556211948395,
"learning_rate": 8.239576257155334e-06,
"loss": 1.3331,
"step": 732
},
{
"epoch": 0.046593301815581425,
"grad_norm": 0.25438860058784485,
"learning_rate": 8.08977993377179e-06,
"loss": 1.1936,
"step": 733
},
{
"epoch": 0.046656867029518094,
"grad_norm": 0.25737786293029785,
"learning_rate": 7.941300371063953e-06,
"loss": 1.1429,
"step": 734
},
{
"epoch": 0.04672043224345477,
"grad_norm": 0.25992342829704285,
"learning_rate": 7.794139696238645e-06,
"loss": 1.061,
"step": 735
},
{
"epoch": 0.046783997457391445,
"grad_norm": 0.26320621371269226,
"learning_rate": 7.648300017607534e-06,
"loss": 1.1917,
"step": 736
},
{
"epoch": 0.046847562671328113,
"grad_norm": 0.2699463665485382,
"learning_rate": 7.503783424556921e-06,
"loss": 1.214,
"step": 737
},
{
"epoch": 0.04691112788526479,
"grad_norm": 0.2559181749820709,
"learning_rate": 7.360591987517762e-06,
"loss": 1.1313,
"step": 738
},
{
"epoch": 0.046974693099201464,
"grad_norm": 0.26224273443222046,
"learning_rate": 7.218727757936072e-06,
"loss": 1.2719,
"step": 739
},
{
"epoch": 0.04703825831313813,
"grad_norm": 0.2562790513038635,
"learning_rate": 7.078192768243486e-06,
"loss": 1.1937,
"step": 740
},
{
"epoch": 0.04710182352707481,
"grad_norm": 0.26699408888816833,
"learning_rate": 6.938989031828158e-06,
"loss": 1.2505,
"step": 741
},
{
"epoch": 0.047165388741011484,
"grad_norm": 0.267403244972229,
"learning_rate": 6.80111854300588e-06,
"loss": 1.2634,
"step": 742
},
{
"epoch": 0.04722895395494815,
"grad_norm": 0.2613820731639862,
"learning_rate": 6.664583276991632e-06,
"loss": 1.1293,
"step": 743
},
{
"epoch": 0.04729251916888483,
"grad_norm": 0.25940650701522827,
"learning_rate": 6.5293851898710625e-06,
"loss": 1.1064,
"step": 744
},
{
"epoch": 0.0473560843828215,
"grad_norm": 0.25714200735092163,
"learning_rate": 6.395526218572723e-06,
"loss": 1.2651,
"step": 745
},
{
"epoch": 0.04741964959675817,
"grad_norm": 0.25669535994529724,
"learning_rate": 6.2630082808401326e-06,
"loss": 1.2458,
"step": 746
},
{
"epoch": 0.04748321481069485,
"grad_norm": 0.25497812032699585,
"learning_rate": 6.131833275204346e-06,
"loss": 1.3052,
"step": 747
},
{
"epoch": 0.04754678002463152,
"grad_norm": 0.2577572762966156,
"learning_rate": 6.00200308095682e-06,
"loss": 1.1764,
"step": 748
},
{
"epoch": 0.04761034523856819,
"grad_norm": 0.2610035538673401,
"learning_rate": 5.873519558122398e-06,
"loss": 1.3754,
"step": 749
},
{
"epoch": 0.04767391045250487,
"grad_norm": 0.25255724787712097,
"learning_rate": 5.746384547432737e-06,
"loss": 1.2312,
"step": 750
},
{
"epoch": 0.04773747566644154,
"grad_norm": 0.2752842307090759,
"learning_rate": 5.62059987029987e-06,
"loss": 1.221,
"step": 751
},
{
"epoch": 0.04780104088037821,
"grad_norm": 0.2542133331298828,
"learning_rate": 5.496167328790191e-06,
"loss": 1.3241,
"step": 752
},
{
"epoch": 0.047864606094314886,
"grad_norm": 0.2514469623565674,
"learning_rate": 5.373088705598539e-06,
"loss": 1.1142,
"step": 753
},
{
"epoch": 0.04792817130825156,
"grad_norm": 0.26052698493003845,
"learning_rate": 5.251365764022753e-06,
"loss": 1.2683,
"step": 754
},
{
"epoch": 0.04799173652218823,
"grad_norm": 0.25234684348106384,
"learning_rate": 5.131000247938367e-06,
"loss": 1.2594,
"step": 755
},
{
"epoch": 0.048055301736124906,
"grad_norm": 0.25910067558288574,
"learning_rate": 5.011993881773569e-06,
"loss": 1.1759,
"step": 756
},
{
"epoch": 0.04811886695006158,
"grad_norm": 0.2570941746234894,
"learning_rate": 4.8943483704846475e-06,
"loss": 1.1994,
"step": 757
},
{
"epoch": 0.04818243216399825,
"grad_norm": 0.26008912920951843,
"learning_rate": 4.778065399531395e-06,
"loss": 1.214,
"step": 758
},
{
"epoch": 0.048245997377934925,
"grad_norm": 0.24928806722164154,
"learning_rate": 4.663146634853066e-06,
"loss": 1.1172,
"step": 759
},
{
"epoch": 0.0483095625918716,
"grad_norm": 0.2581523358821869,
"learning_rate": 4.549593722844492e-06,
"loss": 1.2443,
"step": 760
},
{
"epoch": 0.04837312780580827,
"grad_norm": 0.25802290439605713,
"learning_rate": 4.4374082903324565e-06,
"loss": 1.3145,
"step": 761
},
{
"epoch": 0.048436693019744945,
"grad_norm": 0.24896131455898285,
"learning_rate": 4.326591944552438e-06,
"loss": 1.2242,
"step": 762
},
{
"epoch": 0.04850025823368162,
"grad_norm": 0.2623492479324341,
"learning_rate": 4.217146273125549e-06,
"loss": 1.2571,
"step": 763
},
{
"epoch": 0.04856382344761829,
"grad_norm": 0.26440027356147766,
"learning_rate": 4.109072844035844e-06,
"loss": 1.2363,
"step": 764
},
{
"epoch": 0.048627388661554964,
"grad_norm": 0.26211121678352356,
"learning_rate": 4.002373205607723e-06,
"loss": 1.1663,
"step": 765
},
{
"epoch": 0.04869095387549164,
"grad_norm": 0.25894424319267273,
"learning_rate": 3.8970488864839334e-06,
"loss": 1.1758,
"step": 766
},
{
"epoch": 0.04875451908942831,
"grad_norm": 0.24708890914916992,
"learning_rate": 3.793101395603538e-06,
"loss": 1.0515,
"step": 767
},
{
"epoch": 0.048818084303364984,
"grad_norm": 0.2569176256656647,
"learning_rate": 3.690532222180343e-06,
"loss": 1.2896,
"step": 768
},
{
"epoch": 0.04888164951730166,
"grad_norm": 0.2633558511734009,
"learning_rate": 3.5893428356815305e-06,
"loss": 1.2318,
"step": 769
},
{
"epoch": 0.04894521473123833,
"grad_norm": 0.25989681482315063,
"learning_rate": 3.4895346858066724e-06,
"loss": 1.2352,
"step": 770
},
{
"epoch": 0.049008779945175,
"grad_norm": 0.24307774007320404,
"learning_rate": 3.391109202466902e-06,
"loss": 1.1933,
"step": 771
},
{
"epoch": 0.04907234515911168,
"grad_norm": 0.2679169178009033,
"learning_rate": 3.2940677957644215e-06,
"loss": 1.1622,
"step": 772
},
{
"epoch": 0.04913591037304835,
"grad_norm": 0.26480844616889954,
"learning_rate": 3.198411855972383e-06,
"loss": 1.235,
"step": 773
},
{
"epoch": 0.04919947558698502,
"grad_norm": 0.25534242391586304,
"learning_rate": 3.104142753514849e-06,
"loss": 1.1912,
"step": 774
},
{
"epoch": 0.0492630408009217,
"grad_norm": 0.25266873836517334,
"learning_rate": 3.011261838947277e-06,
"loss": 1.2265,
"step": 775
},
{
"epoch": 0.04932660601485837,
"grad_norm": 0.255912721157074,
"learning_rate": 2.9197704429370977e-06,
"loss": 1.1606,
"step": 776
},
{
"epoch": 0.04939017122879504,
"grad_norm": 0.2584792375564575,
"learning_rate": 2.829669876244612e-06,
"loss": 1.138,
"step": 777
},
{
"epoch": 0.04945373644273172,
"grad_norm": 0.26956725120544434,
"learning_rate": 2.7409614297043806e-06,
"loss": 1.1858,
"step": 778
},
{
"epoch": 0.049517301656668386,
"grad_norm": 0.2608809471130371,
"learning_rate": 2.653646374206531e-06,
"loss": 1.1392,
"step": 779
},
{
"epoch": 0.04958086687060506,
"grad_norm": 0.24935650825500488,
"learning_rate": 2.5677259606786684e-06,
"loss": 1.1769,
"step": 780
},
{
"epoch": 0.04964443208454174,
"grad_norm": 0.24793782830238342,
"learning_rate": 2.4832014200679154e-06,
"loss": 1.2592,
"step": 781
},
{
"epoch": 0.049707997298478405,
"grad_norm": 0.2635771632194519,
"learning_rate": 2.4000739633233347e-06,
"loss": 1.1995,
"step": 782
},
{
"epoch": 0.04977156251241508,
"grad_norm": 0.25501325726509094,
"learning_rate": 2.3183447813784675e-06,
"loss": 1.3232,
"step": 783
},
{
"epoch": 0.049835127726351756,
"grad_norm": 0.25844094157218933,
"learning_rate": 2.238015045134334e-06,
"loss": 1.1711,
"step": 784
},
{
"epoch": 0.049898692940288425,
"grad_norm": 0.24389567971229553,
"learning_rate": 2.159085905442737e-06,
"loss": 1.1424,
"step": 785
},
{
"epoch": 0.0499622581542251,
"grad_norm": 0.25697383284568787,
"learning_rate": 2.0815584930895972e-06,
"loss": 1.1575,
"step": 786
},
{
"epoch": 0.050025823368161776,
"grad_norm": 0.26238778233528137,
"learning_rate": 2.005433918778932e-06,
"loss": 1.1042,
"step": 787
},
{
"epoch": 0.050089388582098444,
"grad_norm": 0.2617799639701843,
"learning_rate": 1.9307132731168352e-06,
"loss": 1.1595,
"step": 788
},
{
"epoch": 0.05015295379603512,
"grad_norm": 0.2609799802303314,
"learning_rate": 1.857397626595858e-06,
"loss": 1.1745,
"step": 789
},
{
"epoch": 0.050216519009971795,
"grad_norm": 0.2602226734161377,
"learning_rate": 1.7854880295797405e-06,
"loss": 1.2053,
"step": 790
},
{
"epoch": 0.050280084223908464,
"grad_norm": 0.25434112548828125,
"learning_rate": 1.7149855122882697e-06,
"loss": 1.269,
"step": 791
},
{
"epoch": 0.05034364943784514,
"grad_norm": 0.26935696601867676,
"learning_rate": 1.6458910847826026e-06,
"loss": 1.2428,
"step": 792
},
{
"epoch": 0.050407214651781815,
"grad_norm": 0.2743295431137085,
"learning_rate": 1.5782057369507553e-06,
"loss": 1.2965,
"step": 793
},
{
"epoch": 0.05047077986571848,
"grad_norm": 0.24975071847438812,
"learning_rate": 1.5119304384934252e-06,
"loss": 1.1749,
"step": 794
},
{
"epoch": 0.05053434507965516,
"grad_norm": 0.256743848323822,
"learning_rate": 1.4470661389100804e-06,
"loss": 1.3172,
"step": 795
},
{
"epoch": 0.050597910293591834,
"grad_norm": 0.2594708800315857,
"learning_rate": 1.3836137674854255e-06,
"loss": 1.2,
"step": 796
},
{
"epoch": 0.0506614755075285,
"grad_norm": 0.26457688212394714,
"learning_rate": 1.3215742332759794e-06,
"loss": 1.3076,
"step": 797
},
{
"epoch": 0.05072504072146518,
"grad_norm": 0.25573182106018066,
"learning_rate": 1.2609484250971749e-06,
"loss": 1.2932,
"step": 798
},
{
"epoch": 0.050788605935401854,
"grad_norm": 0.25195443630218506,
"learning_rate": 1.2017372115104897e-06,
"loss": 1.2156,
"step": 799
},
{
"epoch": 0.05085217114933852,
"grad_norm": 0.25070831179618835,
"learning_rate": 1.143941440811147e-06,
"loss": 1.2064,
"step": 800
},
{
"epoch": 0.05085217114933852,
"eval_loss": 1.203278660774231,
"eval_runtime": 1239.0979,
"eval_samples_per_second": 4.035,
"eval_steps_per_second": 1.009,
"step": 800
},
{
"epoch": 0.0509157363632752,
"grad_norm": 0.2525332570075989,
"learning_rate": 1.0875619410158466e-06,
"loss": 1.1738,
"step": 801
},
{
"epoch": 0.05097930157721187,
"grad_norm": 0.24210986495018005,
"learning_rate": 1.0325995198509409e-06,
"loss": 1.1435,
"step": 802
},
{
"epoch": 0.05104286679114854,
"grad_norm": 0.25146523118019104,
"learning_rate": 9.79054964740911e-07,
"loss": 1.2707,
"step": 803
},
{
"epoch": 0.05110643200508522,
"grad_norm": 0.2529788315296173,
"learning_rate": 9.269290427969868e-07,
"loss": 1.1679,
"step": 804
},
{
"epoch": 0.05116999721902189,
"grad_norm": 0.25673815608024597,
"learning_rate": 8.762225008062674e-07,
"loss": 1.2141,
"step": 805
},
{
"epoch": 0.05123356243295856,
"grad_norm": 0.25803902745246887,
"learning_rate": 8.26936065220929e-07,
"loss": 1.2018,
"step": 806
},
{
"epoch": 0.05129712764689524,
"grad_norm": 0.2565945088863373,
"learning_rate": 7.790704421478557e-07,
"loss": 1.2261,
"step": 807
},
{
"epoch": 0.05136069286083191,
"grad_norm": 0.2625206410884857,
"learning_rate": 7.326263173385584e-07,
"loss": 1.1934,
"step": 808
},
{
"epoch": 0.05142425807476858,
"grad_norm": 0.25721174478530884,
"learning_rate": 6.876043561792833e-07,
"loss": 1.2349,
"step": 809
},
{
"epoch": 0.051487823288705256,
"grad_norm": 0.25995710492134094,
"learning_rate": 6.440052036815081e-07,
"loss": 1.3027,
"step": 810
},
{
"epoch": 0.05155138850264193,
"grad_norm": 0.25100308656692505,
"learning_rate": 6.018294844727379e-07,
"loss": 1.1802,
"step": 811
},
{
"epoch": 0.0516149537165786,
"grad_norm": 0.2459433674812317,
"learning_rate": 5.610778027874908e-07,
"loss": 1.1474,
"step": 812
},
{
"epoch": 0.051678518930515276,
"grad_norm": 0.2367779165506363,
"learning_rate": 5.217507424586821e-07,
"loss": 1.168,
"step": 813
},
{
"epoch": 0.05174208414445195,
"grad_norm": 0.2512117922306061,
"learning_rate": 4.838488669092534e-07,
"loss": 1.091,
"step": 814
},
{
"epoch": 0.05180564935838862,
"grad_norm": 0.2595987319946289,
"learning_rate": 4.4737271914411236e-07,
"loss": 1.1756,
"step": 815
},
{
"epoch": 0.051869214572325295,
"grad_norm": 0.26023730635643005,
"learning_rate": 4.123228217422948e-07,
"loss": 1.068,
"step": 816
},
{
"epoch": 0.05193277978626197,
"grad_norm": 0.26552048325538635,
"learning_rate": 3.7869967684958094e-07,
"loss": 1.1605,
"step": 817
},
{
"epoch": 0.05199634500019864,
"grad_norm": 0.24736690521240234,
"learning_rate": 3.465037661712134e-07,
"loss": 1.2006,
"step": 818
},
{
"epoch": 0.052059910214135315,
"grad_norm": 0.26172155141830444,
"learning_rate": 3.1573555096501283e-07,
"loss": 1.2359,
"step": 819
},
{
"epoch": 0.05212347542807199,
"grad_norm": 0.25399184226989746,
"learning_rate": 2.86395472034795e-07,
"loss": 1.2153,
"step": 820
},
{
"epoch": 0.05218704064200866,
"grad_norm": 0.25162798166275024,
"learning_rate": 2.584839497240643e-07,
"loss": 1.2581,
"step": 821
},
{
"epoch": 0.052250605855945334,
"grad_norm": 0.2551822066307068,
"learning_rate": 2.3200138390993e-07,
"loss": 1.1388,
"step": 822
},
{
"epoch": 0.05231417106988201,
"grad_norm": 0.24114681780338287,
"learning_rate": 2.0694815399744382e-07,
"loss": 1.2377,
"step": 823
},
{
"epoch": 0.05237773628381868,
"grad_norm": 0.26416000723838806,
"learning_rate": 1.83324618914138e-07,
"loss": 1.2193,
"step": 824
},
{
"epoch": 0.052441301497755353,
"grad_norm": 0.25959083437919617,
"learning_rate": 1.611311171048735e-07,
"loss": 1.1987,
"step": 825
},
{
"epoch": 0.05250486671169203,
"grad_norm": 0.24999088048934937,
"learning_rate": 1.4036796652701078e-07,
"loss": 1.1644,
"step": 826
},
{
"epoch": 0.0525684319256287,
"grad_norm": 0.25357383489608765,
"learning_rate": 1.210354646458245e-07,
"loss": 1.2345,
"step": 827
},
{
"epoch": 0.05263199713956537,
"grad_norm": 0.25583428144454956,
"learning_rate": 1.031338884302846e-07,
"loss": 1.2685,
"step": 828
},
{
"epoch": 0.05269556235350205,
"grad_norm": 0.25566795468330383,
"learning_rate": 8.666349434907073e-08,
"loss": 1.2141,
"step": 829
},
{
"epoch": 0.05275912756743872,
"grad_norm": 0.2603313624858856,
"learning_rate": 7.162451836685291e-08,
"loss": 1.2535,
"step": 830
},
{
"epoch": 0.05282269278137539,
"grad_norm": 0.24881498515605927,
"learning_rate": 5.8017175941005306e-08,
"loss": 1.1596,
"step": 831
},
{
"epoch": 0.05288625799531207,
"grad_norm": 0.2581416070461273,
"learning_rate": 4.584166201841988e-08,
"loss": 1.2291,
"step": 832
},
{
"epoch": 0.052949823209248736,
"grad_norm": 0.2521674335002899,
"learning_rate": 3.5098151032786355e-08,
"loss": 1.2752,
"step": 833
},
{
"epoch": 0.05301338842318541,
"grad_norm": 0.2460847645998001,
"learning_rate": 2.578679690204977e-08,
"loss": 1.1633,
"step": 834
},
{
"epoch": 0.05307695363712209,
"grad_norm": 0.2515714764595032,
"learning_rate": 1.7907733026223394e-08,
"loss": 1.1517,
"step": 835
},
{
"epoch": 0.053140518851058756,
"grad_norm": 0.2554892301559448,
"learning_rate": 1.1461072285490204e-08,
"loss": 1.1205,
"step": 836
},
{
"epoch": 0.05320408406499543,
"grad_norm": 0.2557508945465088,
"learning_rate": 6.446907038559769e-09,
"loss": 1.1845,
"step": 837
},
{
"epoch": 0.05326764927893211,
"grad_norm": 0.25483280420303345,
"learning_rate": 2.865309121358184e-09,
"loss": 1.1348,
"step": 838
},
{
"epoch": 0.053331214492868775,
"grad_norm": 0.2680445909500122,
"learning_rate": 7.163298459844647e-10,
"loss": 1.1985,
"step": 839
},
{
"epoch": 0.05339477970680545,
"grad_norm": 0.26120489835739136,
"learning_rate": 0.0,
"loss": 1.264,
"step": 840
}
],
"logging_steps": 1,
"max_steps": 840,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 2,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.3648292071931904e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}