{
"best_metric": 0.40253087878227234,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 0.20060180541624875,
"eval_steps": 50,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0013373453694416582,
"grad_norm": 7.7768025398254395,
"learning_rate": 1.0100000000000002e-05,
"loss": 6.4221,
"step": 1
},
{
"epoch": 0.0013373453694416582,
"eval_loss": 6.176359176635742,
"eval_runtime": 163.9132,
"eval_samples_per_second": 61.465,
"eval_steps_per_second": 1.922,
"step": 1
},
{
"epoch": 0.0026746907388833165,
"grad_norm": 5.993926048278809,
"learning_rate": 2.0200000000000003e-05,
"loss": 6.4108,
"step": 2
},
{
"epoch": 0.004012036108324975,
"grad_norm": 5.60136079788208,
"learning_rate": 3.0299999999999998e-05,
"loss": 6.3333,
"step": 3
},
{
"epoch": 0.005349381477766633,
"grad_norm": 5.272766590118408,
"learning_rate": 4.0400000000000006e-05,
"loss": 5.6085,
"step": 4
},
{
"epoch": 0.006686726847208292,
"grad_norm": 4.917640686035156,
"learning_rate": 5.05e-05,
"loss": 5.1171,
"step": 5
},
{
"epoch": 0.00802407221664995,
"grad_norm": 4.776335716247559,
"learning_rate": 6.0599999999999996e-05,
"loss": 4.6322,
"step": 6
},
{
"epoch": 0.009361417586091608,
"grad_norm": 5.44158935546875,
"learning_rate": 7.07e-05,
"loss": 3.9832,
"step": 7
},
{
"epoch": 0.010698762955533266,
"grad_norm": 6.46093225479126,
"learning_rate": 8.080000000000001e-05,
"loss": 3.51,
"step": 8
},
{
"epoch": 0.012036108324974924,
"grad_norm": 4.540277004241943,
"learning_rate": 9.09e-05,
"loss": 2.9562,
"step": 9
},
{
"epoch": 0.013373453694416584,
"grad_norm": 4.324646472930908,
"learning_rate": 0.000101,
"loss": 2.6557,
"step": 10
},
{
"epoch": 0.014710799063858242,
"grad_norm": 2.8748514652252197,
"learning_rate": 0.00010046842105263158,
"loss": 2.1475,
"step": 11
},
{
"epoch": 0.0160481444332999,
"grad_norm": 6.15751314163208,
"learning_rate": 9.993684210526315e-05,
"loss": 2.1734,
"step": 12
},
{
"epoch": 0.017385489802741558,
"grad_norm": 2.7471158504486084,
"learning_rate": 9.940526315789473e-05,
"loss": 1.8299,
"step": 13
},
{
"epoch": 0.018722835172183216,
"grad_norm": 1.9818578958511353,
"learning_rate": 9.887368421052632e-05,
"loss": 1.6142,
"step": 14
},
{
"epoch": 0.020060180541624874,
"grad_norm": 1.5324410200119019,
"learning_rate": 9.83421052631579e-05,
"loss": 1.5587,
"step": 15
},
{
"epoch": 0.021397525911066532,
"grad_norm": 1.4006189107894897,
"learning_rate": 9.781052631578948e-05,
"loss": 1.3915,
"step": 16
},
{
"epoch": 0.02273487128050819,
"grad_norm": 1.3229491710662842,
"learning_rate": 9.727894736842106e-05,
"loss": 1.2889,
"step": 17
},
{
"epoch": 0.024072216649949848,
"grad_norm": 1.2081401348114014,
"learning_rate": 9.674736842105263e-05,
"loss": 1.3103,
"step": 18
},
{
"epoch": 0.02540956201939151,
"grad_norm": 1.85019052028656,
"learning_rate": 9.621578947368421e-05,
"loss": 1.3369,
"step": 19
},
{
"epoch": 0.026746907388833167,
"grad_norm": 1.4622437953948975,
"learning_rate": 9.568421052631578e-05,
"loss": 1.2271,
"step": 20
},
{
"epoch": 0.028084252758274825,
"grad_norm": 1.6035629510879517,
"learning_rate": 9.515263157894737e-05,
"loss": 1.1791,
"step": 21
},
{
"epoch": 0.029421598127716483,
"grad_norm": 1.191978096961975,
"learning_rate": 9.462105263157895e-05,
"loss": 1.149,
"step": 22
},
{
"epoch": 0.03075894349715814,
"grad_norm": 1.1039379835128784,
"learning_rate": 9.408947368421054e-05,
"loss": 1.0236,
"step": 23
},
{
"epoch": 0.0320962888665998,
"grad_norm": 0.9501120448112488,
"learning_rate": 9.355789473684211e-05,
"loss": 1.0568,
"step": 24
},
{
"epoch": 0.03343363423604146,
"grad_norm": 0.9906852841377258,
"learning_rate": 9.302631578947369e-05,
"loss": 1.1361,
"step": 25
},
{
"epoch": 0.034770979605483116,
"grad_norm": 1.1276156902313232,
"learning_rate": 9.249473684210526e-05,
"loss": 0.8694,
"step": 26
},
{
"epoch": 0.03610832497492478,
"grad_norm": 0.9503394365310669,
"learning_rate": 9.196315789473685e-05,
"loss": 0.9323,
"step": 27
},
{
"epoch": 0.03744567034436643,
"grad_norm": 0.9411821961402893,
"learning_rate": 9.143157894736843e-05,
"loss": 0.955,
"step": 28
},
{
"epoch": 0.03878301571380809,
"grad_norm": 0.8490228056907654,
"learning_rate": 9.09e-05,
"loss": 0.8836,
"step": 29
},
{
"epoch": 0.04012036108324975,
"grad_norm": 0.8191907405853271,
"learning_rate": 9.036842105263158e-05,
"loss": 0.8396,
"step": 30
},
{
"epoch": 0.04145770645269141,
"grad_norm": 0.7862714529037476,
"learning_rate": 8.983684210526316e-05,
"loss": 0.9116,
"step": 31
},
{
"epoch": 0.042795051822133064,
"grad_norm": 1.3398971557617188,
"learning_rate": 8.930526315789474e-05,
"loss": 0.8548,
"step": 32
},
{
"epoch": 0.044132397191574725,
"grad_norm": 1.1809728145599365,
"learning_rate": 8.877368421052632e-05,
"loss": 0.7964,
"step": 33
},
{
"epoch": 0.04546974256101638,
"grad_norm": 1.0573207139968872,
"learning_rate": 8.82421052631579e-05,
"loss": 0.721,
"step": 34
},
{
"epoch": 0.04680708793045804,
"grad_norm": 0.7660430669784546,
"learning_rate": 8.771052631578948e-05,
"loss": 0.7354,
"step": 35
},
{
"epoch": 0.048144433299899696,
"grad_norm": 0.8423962593078613,
"learning_rate": 8.717894736842105e-05,
"loss": 0.7304,
"step": 36
},
{
"epoch": 0.04948177866934136,
"grad_norm": 0.7409595847129822,
"learning_rate": 8.664736842105263e-05,
"loss": 0.8138,
"step": 37
},
{
"epoch": 0.05081912403878302,
"grad_norm": 1.0868232250213623,
"learning_rate": 8.61157894736842e-05,
"loss": 0.8021,
"step": 38
},
{
"epoch": 0.05215646940822467,
"grad_norm": 1.0219075679779053,
"learning_rate": 8.55842105263158e-05,
"loss": 0.7564,
"step": 39
},
{
"epoch": 0.053493814777666335,
"grad_norm": 0.8560124635696411,
"learning_rate": 8.505263157894737e-05,
"loss": 0.6875,
"step": 40
},
{
"epoch": 0.05483116014710799,
"grad_norm": 0.7004531025886536,
"learning_rate": 8.452105263157896e-05,
"loss": 0.6683,
"step": 41
},
{
"epoch": 0.05616850551654965,
"grad_norm": 0.6894235610961914,
"learning_rate": 8.398947368421053e-05,
"loss": 0.6691,
"step": 42
},
{
"epoch": 0.057505850885991305,
"grad_norm": 0.7334969639778137,
"learning_rate": 8.345789473684211e-05,
"loss": 0.7808,
"step": 43
},
{
"epoch": 0.05884319625543297,
"grad_norm": 0.8385999798774719,
"learning_rate": 8.292631578947368e-05,
"loss": 0.7513,
"step": 44
},
{
"epoch": 0.06018054162487462,
"grad_norm": 0.7268967032432556,
"learning_rate": 8.239473684210526e-05,
"loss": 0.6678,
"step": 45
},
{
"epoch": 0.06151788699431628,
"grad_norm": 1.0294888019561768,
"learning_rate": 8.186315789473683e-05,
"loss": 0.7087,
"step": 46
},
{
"epoch": 0.06285523236375794,
"grad_norm": 0.6869120001792908,
"learning_rate": 8.133157894736842e-05,
"loss": 0.6256,
"step": 47
},
{
"epoch": 0.0641925777331996,
"grad_norm": 0.5973199605941772,
"learning_rate": 8.080000000000001e-05,
"loss": 0.6142,
"step": 48
},
{
"epoch": 0.06552992310264126,
"grad_norm": 0.6814255118370056,
"learning_rate": 8.026842105263159e-05,
"loss": 0.7083,
"step": 49
},
{
"epoch": 0.06686726847208292,
"grad_norm": 0.803034245967865,
"learning_rate": 7.973684210526316e-05,
"loss": 0.7366,
"step": 50
},
{
"epoch": 0.06686726847208292,
"eval_loss": 0.6198433637619019,
"eval_runtime": 161.5386,
"eval_samples_per_second": 62.369,
"eval_steps_per_second": 1.95,
"step": 50
},
{
"epoch": 0.06820461384152457,
"grad_norm": 0.7916774153709412,
"learning_rate": 7.920526315789474e-05,
"loss": 0.5536,
"step": 51
},
{
"epoch": 0.06954195921096623,
"grad_norm": 0.6397995352745056,
"learning_rate": 7.867368421052631e-05,
"loss": 0.575,
"step": 52
},
{
"epoch": 0.07087930458040789,
"grad_norm": 0.8294323086738586,
"learning_rate": 7.814210526315789e-05,
"loss": 0.5815,
"step": 53
},
{
"epoch": 0.07221664994984955,
"grad_norm": 0.6827910542488098,
"learning_rate": 7.761052631578946e-05,
"loss": 0.5574,
"step": 54
},
{
"epoch": 0.0735539953192912,
"grad_norm": 0.6560467481613159,
"learning_rate": 7.707894736842105e-05,
"loss": 0.6331,
"step": 55
},
{
"epoch": 0.07489134068873286,
"grad_norm": 0.6438890695571899,
"learning_rate": 7.654736842105264e-05,
"loss": 0.6216,
"step": 56
},
{
"epoch": 0.07622868605817452,
"grad_norm": 0.7472130656242371,
"learning_rate": 7.601578947368422e-05,
"loss": 0.6165,
"step": 57
},
{
"epoch": 0.07756603142761619,
"grad_norm": 0.6722190976142883,
"learning_rate": 7.548421052631579e-05,
"loss": 0.5489,
"step": 58
},
{
"epoch": 0.07890337679705783,
"grad_norm": 0.7682622671127319,
"learning_rate": 7.495263157894737e-05,
"loss": 0.5283,
"step": 59
},
{
"epoch": 0.0802407221664995,
"grad_norm": 0.6968657970428467,
"learning_rate": 7.442105263157894e-05,
"loss": 0.5136,
"step": 60
},
{
"epoch": 0.08157806753594116,
"grad_norm": 0.705340564250946,
"learning_rate": 7.388947368421053e-05,
"loss": 0.542,
"step": 61
},
{
"epoch": 0.08291541290538282,
"grad_norm": 0.7181550860404968,
"learning_rate": 7.335789473684211e-05,
"loss": 0.6186,
"step": 62
},
{
"epoch": 0.08425275827482448,
"grad_norm": 0.7151327133178711,
"learning_rate": 7.282631578947368e-05,
"loss": 0.5627,
"step": 63
},
{
"epoch": 0.08559010364426613,
"grad_norm": 0.6997770667076111,
"learning_rate": 7.229473684210527e-05,
"loss": 0.5474,
"step": 64
},
{
"epoch": 0.08692744901370779,
"grad_norm": 0.6078030467033386,
"learning_rate": 7.176315789473685e-05,
"loss": 0.5169,
"step": 65
},
{
"epoch": 0.08826479438314945,
"grad_norm": 0.6001531481742859,
"learning_rate": 7.123157894736842e-05,
"loss": 0.4925,
"step": 66
},
{
"epoch": 0.08960213975259111,
"grad_norm": 0.5884681940078735,
"learning_rate": 7.07e-05,
"loss": 0.517,
"step": 67
},
{
"epoch": 0.09093948512203276,
"grad_norm": 0.676162600517273,
"learning_rate": 7.016842105263159e-05,
"loss": 0.6264,
"step": 68
},
{
"epoch": 0.09227683049147442,
"grad_norm": 0.7076681852340698,
"learning_rate": 6.963684210526316e-05,
"loss": 0.5951,
"step": 69
},
{
"epoch": 0.09361417586091608,
"grad_norm": 0.688647985458374,
"learning_rate": 6.910526315789474e-05,
"loss": 0.5149,
"step": 70
},
{
"epoch": 0.09495152123035774,
"grad_norm": 0.6269810199737549,
"learning_rate": 6.857368421052631e-05,
"loss": 0.4786,
"step": 71
},
{
"epoch": 0.09628886659979939,
"grad_norm": 0.5938174724578857,
"learning_rate": 6.80421052631579e-05,
"loss": 0.4705,
"step": 72
},
{
"epoch": 0.09762621196924105,
"grad_norm": 0.5638948678970337,
"learning_rate": 6.751052631578948e-05,
"loss": 0.4933,
"step": 73
},
{
"epoch": 0.09896355733868271,
"grad_norm": 0.5896424055099487,
"learning_rate": 6.697894736842105e-05,
"loss": 0.5242,
"step": 74
},
{
"epoch": 0.10030090270812438,
"grad_norm": 0.6239397525787354,
"learning_rate": 6.644736842105264e-05,
"loss": 0.5819,
"step": 75
},
{
"epoch": 0.10163824807756604,
"grad_norm": 0.6227509379386902,
"learning_rate": 6.591578947368422e-05,
"loss": 0.4862,
"step": 76
},
{
"epoch": 0.10297559344700769,
"grad_norm": 0.5637099742889404,
"learning_rate": 6.538421052631579e-05,
"loss": 0.4858,
"step": 77
},
{
"epoch": 0.10431293881644935,
"grad_norm": 0.5321072936058044,
"learning_rate": 6.485263157894737e-05,
"loss": 0.4471,
"step": 78
},
{
"epoch": 0.10565028418589101,
"grad_norm": 0.5791961550712585,
"learning_rate": 6.432105263157894e-05,
"loss": 0.4531,
"step": 79
},
{
"epoch": 0.10698762955533267,
"grad_norm": 0.5246228575706482,
"learning_rate": 6.378947368421053e-05,
"loss": 0.4812,
"step": 80
},
{
"epoch": 0.10832497492477432,
"grad_norm": 0.5978341698646545,
"learning_rate": 6.32578947368421e-05,
"loss": 0.5496,
"step": 81
},
{
"epoch": 0.10966232029421598,
"grad_norm": 0.6050398945808411,
"learning_rate": 6.27263157894737e-05,
"loss": 0.4998,
"step": 82
},
{
"epoch": 0.11099966566365764,
"grad_norm": 0.5705775618553162,
"learning_rate": 6.219473684210527e-05,
"loss": 0.4493,
"step": 83
},
{
"epoch": 0.1123370110330993,
"grad_norm": 0.5693026781082153,
"learning_rate": 6.166315789473685e-05,
"loss": 0.4627,
"step": 84
},
{
"epoch": 0.11367435640254096,
"grad_norm": 0.6199223399162292,
"learning_rate": 6.113157894736842e-05,
"loss": 0.4128,
"step": 85
},
{
"epoch": 0.11501170177198261,
"grad_norm": 0.7600672841072083,
"learning_rate": 6.0599999999999996e-05,
"loss": 0.4716,
"step": 86
},
{
"epoch": 0.11634904714142427,
"grad_norm": 0.6081468462944031,
"learning_rate": 6.006842105263158e-05,
"loss": 0.5161,
"step": 87
},
{
"epoch": 0.11768639251086593,
"grad_norm": 0.6899324655532837,
"learning_rate": 5.953684210526315e-05,
"loss": 0.5071,
"step": 88
},
{
"epoch": 0.1190237378803076,
"grad_norm": 0.618345320224762,
"learning_rate": 5.900526315789474e-05,
"loss": 0.49,
"step": 89
},
{
"epoch": 0.12036108324974924,
"grad_norm": 0.5444545149803162,
"learning_rate": 5.847368421052632e-05,
"loss": 0.4405,
"step": 90
},
{
"epoch": 0.1216984286191909,
"grad_norm": 0.60733562707901,
"learning_rate": 5.79421052631579e-05,
"loss": 0.4539,
"step": 91
},
{
"epoch": 0.12303577398863257,
"grad_norm": 0.5278117060661316,
"learning_rate": 5.7410526315789475e-05,
"loss": 0.4333,
"step": 92
},
{
"epoch": 0.12437311935807423,
"grad_norm": 0.6142237782478333,
"learning_rate": 5.687894736842105e-05,
"loss": 0.5313,
"step": 93
},
{
"epoch": 0.12571046472751587,
"grad_norm": 0.5651121735572815,
"learning_rate": 5.6347368421052625e-05,
"loss": 0.4827,
"step": 94
},
{
"epoch": 0.12704781009695754,
"grad_norm": 0.5625864863395691,
"learning_rate": 5.5815789473684214e-05,
"loss": 0.4306,
"step": 95
},
{
"epoch": 0.1283851554663992,
"grad_norm": 0.5742880702018738,
"learning_rate": 5.5284210526315796e-05,
"loss": 0.4486,
"step": 96
},
{
"epoch": 0.12972250083584086,
"grad_norm": 0.5653654932975769,
"learning_rate": 5.475263157894737e-05,
"loss": 0.4327,
"step": 97
},
{
"epoch": 0.13105984620528252,
"grad_norm": 0.6067980527877808,
"learning_rate": 5.422105263157895e-05,
"loss": 0.4323,
"step": 98
},
{
"epoch": 0.13239719157472418,
"grad_norm": 0.5677730441093445,
"learning_rate": 5.368947368421053e-05,
"loss": 0.4826,
"step": 99
},
{
"epoch": 0.13373453694416584,
"grad_norm": 0.5298727750778198,
"learning_rate": 5.3157894736842104e-05,
"loss": 0.5075,
"step": 100
},
{
"epoch": 0.13373453694416584,
"eval_loss": 0.45096904039382935,
"eval_runtime": 163.4922,
"eval_samples_per_second": 61.624,
"eval_steps_per_second": 1.927,
"step": 100
},
{
"epoch": 0.13507188231360748,
"grad_norm": 0.5436654090881348,
"learning_rate": 5.262631578947368e-05,
"loss": 0.4414,
"step": 101
},
{
"epoch": 0.13640922768304914,
"grad_norm": 0.500196635723114,
"learning_rate": 5.209473684210527e-05,
"loss": 0.4313,
"step": 102
},
{
"epoch": 0.1377465730524908,
"grad_norm": 0.5394172668457031,
"learning_rate": 5.1563157894736844e-05,
"loss": 0.3968,
"step": 103
},
{
"epoch": 0.13908391842193246,
"grad_norm": 0.5281717777252197,
"learning_rate": 5.1031578947368426e-05,
"loss": 0.4193,
"step": 104
},
{
"epoch": 0.14042126379137412,
"grad_norm": 0.5725506544113159,
"learning_rate": 5.05e-05,
"loss": 0.4368,
"step": 105
},
{
"epoch": 0.14175860916081578,
"grad_norm": 0.614000678062439,
"learning_rate": 4.9968421052631576e-05,
"loss": 0.4904,
"step": 106
},
{
"epoch": 0.14309595453025745,
"grad_norm": 0.5707330107688904,
"learning_rate": 4.943684210526316e-05,
"loss": 0.4335,
"step": 107
},
{
"epoch": 0.1444332998996991,
"grad_norm": 0.8218697905540466,
"learning_rate": 4.890526315789474e-05,
"loss": 0.4607,
"step": 108
},
{
"epoch": 0.14577064526914077,
"grad_norm": 0.49518197774887085,
"learning_rate": 4.8373684210526316e-05,
"loss": 0.4167,
"step": 109
},
{
"epoch": 0.1471079906385824,
"grad_norm": 0.5105622410774231,
"learning_rate": 4.784210526315789e-05,
"loss": 0.3983,
"step": 110
},
{
"epoch": 0.14844533600802406,
"grad_norm": 0.559273898601532,
"learning_rate": 4.731052631578947e-05,
"loss": 0.4263,
"step": 111
},
{
"epoch": 0.14978268137746573,
"grad_norm": 0.634856641292572,
"learning_rate": 4.6778947368421055e-05,
"loss": 0.4869,
"step": 112
},
{
"epoch": 0.1511200267469074,
"grad_norm": 0.557166337966919,
"learning_rate": 4.624736842105263e-05,
"loss": 0.4447,
"step": 113
},
{
"epoch": 0.15245737211634905,
"grad_norm": 0.4786439836025238,
"learning_rate": 4.571578947368421e-05,
"loss": 0.4436,
"step": 114
},
{
"epoch": 0.1537947174857907,
"grad_norm": 0.47446924448013306,
"learning_rate": 4.518421052631579e-05,
"loss": 0.3959,
"step": 115
},
{
"epoch": 0.15513206285523237,
"grad_norm": 0.5089144110679626,
"learning_rate": 4.465263157894737e-05,
"loss": 0.4078,
"step": 116
},
{
"epoch": 0.15646940822467403,
"grad_norm": 0.6025608777999878,
"learning_rate": 4.412105263157895e-05,
"loss": 0.4322,
"step": 117
},
{
"epoch": 0.15780675359411567,
"grad_norm": 0.6016778945922852,
"learning_rate": 4.358947368421053e-05,
"loss": 0.4408,
"step": 118
},
{
"epoch": 0.15914409896355733,
"grad_norm": 0.6104781031608582,
"learning_rate": 4.30578947368421e-05,
"loss": 0.4705,
"step": 119
},
{
"epoch": 0.160481444332999,
"grad_norm": 0.49418070912361145,
"learning_rate": 4.2526315789473685e-05,
"loss": 0.4095,
"step": 120
},
{
"epoch": 0.16181878970244065,
"grad_norm": 0.49163714051246643,
"learning_rate": 4.199473684210527e-05,
"loss": 0.3926,
"step": 121
},
{
"epoch": 0.1631561350718823,
"grad_norm": 0.4370480179786682,
"learning_rate": 4.146315789473684e-05,
"loss": 0.3847,
"step": 122
},
{
"epoch": 0.16449348044132397,
"grad_norm": 0.5089737772941589,
"learning_rate": 4.093157894736842e-05,
"loss": 0.3866,
"step": 123
},
{
"epoch": 0.16583082581076564,
"grad_norm": 0.5304917097091675,
"learning_rate": 4.0400000000000006e-05,
"loss": 0.4045,
"step": 124
},
{
"epoch": 0.1671681711802073,
"grad_norm": 0.6393993496894836,
"learning_rate": 3.986842105263158e-05,
"loss": 0.5036,
"step": 125
},
{
"epoch": 0.16850551654964896,
"grad_norm": 0.544893741607666,
"learning_rate": 3.933684210526316e-05,
"loss": 0.3761,
"step": 126
},
{
"epoch": 0.1698428619190906,
"grad_norm": 0.5071455240249634,
"learning_rate": 3.880526315789473e-05,
"loss": 0.4161,
"step": 127
},
{
"epoch": 0.17118020728853225,
"grad_norm": 0.48931699991226196,
"learning_rate": 3.827368421052632e-05,
"loss": 0.384,
"step": 128
},
{
"epoch": 0.17251755265797392,
"grad_norm": 0.5249499678611755,
"learning_rate": 3.7742105263157896e-05,
"loss": 0.3637,
"step": 129
},
{
"epoch": 0.17385489802741558,
"grad_norm": 0.6430131196975708,
"learning_rate": 3.721052631578947e-05,
"loss": 0.4372,
"step": 130
},
{
"epoch": 0.17519224339685724,
"grad_norm": 0.5788580775260925,
"learning_rate": 3.6678947368421054e-05,
"loss": 0.4384,
"step": 131
},
{
"epoch": 0.1765295887662989,
"grad_norm": 0.525442361831665,
"learning_rate": 3.6147368421052636e-05,
"loss": 0.4069,
"step": 132
},
{
"epoch": 0.17786693413574056,
"grad_norm": 0.491260826587677,
"learning_rate": 3.561578947368421e-05,
"loss": 0.4497,
"step": 133
},
{
"epoch": 0.17920427950518222,
"grad_norm": 0.46042758226394653,
"learning_rate": 3.508421052631579e-05,
"loss": 0.3671,
"step": 134
},
{
"epoch": 0.18054162487462388,
"grad_norm": 0.5297501087188721,
"learning_rate": 3.455263157894737e-05,
"loss": 0.3714,
"step": 135
},
{
"epoch": 0.18187897024406552,
"grad_norm": 0.5078780651092529,
"learning_rate": 3.402105263157895e-05,
"loss": 0.3958,
"step": 136
},
{
"epoch": 0.18321631561350718,
"grad_norm": 0.6146392226219177,
"learning_rate": 3.3489473684210526e-05,
"loss": 0.4246,
"step": 137
},
{
"epoch": 0.18455366098294884,
"grad_norm": 0.6537202596664429,
"learning_rate": 3.295789473684211e-05,
"loss": 0.4329,
"step": 138
},
{
"epoch": 0.1858910063523905,
"grad_norm": 0.5298619270324707,
"learning_rate": 3.242631578947368e-05,
"loss": 0.4508,
"step": 139
},
{
"epoch": 0.18722835172183216,
"grad_norm": 0.4924841523170471,
"learning_rate": 3.1894736842105265e-05,
"loss": 0.3845,
"step": 140
},
{
"epoch": 0.18856569709127383,
"grad_norm": 0.4037409722805023,
"learning_rate": 3.136315789473685e-05,
"loss": 0.3572,
"step": 141
},
{
"epoch": 0.1899030424607155,
"grad_norm": 0.5435509085655212,
"learning_rate": 3.083157894736842e-05,
"loss": 0.3659,
"step": 142
},
{
"epoch": 0.19124038783015715,
"grad_norm": 0.5061467289924622,
"learning_rate": 3.0299999999999998e-05,
"loss": 0.4057,
"step": 143
},
{
"epoch": 0.19257773319959878,
"grad_norm": 0.5949498414993286,
"learning_rate": 2.9768421052631577e-05,
"loss": 0.4538,
"step": 144
},
{
"epoch": 0.19391507856904044,
"grad_norm": 0.4520629346370697,
"learning_rate": 2.923684210526316e-05,
"loss": 0.4127,
"step": 145
},
{
"epoch": 0.1952524239384821,
"grad_norm": 0.4914908707141876,
"learning_rate": 2.8705263157894737e-05,
"loss": 0.4048,
"step": 146
},
{
"epoch": 0.19658976930792377,
"grad_norm": 0.5175070762634277,
"learning_rate": 2.8173684210526313e-05,
"loss": 0.4261,
"step": 147
},
{
"epoch": 0.19792711467736543,
"grad_norm": 0.4606378376483917,
"learning_rate": 2.7642105263157898e-05,
"loss": 0.3737,
"step": 148
},
{
"epoch": 0.1992644600468071,
"grad_norm": 0.5699568390846252,
"learning_rate": 2.7110526315789473e-05,
"loss": 0.4222,
"step": 149
},
{
"epoch": 0.20060180541624875,
"grad_norm": 0.5272060632705688,
"learning_rate": 2.6578947368421052e-05,
"loss": 0.4438,
"step": 150
},
{
"epoch": 0.20060180541624875,
"eval_loss": 0.40253087878227234,
"eval_runtime": 163.1972,
"eval_samples_per_second": 61.735,
"eval_steps_per_second": 1.93,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.172246609849221e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}