{
"best_metric": 1.1766536235809326,
"best_model_checkpoint": "miner_id_24/checkpoint-400",
"epoch": 0.45610034207525657,
"eval_steps": 100,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009122006841505131,
"grad_norm": 0.25783950090408325,
"learning_rate": 5e-06,
"loss": 1.2112,
"step": 1
},
{
"epoch": 0.0009122006841505131,
"eval_loss": 1.528485894203186,
"eval_runtime": 113.6842,
"eval_samples_per_second": 16.238,
"eval_steps_per_second": 4.064,
"step": 1
},
{
"epoch": 0.0018244013683010262,
"grad_norm": 0.3564816117286682,
"learning_rate": 1e-05,
"loss": 1.0953,
"step": 2
},
{
"epoch": 0.0027366020524515395,
"grad_norm": 0.3273909389972687,
"learning_rate": 1.5e-05,
"loss": 1.3012,
"step": 3
},
{
"epoch": 0.0036488027366020524,
"grad_norm": 0.2606115937232971,
"learning_rate": 2e-05,
"loss": 1.2529,
"step": 4
},
{
"epoch": 0.004561003420752566,
"grad_norm": 0.3063911199569702,
"learning_rate": 2.5e-05,
"loss": 1.4867,
"step": 5
},
{
"epoch": 0.005473204104903079,
"grad_norm": 0.30145400762557983,
"learning_rate": 3e-05,
"loss": 1.4505,
"step": 6
},
{
"epoch": 0.0063854047890535915,
"grad_norm": 0.3016102612018585,
"learning_rate": 3.5e-05,
"loss": 1.5596,
"step": 7
},
{
"epoch": 0.007297605473204105,
"grad_norm": 0.291785329580307,
"learning_rate": 4e-05,
"loss": 2.1331,
"step": 8
},
{
"epoch": 0.008209806157354617,
"grad_norm": 0.38654470443725586,
"learning_rate": 4.5e-05,
"loss": 1.998,
"step": 9
},
{
"epoch": 0.009122006841505131,
"grad_norm": 0.24303749203681946,
"learning_rate": 5e-05,
"loss": 1.2621,
"step": 10
},
{
"epoch": 0.010034207525655644,
"grad_norm": 0.4852235019207001,
"learning_rate": 5.500000000000001e-05,
"loss": 2.9854,
"step": 11
},
{
"epoch": 0.010946408209806158,
"grad_norm": 0.2009701430797577,
"learning_rate": 6e-05,
"loss": 1.2834,
"step": 12
},
{
"epoch": 0.01185860889395667,
"grad_norm": 0.17731308937072754,
"learning_rate": 6.500000000000001e-05,
"loss": 1.3094,
"step": 13
},
{
"epoch": 0.012770809578107183,
"grad_norm": 0.1488691121339798,
"learning_rate": 7e-05,
"loss": 1.0258,
"step": 14
},
{
"epoch": 0.013683010262257697,
"grad_norm": 0.15303461253643036,
"learning_rate": 7.500000000000001e-05,
"loss": 1.3275,
"step": 15
},
{
"epoch": 0.01459521094640821,
"grad_norm": 0.19198919832706451,
"learning_rate": 8e-05,
"loss": 1.2239,
"step": 16
},
{
"epoch": 0.015507411630558724,
"grad_norm": 0.18219693005084991,
"learning_rate": 8.5e-05,
"loss": 1.1367,
"step": 17
},
{
"epoch": 0.016419612314709234,
"grad_norm": 0.15564540028572083,
"learning_rate": 9e-05,
"loss": 1.1671,
"step": 18
},
{
"epoch": 0.01733181299885975,
"grad_norm": 0.15285353362560272,
"learning_rate": 9.5e-05,
"loss": 1.2621,
"step": 19
},
{
"epoch": 0.018244013683010263,
"grad_norm": 0.1607624590396881,
"learning_rate": 0.0001,
"loss": 1.3366,
"step": 20
},
{
"epoch": 0.019156214367160775,
"grad_norm": 0.1453542858362198,
"learning_rate": 9.999892908320647e-05,
"loss": 1.0341,
"step": 21
},
{
"epoch": 0.020068415051311288,
"grad_norm": 0.2023877501487732,
"learning_rate": 9.999571637870036e-05,
"loss": 1.3088,
"step": 22
},
{
"epoch": 0.0209806157354618,
"grad_norm": 0.14624053239822388,
"learning_rate": 9.999036202410325e-05,
"loss": 0.9596,
"step": 23
},
{
"epoch": 0.021892816419612316,
"grad_norm": 0.18767771124839783,
"learning_rate": 9.998286624877786e-05,
"loss": 1.2299,
"step": 24
},
{
"epoch": 0.02280501710376283,
"grad_norm": 0.20108367502689362,
"learning_rate": 9.997322937381829e-05,
"loss": 1.2757,
"step": 25
},
{
"epoch": 0.02371721778791334,
"grad_norm": 0.2668151259422302,
"learning_rate": 9.996145181203615e-05,
"loss": 1.2839,
"step": 26
},
{
"epoch": 0.024629418472063853,
"grad_norm": 0.21400977671146393,
"learning_rate": 9.994753406794301e-05,
"loss": 1.4506,
"step": 27
},
{
"epoch": 0.025541619156214366,
"grad_norm": 0.26276835799217224,
"learning_rate": 9.99314767377287e-05,
"loss": 1.2257,
"step": 28
},
{
"epoch": 0.026453819840364882,
"grad_norm": 0.23972387611865997,
"learning_rate": 9.991328050923581e-05,
"loss": 1.3055,
"step": 29
},
{
"epoch": 0.027366020524515394,
"grad_norm": 0.19858954846858978,
"learning_rate": 9.989294616193017e-05,
"loss": 1.0788,
"step": 30
},
{
"epoch": 0.028278221208665907,
"grad_norm": 0.21251460909843445,
"learning_rate": 9.98704745668676e-05,
"loss": 1.0723,
"step": 31
},
{
"epoch": 0.02919042189281642,
"grad_norm": 0.25747787952423096,
"learning_rate": 9.98458666866564e-05,
"loss": 1.2754,
"step": 32
},
{
"epoch": 0.03010262257696693,
"grad_norm": 0.18308940529823303,
"learning_rate": 9.981912357541627e-05,
"loss": 1.1309,
"step": 33
},
{
"epoch": 0.031014823261117448,
"grad_norm": 0.28769150376319885,
"learning_rate": 9.97902463787331e-05,
"loss": 1.1491,
"step": 34
},
{
"epoch": 0.03192702394526796,
"grad_norm": 0.33303603529930115,
"learning_rate": 9.975923633360985e-05,
"loss": 1.5505,
"step": 35
},
{
"epoch": 0.03283922462941847,
"grad_norm": 0.19029738008975983,
"learning_rate": 9.972609476841367e-05,
"loss": 1.3271,
"step": 36
},
{
"epoch": 0.033751425313568985,
"grad_norm": 0.1386023312807083,
"learning_rate": 9.969082310281891e-05,
"loss": 1.2999,
"step": 37
},
{
"epoch": 0.0346636259977195,
"grad_norm": 0.13166525959968567,
"learning_rate": 9.965342284774632e-05,
"loss": 1.2839,
"step": 38
},
{
"epoch": 0.03557582668187001,
"grad_norm": 0.1366724967956543,
"learning_rate": 9.961389560529836e-05,
"loss": 1.327,
"step": 39
},
{
"epoch": 0.036488027366020526,
"grad_norm": 0.15251226723194122,
"learning_rate": 9.957224306869053e-05,
"loss": 1.312,
"step": 40
},
{
"epoch": 0.037400228050171035,
"grad_norm": 0.13887174427509308,
"learning_rate": 9.952846702217886e-05,
"loss": 1.3777,
"step": 41
},
{
"epoch": 0.03831242873432155,
"grad_norm": 0.1658075451850891,
"learning_rate": 9.948256934098352e-05,
"loss": 1.3959,
"step": 42
},
{
"epoch": 0.039224629418472066,
"grad_norm": 0.16137467324733734,
"learning_rate": 9.943455199120837e-05,
"loss": 1.3763,
"step": 43
},
{
"epoch": 0.040136830102622575,
"grad_norm": 0.17031268775463104,
"learning_rate": 9.938441702975689e-05,
"loss": 1.3507,
"step": 44
},
{
"epoch": 0.04104903078677309,
"grad_norm": 0.17008689045906067,
"learning_rate": 9.933216660424395e-05,
"loss": 1.3843,
"step": 45
},
{
"epoch": 0.0419612314709236,
"grad_norm": 0.23986858129501343,
"learning_rate": 9.927780295290389e-05,
"loss": 1.3637,
"step": 46
},
{
"epoch": 0.042873432155074116,
"grad_norm": 0.253842830657959,
"learning_rate": 9.922132840449459e-05,
"loss": 1.4677,
"step": 47
},
{
"epoch": 0.04378563283922463,
"grad_norm": 0.3024607002735138,
"learning_rate": 9.916274537819775e-05,
"loss": 1.595,
"step": 48
},
{
"epoch": 0.04469783352337514,
"grad_norm": 0.4269244372844696,
"learning_rate": 9.91020563835152e-05,
"loss": 1.6949,
"step": 49
},
{
"epoch": 0.04561003420752566,
"grad_norm": 1.273447871208191,
"learning_rate": 9.903926402016153e-05,
"loss": 2.6063,
"step": 50
},
{
"epoch": 0.046522234891676166,
"grad_norm": 0.25115707516670227,
"learning_rate": 9.897437097795257e-05,
"loss": 1.2458,
"step": 51
},
{
"epoch": 0.04743443557582668,
"grad_norm": 0.2734687328338623,
"learning_rate": 9.890738003669029e-05,
"loss": 1.1315,
"step": 52
},
{
"epoch": 0.0483466362599772,
"grad_norm": 0.3112577199935913,
"learning_rate": 9.883829406604363e-05,
"loss": 1.2837,
"step": 53
},
{
"epoch": 0.04925883694412771,
"grad_norm": 0.2956832945346832,
"learning_rate": 9.876711602542563e-05,
"loss": 1.6056,
"step": 54
},
{
"epoch": 0.05017103762827822,
"grad_norm": 0.27159854769706726,
"learning_rate": 9.869384896386668e-05,
"loss": 1.4008,
"step": 55
},
{
"epoch": 0.05108323831242873,
"grad_norm": 0.23057343065738678,
"learning_rate": 9.861849601988383e-05,
"loss": 1.3612,
"step": 56
},
{
"epoch": 0.05199543899657925,
"grad_norm": 0.22093012928962708,
"learning_rate": 9.854106042134641e-05,
"loss": 1.3945,
"step": 57
},
{
"epoch": 0.052907639680729764,
"grad_norm": 0.24365180730819702,
"learning_rate": 9.846154548533773e-05,
"loss": 1.4811,
"step": 58
},
{
"epoch": 0.05381984036488027,
"grad_norm": 0.255241721868515,
"learning_rate": 9.837995461801299e-05,
"loss": 1.3864,
"step": 59
},
{
"epoch": 0.05473204104903079,
"grad_norm": 0.2505890429019928,
"learning_rate": 9.829629131445342e-05,
"loss": 1.3,
"step": 60
},
{
"epoch": 0.0556442417331813,
"grad_norm": 0.19075927138328552,
"learning_rate": 9.821055915851647e-05,
"loss": 1.1616,
"step": 61
},
{
"epoch": 0.05655644241733181,
"grad_norm": 0.19343258440494537,
"learning_rate": 9.812276182268236e-05,
"loss": 1.0507,
"step": 62
},
{
"epoch": 0.05746864310148233,
"grad_norm": 0.14569373428821564,
"learning_rate": 9.803290306789676e-05,
"loss": 1.1448,
"step": 63
},
{
"epoch": 0.05838084378563284,
"grad_norm": 0.1645849347114563,
"learning_rate": 9.794098674340965e-05,
"loss": 1.2202,
"step": 64
},
{
"epoch": 0.059293044469783354,
"grad_norm": 0.1657133549451828,
"learning_rate": 9.784701678661045e-05,
"loss": 0.9998,
"step": 65
},
{
"epoch": 0.06020524515393386,
"grad_norm": 0.14773251116275787,
"learning_rate": 9.775099722285935e-05,
"loss": 1.0601,
"step": 66
},
{
"epoch": 0.06111744583808438,
"grad_norm": 0.14379927515983582,
"learning_rate": 9.765293216531486e-05,
"loss": 1.232,
"step": 67
},
{
"epoch": 0.062029646522234895,
"grad_norm": 0.14879746735095978,
"learning_rate": 9.755282581475769e-05,
"loss": 1.011,
"step": 68
},
{
"epoch": 0.0629418472063854,
"grad_norm": 0.14008712768554688,
"learning_rate": 9.74506824594107e-05,
"loss": 0.9511,
"step": 69
},
{
"epoch": 0.06385404789053592,
"grad_norm": 0.15255555510520935,
"learning_rate": 9.73465064747553e-05,
"loss": 0.983,
"step": 70
},
{
"epoch": 0.06476624857468644,
"grad_norm": 0.16574011743068695,
"learning_rate": 9.724030232334391e-05,
"loss": 1.0218,
"step": 71
},
{
"epoch": 0.06567844925883694,
"grad_norm": 0.16048061847686768,
"learning_rate": 9.713207455460894e-05,
"loss": 1.1199,
"step": 72
},
{
"epoch": 0.06659064994298745,
"grad_norm": 0.18178284168243408,
"learning_rate": 9.702182780466775e-05,
"loss": 1.0187,
"step": 73
},
{
"epoch": 0.06750285062713797,
"grad_norm": 0.15241661667823792,
"learning_rate": 9.690956679612421e-05,
"loss": 1.2068,
"step": 74
},
{
"epoch": 0.06841505131128849,
"grad_norm": 0.1547076255083084,
"learning_rate": 9.67952963378663e-05,
"loss": 0.9773,
"step": 75
},
{
"epoch": 0.069327251995439,
"grad_norm": 0.1607867181301117,
"learning_rate": 9.667902132486009e-05,
"loss": 1.1561,
"step": 76
},
{
"epoch": 0.0702394526795895,
"grad_norm": 0.17180593311786652,
"learning_rate": 9.656074673794018e-05,
"loss": 1.0215,
"step": 77
},
{
"epoch": 0.07115165336374002,
"grad_norm": 0.18671779334545135,
"learning_rate": 9.644047764359622e-05,
"loss": 1.0732,
"step": 78
},
{
"epoch": 0.07206385404789054,
"grad_norm": 0.22109557688236237,
"learning_rate": 9.631821919375591e-05,
"loss": 1.2983,
"step": 79
},
{
"epoch": 0.07297605473204105,
"grad_norm": 0.16936635971069336,
"learning_rate": 9.619397662556435e-05,
"loss": 1.0403,
"step": 80
},
{
"epoch": 0.07388825541619157,
"grad_norm": 0.20985794067382812,
"learning_rate": 9.606775526115963e-05,
"loss": 1.1105,
"step": 81
},
{
"epoch": 0.07480045610034207,
"grad_norm": 0.24890471994876862,
"learning_rate": 9.593956050744492e-05,
"loss": 1.0215,
"step": 82
},
{
"epoch": 0.07571265678449259,
"grad_norm": 0.204713374376297,
"learning_rate": 9.580939785585681e-05,
"loss": 1.1271,
"step": 83
},
{
"epoch": 0.0766248574686431,
"grad_norm": 0.2502953112125397,
"learning_rate": 9.567727288213005e-05,
"loss": 1.2075,
"step": 84
},
{
"epoch": 0.07753705815279362,
"grad_norm": 0.2343108057975769,
"learning_rate": 9.554319124605879e-05,
"loss": 1.2663,
"step": 85
},
{
"epoch": 0.07844925883694413,
"grad_norm": 0.15296907722949982,
"learning_rate": 9.540715869125407e-05,
"loss": 1.2773,
"step": 86
},
{
"epoch": 0.07936145952109463,
"grad_norm": 0.13890579342842102,
"learning_rate": 9.526918104489777e-05,
"loss": 1.0958,
"step": 87
},
{
"epoch": 0.08027366020524515,
"grad_norm": 0.14783431589603424,
"learning_rate": 9.512926421749304e-05,
"loss": 1.1379,
"step": 88
},
{
"epoch": 0.08118586088939567,
"grad_norm": 0.1457366943359375,
"learning_rate": 9.498741420261108e-05,
"loss": 1.1979,
"step": 89
},
{
"epoch": 0.08209806157354618,
"grad_norm": 0.14700822532176971,
"learning_rate": 9.484363707663442e-05,
"loss": 1.2511,
"step": 90
},
{
"epoch": 0.0830102622576967,
"grad_norm": 0.14076313376426697,
"learning_rate": 9.469793899849661e-05,
"loss": 1.273,
"step": 91
},
{
"epoch": 0.0839224629418472,
"grad_norm": 0.1587403565645218,
"learning_rate": 9.45503262094184e-05,
"loss": 1.2475,
"step": 92
},
{
"epoch": 0.08483466362599772,
"grad_norm": 0.1829884648323059,
"learning_rate": 9.440080503264037e-05,
"loss": 1.3541,
"step": 93
},
{
"epoch": 0.08574686431014823,
"grad_norm": 0.15041080117225647,
"learning_rate": 9.42493818731521e-05,
"loss": 1.3175,
"step": 94
},
{
"epoch": 0.08665906499429875,
"grad_norm": 0.16183224320411682,
"learning_rate": 9.409606321741775e-05,
"loss": 1.3011,
"step": 95
},
{
"epoch": 0.08757126567844926,
"grad_norm": 0.17238490283489227,
"learning_rate": 9.394085563309827e-05,
"loss": 1.441,
"step": 96
},
{
"epoch": 0.08848346636259977,
"grad_norm": 0.1964457482099533,
"learning_rate": 9.378376576876999e-05,
"loss": 1.3421,
"step": 97
},
{
"epoch": 0.08939566704675028,
"grad_norm": 0.2645294964313507,
"learning_rate": 9.362480035363986e-05,
"loss": 1.5238,
"step": 98
},
{
"epoch": 0.0903078677309008,
"grad_norm": 0.4722297191619873,
"learning_rate": 9.34639661972572e-05,
"loss": 1.6759,
"step": 99
},
{
"epoch": 0.09122006841505131,
"grad_norm": 1.4135944843292236,
"learning_rate": 9.330127018922194e-05,
"loss": 2.271,
"step": 100
},
{
"epoch": 0.09122006841505131,
"eval_loss": 1.2722834348678589,
"eval_runtime": 113.025,
"eval_samples_per_second": 16.333,
"eval_steps_per_second": 4.088,
"step": 100
},
{
"epoch": 0.09213226909920183,
"grad_norm": 0.12369579821825027,
"learning_rate": 9.31367192988896e-05,
"loss": 0.996,
"step": 101
},
{
"epoch": 0.09304446978335233,
"grad_norm": 0.12814058363437653,
"learning_rate": 9.297032057507264e-05,
"loss": 1.0837,
"step": 102
},
{
"epoch": 0.09395667046750285,
"grad_norm": 0.14010389149188995,
"learning_rate": 9.280208114573859e-05,
"loss": 1.2772,
"step": 103
},
{
"epoch": 0.09486887115165336,
"grad_norm": 0.15505646169185638,
"learning_rate": 9.263200821770461e-05,
"loss": 1.3608,
"step": 104
},
{
"epoch": 0.09578107183580388,
"grad_norm": 0.16221708059310913,
"learning_rate": 9.246010907632895e-05,
"loss": 1.3107,
"step": 105
},
{
"epoch": 0.0966932725199544,
"grad_norm": 0.15704494714736938,
"learning_rate": 9.228639108519868e-05,
"loss": 1.1564,
"step": 106
},
{
"epoch": 0.0976054732041049,
"grad_norm": 0.15318067371845245,
"learning_rate": 9.211086168581433e-05,
"loss": 1.2841,
"step": 107
},
{
"epoch": 0.09851767388825541,
"grad_norm": 0.16971643269062042,
"learning_rate": 9.193352839727121e-05,
"loss": 1.3776,
"step": 108
},
{
"epoch": 0.09942987457240593,
"grad_norm": 0.17601683735847473,
"learning_rate": 9.175439881593716e-05,
"loss": 1.1342,
"step": 109
},
{
"epoch": 0.10034207525655645,
"grad_norm": 0.2042451649904251,
"learning_rate": 9.157348061512727e-05,
"loss": 1.3205,
"step": 110
},
{
"epoch": 0.10125427594070696,
"grad_norm": 0.20541740953922272,
"learning_rate": 9.139078154477512e-05,
"loss": 1.2256,
"step": 111
},
{
"epoch": 0.10216647662485746,
"grad_norm": 0.1825324296951294,
"learning_rate": 9.120630943110077e-05,
"loss": 1.1943,
"step": 112
},
{
"epoch": 0.10307867730900798,
"grad_norm": 0.17503520846366882,
"learning_rate": 9.102007217627568e-05,
"loss": 1.0749,
"step": 113
},
{
"epoch": 0.1039908779931585,
"grad_norm": 0.1890123039484024,
"learning_rate": 9.083207775808396e-05,
"loss": 1.0601,
"step": 114
},
{
"epoch": 0.10490307867730901,
"grad_norm": 0.15623880922794342,
"learning_rate": 9.064233422958077e-05,
"loss": 0.8245,
"step": 115
},
{
"epoch": 0.10581527936145953,
"grad_norm": 0.1406368911266327,
"learning_rate": 9.045084971874738e-05,
"loss": 1.0316,
"step": 116
},
{
"epoch": 0.10672748004561003,
"grad_norm": 0.1501624435186386,
"learning_rate": 9.025763242814291e-05,
"loss": 1.1908,
"step": 117
},
{
"epoch": 0.10763968072976055,
"grad_norm": 0.13635428249835968,
"learning_rate": 9.006269063455304e-05,
"loss": 0.9685,
"step": 118
},
{
"epoch": 0.10855188141391106,
"grad_norm": 0.1702578067779541,
"learning_rate": 8.986603268863536e-05,
"loss": 1.0043,
"step": 119
},
{
"epoch": 0.10946408209806158,
"grad_norm": 0.16892701387405396,
"learning_rate": 8.966766701456177e-05,
"loss": 1.0515,
"step": 120
},
{
"epoch": 0.11037628278221209,
"grad_norm": 0.1571364402770996,
"learning_rate": 8.94676021096575e-05,
"loss": 0.9528,
"step": 121
},
{
"epoch": 0.1112884834663626,
"grad_norm": 0.15004131197929382,
"learning_rate": 8.926584654403724e-05,
"loss": 1.0623,
"step": 122
},
{
"epoch": 0.11220068415051311,
"grad_norm": 0.18099543452262878,
"learning_rate": 8.906240896023794e-05,
"loss": 1.0798,
"step": 123
},
{
"epoch": 0.11311288483466363,
"grad_norm": 0.16211232542991638,
"learning_rate": 8.885729807284856e-05,
"loss": 0.9695,
"step": 124
},
{
"epoch": 0.11402508551881414,
"grad_norm": 0.19721828401088715,
"learning_rate": 8.865052266813685e-05,
"loss": 1.0282,
"step": 125
},
{
"epoch": 0.11493728620296466,
"grad_norm": 0.16071362793445587,
"learning_rate": 8.844209160367299e-05,
"loss": 1.0692,
"step": 126
},
{
"epoch": 0.11584948688711516,
"grad_norm": 0.20482495427131653,
"learning_rate": 8.823201380795001e-05,
"loss": 1.1321,
"step": 127
},
{
"epoch": 0.11676168757126568,
"grad_norm": 0.19026656448841095,
"learning_rate": 8.802029828000156e-05,
"loss": 0.9361,
"step": 128
},
{
"epoch": 0.11767388825541619,
"grad_norm": 0.19571590423583984,
"learning_rate": 8.780695408901613e-05,
"loss": 1.0152,
"step": 129
},
{
"epoch": 0.11858608893956671,
"grad_norm": 0.2167755663394928,
"learning_rate": 8.759199037394887e-05,
"loss": 1.1986,
"step": 130
},
{
"epoch": 0.11949828962371722,
"grad_norm": 0.20209772884845734,
"learning_rate": 8.737541634312985e-05,
"loss": 0.9061,
"step": 131
},
{
"epoch": 0.12041049030786773,
"grad_norm": 0.2215677797794342,
"learning_rate": 8.715724127386972e-05,
"loss": 0.8564,
"step": 132
},
{
"epoch": 0.12132269099201824,
"grad_norm": 0.2394784688949585,
"learning_rate": 8.693747451206232e-05,
"loss": 1.1324,
"step": 133
},
{
"epoch": 0.12223489167616876,
"grad_norm": 0.2727131247520447,
"learning_rate": 8.671612547178428e-05,
"loss": 1.3143,
"step": 134
},
{
"epoch": 0.12314709236031927,
"grad_norm": 0.263237327337265,
"learning_rate": 8.649320363489179e-05,
"loss": 1.654,
"step": 135
},
{
"epoch": 0.12405929304446979,
"grad_norm": 0.14213962852954865,
"learning_rate": 8.626871855061438e-05,
"loss": 1.2624,
"step": 136
},
{
"epoch": 0.12497149372862029,
"grad_norm": 0.11428536474704742,
"learning_rate": 8.604267983514594e-05,
"loss": 1.1345,
"step": 137
},
{
"epoch": 0.1258836944127708,
"grad_norm": 0.11425229161977768,
"learning_rate": 8.581509717123273e-05,
"loss": 1.0768,
"step": 138
},
{
"epoch": 0.12679589509692132,
"grad_norm": 0.14232636988162994,
"learning_rate": 8.558598030775857e-05,
"loss": 1.2241,
"step": 139
},
{
"epoch": 0.12770809578107184,
"grad_norm": 0.14870019257068634,
"learning_rate": 8.535533905932738e-05,
"loss": 1.2843,
"step": 140
},
{
"epoch": 0.12862029646522236,
"grad_norm": 0.14005614817142487,
"learning_rate": 8.51231833058426e-05,
"loss": 1.348,
"step": 141
},
{
"epoch": 0.12953249714937287,
"grad_norm": 0.14191389083862305,
"learning_rate": 8.488952299208401e-05,
"loss": 1.3306,
"step": 142
},
{
"epoch": 0.1304446978335234,
"grad_norm": 0.14700360596179962,
"learning_rate": 8.46543681272818e-05,
"loss": 1.2998,
"step": 143
},
{
"epoch": 0.13135689851767388,
"grad_norm": 0.17052660882472992,
"learning_rate": 8.44177287846877e-05,
"loss": 1.3757,
"step": 144
},
{
"epoch": 0.1322690992018244,
"grad_norm": 0.17917169630527496,
"learning_rate": 8.417961510114356e-05,
"loss": 1.276,
"step": 145
},
{
"epoch": 0.1331812998859749,
"grad_norm": 0.1966487616300583,
"learning_rate": 8.39400372766471e-05,
"loss": 1.2529,
"step": 146
},
{
"epoch": 0.13409350057012542,
"grad_norm": 0.22469089925289154,
"learning_rate": 8.36990055739149e-05,
"loss": 1.4108,
"step": 147
},
{
"epoch": 0.13500570125427594,
"grad_norm": 0.29152804613113403,
"learning_rate": 8.345653031794292e-05,
"loss": 1.3991,
"step": 148
},
{
"epoch": 0.13591790193842646,
"grad_norm": 0.44599342346191406,
"learning_rate": 8.321262189556409e-05,
"loss": 1.5375,
"step": 149
},
{
"epoch": 0.13683010262257697,
"grad_norm": 0.9355916380882263,
"learning_rate": 8.296729075500344e-05,
"loss": 2.2142,
"step": 150
},
{
"epoch": 0.1377423033067275,
"grad_norm": 0.16966940462589264,
"learning_rate": 8.272054740543052e-05,
"loss": 1.0935,
"step": 151
},
{
"epoch": 0.138654503990878,
"grad_norm": 0.17415517568588257,
"learning_rate": 8.247240241650918e-05,
"loss": 1.069,
"step": 152
},
{
"epoch": 0.13956670467502852,
"grad_norm": 0.15520510077476501,
"learning_rate": 8.222286641794488e-05,
"loss": 1.1164,
"step": 153
},
{
"epoch": 0.140478905359179,
"grad_norm": 0.16199630498886108,
"learning_rate": 8.197195009902924e-05,
"loss": 1.2711,
"step": 154
},
{
"epoch": 0.14139110604332952,
"grad_norm": 0.20093193650245667,
"learning_rate": 8.171966420818228e-05,
"loss": 1.3726,
"step": 155
},
{
"epoch": 0.14230330672748004,
"grad_norm": 0.17293791472911835,
"learning_rate": 8.146601955249188e-05,
"loss": 1.2781,
"step": 156
},
{
"epoch": 0.14321550741163055,
"grad_norm": 0.20883025228977203,
"learning_rate": 8.121102699725089e-05,
"loss": 1.3765,
"step": 157
},
{
"epoch": 0.14412770809578107,
"grad_norm": 0.1882334053516388,
"learning_rate": 8.095469746549172e-05,
"loss": 1.2449,
"step": 158
},
{
"epoch": 0.1450399087799316,
"grad_norm": 0.20094580948352814,
"learning_rate": 8.069704193751832e-05,
"loss": 1.2535,
"step": 159
},
{
"epoch": 0.1459521094640821,
"grad_norm": 0.2629421055316925,
"learning_rate": 8.043807145043604e-05,
"loss": 1.3591,
"step": 160
},
{
"epoch": 0.14686431014823262,
"grad_norm": 0.6893475651741028,
"learning_rate": 8.017779709767858e-05,
"loss": 2.445,
"step": 161
},
{
"epoch": 0.14777651083238313,
"grad_norm": 0.14671513438224792,
"learning_rate": 7.991623002853296e-05,
"loss": 0.9265,
"step": 162
},
{
"epoch": 0.14868871151653365,
"grad_norm": 0.17360147833824158,
"learning_rate": 7.965338144766186e-05,
"loss": 0.9184,
"step": 163
},
{
"epoch": 0.14960091220068414,
"grad_norm": 0.16476592421531677,
"learning_rate": 7.938926261462366e-05,
"loss": 1.0991,
"step": 164
},
{
"epoch": 0.15051311288483465,
"grad_norm": 0.18807539343833923,
"learning_rate": 7.912388484339012e-05,
"loss": 1.1164,
"step": 165
},
{
"epoch": 0.15142531356898517,
"grad_norm": 0.14939163625240326,
"learning_rate": 7.88572595018617e-05,
"loss": 1.0408,
"step": 166
},
{
"epoch": 0.1523375142531357,
"grad_norm": 0.16947825253009796,
"learning_rate": 7.858939801138061e-05,
"loss": 0.937,
"step": 167
},
{
"epoch": 0.1532497149372862,
"grad_norm": 0.13935978710651398,
"learning_rate": 7.832031184624164e-05,
"loss": 0.9394,
"step": 168
},
{
"epoch": 0.15416191562143672,
"grad_norm": 0.16934405267238617,
"learning_rate": 7.80500125332005e-05,
"loss": 1.1168,
"step": 169
},
{
"epoch": 0.15507411630558723,
"grad_norm": 0.16050076484680176,
"learning_rate": 7.777851165098012e-05,
"loss": 0.8851,
"step": 170
},
{
"epoch": 0.15598631698973775,
"grad_norm": 0.13356317579746246,
"learning_rate": 7.750582082977467e-05,
"loss": 0.8304,
"step": 171
},
{
"epoch": 0.15689851767388827,
"grad_norm": 0.14673428237438202,
"learning_rate": 7.723195175075136e-05,
"loss": 1.1082,
"step": 172
},
{
"epoch": 0.15781071835803878,
"grad_norm": 0.18469220399856567,
"learning_rate": 7.695691614555003e-05,
"loss": 1.314,
"step": 173
},
{
"epoch": 0.15872291904218927,
"grad_norm": 0.15937888622283936,
"learning_rate": 7.668072579578058e-05,
"loss": 0.917,
"step": 174
},
{
"epoch": 0.15963511972633979,
"grad_norm": 0.17641136050224304,
"learning_rate": 7.64033925325184e-05,
"loss": 1.0633,
"step": 175
},
{
"epoch": 0.1605473204104903,
"grad_norm": 0.13878133893013,
"learning_rate": 7.612492823579745e-05,
"loss": 0.6571,
"step": 176
},
{
"epoch": 0.16145952109464082,
"grad_norm": 0.19035881757736206,
"learning_rate": 7.584534483410137e-05,
"loss": 1.0225,
"step": 177
},
{
"epoch": 0.16237172177879133,
"grad_norm": 0.19843564927577972,
"learning_rate": 7.55646543038526e-05,
"loss": 1.0645,
"step": 178
},
{
"epoch": 0.16328392246294185,
"grad_norm": 0.18366238474845886,
"learning_rate": 7.528286866889924e-05,
"loss": 0.91,
"step": 179
},
{
"epoch": 0.16419612314709237,
"grad_norm": 0.2137911468744278,
"learning_rate": 7.500000000000001e-05,
"loss": 0.9201,
"step": 180
},
{
"epoch": 0.16510832383124288,
"grad_norm": 0.19734999537467957,
"learning_rate": 7.471606041430723e-05,
"loss": 1.1128,
"step": 181
},
{
"epoch": 0.1660205245153934,
"grad_norm": 0.21062205731868744,
"learning_rate": 7.443106207484776e-05,
"loss": 1.0556,
"step": 182
},
{
"epoch": 0.1669327251995439,
"grad_norm": 0.24208199977874756,
"learning_rate": 7.414501719000187e-05,
"loss": 1.1084,
"step": 183
},
{
"epoch": 0.1678449258836944,
"grad_norm": 0.3003711998462677,
"learning_rate": 7.385793801298042e-05,
"loss": 1.2188,
"step": 184
},
{
"epoch": 0.16875712656784492,
"grad_norm": 0.12483101338148117,
"learning_rate": 7.35698368412999e-05,
"loss": 1.242,
"step": 185
},
{
"epoch": 0.16966932725199543,
"grad_norm": 0.10687603801488876,
"learning_rate": 7.328072601625557e-05,
"loss": 1.0969,
"step": 186
},
{
"epoch": 0.17058152793614595,
"grad_norm": 0.10687136650085449,
"learning_rate": 7.2990617922393e-05,
"loss": 1.2887,
"step": 187
},
{
"epoch": 0.17149372862029647,
"grad_norm": 0.12314095348119736,
"learning_rate": 7.269952498697734e-05,
"loss": 1.2609,
"step": 188
},
{
"epoch": 0.17240592930444698,
"grad_norm": 0.1314985156059265,
"learning_rate": 7.240745967946113e-05,
"loss": 1.2694,
"step": 189
},
{
"epoch": 0.1733181299885975,
"grad_norm": 0.12387102842330933,
"learning_rate": 7.211443451095007e-05,
"loss": 1.2707,
"step": 190
},
{
"epoch": 0.174230330672748,
"grad_norm": 0.16482983529567719,
"learning_rate": 7.18204620336671e-05,
"loss": 1.1994,
"step": 191
},
{
"epoch": 0.17514253135689853,
"grad_norm": 0.16161847114562988,
"learning_rate": 7.152555484041476e-05,
"loss": 1.3605,
"step": 192
},
{
"epoch": 0.17605473204104904,
"grad_norm": 0.16728828847408295,
"learning_rate": 7.122972556403567e-05,
"loss": 1.1442,
"step": 193
},
{
"epoch": 0.17696693272519953,
"grad_norm": 0.18906857073307037,
"learning_rate": 7.09329868768714e-05,
"loss": 1.1403,
"step": 194
},
{
"epoch": 0.17787913340935005,
"grad_norm": 0.21785447001457214,
"learning_rate": 7.063535149021973e-05,
"loss": 1.4139,
"step": 195
},
{
"epoch": 0.17879133409350056,
"grad_norm": 0.21159727871418,
"learning_rate": 7.033683215379002e-05,
"loss": 1.4149,
"step": 196
},
{
"epoch": 0.17970353477765108,
"grad_norm": 0.2096451222896576,
"learning_rate": 7.003744165515705e-05,
"loss": 1.1846,
"step": 197
},
{
"epoch": 0.1806157354618016,
"grad_norm": 0.30022695660591125,
"learning_rate": 6.973719281921335e-05,
"loss": 1.5039,
"step": 198
},
{
"epoch": 0.1815279361459521,
"grad_norm": 0.39245763421058655,
"learning_rate": 6.943609850761979e-05,
"loss": 1.4439,
"step": 199
},
{
"epoch": 0.18244013683010263,
"grad_norm": 0.7122502326965332,
"learning_rate": 6.91341716182545e-05,
"loss": 1.7626,
"step": 200
},
{
"epoch": 0.18244013683010263,
"eval_loss": 1.1971056461334229,
"eval_runtime": 113.4472,
"eval_samples_per_second": 16.272,
"eval_steps_per_second": 4.072,
"step": 200
},
{
"epoch": 0.18335233751425314,
"grad_norm": 0.11290186643600464,
"learning_rate": 6.883142508466054e-05,
"loss": 0.8787,
"step": 201
},
{
"epoch": 0.18426453819840366,
"grad_norm": 0.12823708355426788,
"learning_rate": 6.852787187549182e-05,
"loss": 0.8172,
"step": 202
},
{
"epoch": 0.18517673888255415,
"grad_norm": 0.1377316415309906,
"learning_rate": 6.82235249939575e-05,
"loss": 1.0454,
"step": 203
},
{
"epoch": 0.18608893956670466,
"grad_norm": 0.1476827710866928,
"learning_rate": 6.7918397477265e-05,
"loss": 1.2703,
"step": 204
},
{
"epoch": 0.18700114025085518,
"grad_norm": 0.14928844571113586,
"learning_rate": 6.761250239606169e-05,
"loss": 1.3711,
"step": 205
},
{
"epoch": 0.1879133409350057,
"grad_norm": 0.16220971941947937,
"learning_rate": 6.730585285387465e-05,
"loss": 1.1761,
"step": 206
},
{
"epoch": 0.1888255416191562,
"grad_norm": 0.15594278275966644,
"learning_rate": 6.699846198654971e-05,
"loss": 1.159,
"step": 207
},
{
"epoch": 0.18973774230330673,
"grad_norm": 0.22083112597465515,
"learning_rate": 6.669034296168855e-05,
"loss": 1.3703,
"step": 208
},
{
"epoch": 0.19064994298745724,
"grad_norm": 0.2218848466873169,
"learning_rate": 6.638150897808468e-05,
"loss": 1.0984,
"step": 209
},
{
"epoch": 0.19156214367160776,
"grad_norm": 0.3727222979068756,
"learning_rate": 6.607197326515808e-05,
"loss": 1.4564,
"step": 210
},
{
"epoch": 0.19247434435575828,
"grad_norm": 0.14719393849372864,
"learning_rate": 6.57617490823885e-05,
"loss": 1.0078,
"step": 211
},
{
"epoch": 0.1933865450399088,
"grad_norm": 0.13103777170181274,
"learning_rate": 6.545084971874738e-05,
"loss": 0.9745,
"step": 212
},
{
"epoch": 0.19429874572405928,
"grad_norm": 0.13376685976982117,
"learning_rate": 6.513928849212873e-05,
"loss": 0.9558,
"step": 213
},
{
"epoch": 0.1952109464082098,
"grad_norm": 0.1713114231824875,
"learning_rate": 6.482707874877854e-05,
"loss": 1.0795,
"step": 214
},
{
"epoch": 0.1961231470923603,
"grad_norm": 0.14341691136360168,
"learning_rate": 6.451423386272312e-05,
"loss": 1.0165,
"step": 215
},
{
"epoch": 0.19703534777651083,
"grad_norm": 0.14939330518245697,
"learning_rate": 6.420076723519614e-05,
"loss": 0.9657,
"step": 216
},
{
"epoch": 0.19794754846066134,
"grad_norm": 0.1301591396331787,
"learning_rate": 6.388669229406462e-05,
"loss": 0.757,
"step": 217
},
{
"epoch": 0.19885974914481186,
"grad_norm": 0.160866379737854,
"learning_rate": 6.357202249325371e-05,
"loss": 0.965,
"step": 218
},
{
"epoch": 0.19977194982896238,
"grad_norm": 0.21365958452224731,
"learning_rate": 6.32567713121704e-05,
"loss": 1.1012,
"step": 219
},
{
"epoch": 0.2006841505131129,
"grad_norm": 0.16345688700675964,
"learning_rate": 6.294095225512603e-05,
"loss": 1.0251,
"step": 220
},
{
"epoch": 0.2015963511972634,
"grad_norm": 0.1504313200712204,
"learning_rate": 6.26245788507579e-05,
"loss": 1.0471,
"step": 221
},
{
"epoch": 0.20250855188141392,
"grad_norm": 0.15645664930343628,
"learning_rate": 6.230766465144967e-05,
"loss": 0.932,
"step": 222
},
{
"epoch": 0.2034207525655644,
"grad_norm": 0.16272133588790894,
"learning_rate": 6.199022323275083e-05,
"loss": 0.9476,
"step": 223
},
{
"epoch": 0.20433295324971493,
"grad_norm": 0.15730346739292145,
"learning_rate": 6.167226819279528e-05,
"loss": 0.9678,
"step": 224
},
{
"epoch": 0.20524515393386544,
"grad_norm": 0.16858989000320435,
"learning_rate": 6.135381315171867e-05,
"loss": 1.0503,
"step": 225
},
{
"epoch": 0.20615735461801596,
"grad_norm": 0.18682138621807098,
"learning_rate": 6.103487175107507e-05,
"loss": 1.0477,
"step": 226
},
{
"epoch": 0.20706955530216647,
"grad_norm": 0.17781555652618408,
"learning_rate": 6.071545765325254e-05,
"loss": 0.9751,
"step": 227
},
{
"epoch": 0.207981755986317,
"grad_norm": 0.22681044042110443,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.1294,
"step": 228
},
{
"epoch": 0.2088939566704675,
"grad_norm": 0.20905116200447083,
"learning_rate": 6.007526611628086e-05,
"loss": 1.0836,
"step": 229
},
{
"epoch": 0.20980615735461802,
"grad_norm": 0.17305400967597961,
"learning_rate": 5.9754516100806423e-05,
"loss": 1.0323,
"step": 230
},
{
"epoch": 0.21071835803876854,
"grad_norm": 0.1822892129421234,
"learning_rate": 5.9433348234327765e-05,
"loss": 0.9336,
"step": 231
},
{
"epoch": 0.21163055872291905,
"grad_norm": 0.2400861382484436,
"learning_rate": 5.911177627460739e-05,
"loss": 1.0643,
"step": 232
},
{
"epoch": 0.21254275940706954,
"grad_norm": 0.19734802842140198,
"learning_rate": 5.8789813996717736e-05,
"loss": 1.1696,
"step": 233
},
{
"epoch": 0.21345496009122006,
"grad_norm": 0.24006140232086182,
"learning_rate": 5.8467475192451226e-05,
"loss": 1.3169,
"step": 234
},
{
"epoch": 0.21436716077537057,
"grad_norm": 0.1660918891429901,
"learning_rate": 5.814477366972945e-05,
"loss": 1.2928,
"step": 235
},
{
"epoch": 0.2152793614595211,
"grad_norm": 0.10857274383306503,
"learning_rate": 5.782172325201155e-05,
"loss": 1.1304,
"step": 236
},
{
"epoch": 0.2161915621436716,
"grad_norm": 0.09754611551761627,
"learning_rate": 5.749833777770225e-05,
"loss": 1.0516,
"step": 237
},
{
"epoch": 0.21710376282782212,
"grad_norm": 0.10842633992433548,
"learning_rate": 5.717463109955896e-05,
"loss": 1.2546,
"step": 238
},
{
"epoch": 0.21801596351197264,
"grad_norm": 0.11293677240610123,
"learning_rate": 5.685061708409841e-05,
"loss": 1.2007,
"step": 239
},
{
"epoch": 0.21892816419612315,
"grad_norm": 0.11466323584318161,
"learning_rate": 5.6526309611002594e-05,
"loss": 1.2429,
"step": 240
},
{
"epoch": 0.21984036488027367,
"grad_norm": 0.12489130347967148,
"learning_rate": 5.6201722572524275e-05,
"loss": 1.2082,
"step": 241
},
{
"epoch": 0.22075256556442419,
"grad_norm": 0.14694885909557343,
"learning_rate": 5.587686987289189e-05,
"loss": 1.2742,
"step": 242
},
{
"epoch": 0.22166476624857467,
"grad_norm": 0.14593251049518585,
"learning_rate": 5.5551765427713884e-05,
"loss": 1.3337,
"step": 243
},
{
"epoch": 0.2225769669327252,
"grad_norm": 0.15233193337917328,
"learning_rate": 5.522642316338268e-05,
"loss": 1.1762,
"step": 244
},
{
"epoch": 0.2234891676168757,
"grad_norm": 0.16144950687885284,
"learning_rate": 5.490085701647805e-05,
"loss": 1.2968,
"step": 245
},
{
"epoch": 0.22440136830102622,
"grad_norm": 0.16887733340263367,
"learning_rate": 5.457508093317013e-05,
"loss": 1.312,
"step": 246
},
{
"epoch": 0.22531356898517674,
"grad_norm": 0.24274863302707672,
"learning_rate": 5.4249108868622086e-05,
"loss": 1.4066,
"step": 247
},
{
"epoch": 0.22622576966932725,
"grad_norm": 0.24290831387043,
"learning_rate": 5.392295478639225e-05,
"loss": 1.3587,
"step": 248
},
{
"epoch": 0.22713797035347777,
"grad_norm": 0.35980361700057983,
"learning_rate": 5.359663265783598e-05,
"loss": 1.545,
"step": 249
},
{
"epoch": 0.22805017103762829,
"grad_norm": 1.2798198461532593,
"learning_rate": 5.327015646150716e-05,
"loss": 1.8089,
"step": 250
},
{
"epoch": 0.2289623717217788,
"grad_norm": 0.15012221038341522,
"learning_rate": 5.294354018255945e-05,
"loss": 1.1607,
"step": 251
},
{
"epoch": 0.22987457240592932,
"grad_norm": 0.14231781661510468,
"learning_rate": 5.26167978121472e-05,
"loss": 1.0741,
"step": 252
},
{
"epoch": 0.2307867730900798,
"grad_norm": 0.1374557614326477,
"learning_rate": 5.228994334682604e-05,
"loss": 1.1327,
"step": 253
},
{
"epoch": 0.23169897377423032,
"grad_norm": 0.16241897642612457,
"learning_rate": 5.196299078795344e-05,
"loss": 1.1431,
"step": 254
},
{
"epoch": 0.23261117445838084,
"grad_norm": 0.19717371463775635,
"learning_rate": 5.1635954141088813e-05,
"loss": 1.2522,
"step": 255
},
{
"epoch": 0.23352337514253135,
"grad_norm": 0.16350847482681274,
"learning_rate": 5.1308847415393666e-05,
"loss": 1.1139,
"step": 256
},
{
"epoch": 0.23443557582668187,
"grad_norm": 0.2092381715774536,
"learning_rate": 5.0981684623031415e-05,
"loss": 1.4112,
"step": 257
},
{
"epoch": 0.23534777651083238,
"grad_norm": 0.1716463714838028,
"learning_rate": 5.0654479778567223e-05,
"loss": 1.0679,
"step": 258
},
{
"epoch": 0.2362599771949829,
"grad_norm": 0.2410898059606552,
"learning_rate": 5.0327246898367597e-05,
"loss": 1.5,
"step": 259
},
{
"epoch": 0.23717217787913342,
"grad_norm": 0.21982981264591217,
"learning_rate": 5e-05,
"loss": 1.1005,
"step": 260
},
{
"epoch": 0.23808437856328393,
"grad_norm": 0.3865964412689209,
"learning_rate": 4.9672753101632415e-05,
"loss": 1.3476,
"step": 261
},
{
"epoch": 0.23899657924743445,
"grad_norm": 0.2032884657382965,
"learning_rate": 4.934552022143279e-05,
"loss": 0.9498,
"step": 262
},
{
"epoch": 0.23990877993158494,
"grad_norm": 0.1798202246427536,
"learning_rate": 4.901831537696859e-05,
"loss": 1.153,
"step": 263
},
{
"epoch": 0.24082098061573545,
"grad_norm": 0.16176198422908783,
"learning_rate": 4.869115258460635e-05,
"loss": 0.9052,
"step": 264
},
{
"epoch": 0.24173318129988597,
"grad_norm": 0.18311551213264465,
"learning_rate": 4.83640458589112e-05,
"loss": 1.1653,
"step": 265
},
{
"epoch": 0.24264538198403648,
"grad_norm": 0.1691948026418686,
"learning_rate": 4.8037009212046586e-05,
"loss": 0.9367,
"step": 266
},
{
"epoch": 0.243557582668187,
"grad_norm": 0.144092857837677,
"learning_rate": 4.7710056653173976e-05,
"loss": 0.9161,
"step": 267
},
{
"epoch": 0.24446978335233752,
"grad_norm": 0.16229337453842163,
"learning_rate": 4.738320218785281e-05,
"loss": 0.9686,
"step": 268
},
{
"epoch": 0.24538198403648803,
"grad_norm": 0.14550195634365082,
"learning_rate": 4.7056459817440544e-05,
"loss": 0.7886,
"step": 269
},
{
"epoch": 0.24629418472063855,
"grad_norm": 0.15839822590351105,
"learning_rate": 4.6729843538492847e-05,
"loss": 1.0488,
"step": 270
},
{
"epoch": 0.24720638540478906,
"grad_norm": 0.16177891194820404,
"learning_rate": 4.640336734216403e-05,
"loss": 1.0537,
"step": 271
},
{
"epoch": 0.24811858608893958,
"grad_norm": 0.15026786923408508,
"learning_rate": 4.607704521360776e-05,
"loss": 0.9876,
"step": 272
},
{
"epoch": 0.24903078677309007,
"grad_norm": 0.16026102006435394,
"learning_rate": 4.575089113137792e-05,
"loss": 0.9325,
"step": 273
},
{
"epoch": 0.24994298745724058,
"grad_norm": 0.14951908588409424,
"learning_rate": 4.542491906682989e-05,
"loss": 0.9553,
"step": 274
},
{
"epoch": 0.2508551881413911,
"grad_norm": 0.1985773742198944,
"learning_rate": 4.509914298352197e-05,
"loss": 1.078,
"step": 275
},
{
"epoch": 0.2517673888255416,
"grad_norm": 0.15566202998161316,
"learning_rate": 4.477357683661734e-05,
"loss": 0.9121,
"step": 276
},
{
"epoch": 0.25267958950969216,
"grad_norm": 0.15593530237674713,
"learning_rate": 4.444823457228612e-05,
"loss": 0.8146,
"step": 277
},
{
"epoch": 0.25359179019384265,
"grad_norm": 0.16978751122951508,
"learning_rate": 4.412313012710813e-05,
"loss": 0.8579,
"step": 278
},
{
"epoch": 0.25450399087799314,
"grad_norm": 0.22708088159561157,
"learning_rate": 4.379827742747575e-05,
"loss": 0.9367,
"step": 279
},
{
"epoch": 0.2554161915621437,
"grad_norm": 0.18289873003959656,
"learning_rate": 4.347369038899744e-05,
"loss": 1.1297,
"step": 280
},
{
"epoch": 0.25632839224629417,
"grad_norm": 0.19509181380271912,
"learning_rate": 4.3149382915901606e-05,
"loss": 0.9119,
"step": 281
},
{
"epoch": 0.2572405929304447,
"grad_norm": 0.19992905855178833,
"learning_rate": 4.282536890044104e-05,
"loss": 1.1763,
"step": 282
},
{
"epoch": 0.2581527936145952,
"grad_norm": 0.25321832299232483,
"learning_rate": 4.250166222229774e-05,
"loss": 1.1429,
"step": 283
},
{
"epoch": 0.25906499429874574,
"grad_norm": 0.26218289136886597,
"learning_rate": 4.2178276747988446e-05,
"loss": 1.2399,
"step": 284
},
{
"epoch": 0.25997719498289623,
"grad_norm": 0.2333952635526657,
"learning_rate": 4.185522633027057e-05,
"loss": 0.8556,
"step": 285
},
{
"epoch": 0.2608893956670468,
"grad_norm": 0.1518288403749466,
"learning_rate": 4.153252480754877e-05,
"loss": 1.2989,
"step": 286
},
{
"epoch": 0.26180159635119726,
"grad_norm": 0.1034812331199646,
"learning_rate": 4.1210186003282275e-05,
"loss": 1.168,
"step": 287
},
{
"epoch": 0.26271379703534775,
"grad_norm": 0.10131867974996567,
"learning_rate": 4.088822372539263e-05,
"loss": 1.0937,
"step": 288
},
{
"epoch": 0.2636259977194983,
"grad_norm": 0.11592473089694977,
"learning_rate": 4.0566651765672246e-05,
"loss": 1.1637,
"step": 289
},
{
"epoch": 0.2645381984036488,
"grad_norm": 0.1236988753080368,
"learning_rate": 4.0245483899193595e-05,
"loss": 1.2724,
"step": 290
},
{
"epoch": 0.2654503990877993,
"grad_norm": 0.13103049993515015,
"learning_rate": 3.992473388371915e-05,
"loss": 1.1731,
"step": 291
},
{
"epoch": 0.2663625997719498,
"grad_norm": 0.13575758039951324,
"learning_rate": 3.960441545911204e-05,
"loss": 1.256,
"step": 292
},
{
"epoch": 0.26727480045610036,
"grad_norm": 0.13911360502243042,
"learning_rate": 3.928454234674747e-05,
"loss": 1.329,
"step": 293
},
{
"epoch": 0.26818700114025085,
"grad_norm": 0.1553173065185547,
"learning_rate": 3.896512824892495e-05,
"loss": 1.2601,
"step": 294
},
{
"epoch": 0.2690992018244014,
"grad_norm": 0.22820927202701569,
"learning_rate": 3.864618684828134e-05,
"loss": 1.2263,
"step": 295
},
{
"epoch": 0.2700114025085519,
"grad_norm": 0.251654714345932,
"learning_rate": 3.832773180720475e-05,
"loss": 1.3326,
"step": 296
},
{
"epoch": 0.27092360319270237,
"grad_norm": 0.28499627113342285,
"learning_rate": 3.800977676724919e-05,
"loss": 1.673,
"step": 297
},
{
"epoch": 0.2718358038768529,
"grad_norm": 0.2628072500228882,
"learning_rate": 3.769233534855035e-05,
"loss": 1.4846,
"step": 298
},
{
"epoch": 0.2727480045610034,
"grad_norm": 0.3170837461948395,
"learning_rate": 3.73754211492421e-05,
"loss": 1.3636,
"step": 299
},
{
"epoch": 0.27366020524515394,
"grad_norm": 0.8047922253608704,
"learning_rate": 3.705904774487396e-05,
"loss": 1.1731,
"step": 300
},
{
"epoch": 0.27366020524515394,
"eval_loss": 1.1972854137420654,
"eval_runtime": 113.2028,
"eval_samples_per_second": 16.307,
"eval_steps_per_second": 4.081,
"step": 300
},
{
"epoch": 0.27457240592930443,
"grad_norm": 0.10662523657083511,
"learning_rate": 3.6743228687829595e-05,
"loss": 1.0097,
"step": 301
},
{
"epoch": 0.275484606613455,
"grad_norm": 0.12424086779356003,
"learning_rate": 3.642797750674629e-05,
"loss": 1.0579,
"step": 302
},
{
"epoch": 0.27639680729760546,
"grad_norm": 0.1385432481765747,
"learning_rate": 3.6113307705935396e-05,
"loss": 1.344,
"step": 303
},
{
"epoch": 0.277309007981756,
"grad_norm": 0.16241592168807983,
"learning_rate": 3.579923276480387e-05,
"loss": 1.4409,
"step": 304
},
{
"epoch": 0.2782212086659065,
"grad_norm": 0.15419059991836548,
"learning_rate": 3.5485766137276894e-05,
"loss": 1.3336,
"step": 305
},
{
"epoch": 0.27913340935005704,
"grad_norm": 0.18154077231884003,
"learning_rate": 3.5172921251221455e-05,
"loss": 1.3504,
"step": 306
},
{
"epoch": 0.2800456100342075,
"grad_norm": 0.1785464584827423,
"learning_rate": 3.486071150787128e-05,
"loss": 1.2279,
"step": 307
},
{
"epoch": 0.280957810718358,
"grad_norm": 0.20182940363883972,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.4907,
"step": 308
},
{
"epoch": 0.28187001140250856,
"grad_norm": 0.22802571952342987,
"learning_rate": 3.423825091761153e-05,
"loss": 1.4304,
"step": 309
},
{
"epoch": 0.28278221208665905,
"grad_norm": 0.41071566939353943,
"learning_rate": 3.392802673484193e-05,
"loss": 1.5391,
"step": 310
},
{
"epoch": 0.2836944127708096,
"grad_norm": 0.2370689958333969,
"learning_rate": 3.361849102191533e-05,
"loss": 1.1847,
"step": 311
},
{
"epoch": 0.2846066134549601,
"grad_norm": 0.18752293288707733,
"learning_rate": 3.330965703831146e-05,
"loss": 1.1175,
"step": 312
},
{
"epoch": 0.2855188141391106,
"grad_norm": 0.1348540335893631,
"learning_rate": 3.300153801345028e-05,
"loss": 0.9726,
"step": 313
},
{
"epoch": 0.2864310148232611,
"grad_norm": 0.14316678047180176,
"learning_rate": 3.2694147146125345e-05,
"loss": 1.0138,
"step": 314
},
{
"epoch": 0.28734321550741165,
"grad_norm": 0.13163286447525024,
"learning_rate": 3.2387497603938326e-05,
"loss": 0.8848,
"step": 315
},
{
"epoch": 0.28825541619156214,
"grad_norm": 0.19286373257637024,
"learning_rate": 3.2081602522734986e-05,
"loss": 1.105,
"step": 316
},
{
"epoch": 0.28916761687571263,
"grad_norm": 0.12224922329187393,
"learning_rate": 3.177647500604252e-05,
"loss": 0.6901,
"step": 317
},
{
"epoch": 0.2900798175598632,
"grad_norm": 0.1634187549352646,
"learning_rate": 3.147212812450819e-05,
"loss": 0.9563,
"step": 318
},
{
"epoch": 0.29099201824401366,
"grad_norm": 0.15377987921237946,
"learning_rate": 3.116857491533947e-05,
"loss": 0.8784,
"step": 319
},
{
"epoch": 0.2919042189281642,
"grad_norm": 0.13165193796157837,
"learning_rate": 3.086582838174551e-05,
"loss": 0.8488,
"step": 320
},
{
"epoch": 0.2928164196123147,
"grad_norm": 0.16833990812301636,
"learning_rate": 3.056390149238022e-05,
"loss": 1.1677,
"step": 321
},
{
"epoch": 0.29372862029646524,
"grad_norm": 0.15428762137889862,
"learning_rate": 3.0262807180786647e-05,
"loss": 0.934,
"step": 322
},
{
"epoch": 0.2946408209806157,
"grad_norm": 0.15865963697433472,
"learning_rate": 2.996255834484296e-05,
"loss": 1.0805,
"step": 323
},
{
"epoch": 0.29555302166476627,
"grad_norm": 0.14876452088356018,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.9789,
"step": 324
},
{
"epoch": 0.29646522234891676,
"grad_norm": 0.1823773831129074,
"learning_rate": 2.936464850978027e-05,
"loss": 1.0265,
"step": 325
},
{
"epoch": 0.2973774230330673,
"grad_norm": 0.17371368408203125,
"learning_rate": 2.9067013123128613e-05,
"loss": 0.9221,
"step": 326
},
{
"epoch": 0.2982896237172178,
"grad_norm": 0.19091551005840302,
"learning_rate": 2.8770274435964355e-05,
"loss": 0.9469,
"step": 327
},
{
"epoch": 0.2992018244013683,
"grad_norm": 0.18162409961223602,
"learning_rate": 2.8474445159585235e-05,
"loss": 1.0272,
"step": 328
},
{
"epoch": 0.3001140250855188,
"grad_norm": 0.16869843006134033,
"learning_rate": 2.8179537966332887e-05,
"loss": 1.1221,
"step": 329
},
{
"epoch": 0.3010262257696693,
"grad_norm": 0.1988871544599533,
"learning_rate": 2.7885565489049946e-05,
"loss": 0.9206,
"step": 330
},
{
"epoch": 0.30193842645381985,
"grad_norm": 0.17574124038219452,
"learning_rate": 2.759254032053888e-05,
"loss": 1.067,
"step": 331
},
{
"epoch": 0.30285062713797034,
"grad_norm": 0.20466162264347076,
"learning_rate": 2.7300475013022663e-05,
"loss": 1.0049,
"step": 332
},
{
"epoch": 0.3037628278221209,
"grad_norm": 0.30088433623313904,
"learning_rate": 2.700938207760701e-05,
"loss": 1.1633,
"step": 333
},
{
"epoch": 0.3046750285062714,
"grad_norm": 0.31139159202575684,
"learning_rate": 2.671927398374443e-05,
"loss": 1.1374,
"step": 334
},
{
"epoch": 0.3055872291904219,
"grad_norm": 0.20724207162857056,
"learning_rate": 2.6430163158700115e-05,
"loss": 1.379,
"step": 335
},
{
"epoch": 0.3064994298745724,
"grad_norm": 0.12832605838775635,
"learning_rate": 2.6142061987019577e-05,
"loss": 1.1881,
"step": 336
},
{
"epoch": 0.3074116305587229,
"grad_norm": 0.10497277975082397,
"learning_rate": 2.5854982809998153e-05,
"loss": 1.0799,
"step": 337
},
{
"epoch": 0.30832383124287344,
"grad_norm": 0.1079997569322586,
"learning_rate": 2.556893792515227e-05,
"loss": 1.1005,
"step": 338
},
{
"epoch": 0.3092360319270239,
"grad_norm": 0.10871066898107529,
"learning_rate": 2.5283939585692783e-05,
"loss": 1.0749,
"step": 339
},
{
"epoch": 0.31014823261117447,
"grad_norm": 0.11225587129592896,
"learning_rate": 2.500000000000001e-05,
"loss": 1.2332,
"step": 340
},
{
"epoch": 0.31106043329532496,
"grad_norm": 0.12466397881507874,
"learning_rate": 2.471713133110078e-05,
"loss": 1.2856,
"step": 341
},
{
"epoch": 0.3119726339794755,
"grad_norm": 0.14498235285282135,
"learning_rate": 2.4435345696147403e-05,
"loss": 1.243,
"step": 342
},
{
"epoch": 0.312884834663626,
"grad_norm": 0.1273733228445053,
"learning_rate": 2.4154655165898627e-05,
"loss": 1.199,
"step": 343
},
{
"epoch": 0.31379703534777653,
"grad_norm": 0.17925268411636353,
"learning_rate": 2.3875071764202563e-05,
"loss": 1.1705,
"step": 344
},
{
"epoch": 0.314709236031927,
"grad_norm": 0.18367154896259308,
"learning_rate": 2.3596607467481603e-05,
"loss": 1.412,
"step": 345
},
{
"epoch": 0.31562143671607756,
"grad_norm": 0.18743032217025757,
"learning_rate": 2.3319274204219428e-05,
"loss": 1.1635,
"step": 346
},
{
"epoch": 0.31653363740022805,
"grad_norm": 0.197452574968338,
"learning_rate": 2.3043083854449988e-05,
"loss": 1.3102,
"step": 347
},
{
"epoch": 0.31744583808437854,
"grad_norm": 0.26595744490623474,
"learning_rate": 2.2768048249248648e-05,
"loss": 1.279,
"step": 348
},
{
"epoch": 0.3183580387685291,
"grad_norm": 0.4088784158229828,
"learning_rate": 2.2494179170225333e-05,
"loss": 1.5259,
"step": 349
},
{
"epoch": 0.31927023945267957,
"grad_norm": 0.5466248393058777,
"learning_rate": 2.2221488349019903e-05,
"loss": 1.5536,
"step": 350
},
{
"epoch": 0.3201824401368301,
"grad_norm": 0.13032464683055878,
"learning_rate": 2.194998746679952e-05,
"loss": 1.1141,
"step": 351
},
{
"epoch": 0.3210946408209806,
"grad_norm": 0.1586756855249405,
"learning_rate": 2.167968815375837e-05,
"loss": 1.1245,
"step": 352
},
{
"epoch": 0.32200684150513115,
"grad_norm": 0.1475306749343872,
"learning_rate": 2.1410601988619394e-05,
"loss": 1.0502,
"step": 353
},
{
"epoch": 0.32291904218928164,
"grad_norm": 0.16401484608650208,
"learning_rate": 2.1142740498138324e-05,
"loss": 1.2346,
"step": 354
},
{
"epoch": 0.3238312428734322,
"grad_norm": 0.17974554002285004,
"learning_rate": 2.08761151566099e-05,
"loss": 1.1683,
"step": 355
},
{
"epoch": 0.32474344355758267,
"grad_norm": 0.17584644258022308,
"learning_rate": 2.061073738537635e-05,
"loss": 1.4097,
"step": 356
},
{
"epoch": 0.32565564424173316,
"grad_norm": 0.17970524728298187,
"learning_rate": 2.034661855233815e-05,
"loss": 1.2119,
"step": 357
},
{
"epoch": 0.3265678449258837,
"grad_norm": 0.1974216103553772,
"learning_rate": 2.008376997146705e-05,
"loss": 1.1943,
"step": 358
},
{
"epoch": 0.3274800456100342,
"grad_norm": 0.1703203320503235,
"learning_rate": 1.982220290232143e-05,
"loss": 0.9671,
"step": 359
},
{
"epoch": 0.32839224629418473,
"grad_norm": 0.2753969132900238,
"learning_rate": 1.9561928549563968e-05,
"loss": 1.3897,
"step": 360
},
{
"epoch": 0.3293044469783352,
"grad_norm": 0.3241826593875885,
"learning_rate": 1.9302958062481673e-05,
"loss": 1.1562,
"step": 361
},
{
"epoch": 0.33021664766248576,
"grad_norm": 0.2132183462381363,
"learning_rate": 1.9045302534508297e-05,
"loss": 0.7516,
"step": 362
},
{
"epoch": 0.33112884834663625,
"grad_norm": 0.14898429811000824,
"learning_rate": 1.8788973002749112e-05,
"loss": 1.1512,
"step": 363
},
{
"epoch": 0.3320410490307868,
"grad_norm": 0.1288880854845047,
"learning_rate": 1.8533980447508137e-05,
"loss": 0.8832,
"step": 364
},
{
"epoch": 0.3329532497149373,
"grad_norm": 0.15893982350826263,
"learning_rate": 1.8280335791817733e-05,
"loss": 1.1057,
"step": 365
},
{
"epoch": 0.3338654503990878,
"grad_norm": 0.15993352234363556,
"learning_rate": 1.8028049900970767e-05,
"loss": 0.9927,
"step": 366
},
{
"epoch": 0.3347776510832383,
"grad_norm": 0.14739666879177094,
"learning_rate": 1.777713358205514e-05,
"loss": 0.8977,
"step": 367
},
{
"epoch": 0.3356898517673888,
"grad_norm": 0.1271563321352005,
"learning_rate": 1.7527597583490822e-05,
"loss": 0.8456,
"step": 368
},
{
"epoch": 0.33660205245153935,
"grad_norm": 0.1543639749288559,
"learning_rate": 1.7279452594569483e-05,
"loss": 1.0841,
"step": 369
},
{
"epoch": 0.33751425313568983,
"grad_norm": 0.13747648894786835,
"learning_rate": 1.703270924499656e-05,
"loss": 0.8802,
"step": 370
},
{
"epoch": 0.3384264538198404,
"grad_norm": 0.1359814554452896,
"learning_rate": 1.678737810443593e-05,
"loss": 0.8192,
"step": 371
},
{
"epoch": 0.33933865450399087,
"grad_norm": 0.16268639266490936,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.8709,
"step": 372
},
{
"epoch": 0.3402508551881414,
"grad_norm": 0.1843179315328598,
"learning_rate": 1.6300994426085103e-05,
"loss": 0.8591,
"step": 373
},
{
"epoch": 0.3411630558722919,
"grad_norm": 0.19724664092063904,
"learning_rate": 1.605996272335291e-05,
"loss": 1.1374,
"step": 374
},
{
"epoch": 0.34207525655644244,
"grad_norm": 0.2107182741165161,
"learning_rate": 1.5820384898856434e-05,
"loss": 1.0738,
"step": 375
},
{
"epoch": 0.34298745724059293,
"grad_norm": 0.18194270133972168,
"learning_rate": 1.5582271215312294e-05,
"loss": 1.1537,
"step": 376
},
{
"epoch": 0.3438996579247434,
"grad_norm": 0.14367586374282837,
"learning_rate": 1.5345631872718214e-05,
"loss": 0.981,
"step": 377
},
{
"epoch": 0.34481185860889396,
"grad_norm": 0.1595829576253891,
"learning_rate": 1.5110477007916001e-05,
"loss": 0.9495,
"step": 378
},
{
"epoch": 0.34572405929304445,
"grad_norm": 0.16781240701675415,
"learning_rate": 1.4876816694157419e-05,
"loss": 0.8829,
"step": 379
},
{
"epoch": 0.346636259977195,
"grad_norm": 0.18402887880802155,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.0337,
"step": 380
},
{
"epoch": 0.3475484606613455,
"grad_norm": 0.21096819639205933,
"learning_rate": 1.4414019692241437e-05,
"loss": 1.235,
"step": 381
},
{
"epoch": 0.348460661345496,
"grad_norm": 0.20575056970119476,
"learning_rate": 1.4184902828767287e-05,
"loss": 1.1308,
"step": 382
},
{
"epoch": 0.3493728620296465,
"grad_norm": 0.23069782555103302,
"learning_rate": 1.3957320164854059e-05,
"loss": 0.7216,
"step": 383
},
{
"epoch": 0.35028506271379706,
"grad_norm": 0.2631959617137909,
"learning_rate": 1.373128144938563e-05,
"loss": 1.051,
"step": 384
},
{
"epoch": 0.35119726339794755,
"grad_norm": 0.2909131646156311,
"learning_rate": 1.3506796365108232e-05,
"loss": 1.4182,
"step": 385
},
{
"epoch": 0.3521094640820981,
"grad_norm": 0.1559658795595169,
"learning_rate": 1.3283874528215733e-05,
"loss": 1.2366,
"step": 386
},
{
"epoch": 0.3530216647662486,
"grad_norm": 0.09982962161302567,
"learning_rate": 1.3062525487937699e-05,
"loss": 1.1475,
"step": 387
},
{
"epoch": 0.35393386545039907,
"grad_norm": 0.10416360944509506,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.0829,
"step": 388
},
{
"epoch": 0.3548460661345496,
"grad_norm": 0.10417391359806061,
"learning_rate": 1.2624583656870154e-05,
"loss": 1.0939,
"step": 389
},
{
"epoch": 0.3557582668187001,
"grad_norm": 0.12138167768716812,
"learning_rate": 1.2408009626051137e-05,
"loss": 1.3434,
"step": 390
},
{
"epoch": 0.35667046750285064,
"grad_norm": 0.13164609670639038,
"learning_rate": 1.2193045910983863e-05,
"loss": 1.2702,
"step": 391
},
{
"epoch": 0.35758266818700113,
"grad_norm": 0.1307135969400406,
"learning_rate": 1.1979701719998453e-05,
"loss": 1.2093,
"step": 392
},
{
"epoch": 0.3584948688711517,
"grad_norm": 0.1524544358253479,
"learning_rate": 1.1767986192049984e-05,
"loss": 1.179,
"step": 393
},
{
"epoch": 0.35940706955530216,
"grad_norm": 0.16346591711044312,
"learning_rate": 1.1557908396327028e-05,
"loss": 1.2838,
"step": 394
},
{
"epoch": 0.3603192702394527,
"grad_norm": 0.18474753201007843,
"learning_rate": 1.134947733186315e-05,
"loss": 1.2491,
"step": 395
},
{
"epoch": 0.3612314709236032,
"grad_norm": 0.19478006660938263,
"learning_rate": 1.1142701927151456e-05,
"loss": 1.4218,
"step": 396
},
{
"epoch": 0.3621436716077537,
"grad_norm": 0.22533515095710754,
"learning_rate": 1.0937591039762085e-05,
"loss": 1.4451,
"step": 397
},
{
"epoch": 0.3630558722919042,
"grad_norm": 0.27334368228912354,
"learning_rate": 1.0734153455962765e-05,
"loss": 1.4695,
"step": 398
},
{
"epoch": 0.3639680729760547,
"grad_norm": 0.31324154138565063,
"learning_rate": 1.0532397890342505e-05,
"loss": 1.4504,
"step": 399
},
{
"epoch": 0.36488027366020526,
"grad_norm": 0.9359192848205566,
"learning_rate": 1.0332332985438248e-05,
"loss": 1.5549,
"step": 400
},
{
"epoch": 0.36488027366020526,
"eval_loss": 1.1766536235809326,
"eval_runtime": 112.3781,
"eval_samples_per_second": 16.427,
"eval_steps_per_second": 4.111,
"step": 400
},
{
"epoch": 0.36579247434435574,
"grad_norm": 0.12196308374404907,
"learning_rate": 1.013396731136465e-05,
"loss": 1.0011,
"step": 401
},
{
"epoch": 0.3667046750285063,
"grad_norm": 0.13182106614112854,
"learning_rate": 9.937309365446973e-06,
"loss": 1.1752,
"step": 402
},
{
"epoch": 0.3676168757126568,
"grad_norm": 0.14486649632453918,
"learning_rate": 9.742367571857091e-06,
"loss": 1.1526,
"step": 403
},
{
"epoch": 0.3685290763968073,
"grad_norm": 0.14698417484760284,
"learning_rate": 9.549150281252633e-06,
"loss": 1.2639,
"step": 404
},
{
"epoch": 0.3694412770809578,
"grad_norm": 0.16532011330127716,
"learning_rate": 9.357665770419244e-06,
"loss": 1.3237,
"step": 405
},
{
"epoch": 0.3703534777651083,
"grad_norm": 0.17021730542182922,
"learning_rate": 9.167922241916055e-06,
"loss": 1.3305,
"step": 406
},
{
"epoch": 0.37126567844925884,
"grad_norm": 0.1707400232553482,
"learning_rate": 8.97992782372432e-06,
"loss": 1.1334,
"step": 407
},
{
"epoch": 0.37217787913340933,
"grad_norm": 0.1890312284231186,
"learning_rate": 8.793690568899216e-06,
"loss": 1.2203,
"step": 408
},
{
"epoch": 0.37309007981755987,
"grad_norm": 0.2785188555717468,
"learning_rate": 8.609218455224893e-06,
"loss": 1.3662,
"step": 409
},
{
"epoch": 0.37400228050171036,
"grad_norm": 0.3217642307281494,
"learning_rate": 8.426519384872733e-06,
"loss": 1.5266,
"step": 410
},
{
"epoch": 0.3749144811858609,
"grad_norm": 0.637722909450531,
"learning_rate": 8.245601184062852e-06,
"loss": 1.7853,
"step": 411
},
{
"epoch": 0.3758266818700114,
"grad_norm": 0.15114322304725647,
"learning_rate": 8.066471602728803e-06,
"loss": 0.8234,
"step": 412
},
{
"epoch": 0.37673888255416194,
"grad_norm": 0.15969091653823853,
"learning_rate": 7.889138314185678e-06,
"loss": 1.0191,
"step": 413
},
{
"epoch": 0.3776510832383124,
"grad_norm": 0.16418735682964325,
"learning_rate": 7.71360891480134e-06,
"loss": 1.1389,
"step": 414
},
{
"epoch": 0.37856328392246297,
"grad_norm": 0.15018145740032196,
"learning_rate": 7.539890923671062e-06,
"loss": 1.0788,
"step": 415
},
{
"epoch": 0.37947548460661346,
"grad_norm": 0.14109903573989868,
"learning_rate": 7.367991782295391e-06,
"loss": 0.9719,
"step": 416
},
{
"epoch": 0.38038768529076394,
"grad_norm": 0.1540416181087494,
"learning_rate": 7.197918854261432e-06,
"loss": 1.0884,
"step": 417
},
{
"epoch": 0.3812998859749145,
"grad_norm": 0.14207249879837036,
"learning_rate": 7.029679424927365e-06,
"loss": 1.1356,
"step": 418
},
{
"epoch": 0.382212086659065,
"grad_norm": 0.14175021648406982,
"learning_rate": 6.863280701110408e-06,
"loss": 0.8289,
"step": 419
},
{
"epoch": 0.3831242873432155,
"grad_norm": 0.1370278298854828,
"learning_rate": 6.698729810778065e-06,
"loss": 0.9297,
"step": 420
},
{
"epoch": 0.384036488027366,
"grad_norm": 0.2763122618198395,
"learning_rate": 6.536033802742813e-06,
"loss": 1.2414,
"step": 421
},
{
"epoch": 0.38494868871151655,
"grad_norm": 0.15884122252464294,
"learning_rate": 6.375199646360142e-06,
"loss": 1.0608,
"step": 422
},
{
"epoch": 0.38586088939566704,
"grad_norm": 0.14970403909683228,
"learning_rate": 6.216234231230012e-06,
"loss": 0.7528,
"step": 423
},
{
"epoch": 0.3867730900798176,
"grad_norm": 0.15861846506595612,
"learning_rate": 6.059144366901736e-06,
"loss": 0.924,
"step": 424
},
{
"epoch": 0.38768529076396807,
"grad_norm": 0.18967342376708984,
"learning_rate": 5.903936782582253e-06,
"loss": 1.0738,
"step": 425
},
{
"epoch": 0.38859749144811856,
"grad_norm": 0.1890667825937271,
"learning_rate": 5.750618126847912e-06,
"loss": 1.0767,
"step": 426
},
{
"epoch": 0.3895096921322691,
"grad_norm": 0.18360556662082672,
"learning_rate": 5.599194967359639e-06,
"loss": 1.137,
"step": 427
},
{
"epoch": 0.3904218928164196,
"grad_norm": 0.2094428986310959,
"learning_rate": 5.449673790581611e-06,
"loss": 1.0618,
"step": 428
},
{
"epoch": 0.39133409350057013,
"grad_norm": 0.17591425776481628,
"learning_rate": 5.302061001503394e-06,
"loss": 0.9315,
"step": 429
},
{
"epoch": 0.3922462941847206,
"grad_norm": 0.19204486906528473,
"learning_rate": 5.156362923365588e-06,
"loss": 1.1151,
"step": 430
},
{
"epoch": 0.39315849486887117,
"grad_norm": 0.19108223915100098,
"learning_rate": 5.012585797388936e-06,
"loss": 0.9007,
"step": 431
},
{
"epoch": 0.39407069555302165,
"grad_norm": 0.24487371742725372,
"learning_rate": 4.87073578250698e-06,
"loss": 1.0956,
"step": 432
},
{
"epoch": 0.3949828962371722,
"grad_norm": 0.36626699566841125,
"learning_rate": 4.730818955102234e-06,
"loss": 1.1817,
"step": 433
},
{
"epoch": 0.3958950969213227,
"grad_norm": 0.20722968876361847,
"learning_rate": 4.592841308745932e-06,
"loss": 1.1693,
"step": 434
},
{
"epoch": 0.39680729760547323,
"grad_norm": 0.18895329535007477,
"learning_rate": 4.456808753941205e-06,
"loss": 1.2137,
"step": 435
},
{
"epoch": 0.3977194982896237,
"grad_norm": 0.11202345043420792,
"learning_rate": 4.322727117869951e-06,
"loss": 1.1175,
"step": 436
},
{
"epoch": 0.3986316989737742,
"grad_norm": 0.10380493104457855,
"learning_rate": 4.190602144143207e-06,
"loss": 0.9969,
"step": 437
},
{
"epoch": 0.39954389965792475,
"grad_norm": 0.10471412539482117,
"learning_rate": 4.06043949255509e-06,
"loss": 1.1972,
"step": 438
},
{
"epoch": 0.40045610034207524,
"grad_norm": 0.10842544585466385,
"learning_rate": 3.932244738840379e-06,
"loss": 1.233,
"step": 439
},
{
"epoch": 0.4013683010262258,
"grad_norm": 0.1057286411523819,
"learning_rate": 3.8060233744356633e-06,
"loss": 1.1387,
"step": 440
},
{
"epoch": 0.40228050171037627,
"grad_norm": 0.13041551411151886,
"learning_rate": 3.681780806244095e-06,
"loss": 1.3469,
"step": 441
},
{
"epoch": 0.4031927023945268,
"grad_norm": 0.14460065960884094,
"learning_rate": 3.5595223564037884e-06,
"loss": 1.2273,
"step": 442
},
{
"epoch": 0.4041049030786773,
"grad_norm": 0.1310551017522812,
"learning_rate": 3.4392532620598216e-06,
"loss": 1.2728,
"step": 443
},
{
"epoch": 0.40501710376282785,
"grad_norm": 0.15493756532669067,
"learning_rate": 3.3209786751399187e-06,
"loss": 1.2911,
"step": 444
},
{
"epoch": 0.40592930444697833,
"grad_norm": 0.17276257276535034,
"learning_rate": 3.2047036621337236e-06,
"loss": 1.2539,
"step": 445
},
{
"epoch": 0.4068415051311288,
"grad_norm": 0.19491133093833923,
"learning_rate": 3.0904332038757977e-06,
"loss": 1.3042,
"step": 446
},
{
"epoch": 0.40775370581527937,
"grad_norm": 0.2031175196170807,
"learning_rate": 2.978172195332263e-06,
"loss": 1.3051,
"step": 447
},
{
"epoch": 0.40866590649942985,
"grad_norm": 0.24767906963825226,
"learning_rate": 2.8679254453910785e-06,
"loss": 1.3451,
"step": 448
},
{
"epoch": 0.4095781071835804,
"grad_norm": 0.3026193380355835,
"learning_rate": 2.759697676656098e-06,
"loss": 1.4116,
"step": 449
},
{
"epoch": 0.4104903078677309,
"grad_norm": 0.8892145156860352,
"learning_rate": 2.653493525244721e-06,
"loss": 1.6982,
"step": 450
},
{
"epoch": 0.41140250855188143,
"grad_norm": 0.13387838006019592,
"learning_rate": 2.549317540589308e-06,
"loss": 1.0503,
"step": 451
},
{
"epoch": 0.4123147092360319,
"grad_norm": 0.12145627290010452,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.9952,
"step": 452
},
{
"epoch": 0.41322690992018246,
"grad_norm": 0.14256002008914948,
"learning_rate": 2.3470678346851518e-06,
"loss": 1.0721,
"step": 453
},
{
"epoch": 0.41413911060433295,
"grad_norm": 0.13277588784694672,
"learning_rate": 2.2490027771406687e-06,
"loss": 1.0023,
"step": 454
},
{
"epoch": 0.4150513112884835,
"grad_norm": 0.14663711190223694,
"learning_rate": 2.152983213389559e-06,
"loss": 1.335,
"step": 455
},
{
"epoch": 0.415963511972634,
"grad_norm": 0.1930977702140808,
"learning_rate": 2.0590132565903476e-06,
"loss": 1.212,
"step": 456
},
{
"epoch": 0.41687571265678447,
"grad_norm": 0.19310270249843597,
"learning_rate": 1.9670969321032407e-06,
"loss": 1.281,
"step": 457
},
{
"epoch": 0.417787913340935,
"grad_norm": 0.20942994952201843,
"learning_rate": 1.8772381773176417e-06,
"loss": 1.5858,
"step": 458
},
{
"epoch": 0.4187001140250855,
"grad_norm": 0.1741369217634201,
"learning_rate": 1.7894408414835362e-06,
"loss": 1.1413,
"step": 459
},
{
"epoch": 0.41961231470923605,
"grad_norm": 0.2890353500843048,
"learning_rate": 1.70370868554659e-06,
"loss": 1.5824,
"step": 460
},
{
"epoch": 0.42052451539338653,
"grad_norm": 0.38743922114372253,
"learning_rate": 1.620045381987012e-06,
"loss": 1.2338,
"step": 461
},
{
"epoch": 0.4214367160775371,
"grad_norm": 0.4166823923587799,
"learning_rate": 1.5384545146622852e-06,
"loss": 1.4114,
"step": 462
},
{
"epoch": 0.42234891676168757,
"grad_norm": 0.16984394192695618,
"learning_rate": 1.4589395786535953e-06,
"loss": 1.033,
"step": 463
},
{
"epoch": 0.4232611174458381,
"grad_norm": 0.1662890464067459,
"learning_rate": 1.3815039801161721e-06,
"loss": 1.1955,
"step": 464
},
{
"epoch": 0.4241733181299886,
"grad_norm": 0.15266531705856323,
"learning_rate": 1.3061510361333185e-06,
"loss": 1.0023,
"step": 465
},
{
"epoch": 0.4250855188141391,
"grad_norm": 0.13052251935005188,
"learning_rate": 1.232883974574367e-06,
"loss": 0.7769,
"step": 466
},
{
"epoch": 0.42599771949828963,
"grad_norm": 0.15434938669204712,
"learning_rate": 1.1617059339563807e-06,
"loss": 0.9081,
"step": 467
},
{
"epoch": 0.4269099201824401,
"grad_norm": 0.1454930305480957,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.8577,
"step": 468
},
{
"epoch": 0.42782212086659066,
"grad_norm": 0.14353904128074646,
"learning_rate": 1.0256290220474307e-06,
"loss": 0.9633,
"step": 469
},
{
"epoch": 0.42873432155074115,
"grad_norm": 0.136052668094635,
"learning_rate": 9.607359798384785e-07,
"loss": 0.8172,
"step": 470
},
{
"epoch": 0.4296465222348917,
"grad_norm": 0.1676524579524994,
"learning_rate": 8.979436164848088e-07,
"loss": 1.0447,
"step": 471
},
{
"epoch": 0.4305587229190422,
"grad_norm": 0.2036374807357788,
"learning_rate": 8.372546218022747e-07,
"loss": 0.9286,
"step": 472
},
{
"epoch": 0.4314709236031927,
"grad_norm": 0.14977683126926422,
"learning_rate": 7.786715955054203e-07,
"loss": 0.9408,
"step": 473
},
{
"epoch": 0.4323831242873432,
"grad_norm": 0.1696736216545105,
"learning_rate": 7.221970470961125e-07,
"loss": 0.9405,
"step": 474
},
{
"epoch": 0.43329532497149376,
"grad_norm": 0.1618494838476181,
"learning_rate": 6.678333957560512e-07,
"loss": 1.0186,
"step": 475
},
{
"epoch": 0.43420752565564424,
"grad_norm": 0.19542407989501953,
"learning_rate": 6.15582970243117e-07,
"loss": 0.9891,
"step": 476
},
{
"epoch": 0.43511972633979473,
"grad_norm": 0.1792437881231308,
"learning_rate": 5.654480087916303e-07,
"loss": 0.786,
"step": 477
},
{
"epoch": 0.4360319270239453,
"grad_norm": 0.19154495000839233,
"learning_rate": 5.174306590164879e-07,
"loss": 1.026,
"step": 478
},
{
"epoch": 0.43694412770809576,
"grad_norm": 0.19262973964214325,
"learning_rate": 4.715329778211375e-07,
"loss": 1.1393,
"step": 479
},
{
"epoch": 0.4378563283922463,
"grad_norm": 0.19536390900611877,
"learning_rate": 4.277569313094809e-07,
"loss": 1.0381,
"step": 480
},
{
"epoch": 0.4387685290763968,
"grad_norm": 0.19628259539604187,
"learning_rate": 3.8610439470164737e-07,
"loss": 0.9981,
"step": 481
},
{
"epoch": 0.43968072976054734,
"grad_norm": 0.19776402413845062,
"learning_rate": 3.465771522536854e-07,
"loss": 1.0142,
"step": 482
},
{
"epoch": 0.44059293044469783,
"grad_norm": 0.21788012981414795,
"learning_rate": 3.09176897181096e-07,
"loss": 0.9793,
"step": 483
},
{
"epoch": 0.44150513112884837,
"grad_norm": 0.2997768521308899,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.0695,
"step": 484
},
{
"epoch": 0.44241733181299886,
"grad_norm": 0.27953919768333435,
"learning_rate": 2.407636663901591e-07,
"loss": 1.3182,
"step": 485
},
{
"epoch": 0.44332953249714935,
"grad_norm": 0.15639732778072357,
"learning_rate": 2.0975362126691712e-07,
"loss": 1.0899,
"step": 486
},
{
"epoch": 0.4442417331812999,
"grad_norm": 0.12085293978452682,
"learning_rate": 1.8087642458373134e-07,
"loss": 1.3242,
"step": 487
},
{
"epoch": 0.4451539338654504,
"grad_norm": 0.105677030980587,
"learning_rate": 1.5413331334360182e-07,
"loss": 1.2878,
"step": 488
},
{
"epoch": 0.4460661345496009,
"grad_norm": 0.12019108980894089,
"learning_rate": 1.2952543313240472e-07,
"loss": 1.076,
"step": 489
},
{
"epoch": 0.4469783352337514,
"grad_norm": 0.12178989499807358,
"learning_rate": 1.0705383806982606e-07,
"loss": 1.2949,
"step": 490
},
{
"epoch": 0.44789053591790196,
"grad_norm": 0.13249683380126953,
"learning_rate": 8.671949076420882e-08,
"loss": 1.4278,
"step": 491
},
{
"epoch": 0.44880273660205244,
"grad_norm": 0.13811670243740082,
"learning_rate": 6.852326227130834e-08,
"loss": 1.3262,
"step": 492
},
{
"epoch": 0.449714937286203,
"grad_norm": 0.14182856678962708,
"learning_rate": 5.246593205699424e-08,
"loss": 1.3477,
"step": 493
},
{
"epoch": 0.4506271379703535,
"grad_norm": 0.1499001681804657,
"learning_rate": 3.8548187963854956e-08,
"loss": 1.2677,
"step": 494
},
{
"epoch": 0.45153933865450396,
"grad_norm": 0.16916415095329285,
"learning_rate": 2.6770626181715773e-08,
"loss": 1.3193,
"step": 495
},
{
"epoch": 0.4524515393386545,
"grad_norm": 0.20377494394779205,
"learning_rate": 1.7133751222137007e-08,
"loss": 1.357,
"step": 496
},
{
"epoch": 0.453363740022805,
"grad_norm": 0.2306216061115265,
"learning_rate": 9.637975896759077e-09,
"loss": 1.7053,
"step": 497
},
{
"epoch": 0.45427594070695554,
"grad_norm": 0.2522677481174469,
"learning_rate": 4.2836212996499865e-09,
"loss": 1.3653,
"step": 498
},
{
"epoch": 0.455188141391106,
"grad_norm": 0.33547061681747437,
"learning_rate": 1.0709167935385455e-09,
"loss": 1.4946,
"step": 499
},
{
"epoch": 0.45610034207525657,
"grad_norm": 0.8273468017578125,
"learning_rate": 0.0,
"loss": 1.7554,
"step": 500
},
{
"epoch": 0.45610034207525657,
"eval_loss": 1.195978045463562,
"eval_runtime": 113.8887,
"eval_samples_per_second": 16.209,
"eval_steps_per_second": 4.057,
"step": 500
}
],
"logging_steps": 1,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.202037058732032e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}