{
"best_metric": 0.5863075852394104,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.20171457387796268,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0010085728693898135,
"grad_norm": 1.311654806137085,
"learning_rate": 1.0018000000000001e-05,
"loss": 0.9276,
"step": 1
},
{
"epoch": 0.0010085728693898135,
"eval_loss": 1.104080319404602,
"eval_runtime": 47.2809,
"eval_samples_per_second": 8.841,
"eval_steps_per_second": 2.221,
"step": 1
},
{
"epoch": 0.002017145738779627,
"grad_norm": 1.4768002033233643,
"learning_rate": 2.0036000000000003e-05,
"loss": 1.1293,
"step": 2
},
{
"epoch": 0.0030257186081694403,
"grad_norm": 1.863478422164917,
"learning_rate": 3.0054e-05,
"loss": 1.095,
"step": 3
},
{
"epoch": 0.004034291477559254,
"grad_norm": 1.4832903146743774,
"learning_rate": 4.0072000000000005e-05,
"loss": 1.0675,
"step": 4
},
{
"epoch": 0.005042864346949067,
"grad_norm": 1.516528606414795,
"learning_rate": 5.009e-05,
"loss": 1.1571,
"step": 5
},
{
"epoch": 0.006051437216338881,
"grad_norm": 1.0894883871078491,
"learning_rate": 6.0108e-05,
"loss": 0.8794,
"step": 6
},
{
"epoch": 0.0070600100857286935,
"grad_norm": 1.1469849348068237,
"learning_rate": 7.0126e-05,
"loss": 0.7944,
"step": 7
},
{
"epoch": 0.008068582955118508,
"grad_norm": 0.9738489389419556,
"learning_rate": 8.014400000000001e-05,
"loss": 0.7899,
"step": 8
},
{
"epoch": 0.009077155824508321,
"grad_norm": 1.1924914121627808,
"learning_rate": 9.016200000000001e-05,
"loss": 0.8915,
"step": 9
},
{
"epoch": 0.010085728693898134,
"grad_norm": 0.986112117767334,
"learning_rate": 0.00010018,
"loss": 0.735,
"step": 10
},
{
"epoch": 0.011094301563287948,
"grad_norm": 0.9808599948883057,
"learning_rate": 9.965273684210526e-05,
"loss": 0.7869,
"step": 11
},
{
"epoch": 0.012102874432677761,
"grad_norm": 0.8853185176849365,
"learning_rate": 9.912547368421053e-05,
"loss": 0.7262,
"step": 12
},
{
"epoch": 0.013111447302067574,
"grad_norm": 0.7452360987663269,
"learning_rate": 9.859821052631579e-05,
"loss": 0.679,
"step": 13
},
{
"epoch": 0.014120020171457387,
"grad_norm": 0.7864087224006653,
"learning_rate": 9.807094736842106e-05,
"loss": 0.6494,
"step": 14
},
{
"epoch": 0.015128593040847202,
"grad_norm": 0.7922771573066711,
"learning_rate": 9.754368421052633e-05,
"loss": 0.6502,
"step": 15
},
{
"epoch": 0.016137165910237016,
"grad_norm": 0.7772244215011597,
"learning_rate": 9.701642105263158e-05,
"loss": 0.5793,
"step": 16
},
{
"epoch": 0.01714573877962683,
"grad_norm": 0.7786775827407837,
"learning_rate": 9.648915789473685e-05,
"loss": 0.5646,
"step": 17
},
{
"epoch": 0.018154311649016642,
"grad_norm": 0.9989523887634277,
"learning_rate": 9.596189473684211e-05,
"loss": 0.8072,
"step": 18
},
{
"epoch": 0.019162884518406455,
"grad_norm": 0.8197798728942871,
"learning_rate": 9.543463157894737e-05,
"loss": 0.549,
"step": 19
},
{
"epoch": 0.020171457387796268,
"grad_norm": 0.727142333984375,
"learning_rate": 9.490736842105264e-05,
"loss": 0.6405,
"step": 20
},
{
"epoch": 0.02118003025718608,
"grad_norm": 0.7259921431541443,
"learning_rate": 9.43801052631579e-05,
"loss": 0.6329,
"step": 21
},
{
"epoch": 0.022188603126575897,
"grad_norm": 0.7431148886680603,
"learning_rate": 9.385284210526316e-05,
"loss": 0.522,
"step": 22
},
{
"epoch": 0.02319717599596571,
"grad_norm": 0.8169230818748474,
"learning_rate": 9.332557894736843e-05,
"loss": 0.7403,
"step": 23
},
{
"epoch": 0.024205748865355523,
"grad_norm": 0.9084967970848083,
"learning_rate": 9.279831578947369e-05,
"loss": 0.6538,
"step": 24
},
{
"epoch": 0.025214321734745335,
"grad_norm": 0.8007498383522034,
"learning_rate": 9.227105263157896e-05,
"loss": 0.6519,
"step": 25
},
{
"epoch": 0.026222894604135148,
"grad_norm": 0.7854844927787781,
"learning_rate": 9.174378947368421e-05,
"loss": 0.6472,
"step": 26
},
{
"epoch": 0.02723146747352496,
"grad_norm": 0.8193365335464478,
"learning_rate": 9.121652631578948e-05,
"loss": 0.5204,
"step": 27
},
{
"epoch": 0.028240040342914774,
"grad_norm": 0.8219320178031921,
"learning_rate": 9.068926315789475e-05,
"loss": 0.6833,
"step": 28
},
{
"epoch": 0.02924861321230459,
"grad_norm": 0.8295514583587646,
"learning_rate": 9.016200000000001e-05,
"loss": 0.6379,
"step": 29
},
{
"epoch": 0.030257186081694403,
"grad_norm": 0.9452921748161316,
"learning_rate": 8.963473684210526e-05,
"loss": 0.7919,
"step": 30
},
{
"epoch": 0.031265758951084216,
"grad_norm": 0.893158495426178,
"learning_rate": 8.910747368421053e-05,
"loss": 0.8002,
"step": 31
},
{
"epoch": 0.03227433182047403,
"grad_norm": 0.7392338514328003,
"learning_rate": 8.858021052631579e-05,
"loss": 0.6503,
"step": 32
},
{
"epoch": 0.03328290468986384,
"grad_norm": 0.8364588618278503,
"learning_rate": 8.805294736842106e-05,
"loss": 0.7389,
"step": 33
},
{
"epoch": 0.03429147755925366,
"grad_norm": 0.8333988189697266,
"learning_rate": 8.752568421052633e-05,
"loss": 0.718,
"step": 34
},
{
"epoch": 0.03530005042864347,
"grad_norm": 0.8555812835693359,
"learning_rate": 8.699842105263159e-05,
"loss": 0.8115,
"step": 35
},
{
"epoch": 0.036308623298033284,
"grad_norm": 0.909781277179718,
"learning_rate": 8.647115789473686e-05,
"loss": 0.7834,
"step": 36
},
{
"epoch": 0.03731719616742309,
"grad_norm": 0.8471423387527466,
"learning_rate": 8.594389473684211e-05,
"loss": 0.7363,
"step": 37
},
{
"epoch": 0.03832576903681291,
"grad_norm": 0.7747591137886047,
"learning_rate": 8.541663157894737e-05,
"loss": 0.7223,
"step": 38
},
{
"epoch": 0.039334341906202726,
"grad_norm": 0.7503566145896912,
"learning_rate": 8.488936842105264e-05,
"loss": 0.6464,
"step": 39
},
{
"epoch": 0.040342914775592535,
"grad_norm": 0.7485514283180237,
"learning_rate": 8.43621052631579e-05,
"loss": 0.624,
"step": 40
},
{
"epoch": 0.04135148764498235,
"grad_norm": 0.8754268884658813,
"learning_rate": 8.383484210526316e-05,
"loss": 0.7319,
"step": 41
},
{
"epoch": 0.04236006051437216,
"grad_norm": 0.9374569058418274,
"learning_rate": 8.330757894736843e-05,
"loss": 0.8252,
"step": 42
},
{
"epoch": 0.04336863338376198,
"grad_norm": 1.0073909759521484,
"learning_rate": 8.278031578947369e-05,
"loss": 0.8029,
"step": 43
},
{
"epoch": 0.044377206253151794,
"grad_norm": 0.9302268624305725,
"learning_rate": 8.225305263157896e-05,
"loss": 0.7655,
"step": 44
},
{
"epoch": 0.0453857791225416,
"grad_norm": 0.9419411420822144,
"learning_rate": 8.172578947368422e-05,
"loss": 0.7218,
"step": 45
},
{
"epoch": 0.04639435199193142,
"grad_norm": 0.936607301235199,
"learning_rate": 8.119852631578947e-05,
"loss": 0.7549,
"step": 46
},
{
"epoch": 0.04740292486132123,
"grad_norm": 0.8890344500541687,
"learning_rate": 8.067126315789474e-05,
"loss": 0.7809,
"step": 47
},
{
"epoch": 0.048411497730711045,
"grad_norm": 1.0612900257110596,
"learning_rate": 8.014400000000001e-05,
"loss": 0.7987,
"step": 48
},
{
"epoch": 0.049420070600100854,
"grad_norm": 0.9946021437644958,
"learning_rate": 7.961673684210527e-05,
"loss": 0.8006,
"step": 49
},
{
"epoch": 0.05042864346949067,
"grad_norm": 1.1615980863571167,
"learning_rate": 7.908947368421054e-05,
"loss": 0.8517,
"step": 50
},
{
"epoch": 0.05042864346949067,
"eval_loss": 0.6663289666175842,
"eval_runtime": 47.2665,
"eval_samples_per_second": 8.843,
"eval_steps_per_second": 2.221,
"step": 50
},
{
"epoch": 0.05143721633888049,
"grad_norm": 0.8349344730377197,
"learning_rate": 7.856221052631579e-05,
"loss": 0.4587,
"step": 51
},
{
"epoch": 0.052445789208270296,
"grad_norm": 0.6953268647193909,
"learning_rate": 7.803494736842106e-05,
"loss": 0.666,
"step": 52
},
{
"epoch": 0.05345436207766011,
"grad_norm": 0.7224613428115845,
"learning_rate": 7.750768421052632e-05,
"loss": 0.7536,
"step": 53
},
{
"epoch": 0.05446293494704992,
"grad_norm": 0.6757810115814209,
"learning_rate": 7.698042105263157e-05,
"loss": 0.6169,
"step": 54
},
{
"epoch": 0.05547150781643974,
"grad_norm": 0.7635074257850647,
"learning_rate": 7.645315789473686e-05,
"loss": 0.6307,
"step": 55
},
{
"epoch": 0.05648008068582955,
"grad_norm": 0.6975710988044739,
"learning_rate": 7.592589473684211e-05,
"loss": 0.6233,
"step": 56
},
{
"epoch": 0.057488653555219364,
"grad_norm": 0.9252186417579651,
"learning_rate": 7.539863157894737e-05,
"loss": 0.6551,
"step": 57
},
{
"epoch": 0.05849722642460918,
"grad_norm": 0.8248884677886963,
"learning_rate": 7.487136842105264e-05,
"loss": 0.5789,
"step": 58
},
{
"epoch": 0.05950579929399899,
"grad_norm": 0.6074532866477966,
"learning_rate": 7.43441052631579e-05,
"loss": 0.5004,
"step": 59
},
{
"epoch": 0.060514372163388806,
"grad_norm": 0.6752936244010925,
"learning_rate": 7.381684210526315e-05,
"loss": 0.4884,
"step": 60
},
{
"epoch": 0.061522945032778616,
"grad_norm": 0.7720556259155273,
"learning_rate": 7.328957894736844e-05,
"loss": 0.7758,
"step": 61
},
{
"epoch": 0.06253151790216843,
"grad_norm": 0.699116051197052,
"learning_rate": 7.276231578947369e-05,
"loss": 0.5716,
"step": 62
},
{
"epoch": 0.06354009077155824,
"grad_norm": 0.7762227058410645,
"learning_rate": 7.223505263157895e-05,
"loss": 0.6935,
"step": 63
},
{
"epoch": 0.06454866364094806,
"grad_norm": 0.6913344264030457,
"learning_rate": 7.170778947368422e-05,
"loss": 0.6068,
"step": 64
},
{
"epoch": 0.06555723651033787,
"grad_norm": 0.7144531011581421,
"learning_rate": 7.118052631578947e-05,
"loss": 0.6431,
"step": 65
},
{
"epoch": 0.06656580937972768,
"grad_norm": 0.6667282581329346,
"learning_rate": 7.065326315789474e-05,
"loss": 0.5421,
"step": 66
},
{
"epoch": 0.06757438224911749,
"grad_norm": 0.6969847083091736,
"learning_rate": 7.0126e-05,
"loss": 0.6785,
"step": 67
},
{
"epoch": 0.06858295511850732,
"grad_norm": 0.6973515748977661,
"learning_rate": 6.959873684210527e-05,
"loss": 0.5455,
"step": 68
},
{
"epoch": 0.06959152798789713,
"grad_norm": 0.7713499069213867,
"learning_rate": 6.907147368421054e-05,
"loss": 0.7372,
"step": 69
},
{
"epoch": 0.07060010085728693,
"grad_norm": 0.5957585573196411,
"learning_rate": 6.85442105263158e-05,
"loss": 0.4954,
"step": 70
},
{
"epoch": 0.07160867372667676,
"grad_norm": 0.7025538086891174,
"learning_rate": 6.801694736842105e-05,
"loss": 0.5496,
"step": 71
},
{
"epoch": 0.07261724659606657,
"grad_norm": 0.7217448353767395,
"learning_rate": 6.748968421052632e-05,
"loss": 0.5567,
"step": 72
},
{
"epoch": 0.07362581946545638,
"grad_norm": 0.7021524906158447,
"learning_rate": 6.696242105263158e-05,
"loss": 0.5291,
"step": 73
},
{
"epoch": 0.07463439233484619,
"grad_norm": 0.7426285743713379,
"learning_rate": 6.643515789473685e-05,
"loss": 0.6815,
"step": 74
},
{
"epoch": 0.07564296520423601,
"grad_norm": 0.7391223311424255,
"learning_rate": 6.590789473684212e-05,
"loss": 0.6689,
"step": 75
},
{
"epoch": 0.07665153807362582,
"grad_norm": 0.6848605871200562,
"learning_rate": 6.538063157894737e-05,
"loss": 0.5782,
"step": 76
},
{
"epoch": 0.07766011094301563,
"grad_norm": 0.711951732635498,
"learning_rate": 6.485336842105264e-05,
"loss": 0.5801,
"step": 77
},
{
"epoch": 0.07866868381240545,
"grad_norm": 0.5852221846580505,
"learning_rate": 6.43261052631579e-05,
"loss": 0.4995,
"step": 78
},
{
"epoch": 0.07967725668179526,
"grad_norm": 0.9040377736091614,
"learning_rate": 6.379884210526315e-05,
"loss": 0.5318,
"step": 79
},
{
"epoch": 0.08068582955118507,
"grad_norm": 0.756641149520874,
"learning_rate": 6.327157894736842e-05,
"loss": 0.6616,
"step": 80
},
{
"epoch": 0.08169440242057488,
"grad_norm": 0.7158970832824707,
"learning_rate": 6.274431578947368e-05,
"loss": 0.6113,
"step": 81
},
{
"epoch": 0.0827029752899647,
"grad_norm": 0.6931173205375671,
"learning_rate": 6.221705263157895e-05,
"loss": 0.5882,
"step": 82
},
{
"epoch": 0.08371154815935451,
"grad_norm": 0.7512569427490234,
"learning_rate": 6.168978947368422e-05,
"loss": 0.6451,
"step": 83
},
{
"epoch": 0.08472012102874432,
"grad_norm": 0.7793223261833191,
"learning_rate": 6.116252631578948e-05,
"loss": 0.6186,
"step": 84
},
{
"epoch": 0.08572869389813415,
"grad_norm": 0.7280353307723999,
"learning_rate": 6.063526315789474e-05,
"loss": 0.6657,
"step": 85
},
{
"epoch": 0.08673726676752395,
"grad_norm": 0.7213184833526611,
"learning_rate": 6.0108e-05,
"loss": 0.6312,
"step": 86
},
{
"epoch": 0.08774583963691376,
"grad_norm": 0.7809893488883972,
"learning_rate": 5.9580736842105264e-05,
"loss": 0.6928,
"step": 87
},
{
"epoch": 0.08875441250630359,
"grad_norm": 0.8184586763381958,
"learning_rate": 5.905347368421053e-05,
"loss": 0.6551,
"step": 88
},
{
"epoch": 0.0897629853756934,
"grad_norm": 0.8529835939407349,
"learning_rate": 5.85262105263158e-05,
"loss": 0.7481,
"step": 89
},
{
"epoch": 0.0907715582450832,
"grad_norm": 0.8931265473365784,
"learning_rate": 5.799894736842106e-05,
"loss": 0.7928,
"step": 90
},
{
"epoch": 0.09178013111447302,
"grad_norm": 0.7958138585090637,
"learning_rate": 5.747168421052632e-05,
"loss": 0.6151,
"step": 91
},
{
"epoch": 0.09278870398386284,
"grad_norm": 0.7931579351425171,
"learning_rate": 5.694442105263158e-05,
"loss": 0.7141,
"step": 92
},
{
"epoch": 0.09379727685325265,
"grad_norm": 0.8281200528144836,
"learning_rate": 5.641715789473684e-05,
"loss": 0.6538,
"step": 93
},
{
"epoch": 0.09480584972264246,
"grad_norm": 0.8042008280754089,
"learning_rate": 5.5889894736842104e-05,
"loss": 0.6661,
"step": 94
},
{
"epoch": 0.09581442259203228,
"grad_norm": 0.8040047287940979,
"learning_rate": 5.5362631578947374e-05,
"loss": 0.5659,
"step": 95
},
{
"epoch": 0.09682299546142209,
"grad_norm": 1.044537901878357,
"learning_rate": 5.483536842105264e-05,
"loss": 0.835,
"step": 96
},
{
"epoch": 0.0978315683308119,
"grad_norm": 0.9413779377937317,
"learning_rate": 5.43081052631579e-05,
"loss": 0.7605,
"step": 97
},
{
"epoch": 0.09884014120020171,
"grad_norm": 0.9656868577003479,
"learning_rate": 5.378084210526316e-05,
"loss": 0.7144,
"step": 98
},
{
"epoch": 0.09984871406959153,
"grad_norm": 1.0694280862808228,
"learning_rate": 5.3253578947368426e-05,
"loss": 0.8615,
"step": 99
},
{
"epoch": 0.10085728693898134,
"grad_norm": 1.2606571912765503,
"learning_rate": 5.272631578947368e-05,
"loss": 0.8549,
"step": 100
},
{
"epoch": 0.10085728693898134,
"eval_loss": 0.635071337223053,
"eval_runtime": 47.0624,
"eval_samples_per_second": 8.882,
"eval_steps_per_second": 2.231,
"step": 100
},
{
"epoch": 0.10186585980837115,
"grad_norm": 0.7055741548538208,
"learning_rate": 5.2199052631578945e-05,
"loss": 0.6355,
"step": 101
},
{
"epoch": 0.10287443267776097,
"grad_norm": 0.7601715922355652,
"learning_rate": 5.167178947368422e-05,
"loss": 0.6535,
"step": 102
},
{
"epoch": 0.10388300554715078,
"grad_norm": 0.6638849973678589,
"learning_rate": 5.114452631578948e-05,
"loss": 0.6594,
"step": 103
},
{
"epoch": 0.10489157841654059,
"grad_norm": 0.5764726996421814,
"learning_rate": 5.061726315789474e-05,
"loss": 0.5227,
"step": 104
},
{
"epoch": 0.1059001512859304,
"grad_norm": 0.7011322975158691,
"learning_rate": 5.009e-05,
"loss": 0.6485,
"step": 105
},
{
"epoch": 0.10690872415532023,
"grad_norm": 0.6385596394538879,
"learning_rate": 4.9562736842105266e-05,
"loss": 0.617,
"step": 106
},
{
"epoch": 0.10791729702471003,
"grad_norm": 0.613409161567688,
"learning_rate": 4.903547368421053e-05,
"loss": 0.5565,
"step": 107
},
{
"epoch": 0.10892586989409984,
"grad_norm": 0.5934216976165771,
"learning_rate": 4.850821052631579e-05,
"loss": 0.5369,
"step": 108
},
{
"epoch": 0.10993444276348967,
"grad_norm": 0.5698984265327454,
"learning_rate": 4.7980947368421055e-05,
"loss": 0.5095,
"step": 109
},
{
"epoch": 0.11094301563287948,
"grad_norm": 0.6158843636512756,
"learning_rate": 4.745368421052632e-05,
"loss": 0.5607,
"step": 110
},
{
"epoch": 0.11195158850226929,
"grad_norm": 0.6479724645614624,
"learning_rate": 4.692642105263158e-05,
"loss": 0.4708,
"step": 111
},
{
"epoch": 0.1129601613716591,
"grad_norm": 0.7158260345458984,
"learning_rate": 4.6399157894736844e-05,
"loss": 0.6306,
"step": 112
},
{
"epoch": 0.11396873424104892,
"grad_norm": 0.6772667169570923,
"learning_rate": 4.5871894736842107e-05,
"loss": 0.5333,
"step": 113
},
{
"epoch": 0.11497730711043873,
"grad_norm": 0.6242232322692871,
"learning_rate": 4.5344631578947376e-05,
"loss": 0.5704,
"step": 114
},
{
"epoch": 0.11598587997982854,
"grad_norm": 0.6591203212738037,
"learning_rate": 4.481736842105263e-05,
"loss": 0.665,
"step": 115
},
{
"epoch": 0.11699445284921836,
"grad_norm": 0.6875463128089905,
"learning_rate": 4.4290105263157895e-05,
"loss": 0.546,
"step": 116
},
{
"epoch": 0.11800302571860817,
"grad_norm": 0.7117153406143188,
"learning_rate": 4.3762842105263165e-05,
"loss": 0.6204,
"step": 117
},
{
"epoch": 0.11901159858799798,
"grad_norm": 0.6039959192276001,
"learning_rate": 4.323557894736843e-05,
"loss": 0.635,
"step": 118
},
{
"epoch": 0.1200201714573878,
"grad_norm": 0.7009987235069275,
"learning_rate": 4.2708315789473684e-05,
"loss": 0.608,
"step": 119
},
{
"epoch": 0.12102874432677761,
"grad_norm": 0.6250441074371338,
"learning_rate": 4.218105263157895e-05,
"loss": 0.5882,
"step": 120
},
{
"epoch": 0.12203731719616742,
"grad_norm": 0.7398062944412231,
"learning_rate": 4.1653789473684217e-05,
"loss": 0.6349,
"step": 121
},
{
"epoch": 0.12304589006555723,
"grad_norm": 0.7030249834060669,
"learning_rate": 4.112652631578948e-05,
"loss": 0.6066,
"step": 122
},
{
"epoch": 0.12405446293494705,
"grad_norm": 0.7045819163322449,
"learning_rate": 4.0599263157894736e-05,
"loss": 0.6708,
"step": 123
},
{
"epoch": 0.12506303580433686,
"grad_norm": 0.6833453178405762,
"learning_rate": 4.0072000000000005e-05,
"loss": 0.6002,
"step": 124
},
{
"epoch": 0.1260716086737267,
"grad_norm": 0.8382267951965332,
"learning_rate": 3.954473684210527e-05,
"loss": 0.6854,
"step": 125
},
{
"epoch": 0.12708018154311648,
"grad_norm": 0.8998156189918518,
"learning_rate": 3.901747368421053e-05,
"loss": 0.5236,
"step": 126
},
{
"epoch": 0.1280887544125063,
"grad_norm": 0.6268869638442993,
"learning_rate": 3.849021052631579e-05,
"loss": 0.5159,
"step": 127
},
{
"epoch": 0.12909732728189613,
"grad_norm": 0.7306240797042847,
"learning_rate": 3.796294736842106e-05,
"loss": 0.6468,
"step": 128
},
{
"epoch": 0.13010590015128592,
"grad_norm": 0.696013331413269,
"learning_rate": 3.743568421052632e-05,
"loss": 0.5668,
"step": 129
},
{
"epoch": 0.13111447302067575,
"grad_norm": 0.6369208693504333,
"learning_rate": 3.6908421052631576e-05,
"loss": 0.5423,
"step": 130
},
{
"epoch": 0.13212304589006554,
"grad_norm": 0.711659848690033,
"learning_rate": 3.6381157894736846e-05,
"loss": 0.5699,
"step": 131
},
{
"epoch": 0.13313161875945537,
"grad_norm": 0.6578086614608765,
"learning_rate": 3.585389473684211e-05,
"loss": 0.4694,
"step": 132
},
{
"epoch": 0.1341401916288452,
"grad_norm": 0.7941672205924988,
"learning_rate": 3.532663157894737e-05,
"loss": 0.6442,
"step": 133
},
{
"epoch": 0.13514876449823499,
"grad_norm": 0.7573974132537842,
"learning_rate": 3.4799368421052634e-05,
"loss": 0.7441,
"step": 134
},
{
"epoch": 0.1361573373676248,
"grad_norm": 0.7425283789634705,
"learning_rate": 3.42721052631579e-05,
"loss": 0.6675,
"step": 135
},
{
"epoch": 0.13716591023701463,
"grad_norm": 0.6818085312843323,
"learning_rate": 3.374484210526316e-05,
"loss": 0.5508,
"step": 136
},
{
"epoch": 0.13817448310640443,
"grad_norm": 0.729345440864563,
"learning_rate": 3.321757894736842e-05,
"loss": 0.5426,
"step": 137
},
{
"epoch": 0.13918305597579425,
"grad_norm": 0.6872043013572693,
"learning_rate": 3.2690315789473686e-05,
"loss": 0.6318,
"step": 138
},
{
"epoch": 0.14019162884518407,
"grad_norm": 0.8204219937324524,
"learning_rate": 3.216305263157895e-05,
"loss": 0.6653,
"step": 139
},
{
"epoch": 0.14120020171457387,
"grad_norm": 0.788364827632904,
"learning_rate": 3.163578947368421e-05,
"loss": 0.6192,
"step": 140
},
{
"epoch": 0.1422087745839637,
"grad_norm": 0.7906739115715027,
"learning_rate": 3.1108526315789475e-05,
"loss": 0.6494,
"step": 141
},
{
"epoch": 0.14321734745335352,
"grad_norm": 0.8248524069786072,
"learning_rate": 3.058126315789474e-05,
"loss": 0.6254,
"step": 142
},
{
"epoch": 0.1442259203227433,
"grad_norm": 0.7894427180290222,
"learning_rate": 3.0054e-05,
"loss": 0.6097,
"step": 143
},
{
"epoch": 0.14523449319213314,
"grad_norm": 0.8506288528442383,
"learning_rate": 2.9526736842105263e-05,
"loss": 0.6809,
"step": 144
},
{
"epoch": 0.14624306606152296,
"grad_norm": 1.004377841949463,
"learning_rate": 2.899947368421053e-05,
"loss": 0.7994,
"step": 145
},
{
"epoch": 0.14725163893091275,
"grad_norm": 0.9447079300880432,
"learning_rate": 2.847221052631579e-05,
"loss": 0.7159,
"step": 146
},
{
"epoch": 0.14826021180030258,
"grad_norm": 0.9489012360572815,
"learning_rate": 2.7944947368421052e-05,
"loss": 0.7263,
"step": 147
},
{
"epoch": 0.14926878466969237,
"grad_norm": 0.9614557027816772,
"learning_rate": 2.741768421052632e-05,
"loss": 0.6547,
"step": 148
},
{
"epoch": 0.1502773575390822,
"grad_norm": 0.98894202709198,
"learning_rate": 2.689042105263158e-05,
"loss": 0.7373,
"step": 149
},
{
"epoch": 0.15128593040847202,
"grad_norm": 1.164485216140747,
"learning_rate": 2.636315789473684e-05,
"loss": 0.7158,
"step": 150
},
{
"epoch": 0.15128593040847202,
"eval_loss": 0.6001591086387634,
"eval_runtime": 47.5174,
"eval_samples_per_second": 8.797,
"eval_steps_per_second": 2.21,
"step": 150
},
{
"epoch": 0.15229450327786181,
"grad_norm": 0.6352901458740234,
"learning_rate": 2.583589473684211e-05,
"loss": 0.5723,
"step": 151
},
{
"epoch": 0.15330307614725164,
"grad_norm": 0.6519777774810791,
"learning_rate": 2.530863157894737e-05,
"loss": 0.5986,
"step": 152
},
{
"epoch": 0.15431164901664146,
"grad_norm": 0.6912335753440857,
"learning_rate": 2.4781368421052633e-05,
"loss": 0.6372,
"step": 153
},
{
"epoch": 0.15532022188603126,
"grad_norm": 0.8081892728805542,
"learning_rate": 2.4254105263157896e-05,
"loss": 0.6308,
"step": 154
},
{
"epoch": 0.15632879475542108,
"grad_norm": 0.638839066028595,
"learning_rate": 2.372684210526316e-05,
"loss": 0.5209,
"step": 155
},
{
"epoch": 0.1573373676248109,
"grad_norm": 0.8660756945610046,
"learning_rate": 2.3199578947368422e-05,
"loss": 0.6188,
"step": 156
},
{
"epoch": 0.1583459404942007,
"grad_norm": 0.8016595244407654,
"learning_rate": 2.2672315789473688e-05,
"loss": 0.683,
"step": 157
},
{
"epoch": 0.15935451336359052,
"grad_norm": 0.7723732590675354,
"learning_rate": 2.2145052631578948e-05,
"loss": 0.5858,
"step": 158
},
{
"epoch": 0.16036308623298035,
"grad_norm": 0.7663549780845642,
"learning_rate": 2.1617789473684214e-05,
"loss": 0.6837,
"step": 159
},
{
"epoch": 0.16137165910237014,
"grad_norm": 0.6378078460693359,
"learning_rate": 2.1090526315789473e-05,
"loss": 0.5426,
"step": 160
},
{
"epoch": 0.16238023197175996,
"grad_norm": 0.6299136877059937,
"learning_rate": 2.056326315789474e-05,
"loss": 0.6205,
"step": 161
},
{
"epoch": 0.16338880484114976,
"grad_norm": 0.614180326461792,
"learning_rate": 2.0036000000000003e-05,
"loss": 0.645,
"step": 162
},
{
"epoch": 0.16439737771053958,
"grad_norm": 0.5849306583404541,
"learning_rate": 1.9508736842105266e-05,
"loss": 0.5969,
"step": 163
},
{
"epoch": 0.1654059505799294,
"grad_norm": 0.6921943426132202,
"learning_rate": 1.898147368421053e-05,
"loss": 0.6696,
"step": 164
},
{
"epoch": 0.1664145234493192,
"grad_norm": 0.6622946858406067,
"learning_rate": 1.8454210526315788e-05,
"loss": 0.5939,
"step": 165
},
{
"epoch": 0.16742309631870902,
"grad_norm": 0.5671906471252441,
"learning_rate": 1.7926947368421054e-05,
"loss": 0.4853,
"step": 166
},
{
"epoch": 0.16843166918809885,
"grad_norm": 0.5902379155158997,
"learning_rate": 1.7399684210526317e-05,
"loss": 0.5543,
"step": 167
},
{
"epoch": 0.16944024205748864,
"grad_norm": 0.6716185808181763,
"learning_rate": 1.687242105263158e-05,
"loss": 0.4996,
"step": 168
},
{
"epoch": 0.17044881492687847,
"grad_norm": 0.6331367492675781,
"learning_rate": 1.6345157894736843e-05,
"loss": 0.5686,
"step": 169
},
{
"epoch": 0.1714573877962683,
"grad_norm": 0.6758613586425781,
"learning_rate": 1.5817894736842106e-05,
"loss": 0.6115,
"step": 170
},
{
"epoch": 0.17246596066565809,
"grad_norm": 0.631222128868103,
"learning_rate": 1.529063157894737e-05,
"loss": 0.5845,
"step": 171
},
{
"epoch": 0.1734745335350479,
"grad_norm": 0.688679575920105,
"learning_rate": 1.4763368421052632e-05,
"loss": 0.5624,
"step": 172
},
{
"epoch": 0.17448310640443773,
"grad_norm": 0.6124164462089539,
"learning_rate": 1.4236105263157895e-05,
"loss": 0.5512,
"step": 173
},
{
"epoch": 0.17549167927382753,
"grad_norm": 0.7388119101524353,
"learning_rate": 1.370884210526316e-05,
"loss": 0.5659,
"step": 174
},
{
"epoch": 0.17650025214321735,
"grad_norm": 0.624447762966156,
"learning_rate": 1.318157894736842e-05,
"loss": 0.5343,
"step": 175
},
{
"epoch": 0.17750882501260717,
"grad_norm": 0.7680219411849976,
"learning_rate": 1.2654315789473685e-05,
"loss": 0.6469,
"step": 176
},
{
"epoch": 0.17851739788199697,
"grad_norm": 0.6990251541137695,
"learning_rate": 1.2127052631578948e-05,
"loss": 0.5686,
"step": 177
},
{
"epoch": 0.1795259707513868,
"grad_norm": 0.625948965549469,
"learning_rate": 1.1599789473684211e-05,
"loss": 0.5172,
"step": 178
},
{
"epoch": 0.1805345436207766,
"grad_norm": 0.7253612875938416,
"learning_rate": 1.1072526315789474e-05,
"loss": 0.648,
"step": 179
},
{
"epoch": 0.1815431164901664,
"grad_norm": 0.7192074060440063,
"learning_rate": 1.0545263157894737e-05,
"loss": 0.5826,
"step": 180
},
{
"epoch": 0.18255168935955624,
"grad_norm": 0.8760311603546143,
"learning_rate": 1.0018000000000001e-05,
"loss": 0.726,
"step": 181
},
{
"epoch": 0.18356026222894603,
"grad_norm": 0.7654669284820557,
"learning_rate": 9.490736842105264e-06,
"loss": 0.7193,
"step": 182
},
{
"epoch": 0.18456883509833585,
"grad_norm": 0.6888782382011414,
"learning_rate": 8.963473684210527e-06,
"loss": 0.5643,
"step": 183
},
{
"epoch": 0.18557740796772568,
"grad_norm": 0.7420192956924438,
"learning_rate": 8.43621052631579e-06,
"loss": 0.6996,
"step": 184
},
{
"epoch": 0.18658598083711547,
"grad_norm": 0.761364221572876,
"learning_rate": 7.908947368421053e-06,
"loss": 0.6823,
"step": 185
},
{
"epoch": 0.1875945537065053,
"grad_norm": 0.7957232594490051,
"learning_rate": 7.381684210526316e-06,
"loss": 0.6033,
"step": 186
},
{
"epoch": 0.18860312657589512,
"grad_norm": 0.7030571103096008,
"learning_rate": 6.85442105263158e-06,
"loss": 0.6799,
"step": 187
},
{
"epoch": 0.18961169944528491,
"grad_norm": 0.7977284789085388,
"learning_rate": 6.3271578947368425e-06,
"loss": 0.6251,
"step": 188
},
{
"epoch": 0.19062027231467474,
"grad_norm": 0.774628758430481,
"learning_rate": 5.7998947368421054e-06,
"loss": 0.5933,
"step": 189
},
{
"epoch": 0.19162884518406456,
"grad_norm": 0.7732921242713928,
"learning_rate": 5.272631578947368e-06,
"loss": 0.6884,
"step": 190
},
{
"epoch": 0.19263741805345436,
"grad_norm": 0.8167909979820251,
"learning_rate": 4.745368421052632e-06,
"loss": 0.6984,
"step": 191
},
{
"epoch": 0.19364599092284418,
"grad_norm": 0.7652740478515625,
"learning_rate": 4.218105263157895e-06,
"loss": 0.5643,
"step": 192
},
{
"epoch": 0.19465456379223398,
"grad_norm": 0.7798756957054138,
"learning_rate": 3.690842105263158e-06,
"loss": 0.5983,
"step": 193
},
{
"epoch": 0.1956631366616238,
"grad_norm": 0.894702136516571,
"learning_rate": 3.1635789473684213e-06,
"loss": 0.7333,
"step": 194
},
{
"epoch": 0.19667170953101362,
"grad_norm": 0.8882285952568054,
"learning_rate": 2.636315789473684e-06,
"loss": 0.7199,
"step": 195
},
{
"epoch": 0.19768028240040342,
"grad_norm": 1.022255778312683,
"learning_rate": 2.1090526315789475e-06,
"loss": 0.6327,
"step": 196
},
{
"epoch": 0.19868885526979324,
"grad_norm": 0.9281327128410339,
"learning_rate": 1.5817894736842106e-06,
"loss": 0.6983,
"step": 197
},
{
"epoch": 0.19969742813918306,
"grad_norm": 0.9630473256111145,
"learning_rate": 1.0545263157894738e-06,
"loss": 0.6424,
"step": 198
},
{
"epoch": 0.20070600100857286,
"grad_norm": 0.9315603971481323,
"learning_rate": 5.272631578947369e-07,
"loss": 0.6501,
"step": 199
},
{
"epoch": 0.20171457387796268,
"grad_norm": 1.1420410871505737,
"learning_rate": 0.0,
"loss": 0.7652,
"step": 200
},
{
"epoch": 0.20171457387796268,
"eval_loss": 0.5863075852394104,
"eval_runtime": 47.2866,
"eval_samples_per_second": 8.84,
"eval_steps_per_second": 2.221,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.733017139675136e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}