{
"best_metric": 2.786395788192749,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.012832852101379532,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 6.416426050689766e-05,
"grad_norm": 1.6019502878189087,
"learning_rate": 1.004e-05,
"loss": 7.1708,
"step": 1
},
{
"epoch": 6.416426050689766e-05,
"eval_loss": 3.9179491996765137,
"eval_runtime": 703.5067,
"eval_samples_per_second": 9.329,
"eval_steps_per_second": 2.333,
"step": 1
},
{
"epoch": 0.00012832852101379532,
"grad_norm": 1.6569987535476685,
"learning_rate": 2.008e-05,
"loss": 7.7498,
"step": 2
},
{
"epoch": 0.00019249278152069297,
"grad_norm": 1.8272496461868286,
"learning_rate": 3.012e-05,
"loss": 7.4374,
"step": 3
},
{
"epoch": 0.00025665704202759064,
"grad_norm": 1.8648499250411987,
"learning_rate": 4.016e-05,
"loss": 7.5896,
"step": 4
},
{
"epoch": 0.0003208213025344883,
"grad_norm": 1.9629138708114624,
"learning_rate": 5.02e-05,
"loss": 7.1235,
"step": 5
},
{
"epoch": 0.00038498556304138594,
"grad_norm": 2.0486273765563965,
"learning_rate": 6.024e-05,
"loss": 7.5494,
"step": 6
},
{
"epoch": 0.0004491498235482836,
"grad_norm": 2.0849950313568115,
"learning_rate": 7.028e-05,
"loss": 7.0111,
"step": 7
},
{
"epoch": 0.0005133140840551813,
"grad_norm": 2.051990509033203,
"learning_rate": 8.032e-05,
"loss": 7.0505,
"step": 8
},
{
"epoch": 0.0005774783445620789,
"grad_norm": 2.1753077507019043,
"learning_rate": 9.036000000000001e-05,
"loss": 6.5329,
"step": 9
},
{
"epoch": 0.0006416426050689766,
"grad_norm": 2.141965627670288,
"learning_rate": 0.0001004,
"loss": 6.9547,
"step": 10
},
{
"epoch": 0.0007058068655758742,
"grad_norm": 1.723946213722229,
"learning_rate": 9.987157894736842e-05,
"loss": 6.5572,
"step": 11
},
{
"epoch": 0.0007699711260827719,
"grad_norm": 1.7414573431015015,
"learning_rate": 9.934315789473684e-05,
"loss": 6.3907,
"step": 12
},
{
"epoch": 0.0008341353865896695,
"grad_norm": 2.984980821609497,
"learning_rate": 9.881473684210525e-05,
"loss": 5.9726,
"step": 13
},
{
"epoch": 0.0008982996470965672,
"grad_norm": 2.6435160636901855,
"learning_rate": 9.828631578947369e-05,
"loss": 5.9844,
"step": 14
},
{
"epoch": 0.0009624639076034649,
"grad_norm": 2.249189615249634,
"learning_rate": 9.77578947368421e-05,
"loss": 6.4426,
"step": 15
},
{
"epoch": 0.0010266281681103626,
"grad_norm": 2.079399347305298,
"learning_rate": 9.722947368421052e-05,
"loss": 6.2247,
"step": 16
},
{
"epoch": 0.00109079242861726,
"grad_norm": 1.9160220623016357,
"learning_rate": 9.670105263157895e-05,
"loss": 6.5336,
"step": 17
},
{
"epoch": 0.0011549566891241579,
"grad_norm": 1.7296383380889893,
"learning_rate": 9.617263157894737e-05,
"loss": 6.0668,
"step": 18
},
{
"epoch": 0.0012191209496310554,
"grad_norm": 2.0181539058685303,
"learning_rate": 9.564421052631579e-05,
"loss": 6.4754,
"step": 19
},
{
"epoch": 0.0012832852101379532,
"grad_norm": 1.6639951467514038,
"learning_rate": 9.511578947368421e-05,
"loss": 5.3426,
"step": 20
},
{
"epoch": 0.001347449470644851,
"grad_norm": 2.009568214416504,
"learning_rate": 9.458736842105264e-05,
"loss": 6.255,
"step": 21
},
{
"epoch": 0.0014116137311517485,
"grad_norm": 1.925960659980774,
"learning_rate": 9.405894736842106e-05,
"loss": 5.712,
"step": 22
},
{
"epoch": 0.0014757779916586462,
"grad_norm": 1.931323528289795,
"learning_rate": 9.353052631578947e-05,
"loss": 5.7793,
"step": 23
},
{
"epoch": 0.0015399422521655437,
"grad_norm": 2.025932550430298,
"learning_rate": 9.300210526315789e-05,
"loss": 5.5682,
"step": 24
},
{
"epoch": 0.0016041065126724415,
"grad_norm": 1.761017918586731,
"learning_rate": 9.247368421052631e-05,
"loss": 6.1186,
"step": 25
},
{
"epoch": 0.001668270773179339,
"grad_norm": 2.1195151805877686,
"learning_rate": 9.194526315789473e-05,
"loss": 6.1748,
"step": 26
},
{
"epoch": 0.0017324350336862368,
"grad_norm": 1.9101064205169678,
"learning_rate": 9.141684210526316e-05,
"loss": 5.9105,
"step": 27
},
{
"epoch": 0.0017965992941931343,
"grad_norm": 1.8694759607315063,
"learning_rate": 9.088842105263158e-05,
"loss": 5.6025,
"step": 28
},
{
"epoch": 0.001860763554700032,
"grad_norm": 1.7502330541610718,
"learning_rate": 9.036000000000001e-05,
"loss": 5.4051,
"step": 29
},
{
"epoch": 0.0019249278152069298,
"grad_norm": 2.0604054927825928,
"learning_rate": 8.983157894736843e-05,
"loss": 6.5442,
"step": 30
},
{
"epoch": 0.0019890920757138276,
"grad_norm": 2.209613800048828,
"learning_rate": 8.930315789473684e-05,
"loss": 6.4631,
"step": 31
},
{
"epoch": 0.002053256336220725,
"grad_norm": 2.1784732341766357,
"learning_rate": 8.877473684210526e-05,
"loss": 6.2148,
"step": 32
},
{
"epoch": 0.0021174205967276227,
"grad_norm": 2.089324712753296,
"learning_rate": 8.824631578947368e-05,
"loss": 5.6497,
"step": 33
},
{
"epoch": 0.00218158485723452,
"grad_norm": 2.2112064361572266,
"learning_rate": 8.771789473684211e-05,
"loss": 5.8131,
"step": 34
},
{
"epoch": 0.002245749117741418,
"grad_norm": 2.4185714721679688,
"learning_rate": 8.718947368421053e-05,
"loss": 6.0275,
"step": 35
},
{
"epoch": 0.0023099133782483157,
"grad_norm": 2.314575433731079,
"learning_rate": 8.666105263157895e-05,
"loss": 5.5578,
"step": 36
},
{
"epoch": 0.0023740776387552133,
"grad_norm": 2.286389112472534,
"learning_rate": 8.613263157894737e-05,
"loss": 5.8923,
"step": 37
},
{
"epoch": 0.002438241899262111,
"grad_norm": 2.207777976989746,
"learning_rate": 8.560421052631578e-05,
"loss": 5.9219,
"step": 38
},
{
"epoch": 0.0025024061597690088,
"grad_norm": 2.3077611923217773,
"learning_rate": 8.50757894736842e-05,
"loss": 5.7264,
"step": 39
},
{
"epoch": 0.0025665704202759063,
"grad_norm": 2.4047534465789795,
"learning_rate": 8.454736842105263e-05,
"loss": 6.2283,
"step": 40
},
{
"epoch": 0.002630734680782804,
"grad_norm": 2.7174782752990723,
"learning_rate": 8.401894736842106e-05,
"loss": 6.1087,
"step": 41
},
{
"epoch": 0.002694898941289702,
"grad_norm": 2.5407960414886475,
"learning_rate": 8.349052631578948e-05,
"loss": 6.0594,
"step": 42
},
{
"epoch": 0.0027590632017965994,
"grad_norm": 2.6896162033081055,
"learning_rate": 8.29621052631579e-05,
"loss": 6.087,
"step": 43
},
{
"epoch": 0.002823227462303497,
"grad_norm": 2.7748165130615234,
"learning_rate": 8.243368421052632e-05,
"loss": 5.8548,
"step": 44
},
{
"epoch": 0.0028873917228103944,
"grad_norm": 2.458576202392578,
"learning_rate": 8.190526315789474e-05,
"loss": 5.7433,
"step": 45
},
{
"epoch": 0.0029515559833172924,
"grad_norm": 2.6704394817352295,
"learning_rate": 8.137684210526315e-05,
"loss": 5.944,
"step": 46
},
{
"epoch": 0.00301572024382419,
"grad_norm": 2.7400500774383545,
"learning_rate": 8.084842105263157e-05,
"loss": 5.8527,
"step": 47
},
{
"epoch": 0.0030798845043310875,
"grad_norm": 2.7328145503997803,
"learning_rate": 8.032e-05,
"loss": 5.9252,
"step": 48
},
{
"epoch": 0.003144048764837985,
"grad_norm": 3.944028615951538,
"learning_rate": 7.979157894736842e-05,
"loss": 6.5863,
"step": 49
},
{
"epoch": 0.003208213025344883,
"grad_norm": 3.3126437664031982,
"learning_rate": 7.926315789473684e-05,
"loss": 5.661,
"step": 50
},
{
"epoch": 0.003208213025344883,
"eval_loss": 2.9033212661743164,
"eval_runtime": 705.8123,
"eval_samples_per_second": 9.299,
"eval_steps_per_second": 2.325,
"step": 50
},
{
"epoch": 0.0032723772858517805,
"grad_norm": 1.5550154447555542,
"learning_rate": 7.873473684210526e-05,
"loss": 5.5412,
"step": 51
},
{
"epoch": 0.003336541546358678,
"grad_norm": 1.3860490322113037,
"learning_rate": 7.820631578947369e-05,
"loss": 5.5933,
"step": 52
},
{
"epoch": 0.003400705806865576,
"grad_norm": 1.4063161611557007,
"learning_rate": 7.76778947368421e-05,
"loss": 5.9446,
"step": 53
},
{
"epoch": 0.0034648700673724736,
"grad_norm": 1.0651419162750244,
"learning_rate": 7.714947368421052e-05,
"loss": 5.664,
"step": 54
},
{
"epoch": 0.003529034327879371,
"grad_norm": 1.0154882669448853,
"learning_rate": 7.662105263157896e-05,
"loss": 5.0898,
"step": 55
},
{
"epoch": 0.0035931985883862687,
"grad_norm": 1.1615040302276611,
"learning_rate": 7.609263157894737e-05,
"loss": 6.0429,
"step": 56
},
{
"epoch": 0.0036573628488931666,
"grad_norm": 1.004925012588501,
"learning_rate": 7.556421052631579e-05,
"loss": 5.1923,
"step": 57
},
{
"epoch": 0.003721527109400064,
"grad_norm": 1.1468150615692139,
"learning_rate": 7.503578947368421e-05,
"loss": 5.5039,
"step": 58
},
{
"epoch": 0.0037856913699069617,
"grad_norm": 1.3373304605484009,
"learning_rate": 7.450736842105263e-05,
"loss": 5.6979,
"step": 59
},
{
"epoch": 0.0038498556304138597,
"grad_norm": 1.1840150356292725,
"learning_rate": 7.397894736842105e-05,
"loss": 5.5324,
"step": 60
},
{
"epoch": 0.003914019890920757,
"grad_norm": 1.2353522777557373,
"learning_rate": 7.345052631578948e-05,
"loss": 5.8171,
"step": 61
},
{
"epoch": 0.003978184151427655,
"grad_norm": 1.2683886289596558,
"learning_rate": 7.29221052631579e-05,
"loss": 5.7111,
"step": 62
},
{
"epoch": 0.004042348411934553,
"grad_norm": 1.293735384941101,
"learning_rate": 7.239368421052631e-05,
"loss": 6.0277,
"step": 63
},
{
"epoch": 0.00410651267244145,
"grad_norm": 1.3527857065200806,
"learning_rate": 7.186526315789474e-05,
"loss": 5.5717,
"step": 64
},
{
"epoch": 0.004170676932948348,
"grad_norm": 1.3823940753936768,
"learning_rate": 7.133684210526316e-05,
"loss": 5.7555,
"step": 65
},
{
"epoch": 0.004234841193455245,
"grad_norm": 1.4544517993927002,
"learning_rate": 7.080842105263158e-05,
"loss": 5.6251,
"step": 66
},
{
"epoch": 0.004299005453962143,
"grad_norm": 1.45553457736969,
"learning_rate": 7.028e-05,
"loss": 6.0601,
"step": 67
},
{
"epoch": 0.00436316971446904,
"grad_norm": 1.3593374490737915,
"learning_rate": 6.975157894736843e-05,
"loss": 5.3168,
"step": 68
},
{
"epoch": 0.004427333974975939,
"grad_norm": 1.7438663244247437,
"learning_rate": 6.922315789473685e-05,
"loss": 6.2382,
"step": 69
},
{
"epoch": 0.004491498235482836,
"grad_norm": 1.430897831916809,
"learning_rate": 6.869473684210527e-05,
"loss": 5.5089,
"step": 70
},
{
"epoch": 0.004555662495989734,
"grad_norm": 1.5499473810195923,
"learning_rate": 6.816631578947368e-05,
"loss": 5.5141,
"step": 71
},
{
"epoch": 0.0046198267564966315,
"grad_norm": 1.4696685075759888,
"learning_rate": 6.76378947368421e-05,
"loss": 6.2138,
"step": 72
},
{
"epoch": 0.004683991017003529,
"grad_norm": 1.6637015342712402,
"learning_rate": 6.710947368421052e-05,
"loss": 5.5004,
"step": 73
},
{
"epoch": 0.0047481552775104265,
"grad_norm": 1.773887276649475,
"learning_rate": 6.658105263157894e-05,
"loss": 5.1702,
"step": 74
},
{
"epoch": 0.004812319538017324,
"grad_norm": 1.6259382963180542,
"learning_rate": 6.605263157894737e-05,
"loss": 6.0664,
"step": 75
},
{
"epoch": 0.004876483798524222,
"grad_norm": 2.1414995193481445,
"learning_rate": 6.55242105263158e-05,
"loss": 6.0428,
"step": 76
},
{
"epoch": 0.00494064805903112,
"grad_norm": 1.9427917003631592,
"learning_rate": 6.499578947368422e-05,
"loss": 5.7616,
"step": 77
},
{
"epoch": 0.0050048123195380175,
"grad_norm": 1.8610105514526367,
"learning_rate": 6.446736842105264e-05,
"loss": 5.6827,
"step": 78
},
{
"epoch": 0.005068976580044915,
"grad_norm": 2.151019811630249,
"learning_rate": 6.393894736842105e-05,
"loss": 5.4517,
"step": 79
},
{
"epoch": 0.005133140840551813,
"grad_norm": 1.7757065296173096,
"learning_rate": 6.341052631578947e-05,
"loss": 5.5975,
"step": 80
},
{
"epoch": 0.00519730510105871,
"grad_norm": 2.1130731105804443,
"learning_rate": 6.288210526315789e-05,
"loss": 5.7106,
"step": 81
},
{
"epoch": 0.005261469361565608,
"grad_norm": 1.9320672750473022,
"learning_rate": 6.235368421052632e-05,
"loss": 5.9469,
"step": 82
},
{
"epoch": 0.005325633622072505,
"grad_norm": 2.143235683441162,
"learning_rate": 6.182526315789474e-05,
"loss": 5.7521,
"step": 83
},
{
"epoch": 0.005389797882579404,
"grad_norm": 2.1077873706817627,
"learning_rate": 6.129684210526316e-05,
"loss": 5.7203,
"step": 84
},
{
"epoch": 0.005453962143086301,
"grad_norm": 2.1506385803222656,
"learning_rate": 6.076842105263158e-05,
"loss": 6.1618,
"step": 85
},
{
"epoch": 0.005518126403593199,
"grad_norm": 2.0908615589141846,
"learning_rate": 6.024e-05,
"loss": 5.4239,
"step": 86
},
{
"epoch": 0.005582290664100096,
"grad_norm": 2.7268731594085693,
"learning_rate": 5.971157894736842e-05,
"loss": 5.7719,
"step": 87
},
{
"epoch": 0.005646454924606994,
"grad_norm": 2.2536518573760986,
"learning_rate": 5.9183157894736835e-05,
"loss": 5.7436,
"step": 88
},
{
"epoch": 0.005710619185113891,
"grad_norm": 2.401059865951538,
"learning_rate": 5.8654736842105267e-05,
"loss": 5.8639,
"step": 89
},
{
"epoch": 0.005774783445620789,
"grad_norm": 2.6088643074035645,
"learning_rate": 5.8126315789473684e-05,
"loss": 6.0605,
"step": 90
},
{
"epoch": 0.005838947706127687,
"grad_norm": 2.477994680404663,
"learning_rate": 5.759789473684211e-05,
"loss": 5.752,
"step": 91
},
{
"epoch": 0.005903111966634585,
"grad_norm": 2.4255878925323486,
"learning_rate": 5.706947368421053e-05,
"loss": 6.0441,
"step": 92
},
{
"epoch": 0.005967276227141482,
"grad_norm": 2.4979171752929688,
"learning_rate": 5.6541052631578945e-05,
"loss": 5.5721,
"step": 93
},
{
"epoch": 0.00603144048764838,
"grad_norm": 2.662763833999634,
"learning_rate": 5.601263157894736e-05,
"loss": 5.7948,
"step": 94
},
{
"epoch": 0.0060956047481552774,
"grad_norm": 2.7771427631378174,
"learning_rate": 5.5484210526315794e-05,
"loss": 5.5672,
"step": 95
},
{
"epoch": 0.006159769008662175,
"grad_norm": 2.8292243480682373,
"learning_rate": 5.495578947368421e-05,
"loss": 5.5114,
"step": 96
},
{
"epoch": 0.0062239332691690725,
"grad_norm": 2.8949716091156006,
"learning_rate": 5.442736842105264e-05,
"loss": 5.5617,
"step": 97
},
{
"epoch": 0.00628809752967597,
"grad_norm": 3.2557790279388428,
"learning_rate": 5.3898947368421055e-05,
"loss": 6.2371,
"step": 98
},
{
"epoch": 0.0063522617901828685,
"grad_norm": 3.2778756618499756,
"learning_rate": 5.337052631578947e-05,
"loss": 5.3658,
"step": 99
},
{
"epoch": 0.006416426050689766,
"grad_norm": 3.3429481983184814,
"learning_rate": 5.284210526315789e-05,
"loss": 6.0804,
"step": 100
},
{
"epoch": 0.006416426050689766,
"eval_loss": 2.8372957706451416,
"eval_runtime": 702.8035,
"eval_samples_per_second": 9.338,
"eval_steps_per_second": 2.335,
"step": 100
},
{
"epoch": 0.0064805903111966635,
"grad_norm": 1.095025658607483,
"learning_rate": 5.231368421052631e-05,
"loss": 5.3962,
"step": 101
},
{
"epoch": 0.006544754571703561,
"grad_norm": 1.1655224561691284,
"learning_rate": 5.178526315789474e-05,
"loss": 5.6473,
"step": 102
},
{
"epoch": 0.006608918832210459,
"grad_norm": 1.1299000978469849,
"learning_rate": 5.1256842105263165e-05,
"loss": 5.5021,
"step": 103
},
{
"epoch": 0.006673083092717356,
"grad_norm": 1.0751549005508423,
"learning_rate": 5.072842105263158e-05,
"loss": 5.6848,
"step": 104
},
{
"epoch": 0.006737247353224254,
"grad_norm": 1.1250840425491333,
"learning_rate": 5.02e-05,
"loss": 5.7385,
"step": 105
},
{
"epoch": 0.006801411613731152,
"grad_norm": 1.096813440322876,
"learning_rate": 4.967157894736842e-05,
"loss": 5.4361,
"step": 106
},
{
"epoch": 0.00686557587423805,
"grad_norm": 1.1172356605529785,
"learning_rate": 4.914315789473684e-05,
"loss": 5.141,
"step": 107
},
{
"epoch": 0.006929740134744947,
"grad_norm": 1.2574751377105713,
"learning_rate": 4.861473684210526e-05,
"loss": 5.7921,
"step": 108
},
{
"epoch": 0.006993904395251845,
"grad_norm": 1.1475943326950073,
"learning_rate": 4.8086315789473686e-05,
"loss": 5.3017,
"step": 109
},
{
"epoch": 0.007058068655758742,
"grad_norm": 1.182779312133789,
"learning_rate": 4.7557894736842104e-05,
"loss": 5.2065,
"step": 110
},
{
"epoch": 0.00712223291626564,
"grad_norm": 1.1800684928894043,
"learning_rate": 4.702947368421053e-05,
"loss": 5.4792,
"step": 111
},
{
"epoch": 0.007186397176772537,
"grad_norm": 1.346672773361206,
"learning_rate": 4.6501052631578946e-05,
"loss": 5.9185,
"step": 112
},
{
"epoch": 0.007250561437279436,
"grad_norm": 1.3497660160064697,
"learning_rate": 4.5972631578947364e-05,
"loss": 5.7997,
"step": 113
},
{
"epoch": 0.007314725697786333,
"grad_norm": 1.487912654876709,
"learning_rate": 4.544421052631579e-05,
"loss": 6.1196,
"step": 114
},
{
"epoch": 0.007378889958293231,
"grad_norm": 1.363545298576355,
"learning_rate": 4.4915789473684213e-05,
"loss": 5.2859,
"step": 115
},
{
"epoch": 0.007443054218800128,
"grad_norm": 1.5003371238708496,
"learning_rate": 4.438736842105263e-05,
"loss": 5.5254,
"step": 116
},
{
"epoch": 0.007507218479307026,
"grad_norm": 1.685253381729126,
"learning_rate": 4.3858947368421056e-05,
"loss": 5.5345,
"step": 117
},
{
"epoch": 0.007571382739813923,
"grad_norm": 1.8338501453399658,
"learning_rate": 4.3330526315789474e-05,
"loss": 5.6438,
"step": 118
},
{
"epoch": 0.007635547000320821,
"grad_norm": 1.5995521545410156,
"learning_rate": 4.280210526315789e-05,
"loss": 5.6032,
"step": 119
},
{
"epoch": 0.007699711260827719,
"grad_norm": 1.5582114458084106,
"learning_rate": 4.2273684210526317e-05,
"loss": 5.7405,
"step": 120
},
{
"epoch": 0.007763875521334617,
"grad_norm": 2.019646644592285,
"learning_rate": 4.174526315789474e-05,
"loss": 6.0767,
"step": 121
},
{
"epoch": 0.007828039781841514,
"grad_norm": 1.9564622640609741,
"learning_rate": 4.121684210526316e-05,
"loss": 6.3192,
"step": 122
},
{
"epoch": 0.007892204042348411,
"grad_norm": 1.720892310142517,
"learning_rate": 4.068842105263158e-05,
"loss": 5.6769,
"step": 123
},
{
"epoch": 0.00795636830285531,
"grad_norm": 1.9853098392486572,
"learning_rate": 4.016e-05,
"loss": 5.4357,
"step": 124
},
{
"epoch": 0.008020532563362208,
"grad_norm": 1.7024343013763428,
"learning_rate": 3.963157894736842e-05,
"loss": 5.605,
"step": 125
},
{
"epoch": 0.008084696823869105,
"grad_norm": 1.926780343055725,
"learning_rate": 3.9103157894736844e-05,
"loss": 5.5458,
"step": 126
},
{
"epoch": 0.008148861084376003,
"grad_norm": 1.8911856412887573,
"learning_rate": 3.857473684210526e-05,
"loss": 5.5754,
"step": 127
},
{
"epoch": 0.0082130253448829,
"grad_norm": 1.9950193166732788,
"learning_rate": 3.804631578947369e-05,
"loss": 5.7808,
"step": 128
},
{
"epoch": 0.008277189605389798,
"grad_norm": 2.379807472229004,
"learning_rate": 3.7517894736842105e-05,
"loss": 5.5042,
"step": 129
},
{
"epoch": 0.008341353865896696,
"grad_norm": 1.982927918434143,
"learning_rate": 3.698947368421052e-05,
"loss": 4.8575,
"step": 130
},
{
"epoch": 0.008405518126403593,
"grad_norm": 2.424283266067505,
"learning_rate": 3.646105263157895e-05,
"loss": 6.3244,
"step": 131
},
{
"epoch": 0.00846968238691049,
"grad_norm": 2.2915103435516357,
"learning_rate": 3.593263157894737e-05,
"loss": 5.3091,
"step": 132
},
{
"epoch": 0.008533846647417388,
"grad_norm": 2.128199815750122,
"learning_rate": 3.540421052631579e-05,
"loss": 5.6833,
"step": 133
},
{
"epoch": 0.008598010907924286,
"grad_norm": 2.3494131565093994,
"learning_rate": 3.4875789473684215e-05,
"loss": 5.435,
"step": 134
},
{
"epoch": 0.008662175168431183,
"grad_norm": 2.4190518856048584,
"learning_rate": 3.434736842105263e-05,
"loss": 5.7771,
"step": 135
},
{
"epoch": 0.00872633942893808,
"grad_norm": 2.0826826095581055,
"learning_rate": 3.381894736842105e-05,
"loss": 5.6895,
"step": 136
},
{
"epoch": 0.008790503689444978,
"grad_norm": 2.357288122177124,
"learning_rate": 3.329052631578947e-05,
"loss": 4.9487,
"step": 137
},
{
"epoch": 0.008854667949951878,
"grad_norm": 2.6756513118743896,
"learning_rate": 3.27621052631579e-05,
"loss": 5.5356,
"step": 138
},
{
"epoch": 0.008918832210458775,
"grad_norm": 2.8232810497283936,
"learning_rate": 3.223368421052632e-05,
"loss": 6.3404,
"step": 139
},
{
"epoch": 0.008982996470965673,
"grad_norm": 2.7257964611053467,
"learning_rate": 3.1705263157894736e-05,
"loss": 5.493,
"step": 140
},
{
"epoch": 0.00904716073147257,
"grad_norm": 2.5550906658172607,
"learning_rate": 3.117684210526316e-05,
"loss": 5.3718,
"step": 141
},
{
"epoch": 0.009111324991979468,
"grad_norm": 2.4199259281158447,
"learning_rate": 3.064842105263158e-05,
"loss": 5.3804,
"step": 142
},
{
"epoch": 0.009175489252486365,
"grad_norm": 2.59352445602417,
"learning_rate": 3.012e-05,
"loss": 5.7932,
"step": 143
},
{
"epoch": 0.009239653512993263,
"grad_norm": 2.602415084838867,
"learning_rate": 2.9591578947368418e-05,
"loss": 5.0882,
"step": 144
},
{
"epoch": 0.00930381777350016,
"grad_norm": 3.0608277320861816,
"learning_rate": 2.9063157894736842e-05,
"loss": 5.6549,
"step": 145
},
{
"epoch": 0.009367982034007058,
"grad_norm": 2.965409994125366,
"learning_rate": 2.8534736842105264e-05,
"loss": 5.5571,
"step": 146
},
{
"epoch": 0.009432146294513956,
"grad_norm": 2.993109941482544,
"learning_rate": 2.800631578947368e-05,
"loss": 5.1919,
"step": 147
},
{
"epoch": 0.009496310555020853,
"grad_norm": 3.2940351963043213,
"learning_rate": 2.7477894736842106e-05,
"loss": 4.7934,
"step": 148
},
{
"epoch": 0.00956047481552775,
"grad_norm": 3.4445641040802,
"learning_rate": 2.6949473684210527e-05,
"loss": 5.3651,
"step": 149
},
{
"epoch": 0.009624639076034648,
"grad_norm": 3.1870310306549072,
"learning_rate": 2.6421052631578945e-05,
"loss": 5.4287,
"step": 150
},
{
"epoch": 0.009624639076034648,
"eval_loss": 2.803600311279297,
"eval_runtime": 703.2591,
"eval_samples_per_second": 9.332,
"eval_steps_per_second": 2.333,
"step": 150
},
{
"epoch": 0.009688803336541546,
"grad_norm": 1.013375997543335,
"learning_rate": 2.589263157894737e-05,
"loss": 5.1892,
"step": 151
},
{
"epoch": 0.009752967597048443,
"grad_norm": 1.2413551807403564,
"learning_rate": 2.536421052631579e-05,
"loss": 6.1944,
"step": 152
},
{
"epoch": 0.009817131857555342,
"grad_norm": 1.0705325603485107,
"learning_rate": 2.483578947368421e-05,
"loss": 5.8975,
"step": 153
},
{
"epoch": 0.00988129611806224,
"grad_norm": 1.1972635984420776,
"learning_rate": 2.430736842105263e-05,
"loss": 5.9433,
"step": 154
},
{
"epoch": 0.009945460378569138,
"grad_norm": 1.179183840751648,
"learning_rate": 2.3778947368421052e-05,
"loss": 5.5273,
"step": 155
},
{
"epoch": 0.010009624639076035,
"grad_norm": 1.2335314750671387,
"learning_rate": 2.3250526315789473e-05,
"loss": 5.4563,
"step": 156
},
{
"epoch": 0.010073788899582933,
"grad_norm": 1.1595032215118408,
"learning_rate": 2.2722105263157894e-05,
"loss": 5.3758,
"step": 157
},
{
"epoch": 0.01013795316008983,
"grad_norm": 1.3281739950180054,
"learning_rate": 2.2193684210526316e-05,
"loss": 5.7909,
"step": 158
},
{
"epoch": 0.010202117420596728,
"grad_norm": 1.3218365907669067,
"learning_rate": 2.1665263157894737e-05,
"loss": 5.8669,
"step": 159
},
{
"epoch": 0.010266281681103625,
"grad_norm": 1.4711360931396484,
"learning_rate": 2.1136842105263158e-05,
"loss": 5.346,
"step": 160
},
{
"epoch": 0.010330445941610523,
"grad_norm": 1.376670002937317,
"learning_rate": 2.060842105263158e-05,
"loss": 5.9901,
"step": 161
},
{
"epoch": 0.01039461020211742,
"grad_norm": 1.5264054536819458,
"learning_rate": 2.008e-05,
"loss": 5.2455,
"step": 162
},
{
"epoch": 0.010458774462624318,
"grad_norm": 1.338756799697876,
"learning_rate": 1.9551578947368422e-05,
"loss": 5.5642,
"step": 163
},
{
"epoch": 0.010522938723131215,
"grad_norm": 1.5276857614517212,
"learning_rate": 1.9023157894736843e-05,
"loss": 5.7341,
"step": 164
},
{
"epoch": 0.010587102983638113,
"grad_norm": 1.5230273008346558,
"learning_rate": 1.849473684210526e-05,
"loss": 5.4654,
"step": 165
},
{
"epoch": 0.01065126724414501,
"grad_norm": 1.5083640813827515,
"learning_rate": 1.7966315789473686e-05,
"loss": 5.2931,
"step": 166
},
{
"epoch": 0.010715431504651908,
"grad_norm": 1.4739644527435303,
"learning_rate": 1.7437894736842107e-05,
"loss": 5.7144,
"step": 167
},
{
"epoch": 0.010779595765158807,
"grad_norm": 1.5886435508728027,
"learning_rate": 1.6909473684210525e-05,
"loss": 5.8369,
"step": 168
},
{
"epoch": 0.010843760025665705,
"grad_norm": 1.5803991556167603,
"learning_rate": 1.638105263157895e-05,
"loss": 5.4256,
"step": 169
},
{
"epoch": 0.010907924286172602,
"grad_norm": 1.5075167417526245,
"learning_rate": 1.5852631578947368e-05,
"loss": 5.5388,
"step": 170
},
{
"epoch": 0.0109720885466795,
"grad_norm": 1.7619450092315674,
"learning_rate": 1.532421052631579e-05,
"loss": 5.5654,
"step": 171
},
{
"epoch": 0.011036252807186397,
"grad_norm": 1.7552381753921509,
"learning_rate": 1.4795789473684209e-05,
"loss": 5.5068,
"step": 172
},
{
"epoch": 0.011100417067693295,
"grad_norm": 1.588983178138733,
"learning_rate": 1.4267368421052632e-05,
"loss": 5.3193,
"step": 173
},
{
"epoch": 0.011164581328200193,
"grad_norm": 1.6596215963363647,
"learning_rate": 1.3738947368421053e-05,
"loss": 4.9316,
"step": 174
},
{
"epoch": 0.01122874558870709,
"grad_norm": 2.2129220962524414,
"learning_rate": 1.3210526315789473e-05,
"loss": 6.2231,
"step": 175
},
{
"epoch": 0.011292909849213988,
"grad_norm": 1.8372780084609985,
"learning_rate": 1.2682105263157896e-05,
"loss": 6.2628,
"step": 176
},
{
"epoch": 0.011357074109720885,
"grad_norm": 1.8769925832748413,
"learning_rate": 1.2153684210526315e-05,
"loss": 5.8288,
"step": 177
},
{
"epoch": 0.011421238370227783,
"grad_norm": 1.7789459228515625,
"learning_rate": 1.1625263157894737e-05,
"loss": 5.4904,
"step": 178
},
{
"epoch": 0.01148540263073468,
"grad_norm": 2.0547878742218018,
"learning_rate": 1.1096842105263158e-05,
"loss": 6.2145,
"step": 179
},
{
"epoch": 0.011549566891241578,
"grad_norm": 2.1852498054504395,
"learning_rate": 1.0568421052631579e-05,
"loss": 5.3296,
"step": 180
},
{
"epoch": 0.011613731151748475,
"grad_norm": 2.358675479888916,
"learning_rate": 1.004e-05,
"loss": 5.3969,
"step": 181
},
{
"epoch": 0.011677895412255375,
"grad_norm": 2.2292699813842773,
"learning_rate": 9.511578947368422e-06,
"loss": 5.8842,
"step": 182
},
{
"epoch": 0.011742059672762272,
"grad_norm": 2.164508581161499,
"learning_rate": 8.983157894736843e-06,
"loss": 5.7544,
"step": 183
},
{
"epoch": 0.01180622393326917,
"grad_norm": 2.151883125305176,
"learning_rate": 8.454736842105263e-06,
"loss": 5.8768,
"step": 184
},
{
"epoch": 0.011870388193776067,
"grad_norm": 2.0282275676727295,
"learning_rate": 7.926315789473684e-06,
"loss": 5.936,
"step": 185
},
{
"epoch": 0.011934552454282965,
"grad_norm": 2.4359304904937744,
"learning_rate": 7.397894736842104e-06,
"loss": 5.7367,
"step": 186
},
{
"epoch": 0.011998716714789862,
"grad_norm": 2.2616195678710938,
"learning_rate": 6.8694736842105265e-06,
"loss": 5.2675,
"step": 187
},
{
"epoch": 0.01206288097529676,
"grad_norm": 2.1506917476654053,
"learning_rate": 6.341052631578948e-06,
"loss": 5.5162,
"step": 188
},
{
"epoch": 0.012127045235803657,
"grad_norm": 2.314349889755249,
"learning_rate": 5.812631578947368e-06,
"loss": 5.7775,
"step": 189
},
{
"epoch": 0.012191209496310555,
"grad_norm": 2.3769171237945557,
"learning_rate": 5.2842105263157896e-06,
"loss": 6.1183,
"step": 190
},
{
"epoch": 0.012255373756817452,
"grad_norm": 2.4818451404571533,
"learning_rate": 4.755789473684211e-06,
"loss": 5.7682,
"step": 191
},
{
"epoch": 0.01231953801732435,
"grad_norm": 2.498087167739868,
"learning_rate": 4.227368421052631e-06,
"loss": 5.555,
"step": 192
},
{
"epoch": 0.012383702277831247,
"grad_norm": 2.425711154937744,
"learning_rate": 3.698947368421052e-06,
"loss": 5.1028,
"step": 193
},
{
"epoch": 0.012447866538338145,
"grad_norm": 2.878943920135498,
"learning_rate": 3.170526315789474e-06,
"loss": 5.748,
"step": 194
},
{
"epoch": 0.012512030798845043,
"grad_norm": 2.801713466644287,
"learning_rate": 2.6421052631578948e-06,
"loss": 6.4606,
"step": 195
},
{
"epoch": 0.01257619505935194,
"grad_norm": 2.964569568634033,
"learning_rate": 2.1136842105263157e-06,
"loss": 5.5059,
"step": 196
},
{
"epoch": 0.01264035931985884,
"grad_norm": 2.598716974258423,
"learning_rate": 1.585263157894737e-06,
"loss": 5.0059,
"step": 197
},
{
"epoch": 0.012704523580365737,
"grad_norm": 3.2355284690856934,
"learning_rate": 1.0568421052631578e-06,
"loss": 5.8954,
"step": 198
},
{
"epoch": 0.012768687840872634,
"grad_norm": 2.76204514503479,
"learning_rate": 5.284210526315789e-07,
"loss": 5.7194,
"step": 199
},
{
"epoch": 0.012832852101379532,
"grad_norm": 3.6820626258850098,
"learning_rate": 0.0,
"loss": 5.3636,
"step": 200
},
{
"epoch": 0.012832852101379532,
"eval_loss": 2.786395788192749,
"eval_runtime": 702.4656,
"eval_samples_per_second": 9.343,
"eval_steps_per_second": 2.336,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.963334542480179e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}