{
"best_metric": 0.02783067524433136,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 0.11286681715575621,
"eval_steps": 50,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007524454477050414,
"grad_norm": 0.2406926155090332,
"learning_rate": 1.004e-05,
"loss": 0.4829,
"step": 1
},
{
"epoch": 0.0007524454477050414,
"eval_loss": 0.448444128036499,
"eval_runtime": 25.7969,
"eval_samples_per_second": 21.708,
"eval_steps_per_second": 5.427,
"step": 1
},
{
"epoch": 0.0015048908954100827,
"grad_norm": 0.2582743167877197,
"learning_rate": 2.008e-05,
"loss": 0.0809,
"step": 2
},
{
"epoch": 0.002257336343115124,
"grad_norm": 0.4347482919692993,
"learning_rate": 3.012e-05,
"loss": 0.1315,
"step": 3
},
{
"epoch": 0.0030097817908201654,
"grad_norm": 0.39034780859947205,
"learning_rate": 4.016e-05,
"loss": 0.1019,
"step": 4
},
{
"epoch": 0.003762227238525207,
"grad_norm": 0.5869060754776001,
"learning_rate": 5.02e-05,
"loss": 0.1216,
"step": 5
},
{
"epoch": 0.004514672686230248,
"grad_norm": 0.4321593940258026,
"learning_rate": 6.024e-05,
"loss": 0.1246,
"step": 6
},
{
"epoch": 0.005267118133935289,
"grad_norm": 0.4198409914970398,
"learning_rate": 7.028e-05,
"loss": 0.1255,
"step": 7
},
{
"epoch": 0.006019563581640331,
"grad_norm": 0.4511987864971161,
"learning_rate": 8.032e-05,
"loss": 0.0875,
"step": 8
},
{
"epoch": 0.006772009029345372,
"grad_norm": 0.30963364243507385,
"learning_rate": 9.036000000000001e-05,
"loss": 0.0681,
"step": 9
},
{
"epoch": 0.007524454477050414,
"grad_norm": 0.5047326683998108,
"learning_rate": 0.0001004,
"loss": 0.0941,
"step": 10
},
{
"epoch": 0.008276899924755455,
"grad_norm": 0.3052256107330322,
"learning_rate": 9.987157894736842e-05,
"loss": 0.0641,
"step": 11
},
{
"epoch": 0.009029345372460496,
"grad_norm": 0.29083898663520813,
"learning_rate": 9.934315789473684e-05,
"loss": 0.0529,
"step": 12
},
{
"epoch": 0.009781790820165538,
"grad_norm": 0.46031373739242554,
"learning_rate": 9.881473684210525e-05,
"loss": 0.0923,
"step": 13
},
{
"epoch": 0.010534236267870579,
"grad_norm": 0.3271160125732422,
"learning_rate": 9.828631578947369e-05,
"loss": 0.0459,
"step": 14
},
{
"epoch": 0.011286681715575621,
"grad_norm": 0.43920812010765076,
"learning_rate": 9.77578947368421e-05,
"loss": 0.059,
"step": 15
},
{
"epoch": 0.012039127163280662,
"grad_norm": 0.502988874912262,
"learning_rate": 9.722947368421052e-05,
"loss": 0.056,
"step": 16
},
{
"epoch": 0.012791572610985704,
"grad_norm": 1.8321483135223389,
"learning_rate": 9.670105263157895e-05,
"loss": 0.0628,
"step": 17
},
{
"epoch": 0.013544018058690745,
"grad_norm": 0.35579952597618103,
"learning_rate": 9.617263157894737e-05,
"loss": 0.0259,
"step": 18
},
{
"epoch": 0.014296463506395787,
"grad_norm": 0.6292356252670288,
"learning_rate": 9.564421052631579e-05,
"loss": 0.0443,
"step": 19
},
{
"epoch": 0.015048908954100828,
"grad_norm": 0.4263708293437958,
"learning_rate": 9.511578947368421e-05,
"loss": 0.0445,
"step": 20
},
{
"epoch": 0.01580135440180587,
"grad_norm": 0.5908687114715576,
"learning_rate": 9.458736842105264e-05,
"loss": 0.0441,
"step": 21
},
{
"epoch": 0.01655379984951091,
"grad_norm": 0.7689208984375,
"learning_rate": 9.405894736842106e-05,
"loss": 0.0351,
"step": 22
},
{
"epoch": 0.01730624529721595,
"grad_norm": 0.87498939037323,
"learning_rate": 9.353052631578947e-05,
"loss": 0.0163,
"step": 23
},
{
"epoch": 0.01805869074492099,
"grad_norm": 0.2699977457523346,
"learning_rate": 9.300210526315789e-05,
"loss": 0.0118,
"step": 24
},
{
"epoch": 0.018811136192626036,
"grad_norm": 0.30637574195861816,
"learning_rate": 9.247368421052631e-05,
"loss": 0.0065,
"step": 25
},
{
"epoch": 0.019563581640331076,
"grad_norm": 0.6475765109062195,
"learning_rate": 9.194526315789473e-05,
"loss": 0.012,
"step": 26
},
{
"epoch": 0.020316027088036117,
"grad_norm": 0.7173324227333069,
"learning_rate": 9.141684210526316e-05,
"loss": 0.0178,
"step": 27
},
{
"epoch": 0.021068472535741158,
"grad_norm": 0.3768528699874878,
"learning_rate": 9.088842105263158e-05,
"loss": 0.0228,
"step": 28
},
{
"epoch": 0.0218209179834462,
"grad_norm": 0.34722164273262024,
"learning_rate": 9.036000000000001e-05,
"loss": 0.0099,
"step": 29
},
{
"epoch": 0.022573363431151242,
"grad_norm": 0.07302386313676834,
"learning_rate": 8.983157894736843e-05,
"loss": 0.0015,
"step": 30
},
{
"epoch": 0.023325808878856283,
"grad_norm": 1.0248472690582275,
"learning_rate": 8.930315789473684e-05,
"loss": 0.021,
"step": 31
},
{
"epoch": 0.024078254326561323,
"grad_norm": 0.1479930877685547,
"learning_rate": 8.877473684210526e-05,
"loss": 0.003,
"step": 32
},
{
"epoch": 0.024830699774266364,
"grad_norm": 1.3804433345794678,
"learning_rate": 8.824631578947368e-05,
"loss": 0.016,
"step": 33
},
{
"epoch": 0.025583145221971408,
"grad_norm": 1.6197274923324585,
"learning_rate": 8.771789473684211e-05,
"loss": 0.0367,
"step": 34
},
{
"epoch": 0.02633559066967645,
"grad_norm": 0.6973819732666016,
"learning_rate": 8.718947368421053e-05,
"loss": 0.0141,
"step": 35
},
{
"epoch": 0.02708803611738149,
"grad_norm": 0.8518109321594238,
"learning_rate": 8.666105263157895e-05,
"loss": 0.0319,
"step": 36
},
{
"epoch": 0.02784048156508653,
"grad_norm": 0.9821231961250305,
"learning_rate": 8.613263157894737e-05,
"loss": 0.0333,
"step": 37
},
{
"epoch": 0.028592927012791574,
"grad_norm": 0.1206764355301857,
"learning_rate": 8.560421052631578e-05,
"loss": 0.0038,
"step": 38
},
{
"epoch": 0.029345372460496615,
"grad_norm": 1.610734462738037,
"learning_rate": 8.50757894736842e-05,
"loss": 0.0207,
"step": 39
},
{
"epoch": 0.030097817908201655,
"grad_norm": 0.8153631687164307,
"learning_rate": 8.454736842105263e-05,
"loss": 0.0363,
"step": 40
},
{
"epoch": 0.030850263355906696,
"grad_norm": 2.7571136951446533,
"learning_rate": 8.401894736842106e-05,
"loss": 0.0164,
"step": 41
},
{
"epoch": 0.03160270880361174,
"grad_norm": 0.11961274594068527,
"learning_rate": 8.349052631578948e-05,
"loss": 0.0035,
"step": 42
},
{
"epoch": 0.03235515425131678,
"grad_norm": 0.6352341771125793,
"learning_rate": 8.29621052631579e-05,
"loss": 0.0049,
"step": 43
},
{
"epoch": 0.03310759969902182,
"grad_norm": 0.2854023873806,
"learning_rate": 8.243368421052632e-05,
"loss": 0.0134,
"step": 44
},
{
"epoch": 0.033860045146726865,
"grad_norm": 0.2841341197490692,
"learning_rate": 8.190526315789474e-05,
"loss": 0.0068,
"step": 45
},
{
"epoch": 0.0346124905944319,
"grad_norm": 0.21941381692886353,
"learning_rate": 8.137684210526315e-05,
"loss": 0.0031,
"step": 46
},
{
"epoch": 0.035364936042136946,
"grad_norm": 1.6787192821502686,
"learning_rate": 8.084842105263157e-05,
"loss": 0.0155,
"step": 47
},
{
"epoch": 0.03611738148984198,
"grad_norm": 2.5062623023986816,
"learning_rate": 8.032e-05,
"loss": 0.0486,
"step": 48
},
{
"epoch": 0.03686982693754703,
"grad_norm": 3.1842916011810303,
"learning_rate": 7.979157894736842e-05,
"loss": 0.0298,
"step": 49
},
{
"epoch": 0.03762227238525207,
"grad_norm": 1.3214054107666016,
"learning_rate": 7.926315789473684e-05,
"loss": 0.0393,
"step": 50
},
{
"epoch": 0.03762227238525207,
"eval_loss": 0.04788399860262871,
"eval_runtime": 22.4157,
"eval_samples_per_second": 24.983,
"eval_steps_per_second": 6.246,
"step": 50
},
{
"epoch": 0.03837471783295711,
"grad_norm": 0.3795923590660095,
"learning_rate": 7.873473684210526e-05,
"loss": 0.0368,
"step": 51
},
{
"epoch": 0.03912716328066215,
"grad_norm": 0.7790267467498779,
"learning_rate": 7.820631578947369e-05,
"loss": 0.1122,
"step": 52
},
{
"epoch": 0.0398796087283672,
"grad_norm": 0.4049144983291626,
"learning_rate": 7.76778947368421e-05,
"loss": 0.0085,
"step": 53
},
{
"epoch": 0.040632054176072234,
"grad_norm": 0.07192933559417725,
"learning_rate": 7.714947368421052e-05,
"loss": 0.0009,
"step": 54
},
{
"epoch": 0.04138449962377728,
"grad_norm": 0.09724583476781845,
"learning_rate": 7.662105263157896e-05,
"loss": 0.0029,
"step": 55
},
{
"epoch": 0.042136945071482315,
"grad_norm": 0.17651653289794922,
"learning_rate": 7.609263157894737e-05,
"loss": 0.0082,
"step": 56
},
{
"epoch": 0.04288939051918736,
"grad_norm": 0.28293490409851074,
"learning_rate": 7.556421052631579e-05,
"loss": 0.003,
"step": 57
},
{
"epoch": 0.0436418359668924,
"grad_norm": 1.0796512365341187,
"learning_rate": 7.503578947368421e-05,
"loss": 0.0071,
"step": 58
},
{
"epoch": 0.04439428141459744,
"grad_norm": 0.5031782388687134,
"learning_rate": 7.450736842105263e-05,
"loss": 0.016,
"step": 59
},
{
"epoch": 0.045146726862302484,
"grad_norm": 0.2986276149749756,
"learning_rate": 7.397894736842105e-05,
"loss": 0.0068,
"step": 60
},
{
"epoch": 0.04589917231000752,
"grad_norm": 1.870119571685791,
"learning_rate": 7.345052631578948e-05,
"loss": 0.0103,
"step": 61
},
{
"epoch": 0.046651617757712566,
"grad_norm": 0.13697421550750732,
"learning_rate": 7.29221052631579e-05,
"loss": 0.0025,
"step": 62
},
{
"epoch": 0.04740406320541761,
"grad_norm": 0.03275276720523834,
"learning_rate": 7.239368421052631e-05,
"loss": 0.0011,
"step": 63
},
{
"epoch": 0.04815650865312265,
"grad_norm": 0.06320174783468246,
"learning_rate": 7.186526315789474e-05,
"loss": 0.0031,
"step": 64
},
{
"epoch": 0.04890895410082769,
"grad_norm": 0.03870139271020889,
"learning_rate": 7.133684210526316e-05,
"loss": 0.0012,
"step": 65
},
{
"epoch": 0.04966139954853273,
"grad_norm": 0.03169720619916916,
"learning_rate": 7.080842105263158e-05,
"loss": 0.0012,
"step": 66
},
{
"epoch": 0.05041384499623777,
"grad_norm": 0.8996763825416565,
"learning_rate": 7.028e-05,
"loss": 0.0197,
"step": 67
},
{
"epoch": 0.051166290443942816,
"grad_norm": 0.05753805488348007,
"learning_rate": 6.975157894736843e-05,
"loss": 0.0011,
"step": 68
},
{
"epoch": 0.05191873589164785,
"grad_norm": 0.16330073773860931,
"learning_rate": 6.922315789473685e-05,
"loss": 0.0029,
"step": 69
},
{
"epoch": 0.0526711813393529,
"grad_norm": 0.1298224925994873,
"learning_rate": 6.869473684210527e-05,
"loss": 0.0022,
"step": 70
},
{
"epoch": 0.05342362678705794,
"grad_norm": 0.04153815284371376,
"learning_rate": 6.816631578947368e-05,
"loss": 0.0014,
"step": 71
},
{
"epoch": 0.05417607223476298,
"grad_norm": 0.06644019484519958,
"learning_rate": 6.76378947368421e-05,
"loss": 0.0019,
"step": 72
},
{
"epoch": 0.05492851768246802,
"grad_norm": 0.12409530580043793,
"learning_rate": 6.710947368421052e-05,
"loss": 0.0017,
"step": 73
},
{
"epoch": 0.05568096313017306,
"grad_norm": 0.06674883514642715,
"learning_rate": 6.658105263157894e-05,
"loss": 0.0016,
"step": 74
},
{
"epoch": 0.056433408577878104,
"grad_norm": 0.09339006245136261,
"learning_rate": 6.605263157894737e-05,
"loss": 0.0011,
"step": 75
},
{
"epoch": 0.05718585402558315,
"grad_norm": 0.04911891743540764,
"learning_rate": 6.55242105263158e-05,
"loss": 0.0015,
"step": 76
},
{
"epoch": 0.057938299473288185,
"grad_norm": 0.3085123598575592,
"learning_rate": 6.499578947368422e-05,
"loss": 0.0096,
"step": 77
},
{
"epoch": 0.05869074492099323,
"grad_norm": 0.11163350194692612,
"learning_rate": 6.446736842105264e-05,
"loss": 0.0013,
"step": 78
},
{
"epoch": 0.059443190368698266,
"grad_norm": 0.06438886374235153,
"learning_rate": 6.393894736842105e-05,
"loss": 0.0011,
"step": 79
},
{
"epoch": 0.06019563581640331,
"grad_norm": 0.04296841472387314,
"learning_rate": 6.341052631578947e-05,
"loss": 0.0009,
"step": 80
},
{
"epoch": 0.060948081264108354,
"grad_norm": 0.03423371911048889,
"learning_rate": 6.288210526315789e-05,
"loss": 0.001,
"step": 81
},
{
"epoch": 0.06170052671181339,
"grad_norm": 0.09237401187419891,
"learning_rate": 6.235368421052632e-05,
"loss": 0.001,
"step": 82
},
{
"epoch": 0.062452972159518436,
"grad_norm": 0.01862478442490101,
"learning_rate": 6.182526315789474e-05,
"loss": 0.0006,
"step": 83
},
{
"epoch": 0.06320541760722348,
"grad_norm": 0.06537121534347534,
"learning_rate": 6.129684210526316e-05,
"loss": 0.0016,
"step": 84
},
{
"epoch": 0.06395786305492852,
"grad_norm": 0.6745563745498657,
"learning_rate": 6.076842105263158e-05,
"loss": 0.0117,
"step": 85
},
{
"epoch": 0.06471030850263355,
"grad_norm": 0.16666635870933533,
"learning_rate": 6.024e-05,
"loss": 0.0062,
"step": 86
},
{
"epoch": 0.0654627539503386,
"grad_norm": 0.017271826043725014,
"learning_rate": 5.971157894736842e-05,
"loss": 0.0005,
"step": 87
},
{
"epoch": 0.06621519939804364,
"grad_norm": 0.390020489692688,
"learning_rate": 5.9183157894736835e-05,
"loss": 0.0117,
"step": 88
},
{
"epoch": 0.06696764484574869,
"grad_norm": 0.9855097532272339,
"learning_rate": 5.8654736842105267e-05,
"loss": 0.0413,
"step": 89
},
{
"epoch": 0.06772009029345373,
"grad_norm": 0.08488702028989792,
"learning_rate": 5.8126315789473684e-05,
"loss": 0.002,
"step": 90
},
{
"epoch": 0.06847253574115876,
"grad_norm": 0.019529394805431366,
"learning_rate": 5.759789473684211e-05,
"loss": 0.0005,
"step": 91
},
{
"epoch": 0.0692249811888638,
"grad_norm": 2.2314491271972656,
"learning_rate": 5.706947368421053e-05,
"loss": 0.0078,
"step": 92
},
{
"epoch": 0.06997742663656885,
"grad_norm": 0.01869085058569908,
"learning_rate": 5.6541052631578945e-05,
"loss": 0.0006,
"step": 93
},
{
"epoch": 0.07072987208427389,
"grad_norm": 0.9058141112327576,
"learning_rate": 5.601263157894736e-05,
"loss": 0.0469,
"step": 94
},
{
"epoch": 0.07148231753197894,
"grad_norm": 0.47502413392066956,
"learning_rate": 5.5484210526315794e-05,
"loss": 0.0102,
"step": 95
},
{
"epoch": 0.07223476297968397,
"grad_norm": 3.3080074787139893,
"learning_rate": 5.495578947368421e-05,
"loss": 0.0324,
"step": 96
},
{
"epoch": 0.07298720842738901,
"grad_norm": 2.7681944370269775,
"learning_rate": 5.442736842105264e-05,
"loss": 0.027,
"step": 97
},
{
"epoch": 0.07373965387509406,
"grad_norm": 0.02752815932035446,
"learning_rate": 5.3898947368421055e-05,
"loss": 0.001,
"step": 98
},
{
"epoch": 0.0744920993227991,
"grad_norm": 0.4413633346557617,
"learning_rate": 5.337052631578947e-05,
"loss": 0.0032,
"step": 99
},
{
"epoch": 0.07524454477050414,
"grad_norm": 6.715440273284912,
"learning_rate": 5.284210526315789e-05,
"loss": 0.0134,
"step": 100
},
{
"epoch": 0.07524454477050414,
"eval_loss": 0.04449079558253288,
"eval_runtime": 22.4889,
"eval_samples_per_second": 24.901,
"eval_steps_per_second": 6.225,
"step": 100
},
{
"epoch": 0.07599699021820917,
"grad_norm": 3.5583584308624268,
"learning_rate": 5.231368421052631e-05,
"loss": 1.4636,
"step": 101
},
{
"epoch": 0.07674943566591422,
"grad_norm": 0.1338910460472107,
"learning_rate": 5.178526315789474e-05,
"loss": 0.0514,
"step": 102
},
{
"epoch": 0.07750188111361926,
"grad_norm": 0.21548093855381012,
"learning_rate": 5.1256842105263165e-05,
"loss": 0.0055,
"step": 103
},
{
"epoch": 0.0782543265613243,
"grad_norm": 0.2258404642343521,
"learning_rate": 5.072842105263158e-05,
"loss": 0.0067,
"step": 104
},
{
"epoch": 0.07900677200902935,
"grad_norm": 0.3479577898979187,
"learning_rate": 5.02e-05,
"loss": 0.0025,
"step": 105
},
{
"epoch": 0.0797592174567344,
"grad_norm": 0.13383802771568298,
"learning_rate": 4.967157894736842e-05,
"loss": 0.0074,
"step": 106
},
{
"epoch": 0.08051166290443942,
"grad_norm": 0.11670215427875519,
"learning_rate": 4.914315789473684e-05,
"loss": 0.0026,
"step": 107
},
{
"epoch": 0.08126410835214447,
"grad_norm": 0.32123813033103943,
"learning_rate": 4.861473684210526e-05,
"loss": 0.0095,
"step": 108
},
{
"epoch": 0.08201655379984951,
"grad_norm": 0.05738672614097595,
"learning_rate": 4.8086315789473686e-05,
"loss": 0.001,
"step": 109
},
{
"epoch": 0.08276899924755456,
"grad_norm": 0.05406120792031288,
"learning_rate": 4.7557894736842104e-05,
"loss": 0.0016,
"step": 110
},
{
"epoch": 0.0835214446952596,
"grad_norm": 0.02784488908946514,
"learning_rate": 4.702947368421053e-05,
"loss": 0.0016,
"step": 111
},
{
"epoch": 0.08427389014296463,
"grad_norm": 0.03466448560357094,
"learning_rate": 4.6501052631578946e-05,
"loss": 0.0017,
"step": 112
},
{
"epoch": 0.08502633559066967,
"grad_norm": 0.03824542835354805,
"learning_rate": 4.5972631578947364e-05,
"loss": 0.0021,
"step": 113
},
{
"epoch": 0.08577878103837472,
"grad_norm": 0.15851886570453644,
"learning_rate": 4.544421052631579e-05,
"loss": 0.0043,
"step": 114
},
{
"epoch": 0.08653122648607976,
"grad_norm": 0.025240371003746986,
"learning_rate": 4.4915789473684213e-05,
"loss": 0.0009,
"step": 115
},
{
"epoch": 0.0872836719337848,
"grad_norm": 0.019472820684313774,
"learning_rate": 4.438736842105263e-05,
"loss": 0.0009,
"step": 116
},
{
"epoch": 0.08803611738148984,
"grad_norm": 0.48911502957344055,
"learning_rate": 4.3858947368421056e-05,
"loss": 0.0037,
"step": 117
},
{
"epoch": 0.08878856282919488,
"grad_norm": 0.1643715500831604,
"learning_rate": 4.3330526315789474e-05,
"loss": 0.0056,
"step": 118
},
{
"epoch": 0.08954100827689992,
"grad_norm": 0.19596649706363678,
"learning_rate": 4.280210526315789e-05,
"loss": 0.0065,
"step": 119
},
{
"epoch": 0.09029345372460497,
"grad_norm": 1.3377373218536377,
"learning_rate": 4.2273684210526317e-05,
"loss": 0.0621,
"step": 120
},
{
"epoch": 0.09104589917231001,
"grad_norm": 0.505384087562561,
"learning_rate": 4.174526315789474e-05,
"loss": 0.0104,
"step": 121
},
{
"epoch": 0.09179834462001504,
"grad_norm": 0.0544271357357502,
"learning_rate": 4.121684210526316e-05,
"loss": 0.0023,
"step": 122
},
{
"epoch": 0.09255079006772009,
"grad_norm": 0.16811607778072357,
"learning_rate": 4.068842105263158e-05,
"loss": 0.0163,
"step": 123
},
{
"epoch": 0.09330323551542513,
"grad_norm": 0.019329270347952843,
"learning_rate": 4.016e-05,
"loss": 0.0008,
"step": 124
},
{
"epoch": 0.09405568096313018,
"grad_norm": 0.05329417437314987,
"learning_rate": 3.963157894736842e-05,
"loss": 0.0027,
"step": 125
},
{
"epoch": 0.09480812641083522,
"grad_norm": 0.08204272389411926,
"learning_rate": 3.9103157894736844e-05,
"loss": 0.0029,
"step": 126
},
{
"epoch": 0.09556057185854025,
"grad_norm": 0.06737394630908966,
"learning_rate": 3.857473684210526e-05,
"loss": 0.0014,
"step": 127
},
{
"epoch": 0.0963130173062453,
"grad_norm": 0.20691591501235962,
"learning_rate": 3.804631578947369e-05,
"loss": 0.0039,
"step": 128
},
{
"epoch": 0.09706546275395034,
"grad_norm": 0.010593212209641933,
"learning_rate": 3.7517894736842105e-05,
"loss": 0.0007,
"step": 129
},
{
"epoch": 0.09781790820165538,
"grad_norm": 0.025148313492536545,
"learning_rate": 3.698947368421052e-05,
"loss": 0.0014,
"step": 130
},
{
"epoch": 0.09857035364936043,
"grad_norm": 0.1723494529724121,
"learning_rate": 3.646105263157895e-05,
"loss": 0.0043,
"step": 131
},
{
"epoch": 0.09932279909706546,
"grad_norm": 0.01964077726006508,
"learning_rate": 3.593263157894737e-05,
"loss": 0.001,
"step": 132
},
{
"epoch": 0.1000752445447705,
"grad_norm": 0.2998151481151581,
"learning_rate": 3.540421052631579e-05,
"loss": 0.0105,
"step": 133
},
{
"epoch": 0.10082768999247554,
"grad_norm": 0.1715938299894333,
"learning_rate": 3.4875789473684215e-05,
"loss": 0.0035,
"step": 134
},
{
"epoch": 0.10158013544018059,
"grad_norm": 0.028639964759349823,
"learning_rate": 3.434736842105263e-05,
"loss": 0.0015,
"step": 135
},
{
"epoch": 0.10233258088788563,
"grad_norm": 0.13613921403884888,
"learning_rate": 3.381894736842105e-05,
"loss": 0.0029,
"step": 136
},
{
"epoch": 0.10308502633559068,
"grad_norm": 0.03546198457479477,
"learning_rate": 3.329052631578947e-05,
"loss": 0.0016,
"step": 137
},
{
"epoch": 0.1038374717832957,
"grad_norm": 0.4500066936016083,
"learning_rate": 3.27621052631579e-05,
"loss": 0.0062,
"step": 138
},
{
"epoch": 0.10458991723100075,
"grad_norm": 0.13641779124736786,
"learning_rate": 3.223368421052632e-05,
"loss": 0.0022,
"step": 139
},
{
"epoch": 0.1053423626787058,
"grad_norm": 0.24456901848316193,
"learning_rate": 3.1705263157894736e-05,
"loss": 0.0047,
"step": 140
},
{
"epoch": 0.10609480812641084,
"grad_norm": 0.6857782006263733,
"learning_rate": 3.117684210526316e-05,
"loss": 0.0237,
"step": 141
},
{
"epoch": 0.10684725357411588,
"grad_norm": 0.017228834331035614,
"learning_rate": 3.064842105263158e-05,
"loss": 0.001,
"step": 142
},
{
"epoch": 0.10759969902182091,
"grad_norm": 0.8723503351211548,
"learning_rate": 3.012e-05,
"loss": 0.0409,
"step": 143
},
{
"epoch": 0.10835214446952596,
"grad_norm": 0.1626891791820526,
"learning_rate": 2.9591578947368418e-05,
"loss": 0.0035,
"step": 144
},
{
"epoch": 0.109104589917231,
"grad_norm": 0.020657368004322052,
"learning_rate": 2.9063157894736842e-05,
"loss": 0.0009,
"step": 145
},
{
"epoch": 0.10985703536493605,
"grad_norm": 0.12785644829273224,
"learning_rate": 2.8534736842105264e-05,
"loss": 0.0024,
"step": 146
},
{
"epoch": 0.11060948081264109,
"grad_norm": 0.13075271248817444,
"learning_rate": 2.800631578947368e-05,
"loss": 0.0028,
"step": 147
},
{
"epoch": 0.11136192626034612,
"grad_norm": 0.02945193648338318,
"learning_rate": 2.7477894736842106e-05,
"loss": 0.0015,
"step": 148
},
{
"epoch": 0.11211437170805116,
"grad_norm": 0.6050726175308228,
"learning_rate": 2.6949473684210527e-05,
"loss": 0.0216,
"step": 149
},
{
"epoch": 0.11286681715575621,
"grad_norm": 2.671539068222046,
"learning_rate": 2.6421052631578945e-05,
"loss": 0.0604,
"step": 150
},
{
"epoch": 0.11286681715575621,
"eval_loss": 0.02783067524433136,
"eval_runtime": 22.4241,
"eval_samples_per_second": 24.973,
"eval_steps_per_second": 6.243,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.03394618376192e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}