{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.19745502413339183,
"eval_steps": 50,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0013163668275559457,
"grad_norm": 0.30696616335070354,
"learning_rate": 2e-05,
"loss": 2.0039,
"step": 1
},
{
"epoch": 0.0026327336551118913,
"grad_norm": 0.2981340320093832,
"learning_rate": 1.999777729859618e-05,
"loss": 1.9835,
"step": 2
},
{
"epoch": 0.003949100482667837,
"grad_norm": 14.179116456215626,
"learning_rate": 1.9991110182465032e-05,
"loss": 2.1888,
"step": 3
},
{
"epoch": 0.005265467310223783,
"grad_norm": 5.134986399673115,
"learning_rate": 1.9980001615408228e-05,
"loss": 2.0727,
"step": 4
},
{
"epoch": 0.006581834137779728,
"grad_norm": 4.2864655605921556,
"learning_rate": 1.9964456535631287e-05,
"loss": 2.0366,
"step": 5
},
{
"epoch": 0.007898200965335674,
"grad_norm": 3.425493419827274,
"learning_rate": 1.9944481853548335e-05,
"loss": 2.0437,
"step": 6
},
{
"epoch": 0.009214567792891619,
"grad_norm": 2.2782739580301414,
"learning_rate": 1.9920086448710162e-05,
"loss": 2.0373,
"step": 7
},
{
"epoch": 0.010530934620447565,
"grad_norm": 1.8345447423860846,
"learning_rate": 1.9891281165856876e-05,
"loss": 2.0013,
"step": 8
},
{
"epoch": 0.01184730144800351,
"grad_norm": 1.6656939664554824,
"learning_rate": 1.9858078810097004e-05,
"loss": 2.0271,
"step": 9
},
{
"epoch": 0.013163668275559455,
"grad_norm": 1.2832081039970213,
"learning_rate": 1.98204941412151e-05,
"loss": 2.0229,
"step": 10
},
{
"epoch": 0.014480035103115402,
"grad_norm": 1.064845389532268,
"learning_rate": 1.9778543867110428e-05,
"loss": 2.0242,
"step": 11
},
{
"epoch": 0.015796401930671347,
"grad_norm": 1.0011655878120684,
"learning_rate": 1.9732246636369605e-05,
"loss": 2.0269,
"step": 12
},
{
"epoch": 0.017112768758227294,
"grad_norm": 1.2132864745851109,
"learning_rate": 1.968162302997659e-05,
"loss": 2.0263,
"step": 13
},
{
"epoch": 0.018429135585783237,
"grad_norm": 1.21338076286533,
"learning_rate": 1.962669555216358e-05,
"loss": 2.0117,
"step": 14
},
{
"epoch": 0.019745502413339184,
"grad_norm": 0.8187255504437202,
"learning_rate": 1.9567488620406984e-05,
"loss": 2.0295,
"step": 15
},
{
"epoch": 0.02106186924089513,
"grad_norm": 0.7746476520569149,
"learning_rate": 1.9504028554572865e-05,
"loss": 2.0119,
"step": 16
},
{
"epoch": 0.022378236068451074,
"grad_norm": 1.0497273073425593,
"learning_rate": 1.943634356521671e-05,
"loss": 2.0206,
"step": 17
},
{
"epoch": 0.02369460289600702,
"grad_norm": 0.9479600984734513,
"learning_rate": 1.9364463741042694e-05,
"loss": 1.9865,
"step": 18
},
{
"epoch": 0.025010969723562967,
"grad_norm": 0.8196862860693493,
"learning_rate": 1.928842103552803e-05,
"loss": 1.9976,
"step": 19
},
{
"epoch": 0.02632733655111891,
"grad_norm": 0.5913274908675824,
"learning_rate": 1.920824925271838e-05,
"loss": 2.011,
"step": 20
},
{
"epoch": 0.027643703378674857,
"grad_norm": 0.6728905852774738,
"learning_rate": 1.9123984032200586e-05,
"loss": 1.9905,
"step": 21
},
{
"epoch": 0.028960070206230804,
"grad_norm": 0.7497374260591831,
"learning_rate": 1.9035662833259433e-05,
"loss": 2.0073,
"step": 22
},
{
"epoch": 0.030276437033786747,
"grad_norm": 0.6936831191512686,
"learning_rate": 1.8943324918225495e-05,
"loss": 2.0065,
"step": 23
},
{
"epoch": 0.031592803861342694,
"grad_norm": 0.5507208523194586,
"learning_rate": 1.8847011335021447e-05,
"loss": 1.9922,
"step": 24
},
{
"epoch": 0.03290917068889864,
"grad_norm": 0.5156365942844777,
"learning_rate": 1.874676489891461e-05,
"loss": 1.9934,
"step": 25
},
{
"epoch": 0.03422553751645459,
"grad_norm": 0.5251346487715227,
"learning_rate": 1.8642630173483832e-05,
"loss": 1.9927,
"step": 26
},
{
"epoch": 0.03554190434401053,
"grad_norm": 0.510635144961559,
"learning_rate": 1.85346534508092e-05,
"loss": 1.9962,
"step": 27
},
{
"epoch": 0.036858271171566474,
"grad_norm": 0.5181804796655289,
"learning_rate": 1.8422882730893323e-05,
"loss": 2.0137,
"step": 28
},
{
"epoch": 0.03817463799912242,
"grad_norm": 0.49077337711361835,
"learning_rate": 1.8307367700323412e-05,
"loss": 2.0108,
"step": 29
},
{
"epoch": 0.03949100482667837,
"grad_norm": 0.5359933989375382,
"learning_rate": 1.8188159710183595e-05,
"loss": 1.9796,
"step": 30
},
{
"epoch": 0.040807371654234315,
"grad_norm": 0.5191960799186749,
"learning_rate": 1.8065311753227272e-05,
"loss": 2.0251,
"step": 31
},
{
"epoch": 0.04212373848179026,
"grad_norm": 0.4504971551269138,
"learning_rate": 1.7938878440319722e-05,
"loss": 2.0165,
"step": 32
},
{
"epoch": 0.0434401053093462,
"grad_norm": 0.6183611766129652,
"learning_rate": 1.7808915976161364e-05,
"loss": 2.0187,
"step": 33
},
{
"epoch": 0.04475647213690215,
"grad_norm": 0.7248943050013543,
"learning_rate": 1.7675482134302503e-05,
"loss": 1.9992,
"step": 34
},
{
"epoch": 0.046072838964458095,
"grad_norm": 0.8805699361270162,
"learning_rate": 1.753863623146066e-05,
"loss": 2.0015,
"step": 35
},
{
"epoch": 0.04738920579201404,
"grad_norm": 1.0132129872309281,
"learning_rate": 1.7398439101151908e-05,
"loss": 1.9881,
"step": 36
},
{
"epoch": 0.04870557261956999,
"grad_norm": 1.002441811799336,
"learning_rate": 1.7254953066647915e-05,
"loss": 1.9919,
"step": 37
},
{
"epoch": 0.050021939447125935,
"grad_norm": 0.760921923703588,
"learning_rate": 1.710824191327075e-05,
"loss": 2.0243,
"step": 38
},
{
"epoch": 0.051338306274681875,
"grad_norm": 0.48286113953076154,
"learning_rate": 1.695837086003772e-05,
"loss": 1.9978,
"step": 39
},
{
"epoch": 0.05265467310223782,
"grad_norm": 0.6473967666505364,
"learning_rate": 1.680540653066891e-05,
"loss": 1.9951,
"step": 40
},
{
"epoch": 0.05397103992979377,
"grad_norm": 0.8514602423142358,
"learning_rate": 1.6649416923970248e-05,
"loss": 1.9972,
"step": 41
},
{
"epoch": 0.055287406757349715,
"grad_norm": 0.751097254833928,
"learning_rate": 1.649047138360529e-05,
"loss": 1.9876,
"step": 42
},
{
"epoch": 0.05660377358490566,
"grad_norm": 0.5323632856254606,
"learning_rate": 1.632864056726917e-05,
"loss": 1.9878,
"step": 43
},
{
"epoch": 0.05792014041246161,
"grad_norm": 0.5485571128183261,
"learning_rate": 1.6163996415278423e-05,
"loss": 2.0138,
"step": 44
},
{
"epoch": 0.05923650724001755,
"grad_norm": 0.5935935140811553,
"learning_rate": 1.5996612118590604e-05,
"loss": 1.9929,
"step": 45
},
{
"epoch": 0.060552874067573495,
"grad_norm": 0.5401522230895758,
"learning_rate": 1.5826562086267956e-05,
"loss": 2.0125,
"step": 46
},
{
"epoch": 0.06186924089512944,
"grad_norm": 0.5646827202517825,
"learning_rate": 1.565392191239959e-05,
"loss": 1.9997,
"step": 47
},
{
"epoch": 0.06318560772268539,
"grad_norm": 0.5388084007303031,
"learning_rate": 1.5478768342496872e-05,
"loss": 1.989,
"step": 48
},
{
"epoch": 0.06450197455024133,
"grad_norm": 0.6323289849565407,
"learning_rate": 1.5301179239376936e-05,
"loss": 2.0023,
"step": 49
},
{
"epoch": 0.06581834137779728,
"grad_norm": 0.6419188688457494,
"learning_rate": 1.512123354854955e-05,
"loss": 1.9813,
"step": 50
},
{
"epoch": 0.06713470820535322,
"grad_norm": 0.5229047414471234,
"learning_rate": 1.4939011263122635e-05,
"loss": 1.9822,
"step": 51
},
{
"epoch": 0.06845107503290918,
"grad_norm": 0.48105770067016557,
"learning_rate": 1.4754593388242117e-05,
"loss": 1.9904,
"step": 52
},
{
"epoch": 0.06976744186046512,
"grad_norm": 0.5572347089874431,
"learning_rate": 1.4568061905081874e-05,
"loss": 1.9813,
"step": 53
},
{
"epoch": 0.07108380868802106,
"grad_norm": 0.6271370685019291,
"learning_rate": 1.4379499734399797e-05,
"loss": 1.9934,
"step": 54
},
{
"epoch": 0.07240017551557701,
"grad_norm": 0.5559157267210807,
"learning_rate": 1.4188990699676186e-05,
"loss": 1.9896,
"step": 55
},
{
"epoch": 0.07371654234313295,
"grad_norm": 0.454681292718451,
"learning_rate": 1.3996619489850822e-05,
"loss": 2.0017,
"step": 56
},
{
"epoch": 0.0750329091706889,
"grad_norm": 0.47954445521042993,
"learning_rate": 1.3802471621675337e-05,
"loss": 1.9839,
"step": 57
},
{
"epoch": 0.07634927599824484,
"grad_norm": 0.4085817174163343,
"learning_rate": 1.3606633401697557e-05,
"loss": 2.0066,
"step": 58
},
{
"epoch": 0.0776656428258008,
"grad_norm": 0.4378465366364925,
"learning_rate": 1.340919188789477e-05,
"loss": 1.9859,
"step": 59
},
{
"epoch": 0.07898200965335674,
"grad_norm": 0.46170835320156245,
"learning_rate": 1.3210234850972966e-05,
"loss": 2.0042,
"step": 60
},
{
"epoch": 0.08029837648091268,
"grad_norm": 0.5138010856648089,
"learning_rate": 1.300985073534919e-05,
"loss": 2.0035,
"step": 61
},
{
"epoch": 0.08161474330846863,
"grad_norm": 0.43492083866769093,
"learning_rate": 1.280812861983446e-05,
"loss": 1.983,
"step": 62
},
{
"epoch": 0.08293111013602457,
"grad_norm": 0.44130144930069237,
"learning_rate": 1.2605158178034656e-05,
"loss": 1.9878,
"step": 63
},
{
"epoch": 0.08424747696358052,
"grad_norm": 0.5155233783810983,
"learning_rate": 1.2401029638486952e-05,
"loss": 1.9645,
"step": 64
},
{
"epoch": 0.08556384379113646,
"grad_norm": 0.48282431373183604,
"learning_rate": 1.219583374454963e-05,
"loss": 2.0009,
"step": 65
},
{
"epoch": 0.0868802106186924,
"grad_norm": 0.41876634156662645,
"learning_rate": 1.1989661714063e-05,
"loss": 2.0183,
"step": 66
},
{
"epoch": 0.08819657744624836,
"grad_norm": 0.5011135601707007,
"learning_rate": 1.1782605198799371e-05,
"loss": 2.0048,
"step": 67
},
{
"epoch": 0.0895129442738043,
"grad_norm": 0.48015642779128836,
"learning_rate": 1.157475624372018e-05,
"loss": 1.9775,
"step": 68
},
{
"epoch": 0.09082931110136025,
"grad_norm": 0.42507957087590986,
"learning_rate": 1.1366207246058269e-05,
"loss": 1.9882,
"step": 69
},
{
"epoch": 0.09214567792891619,
"grad_norm": 0.4618501564423574,
"learning_rate": 1.1157050914243614e-05,
"loss": 1.9987,
"step": 70
},
{
"epoch": 0.09346204475647214,
"grad_norm": 0.3970436128931091,
"learning_rate": 1.0947380226690686e-05,
"loss": 1.9935,
"step": 71
},
{
"epoch": 0.09477841158402808,
"grad_norm": 0.44290807770756685,
"learning_rate": 1.0737288390465792e-05,
"loss": 1.9837,
"step": 72
},
{
"epoch": 0.09609477841158402,
"grad_norm": 0.4175771965499877,
"learning_rate": 1.0526868799852797e-05,
"loss": 2.0069,
"step": 73
},
{
"epoch": 0.09741114523913998,
"grad_norm": 0.402423068113447,
"learning_rate": 1.031621499483559e-05,
"loss": 1.9997,
"step": 74
},
{
"epoch": 0.09872751206669592,
"grad_norm": 0.38578133568811257,
"learning_rate": 1.0105420619515798e-05,
"loss": 1.9892,
"step": 75
},
{
"epoch": 0.10004387889425187,
"grad_norm": 0.38927798137572783,
"learning_rate": 9.894579380484206e-06,
"loss": 2.0054,
"step": 76
},
{
"epoch": 0.10136024572180781,
"grad_norm": 0.35304659318076537,
"learning_rate": 9.683785005164412e-06,
"loss": 2.0099,
"step": 77
},
{
"epoch": 0.10267661254936375,
"grad_norm": 0.39266341718589065,
"learning_rate": 9.473131200147205e-06,
"loss": 2.0163,
"step": 78
},
{
"epoch": 0.1039929793769197,
"grad_norm": 0.3461839316811149,
"learning_rate": 9.262711609534211e-06,
"loss": 2.0017,
"step": 79
},
{
"epoch": 0.10530934620447564,
"grad_norm": 0.38645244912119553,
"learning_rate": 9.052619773309318e-06,
"loss": 1.9923,
"step": 80
},
{
"epoch": 0.1066257130320316,
"grad_norm": 0.3430411034396793,
"learning_rate": 8.842949085756389e-06,
"loss": 1.9877,
"step": 81
},
{
"epoch": 0.10794207985958754,
"grad_norm": 0.3595805436281005,
"learning_rate": 8.633792753941733e-06,
"loss": 1.9972,
"step": 82
},
{
"epoch": 0.10925844668714349,
"grad_norm": 0.3601315940798048,
"learning_rate": 8.425243756279824e-06,
"loss": 1.9756,
"step": 83
},
{
"epoch": 0.11057481351469943,
"grad_norm": 0.376156660660695,
"learning_rate": 8.217394801200632e-06,
"loss": 1.997,
"step": 84
},
{
"epoch": 0.11189118034225537,
"grad_norm": 0.33929542030931453,
"learning_rate": 8.010338285937006e-06,
"loss": 1.985,
"step": 85
},
{
"epoch": 0.11320754716981132,
"grad_norm": 0.35313642599010037,
"learning_rate": 7.804166255450372e-06,
"loss": 1.9909,
"step": 86
},
{
"epoch": 0.11452391399736726,
"grad_norm": 0.3143928376429902,
"learning_rate": 7.598970361513052e-06,
"loss": 1.9938,
"step": 87
},
{
"epoch": 0.11584028082492322,
"grad_norm": 0.330150688278516,
"learning_rate": 7.394841821965345e-06,
"loss": 1.9711,
"step": 88
},
{
"epoch": 0.11715664765247916,
"grad_norm": 0.3315276717328342,
"learning_rate": 7.191871380165538e-06,
"loss": 1.995,
"step": 89
},
{
"epoch": 0.1184730144800351,
"grad_norm": 0.3116955059840049,
"learning_rate": 6.990149264650814e-06,
"loss": 1.9866,
"step": 90
},
{
"epoch": 0.11978938130759105,
"grad_norm": 0.3272285228682192,
"learning_rate": 6.789765149027039e-06,
"loss": 1.9933,
"step": 91
},
{
"epoch": 0.12110574813514699,
"grad_norm": 0.3042405256944742,
"learning_rate": 6.590808112105232e-06,
"loss": 2.009,
"step": 92
},
{
"epoch": 0.12242211496270294,
"grad_norm": 0.2929772246885549,
"learning_rate": 6.3933665983024465e-06,
"loss": 1.9938,
"step": 93
},
{
"epoch": 0.12373848179025888,
"grad_norm": 0.3208011638623377,
"learning_rate": 6.197528378324664e-06,
"loss": 1.9713,
"step": 94
},
{
"epoch": 0.12505484861781482,
"grad_norm": 0.32082408506423593,
"learning_rate": 6.003380510149179e-06,
"loss": 1.9982,
"step": 95
},
{
"epoch": 0.12637121544537078,
"grad_norm": 0.2951994783208125,
"learning_rate": 5.8110093003238175e-06,
"loss": 2.021,
"step": 96
},
{
"epoch": 0.12768758227292673,
"grad_norm": 0.7109167282946866,
"learning_rate": 5.620500265600206e-06,
"loss": 1.9939,
"step": 97
},
{
"epoch": 0.12900394910048266,
"grad_norm": 0.2999708748800405,
"learning_rate": 5.431938094918132e-06,
"loss": 2.0038,
"step": 98
},
{
"epoch": 0.1303203159280386,
"grad_norm": 0.2816495513792004,
"learning_rate": 5.245406611757882e-06,
"loss": 2.0026,
"step": 99
},
{
"epoch": 0.13163668275559456,
"grad_norm": 0.29701629779857913,
"learning_rate": 5.060988736877366e-06,
"loss": 1.9831,
"step": 100
},
{
"epoch": 0.13295304958315052,
"grad_norm": 0.2887151681364845,
"learning_rate": 4.878766451450451e-06,
"loss": 1.9907,
"step": 101
},
{
"epoch": 0.13426941641070644,
"grad_norm": 0.2779884548237959,
"learning_rate": 4.698820760623064e-06,
"loss": 1.984,
"step": 102
},
{
"epoch": 0.1355857832382624,
"grad_norm": 0.2766694288774238,
"learning_rate": 4.5212316575031325e-06,
"loss": 1.9963,
"step": 103
},
{
"epoch": 0.13690215006581835,
"grad_norm": 0.2822067203881779,
"learning_rate": 4.346078087600411e-06,
"loss": 2.0006,
"step": 104
},
{
"epoch": 0.13821851689337428,
"grad_norm": 0.2991158574435879,
"learning_rate": 4.173437913732048e-06,
"loss": 1.9971,
"step": 105
},
{
"epoch": 0.13953488372093023,
"grad_norm": 0.2708052906825167,
"learning_rate": 4.003387881409397e-06,
"loss": 2.0011,
"step": 106
},
{
"epoch": 0.14085125054848618,
"grad_norm": 0.2723657374730129,
"learning_rate": 3.836003584721577e-06,
"loss": 1.9999,
"step": 107
},
{
"epoch": 0.1421676173760421,
"grad_norm": 0.2630763874969036,
"learning_rate": 3.6713594327308343e-06,
"loss": 1.9855,
"step": 108
},
{
"epoch": 0.14348398420359806,
"grad_norm": 0.27377038898140144,
"learning_rate": 3.509528616394716e-06,
"loss": 2.0024,
"step": 109
},
{
"epoch": 0.14480035103115402,
"grad_norm": 0.26976500367398504,
"learning_rate": 3.3505830760297543e-06,
"loss": 1.9982,
"step": 110
},
{
"epoch": 0.14611671785870997,
"grad_norm": 0.23637132922045676,
"learning_rate": 3.1945934693310897e-06,
"loss": 2.0043,
"step": 111
},
{
"epoch": 0.1474330846862659,
"grad_norm": 0.27811290145029116,
"learning_rate": 3.0416291399622834e-06,
"loss": 2.0297,
"step": 112
},
{
"epoch": 0.14874945151382185,
"grad_norm": 0.2564537834227521,
"learning_rate": 2.891758086729253e-06,
"loss": 1.9815,
"step": 113
},
{
"epoch": 0.1500658183413778,
"grad_norm": 0.2608533593345402,
"learning_rate": 2.7450469333520856e-06,
"loss": 1.988,
"step": 114
},
{
"epoch": 0.15138218516893373,
"grad_norm": 0.23992377367249682,
"learning_rate": 2.6015608988480956e-06,
"loss": 1.9913,
"step": 115
},
{
"epoch": 0.15269855199648968,
"grad_norm": 0.23794989474751416,
"learning_rate": 2.4613637685393433e-06,
"loss": 2.0012,
"step": 116
},
{
"epoch": 0.15401491882404564,
"grad_norm": 0.23566567498202615,
"learning_rate": 2.324517865697501e-06,
"loss": 1.9748,
"step": 117
},
{
"epoch": 0.1553312856516016,
"grad_norm": 0.23519681087391583,
"learning_rate": 2.19108402383864e-06,
"loss": 1.9874,
"step": 118
},
{
"epoch": 0.15664765247915752,
"grad_norm": 0.22813903636281588,
"learning_rate": 2.06112155968028e-06,
"loss": 2.0067,
"step": 119
},
{
"epoch": 0.15796401930671347,
"grad_norm": 0.21989471390672014,
"learning_rate": 1.9346882467727323e-06,
"loss": 2.0111,
"step": 120
},
{
"epoch": 0.15928038613426942,
"grad_norm": 0.23407319590609088,
"learning_rate": 1.811840289816409e-06,
"loss": 2.004,
"step": 121
},
{
"epoch": 0.16059675296182535,
"grad_norm": 0.21115130572603857,
"learning_rate": 1.6926322996765899e-06,
"loss": 2.0044,
"step": 122
},
{
"epoch": 0.1619131197893813,
"grad_norm": 0.23157246285251912,
"learning_rate": 1.5771172691066793e-06,
"loss": 2.0086,
"step": 123
},
{
"epoch": 0.16322948661693726,
"grad_norm": 0.21352091323327335,
"learning_rate": 1.4653465491908003e-06,
"loss": 2.0314,
"step": 124
},
{
"epoch": 0.1645458534444932,
"grad_norm": 0.2128894225391903,
"learning_rate": 1.3573698265161683e-06,
"loss": 1.9876,
"step": 125
},
{
"epoch": 0.16586222027204914,
"grad_norm": 0.2161080422635352,
"learning_rate": 1.2532351010853916e-06,
"loss": 1.9922,
"step": 126
},
{
"epoch": 0.1671785870996051,
"grad_norm": 0.2163538313637293,
"learning_rate": 1.152988664978556e-06,
"loss": 1.9999,
"step": 127
},
{
"epoch": 0.16849495392716105,
"grad_norm": 0.21200680169098507,
"learning_rate": 1.0566750817745076e-06,
"loss": 2.0068,
"step": 128
},
{
"epoch": 0.16981132075471697,
"grad_norm": 0.2101625971202669,
"learning_rate": 9.6433716674057e-07,
"loss": 1.9989,
"step": 129
},
{
"epoch": 0.17112768758227292,
"grad_norm": 0.22375568619197952,
"learning_rate": 8.760159677994174e-07,
"loss": 2.0037,
"step": 130
},
{
"epoch": 0.17244405440982888,
"grad_norm": 0.20405409107101957,
"learning_rate": 7.91750747281621e-07,
"loss": 1.9944,
"step": 131
},
{
"epoch": 0.1737604212373848,
"grad_norm": 0.19929937491222882,
"learning_rate": 7.115789644719728e-07,
"loss": 2.0145,
"step": 132
},
{
"epoch": 0.17507678806494076,
"grad_norm": 0.19627547685487395,
"learning_rate": 6.355362589573078e-07,
"loss": 1.9962,
"step": 133
},
{
"epoch": 0.1763931548924967,
"grad_norm": 0.20551587903874605,
"learning_rate": 5.636564347832907e-07,
"loss": 2.0016,
"step": 134
},
{
"epoch": 0.17770952172005267,
"grad_norm": 0.19486467558987572,
"learning_rate": 4.95971445427137e-07,
"loss": 2.0025,
"step": 135
},
{
"epoch": 0.1790258885476086,
"grad_norm": 0.19857404536937123,
"learning_rate": 4.3251137959302023e-07,
"loss": 1.9897,
"step": 136
},
{
"epoch": 0.18034225537516455,
"grad_norm": 0.1994848957702455,
"learning_rate": 3.733044478364234e-07,
"loss": 1.9868,
"step": 137
},
{
"epoch": 0.1816586222027205,
"grad_norm": 0.1888779875973913,
"learning_rate": 3.1837697002341293e-07,
"loss": 1.9982,
"step": 138
},
{
"epoch": 0.18297498903027642,
"grad_norm": 0.19151154827082154,
"learning_rate": 2.677533636303964e-07,
"loss": 1.9881,
"step": 139
},
{
"epoch": 0.18429135585783238,
"grad_norm": 0.18878577251287124,
"learning_rate": 2.214561328895748e-07,
"loss": 1.9933,
"step": 140
},
{
"epoch": 0.18560772268538833,
"grad_norm": 0.19163925087857359,
"learning_rate": 1.7950585878489856e-07,
"loss": 1.9981,
"step": 141
},
{
"epoch": 0.18692408951294429,
"grad_norm": 0.1891926330725101,
"learning_rate": 1.419211899029971e-07,
"loss": 1.9893,
"step": 142
},
{
"epoch": 0.1882404563405002,
"grad_norm": 0.1855856283054167,
"learning_rate": 1.0871883414312778e-07,
"loss": 1.9993,
"step": 143
},
{
"epoch": 0.18955682316805617,
"grad_norm": 0.18824124797220707,
"learning_rate": 7.99135512898408e-08,
"loss": 1.9916,
"step": 144
},
{
"epoch": 0.19087318999561212,
"grad_norm": 0.18302209948142348,
"learning_rate": 5.55181464516652e-08,
"loss": 1.9942,
"step": 145
},
{
"epoch": 0.19218955682316805,
"grad_norm": 0.18944239262741197,
"learning_rate": 3.554346436871581e-08,
"loss": 1.9889,
"step": 146
},
{
"epoch": 0.193505923650724,
"grad_norm": 0.18518930942475542,
"learning_rate": 1.9998384591773945e-08,
"loss": 1.9975,
"step": 147
},
{
"epoch": 0.19482229047827995,
"grad_norm": 0.1845302991836952,
"learning_rate": 8.889817534969425e-09,
"loss": 2.0052,
"step": 148
},
{
"epoch": 0.1961386573058359,
"grad_norm": 0.18441948627727545,
"learning_rate": 2.222701403818972e-09,
"loss": 1.9941,
"step": 149
},
{
"epoch": 0.19745502413339183,
"grad_norm": 0.20003870031363952,
"learning_rate": 0.0,
"loss": 2.0068,
"step": 150
},
{
"epoch": 0.19745502413339183,
"step": 150,
"total_flos": 349677715193856.0,
"train_loss": 2.0010925364494323,
"train_runtime": 5203.7451,
"train_samples_per_second": 58.112,
"train_steps_per_second": 0.029
}
],
"logging_steps": 1,
"max_steps": 150,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 349677715193856.0,
"train_batch_size": 42,
"trial_name": null,
"trial_params": null
}