{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.4384283660757067,
"eval_steps": 200,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.011978917105893628,
"grad_norm": null,
"learning_rate": 4e-07,
"loss": 94.1331,
"step": 10
},
{
"epoch": 0.023957834211787255,
"grad_norm": 616.6350708007812,
"learning_rate": 1.9999995539071857e-06,
"loss": 13.5081,
"step": 20
},
{
"epoch": 0.035936751317680884,
"grad_norm": 1712.5594482421875,
"learning_rate": 1.9999460232510422e-06,
"loss": 44.8754,
"step": 30
},
{
"epoch": 0.04791566842357451,
"grad_norm": 0.8271563053131104,
"learning_rate": 1.9998032795044213e-06,
"loss": 31.7079,
"step": 40
},
{
"epoch": 0.059894585529468136,
"grad_norm": 0.7399762868881226,
"learning_rate": 1.9995713354026208e-06,
"loss": 14.6654,
"step": 50
},
{
"epoch": 0.07187350263536177,
"grad_norm": 4523.66748046875,
"learning_rate": 1.999250211639208e-06,
"loss": 15.7606,
"step": 60
},
{
"epoch": 0.08385241974125539,
"grad_norm": 2372.1298828125,
"learning_rate": 1.998839936864172e-06,
"loss": 29.1475,
"step": 70
},
{
"epoch": 0.09583133684714902,
"grad_norm": 525.8820190429688,
"learning_rate": 1.9983405476813705e-06,
"loss": 30.3939,
"step": 80
},
{
"epoch": 0.10781025395304264,
"grad_norm": 622.8740844726562,
"learning_rate": 1.9977520886452595e-06,
"loss": 25.0045,
"step": 90
},
{
"epoch": 0.11978917105893627,
"grad_norm": 393.3197326660156,
"learning_rate": 1.997074612256923e-06,
"loss": 10.3827,
"step": 100
},
{
"epoch": 0.1317680881648299,
"grad_norm": 1076.3707275390625,
"learning_rate": 1.996308178959385e-06,
"loss": 35.8152,
"step": 110
},
{
"epoch": 0.14374700527072354,
"grad_norm": 0.5970665812492371,
"learning_rate": 1.99545285713222e-06,
"loss": 18.8117,
"step": 120
},
{
"epoch": 0.15572592237661714,
"grad_norm": 1666.3372802734375,
"learning_rate": 1.9945087230854505e-06,
"loss": 35.409,
"step": 130
},
{
"epoch": 0.16770483948251078,
"grad_norm": 454.6666564941406,
"learning_rate": 1.9934758610527393e-06,
"loss": 26.721,
"step": 140
},
{
"epoch": 0.1796837565884044,
"grad_norm": 0.6310660243034363,
"learning_rate": 1.9923543631838743e-06,
"loss": 29.0184,
"step": 150
},
{
"epoch": 0.19166267369429804,
"grad_norm": 1460.1695556640625,
"learning_rate": 1.991144329536547e-06,
"loss": 32.6666,
"step": 160
},
{
"epoch": 0.20364159080019167,
"grad_norm": 1623.7030029296875,
"learning_rate": 1.989845868067426e-06,
"loss": 22.361,
"step": 170
},
{
"epoch": 0.21562050790608528,
"grad_norm": 0.2606387734413147,
"learning_rate": 1.9884590946225245e-06,
"loss": 26.8546,
"step": 180
},
{
"epoch": 0.2275994250119789,
"grad_norm": 2438.140869140625,
"learning_rate": 1.986984132926866e-06,
"loss": 22.4338,
"step": 190
},
{
"epoch": 0.23957834211787254,
"grad_norm": 457.811767578125,
"learning_rate": 1.985421114573445e-06,
"loss": 38.06,
"step": 200
},
{
"epoch": 0.23957834211787254,
"eval_loss": 7.858267307281494,
"eval_runtime": 239.5749,
"eval_samples_per_second": 6.219,
"eval_steps_per_second": 3.11,
"step": 200
},
{
"epoch": 0.25155725922376615,
"grad_norm": 3668.065673828125,
"learning_rate": 1.983770179011485e-06,
"loss": 36.1835,
"step": 210
},
{
"epoch": 0.2635361763296598,
"grad_norm": 0.26454946398735046,
"learning_rate": 1.9820314735340013e-06,
"loss": 36.229,
"step": 220
},
{
"epoch": 0.2755150934355534,
"grad_norm": 0.4162730872631073,
"learning_rate": 1.9802051532646555e-06,
"loss": 32.5845,
"step": 230
},
{
"epoch": 0.2874940105414471,
"grad_norm": 0.15878209471702576,
"learning_rate": 1.9782913811439183e-06,
"loss": 15.8691,
"step": 240
},
{
"epoch": 0.2994729276473407,
"grad_norm": 210.30516052246094,
"learning_rate": 1.9762903279145307e-06,
"loss": 21.8668,
"step": 250
},
{
"epoch": 0.3114518447532343,
"grad_norm": 0.31922483444213867,
"learning_rate": 1.974202172106272e-06,
"loss": 19.8604,
"step": 260
},
{
"epoch": 0.32343076185912795,
"grad_norm": 166.21446228027344,
"learning_rate": 1.9720271000200323e-06,
"loss": 19.3605,
"step": 270
},
{
"epoch": 0.33540967896502155,
"grad_norm": 1452.038330078125,
"learning_rate": 1.9697653057111875e-06,
"loss": 33.3646,
"step": 280
},
{
"epoch": 0.3473885960709152,
"grad_norm": 0.4086947739124298,
"learning_rate": 1.967416990972291e-06,
"loss": 15.2998,
"step": 290
},
{
"epoch": 0.3593675131768088,
"grad_norm": 1385.362548828125,
"learning_rate": 1.964982365315066e-06,
"loss": 16.0225,
"step": 300
},
{
"epoch": 0.3713464302827024,
"grad_norm": 189.06552124023438,
"learning_rate": 1.9624616459517164e-06,
"loss": 33.4549,
"step": 310
},
{
"epoch": 0.3833253473885961,
"grad_norm": 1255.678466796875,
"learning_rate": 1.959855057775546e-06,
"loss": 33.8802,
"step": 320
},
{
"epoch": 0.3953042644944897,
"grad_norm": 1233.65673828125,
"learning_rate": 1.9571628333408937e-06,
"loss": 33.1002,
"step": 330
},
{
"epoch": 0.40728318160038335,
"grad_norm": 162.6513671875,
"learning_rate": 1.954385212842387e-06,
"loss": 28.827,
"step": 340
},
{
"epoch": 0.41926209870627695,
"grad_norm": 2140.404296875,
"learning_rate": 1.9515224440935103e-06,
"loss": 16.8164,
"step": 350
},
{
"epoch": 0.43124101581217056,
"grad_norm": 0.2348758578300476,
"learning_rate": 1.948574782504498e-06,
"loss": 11.385,
"step": 360
},
{
"epoch": 0.4432199329180642,
"grad_norm": 0.24031803011894226,
"learning_rate": 1.9455424910595442e-06,
"loss": 12.6478,
"step": 370
},
{
"epoch": 0.4551988500239578,
"grad_norm": 992.0042114257812,
"learning_rate": 1.942425840293343e-06,
"loss": 17.7328,
"step": 380
},
{
"epoch": 0.4671777671298515,
"grad_norm": 1306.5712890625,
"learning_rate": 1.939225108266951e-06,
"loss": 30.8945,
"step": 390
},
{
"epoch": 0.4791566842357451,
"grad_norm": 0.18838146328926086,
"learning_rate": 1.9359405805429763e-06,
"loss": 12.8307,
"step": 400
},
{
"epoch": 0.4791566842357451,
"eval_loss": 5.787263870239258,
"eval_runtime": 237.9568,
"eval_samples_per_second": 6.262,
"eval_steps_per_second": 3.131,
"step": 400
},
{
"epoch": 0.4911356013416387,
"grad_norm": 0.2591519057750702,
"learning_rate": 1.932572550160106e-06,
"loss": 30.1815,
"step": 410
},
{
"epoch": 0.5031145184475323,
"grad_norm": 1008.1605224609375,
"learning_rate": 1.9291213176069584e-06,
"loss": 15.7189,
"step": 420
},
{
"epoch": 0.515093435553426,
"grad_norm": 347.5401306152344,
"learning_rate": 1.9255871907952756e-06,
"loss": 18.0663,
"step": 430
},
{
"epoch": 0.5270723526593196,
"grad_norm": 480.1645812988281,
"learning_rate": 1.921970485032452e-06,
"loss": 18.3552,
"step": 440
},
{
"epoch": 0.5390512697652132,
"grad_norm": 0.502577543258667,
"learning_rate": 1.918271522993403e-06,
"loss": 9.8206,
"step": 450
},
{
"epoch": 0.5510301868711068,
"grad_norm": 181.82675170898438,
"learning_rate": 1.914490634691777e-06,
"loss": 22.7731,
"step": 460
},
{
"epoch": 0.5630091039770004,
"grad_norm": 1757.3203125,
"learning_rate": 1.9106281574505113e-06,
"loss": 42.1869,
"step": 470
},
{
"epoch": 0.5749880210828942,
"grad_norm": 0.3148007392883301,
"learning_rate": 1.9066844358717384e-06,
"loss": 10.479,
"step": 480
},
{
"epoch": 0.5869669381887878,
"grad_norm": 0.43468308448791504,
"learning_rate": 1.90265982180604e-06,
"loss": 25.0078,
"step": 490
},
{
"epoch": 0.5989458552946814,
"grad_norm": 428.818359375,
"learning_rate": 1.8985546743210556e-06,
"loss": 17.2799,
"step": 500
},
{
"epoch": 0.610924772400575,
"grad_norm": 826.046630859375,
"learning_rate": 1.894369359669449e-06,
"loss": 22.2318,
"step": 510
},
{
"epoch": 0.6229036895064686,
"grad_norm": 1385.6824951171875,
"learning_rate": 1.8901042512562298e-06,
"loss": 23.271,
"step": 520
},
{
"epoch": 0.6348826066123623,
"grad_norm": 1433.165771484375,
"learning_rate": 1.8857597296054402e-06,
"loss": 15.2536,
"step": 530
},
{
"epoch": 0.6468615237182559,
"grad_norm": 1745.93115234375,
"learning_rate": 1.8813361823262054e-06,
"loss": 25.0982,
"step": 540
},
{
"epoch": 0.6588404408241495,
"grad_norm": 1039.310302734375,
"learning_rate": 1.8768340040781511e-06,
"loss": 30.7892,
"step": 550
},
{
"epoch": 0.6708193579300431,
"grad_norm": 310.6319274902344,
"learning_rate": 1.8722535965361946e-06,
"loss": 7.1616,
"step": 560
},
{
"epoch": 0.6827982750359367,
"grad_norm": 356.1474304199219,
"learning_rate": 1.8675953683547053e-06,
"loss": 18.7229,
"step": 570
},
{
"epoch": 0.6947771921418304,
"grad_norm": 145.14263916015625,
"learning_rate": 1.8628597351310482e-06,
"loss": 17.1177,
"step": 580
},
{
"epoch": 0.706756109247724,
"grad_norm": 989.70654296875,
"learning_rate": 1.8580471193685044e-06,
"loss": 20.149,
"step": 590
},
{
"epoch": 0.7187350263536176,
"grad_norm": 1277.737548828125,
"learning_rate": 1.853157950438575e-06,
"loss": 13.4416,
"step": 600
},
{
"epoch": 0.7187350263536176,
"eval_loss": 5.8129730224609375,
"eval_runtime": 239.911,
"eval_samples_per_second": 6.211,
"eval_steps_per_second": 3.105,
"step": 600
},
{
"epoch": 0.7307139434595112,
"grad_norm": 446.188232421875,
"learning_rate": 1.8481926645426752e-06,
"loss": 18.3421,
"step": 610
},
{
"epoch": 0.7426928605654048,
"grad_norm": 0.44947949051856995,
"learning_rate": 1.8431517046732154e-06,
"loss": 18.8154,
"step": 620
},
{
"epoch": 0.7546717776712986,
"grad_norm": 224.2598876953125,
"learning_rate": 1.8380355205740811e-06,
"loss": 19.2502,
"step": 630
},
{
"epoch": 0.7666506947771922,
"grad_norm": 0.3780662715435028,
"learning_rate": 1.8328445687005049e-06,
"loss": 17.2432,
"step": 640
},
{
"epoch": 0.7786296118830858,
"grad_norm": 0.4702111482620239,
"learning_rate": 1.8275793121783438e-06,
"loss": 26.5332,
"step": 650
},
{
"epoch": 0.7906085289889794,
"grad_norm": 0.27338534593582153,
"learning_rate": 1.8222402207627605e-06,
"loss": 9.9615,
"step": 660
},
{
"epoch": 0.802587446094873,
"grad_norm": 2576.7802734375,
"learning_rate": 1.8168277707963116e-06,
"loss": 28.2978,
"step": 670
},
{
"epoch": 0.8145663632007667,
"grad_norm": 171.48802185058594,
"learning_rate": 1.8113424451664507e-06,
"loss": 21.4006,
"step": 680
},
{
"epoch": 0.8265452803066603,
"grad_norm": 228.12960815429688,
"learning_rate": 1.805784733262445e-06,
"loss": 16.6388,
"step": 690
},
{
"epoch": 0.8385241974125539,
"grad_norm": 250.2900390625,
"learning_rate": 1.8001551309317136e-06,
"loss": 12.3178,
"step": 700
},
{
"epoch": 0.8505031145184475,
"grad_norm": 0.25326794385910034,
"learning_rate": 1.7944541404355888e-06,
"loss": 15.9896,
"step": 710
},
{
"epoch": 0.8624820316243411,
"grad_norm": 614.1929321289062,
"learning_rate": 1.7886822704045063e-06,
"loss": 21.2252,
"step": 720
},
{
"epoch": 0.8744609487302348,
"grad_norm": 0.3262430429458618,
"learning_rate": 1.782840035792625e-06,
"loss": 17.6816,
"step": 730
},
{
"epoch": 0.8864398658361284,
"grad_norm": 0.4543621242046356,
"learning_rate": 1.7769279578318837e-06,
"loss": 11.1479,
"step": 740
},
{
"epoch": 0.898418782942022,
"grad_norm": 317.15899658203125,
"learning_rate": 1.7709465639854994e-06,
"loss": 12.7186,
"step": 750
},
{
"epoch": 0.9103977000479156,
"grad_norm": 735.1277465820312,
"learning_rate": 1.7648963879009074e-06,
"loss": 24.4749,
"step": 760
},
{
"epoch": 0.9223766171538093,
"grad_norm": 259.2248840332031,
"learning_rate": 1.7587779693621494e-06,
"loss": 13.9466,
"step": 770
},
{
"epoch": 0.934355534259703,
"grad_norm": 0.48265841603279114,
"learning_rate": 1.752591854241717e-06,
"loss": 10.1343,
"step": 780
},
{
"epoch": 0.9463344513655966,
"grad_norm": 281.167236328125,
"learning_rate": 1.7463385944518482e-06,
"loss": 8.1786,
"step": 790
},
{
"epoch": 0.9583133684714902,
"grad_norm": 143.92201232910156,
"learning_rate": 1.7400187478952888e-06,
"loss": 30.3769,
"step": 800
},
{
"epoch": 0.9583133684714902,
"eval_loss": 5.779825687408447,
"eval_runtime": 239.5089,
"eval_samples_per_second": 6.221,
"eval_steps_per_second": 3.111,
"step": 800
},
{
"epoch": 0.9702922855773838,
"grad_norm": 787.3501586914062,
"learning_rate": 1.733632878415515e-06,
"loss": 18.6365,
"step": 810
},
{
"epoch": 0.9822712026832774,
"grad_norm": 1132.944580078125,
"learning_rate": 1.7271815557464313e-06,
"loss": 14.7469,
"step": 820
},
{
"epoch": 0.9942501197891711,
"grad_norm": 1053.750244140625,
"learning_rate": 1.7206653554615384e-06,
"loss": 25.2116,
"step": 830
},
{
"epoch": 1.0071873502635362,
"grad_norm": 1744.1956787109375,
"learning_rate": 1.7140848589225819e-06,
"loss": 12.9715,
"step": 840
},
{
"epoch": 1.0191662673694297,
"grad_norm": 0.658488392829895,
"learning_rate": 1.7074406532276843e-06,
"loss": 15.2876,
"step": 850
},
{
"epoch": 1.0311451844753234,
"grad_norm": 935.2374877929688,
"learning_rate": 1.700733331158966e-06,
"loss": 15.3804,
"step": 860
},
{
"epoch": 1.0431241015812172,
"grad_norm": 1073.8214111328125,
"learning_rate": 1.693963491129658e-06,
"loss": 9.6525,
"step": 870
},
{
"epoch": 1.0551030186871106,
"grad_norm": 1496.8275146484375,
"learning_rate": 1.6871317371307128e-06,
"loss": 11.7977,
"step": 880
},
{
"epoch": 1.0670819357930044,
"grad_norm": 1174.0115966796875,
"learning_rate": 1.6802386786769173e-06,
"loss": 11.7103,
"step": 890
},
{
"epoch": 1.0790608528988979,
"grad_norm": 0.169984370470047,
"learning_rate": 1.6732849307525148e-06,
"loss": 17.5266,
"step": 900
},
{
"epoch": 1.0910397700047916,
"grad_norm": 315.50323486328125,
"learning_rate": 1.6662711137563345e-06,
"loss": 13.7159,
"step": 910
},
{
"epoch": 1.1030186871106853,
"grad_norm": 1578.7376708984375,
"learning_rate": 1.6591978534464428e-06,
"loss": 24.5682,
"step": 920
},
{
"epoch": 1.1149976042165788,
"grad_norm": 741.6873779296875,
"learning_rate": 1.652065780884315e-06,
"loss": 21.1884,
"step": 930
},
{
"epoch": 1.1269765213224725,
"grad_norm": 0.4323195517063141,
"learning_rate": 1.6448755323785313e-06,
"loss": 4.4746,
"step": 940
},
{
"epoch": 1.138955438428366,
"grad_norm": 0.26412129402160645,
"learning_rate": 1.6376277494280085e-06,
"loss": 10.954,
"step": 950
},
{
"epoch": 1.1509343555342597,
"grad_norm": 0.3975050449371338,
"learning_rate": 1.6303230786647652e-06,
"loss": 7.8232,
"step": 960
},
{
"epoch": 1.1629132726401532,
"grad_norm": 1786.0848388671875,
"learning_rate": 1.622962171796233e-06,
"loss": 15.7735,
"step": 970
},
{
"epoch": 1.174892189746047,
"grad_norm": 0.31270813941955566,
"learning_rate": 1.6155456855471099e-06,
"loss": 12.1594,
"step": 980
},
{
"epoch": 1.1868711068519406,
"grad_norm": 369.54388427734375,
"learning_rate": 1.6080742816007699e-06,
"loss": 14.8824,
"step": 990
},
{
"epoch": 1.1988500239578341,
"grad_norm": 1238.604736328125,
"learning_rate": 1.6005486265402297e-06,
"loss": 28.347,
"step": 1000
},
{
"epoch": 1.1988500239578341,
"eval_loss": 5.866006374359131,
"eval_runtime": 240.6875,
"eval_samples_per_second": 6.191,
"eval_steps_per_second": 3.095,
"step": 1000
},
{
"epoch": 1.2108289410637278,
"grad_norm": 767.6785888671875,
"learning_rate": 1.5929693917886763e-06,
"loss": 10.1278,
"step": 1010
},
{
"epoch": 1.2228078581696216,
"grad_norm": 206.80807495117188,
"learning_rate": 1.585337253549564e-06,
"loss": 15.3151,
"step": 1020
},
{
"epoch": 1.234786775275515,
"grad_norm": 1851.927490234375,
"learning_rate": 1.577652892746287e-06,
"loss": 27.8078,
"step": 1030
},
{
"epoch": 1.2467656923814088,
"grad_norm": 196.03907775878906,
"learning_rate": 1.5699169949614256e-06,
"loss": 16.0873,
"step": 1040
},
{
"epoch": 1.2587446094873023,
"grad_norm": 1514.0220947265625,
"learning_rate": 1.5621302503755832e-06,
"loss": 18.1552,
"step": 1050
},
{
"epoch": 1.270723526593196,
"grad_norm": 0.2465379238128662,
"learning_rate": 1.5542933537058072e-06,
"loss": 15.3595,
"step": 1060
},
{
"epoch": 1.2827024436990895,
"grad_norm": 1181.0020751953125,
"learning_rate": 1.5464070041436097e-06,
"loss": 9.0312,
"step": 1070
},
{
"epoch": 1.2946813608049832,
"grad_norm": 234.97207641601562,
"learning_rate": 1.538471905292585e-06,
"loss": 10.9891,
"step": 1080
},
{
"epoch": 1.306660277910877,
"grad_norm": null,
"learning_rate": 1.5304887651056396e-06,
"loss": 17.0111,
"step": 1090
},
{
"epoch": 1.3186391950167704,
"grad_norm": 189.0157928466797,
"learning_rate": 1.5232634521732122e-06,
"loss": 9.3152,
"step": 1100
},
{
"epoch": 1.3306181121226641,
"grad_norm": 0.5336930751800537,
"learning_rate": 1.515190999157904e-06,
"loss": 20.7567,
"step": 1110
},
{
"epoch": 1.3425970292285578,
"grad_norm": 2664.725830078125,
"learning_rate": 1.5070725818803735e-06,
"loss": 11.0421,
"step": 1120
},
{
"epoch": 1.3545759463344513,
"grad_norm": 0.2817501425743103,
"learning_rate": 1.498908924648811e-06,
"loss": 12.3921,
"step": 1130
},
{
"epoch": 1.366554863440345,
"grad_norm": 166.9059295654297,
"learning_rate": 1.4907007558076216e-06,
"loss": 11.9341,
"step": 1140
},
{
"epoch": 1.3785337805462385,
"grad_norm": 275.1123352050781,
"learning_rate": 1.4824488076724433e-06,
"loss": 13.0555,
"step": 1150
},
{
"epoch": 1.3905126976521323,
"grad_norm": 0.3370044231414795,
"learning_rate": 1.4741538164648097e-06,
"loss": 25.3636,
"step": 1160
},
{
"epoch": 1.4024916147580258,
"grad_norm": 190.51695251464844,
"learning_rate": 1.4658165222464686e-06,
"loss": 13.6367,
"step": 1170
},
{
"epoch": 1.4144705318639195,
"grad_norm": 166.0221710205078,
"learning_rate": 1.4574376688533532e-06,
"loss": 9.6226,
"step": 1180
},
{
"epoch": 1.4264494489698132,
"grad_norm": 1424.2623291015625,
"learning_rate": 1.4490180038292192e-06,
"loss": 13.0517,
"step": 1190
},
{
"epoch": 1.4384283660757067,
"grad_norm": 528.625244140625,
"learning_rate": 1.4405582783589513e-06,
"loss": 24.365,
"step": 1200
},
{
"epoch": 1.4384283660757067,
"eval_loss": 5.900476455688477,
"eval_runtime": 238.5443,
"eval_samples_per_second": 6.246,
"eval_steps_per_second": 3.123,
"step": 1200
}
],
"logging_steps": 10,
"max_steps": 3336,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 300,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.8713596241523507e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}