{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.09924910218739798, "eval_steps": 76, "global_step": 152, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.000652954619653934, "grad_norm": 4.757781028747559, "learning_rate": 4.000000000000001e-06, "loss": 4.2047, "step": 1 }, { "epoch": 0.000652954619653934, "eval_loss": null, "eval_runtime": 704.7675, "eval_samples_per_second": 3.661, "eval_steps_per_second": 0.915, "step": 1 }, { "epoch": 0.001305909239307868, "grad_norm": 4.874630928039551, "learning_rate": 8.000000000000001e-06, "loss": 4.1653, "step": 2 }, { "epoch": 0.0019588638589618022, "grad_norm": 5.5883331298828125, "learning_rate": 1.2e-05, "loss": 4.1635, "step": 3 }, { "epoch": 0.002611818478615736, "grad_norm": 5.3879313468933105, "learning_rate": 1.6000000000000003e-05, "loss": 3.8569, "step": 4 }, { "epoch": 0.00326477309826967, "grad_norm": 4.848824501037598, "learning_rate": 2e-05, "loss": 4.0147, "step": 5 }, { "epoch": 0.0039177277179236044, "grad_norm": 5.3052978515625, "learning_rate": 2.4e-05, "loss": 3.7956, "step": 6 }, { "epoch": 0.004570682337577538, "grad_norm": 6.782866954803467, "learning_rate": 2.8000000000000003e-05, "loss": 4.6771, "step": 7 }, { "epoch": 0.005223636957231472, "grad_norm": 6.1247735023498535, "learning_rate": 3.2000000000000005e-05, "loss": 3.8201, "step": 8 }, { "epoch": 0.005876591576885406, "grad_norm": 5.305412769317627, "learning_rate": 3.6e-05, "loss": 3.2011, "step": 9 }, { "epoch": 0.00652954619653934, "grad_norm": 4.974228858947754, "learning_rate": 4e-05, "loss": 3.2488, "step": 10 }, { "epoch": 0.007182500816193275, "grad_norm": 4.17997407913208, "learning_rate": 4.4000000000000006e-05, "loss": 2.8667, "step": 11 }, { "epoch": 0.007835455435847209, "grad_norm": 5.494494915008545, "learning_rate": 4.8e-05, "loss": 3.0199, "step": 12 }, { "epoch": 0.008488410055501142, "grad_norm": 3.663966178894043, "learning_rate": 
5.2000000000000004e-05, "loss": 2.5652, "step": 13 }, { "epoch": 0.009141364675155077, "grad_norm": 3.196805477142334, "learning_rate": 5.6000000000000006e-05, "loss": 2.6467, "step": 14 }, { "epoch": 0.009794319294809012, "grad_norm": 3.161735773086548, "learning_rate": 6e-05, "loss": 2.4477, "step": 15 }, { "epoch": 0.010447273914462945, "grad_norm": 4.473456859588623, "learning_rate": 6.400000000000001e-05, "loss": 2.4943, "step": 16 }, { "epoch": 0.01110022853411688, "grad_norm": 5.138904094696045, "learning_rate": 6.800000000000001e-05, "loss": 2.51, "step": 17 }, { "epoch": 0.011753183153770812, "grad_norm": 5.232083320617676, "learning_rate": 7.2e-05, "loss": 2.3384, "step": 18 }, { "epoch": 0.012406137773424747, "grad_norm": 5.67042875289917, "learning_rate": 7.6e-05, "loss": 2.3158, "step": 19 }, { "epoch": 0.01305909239307868, "grad_norm": 6.414549350738525, "learning_rate": 8e-05, "loss": 2.2479, "step": 20 }, { "epoch": 0.013712047012732615, "grad_norm": 6.064276218414307, "learning_rate": 8.4e-05, "loss": 2.689, "step": 21 }, { "epoch": 0.01436500163238655, "grad_norm": 4.989445686340332, "learning_rate": 8.800000000000001e-05, "loss": 1.9927, "step": 22 }, { "epoch": 0.015017956252040483, "grad_norm": 6.970681667327881, "learning_rate": 9.200000000000001e-05, "loss": 2.0652, "step": 23 }, { "epoch": 0.015670910871694418, "grad_norm": 5.712936878204346, "learning_rate": 9.6e-05, "loss": 1.7749, "step": 24 }, { "epoch": 0.01632386549134835, "grad_norm": 6.509803771972656, "learning_rate": 0.0001, "loss": 1.6381, "step": 25 }, { "epoch": 0.016976820111002284, "grad_norm": 9.46315860748291, "learning_rate": 0.00010400000000000001, "loss": 2.9097, "step": 26 }, { "epoch": 0.01762977473065622, "grad_norm": 7.60649299621582, "learning_rate": 0.00010800000000000001, "loss": 2.3185, "step": 27 }, { "epoch": 0.018282729350310153, "grad_norm": 4.304985046386719, "learning_rate": 0.00011200000000000001, "loss": 1.8866, "step": 28 }, { "epoch": 
0.018935683969964087, "grad_norm": 4.42153787612915, "learning_rate": 0.000116, "loss": 1.9317, "step": 29 }, { "epoch": 0.019588638589618023, "grad_norm": 4.993203163146973, "learning_rate": 0.00012, "loss": 1.963, "step": 30 }, { "epoch": 0.020241593209271956, "grad_norm": 5.630634784698486, "learning_rate": 0.000124, "loss": 2.0134, "step": 31 }, { "epoch": 0.02089454782892589, "grad_norm": 4.011257648468018, "learning_rate": 0.00012800000000000002, "loss": 1.5838, "step": 32 }, { "epoch": 0.021547502448579822, "grad_norm": 4.430102348327637, "learning_rate": 0.000132, "loss": 1.567, "step": 33 }, { "epoch": 0.02220045706823376, "grad_norm": 4.285562038421631, "learning_rate": 0.00013600000000000003, "loss": 1.5668, "step": 34 }, { "epoch": 0.022853411687887692, "grad_norm": 4.76671028137207, "learning_rate": 0.00014, "loss": 1.4504, "step": 35 }, { "epoch": 0.023506366307541625, "grad_norm": 3.6335926055908203, "learning_rate": 0.000144, "loss": 1.7653, "step": 36 }, { "epoch": 0.02415932092719556, "grad_norm": 3.863640546798706, "learning_rate": 0.000148, "loss": 1.7619, "step": 37 }, { "epoch": 0.024812275546849494, "grad_norm": 3.339837074279785, "learning_rate": 0.000152, "loss": 1.9767, "step": 38 }, { "epoch": 0.025465230166503428, "grad_norm": 3.004544734954834, "learning_rate": 0.00015600000000000002, "loss": 1.954, "step": 39 }, { "epoch": 0.02611818478615736, "grad_norm": 3.1160991191864014, "learning_rate": 0.00016, "loss": 2.2944, "step": 40 }, { "epoch": 0.026771139405811297, "grad_norm": 2.9257686138153076, "learning_rate": 0.000164, "loss": 2.1756, "step": 41 }, { "epoch": 0.02742409402546523, "grad_norm": 3.0116686820983887, "learning_rate": 0.000168, "loss": 2.2031, "step": 42 }, { "epoch": 0.028077048645119163, "grad_norm": 3.1789779663085938, "learning_rate": 0.000172, "loss": 1.71, "step": 43 }, { "epoch": 0.0287300032647731, "grad_norm": 3.2652573585510254, "learning_rate": 0.00017600000000000002, "loss": 2.108, "step": 44 }, { "epoch": 
0.029382957884427033, "grad_norm": 3.3591766357421875, "learning_rate": 0.00018, "loss": 2.252, "step": 45 }, { "epoch": 0.030035912504080966, "grad_norm": 4.020256519317627, "learning_rate": 0.00018400000000000003, "loss": 2.0209, "step": 46 }, { "epoch": 0.0306888671237349, "grad_norm": 3.1844100952148438, "learning_rate": 0.000188, "loss": 1.7205, "step": 47 }, { "epoch": 0.031341821743388835, "grad_norm": 4.041150093078613, "learning_rate": 0.000192, "loss": 2.0322, "step": 48 }, { "epoch": 0.031994776363042765, "grad_norm": 3.976959228515625, "learning_rate": 0.000196, "loss": 1.5729, "step": 49 }, { "epoch": 0.0326477309826967, "grad_norm": 3.8643813133239746, "learning_rate": 0.0002, "loss": 1.3921, "step": 50 }, { "epoch": 0.03330068560235064, "grad_norm": 4.523630142211914, "learning_rate": 0.0001999922905547776, "loss": 2.0141, "step": 51 }, { "epoch": 0.03395364022200457, "grad_norm": 3.308136463165283, "learning_rate": 0.0001999691634078213, "loss": 1.6375, "step": 52 }, { "epoch": 0.034606594841658504, "grad_norm": 3.70528244972229, "learning_rate": 0.00019993062212508053, "loss": 2.1218, "step": 53 }, { "epoch": 0.03525954946131244, "grad_norm": 3.909076690673828, "learning_rate": 0.0001998766726491935, "loss": 1.7935, "step": 54 }, { "epoch": 0.03591250408096637, "grad_norm": 3.2719497680664062, "learning_rate": 0.00019980732329857076, "loss": 1.4998, "step": 55 }, { "epoch": 0.03656545870062031, "grad_norm": 4.357359886169434, "learning_rate": 0.0001997225847661127, "loss": 1.7119, "step": 56 }, { "epoch": 0.03721841332027424, "grad_norm": 3.111414909362793, "learning_rate": 0.00019962247011756081, "loss": 1.3332, "step": 57 }, { "epoch": 0.03787136793992817, "grad_norm": 3.3698601722717285, "learning_rate": 0.00019950699478948309, "loss": 1.3576, "step": 58 }, { "epoch": 0.03852432255958211, "grad_norm": 4.0696024894714355, "learning_rate": 0.00019937617658689384, "loss": 1.2196, "step": 59 }, { "epoch": 0.039177277179236046, "grad_norm": 
3.984119176864624, "learning_rate": 0.00019923003568050844, "loss": 1.4652, "step": 60 }, { "epoch": 0.039830231798889976, "grad_norm": 3.909024238586426, "learning_rate": 0.00019906859460363307, "loss": 1.2951, "step": 61 }, { "epoch": 0.04048318641854391, "grad_norm": 3.550440788269043, "learning_rate": 0.0001988918782486906, "loss": 1.4699, "step": 62 }, { "epoch": 0.04113614103819784, "grad_norm": 3.341071367263794, "learning_rate": 0.0001986999138633821, "loss": 2.1781, "step": 63 }, { "epoch": 0.04178909565785178, "grad_norm": 2.938025951385498, "learning_rate": 0.00019849273104648592, "loss": 1.7088, "step": 64 }, { "epoch": 0.042442050277505715, "grad_norm": 2.956080913543701, "learning_rate": 0.00019827036174329353, "loss": 2.0676, "step": 65 }, { "epoch": 0.043095004897159644, "grad_norm": 2.349592924118042, "learning_rate": 0.00019803284024068427, "loss": 1.7783, "step": 66 }, { "epoch": 0.04374795951681358, "grad_norm": 2.7854583263397217, "learning_rate": 0.0001977802031618383, "loss": 2.222, "step": 67 }, { "epoch": 0.04440091413646752, "grad_norm": 3.6111695766448975, "learning_rate": 0.00019751248946059014, "loss": 2.0701, "step": 68 }, { "epoch": 0.04505386875612145, "grad_norm": 3.226724863052368, "learning_rate": 0.00019722974041542203, "loss": 2.2502, "step": 69 }, { "epoch": 0.045706823375775384, "grad_norm": 3.4755945205688477, "learning_rate": 0.0001969319996230995, "loss": 2.0837, "step": 70 }, { "epoch": 0.04635977799542932, "grad_norm": 3.1673951148986816, "learning_rate": 0.0001966193129919491, "loss": 1.8692, "step": 71 }, { "epoch": 0.04701273261508325, "grad_norm": 3.5966238975524902, "learning_rate": 0.00019629172873477995, "loss": 2.0036, "step": 72 }, { "epoch": 0.047665687234737186, "grad_norm": 4.344339370727539, "learning_rate": 0.00019594929736144976, "loss": 1.963, "step": 73 }, { "epoch": 0.04831864185439112, "grad_norm": 3.6855549812316895, "learning_rate": 0.00019559207167107684, "loss": 1.5932, "step": 74 }, { "epoch": 
0.04897159647404505, "grad_norm": 3.7931909561157227, "learning_rate": 0.000195220106743899, "loss": 1.272, "step": 75 }, { "epoch": 0.04962455109369899, "grad_norm": 3.422001361846924, "learning_rate": 0.00019483345993278093, "loss": 1.8281, "step": 76 }, { "epoch": 0.04962455109369899, "eval_loss": null, "eval_runtime": 704.1079, "eval_samples_per_second": 3.664, "eval_steps_per_second": 0.916, "step": 76 }, { "epoch": 0.05027750571335292, "grad_norm": 3.9176759719848633, "learning_rate": 0.0001944321908543708, "loss": 1.9556, "step": 77 }, { "epoch": 0.050930460333006855, "grad_norm": 3.578507423400879, "learning_rate": 0.00019401636137990816, "loss": 2.025, "step": 78 }, { "epoch": 0.05158341495266079, "grad_norm": 3.641873598098755, "learning_rate": 0.00019358603562568416, "loss": 1.667, "step": 79 }, { "epoch": 0.05223636957231472, "grad_norm": 4.218084335327148, "learning_rate": 0.0001931412799431554, "loss": 1.797, "step": 80 }, { "epoch": 0.05288932419196866, "grad_norm": 3.496741533279419, "learning_rate": 0.0001926821629087133, "loss": 1.5076, "step": 81 }, { "epoch": 0.053542278811622594, "grad_norm": 5.606319427490234, "learning_rate": 0.00019220875531311045, "loss": 1.5796, "step": 82 }, { "epoch": 0.054195233431276524, "grad_norm": 3.143007516860962, "learning_rate": 0.00019172113015054532, "loss": 1.4222, "step": 83 }, { "epoch": 0.05484818805093046, "grad_norm": 3.359457015991211, "learning_rate": 0.00019121936260740752, "loss": 0.9389, "step": 84 }, { "epoch": 0.0555011426705844, "grad_norm": 3.911376476287842, "learning_rate": 0.00019070353005068484, "loss": 1.8963, "step": 85 }, { "epoch": 0.056154097290238326, "grad_norm": 3.542954444885254, "learning_rate": 0.00019017371201603407, "loss": 1.4677, "step": 86 }, { "epoch": 0.05680705190989226, "grad_norm": 3.1694722175598145, "learning_rate": 0.00018962999019551754, "loss": 1.4803, "step": 87 }, { "epoch": 0.0574600065295462, "grad_norm": 2.960282564163208, "learning_rate": 
0.00018907244842500704, "loss": 1.8923, "step": 88 }, { "epoch": 0.05811296114920013, "grad_norm": 2.6101479530334473, "learning_rate": 0.00018850117267125738, "loss": 1.9243, "step": 89 }, { "epoch": 0.058765915768854066, "grad_norm": 3.0619993209838867, "learning_rate": 0.00018791625101865117, "loss": 2.1384, "step": 90 }, { "epoch": 0.059418870388507995, "grad_norm": 2.6776371002197266, "learning_rate": 0.0001873177736556172, "loss": 1.7285, "step": 91 }, { "epoch": 0.06007182500816193, "grad_norm": 3.687798023223877, "learning_rate": 0.00018670583286072443, "loss": 1.8332, "step": 92 }, { "epoch": 0.06072477962781587, "grad_norm": 2.632847547531128, "learning_rate": 0.0001860805229884536, "loss": 1.8342, "step": 93 }, { "epoch": 0.0613777342474698, "grad_norm": 3.5173568725585938, "learning_rate": 0.00018544194045464886, "loss": 2.004, "step": 94 }, { "epoch": 0.062030688867123734, "grad_norm": 3.2944045066833496, "learning_rate": 0.0001847901837216515, "loss": 1.8861, "step": 95 }, { "epoch": 0.06268364348677767, "grad_norm": 3.5300235748291016, "learning_rate": 0.00018412535328311814, "loss": 1.7608, "step": 96 }, { "epoch": 0.06333659810643161, "grad_norm": 3.253826856613159, "learning_rate": 0.0001834475516485257, "loss": 1.9151, "step": 97 }, { "epoch": 0.06398955272608553, "grad_norm": 3.243023633956909, "learning_rate": 0.00018275688332736577, "loss": 1.5671, "step": 98 }, { "epoch": 0.06464250734573947, "grad_norm": 3.3818089962005615, "learning_rate": 0.00018205345481302998, "loss": 1.4077, "step": 99 }, { "epoch": 0.0652954619653934, "grad_norm": 3.632511615753174, "learning_rate": 0.00018133737456639044, "loss": 1.0454, "step": 100 }, { "epoch": 0.06594841658504734, "grad_norm": 3.6621978282928467, "learning_rate": 0.0001806087529990758, "loss": 2.0844, "step": 101 }, { "epoch": 0.06660137120470128, "grad_norm": 5.99480676651001, "learning_rate": 0.0001798677024564473, "loss": 1.8015, "step": 102 }, { "epoch": 0.06725432582435521, "grad_norm": 
3.063887357711792, "learning_rate": 0.00017911433720027624, "loss": 1.7182, "step": 103 }, { "epoch": 0.06790728044400914, "grad_norm": 3.2303333282470703, "learning_rate": 0.00017834877339112612, "loss": 1.6701, "step": 104 }, { "epoch": 0.06856023506366307, "grad_norm": 7.370791435241699, "learning_rate": 0.000177571129070442, "loss": 1.6819, "step": 105 }, { "epoch": 0.06921318968331701, "grad_norm": 3.4059948921203613, "learning_rate": 0.00017678152414234968, "loss": 1.3683, "step": 106 }, { "epoch": 0.06986614430297095, "grad_norm": 4.130568504333496, "learning_rate": 0.000175980080355168, "loss": 1.6074, "step": 107 }, { "epoch": 0.07051909892262488, "grad_norm": 4.288647651672363, "learning_rate": 0.00017516692128263648, "loss": 1.2521, "step": 108 }, { "epoch": 0.07117205354227882, "grad_norm": 3.245211362838745, "learning_rate": 0.00017434217230486164, "loss": 1.3333, "step": 109 }, { "epoch": 0.07182500816193274, "grad_norm": 3.7068018913269043, "learning_rate": 0.00017350596058898483, "loss": 1.287, "step": 110 }, { "epoch": 0.07247796278158668, "grad_norm": 3.418928623199463, "learning_rate": 0.0001726584150695744, "loss": 1.3896, "step": 111 }, { "epoch": 0.07313091740124061, "grad_norm": 3.3947291374206543, "learning_rate": 0.00017179966642874543, "loss": 1.7948, "step": 112 }, { "epoch": 0.07378387202089455, "grad_norm": 2.770167589187622, "learning_rate": 0.0001709298470760101, "loss": 1.8008, "step": 113 }, { "epoch": 0.07443682664054849, "grad_norm": 2.841723918914795, "learning_rate": 0.00017004909112786144, "loss": 1.7773, "step": 114 }, { "epoch": 0.07508978126020241, "grad_norm": 3.010446071624756, "learning_rate": 0.00016915753438709417, "loss": 1.9485, "step": 115 }, { "epoch": 0.07574273587985635, "grad_norm": 2.804893970489502, "learning_rate": 0.00016825531432186543, "loss": 1.8283, "step": 116 }, { "epoch": 0.07639569049951028, "grad_norm": 2.607825517654419, "learning_rate": 0.00016734257004449862, "loss": 1.5881, "step": 117 }, { 
"epoch": 0.07704864511916422, "grad_norm": 3.1926357746124268, "learning_rate": 0.00016641944229003395, "loss": 1.9909, "step": 118 }, { "epoch": 0.07770159973881816, "grad_norm": 2.6762239933013916, "learning_rate": 0.00016548607339452853, "loss": 1.7493, "step": 119 }, { "epoch": 0.07835455435847209, "grad_norm": 2.9282002449035645, "learning_rate": 0.00016454260727310978, "loss": 1.6987, "step": 120 }, { "epoch": 0.07900750897812601, "grad_norm": 3.6605136394500732, "learning_rate": 0.00016358918939778536, "loss": 2.1916, "step": 121 }, { "epoch": 0.07966046359777995, "grad_norm": 3.031012535095215, "learning_rate": 0.00016262596677501297, "loss": 1.9056, "step": 122 }, { "epoch": 0.08031341821743389, "grad_norm": 3.2578601837158203, "learning_rate": 0.0001616530879230335, "loss": 1.5707, "step": 123 }, { "epoch": 0.08096637283708782, "grad_norm": 3.2448766231536865, "learning_rate": 0.00016067070284897137, "loss": 1.43, "step": 124 }, { "epoch": 0.08161932745674176, "grad_norm": 3.2631771564483643, "learning_rate": 0.00015967896302570485, "loss": 1.0851, "step": 125 }, { "epoch": 0.08227228207639568, "grad_norm": 3.316664457321167, "learning_rate": 0.0001586780213685108, "loss": 1.9519, "step": 126 }, { "epoch": 0.08292523669604962, "grad_norm": 2.7955403327941895, "learning_rate": 0.00015766803221148673, "loss": 1.6003, "step": 127 }, { "epoch": 0.08357819131570356, "grad_norm": 3.2884178161621094, "learning_rate": 0.0001566491512837543, "loss": 1.7108, "step": 128 }, { "epoch": 0.0842311459353575, "grad_norm": 3.472278356552124, "learning_rate": 0.00015562153568544752, "loss": 1.8399, "step": 129 }, { "epoch": 0.08488410055501143, "grad_norm": 2.900644302368164, "learning_rate": 0.00015458534386348966, "loss": 1.6259, "step": 130 }, { "epoch": 0.08553705517466537, "grad_norm": 3.018883228302002, "learning_rate": 0.0001535407355871626, "loss": 1.5246, "step": 131 }, { "epoch": 0.08619000979431929, "grad_norm": 3.375364303588867, "learning_rate": 
0.00015248787192347196, "loss": 1.6209, "step": 132 }, { "epoch": 0.08684296441397323, "grad_norm": 2.9012420177459717, "learning_rate": 0.00015142691521231267, "loss": 1.2602, "step": 133 }, { "epoch": 0.08749591903362716, "grad_norm": 4.421230316162109, "learning_rate": 0.00015035802904143762, "loss": 1.5445, "step": 134 }, { "epoch": 0.0881488736532811, "grad_norm": 3.3842790126800537, "learning_rate": 0.00014928137822123452, "loss": 1.2163, "step": 135 }, { "epoch": 0.08880182827293504, "grad_norm": 3.1379024982452393, "learning_rate": 0.0001481971287593138, "loss": 1.6229, "step": 136 }, { "epoch": 0.08945478289258897, "grad_norm": 3.0401148796081543, "learning_rate": 0.00014710544783491208, "loss": 2.1325, "step": 137 }, { "epoch": 0.0901077375122429, "grad_norm": 3.1100378036499023, "learning_rate": 0.00014600650377311522, "loss": 1.8436, "step": 138 }, { "epoch": 0.09076069213189683, "grad_norm": 2.646742105484009, "learning_rate": 0.00014490046601890405, "loss": 2.0225, "step": 139 }, { "epoch": 0.09141364675155077, "grad_norm": 2.7213666439056396, "learning_rate": 0.00014378750511102826, "loss": 1.9278, "step": 140 }, { "epoch": 0.0920666013712047, "grad_norm": 2.6186165809631348, "learning_rate": 0.00014266779265571087, "loss": 2.1003, "step": 141 }, { "epoch": 0.09271955599085864, "grad_norm": 2.469266653060913, "learning_rate": 0.00014154150130018866, "loss": 2.1259, "step": 142 }, { "epoch": 0.09337251061051256, "grad_norm": 2.95566725730896, "learning_rate": 0.00014040880470609187, "loss": 2.0982, "step": 143 }, { "epoch": 0.0940254652301665, "grad_norm": 3.408046007156372, "learning_rate": 0.00013926987752266735, "loss": 2.1553, "step": 144 }, { "epoch": 0.09467841984982044, "grad_norm": 2.5797595977783203, "learning_rate": 0.00013812489535984981, "loss": 1.9652, "step": 145 }, { "epoch": 0.09533137446947437, "grad_norm": 3.9302403926849365, "learning_rate": 0.00013697403476118454, "loss": 1.916, "step": 146 }, { "epoch": 0.09598432908912831, 
"grad_norm": 3.0103251934051514, "learning_rate": 0.0001358174731766064, "loss": 1.5778, "step": 147 }, { "epoch": 0.09663728370878225, "grad_norm": 3.0252418518066406, "learning_rate": 0.00013465538893507907, "loss": 1.862, "step": 148 }, { "epoch": 0.09729023832843617, "grad_norm": 3.1504366397857666, "learning_rate": 0.00013348796121709862, "loss": 1.7159, "step": 149 }, { "epoch": 0.0979431929480901, "grad_norm": 2.718940019607544, "learning_rate": 0.00013231537002706594, "loss": 1.1477, "step": 150 }, { "epoch": 0.09859614756774404, "grad_norm": 3.6509456634521484, "learning_rate": 0.0001311377961655319, "loss": 1.6706, "step": 151 }, { "epoch": 0.09924910218739798, "grad_norm": 3.2815134525299072, "learning_rate": 0.00012995542120132017, "loss": 1.4804, "step": 152 }, { "epoch": 0.09924910218739798, "eval_loss": null, "eval_runtime": 699.3675, "eval_samples_per_second": 3.689, "eval_steps_per_second": 0.922, "step": 152 } ], "logging_steps": 1, "max_steps": 303, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 76, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 4.662717514168402e+17, "train_batch_size": 2, "trial_name": null, "trial_params": null }