|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7692307692307693,
  "eval_steps": 500,
  "global_step": 140,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005494505494505495,
      "grad_norm": 0.29632216691970825,
      "learning_rate": 4e-05,
      "loss": 1.5561,
      "step": 1
    },
    {
      "epoch": 0.01098901098901099,
      "grad_norm": 0.3113888204097748,
      "learning_rate": 8e-05,
      "loss": 1.5458,
      "step": 2
    },
    {
      "epoch": 0.016483516483516484,
      "grad_norm": 0.3019237518310547,
      "learning_rate": 0.00012,
      "loss": 1.5507,
      "step": 3
    },
    {
      "epoch": 0.02197802197802198,
      "grad_norm": 0.29094433784484863,
      "learning_rate": 0.00016,
      "loss": 1.4652,
      "step": 4
    },
    {
      "epoch": 0.027472527472527472,
      "grad_norm": 0.27828261256217957,
      "learning_rate": 0.0002,
      "loss": 1.4454,
      "step": 5
    },
    {
      "epoch": 0.03296703296703297,
      "grad_norm": 0.3085501492023468,
      "learning_rate": 0.0001999842488663838,
      "loss": 1.1417,
      "step": 6
    },
    {
      "epoch": 0.038461538461538464,
      "grad_norm": 0.30620744824409485,
      "learning_rate": 0.00019993700042749937,
      "loss": 1.225,
      "step": 7
    },
    {
      "epoch": 0.04395604395604396,
      "grad_norm": 0.41407617926597595,
      "learning_rate": 0.0001998582695676762,
      "loss": 0.9375,
      "step": 8
    },
    {
      "epoch": 0.04945054945054945,
      "grad_norm": 0.3822758197784424,
      "learning_rate": 0.00019974808108892016,
      "loss": 0.8788,
      "step": 9
    },
    {
      "epoch": 0.054945054945054944,
      "grad_norm": 0.40259653329849243,
      "learning_rate": 0.00019960646970310027,
      "loss": 0.7664,
      "step": 10
    },
    {
      "epoch": 0.06043956043956044,
      "grad_norm": 0.5633108019828796,
      "learning_rate": 0.00019943348002101371,
      "loss": 0.6699,
      "step": 11
    },
    {
      "epoch": 0.06593406593406594,
      "grad_norm": 0.37629789113998413,
      "learning_rate": 0.00019922916653833248,
      "loss": 0.5892,
      "step": 12
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 0.31924572587013245,
      "learning_rate": 0.0001989935936184358,
      "loss": 0.5198,
      "step": 13
    },
    {
      "epoch": 0.07692307692307693,
      "grad_norm": 0.38061216473579407,
      "learning_rate": 0.00019872683547213446,
      "loss": 0.5894,
      "step": 14
    },
    {
      "epoch": 0.08241758241758242,
      "grad_norm": 0.2537476122379303,
      "learning_rate": 0.00019842897613429262,
      "loss": 0.5341,
      "step": 15
    },
    {
      "epoch": 0.08791208791208792,
      "grad_norm": 0.24057163298130035,
      "learning_rate": 0.00019810010943735479,
      "loss": 0.3744,
      "step": 16
    },
    {
      "epoch": 0.09340659340659341,
      "grad_norm": 0.2103324830532074,
      "learning_rate": 0.00019774033898178667,
      "loss": 0.5054,
      "step": 17
    },
    {
      "epoch": 0.0989010989010989,
      "grad_norm": 0.18747441470623016,
      "learning_rate": 0.00019734977810343865,
      "loss": 0.3734,
      "step": 18
    },
    {
      "epoch": 0.1043956043956044,
      "grad_norm": 0.19926677644252777,
      "learning_rate": 0.00019692854983784235,
      "loss": 0.399,
      "step": 19
    },
    {
      "epoch": 0.10989010989010989,
      "grad_norm": 0.20050929486751556,
      "learning_rate": 0.0001964767868814516,
      "loss": 0.4754,
      "step": 20
    },
    {
      "epoch": 0.11538461538461539,
      "grad_norm": 0.18073034286499023,
      "learning_rate": 0.0001959946315498402,
      "loss": 0.4375,
      "step": 21
    },
    {
      "epoch": 0.12087912087912088,
      "grad_norm": 0.20278413593769073,
      "learning_rate": 0.0001954822357328692,
      "loss": 0.4896,
      "step": 22
    },
    {
      "epoch": 0.12637362637362637,
      "grad_norm": 0.16931243240833282,
      "learning_rate": 0.00019493976084683813,
      "loss": 0.4137,
      "step": 23
    },
    {
      "epoch": 0.13186813186813187,
      "grad_norm": 0.18224675953388214,
      "learning_rate": 0.00019436737778363527,
      "loss": 0.3895,
      "step": 24
    },
    {
      "epoch": 0.13736263736263737,
      "grad_norm": 0.16282841563224792,
      "learning_rate": 0.0001937652668569028,
      "loss": 0.364,
      "step": 25
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.19350826740264893,
      "learning_rate": 0.00019313361774523385,
      "loss": 0.3578,
      "step": 26
    },
    {
      "epoch": 0.14835164835164835,
      "grad_norm": 0.20993147790431976,
      "learning_rate": 0.0001924726294324196,
      "loss": 0.4351,
      "step": 27
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 0.20188163220882416,
      "learning_rate": 0.00019178251014476466,
      "loss": 0.3965,
      "step": 28
    },
    {
      "epoch": 0.15934065934065933,
      "grad_norm": 0.2008114606142044,
      "learning_rate": 0.00019106347728549135,
      "loss": 0.3193,
      "step": 29
    },
    {
      "epoch": 0.16483516483516483,
      "grad_norm": 0.1973695605993271,
      "learning_rate": 0.00019031575736625238,
      "loss": 0.298,
      "step": 30
    },
    {
      "epoch": 0.17032967032967034,
      "grad_norm": 0.20849934220314026,
      "learning_rate": 0.00018953958593577493,
      "loss": 0.2995,
      "step": 31
    },
    {
      "epoch": 0.17582417582417584,
      "grad_norm": 0.20263291895389557,
      "learning_rate": 0.00018873520750565718,
      "loss": 0.3021,
      "step": 32
    },
    {
      "epoch": 0.1813186813186813,
      "grad_norm": 0.20541806519031525,
      "learning_rate": 0.00018790287547334176,
      "loss": 0.2883,
      "step": 33
    },
    {
      "epoch": 0.18681318681318682,
      "grad_norm": 0.24142159521579742,
      "learning_rate": 0.00018704285204228973,
      "loss": 0.3396,
      "step": 34
    },
    {
      "epoch": 0.19230769230769232,
      "grad_norm": 0.20562608540058136,
      "learning_rate": 0.0001861554081393806,
      "loss": 0.2773,
      "step": 35
    },
    {
      "epoch": 0.1978021978021978,
      "grad_norm": 0.26048898696899414,
      "learning_rate": 0.00018524082332956428,
      "loss": 0.3024,
      "step": 36
    },
    {
      "epoch": 0.2032967032967033,
      "grad_norm": 0.23103846609592438,
      "learning_rate": 0.00018429938572779152,
      "loss": 0.2843,
      "step": 37
    },
    {
      "epoch": 0.2087912087912088,
      "grad_norm": 0.23378689587116241,
      "learning_rate": 0.0001833313919082515,
      "loss": 0.2963,
      "step": 38
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 0.27318716049194336,
      "learning_rate": 0.00018233714681094404,
      "loss": 0.3863,
      "step": 39
    },
    {
      "epoch": 0.21978021978021978,
      "grad_norm": 0.219524085521698,
      "learning_rate": 0.00018131696364561667,
      "loss": 0.2703,
      "step": 40
    },
    {
      "epoch": 0.22527472527472528,
      "grad_norm": 0.21837233006954193,
      "learning_rate": 0.00018027116379309638,
      "loss": 0.2695,
      "step": 41
    },
    {
      "epoch": 0.23076923076923078,
      "grad_norm": 0.2384708672761917,
      "learning_rate": 0.0001792000767040474,
      "loss": 0.2819,
      "step": 42
    },
    {
      "epoch": 0.23626373626373626,
      "grad_norm": 0.20765846967697144,
      "learning_rate": 0.00017810403979518681,
      "loss": 0.2658,
      "step": 43
    },
    {
      "epoch": 0.24175824175824176,
      "grad_norm": 0.22932475805282593,
      "learning_rate": 0.00017698339834299061,
      "loss": 0.2276,
      "step": 44
    },
    {
      "epoch": 0.24725274725274726,
      "grad_norm": 0.2080792635679245,
      "learning_rate": 0.00017583850537492387,
      "loss": 0.1902,
      "step": 45
    },
    {
      "epoch": 0.25274725274725274,
      "grad_norm": 0.2754678428173065,
      "learning_rate": 0.0001746697215582288,
      "loss": 0.2797,
      "step": 46
    },
    {
      "epoch": 0.25824175824175827,
      "grad_norm": 0.22715018689632416,
      "learning_rate": 0.00017347741508630672,
      "loss": 0.1703,
      "step": 47
    },
    {
      "epoch": 0.26373626373626374,
      "grad_norm": 0.31407058238983154,
      "learning_rate": 0.00017226196156272874,
      "loss": 0.281,
      "step": 48
    },
    {
      "epoch": 0.2692307692307692,
      "grad_norm": 0.2527458071708679,
      "learning_rate": 0.00017102374388291183,
      "loss": 0.2579,
      "step": 49
    },
    {
      "epoch": 0.27472527472527475,
      "grad_norm": 0.2112707644701004,
      "learning_rate": 0.0001697631521134985,
      "loss": 0.264,
      "step": 50
    },
    {
      "epoch": 0.2802197802197802,
      "grad_norm": 0.20465674996376038,
      "learning_rate": 0.00016848058336947657,
      "loss": 0.2882,
      "step": 51
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.2256706804037094,
      "learning_rate": 0.0001671764416890793,
      "loss": 0.1847,
      "step": 52
    },
    {
      "epoch": 0.29120879120879123,
      "grad_norm": 0.20961597561836243,
      "learning_rate": 0.00016585113790650388,
      "loss": 0.196,
      "step": 53
    },
    {
      "epoch": 0.2967032967032967,
      "grad_norm": 0.2462984025478363,
      "learning_rate": 0.00016450508952248956,
      "loss": 0.2032,
      "step": 54
    },
    {
      "epoch": 0.3021978021978022,
      "grad_norm": 0.22251084446907043,
      "learning_rate": 0.00016313872057279534,
      "loss": 0.203,
      "step": 55
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 0.23791608214378357,
      "learning_rate": 0.0001617524614946192,
      "loss": 0.2458,
      "step": 56
    },
    {
      "epoch": 0.3131868131868132,
      "grad_norm": 0.23427598178386688,
      "learning_rate": 0.0001603467489910004,
      "loss": 0.149,
      "step": 57
    },
    {
      "epoch": 0.31868131868131866,
      "grad_norm": 0.23513749241828918,
      "learning_rate": 0.00015892202589324835,
      "loss": 0.1449,
      "step": 58
    },
    {
      "epoch": 0.3241758241758242,
      "grad_norm": 0.23785251379013062,
      "learning_rate": 0.0001574787410214407,
      "loss": 0.1938,
      "step": 59
    },
    {
      "epoch": 0.32967032967032966,
      "grad_norm": 0.20233368873596191,
      "learning_rate": 0.0001560173490430346,
      "loss": 0.1226,
      "step": 60
    },
    {
      "epoch": 0.33516483516483514,
      "grad_norm": 0.2437211126089096,
      "learning_rate": 0.0001545383103296365,
      "loss": 0.1892,
      "step": 61
    },
    {
      "epoch": 0.34065934065934067,
      "grad_norm": 0.2462146282196045,
      "learning_rate": 0.00015304209081197425,
      "loss": 0.1581,
      "step": 62
    },
    {
      "epoch": 0.34615384615384615,
      "grad_norm": 0.3353044092655182,
      "learning_rate": 0.0001515291618331188,
      "loss": 0.2195,
      "step": 63
    },
    {
      "epoch": 0.3516483516483517,
      "grad_norm": 0.22197988629341125,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.1739,
      "step": 64
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.2737789750099182,
      "learning_rate": 0.00014845508703326504,
      "loss": 0.1717,
      "step": 65
    },
    {
      "epoch": 0.3626373626373626,
      "grad_norm": 0.22385859489440918,
      "learning_rate": 0.00014689490961552513,
      "loss": 0.1724,
      "step": 66
    },
    {
      "epoch": 0.36813186813186816,
      "grad_norm": 0.27109456062316895,
      "learning_rate": 0.00014531995923803973,
      "loss": 0.196,
      "step": 67
    },
    {
      "epoch": 0.37362637362637363,
      "grad_norm": 0.20588435232639313,
      "learning_rate": 0.00014373073204588556,
      "loss": 0.1351,
      "step": 68
    },
    {
      "epoch": 0.3791208791208791,
      "grad_norm": 0.22479820251464844,
      "learning_rate": 0.00014212772868165958,
      "loss": 0.1083,
      "step": 69
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 0.2374928891658783,
      "learning_rate": 0.00014051145412776535,
      "loss": 0.136,
      "step": 70
    },
    {
      "epoch": 0.3901098901098901,
      "grad_norm": 0.25980404019355774,
      "learning_rate": 0.00013888241754733208,
      "loss": 0.1505,
      "step": 71
    },
    {
      "epoch": 0.3956043956043956,
      "grad_norm": 0.3704344928264618,
      "learning_rate": 0.0001372411321238166,
      "loss": 0.2365,
      "step": 72
    },
    {
      "epoch": 0.4010989010989011,
      "grad_norm": 0.2545515298843384,
      "learning_rate": 0.00013558811489933908,
      "loss": 0.1645,
      "step": 73
    },
    {
      "epoch": 0.4065934065934066,
      "grad_norm": 0.2218877524137497,
      "learning_rate": 0.00013392388661180303,
      "loss": 0.1178,
      "step": 74
    },
    {
      "epoch": 0.41208791208791207,
      "grad_norm": 0.19732226431369781,
      "learning_rate": 0.0001322489715308509,
      "loss": 0.1001,
      "step": 75
    },
    {
      "epoch": 0.4175824175824176,
      "grad_norm": 0.28702080249786377,
      "learning_rate": 0.00013056389729270738,
      "loss": 0.1437,
      "step": 76
    },
    {
      "epoch": 0.4230769230769231,
      "grad_norm": 0.27200955152511597,
      "learning_rate": 0.0001288691947339621,
      "loss": 0.1753,
      "step": 77
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.20188194513320923,
      "learning_rate": 0.00012716539772434388,
      "loss": 0.0998,
      "step": 78
    },
    {
      "epoch": 0.4340659340659341,
      "grad_norm": 0.2388327270746231,
      "learning_rate": 0.00012545304299853977,
      "loss": 0.1339,
      "step": 79
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 0.21373318135738373,
      "learning_rate": 0.0001237326699871115,
      "loss": 0.1205,
      "step": 80
    },
    {
      "epoch": 0.44505494505494503,
      "grad_norm": 0.29054194688796997,
      "learning_rate": 0.00012200482064656248,
      "loss": 0.1632,
      "step": 81
    },
    {
      "epoch": 0.45054945054945056,
      "grad_norm": 0.24942484498023987,
      "learning_rate": 0.00012027003928860937,
      "loss": 0.1282,
      "step": 82
    },
    {
      "epoch": 0.45604395604395603,
      "grad_norm": 0.20856431126594543,
      "learning_rate": 0.00011852887240871145,
      "loss": 0.1168,
      "step": 83
    },
    {
      "epoch": 0.46153846153846156,
      "grad_norm": 0.2552940249443054,
      "learning_rate": 0.00011678186851391218,
      "loss": 0.1269,
      "step": 84
    },
    {
      "epoch": 0.46703296703296704,
      "grad_norm": 0.2256869375705719,
      "learning_rate": 0.00011502957795004705,
      "loss": 0.1043,
      "step": 85
    },
    {
      "epoch": 0.4725274725274725,
      "grad_norm": 0.20186680555343628,
      "learning_rate": 0.00011327255272837221,
      "loss": 0.0929,
      "step": 86
    },
    {
      "epoch": 0.47802197802197804,
      "grad_norm": 0.2341729998588562,
      "learning_rate": 0.00011151134635166829,
      "loss": 0.0981,
      "step": 87
    },
    {
      "epoch": 0.4835164835164835,
      "grad_norm": 0.23417356610298157,
      "learning_rate": 0.00010974651363987465,
      "loss": 0.1066,
      "step": 88
    },
    {
      "epoch": 0.489010989010989,
      "grad_norm": 0.18209996819496155,
      "learning_rate": 0.00010797861055530831,
      "loss": 0.1032,
      "step": 89
    },
    {
      "epoch": 0.4945054945054945,
      "grad_norm": 0.18998686969280243,
      "learning_rate": 0.0001062081940275234,
      "loss": 0.0965,
      "step": 90
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.20998521149158478,
      "learning_rate": 0.00010443582177786564,
      "loss": 0.144,
      "step": 91
    },
    {
      "epoch": 0.5054945054945055,
      "grad_norm": 0.22074103355407715,
      "learning_rate": 0.00010266205214377748,
      "loss": 0.1184,
      "step": 92
    },
    {
      "epoch": 0.510989010989011,
      "grad_norm": 0.15477561950683594,
      "learning_rate": 0.0001008874439029091,
      "loss": 0.0796,
      "step": 93
    },
    {
      "epoch": 0.5164835164835165,
      "grad_norm": 0.2496633678674698,
      "learning_rate": 9.91125560970909e-05,
      "loss": 0.142,
      "step": 94
    },
    {
      "epoch": 0.521978021978022,
      "grad_norm": 0.3139898180961609,
      "learning_rate": 9.733794785622253e-05,
      "loss": 0.1407,
      "step": 95
    },
    {
      "epoch": 0.5274725274725275,
      "grad_norm": 0.21977433562278748,
      "learning_rate": 9.556417822213435e-05,
      "loss": 0.1164,
      "step": 96
    },
    {
      "epoch": 0.532967032967033,
      "grad_norm": 0.19279415905475616,
      "learning_rate": 9.379180597247661e-05,
      "loss": 0.0876,
      "step": 97
    },
    {
      "epoch": 0.5384615384615384,
      "grad_norm": 0.23302212357521057,
      "learning_rate": 9.202138944469168e-05,
      "loss": 0.1269,
      "step": 98
    },
    {
      "epoch": 0.5439560439560439,
      "grad_norm": 0.1825583428144455,
      "learning_rate": 9.025348636012536e-05,
      "loss": 0.0936,
      "step": 99
    },
    {
      "epoch": 0.5494505494505495,
      "grad_norm": 0.19986358284950256,
      "learning_rate": 8.84886536483317e-05,
      "loss": 0.113,
      "step": 100
    },
    {
      "epoch": 0.554945054945055,
      "grad_norm": 0.19531255960464478,
      "learning_rate": 8.672744727162781e-05,
      "loss": 0.0941,
      "step": 101
    },
    {
      "epoch": 0.5604395604395604,
      "grad_norm": 0.2396000772714615,
      "learning_rate": 8.497042204995299e-05,
      "loss": 0.0992,
      "step": 102
    },
    {
      "epoch": 0.5659340659340659,
      "grad_norm": 0.196890726685524,
      "learning_rate": 8.321813148608783e-05,
      "loss": 0.0833,
      "step": 103
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.2729422450065613,
      "learning_rate": 8.147112759128859e-05,
      "loss": 0.1452,
      "step": 104
    },
    {
      "epoch": 0.5769230769230769,
      "grad_norm": 0.1796470433473587,
      "learning_rate": 7.972996071139064e-05,
      "loss": 0.0799,
      "step": 105
    },
    {
      "epoch": 0.5824175824175825,
      "grad_norm": 0.14928919076919556,
      "learning_rate": 7.799517935343757e-05,
      "loss": 0.0481,
      "step": 106
    },
    {
      "epoch": 0.5879120879120879,
      "grad_norm": 0.21496616303920746,
      "learning_rate": 7.626733001288851e-05,
      "loss": 0.0693,
      "step": 107
    },
    {
      "epoch": 0.5934065934065934,
      "grad_norm": 0.24284154176712036,
      "learning_rate": 7.454695700146024e-05,
      "loss": 0.1201,
      "step": 108
    },
    {
      "epoch": 0.5989010989010989,
      "grad_norm": 0.2560741603374481,
      "learning_rate": 7.283460227565613e-05,
      "loss": 0.1095,
      "step": 109
    },
    {
      "epoch": 0.6043956043956044,
      "grad_norm": 0.13009899854660034,
      "learning_rate": 7.113080526603792e-05,
      "loss": 0.0512,
      "step": 110
    },
    {
      "epoch": 0.6098901098901099,
      "grad_norm": 0.20721551775932312,
      "learning_rate": 6.94361027072926e-05,
      "loss": 0.1132,
      "step": 111
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.154499813914299,
      "learning_rate": 6.775102846914911e-05,
      "loss": 0.0755,
      "step": 112
    },
    {
      "epoch": 0.6208791208791209,
      "grad_norm": 0.19335274398326874,
      "learning_rate": 6.607611338819697e-05,
      "loss": 0.0847,
      "step": 113
    },
    {
      "epoch": 0.6263736263736264,
      "grad_norm": 0.17462438344955444,
      "learning_rate": 6.441188510066091e-05,
      "loss": 0.0569,
      "step": 114
    },
    {
      "epoch": 0.6318681318681318,
      "grad_norm": 0.33881354331970215,
      "learning_rate": 6.275886787618339e-05,
      "loss": 0.1436,
      "step": 115
    },
    {
      "epoch": 0.6373626373626373,
      "grad_norm": 0.14344196021556854,
      "learning_rate": 6.111758245266794e-05,
      "loss": 0.0628,
      "step": 116
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 0.10723075270652771,
      "learning_rate": 5.9488545872234645e-05,
      "loss": 0.0462,
      "step": 117
    },
    {
      "epoch": 0.6483516483516484,
      "grad_norm": 0.19554950296878815,
      "learning_rate": 5.787227131834043e-05,
      "loss": 0.1092,
      "step": 118
    },
    {
      "epoch": 0.6538461538461539,
      "grad_norm": 0.13956406712532043,
      "learning_rate": 5.626926795411447e-05,
      "loss": 0.0674,
      "step": 119
    },
    {
      "epoch": 0.6593406593406593,
      "grad_norm": 0.2507220506668091,
      "learning_rate": 5.468004076196029e-05,
      "loss": 0.1,
      "step": 120
    },
    {
      "epoch": 0.6648351648351648,
      "grad_norm": 0.1673005074262619,
      "learning_rate": 5.310509038447492e-05,
      "loss": 0.1039,
      "step": 121
    },
    {
      "epoch": 0.6703296703296703,
      "grad_norm": 0.12703153491020203,
      "learning_rate": 5.1544912966734994e-05,
      "loss": 0.0704,
      "step": 122
    },
    {
      "epoch": 0.6758241758241759,
      "grad_norm": 0.17319592833518982,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.0724,
      "step": 123
    },
    {
      "epoch": 0.6813186813186813,
      "grad_norm": 0.154402956366539,
      "learning_rate": 4.8470838166881226e-05,
      "loss": 0.047,
      "step": 124
    },
    {
      "epoch": 0.6868131868131868,
      "grad_norm": 0.15553316473960876,
      "learning_rate": 4.695790918802576e-05,
      "loss": 0.0932,
      "step": 125
    },
    {
      "epoch": 0.6923076923076923,
      "grad_norm": 0.1012202799320221,
      "learning_rate": 4.546168967036351e-05,
      "loss": 0.0456,
      "step": 126
    },
    {
      "epoch": 0.6978021978021978,
      "grad_norm": 0.1133142039179802,
      "learning_rate": 4.398265095696539e-05,
      "loss": 0.0482,
      "step": 127
    },
    {
      "epoch": 0.7032967032967034,
      "grad_norm": 0.14953628182411194,
      "learning_rate": 4.252125897855932e-05,
      "loss": 0.0577,
      "step": 128
    },
    {
      "epoch": 0.7087912087912088,
      "grad_norm": 0.145247682929039,
      "learning_rate": 4.107797410675166e-05,
      "loss": 0.0645,
      "step": 129
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.09949145466089249,
      "learning_rate": 3.965325100899961e-05,
      "loss": 0.0486,
      "step": 130
    },
    {
      "epoch": 0.7197802197802198,
      "grad_norm": 0.128681942820549,
      "learning_rate": 3.824753850538082e-05,
      "loss": 0.0561,
      "step": 131
    },
    {
      "epoch": 0.7252747252747253,
      "grad_norm": 0.16942492127418518,
      "learning_rate": 3.686127942720463e-05,
      "loss": 0.0614,
      "step": 132
    },
    {
      "epoch": 0.7307692307692307,
      "grad_norm": 0.1377490609884262,
      "learning_rate": 3.5494910477510445e-05,
      "loss": 0.0634,
      "step": 133
    },
    {
      "epoch": 0.7362637362637363,
      "grad_norm": 0.1743365377187729,
      "learning_rate": 3.414886209349615e-05,
      "loss": 0.0695,
      "step": 134
    },
    {
      "epoch": 0.7417582417582418,
      "grad_norm": 0.18100859224796295,
      "learning_rate": 3.282355831092072e-05,
      "loss": 0.0561,
      "step": 135
    },
    {
      "epoch": 0.7472527472527473,
      "grad_norm": 0.1113915666937828,
      "learning_rate": 3.1519416630523444e-05,
      "loss": 0.0609,
      "step": 136
    },
    {
      "epoch": 0.7527472527472527,
      "grad_norm": 0.11914083361625671,
      "learning_rate": 3.0236847886501542e-05,
      "loss": 0.0437,
      "step": 137
    },
    {
      "epoch": 0.7582417582417582,
      "grad_norm": 0.16115868091583252,
      "learning_rate": 2.8976256117088195e-05,
      "loss": 0.0668,
      "step": 138
    },
    {
      "epoch": 0.7637362637362637,
      "grad_norm": 0.1613149493932724,
      "learning_rate": 2.7738038437271284e-05,
      "loss": 0.0769,
      "step": 139
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 0.12686750292778015,
      "learning_rate": 2.6522584913693294e-05,
      "loss": 0.0586,
      "step": 140
    }
  ],
  "logging_steps": 1,
  "max_steps": 182,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.078551699224986e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}