|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05868329359985329,
  "eval_steps": 34,
  "global_step": 120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004890274466654441,
      "grad_norm": 1.4216102361679077,
      "learning_rate": 6.666666666666667e-06,
      "loss": 3.258,
      "step": 1
    },
    {
      "epoch": 0.0004890274466654441,
      "eval_loss": 3.5600969791412354,
      "eval_runtime": 1313.2977,
      "eval_samples_per_second": 1.967,
      "eval_steps_per_second": 0.656,
      "step": 1
    },
    {
      "epoch": 0.0009780548933308881,
      "grad_norm": 1.504632830619812,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 3.8029,
      "step": 2
    },
    {
      "epoch": 0.0014670823399963323,
      "grad_norm": 1.6489232778549194,
      "learning_rate": 2e-05,
      "loss": 3.6594,
      "step": 3
    },
    {
      "epoch": 0.0019561097866617762,
      "grad_norm": 1.274643063545227,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 3.1495,
      "step": 4
    },
    {
      "epoch": 0.0024451372333272204,
      "grad_norm": 1.4067387580871582,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 3.6677,
      "step": 5
    },
    {
      "epoch": 0.0029341646799926646,
      "grad_norm": 1.7147626876831055,
      "learning_rate": 4e-05,
      "loss": 3.8075,
      "step": 6
    },
    {
      "epoch": 0.0034231921266581087,
      "grad_norm": 1.672584891319275,
      "learning_rate": 4.666666666666667e-05,
      "loss": 3.3499,
      "step": 7
    },
    {
      "epoch": 0.0039122195733235525,
      "grad_norm": 2.0120997428894043,
      "learning_rate": 5.333333333333333e-05,
      "loss": 4.2653,
      "step": 8
    },
    {
      "epoch": 0.004401247019988997,
      "grad_norm": 2.0601062774658203,
      "learning_rate": 6e-05,
      "loss": 3.3055,
      "step": 9
    },
    {
      "epoch": 0.004890274466654441,
      "grad_norm": 2.0153276920318604,
      "learning_rate": 6.666666666666667e-05,
      "loss": 3.1547,
      "step": 10
    },
    {
      "epoch": 0.005379301913319885,
      "grad_norm": 2.443328857421875,
      "learning_rate": 7.333333333333333e-05,
      "loss": 3.4654,
      "step": 11
    },
    {
      "epoch": 0.005868329359985329,
      "grad_norm": 2.801769971847534,
      "learning_rate": 8e-05,
      "loss": 2.8224,
      "step": 12
    },
    {
      "epoch": 0.006357356806650773,
      "grad_norm": 2.6738572120666504,
      "learning_rate": 8.666666666666667e-05,
      "loss": 2.8465,
      "step": 13
    },
    {
      "epoch": 0.0068463842533162175,
      "grad_norm": 2.8812367916107178,
      "learning_rate": 9.333333333333334e-05,
      "loss": 2.622,
      "step": 14
    },
    {
      "epoch": 0.007335411699981661,
      "grad_norm": 2.9697248935699463,
      "learning_rate": 0.0001,
      "loss": 2.9623,
      "step": 15
    },
    {
      "epoch": 0.007824439146647105,
      "grad_norm": 2.5231637954711914,
      "learning_rate": 0.00010666666666666667,
      "loss": 2.2665,
      "step": 16
    },
    {
      "epoch": 0.00831346659331255,
      "grad_norm": 2.341275691986084,
      "learning_rate": 0.00011333333333333334,
      "loss": 2.2247,
      "step": 17
    },
    {
      "epoch": 0.008802494039977994,
      "grad_norm": 2.683793306350708,
      "learning_rate": 0.00012,
      "loss": 1.9371,
      "step": 18
    },
    {
      "epoch": 0.009291521486643438,
      "grad_norm": 2.972334384918213,
      "learning_rate": 0.00012666666666666666,
      "loss": 1.8682,
      "step": 19
    },
    {
      "epoch": 0.009780548933308882,
      "grad_norm": 3.541074275970459,
      "learning_rate": 0.00013333333333333334,
      "loss": 1.912,
      "step": 20
    },
    {
      "epoch": 0.010269576379974325,
      "grad_norm": 2.9546866416931152,
      "learning_rate": 0.00014,
      "loss": 1.6904,
      "step": 21
    },
    {
      "epoch": 0.01075860382663977,
      "grad_norm": 2.5912656784057617,
      "learning_rate": 0.00014666666666666666,
      "loss": 1.5096,
      "step": 22
    },
    {
      "epoch": 0.011247631273305215,
      "grad_norm": 2.258963108062744,
      "learning_rate": 0.00015333333333333334,
      "loss": 1.3978,
      "step": 23
    },
    {
      "epoch": 0.011736658719970658,
      "grad_norm": 2.275815725326538,
      "learning_rate": 0.00016,
      "loss": 1.1538,
      "step": 24
    },
    {
      "epoch": 0.012225686166636102,
      "grad_norm": 2.224825382232666,
      "learning_rate": 0.0001666666666666667,
      "loss": 1.471,
      "step": 25
    },
    {
      "epoch": 0.012714713613301546,
      "grad_norm": 1.876035451889038,
      "learning_rate": 0.00017333333333333334,
      "loss": 1.4368,
      "step": 26
    },
    {
      "epoch": 0.013203741059966991,
      "grad_norm": 1.6051979064941406,
      "learning_rate": 0.00018,
      "loss": 1.3951,
      "step": 27
    },
    {
      "epoch": 0.013692768506632435,
      "grad_norm": 1.7268619537353516,
      "learning_rate": 0.0001866666666666667,
      "loss": 1.1915,
      "step": 28
    },
    {
      "epoch": 0.014181795953297879,
      "grad_norm": 1.888218879699707,
      "learning_rate": 0.00019333333333333333,
      "loss": 1.1358,
      "step": 29
    },
    {
      "epoch": 0.014670823399963322,
      "grad_norm": 1.7404896020889282,
      "learning_rate": 0.0002,
      "loss": 1.2051,
      "step": 30
    },
    {
      "epoch": 0.015159850846628768,
      "grad_norm": 1.560935139656067,
      "learning_rate": 0.00019998292504580528,
      "loss": 1.1611,
      "step": 31
    },
    {
      "epoch": 0.01564887829329421,
      "grad_norm": 2.000502824783325,
      "learning_rate": 0.0001999317060143023,
      "loss": 1.4315,
      "step": 32
    },
    {
      "epoch": 0.016137905739959654,
      "grad_norm": 1.7972925901412964,
      "learning_rate": 0.0001998463603967434,
      "loss": 1.1507,
      "step": 33
    },
    {
      "epoch": 0.0166269331866251,
      "grad_norm": 1.4039409160614014,
      "learning_rate": 0.00019972691733857883,
      "loss": 1.1626,
      "step": 34
    },
    {
      "epoch": 0.0166269331866251,
      "eval_loss": 1.1705251932144165,
      "eval_runtime": 1316.0492,
      "eval_samples_per_second": 1.963,
      "eval_steps_per_second": 0.654,
      "step": 34
    },
    {
      "epoch": 0.017115960633290545,
      "grad_norm": 1.5077097415924072,
      "learning_rate": 0.00019957341762950344,
      "loss": 1.0223,
      "step": 35
    },
    {
      "epoch": 0.017604988079955988,
      "grad_norm": 1.4470211267471313,
      "learning_rate": 0.0001993859136895274,
      "loss": 1.0228,
      "step": 36
    },
    {
      "epoch": 0.018094015526621432,
      "grad_norm": 1.7781013250350952,
      "learning_rate": 0.00019916446955107428,
      "loss": 1.0388,
      "step": 37
    },
    {
      "epoch": 0.018583042973286876,
      "grad_norm": 1.8719812631607056,
      "learning_rate": 0.0001989091608371146,
      "loss": 1.0568,
      "step": 38
    },
    {
      "epoch": 0.01907207041995232,
      "grad_norm": 1.389418363571167,
      "learning_rate": 0.00019862007473534025,
      "loss": 1.2011,
      "step": 39
    },
    {
      "epoch": 0.019561097866617763,
      "grad_norm": 1.561482310295105,
      "learning_rate": 0.0001982973099683902,
      "loss": 1.2364,
      "step": 40
    },
    {
      "epoch": 0.020050125313283207,
      "grad_norm": 1.3894869089126587,
      "learning_rate": 0.0001979409767601366,
      "loss": 1.0156,
      "step": 41
    },
    {
      "epoch": 0.02053915275994865,
      "grad_norm": 1.2144147157669067,
      "learning_rate": 0.00019755119679804367,
      "loss": 0.9838,
      "step": 42
    },
    {
      "epoch": 0.021028180206614098,
      "grad_norm": 1.2613141536712646,
      "learning_rate": 0.0001971281031916114,
      "loss": 0.9763,
      "step": 43
    },
    {
      "epoch": 0.02151720765327954,
      "grad_norm": 1.316279649734497,
      "learning_rate": 0.00019667184042691875,
      "loss": 1.0909,
      "step": 44
    },
    {
      "epoch": 0.022006235099944985,
      "grad_norm": 1.4056583642959595,
      "learning_rate": 0.00019618256431728194,
      "loss": 1.2575,
      "step": 45
    },
    {
      "epoch": 0.02249526254661043,
      "grad_norm": 1.4552671909332275,
      "learning_rate": 0.0001956604419500441,
      "loss": 1.1668,
      "step": 46
    },
    {
      "epoch": 0.022984289993275873,
      "grad_norm": 1.2041035890579224,
      "learning_rate": 0.00019510565162951537,
      "loss": 1.0945,
      "step": 47
    },
    {
      "epoch": 0.023473317439941317,
      "grad_norm": 1.2852847576141357,
      "learning_rate": 0.00019451838281608197,
      "loss": 1.1085,
      "step": 48
    },
    {
      "epoch": 0.02396234488660676,
      "grad_norm": 1.4202251434326172,
      "learning_rate": 0.00019389883606150566,
      "loss": 1.1757,
      "step": 49
    },
    {
      "epoch": 0.024451372333272204,
      "grad_norm": 1.2115014791488647,
      "learning_rate": 0.00019324722294043558,
      "loss": 1.0677,
      "step": 50
    },
    {
      "epoch": 0.024940399779937648,
      "grad_norm": 1.692592740058899,
      "learning_rate": 0.00019256376597815564,
      "loss": 0.9642,
      "step": 51
    },
    {
      "epoch": 0.02542942722660309,
      "grad_norm": 1.4862620830535889,
      "learning_rate": 0.00019184869857459232,
      "loss": 1.2206,
      "step": 52
    },
    {
      "epoch": 0.02591845467326854,
      "grad_norm": 1.3480483293533325,
      "learning_rate": 0.00019110226492460885,
      "loss": 1.0028,
      "step": 53
    },
    {
      "epoch": 0.026407482119933982,
      "grad_norm": 1.2847557067871094,
      "learning_rate": 0.0001903247199346129,
      "loss": 1.1267,
      "step": 54
    },
    {
      "epoch": 0.026896509566599426,
      "grad_norm": 1.1814807653427124,
      "learning_rate": 0.00018951632913550626,
      "loss": 0.963,
      "step": 55
    },
    {
      "epoch": 0.02738553701326487,
      "grad_norm": 1.348286747932434,
      "learning_rate": 0.0001886773685920062,
      "loss": 1.0553,
      "step": 56
    },
    {
      "epoch": 0.027874564459930314,
      "grad_norm": 1.2073681354522705,
      "learning_rate": 0.0001878081248083698,
      "loss": 1.0966,
      "step": 57
    },
    {
      "epoch": 0.028363591906595757,
      "grad_norm": 1.29014253616333,
      "learning_rate": 0.00018690889463055283,
      "loss": 1.2225,
      "step": 58
    },
    {
      "epoch": 0.0288526193532612,
      "grad_norm": 1.265650987625122,
      "learning_rate": 0.00018597998514483725,
      "loss": 0.9056,
      "step": 59
    },
    {
      "epoch": 0.029341646799926645,
      "grad_norm": 1.264709711074829,
      "learning_rate": 0.00018502171357296144,
      "loss": 1.1566,
      "step": 60
    },
    {
      "epoch": 0.02983067424659209,
      "grad_norm": 1.1395761966705322,
      "learning_rate": 0.00018403440716378928,
      "loss": 0.9825,
      "step": 61
    },
    {
      "epoch": 0.030319701693257536,
      "grad_norm": 1.1984392404556274,
      "learning_rate": 0.00018301840308155507,
      "loss": 1.1295,
      "step": 62
    },
    {
      "epoch": 0.03080872913992298,
      "grad_norm": 1.3706929683685303,
      "learning_rate": 0.00018197404829072215,
      "loss": 1.3507,
      "step": 63
    },
    {
      "epoch": 0.03129775658658842,
      "grad_norm": 1.0434547662734985,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.9119,
      "step": 64
    },
    {
      "epoch": 0.03178678403325386,
      "grad_norm": 1.2470561265945435,
      "learning_rate": 0.000179801722728024,
      "loss": 1.1485,
      "step": 65
    },
    {
      "epoch": 0.03227581147991931,
      "grad_norm": 1.2176910638809204,
      "learning_rate": 0.00017867449380334834,
      "loss": 1.0743,
      "step": 66
    },
    {
      "epoch": 0.03276483892658476,
      "grad_norm": 1.239547848701477,
      "learning_rate": 0.00017752039761111297,
      "loss": 1.0839,
      "step": 67
    },
    {
      "epoch": 0.0332538663732502,
      "grad_norm": 1.4201483726501465,
      "learning_rate": 0.00017633982827411032,
      "loss": 1.1837,
      "step": 68
    },
    {
      "epoch": 0.0332538663732502,
      "eval_loss": 1.062552571296692,
      "eval_runtime": 1315.0047,
      "eval_samples_per_second": 1.964,
      "eval_steps_per_second": 0.655,
      "step": 68
    },
    {
      "epoch": 0.033742893819915645,
      "grad_norm": 1.2789006233215332,
      "learning_rate": 0.00017513318895568737,
      "loss": 1.0102,
      "step": 69
    },
    {
      "epoch": 0.03423192126658109,
      "grad_norm": 1.3672975301742554,
      "learning_rate": 0.00017390089172206592,
      "loss": 1.3682,
      "step": 70
    },
    {
      "epoch": 0.03472094871324653,
      "grad_norm": 1.5209168195724487,
      "learning_rate": 0.00017264335740162242,
      "loss": 1.2258,
      "step": 71
    },
    {
      "epoch": 0.035209976159911976,
      "grad_norm": 1.3500794172286987,
      "learning_rate": 0.00017136101544117525,
      "loss": 1.2796,
      "step": 72
    },
    {
      "epoch": 0.03569900360657742,
      "grad_norm": 1.3323686122894287,
      "learning_rate": 0.0001700543037593291,
      "loss": 0.9231,
      "step": 73
    },
    {
      "epoch": 0.036188031053242864,
      "grad_norm": 1.4972914457321167,
      "learning_rate": 0.00016872366859692627,
      "loss": 1.2526,
      "step": 74
    },
    {
      "epoch": 0.03667705849990831,
      "grad_norm": 1.2735319137573242,
      "learning_rate": 0.00016736956436465573,
      "loss": 1.368,
      "step": 75
    },
    {
      "epoch": 0.03716608594657375,
      "grad_norm": 1.0858744382858276,
      "learning_rate": 0.0001659924534878723,
      "loss": 0.7402,
      "step": 76
    },
    {
      "epoch": 0.037655113393239195,
      "grad_norm": 1.3259844779968262,
      "learning_rate": 0.00016459280624867874,
      "loss": 0.9388,
      "step": 77
    },
    {
      "epoch": 0.03814414083990464,
      "grad_norm": 1.348748803138733,
      "learning_rate": 0.0001631711006253251,
      "loss": 1.2139,
      "step": 78
    },
    {
      "epoch": 0.03863316828657008,
      "grad_norm": 3.0502963066101074,
      "learning_rate": 0.0001617278221289793,
      "loss": 0.9326,
      "step": 79
    },
    {
      "epoch": 0.039122195733235526,
      "grad_norm": 1.213534951210022,
      "learning_rate": 0.00016026346363792567,
      "loss": 1.042,
      "step": 80
    },
    {
      "epoch": 0.03961122317990097,
      "grad_norm": 1.3361854553222656,
      "learning_rate": 0.00015877852522924732,
      "loss": 1.1104,
      "step": 81
    },
    {
      "epoch": 0.040100250626566414,
      "grad_norm": 1.0672283172607422,
      "learning_rate": 0.00015727351400805052,
      "loss": 0.7879,
      "step": 82
    },
    {
      "epoch": 0.04058927807323186,
      "grad_norm": 1.6033365726470947,
      "learning_rate": 0.00015574894393428855,
      "loss": 1.0356,
      "step": 83
    },
    {
      "epoch": 0.0410783055198973,
      "grad_norm": 1.579293131828308,
      "learning_rate": 0.00015420533564724495,
      "loss": 1.3671,
      "step": 84
    },
    {
      "epoch": 0.041567332966562745,
      "grad_norm": 3.389050006866455,
      "learning_rate": 0.0001526432162877356,
      "loss": 1.0248,
      "step": 85
    },
    {
      "epoch": 0.042056360413228196,
      "grad_norm": 1.3093312978744507,
      "learning_rate": 0.0001510631193180907,
      "loss": 1.017,
      "step": 86
    },
    {
      "epoch": 0.04254538785989364,
      "grad_norm": 2.124237537384033,
      "learning_rate": 0.0001494655843399779,
      "loss": 0.9459,
      "step": 87
    },
    {
      "epoch": 0.04303441530655908,
      "grad_norm": 1.416883111000061,
      "learning_rate": 0.00014785115691012864,
      "loss": 1.0393,
      "step": 88
    },
    {
      "epoch": 0.04352344275322453,
      "grad_norm": 1.1954100131988525,
      "learning_rate": 0.00014622038835403133,
      "loss": 1.4326,
      "step": 89
    },
    {
      "epoch": 0.04401247019988997,
      "grad_norm": 1.1562423706054688,
      "learning_rate": 0.00014457383557765386,
      "loss": 0.9829,
      "step": 90
    },
    {
      "epoch": 0.044501497646555414,
      "grad_norm": 1.9508070945739746,
      "learning_rate": 0.0001429120608772609,
      "loss": 1.1847,
      "step": 91
    },
    {
      "epoch": 0.04499052509322086,
      "grad_norm": 1.0829613208770752,
      "learning_rate": 0.00014123563174739037,
      "loss": 1.0758,
      "step": 92
    },
    {
      "epoch": 0.0454795525398863,
      "grad_norm": 1.0986757278442383,
      "learning_rate": 0.00013954512068705424,
      "loss": 0.9567,
      "step": 93
    },
    {
      "epoch": 0.045968579986551746,
      "grad_norm": 1.0479655265808105,
      "learning_rate": 0.00013784110500423104,
      "loss": 0.8693,
      "step": 94
    },
    {
      "epoch": 0.04645760743321719,
      "grad_norm": 1.2674229145050049,
      "learning_rate": 0.00013612416661871533,
      "loss": 1.0495,
      "step": 95
    },
    {
      "epoch": 0.04694663487988263,
      "grad_norm": 1.1395543813705444,
      "learning_rate": 0.00013439489186339282,
      "loss": 0.9213,
      "step": 96
    },
    {
      "epoch": 0.04743566232654808,
      "grad_norm": 1.1885366439819336,
      "learning_rate": 0.0001326538712840083,
      "loss": 0.9957,
      "step": 97
    },
    {
      "epoch": 0.04792468977321352,
      "grad_norm": 1.3940829038619995,
      "learning_rate": 0.00013090169943749476,
      "loss": 1.056,
      "step": 98
    },
    {
      "epoch": 0.048413717219878964,
      "grad_norm": 1.4555394649505615,
      "learning_rate": 0.00012913897468893248,
      "loss": 1.1525,
      "step": 99
    },
    {
      "epoch": 0.04890274466654441,
      "grad_norm": 1.1065882444381714,
      "learning_rate": 0.0001273662990072083,
      "loss": 0.8194,
      "step": 100
    },
    {
      "epoch": 0.04939177211320985,
      "grad_norm": 1.2166293859481812,
      "learning_rate": 0.00012558427775944357,
      "loss": 1.198,
      "step": 101
    },
    {
      "epoch": 0.049880799559875295,
      "grad_norm": 1.2149029970169067,
      "learning_rate": 0.00012379351950426187,
      "loss": 0.9292,
      "step": 102
    },
    {
      "epoch": 0.049880799559875295,
      "eval_loss": 1.0268832445144653,
      "eval_runtime": 1315.1963,
      "eval_samples_per_second": 1.964,
      "eval_steps_per_second": 0.655,
      "step": 102
    },
    {
      "epoch": 0.05036982700654074,
      "grad_norm": 1.1670883893966675,
      "learning_rate": 0.00012199463578396688,
      "loss": 0.8447,
      "step": 103
    },
    {
      "epoch": 0.05085885445320618,
      "grad_norm": 1.4905412197113037,
      "learning_rate": 0.00012018824091570103,
      "loss": 1.2532,
      "step": 104
    },
    {
      "epoch": 0.051347881899871634,
      "grad_norm": 1.334544062614441,
      "learning_rate": 0.00011837495178165706,
      "loss": 0.7804,
      "step": 105
    },
    {
      "epoch": 0.05183690934653708,
      "grad_norm": 1.6085052490234375,
      "learning_rate": 0.000116555387618413,
      "loss": 1.093,
      "step": 106
    },
    {
      "epoch": 0.05232593679320252,
      "grad_norm": 1.4606610536575317,
      "learning_rate": 0.00011473016980546377,
      "loss": 1.0967,
      "step": 107
    },
    {
      "epoch": 0.052814964239867965,
      "grad_norm": 1.591978669166565,
      "learning_rate": 0.00011289992165302035,
      "loss": 1.1526,
      "step": 108
    },
    {
      "epoch": 0.05330399168653341,
      "grad_norm": 1.4087272882461548,
      "learning_rate": 0.00011106526818915008,
      "loss": 1.1814,
      "step": 109
    },
    {
      "epoch": 0.05379301913319885,
      "grad_norm": 1.3588765859603882,
      "learning_rate": 0.00010922683594633021,
      "loss": 1.11,
      "step": 110
    },
    {
      "epoch": 0.054282046579864296,
      "grad_norm": 1.154991865158081,
      "learning_rate": 0.00010738525274748741,
      "loss": 0.9469,
      "step": 111
    },
    {
      "epoch": 0.05477107402652974,
      "grad_norm": 1.1687790155410767,
      "learning_rate": 0.000105541147491597,
      "loss": 1.1575,
      "step": 112
    },
    {
      "epoch": 0.05526010147319518,
      "grad_norm": 1.1781808137893677,
      "learning_rate": 0.00010369514993891452,
      "loss": 1.0453,
      "step": 113
    },
    {
      "epoch": 0.05574912891986063,
      "grad_norm": 1.1359920501708984,
      "learning_rate": 0.00010184789049591299,
      "loss": 1.072,
      "step": 114
    },
    {
      "epoch": 0.05623815636652607,
      "grad_norm": 1.2046093940734863,
      "learning_rate": 0.0001,
      "loss": 0.921,
      "step": 115
    },
    {
      "epoch": 0.056727183813191515,
      "grad_norm": 1.1520406007766724,
      "learning_rate": 9.815210950408704e-05,
      "loss": 0.9206,
      "step": 116
    },
    {
      "epoch": 0.05721621125985696,
      "grad_norm": 1.240869402885437,
      "learning_rate": 9.630485006108553e-05,
      "loss": 1.0142,
      "step": 117
    },
    {
      "epoch": 0.0577052387065224,
      "grad_norm": 1.4092172384262085,
      "learning_rate": 9.4458852508403e-05,
      "loss": 1.2377,
      "step": 118
    },
    {
      "epoch": 0.058194266153187846,
      "grad_norm": 1.3101301193237305,
      "learning_rate": 9.261474725251261e-05,
      "loss": 1.1154,
      "step": 119
    },
    {
      "epoch": 0.05868329359985329,
      "grad_norm": 1.2811529636383057,
      "learning_rate": 9.077316405366981e-05,
      "loss": 0.9481,
      "step": 120
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.3267450346602496e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null
}