|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9999069680900549, |
|
"eval_steps": 100, |
|
"global_step": 2687, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0018606381989022234, |
|
"grad_norm": 0.025198739022016525, |
|
"learning_rate": 3.7174721189591085e-07, |
|
"loss": 0.7637, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.003721276397804447, |
|
"grad_norm": 0.017377199605107307, |
|
"learning_rate": 7.434944237918217e-07, |
|
"loss": 0.7786, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.0055819145967066705, |
|
"grad_norm": 0.019743537530303, |
|
"learning_rate": 1.1152416356877324e-06, |
|
"loss": 0.7174, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.007442552795608894, |
|
"grad_norm": 0.017805561423301697, |
|
"learning_rate": 1.4869888475836434e-06, |
|
"loss": 0.724, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.009303190994511117, |
|
"grad_norm": 0.017246991395950317, |
|
"learning_rate": 1.858736059479554e-06, |
|
"loss": 0.7668, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.011163829193413341, |
|
"grad_norm": 0.023152414709329605, |
|
"learning_rate": 2.2304832713754648e-06, |
|
"loss": 0.781, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.013024467392315565, |
|
"grad_norm": 0.019485710188746452, |
|
"learning_rate": 2.6022304832713758e-06, |
|
"loss": 0.7593, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.014885105591217787, |
|
"grad_norm": 0.01784881390631199, |
|
"learning_rate": 2.973977695167287e-06, |
|
"loss": 0.762, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.01674574379012001, |
|
"grad_norm": 0.01972021535038948, |
|
"learning_rate": 3.3457249070631974e-06, |
|
"loss": 0.7489, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.018606381989022234, |
|
"grad_norm": 0.019333072006702423, |
|
"learning_rate": 3.717472118959108e-06, |
|
"loss": 0.7461, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.02046702018792446, |
|
"grad_norm": 0.015675414353609085, |
|
"learning_rate": 4.089219330855019e-06, |
|
"loss": 0.7621, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.022327658386826682, |
|
"grad_norm": 0.021361010149121284, |
|
"learning_rate": 4.4609665427509296e-06, |
|
"loss": 0.7677, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.024188296585728904, |
|
"grad_norm": 0.021466689184308052, |
|
"learning_rate": 4.83271375464684e-06, |
|
"loss": 0.7857, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.02604893478463113, |
|
"grad_norm": 0.019599348306655884, |
|
"learning_rate": 5.2044609665427516e-06, |
|
"loss": 0.7308, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.027909572983533353, |
|
"grad_norm": 0.015855278819799423, |
|
"learning_rate": 5.576208178438662e-06, |
|
"loss": 0.7423, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.029770211182435575, |
|
"grad_norm": 0.019443219527602196, |
|
"learning_rate": 5.947955390334574e-06, |
|
"loss": 0.7782, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.0316308493813378, |
|
"grad_norm": 0.01575319655239582, |
|
"learning_rate": 6.319702602230484e-06, |
|
"loss": 0.7494, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.03349148758024002, |
|
"grad_norm": 0.0159482192248106, |
|
"learning_rate": 6.691449814126395e-06, |
|
"loss": 0.6999, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.03535212577914225, |
|
"grad_norm": 0.016392122954130173, |
|
"learning_rate": 7.063197026022306e-06, |
|
"loss": 0.6966, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.03721276397804447, |
|
"grad_norm": 0.017619600519537926, |
|
"learning_rate": 7.434944237918216e-06, |
|
"loss": 0.7659, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.03721276397804447, |
|
"eval_loss": 0.760661780834198, |
|
"eval_runtime": 26.8914, |
|
"eval_samples_per_second": 4.723, |
|
"eval_steps_per_second": 4.723, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.039073402176946694, |
|
"grad_norm": 0.02020377479493618, |
|
"learning_rate": 7.806691449814127e-06, |
|
"loss": 0.7475, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.04093404037584892, |
|
"grad_norm": 0.018723690882325172, |
|
"learning_rate": 8.178438661710038e-06, |
|
"loss": 0.7233, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.04279467857475114, |
|
"grad_norm": 0.017194446176290512, |
|
"learning_rate": 8.550185873605949e-06, |
|
"loss": 0.7655, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.044655316773653364, |
|
"grad_norm": 0.013910962268710136, |
|
"learning_rate": 8.921933085501859e-06, |
|
"loss": 0.7493, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.04651595497255559, |
|
"grad_norm": 0.01600288413465023, |
|
"learning_rate": 9.29368029739777e-06, |
|
"loss": 0.7528, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.04837659317145781, |
|
"grad_norm": 0.01554879080504179, |
|
"learning_rate": 9.66542750929368e-06, |
|
"loss": 0.6947, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.050237231370360035, |
|
"grad_norm": 0.015109645202755928, |
|
"learning_rate": 1.0037174721189591e-05, |
|
"loss": 0.6647, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.05209786956926226, |
|
"grad_norm": 0.015620779246091843, |
|
"learning_rate": 1.0408921933085503e-05, |
|
"loss": 0.7298, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.05395850776816448, |
|
"grad_norm": 0.013970567844808102, |
|
"learning_rate": 1.0780669144981412e-05, |
|
"loss": 0.7485, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.055819145967066705, |
|
"grad_norm": 0.014776401221752167, |
|
"learning_rate": 1.1152416356877324e-05, |
|
"loss": 0.6767, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.057679784165968924, |
|
"grad_norm": 0.014368101954460144, |
|
"learning_rate": 1.1524163568773235e-05, |
|
"loss": 0.7037, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.05954042236487115, |
|
"grad_norm": 0.01449244562536478, |
|
"learning_rate": 1.1895910780669147e-05, |
|
"loss": 0.7305, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.061401060563773376, |
|
"grad_norm": 0.015350298024713993, |
|
"learning_rate": 1.2267657992565056e-05, |
|
"loss": 0.6804, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.0632616987626756, |
|
"grad_norm": 0.014395203441381454, |
|
"learning_rate": 1.2639405204460968e-05, |
|
"loss": 0.6832, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.06512233696157782, |
|
"grad_norm": 0.015541068278253078, |
|
"learning_rate": 1.3011152416356879e-05, |
|
"loss": 0.7168, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.06698297516048005, |
|
"grad_norm": 0.012579885311424732, |
|
"learning_rate": 1.338289962825279e-05, |
|
"loss": 0.693, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.06884361335938227, |
|
"grad_norm": 0.014168789610266685, |
|
"learning_rate": 1.37546468401487e-05, |
|
"loss": 0.7056, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.0707042515582845, |
|
"grad_norm": 0.012087655253708363, |
|
"learning_rate": 1.4126394052044612e-05, |
|
"loss": 0.7006, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.07256488975718671, |
|
"grad_norm": 0.014433121308684349, |
|
"learning_rate": 1.4498141263940521e-05, |
|
"loss": 0.7016, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.07442552795608894, |
|
"grad_norm": 0.015074139460921288, |
|
"learning_rate": 1.4869888475836432e-05, |
|
"loss": 0.6912, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.07442552795608894, |
|
"eval_loss": 0.7027233242988586, |
|
"eval_runtime": 26.5611, |
|
"eval_samples_per_second": 4.781, |
|
"eval_steps_per_second": 4.781, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.07628616615499116, |
|
"grad_norm": 0.013617471791803837, |
|
"learning_rate": 1.5241635687732344e-05, |
|
"loss": 0.7139, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.07814680435389339, |
|
"grad_norm": 0.013274065218865871, |
|
"learning_rate": 1.5613382899628255e-05, |
|
"loss": 0.6955, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.08000744255279561, |
|
"grad_norm": 0.013781987130641937, |
|
"learning_rate": 1.5985130111524165e-05, |
|
"loss": 0.65, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.08186808075169784, |
|
"grad_norm": 0.01373015996068716, |
|
"learning_rate": 1.6356877323420076e-05, |
|
"loss": 0.6681, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.08372871895060005, |
|
"grad_norm": 0.01403126772493124, |
|
"learning_rate": 1.6728624535315986e-05, |
|
"loss": 0.6981, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.08558935714950228, |
|
"grad_norm": 0.01332685723900795, |
|
"learning_rate": 1.7100371747211897e-05, |
|
"loss": 0.6517, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.0874499953484045, |
|
"grad_norm": 0.013317782431840897, |
|
"learning_rate": 1.7472118959107808e-05, |
|
"loss": 0.6835, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.08931063354730673, |
|
"grad_norm": 0.015096917748451233, |
|
"learning_rate": 1.7843866171003718e-05, |
|
"loss": 0.7138, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.09117127174620895, |
|
"grad_norm": 0.013502717949450016, |
|
"learning_rate": 1.8215613382899632e-05, |
|
"loss": 0.6722, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.09303190994511118, |
|
"grad_norm": 0.01279054582118988, |
|
"learning_rate": 1.858736059479554e-05, |
|
"loss": 0.6323, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.09489254814401339, |
|
"grad_norm": 0.011777005158364773, |
|
"learning_rate": 1.8959107806691453e-05, |
|
"loss": 0.6332, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.09675318634291562, |
|
"grad_norm": 0.01320689544081688, |
|
"learning_rate": 1.933085501858736e-05, |
|
"loss": 0.7034, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.09861382454181784, |
|
"grad_norm": 0.012140162289142609, |
|
"learning_rate": 1.970260223048327e-05, |
|
"loss": 0.6154, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.10047446274072007, |
|
"grad_norm": 0.01772845722734928, |
|
"learning_rate": 1.9999991559715313e-05, |
|
"loss": 0.7119, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.1023351009396223, |
|
"grad_norm": 0.012948377057909966, |
|
"learning_rate": 1.999969615124717e-05, |
|
"loss": 0.6554, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.10419573913852452, |
|
"grad_norm": 0.013341420330107212, |
|
"learning_rate": 1.9998978742792098e-05, |
|
"loss": 0.636, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.10605637733742673, |
|
"grad_norm": 0.012272336520254612, |
|
"learning_rate": 1.999783936462566e-05, |
|
"loss": 0.6182, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.10791701553632896, |
|
"grad_norm": 0.012329615652561188, |
|
"learning_rate": 1.999627806483107e-05, |
|
"loss": 0.601, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.10977765373523118, |
|
"grad_norm": 0.01313008088618517, |
|
"learning_rate": 1.999429490929718e-05, |
|
"loss": 0.7002, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.11163829193413341, |
|
"grad_norm": 0.01170238945633173, |
|
"learning_rate": 1.9991889981715696e-05, |
|
"loss": 0.6784, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.11163829193413341, |
|
"eval_loss": 0.676296055316925, |
|
"eval_runtime": 26.7211, |
|
"eval_samples_per_second": 4.753, |
|
"eval_steps_per_second": 4.753, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.11349893013303564, |
|
"grad_norm": 0.012631393037736416, |
|
"learning_rate": 1.9989063383577644e-05, |
|
"loss": 0.6332, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.11535956833193785, |
|
"grad_norm": 0.012392008677124977, |
|
"learning_rate": 1.998581523416908e-05, |
|
"loss": 0.713, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.11722020653084007, |
|
"grad_norm": 0.012342042289674282, |
|
"learning_rate": 1.998214567056607e-05, |
|
"loss": 0.6072, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.1190808447297423, |
|
"grad_norm": 0.011801215820014477, |
|
"learning_rate": 1.9978054847628908e-05, |
|
"loss": 0.6553, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.12094148292864453, |
|
"grad_norm": 0.013543626293540001, |
|
"learning_rate": 1.997354293799555e-05, |
|
"loss": 0.638, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.12280212112754675, |
|
"grad_norm": 0.012367943301796913, |
|
"learning_rate": 1.9968610132074372e-05, |
|
"loss": 0.6867, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.12466275932644898, |
|
"grad_norm": 0.01382706593722105, |
|
"learning_rate": 1.99632566380361e-05, |
|
"loss": 0.67, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.1265233975253512, |
|
"grad_norm": 0.01336714904755354, |
|
"learning_rate": 1.9957482681805036e-05, |
|
"loss": 0.6615, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.12838403572425341, |
|
"grad_norm": 0.012786686420440674, |
|
"learning_rate": 1.9951288507049532e-05, |
|
"loss": 0.6343, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.13024467392315564, |
|
"grad_norm": 0.014750728383660316, |
|
"learning_rate": 1.9944674375171697e-05, |
|
"loss": 0.6478, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.13210531212205787, |
|
"grad_norm": 0.01330367662012577, |
|
"learning_rate": 1.9937640565296372e-05, |
|
"loss": 0.6844, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.1339659503209601, |
|
"grad_norm": 0.011332959868013859, |
|
"learning_rate": 1.9930187374259338e-05, |
|
"loss": 0.6188, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.13582658851986232, |
|
"grad_norm": 0.013507647439837456, |
|
"learning_rate": 1.992231511659481e-05, |
|
"loss": 0.6844, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.13768722671876454, |
|
"grad_norm": 0.01494019664824009, |
|
"learning_rate": 1.991402412452214e-05, |
|
"loss": 0.6616, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.13954786491766677, |
|
"grad_norm": 0.012762832455337048, |
|
"learning_rate": 1.9905314747931816e-05, |
|
"loss": 0.6797, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.141408503116569, |
|
"grad_norm": 0.014913683757185936, |
|
"learning_rate": 1.989618735437069e-05, |
|
"loss": 0.6268, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.1432691413154712, |
|
"grad_norm": 0.012221734039485455, |
|
"learning_rate": 1.9886642329026457e-05, |
|
"loss": 0.6587, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.14512977951437342, |
|
"grad_norm": 0.01553855836391449, |
|
"learning_rate": 1.9876680074711417e-05, |
|
"loss": 0.6403, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.14699041771327565, |
|
"grad_norm": 0.013682518154382706, |
|
"learning_rate": 1.986630101184546e-05, |
|
"loss": 0.6287, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.14885105591217787, |
|
"grad_norm": 0.013481502421200275, |
|
"learning_rate": 1.9855505578438343e-05, |
|
"loss": 0.6757, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.14885105591217787, |
|
"eval_loss": 0.6647208333015442, |
|
"eval_runtime": 27.129, |
|
"eval_samples_per_second": 4.681, |
|
"eval_steps_per_second": 4.681, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.1507116941110801, |
|
"grad_norm": 0.01228385604918003, |
|
"learning_rate": 1.984429423007117e-05, |
|
"loss": 0.6277, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.15257233230998232, |
|
"grad_norm": 0.014119746163487434, |
|
"learning_rate": 1.9832667439877217e-05, |
|
"loss": 0.615, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.15443297050888455, |
|
"grad_norm": 0.014395875856280327, |
|
"learning_rate": 1.9820625698521918e-05, |
|
"loss": 0.6417, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.15629360870778677, |
|
"grad_norm": 0.013175971806049347, |
|
"learning_rate": 1.9808169514182182e-05, |
|
"loss": 0.6509, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.158154246906689, |
|
"grad_norm": 0.015295376069843769, |
|
"learning_rate": 1.9795299412524948e-05, |
|
"loss": 0.6275, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.16001488510559123, |
|
"grad_norm": 0.014611025340855122, |
|
"learning_rate": 1.9782015936684987e-05, |
|
"loss": 0.6627, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.16187552330449345, |
|
"grad_norm": 0.01412207167595625, |
|
"learning_rate": 1.9768319647242e-05, |
|
"loss": 0.6362, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.16373616150339568, |
|
"grad_norm": 0.012070410884916782, |
|
"learning_rate": 1.9754211122196945e-05, |
|
"loss": 0.6429, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.16559679970229788, |
|
"grad_norm": 0.013232079334557056, |
|
"learning_rate": 1.9739690956947652e-05, |
|
"loss": 0.6941, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.1674574379012001, |
|
"grad_norm": 0.012606708332896233, |
|
"learning_rate": 1.972475976426369e-05, |
|
"loss": 0.6554, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.16931807610010233, |
|
"grad_norm": 0.012638423591852188, |
|
"learning_rate": 1.9709418174260523e-05, |
|
"loss": 0.645, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.17117871429900455, |
|
"grad_norm": 0.013119902461767197, |
|
"learning_rate": 1.9693666834372896e-05, |
|
"loss": 0.6128, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.17303935249790678, |
|
"grad_norm": 0.011363113299012184, |
|
"learning_rate": 1.9677506409327532e-05, |
|
"loss": 0.6294, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.174899990696809, |
|
"grad_norm": 0.014238959178328514, |
|
"learning_rate": 1.9660937581115073e-05, |
|
"loss": 0.6647, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.17676062889571123, |
|
"grad_norm": 0.013214629143476486, |
|
"learning_rate": 1.9643961048961283e-05, |
|
"loss": 0.6037, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.17862126709461346, |
|
"grad_norm": 0.012312485836446285, |
|
"learning_rate": 1.9626577529297573e-05, |
|
"loss": 0.6703, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.18048190529351568, |
|
"grad_norm": 0.012318914756178856, |
|
"learning_rate": 1.9608787755730746e-05, |
|
"loss": 0.6141, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.1823425434924179, |
|
"grad_norm": 0.01374764647334814, |
|
"learning_rate": 1.9590592479012022e-05, |
|
"loss": 0.673, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.18420318169132013, |
|
"grad_norm": 0.012634415179491043, |
|
"learning_rate": 1.9571992467005395e-05, |
|
"loss": 0.6135, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.18606381989022236, |
|
"grad_norm": 0.012813772074878216, |
|
"learning_rate": 1.9552988504655194e-05, |
|
"loss": 0.6648, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.18606381989022236, |
|
"eval_loss": 0.6582558751106262, |
|
"eval_runtime": 26.9621, |
|
"eval_samples_per_second": 4.71, |
|
"eval_steps_per_second": 4.71, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.18792445808912456, |
|
"grad_norm": 0.012235240079462528, |
|
"learning_rate": 1.9533581393952978e-05, |
|
"loss": 0.6108, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.18978509628802678, |
|
"grad_norm": 0.012589952908456326, |
|
"learning_rate": 1.951377195390367e-05, |
|
"loss": 0.6218, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.191645734486929, |
|
"grad_norm": 0.012783786281943321, |
|
"learning_rate": 1.9493561020491024e-05, |
|
"loss": 0.6668, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.19350637268583123, |
|
"grad_norm": 0.013828632421791553, |
|
"learning_rate": 1.9472949446642318e-05, |
|
"loss": 0.6081, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.19536701088473346, |
|
"grad_norm": 0.011929171159863472, |
|
"learning_rate": 1.945193810219237e-05, |
|
"loss": 0.6329, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.1972276490836357, |
|
"grad_norm": 0.014584473334252834, |
|
"learning_rate": 1.9430527873846826e-05, |
|
"loss": 0.7017, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.1990882872825379, |
|
"grad_norm": 0.01474926806986332, |
|
"learning_rate": 1.9408719665144756e-05, |
|
"loss": 0.632, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.20094892548144014, |
|
"grad_norm": 0.015552829019725323, |
|
"learning_rate": 1.9386514396420503e-05, |
|
"loss": 0.6757, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.20280956368034236, |
|
"grad_norm": 0.013232480734586716, |
|
"learning_rate": 1.9363913004764847e-05, |
|
"loss": 0.6722, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.2046702018792446, |
|
"grad_norm": 0.012858827598392963, |
|
"learning_rate": 1.9340916443985465e-05, |
|
"loss": 0.6231, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.20653084007814682, |
|
"grad_norm": 0.012365566566586494, |
|
"learning_rate": 1.9317525684566686e-05, |
|
"loss": 0.5986, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.20839147827704904, |
|
"grad_norm": 0.01528852991759777, |
|
"learning_rate": 1.9293741713628518e-05, |
|
"loss": 0.6537, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.21025211647595124, |
|
"grad_norm": 0.014512522146105766, |
|
"learning_rate": 1.9269565534885003e-05, |
|
"loss": 0.6527, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.21211275467485347, |
|
"grad_norm": 0.013798325322568417, |
|
"learning_rate": 1.9244998168601848e-05, |
|
"loss": 0.6148, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.2139733928737557, |
|
"grad_norm": 0.013186248019337654, |
|
"learning_rate": 1.9220040651553388e-05, |
|
"loss": 0.6106, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.21583403107265792, |
|
"grad_norm": 0.013986771926283836, |
|
"learning_rate": 1.9194694036978807e-05, |
|
"loss": 0.6654, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.21769466927156014, |
|
"grad_norm": 0.016201818361878395, |
|
"learning_rate": 1.9168959394537708e-05, |
|
"loss": 0.6306, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.21955530747046237, |
|
"grad_norm": 0.013889294117689133, |
|
"learning_rate": 1.9142837810264972e-05, |
|
"loss": 0.6749, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.2214159456693646, |
|
"grad_norm": 0.013025142252445221, |
|
"learning_rate": 1.911633038652491e-05, |
|
"loss": 0.6075, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 0.22327658386826682, |
|
"grad_norm": 0.013716059736907482, |
|
"learning_rate": 1.9089438241964764e-05, |
|
"loss": 0.6516, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.22327658386826682, |
|
"eval_loss": 0.6547934412956238, |
|
"eval_runtime": 26.7928, |
|
"eval_samples_per_second": 4.74, |
|
"eval_steps_per_second": 4.74, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.22513722206716905, |
|
"grad_norm": 0.016251811757683754, |
|
"learning_rate": 1.906216251146748e-05, |
|
"loss": 0.6265, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 0.22699786026607127, |
|
"grad_norm": 0.013359563425183296, |
|
"learning_rate": 1.9034504346103825e-05, |
|
"loss": 0.6052, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.2288584984649735, |
|
"grad_norm": 0.012794552370905876, |
|
"learning_rate": 1.9006464913083807e-05, |
|
"loss": 0.613, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 0.2307191366638757, |
|
"grad_norm": 0.012533072382211685, |
|
"learning_rate": 1.897804539570742e-05, |
|
"loss": 0.6735, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.23257977486277792, |
|
"grad_norm": 0.013286220841109753, |
|
"learning_rate": 1.8949246993314694e-05, |
|
"loss": 0.6692, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.23444041306168015, |
|
"grad_norm": 0.013466808013617992, |
|
"learning_rate": 1.892007092123511e-05, |
|
"loss": 0.6513, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.23630105126058237, |
|
"grad_norm": 0.012991335242986679, |
|
"learning_rate": 1.8890518410736275e-05, |
|
"loss": 0.6405, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 0.2381616894594846, |
|
"grad_norm": 0.013223089277744293, |
|
"learning_rate": 1.8860590708971997e-05, |
|
"loss": 0.6488, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.24002232765838682, |
|
"grad_norm": 0.012394067831337452, |
|
"learning_rate": 1.8830289078929618e-05, |
|
"loss": 0.6131, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 0.24188296585728905, |
|
"grad_norm": 0.013721502386033535, |
|
"learning_rate": 1.8799614799376743e-05, |
|
"loss": 0.681, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.24374360405619128, |
|
"grad_norm": 0.012726777233183384, |
|
"learning_rate": 1.8768569164807272e-05, |
|
"loss": 0.6837, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 0.2456042422550935, |
|
"grad_norm": 0.013720668852329254, |
|
"learning_rate": 1.8737153485386737e-05, |
|
"loss": 0.6007, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.24746488045399573, |
|
"grad_norm": 0.012646087445318699, |
|
"learning_rate": 1.8705369086897063e-05, |
|
"loss": 0.6545, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 0.24932551865289795, |
|
"grad_norm": 0.013658811338245869, |
|
"learning_rate": 1.8673217310680578e-05, |
|
"loss": 0.6379, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.2511861568518002, |
|
"grad_norm": 0.012248256243765354, |
|
"learning_rate": 1.864069951358342e-05, |
|
"loss": 0.6356, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.2530467950507024, |
|
"grad_norm": 0.0133894681930542, |
|
"learning_rate": 1.860781706789829e-05, |
|
"loss": 0.6737, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.25490743324960463, |
|
"grad_norm": 0.01430124044418335, |
|
"learning_rate": 1.857457136130651e-05, |
|
"loss": 0.6169, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 0.25676807144850683, |
|
"grad_norm": 0.013437042012810707, |
|
"learning_rate": 1.854096379681949e-05, |
|
"loss": 0.6021, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.2586287096474091, |
|
"grad_norm": 0.011497768573462963, |
|
"learning_rate": 1.8506995792719498e-05, |
|
"loss": 0.6119, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 0.2604893478463113, |
|
"grad_norm": 0.01319235097616911, |
|
"learning_rate": 1.8472668782499817e-05, |
|
"loss": 0.627, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.2604893478463113, |
|
"eval_loss": 0.6522720456123352, |
|
"eval_runtime": 26.7201, |
|
"eval_samples_per_second": 4.753, |
|
"eval_steps_per_second": 4.753, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.26234998604521353, |
|
"grad_norm": 0.014044429175555706, |
|
"learning_rate": 1.843798421480426e-05, |
|
"loss": 0.6244, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 0.26421062424411573, |
|
"grad_norm": 0.011311609297990799, |
|
"learning_rate": 1.8402943553365998e-05, |
|
"loss": 0.5975, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.26607126244301793, |
|
"grad_norm": 0.012355692684650421, |
|
"learning_rate": 1.8367548276945846e-05, |
|
"loss": 0.6009, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 0.2679319006419202, |
|
"grad_norm": 0.012264437042176723, |
|
"learning_rate": 1.83317998792698e-05, |
|
"loss": 0.6103, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.2697925388408224, |
|
"grad_norm": 0.012037084437906742, |
|
"learning_rate": 1.8295699868966038e-05, |
|
"loss": 0.5602, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.27165317703972464, |
|
"grad_norm": 0.012773050926625729, |
|
"learning_rate": 1.8259249769501237e-05, |
|
"loss": 0.6215, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.27351381523862683, |
|
"grad_norm": 0.012415550649166107, |
|
"learning_rate": 1.8222451119116288e-05, |
|
"loss": 0.6364, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 0.2753744534375291, |
|
"grad_norm": 0.01302468404173851, |
|
"learning_rate": 1.8185305470761366e-05, |
|
"loss": 0.5994, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.2772350916364313, |
|
"grad_norm": 0.013447316363453865, |
|
"learning_rate": 1.814781439203043e-05, |
|
"loss": 0.6458, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 0.27909572983533354, |
|
"grad_norm": 0.012098093517124653, |
|
"learning_rate": 1.8109979465095014e-05, |
|
"loss": 0.6357, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.28095636803423574, |
|
"grad_norm": 0.012987499125301838, |
|
"learning_rate": 1.8071802286637505e-05, |
|
"loss": 0.6248, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 0.282817006233138, |
|
"grad_norm": 0.011747024022042751, |
|
"learning_rate": 1.8033284467783742e-05, |
|
"loss": 0.6202, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.2846776444320402, |
|
"grad_norm": 0.01332057174295187, |
|
"learning_rate": 1.7994427634035016e-05, |
|
"loss": 0.6347, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 0.2865382826309424, |
|
"grad_norm": 0.013383504003286362, |
|
"learning_rate": 1.795523342519948e-05, |
|
"loss": 0.6001, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.28839892082984464, |
|
"grad_norm": 0.013648821040987968, |
|
"learning_rate": 1.7915703495322967e-05, |
|
"loss": 0.6399, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.29025955902874684, |
|
"grad_norm": 0.012947522103786469, |
|
"learning_rate": 1.7875839512619148e-05, |
|
"loss": 0.6298, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.2921201972276491, |
|
"grad_norm": 0.01334394421428442, |
|
"learning_rate": 1.7835643159399156e-05, |
|
"loss": 0.6418, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 0.2939808354265513, |
|
"grad_norm": 0.014045110903680325, |
|
"learning_rate": 1.7795116132000587e-05, |
|
"loss": 0.6403, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.29584147362545354, |
|
"grad_norm": 0.015219368040561676, |
|
"learning_rate": 1.7754260140715918e-05, |
|
"loss": 0.6277, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 0.29770211182435574, |
|
"grad_norm": 0.01307649165391922, |
|
"learning_rate": 1.771307690972031e-05, |
|
"loss": 0.6271, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.29770211182435574, |
|
"eval_loss": 0.6502260565757751, |
|
"eval_runtime": 26.7042, |
|
"eval_samples_per_second": 4.756, |
|
"eval_steps_per_second": 4.756, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.299562750023258, |
|
"grad_norm": 0.013835963793098927, |
|
"learning_rate": 1.7671568176998865e-05, |
|
"loss": 0.6286, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 0.3014233882221602, |
|
"grad_norm": 0.013574733398854733, |
|
"learning_rate": 1.762973569427328e-05, |
|
"loss": 0.6462, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.30328402642106245, |
|
"grad_norm": 0.01131366565823555, |
|
"learning_rate": 1.758758122692791e-05, |
|
"loss": 0.6167, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 0.30514466461996465, |
|
"grad_norm": 0.013482702895998955, |
|
"learning_rate": 1.7545106553935277e-05, |
|
"loss": 0.6413, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.3070053028188669, |
|
"grad_norm": 0.012861824594438076, |
|
"learning_rate": 1.7502313467780988e-05, |
|
"loss": 0.6027, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 0.3088659410177691, |
|
"grad_norm": 0.012556380592286587, |
|
"learning_rate": 1.7459203774388097e-05, |
|
"loss": 0.6603, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.3107265792166713, |
|
"grad_norm": 0.012278062291443348, |
|
"learning_rate": 1.7415779293040887e-05, |
|
"loss": 0.5803, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 0.31258721741557355, |
|
"grad_norm": 0.012213567271828651, |
|
"learning_rate": 1.7372041856308098e-05, |
|
"loss": 0.6624, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.31444785561447575, |
|
"grad_norm": 0.0131307952105999, |
|
"learning_rate": 1.7327993309965583e-05, |
|
"loss": 0.6447, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 0.316308493813378, |
|
"grad_norm": 0.0121999466791749, |
|
"learning_rate": 1.7283635512918423e-05, |
|
"loss": 0.6451, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.3181691320122802, |
|
"grad_norm": 0.012872702442109585, |
|
"learning_rate": 1.7238970337122484e-05, |
|
"loss": 0.5724, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 0.32002977021118245, |
|
"grad_norm": 0.013137550093233585, |
|
"learning_rate": 1.7193999667505387e-05, |
|
"loss": 0.6459, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.32189040841008465, |
|
"grad_norm": 0.013948196545243263, |
|
"learning_rate": 1.7148725401887002e-05, |
|
"loss": 0.651, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 0.3237510466089869, |
|
"grad_norm": 0.012517811730504036, |
|
"learning_rate": 1.710314945089933e-05, |
|
"loss": 0.6114, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.3256116848078891, |
|
"grad_norm": 0.014199101366102695, |
|
"learning_rate": 1.7057273737905887e-05, |
|
"loss": 0.6405, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 0.32747232300679135, |
|
"grad_norm": 0.012591714970767498, |
|
"learning_rate": 1.7011100198920528e-05, |
|
"loss": 0.6767, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.32933296120569355, |
|
"grad_norm": 0.012114683166146278, |
|
"learning_rate": 1.6964630782525743e-05, |
|
"loss": 0.6037, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 0.33119359940459575, |
|
"grad_norm": 0.013139299117028713, |
|
"learning_rate": 1.6917867449790432e-05, |
|
"loss": 0.643, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.333054237603498, |
|
"grad_norm": 0.012391779571771622, |
|
"learning_rate": 1.6870812174187136e-05, |
|
"loss": 0.647, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 0.3349148758024002, |
|
"grad_norm": 0.013360656797885895, |
|
"learning_rate": 1.6823466941508762e-05, |
|
"loss": 0.725, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.3349148758024002, |
|
"eval_loss": 0.6488396525382996, |
|
"eval_runtime": 27.3774, |
|
"eval_samples_per_second": 4.639, |
|
"eval_steps_per_second": 4.639, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.33677551400130246, |
|
"grad_norm": 0.013424807228147984, |
|
"learning_rate": 1.677583374978478e-05, |
|
"loss": 0.6342, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 0.33863615220020465, |
|
"grad_norm": 0.01573541946709156, |
|
"learning_rate": 1.6727914609196895e-05, |
|
"loss": 0.6562, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.3404967903991069, |
|
"grad_norm": 0.011363287456333637, |
|
"learning_rate": 1.6679711541994227e-05, |
|
"loss": 0.6492, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 0.3423574285980091, |
|
"grad_norm": 0.012154373340308666, |
|
"learning_rate": 1.6631226582407954e-05, |
|
"loss": 0.602, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.34421806679691136, |
|
"grad_norm": 0.01744014024734497, |
|
"learning_rate": 1.658246177656548e-05, |
|
"loss": 0.6318, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 0.34607870499581356, |
|
"grad_norm": 0.012465902604162693, |
|
"learning_rate": 1.6533419182404078e-05, |
|
"loss": 0.6522, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.3479393431947158, |
|
"grad_norm": 0.015055039897561073, |
|
"learning_rate": 1.6484100869584044e-05, |
|
"loss": 0.6376, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 0.349799981393618, |
|
"grad_norm": 0.012351201847195625, |
|
"learning_rate": 1.6434508919401357e-05, |
|
"loss": 0.6206, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.3516606195925202, |
|
"grad_norm": 0.012793191708624363, |
|
"learning_rate": 1.6384645424699835e-05, |
|
"loss": 0.6182, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 0.35352125779142246, |
|
"grad_norm": 0.012946651317179203, |
|
"learning_rate": 1.6334512489782833e-05, |
|
"loss": 0.5839, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.35538189599032466, |
|
"grad_norm": 0.012998082675039768, |
|
"learning_rate": 1.628411223032442e-05, |
|
"loss": 0.6517, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 0.3572425341892269, |
|
"grad_norm": 0.012614963576197624, |
|
"learning_rate": 1.6233446773280113e-05, |
|
"loss": 0.6235, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 0.3591031723881291, |
|
"grad_norm": 0.012318151071667671, |
|
"learning_rate": 1.6182518256797095e-05, |
|
"loss": 0.664, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 0.36096381058703136, |
|
"grad_norm": 0.012551162391901016, |
|
"learning_rate": 1.6131328830123997e-05, |
|
"loss": 0.6317, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 0.36282444878593356, |
|
"grad_norm": 0.013372802175581455, |
|
"learning_rate": 1.60798806535202e-05, |
|
"loss": 0.6418, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 0.3646850869848358, |
|
"grad_norm": 0.011675640940666199, |
|
"learning_rate": 1.6028175898164665e-05, |
|
"loss": 0.6118, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 0.366545725183738, |
|
"grad_norm": 0.013295911252498627, |
|
"learning_rate": 1.5976216746064294e-05, |
|
"loss": 0.6217, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 0.36840636338264027, |
|
"grad_norm": 0.012895721010863781, |
|
"learning_rate": 1.5924005389961866e-05, |
|
"loss": 0.6436, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 0.37026700158154247, |
|
"grad_norm": 0.014572090469300747, |
|
"learning_rate": 1.5871544033243488e-05, |
|
"loss": 0.6342, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 0.3721276397804447, |
|
"grad_norm": 0.012393898330628872, |
|
"learning_rate": 1.581883488984562e-05, |
|
"loss": 0.6218, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.3721276397804447, |
|
"eval_loss": 0.6476932168006897, |
|
"eval_runtime": 27.1488, |
|
"eval_samples_per_second": 4.678, |
|
"eval_steps_per_second": 4.678, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.3739882779793469, |
|
"grad_norm": 0.014692210592329502, |
|
"learning_rate": 1.5765880184161625e-05, |
|
"loss": 0.6216, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 0.3758489161782491, |
|
"grad_norm": 0.012152746319770813, |
|
"learning_rate": 1.5712682150947926e-05, |
|
"loss": 0.6243, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 0.37770955437715137, |
|
"grad_norm": 0.012929155491292477, |
|
"learning_rate": 1.5659243035229657e-05, |
|
"loss": 0.6493, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 0.37957019257605357, |
|
"grad_norm": 0.0136475944891572, |
|
"learning_rate": 1.5605565092205973e-05, |
|
"loss": 0.6506, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 0.3814308307749558, |
|
"grad_norm": 0.014008302241563797, |
|
"learning_rate": 1.5551650587154815e-05, |
|
"loss": 0.6429, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 0.383291468973858, |
|
"grad_norm": 0.014000017195940018, |
|
"learning_rate": 1.5497501795337366e-05, |
|
"loss": 0.6277, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 0.38515210717276027, |
|
"grad_norm": 0.012146887369453907, |
|
"learning_rate": 1.5443121001901994e-05, |
|
"loss": 0.635, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 0.38701274537166247, |
|
"grad_norm": 0.013878699392080307, |
|
"learning_rate": 1.5388510501787855e-05, |
|
"loss": 0.6416, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 0.3888733835705647, |
|
"grad_norm": 0.011823480948805809, |
|
"learning_rate": 1.5333672599628005e-05, |
|
"loss": 0.637, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 0.3907340217694669, |
|
"grad_norm": 0.012524113990366459, |
|
"learning_rate": 1.527860960965216e-05, |
|
"loss": 0.6763, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.3925946599683692, |
|
"grad_norm": 0.013192784041166306, |
|
"learning_rate": 1.5223323855589027e-05, |
|
"loss": 0.6501, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 0.3944552981672714, |
|
"grad_norm": 0.012439992278814316, |
|
"learning_rate": 1.5167817670568253e-05, |
|
"loss": 0.5886, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 0.39631593636617357, |
|
"grad_norm": 0.013605669140815735, |
|
"learning_rate": 1.5112093397021945e-05, |
|
"loss": 0.5925, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 0.3981765745650758, |
|
"grad_norm": 0.01384530495852232, |
|
"learning_rate": 1.5056153386585828e-05, |
|
"loss": 0.6607, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 0.400037212763978, |
|
"grad_norm": 0.014060786925256252, |
|
"learning_rate": 1.5000000000000002e-05, |
|
"loss": 0.6458, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 0.4018978509628803, |
|
"grad_norm": 0.012853951193392277, |
|
"learning_rate": 1.494363560700931e-05, |
|
"loss": 0.6028, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 0.4037584891617825, |
|
"grad_norm": 0.012846381403505802, |
|
"learning_rate": 1.4887062586263334e-05, |
|
"loss": 0.6543, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 0.40561912736068473, |
|
"grad_norm": 0.013001542538404465, |
|
"learning_rate": 1.4830283325216026e-05, |
|
"loss": 0.5654, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 0.4074797655595869, |
|
"grad_norm": 0.012333175167441368, |
|
"learning_rate": 1.477330022002493e-05, |
|
"loss": 0.6465, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 0.4093404037584892, |
|
"grad_norm": 0.01342159602791071, |
|
"learning_rate": 1.4716115675450078e-05, |
|
"loss": 0.6168, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.4093404037584892, |
|
"eval_loss": 0.6468775272369385, |
|
"eval_runtime": 26.8263, |
|
"eval_samples_per_second": 4.734, |
|
"eval_steps_per_second": 4.734, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.4112010419573914, |
|
"grad_norm": 0.0139292748644948, |
|
"learning_rate": 1.4658732104752507e-05, |
|
"loss": 0.634, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 0.41306168015629363, |
|
"grad_norm": 0.013169731944799423, |
|
"learning_rate": 1.4601151929592403e-05, |
|
"loss": 0.6227, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 0.41492231835519583, |
|
"grad_norm": 0.013705245219171047, |
|
"learning_rate": 1.4543377579926915e-05, |
|
"loss": 0.6441, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 0.4167829565540981, |
|
"grad_norm": 0.013035726733505726, |
|
"learning_rate": 1.4485411493907617e-05, |
|
"loss": 0.6498, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 0.4186435947530003, |
|
"grad_norm": 0.01190096139907837, |
|
"learning_rate": 1.442725611777758e-05, |
|
"loss": 0.6285, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 0.4205042329519025, |
|
"grad_norm": 0.013753347098827362, |
|
"learning_rate": 1.4368913905768178e-05, |
|
"loss": 0.6541, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 0.42236487115080473, |
|
"grad_norm": 0.012330746278166771, |
|
"learning_rate": 1.4310387319995492e-05, |
|
"loss": 0.6721, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 0.42422550934970693, |
|
"grad_norm": 0.01278294064104557, |
|
"learning_rate": 1.4251678830356408e-05, |
|
"loss": 0.6589, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 0.4260861475486092, |
|
"grad_norm": 0.012772184796631336, |
|
"learning_rate": 1.41927909144244e-05, |
|
"loss": 0.6411, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 0.4279467857475114, |
|
"grad_norm": 0.012047790922224522, |
|
"learning_rate": 1.413372605734495e-05, |
|
"loss": 0.5759, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.42980742394641364, |
|
"grad_norm": 0.014543715864419937, |
|
"learning_rate": 1.4074486751730687e-05, |
|
"loss": 0.6578, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 0.43166806214531583, |
|
"grad_norm": 0.013436605222523212, |
|
"learning_rate": 1.4015075497556193e-05, |
|
"loss": 0.5876, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 0.4335287003442181, |
|
"grad_norm": 0.011584432795643806, |
|
"learning_rate": 1.3955494802052498e-05, |
|
"loss": 0.656, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 0.4353893385431203, |
|
"grad_norm": 0.012196795083582401, |
|
"learning_rate": 1.3895747179601275e-05, |
|
"loss": 0.6562, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 0.43724997674202254, |
|
"grad_norm": 0.011762428097426891, |
|
"learning_rate": 1.3835835151628728e-05, |
|
"loss": 0.5918, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 0.43911061494092474, |
|
"grad_norm": 0.014100808650255203, |
|
"learning_rate": 1.3775761246499177e-05, |
|
"loss": 0.6216, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 0.44097125313982694, |
|
"grad_norm": 0.011724433861672878, |
|
"learning_rate": 1.3715527999408376e-05, |
|
"loss": 0.6434, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 0.4428318913387292, |
|
"grad_norm": 0.013045977801084518, |
|
"learning_rate": 1.365513795227651e-05, |
|
"loss": 0.5915, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 0.4446925295376314, |
|
"grad_norm": 0.013339078053832054, |
|
"learning_rate": 1.359459365364092e-05, |
|
"loss": 0.6148, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 0.44655316773653364, |
|
"grad_norm": 0.011851554736495018, |
|
"learning_rate": 1.3533897658548571e-05, |
|
"loss": 0.6294, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.44655316773653364, |
|
"eval_loss": 0.6459712982177734, |
|
"eval_runtime": 27.1858, |
|
"eval_samples_per_second": 4.672, |
|
"eval_steps_per_second": 4.672, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.44841380593543584, |
|
"grad_norm": 0.014516811817884445, |
|
"learning_rate": 1.3473052528448203e-05, |
|
"loss": 0.6052, |
|
"step": 1205 |
|
}, |
|
{ |
|
"epoch": 0.4502744441343381, |
|
"grad_norm": 0.013740907423198223, |
|
"learning_rate": 1.341206083108225e-05, |
|
"loss": 0.6035, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 0.4521350823332403, |
|
"grad_norm": 0.013699905015528202, |
|
"learning_rate": 1.3350925140378465e-05, |
|
"loss": 0.64, |
|
"step": 1215 |
|
}, |
|
{ |
|
"epoch": 0.45399572053214254, |
|
"grad_norm": 0.013219136744737625, |
|
"learning_rate": 1.328964803634131e-05, |
|
"loss": 0.6102, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 0.45585635873104474, |
|
"grad_norm": 0.012603058479726315, |
|
"learning_rate": 1.3228232104943073e-05, |
|
"loss": 0.5452, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 0.457716996929947, |
|
"grad_norm": 0.012072132900357246, |
|
"learning_rate": 1.3166679938014728e-05, |
|
"loss": 0.5864, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 0.4595776351288492, |
|
"grad_norm": 0.01093310210853815, |
|
"learning_rate": 1.3104994133136563e-05, |
|
"loss": 0.6122, |
|
"step": 1235 |
|
}, |
|
{ |
|
"epoch": 0.4614382733277514, |
|
"grad_norm": 0.01245942059904337, |
|
"learning_rate": 1.3043177293528571e-05, |
|
"loss": 0.5889, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 0.46329891152665365, |
|
"grad_norm": 0.013127041980624199, |
|
"learning_rate": 1.2981232027940562e-05, |
|
"loss": 0.6225, |
|
"step": 1245 |
|
}, |
|
{ |
|
"epoch": 0.46515954972555584, |
|
"grad_norm": 0.018618909642100334, |
|
"learning_rate": 1.2919160950542095e-05, |
|
"loss": 0.6189, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 0.4670201879244581, |
|
"grad_norm": 0.013042682781815529, |
|
"learning_rate": 1.2856966680812148e-05, |
|
"loss": 0.674, |
|
"step": 1255 |
|
}, |
|
{ |
|
"epoch": 0.4688808261233603, |
|
"grad_norm": 0.013208975084125996, |
|
"learning_rate": 1.2794651843428575e-05, |
|
"loss": 0.6084, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 0.47074146432226255, |
|
"grad_norm": 0.012473770417273045, |
|
"learning_rate": 1.2732219068157335e-05, |
|
"loss": 0.5748, |
|
"step": 1265 |
|
}, |
|
{ |
|
"epoch": 0.47260210252116475, |
|
"grad_norm": 0.013629582710564137, |
|
"learning_rate": 1.2669670989741519e-05, |
|
"loss": 0.6358, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 0.474462740720067, |
|
"grad_norm": 0.014257396571338177, |
|
"learning_rate": 1.2607010247790158e-05, |
|
"loss": 0.6794, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 0.4763233789189692, |
|
"grad_norm": 0.0164741612970829, |
|
"learning_rate": 1.2544239486666831e-05, |
|
"loss": 0.6647, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 0.47818401711787145, |
|
"grad_norm": 0.012896180152893066, |
|
"learning_rate": 1.2481361355378066e-05, |
|
"loss": 0.6413, |
|
"step": 1285 |
|
}, |
|
{ |
|
"epoch": 0.48004465531677365, |
|
"grad_norm": 0.012296337634325027, |
|
"learning_rate": 1.2418378507461544e-05, |
|
"loss": 0.62, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 0.4819052935156759, |
|
"grad_norm": 0.01213445421308279, |
|
"learning_rate": 1.2355293600874132e-05, |
|
"loss": 0.6611, |
|
"step": 1295 |
|
}, |
|
{ |
|
"epoch": 0.4837659317145781, |
|
"grad_norm": 0.01198558695614338, |
|
"learning_rate": 1.229210929787969e-05, |
|
"loss": 0.6438, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.4837659317145781, |
|
"eval_loss": 0.6453238725662231, |
|
"eval_runtime": 26.6037, |
|
"eval_samples_per_second": 4.774, |
|
"eval_steps_per_second": 4.774, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.4856265699134803, |
|
"grad_norm": 0.012105841189622879, |
|
"learning_rate": 1.2228828264936755e-05, |
|
"loss": 0.675, |
|
"step": 1305 |
|
}, |
|
{ |
|
"epoch": 0.48748720811238255, |
|
"grad_norm": 0.012657279148697853, |
|
"learning_rate": 1.2165453172585964e-05, |
|
"loss": 0.6066, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 0.48934784631128475, |
|
"grad_norm": 0.01320530753582716, |
|
"learning_rate": 1.2101986695337407e-05, |
|
"loss": 0.6578, |
|
"step": 1315 |
|
}, |
|
{ |
|
"epoch": 0.491208484510187, |
|
"grad_norm": 0.012736879289150238, |
|
"learning_rate": 1.2038431511557715e-05, |
|
"loss": 0.6596, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 0.4930691227090892, |
|
"grad_norm": 0.013182558119297028, |
|
"learning_rate": 1.197479030335706e-05, |
|
"loss": 0.595, |
|
"step": 1325 |
|
}, |
|
{ |
|
"epoch": 0.49492976090799146, |
|
"grad_norm": 0.013970241881906986, |
|
"learning_rate": 1.1911065756475953e-05, |
|
"loss": 0.6525, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 0.49679039910689365, |
|
"grad_norm": 0.012158108875155449, |
|
"learning_rate": 1.1847260560171895e-05, |
|
"loss": 0.576, |
|
"step": 1335 |
|
}, |
|
{ |
|
"epoch": 0.4986510373057959, |
|
"grad_norm": 0.012398924678564072, |
|
"learning_rate": 1.1783377407105907e-05, |
|
"loss": 0.6039, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 0.5005116755046981, |
|
"grad_norm": 0.013791786506772041, |
|
"learning_rate": 1.1719418993228883e-05, |
|
"loss": 0.6585, |
|
"step": 1345 |
|
}, |
|
{ |
|
"epoch": 0.5023723137036004, |
|
"grad_norm": 0.011280239559710026, |
|
"learning_rate": 1.1655388017667812e-05, |
|
"loss": 0.5919, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 0.5042329519025026, |
|
"grad_norm": 0.015436794608831406, |
|
"learning_rate": 1.159128718261189e-05, |
|
"loss": 0.6632, |
|
"step": 1355 |
|
}, |
|
{ |
|
"epoch": 0.5060935901014048, |
|
"grad_norm": 0.015739573165774345, |
|
"learning_rate": 1.1527119193198466e-05, |
|
"loss": 0.6384, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 0.507954228300307, |
|
"grad_norm": 0.01318281702697277, |
|
"learning_rate": 1.146288675739889e-05, |
|
"loss": 0.6312, |
|
"step": 1365 |
|
}, |
|
{ |
|
"epoch": 0.5098148664992093, |
|
"grad_norm": 0.01480270829051733, |
|
"learning_rate": 1.1398592585904234e-05, |
|
"loss": 0.6453, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 0.5116755046981114, |
|
"grad_norm": 0.011967113241553307, |
|
"learning_rate": 1.133423939201089e-05, |
|
"loss": 0.6335, |
|
"step": 1375 |
|
}, |
|
{ |
|
"epoch": 0.5135361428970137, |
|
"grad_norm": 0.013129732571542263, |
|
"learning_rate": 1.1269829891506081e-05, |
|
"loss": 0.5852, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 0.5153967810959159, |
|
"grad_norm": 0.0132956113666296, |
|
"learning_rate": 1.1205366802553231e-05, |
|
"loss": 0.6477, |
|
"step": 1385 |
|
}, |
|
{ |
|
"epoch": 0.5172574192948182, |
|
"grad_norm": 0.01314165536314249, |
|
"learning_rate": 1.1140852845577273e-05, |
|
"loss": 0.6441, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 0.5191180574937203, |
|
"grad_norm": 0.01178740430623293, |
|
"learning_rate": 1.1076290743149827e-05, |
|
"loss": 0.6035, |
|
"step": 1395 |
|
}, |
|
{ |
|
"epoch": 0.5209786956926226, |
|
"grad_norm": 0.013940893113613129, |
|
"learning_rate": 1.1011683219874324e-05, |
|
"loss": 0.6492, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 0.5209786956926226, |
|
"eval_loss": 0.644782543182373, |
|
"eval_runtime": 26.5697, |
|
"eval_samples_per_second": 4.78, |
|
"eval_steps_per_second": 4.78, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 0.5228393338915248, |
|
"grad_norm": 0.013468287885189056, |
|
"learning_rate": 1.0947033002271001e-05, |
|
"loss": 0.6135, |
|
"step": 1405 |
|
}, |
|
{ |
|
"epoch": 0.5246999720904271, |
|
"grad_norm": 0.015101495198905468, |
|
"learning_rate": 1.0882342818661859e-05, |
|
"loss": 0.6449, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 0.5265606102893292, |
|
"grad_norm": 0.013072527013719082, |
|
"learning_rate": 1.0817615399055513e-05, |
|
"loss": 0.6252, |
|
"step": 1415 |
|
}, |
|
{ |
|
"epoch": 0.5284212484882315, |
|
"grad_norm": 0.01259040180593729, |
|
"learning_rate": 1.075285347503198e-05, |
|
"loss": 0.6274, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 0.5302818866871337, |
|
"grad_norm": 0.014084907248616219, |
|
"learning_rate": 1.0688059779627417e-05, |
|
"loss": 0.6298, |
|
"step": 1425 |
|
}, |
|
{ |
|
"epoch": 0.5321425248860359, |
|
"grad_norm": 0.015118095092475414, |
|
"learning_rate": 1.0623237047218771e-05, |
|
"loss": 0.6638, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 0.5340031630849381, |
|
"grad_norm": 0.013168774545192719, |
|
"learning_rate": 1.0558388013408378e-05, |
|
"loss": 0.6134, |
|
"step": 1435 |
|
}, |
|
{ |
|
"epoch": 0.5358638012838404, |
|
"grad_norm": 0.011875185184180737, |
|
"learning_rate": 1.0493515414908542e-05, |
|
"loss": 0.6396, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 0.5377244394827426, |
|
"grad_norm": 0.013676362112164497, |
|
"learning_rate": 1.0428621989426016e-05, |
|
"loss": 0.6286, |
|
"step": 1445 |
|
}, |
|
{ |
|
"epoch": 0.5395850776816448, |
|
"grad_norm": 0.012775209732353687, |
|
"learning_rate": 1.0363710475546483e-05, |
|
"loss": 0.6156, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 0.541445715880547, |
|
"grad_norm": 0.014763396233320236, |
|
"learning_rate": 1.0298783612618977e-05, |
|
"loss": 0.6713, |
|
"step": 1455 |
|
}, |
|
{ |
|
"epoch": 0.5433063540794493, |
|
"grad_norm": 0.014012233354151249, |
|
"learning_rate": 1.0233844140640287e-05, |
|
"loss": 0.5887, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 0.5451669922783515, |
|
"grad_norm": 0.013994456268846989, |
|
"learning_rate": 1.0168894800139311e-05, |
|
"loss": 0.6509, |
|
"step": 1465 |
|
}, |
|
{ |
|
"epoch": 0.5470276304772537, |
|
"grad_norm": 0.014792009256780148, |
|
"learning_rate": 1.0103938332061422e-05, |
|
"loss": 0.6434, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 0.5488882686761559, |
|
"grad_norm": 0.01255644578486681, |
|
"learning_rate": 1.0038977477652779e-05, |
|
"loss": 0.6407, |
|
"step": 1475 |
|
}, |
|
{ |
|
"epoch": 0.5507489068750582, |
|
"grad_norm": 0.013431582599878311, |
|
"learning_rate": 9.974014978344646e-06, |
|
"loss": 0.6528, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 0.5526095450739603, |
|
"grad_norm": 0.0123568931594491, |
|
"learning_rate": 9.909053575637717e-06, |
|
"loss": 0.602, |
|
"step": 1485 |
|
}, |
|
{ |
|
"epoch": 0.5544701832728626, |
|
"grad_norm": 0.014569776132702827, |
|
"learning_rate": 9.844096010986392e-06, |
|
"loss": 0.6268, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 0.5563308214717648, |
|
"grad_norm": 0.012518184259533882, |
|
"learning_rate": 9.779145025683114e-06, |
|
"loss": 0.5936, |
|
"step": 1495 |
|
}, |
|
{ |
|
"epoch": 0.5581914596706671, |
|
"grad_norm": 0.011615007184445858, |
|
"learning_rate": 9.714203360742666e-06, |
|
"loss": 0.6275, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.5581914596706671, |
|
"eval_loss": 0.6442868709564209, |
|
"eval_runtime": 26.5597, |
|
"eval_samples_per_second": 4.782, |
|
"eval_steps_per_second": 4.782, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.5600520978695692, |
|
"grad_norm": 0.013426556251943111, |
|
"learning_rate": 9.649273756786486e-06, |
|
"loss": 0.6291, |
|
"step": 1505 |
|
}, |
|
{ |
|
"epoch": 0.5619127360684715, |
|
"grad_norm": 0.01306986529380083, |
|
"learning_rate": 9.584358953927043e-06, |
|
"loss": 0.6211, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 0.5637733742673737, |
|
"grad_norm": 0.012594708241522312, |
|
"learning_rate": 9.519461691652169e-06, |
|
"loss": 0.5803, |
|
"step": 1515 |
|
}, |
|
{ |
|
"epoch": 0.565634012466276, |
|
"grad_norm": 0.014830299653112888, |
|
"learning_rate": 9.454584708709462e-06, |
|
"loss": 0.5976, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 0.5674946506651781, |
|
"grad_norm": 0.013333864510059357, |
|
"learning_rate": 9.389730742990714e-06, |
|
"loss": 0.6154, |
|
"step": 1525 |
|
}, |
|
{ |
|
"epoch": 0.5693552888640804, |
|
"grad_norm": 0.013150627724826336, |
|
"learning_rate": 9.324902531416348e-06, |
|
"loss": 0.581, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 0.5712159270629826, |
|
"grad_norm": 0.012874056585133076, |
|
"learning_rate": 9.260102809819939e-06, |
|
"loss": 0.6224, |
|
"step": 1535 |
|
}, |
|
{ |
|
"epoch": 0.5730765652618848, |
|
"grad_norm": 0.012678616680204868, |
|
"learning_rate": 9.195334312832742e-06, |
|
"loss": 0.6705, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 0.574937203460787, |
|
"grad_norm": 0.011814710684120655, |
|
"learning_rate": 9.1305997737683e-06, |
|
"loss": 0.6103, |
|
"step": 1545 |
|
}, |
|
{ |
|
"epoch": 0.5767978416596893, |
|
"grad_norm": 0.013010908849537373, |
|
"learning_rate": 9.065901924507085e-06, |
|
"loss": 0.655, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 0.5786584798585915, |
|
"grad_norm": 0.013622297905385494, |
|
"learning_rate": 9.001243495381207e-06, |
|
"loss": 0.5961, |
|
"step": 1555 |
|
}, |
|
{ |
|
"epoch": 0.5805191180574937, |
|
"grad_norm": 0.013876644894480705, |
|
"learning_rate": 8.936627215059206e-06, |
|
"loss": 0.6789, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 0.5823797562563959, |
|
"grad_norm": 0.01281541958451271, |
|
"learning_rate": 8.872055810430881e-06, |
|
"loss": 0.6567, |
|
"step": 1565 |
|
}, |
|
{ |
|
"epoch": 0.5842403944552982, |
|
"grad_norm": 0.012182512320578098, |
|
"learning_rate": 8.80753200649222e-06, |
|
"loss": 0.6338, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 0.5861010326542004, |
|
"grad_norm": 0.012718496844172478, |
|
"learning_rate": 8.743058526230409e-06, |
|
"loss": 0.6151, |
|
"step": 1575 |
|
}, |
|
{ |
|
"epoch": 0.5879616708531026, |
|
"grad_norm": 0.01301120687276125, |
|
"learning_rate": 8.678638090508897e-06, |
|
"loss": 0.6147, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 0.5898223090520048, |
|
"grad_norm": 0.011715607717633247, |
|
"learning_rate": 8.614273417952593e-06, |
|
"loss": 0.5776, |
|
"step": 1585 |
|
}, |
|
{ |
|
"epoch": 0.5916829472509071, |
|
"grad_norm": 0.013846187852323055, |
|
"learning_rate": 8.549967224833131e-06, |
|
"loss": 0.6604, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 0.5935435854498092, |
|
"grad_norm": 0.013312350027263165, |
|
"learning_rate": 8.485722224954237e-06, |
|
"loss": 0.6395, |
|
"step": 1595 |
|
}, |
|
{ |
|
"epoch": 0.5954042236487115, |
|
"grad_norm": 0.014313463121652603, |
|
"learning_rate": 8.421541129537194e-06, |
|
"loss": 0.6848, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 0.5954042236487115, |
|
"eval_loss": 0.6439012885093689, |
|
"eval_runtime": 26.5935, |
|
"eval_samples_per_second": 4.776, |
|
"eval_steps_per_second": 4.776, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 0.5972648618476137, |
|
"grad_norm": 0.012196023017168045, |
|
"learning_rate": 8.357426647106451e-06, |
|
"loss": 0.6079, |
|
"step": 1605 |
|
}, |
|
{ |
|
"epoch": 0.599125500046516, |
|
"grad_norm": 0.0135956397280097, |
|
"learning_rate": 8.293381483375293e-06, |
|
"loss": 0.6463, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 0.6009861382454181, |
|
"grad_norm": 0.011982797645032406, |
|
"learning_rate": 8.229408341131665e-06, |
|
"loss": 0.6113, |
|
"step": 1615 |
|
}, |
|
{ |
|
"epoch": 0.6028467764443204, |
|
"grad_norm": 0.014127896167337894, |
|
"learning_rate": 8.165509920124125e-06, |
|
"loss": 0.6602, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 0.6047074146432226, |
|
"grad_norm": 0.012588880024850368, |
|
"learning_rate": 8.10168891694789e-06, |
|
"loss": 0.6376, |
|
"step": 1625 |
|
}, |
|
{ |
|
"epoch": 0.6065680528421249, |
|
"grad_norm": 0.012559432536363602, |
|
"learning_rate": 8.037948024931039e-06, |
|
"loss": 0.6336, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 0.608428691041027, |
|
"grad_norm": 0.012455436401069164, |
|
"learning_rate": 7.974289934020879e-06, |
|
"loss": 0.6403, |
|
"step": 1635 |
|
}, |
|
{ |
|
"epoch": 0.6102893292399293, |
|
"grad_norm": 0.01241991762071848, |
|
"learning_rate": 7.91071733067038e-06, |
|
"loss": 0.6518, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 0.6121499674388315, |
|
"grad_norm": 0.012328005395829678, |
|
"learning_rate": 7.84723289772484e-06, |
|
"loss": 0.6162, |
|
"step": 1645 |
|
}, |
|
{ |
|
"epoch": 0.6140106056377338, |
|
"grad_norm": 0.012782512232661247, |
|
"learning_rate": 7.783839314308656e-06, |
|
"loss": 0.6624, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 0.6158712438366359, |
|
"grad_norm": 0.012050081044435501, |
|
"learning_rate": 7.720539255712252e-06, |
|
"loss": 0.6565, |
|
"step": 1655 |
|
}, |
|
{ |
|
"epoch": 0.6177318820355382, |
|
"grad_norm": 0.013794245198369026, |
|
"learning_rate": 7.657335393279179e-06, |
|
"loss": 0.6475, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 0.6195925202344404, |
|
"grad_norm": 0.012430761009454727, |
|
"learning_rate": 7.594230394293404e-06, |
|
"loss": 0.5821, |
|
"step": 1665 |
|
}, |
|
{ |
|
"epoch": 0.6214531584333426, |
|
"grad_norm": 0.013150406070053577, |
|
"learning_rate": 7.531226921866715e-06, |
|
"loss": 0.6023, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 0.6233137966322448, |
|
"grad_norm": 0.013466687873005867, |
|
"learning_rate": 7.468327634826354e-06, |
|
"loss": 0.637, |
|
"step": 1675 |
|
}, |
|
{ |
|
"epoch": 0.6251744348311471, |
|
"grad_norm": 0.014000273309648037, |
|
"learning_rate": 7.405535187602809e-06, |
|
"loss": 0.6113, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 0.6270350730300494, |
|
"grad_norm": 0.013736380264163017, |
|
"learning_rate": 7.3428522301177894e-06, |
|
"loss": 0.5914, |
|
"step": 1685 |
|
}, |
|
{ |
|
"epoch": 0.6288957112289515, |
|
"grad_norm": 0.013854081742465496, |
|
"learning_rate": 7.2802814076723896e-06, |
|
"loss": 0.6744, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 0.6307563494278537, |
|
"grad_norm": 0.012866591103374958, |
|
"learning_rate": 7.217825360835475e-06, |
|
"loss": 0.6209, |
|
"step": 1695 |
|
}, |
|
{ |
|
"epoch": 0.632616987626756, |
|
"grad_norm": 0.012788123451173306, |
|
"learning_rate": 7.155486725332224e-06, |
|
"loss": 0.5764, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 0.632616987626756, |
|
"eval_loss": 0.6434745192527771, |
|
"eval_runtime": 26.5737, |
|
"eval_samples_per_second": 4.779, |
|
"eval_steps_per_second": 4.779, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 0.6344776258256583, |
|
"grad_norm": 0.013442011550068855, |
|
"learning_rate": 7.093268131932905e-06, |
|
"loss": 0.6522, |
|
"step": 1705 |
|
}, |
|
{ |
|
"epoch": 0.6363382640245604, |
|
"grad_norm": 0.013659958727657795, |
|
"learning_rate": 7.03117220634187e-06, |
|
"loss": 0.5949, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 0.6381989022234626, |
|
"grad_norm": 0.013367819599807262, |
|
"learning_rate": 6.9692015690867135e-06, |
|
"loss": 0.5959, |
|
"step": 1715 |
|
}, |
|
{ |
|
"epoch": 0.6400595404223649, |
|
"grad_norm": 0.013543561100959778, |
|
"learning_rate": 6.9073588354077125e-06, |
|
"loss": 0.6539, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 0.641920178621267, |
|
"grad_norm": 0.014200146310031414, |
|
"learning_rate": 6.845646615147445e-06, |
|
"loss": 0.6438, |
|
"step": 1725 |
|
}, |
|
{ |
|
"epoch": 0.6437808168201693, |
|
"grad_norm": 0.012410931289196014, |
|
"learning_rate": 6.784067512640666e-06, |
|
"loss": 0.6035, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 0.6456414550190716, |
|
"grad_norm": 0.01299245934933424, |
|
"learning_rate": 6.7226241266043735e-06, |
|
"loss": 0.6507, |
|
"step": 1735 |
|
}, |
|
{ |
|
"epoch": 0.6475020932179738, |
|
"grad_norm": 0.013046164996922016, |
|
"learning_rate": 6.661319050028167e-06, |
|
"loss": 0.6277, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 0.649362731416876, |
|
"grad_norm": 0.013011117465794086, |
|
"learning_rate": 6.600154870064812e-06, |
|
"loss": 0.6415, |
|
"step": 1745 |
|
}, |
|
{ |
|
"epoch": 0.6512233696157782, |
|
"grad_norm": 0.012195507064461708, |
|
"learning_rate": 6.53913416792105e-06, |
|
"loss": 0.5718, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 0.6530840078146805, |
|
"grad_norm": 0.012789854779839516, |
|
"learning_rate": 6.478259518748675e-06, |
|
"loss": 0.5963, |
|
"step": 1755 |
|
}, |
|
{ |
|
"epoch": 0.6549446460135827, |
|
"grad_norm": 0.01217294204980135, |
|
"learning_rate": 6.41753349153587e-06, |
|
"loss": 0.6361, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 0.6568052842124849, |
|
"grad_norm": 0.014139696955680847, |
|
"learning_rate": 6.356958648998762e-06, |
|
"loss": 0.6321, |
|
"step": 1765 |
|
}, |
|
{ |
|
"epoch": 0.6586659224113871, |
|
"grad_norm": 0.014064906165003777, |
|
"learning_rate": 6.296537547473302e-06, |
|
"loss": 0.6519, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 0.6605265606102894, |
|
"grad_norm": 0.012787656858563423, |
|
"learning_rate": 6.236272736807378e-06, |
|
"loss": 0.6033, |
|
"step": 1775 |
|
}, |
|
{ |
|
"epoch": 0.6623871988091915, |
|
"grad_norm": 0.012475831434130669, |
|
"learning_rate": 6.176166760253196e-06, |
|
"loss": 0.5947, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 0.6642478370080938, |
|
"grad_norm": 0.013439202681183815, |
|
"learning_rate": 6.116222154359952e-06, |
|
"loss": 0.636, |
|
"step": 1785 |
|
}, |
|
{ |
|
"epoch": 0.666108475206996, |
|
"grad_norm": 0.01295219361782074, |
|
"learning_rate": 6.056441448866817e-06, |
|
"loss": 0.6203, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 0.6679691134058983, |
|
"grad_norm": 0.012275603599846363, |
|
"learning_rate": 5.996827166596129e-06, |
|
"loss": 0.6743, |
|
"step": 1795 |
|
}, |
|
{ |
|
"epoch": 0.6698297516048004, |
|
"grad_norm": 0.01329441275447607, |
|
"learning_rate": 5.937381823346964e-06, |
|
"loss": 0.5975, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 0.6698297516048004, |
|
"eval_loss": 0.6431623101234436, |
|
"eval_runtime": 26.5644, |
|
"eval_samples_per_second": 4.781, |
|
"eval_steps_per_second": 4.781, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 0.6716903898037027, |
|
"grad_norm": 0.013007073663175106, |
|
"learning_rate": 5.878107927788962e-06, |
|
"loss": 0.6165, |
|
"step": 1805 |
|
}, |
|
{ |
|
"epoch": 0.6735510280026049, |
|
"grad_norm": 0.013040522113442421, |
|
"learning_rate": 5.819007981356441e-06, |
|
"loss": 0.6107, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 0.6754116662015072, |
|
"grad_norm": 0.014542641118168831, |
|
"learning_rate": 5.760084478142842e-06, |
|
"loss": 0.6284, |
|
"step": 1815 |
|
}, |
|
{ |
|
"epoch": 0.6772723044004093, |
|
"grad_norm": 0.01339266262948513, |
|
"learning_rate": 5.701339904795486e-06, |
|
"loss": 0.6228, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 0.6791329425993116, |
|
"grad_norm": 0.013091943226754665, |
|
"learning_rate": 5.642776740410618e-06, |
|
"loss": 0.5995, |
|
"step": 1825 |
|
}, |
|
{ |
|
"epoch": 0.6809935807982138, |
|
"grad_norm": 0.013321259059011936, |
|
"learning_rate": 5.584397456428785e-06, |
|
"loss": 0.627, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 0.682854218997116, |
|
"grad_norm": 0.012838364578783512, |
|
"learning_rate": 5.5262045165305615e-06, |
|
"loss": 0.658, |
|
"step": 1835 |
|
}, |
|
{ |
|
"epoch": 0.6847148571960182, |
|
"grad_norm": 0.01306592021137476, |
|
"learning_rate": 5.468200376532552e-06, |
|
"loss": 0.6756, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 0.6865754953949205, |
|
"grad_norm": 0.014314945787191391, |
|
"learning_rate": 5.410387484283767e-06, |
|
"loss": 0.6598, |
|
"step": 1845 |
|
}, |
|
{ |
|
"epoch": 0.6884361335938227, |
|
"grad_norm": 0.013973386958241463, |
|
"learning_rate": 5.352768279562315e-06, |
|
"loss": 0.6535, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 0.6902967717927249, |
|
"grad_norm": 0.012864621356129646, |
|
"learning_rate": 5.295345193972445e-06, |
|
"loss": 0.6422, |
|
"step": 1855 |
|
}, |
|
{ |
|
"epoch": 0.6921574099916271, |
|
"grad_norm": 0.014392906799912453, |
|
"learning_rate": 5.238120650841925e-06, |
|
"loss": 0.664, |
|
"step": 1860 |
|
}, |
|
{ |
|
"epoch": 0.6940180481905294, |
|
"grad_norm": 0.01327193807810545, |
|
"learning_rate": 5.18109706511978e-06, |
|
"loss": 0.6179, |
|
"step": 1865 |
|
}, |
|
{ |
|
"epoch": 0.6958786863894316, |
|
"grad_norm": 0.01355487760156393, |
|
"learning_rate": 5.124276843274372e-06, |
|
"loss": 0.605, |
|
"step": 1870 |
|
}, |
|
{ |
|
"epoch": 0.6977393245883338, |
|
"grad_norm": 0.012646518647670746, |
|
"learning_rate": 5.067662383191845e-06, |
|
"loss": 0.6608, |
|
"step": 1875 |
|
}, |
|
{ |
|
"epoch": 0.699599962787236, |
|
"grad_norm": 0.013363865204155445, |
|
"learning_rate": 5.011256074074945e-06, |
|
"loss": 0.6975, |
|
"step": 1880 |
|
}, |
|
{ |
|
"epoch": 0.7014606009861383, |
|
"grad_norm": 0.013032359071075916, |
|
"learning_rate": 4.955060296342163e-06, |
|
"loss": 0.6041, |
|
"step": 1885 |
|
}, |
|
{ |
|
"epoch": 0.7033212391850404, |
|
"grad_norm": 0.012049228884279728, |
|
"learning_rate": 4.899077421527304e-06, |
|
"loss": 0.6078, |
|
"step": 1890 |
|
}, |
|
{ |
|
"epoch": 0.7051818773839427, |
|
"grad_norm": 0.013416007161140442, |
|
"learning_rate": 4.843309812179405e-06, |
|
"loss": 0.6514, |
|
"step": 1895 |
|
}, |
|
{ |
|
"epoch": 0.7070425155828449, |
|
"grad_norm": 0.013701760210096836, |
|
"learning_rate": 4.787759821763017e-06, |
|
"loss": 0.6606, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 0.7070425155828449, |
|
"eval_loss": 0.6429811716079712, |
|
"eval_runtime": 26.563, |
|
"eval_samples_per_second": 4.781, |
|
"eval_steps_per_second": 4.781, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 0.7089031537817472, |
|
"grad_norm": 0.013226517476141453, |
|
"learning_rate": 4.732429794558887e-06, |
|
"loss": 0.6391, |
|
"step": 1905 |
|
}, |
|
{ |
|
"epoch": 0.7107637919806493, |
|
"grad_norm": 0.012964668683707714, |
|
"learning_rate": 4.677322065565039e-06, |
|
"loss": 0.6768, |
|
"step": 1910 |
|
}, |
|
{ |
|
"epoch": 0.7126244301795516, |
|
"grad_norm": 0.01224041823297739, |
|
"learning_rate": 4.622438960398234e-06, |
|
"loss": 0.645, |
|
"step": 1915 |
|
}, |
|
{ |
|
"epoch": 0.7144850683784538, |
|
"grad_norm": 0.013205330818891525, |
|
"learning_rate": 4.567782795195816e-06, |
|
"loss": 0.6051, |
|
"step": 1920 |
|
}, |
|
{ |
|
"epoch": 0.7163457065773561, |
|
"grad_norm": 0.01323428563773632, |
|
"learning_rate": 4.5133558765179576e-06, |
|
"loss": 0.6113, |
|
"step": 1925 |
|
}, |
|
{ |
|
"epoch": 0.7182063447762582, |
|
"grad_norm": 0.012786868028342724, |
|
"learning_rate": 4.459160501250358e-06, |
|
"loss": 0.6677, |
|
"step": 1930 |
|
}, |
|
{ |
|
"epoch": 0.7200669829751605, |
|
"grad_norm": 0.010466697625815868, |
|
"learning_rate": 4.405198956507272e-06, |
|
"loss": 0.586, |
|
"step": 1935 |
|
}, |
|
{ |
|
"epoch": 0.7219276211740627, |
|
"grad_norm": 0.012410039082169533, |
|
"learning_rate": 4.35147351953501e-06, |
|
"loss": 0.6111, |
|
"step": 1940 |
|
}, |
|
{ |
|
"epoch": 0.723788259372965, |
|
"grad_norm": 0.013155271299183369, |
|
"learning_rate": 4.297986457615836e-06, |
|
"loss": 0.6404, |
|
"step": 1945 |
|
}, |
|
{ |
|
"epoch": 0.7256488975718671, |
|
"grad_norm": 0.01472384575754404, |
|
"learning_rate": 4.244740027972275e-06, |
|
"loss": 0.6553, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 0.7275095357707694, |
|
"grad_norm": 0.012972739525139332, |
|
"learning_rate": 4.191736477671864e-06, |
|
"loss": 0.6613, |
|
"step": 1955 |
|
}, |
|
{ |
|
"epoch": 0.7293701739696716, |
|
"grad_norm": 0.013699792325496674, |
|
"learning_rate": 4.138978043532332e-06, |
|
"loss": 0.6178, |
|
"step": 1960 |
|
}, |
|
{ |
|
"epoch": 0.7312308121685738, |
|
"grad_norm": 0.012072841636836529, |
|
"learning_rate": 4.086466952027171e-06, |
|
"loss": 0.5865, |
|
"step": 1965 |
|
}, |
|
{ |
|
"epoch": 0.733091450367476, |
|
"grad_norm": 0.013510013930499554, |
|
"learning_rate": 4.034205419191709e-06, |
|
"loss": 0.6387, |
|
"step": 1970 |
|
}, |
|
{ |
|
"epoch": 0.7349520885663783, |
|
"grad_norm": 0.01405192632228136, |
|
"learning_rate": 3.982195650529583e-06, |
|
"loss": 0.5677, |
|
"step": 1975 |
|
}, |
|
{ |
|
"epoch": 0.7368127267652805, |
|
"grad_norm": 0.012013067491352558, |
|
"learning_rate": 3.930439840919652e-06, |
|
"loss": 0.5868, |
|
"step": 1980 |
|
}, |
|
{ |
|
"epoch": 0.7386733649641827, |
|
"grad_norm": 0.01283415500074625, |
|
"learning_rate": 3.878940174523371e-06, |
|
"loss": 0.597, |
|
"step": 1985 |
|
}, |
|
{ |
|
"epoch": 0.7405340031630849, |
|
"grad_norm": 0.013413486070930958, |
|
"learning_rate": 3.827698824692643e-06, |
|
"loss": 0.6074, |
|
"step": 1990 |
|
}, |
|
{ |
|
"epoch": 0.7423946413619872, |
|
"grad_norm": 0.013137887232005596, |
|
"learning_rate": 3.776717953878064e-06, |
|
"loss": 0.6599, |
|
"step": 1995 |
|
}, |
|
{ |
|
"epoch": 0.7442552795608894, |
|
"grad_norm": 0.013435564935207367, |
|
"learning_rate": 3.725999713537689e-06, |
|
"loss": 0.6191, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.7442552795608894, |
|
"eval_loss": 0.6428595185279846, |
|
"eval_runtime": 26.6061, |
|
"eval_samples_per_second": 4.773, |
|
"eval_steps_per_second": 4.773, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.7461159177597916, |
|
"grad_norm": 0.013417122885584831, |
|
"learning_rate": 3.6755462440462288e-06, |
|
"loss": 0.6012, |
|
"step": 2005 |
|
}, |
|
{ |
|
"epoch": 0.7479765559586938, |
|
"grad_norm": 0.01378430612385273, |
|
"learning_rate": 3.625359674604725e-06, |
|
"loss": 0.607, |
|
"step": 2010 |
|
}, |
|
{ |
|
"epoch": 0.7498371941575961, |
|
"grad_norm": 0.012614194303750992, |
|
"learning_rate": 3.5754421231506953e-06, |
|
"loss": 0.6364, |
|
"step": 2015 |
|
}, |
|
{ |
|
"epoch": 0.7516978323564982, |
|
"grad_norm": 0.013113941997289658, |
|
"learning_rate": 3.5257956962687545e-06, |
|
"loss": 0.6148, |
|
"step": 2020 |
|
}, |
|
{ |
|
"epoch": 0.7535584705554005, |
|
"grad_norm": 0.012877865694463253, |
|
"learning_rate": 3.476422489101713e-06, |
|
"loss": 0.6369, |
|
"step": 2025 |
|
}, |
|
{ |
|
"epoch": 0.7554191087543027, |
|
"grad_norm": 0.013343026861548424, |
|
"learning_rate": 3.427324585262156e-06, |
|
"loss": 0.6257, |
|
"step": 2030 |
|
}, |
|
{ |
|
"epoch": 0.757279746953205, |
|
"grad_norm": 0.01313395518809557, |
|
"learning_rate": 3.3785040567445282e-06, |
|
"loss": 0.6301, |
|
"step": 2035 |
|
}, |
|
{ |
|
"epoch": 0.7591403851521071, |
|
"grad_norm": 0.013584131374955177, |
|
"learning_rate": 3.329962963837661e-06, |
|
"loss": 0.6244, |
|
"step": 2040 |
|
}, |
|
{ |
|
"epoch": 0.7610010233510094, |
|
"grad_norm": 0.011685586534440517, |
|
"learning_rate": 3.281703355037854e-06, |
|
"loss": 0.5818, |
|
"step": 2045 |
|
}, |
|
{ |
|
"epoch": 0.7628616615499116, |
|
"grad_norm": 0.013454140163958073, |
|
"learning_rate": 3.233727266962425e-06, |
|
"loss": 0.6151, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 0.7647222997488139, |
|
"grad_norm": 0.012276149354875088, |
|
"learning_rate": 3.186036724263748e-06, |
|
"loss": 0.621, |
|
"step": 2055 |
|
}, |
|
{ |
|
"epoch": 0.766582937947716, |
|
"grad_norm": 0.013796020299196243, |
|
"learning_rate": 3.138633739543805e-06, |
|
"loss": 0.6466, |
|
"step": 2060 |
|
}, |
|
{ |
|
"epoch": 0.7684435761466183, |
|
"grad_norm": 0.013075289316475391, |
|
"learning_rate": 3.0915203132692805e-06, |
|
"loss": 0.6116, |
|
"step": 2065 |
|
}, |
|
{ |
|
"epoch": 0.7703042143455205, |
|
"grad_norm": 0.014096383936703205, |
|
"learning_rate": 3.0446984336871144e-06, |
|
"loss": 0.5877, |
|
"step": 2070 |
|
}, |
|
{ |
|
"epoch": 0.7721648525444227, |
|
"grad_norm": 0.013074836693704128, |
|
"learning_rate": 2.998170076740601e-06, |
|
"loss": 0.5829, |
|
"step": 2075 |
|
}, |
|
{ |
|
"epoch": 0.7740254907433249, |
|
"grad_norm": 0.01671191304922104, |
|
"learning_rate": 2.951937205986004e-06, |
|
"loss": 0.6439, |
|
"step": 2080 |
|
}, |
|
{ |
|
"epoch": 0.7758861289422272, |
|
"grad_norm": 0.013207321055233479, |
|
"learning_rate": 2.9060017725096943e-06, |
|
"loss": 0.621, |
|
"step": 2085 |
|
}, |
|
{ |
|
"epoch": 0.7777467671411294, |
|
"grad_norm": 0.014768741093575954, |
|
"learning_rate": 2.8603657148458053e-06, |
|
"loss": 0.6272, |
|
"step": 2090 |
|
}, |
|
{ |
|
"epoch": 0.7796074053400316, |
|
"grad_norm": 0.015293040312826633, |
|
"learning_rate": 2.8150309588944304e-06, |
|
"loss": 0.6388, |
|
"step": 2095 |
|
}, |
|
{ |
|
"epoch": 0.7814680435389338, |
|
"grad_norm": 0.01271377969533205, |
|
"learning_rate": 2.769999417840341e-06, |
|
"loss": 0.6249, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 0.7814680435389338, |
|
"eval_loss": 0.6426796913146973, |
|
"eval_runtime": 26.6067, |
|
"eval_samples_per_second": 4.773, |
|
"eval_steps_per_second": 4.773, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 0.7833286817378361, |
|
"grad_norm": 0.013834511861205101, |
|
"learning_rate": 2.7252729920722564e-06, |
|
"loss": 0.6254, |
|
"step": 2105 |
|
}, |
|
{ |
|
"epoch": 0.7851893199367384, |
|
"grad_norm": 0.020366957411170006, |
|
"learning_rate": 2.680853569102633e-06, |
|
"loss": 0.632, |
|
"step": 2110 |
|
}, |
|
{ |
|
"epoch": 0.7870499581356405, |
|
"grad_norm": 0.012656195089221, |
|
"learning_rate": 2.6367430234880286e-06, |
|
"loss": 0.6274, |
|
"step": 2115 |
|
}, |
|
{ |
|
"epoch": 0.7889105963345427, |
|
"grad_norm": 0.01308165118098259, |
|
"learning_rate": 2.5929432167499658e-06, |
|
"loss": 0.6457, |
|
"step": 2120 |
|
}, |
|
{ |
|
"epoch": 0.790771234533445, |
|
"grad_norm": 0.012838170863687992, |
|
"learning_rate": 2.5494559972963928e-06, |
|
"loss": 0.6436, |
|
"step": 2125 |
|
}, |
|
{ |
|
"epoch": 0.7926318727323471, |
|
"grad_norm": 0.012966095469892025, |
|
"learning_rate": 2.5062832003436833e-06, |
|
"loss": 0.6449, |
|
"step": 2130 |
|
}, |
|
{ |
|
"epoch": 0.7944925109312494, |
|
"grad_norm": 0.013781096786260605, |
|
"learning_rate": 2.463426647839173e-06, |
|
"loss": 0.584, |
|
"step": 2135 |
|
}, |
|
{ |
|
"epoch": 0.7963531491301516, |
|
"grad_norm": 0.012445746921002865, |
|
"learning_rate": 2.420888148384265e-06, |
|
"loss": 0.6397, |
|
"step": 2140 |
|
}, |
|
{ |
|
"epoch": 0.7982137873290539, |
|
"grad_norm": 0.012599524110555649, |
|
"learning_rate": 2.378669497158138e-06, |
|
"loss": 0.5974, |
|
"step": 2145 |
|
}, |
|
{ |
|
"epoch": 0.800074425527956, |
|
"grad_norm": 0.011499716900289059, |
|
"learning_rate": 2.3367724758419495e-06, |
|
"loss": 0.5552, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 0.8019350637268583, |
|
"grad_norm": 0.012771397829055786, |
|
"learning_rate": 2.2951988525436695e-06, |
|
"loss": 0.666, |
|
"step": 2155 |
|
}, |
|
{ |
|
"epoch": 0.8037957019257606, |
|
"grad_norm": 0.013215843588113785, |
|
"learning_rate": 2.2539503817234553e-06, |
|
"loss": 0.5925, |
|
"step": 2160 |
|
}, |
|
{ |
|
"epoch": 0.8056563401246628, |
|
"grad_norm": 0.014011417515575886, |
|
"learning_rate": 2.2130288041196135e-06, |
|
"loss": 0.6216, |
|
"step": 2165 |
|
}, |
|
{ |
|
"epoch": 0.807516978323565, |
|
"grad_norm": 0.014169846661388874, |
|
"learning_rate": 2.1724358466751394e-06, |
|
"loss": 0.625, |
|
"step": 2170 |
|
}, |
|
{ |
|
"epoch": 0.8093776165224672, |
|
"grad_norm": 0.013908619992434978, |
|
"learning_rate": 2.132173222464834e-06, |
|
"loss": 0.6641, |
|
"step": 2175 |
|
}, |
|
{ |
|
"epoch": 0.8112382547213695, |
|
"grad_norm": 0.01255475077778101, |
|
"learning_rate": 2.092242630623016e-06, |
|
"loss": 0.6135, |
|
"step": 2180 |
|
}, |
|
{ |
|
"epoch": 0.8130988929202716, |
|
"grad_norm": 0.01320129819214344, |
|
"learning_rate": 2.0526457562718074e-06, |
|
"loss": 0.5893, |
|
"step": 2185 |
|
}, |
|
{ |
|
"epoch": 0.8149595311191739, |
|
"grad_norm": 0.012163982726633549, |
|
"learning_rate": 2.013384270450036e-06, |
|
"loss": 0.6552, |
|
"step": 2190 |
|
}, |
|
{ |
|
"epoch": 0.8168201693180761, |
|
"grad_norm": 0.012990654446184635, |
|
"learning_rate": 1.974459830042691e-06, |
|
"loss": 0.6309, |
|
"step": 2195 |
|
}, |
|
{ |
|
"epoch": 0.8186808075169784, |
|
"grad_norm": 0.015373739413917065, |
|
"learning_rate": 1.9358740777110154e-06, |
|
"loss": 0.6761, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 0.8186808075169784, |
|
"eval_loss": 0.6425639390945435, |
|
"eval_runtime": 26.5817, |
|
"eval_samples_per_second": 4.778, |
|
"eval_steps_per_second": 4.778, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 0.8205414457158805, |
|
"grad_norm": 0.012748058885335922, |
|
"learning_rate": 1.8976286418231916e-06, |
|
"loss": 0.6313, |
|
"step": 2205 |
|
}, |
|
{ |
|
"epoch": 0.8224020839147828, |
|
"grad_norm": 0.014108446426689625, |
|
"learning_rate": 1.8597251363856061e-06, |
|
"loss": 0.5997, |
|
"step": 2210 |
|
}, |
|
{ |
|
"epoch": 0.824262722113685, |
|
"grad_norm": 0.0133186811581254, |
|
"learning_rate": 1.8221651609747337e-06, |
|
"loss": 0.6732, |
|
"step": 2215 |
|
}, |
|
{ |
|
"epoch": 0.8261233603125873, |
|
"grad_norm": 0.012968887574970722, |
|
"learning_rate": 1.7849503006696566e-06, |
|
"loss": 0.6129, |
|
"step": 2220 |
|
}, |
|
{ |
|
"epoch": 0.8279839985114894, |
|
"grad_norm": 0.012473355047404766, |
|
"learning_rate": 1.7480821259851488e-06, |
|
"loss": 0.6131, |
|
"step": 2225 |
|
}, |
|
{ |
|
"epoch": 0.8298446367103917, |
|
"grad_norm": 0.012497167102992535, |
|
"learning_rate": 1.7115621928054105e-06, |
|
"loss": 0.6199, |
|
"step": 2230 |
|
}, |
|
{ |
|
"epoch": 0.8317052749092939, |
|
"grad_norm": 0.014741111546754837, |
|
"learning_rate": 1.6753920423184022e-06, |
|
"loss": 0.6159, |
|
"step": 2235 |
|
}, |
|
{ |
|
"epoch": 0.8335659131081962, |
|
"grad_norm": 0.013057067058980465, |
|
"learning_rate": 1.6395732009508058e-06, |
|
"loss": 0.5898, |
|
"step": 2240 |
|
}, |
|
{ |
|
"epoch": 0.8354265513070983, |
|
"grad_norm": 0.014299440197646618, |
|
"learning_rate": 1.60410718030361e-06, |
|
"loss": 0.6486, |
|
"step": 2245 |
|
}, |
|
{ |
|
"epoch": 0.8372871895060006, |
|
"grad_norm": 0.013178296387195587, |
|
"learning_rate": 1.568995477088323e-06, |
|
"loss": 0.6254, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 0.8391478277049028, |
|
"grad_norm": 0.013072814792394638, |
|
"learning_rate": 1.5342395730637904e-06, |
|
"loss": 0.6385, |
|
"step": 2255 |
|
}, |
|
{ |
|
"epoch": 0.841008465903805, |
|
"grad_norm": 0.014187455177307129, |
|
"learning_rate": 1.4998409349736841e-06, |
|
"loss": 0.6458, |
|
"step": 2260 |
|
}, |
|
{ |
|
"epoch": 0.8428691041027072, |
|
"grad_norm": 0.011844666674733162, |
|
"learning_rate": 1.4658010144846001e-06, |
|
"loss": 0.6556, |
|
"step": 2265 |
|
}, |
|
{ |
|
"epoch": 0.8447297423016095, |
|
"grad_norm": 0.013700997456908226, |
|
"learning_rate": 1.432121248124786e-06, |
|
"loss": 0.6381, |
|
"step": 2270 |
|
}, |
|
{ |
|
"epoch": 0.8465903805005117, |
|
"grad_norm": 0.014204096049070358, |
|
"learning_rate": 1.3988030572235212e-06, |
|
"loss": 0.624, |
|
"step": 2275 |
|
}, |
|
{ |
|
"epoch": 0.8484510186994139, |
|
"grad_norm": 0.013039126060903072, |
|
"learning_rate": 1.3658478478511416e-06, |
|
"loss": 0.593, |
|
"step": 2280 |
|
}, |
|
{ |
|
"epoch": 0.8503116568983161, |
|
"grad_norm": 0.014153816737234592, |
|
"learning_rate": 1.333257010759702e-06, |
|
"loss": 0.5909, |
|
"step": 2285 |
|
}, |
|
{ |
|
"epoch": 0.8521722950972184, |
|
"grad_norm": 0.012623702175915241, |
|
"learning_rate": 1.3010319213242762e-06, |
|
"loss": 0.6006, |
|
"step": 2290 |
|
}, |
|
{ |
|
"epoch": 0.8540329332961206, |
|
"grad_norm": 0.013684497214853764, |
|
"learning_rate": 1.2691739394849089e-06, |
|
"loss": 0.6112, |
|
"step": 2295 |
|
}, |
|
{ |
|
"epoch": 0.8558935714950228, |
|
"grad_norm": 0.012923210859298706, |
|
"learning_rate": 1.2376844096892526e-06, |
|
"loss": 0.6239, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 0.8558935714950228, |
|
"eval_loss": 0.6425209641456604, |
|
"eval_runtime": 26.5582, |
|
"eval_samples_per_second": 4.782, |
|
"eval_steps_per_second": 4.782, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 0.857754209693925, |
|
"grad_norm": 0.01325127761811018, |
|
"learning_rate": 1.2065646608357972e-06, |
|
"loss": 0.6537, |
|
"step": 2305 |
|
}, |
|
{ |
|
"epoch": 0.8596148478928273, |
|
"grad_norm": 0.01215402316302061, |
|
"learning_rate": 1.1758160062178093e-06, |
|
"loss": 0.634, |
|
"step": 2310 |
|
}, |
|
{ |
|
"epoch": 0.8614754860917294, |
|
"grad_norm": 0.015918321907520294, |
|
"learning_rate": 1.1454397434679022e-06, |
|
"loss": 0.6296, |
|
"step": 2315 |
|
}, |
|
{ |
|
"epoch": 0.8633361242906317, |
|
"grad_norm": 0.011971482075750828, |
|
"learning_rate": 1.1154371545032738e-06, |
|
"loss": 0.5983, |
|
"step": 2320 |
|
}, |
|
{ |
|
"epoch": 0.8651967624895339, |
|
"grad_norm": 0.01542737614363432, |
|
"learning_rate": 1.0858095054716111e-06, |
|
"loss": 0.6468, |
|
"step": 2325 |
|
}, |
|
{ |
|
"epoch": 0.8670574006884362, |
|
"grad_norm": 0.012823596596717834, |
|
"learning_rate": 1.0565580466976566e-06, |
|
"loss": 0.6222, |
|
"step": 2330 |
|
}, |
|
{ |
|
"epoch": 0.8689180388873383, |
|
"grad_norm": 0.012822597287595272, |
|
"learning_rate": 1.027684012630441e-06, |
|
"loss": 0.6385, |
|
"step": 2335 |
|
}, |
|
{ |
|
"epoch": 0.8707786770862406, |
|
"grad_norm": 0.014365943148732185, |
|
"learning_rate": 9.991886217911851e-07, |
|
"loss": 0.6541, |
|
"step": 2340 |
|
}, |
|
{ |
|
"epoch": 0.8726393152851428, |
|
"grad_norm": 0.013635417446494102, |
|
"learning_rate": 9.710730767218913e-07, |
|
"loss": 0.6323, |
|
"step": 2345 |
|
}, |
|
{ |
|
"epoch": 0.8744999534840451, |
|
"grad_norm": 0.012839280068874359, |
|
"learning_rate": 9.433385639345705e-07, |
|
"loss": 0.5944, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 0.8763605916829472, |
|
"grad_norm": 0.0141257019713521, |
|
"learning_rate": 9.159862538611908e-07, |
|
"loss": 0.6632, |
|
"step": 2355 |
|
}, |
|
{ |
|
"epoch": 0.8782212298818495, |
|
"grad_norm": 0.013571621850132942, |
|
"learning_rate": 8.890173008042768e-07, |
|
"loss": 0.6086, |
|
"step": 2360 |
|
}, |
|
{ |
|
"epoch": 0.8800818680807517, |
|
"grad_norm": 0.01433682069182396, |
|
"learning_rate": 8.624328428881945e-07, |
|
"loss": 0.6482, |
|
"step": 2365 |
|
}, |
|
{ |
|
"epoch": 0.8819425062796539, |
|
"grad_norm": 0.014090972021222115, |
|
"learning_rate": 8.36234002011117e-07, |
|
"loss": 0.6335, |
|
"step": 2370 |
|
}, |
|
{ |
|
"epoch": 0.8838031444785561, |
|
"grad_norm": 0.013004067353904247, |
|
"learning_rate": 8.10421883797694e-07, |
|
"loss": 0.6145, |
|
"step": 2375 |
|
}, |
|
{ |
|
"epoch": 0.8856637826774584, |
|
"grad_norm": 0.01391025260090828, |
|
"learning_rate": 7.849975775523777e-07, |
|
"loss": 0.6415, |
|
"step": 2380 |
|
}, |
|
{ |
|
"epoch": 0.8875244208763606, |
|
"grad_norm": 0.012305272743105888, |
|
"learning_rate": 7.599621562134596e-07, |
|
"loss": 0.6462, |
|
"step": 2385 |
|
}, |
|
{ |
|
"epoch": 0.8893850590752628, |
|
"grad_norm": 0.013947544619441032, |
|
"learning_rate": 7.35316676307789e-07, |
|
"loss": 0.6266, |
|
"step": 2390 |
|
}, |
|
{ |
|
"epoch": 0.891245697274165, |
|
"grad_norm": 0.016055511310696602, |
|
"learning_rate": 7.110621779061889e-07, |
|
"loss": 0.6501, |
|
"step": 2395 |
|
}, |
|
{ |
|
"epoch": 0.8931063354730673, |
|
"grad_norm": 0.01238927897065878, |
|
"learning_rate": 6.871996845795581e-07, |
|
"loss": 0.5895, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 0.8931063354730673, |
|
"eval_loss": 0.6424703598022461, |
|
"eval_runtime": 26.6049, |
|
"eval_samples_per_second": 4.774, |
|
"eval_steps_per_second": 4.774, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 0.8949669736719695, |
|
"grad_norm": 0.012343904934823513, |
|
"learning_rate": 6.637302033556891e-07, |
|
"loss": 0.6105, |
|
"step": 2405 |
|
}, |
|
{ |
|
"epoch": 0.8968276118708717, |
|
"grad_norm": 0.01289752684533596, |
|
"learning_rate": 6.40654724676748e-07, |
|
"loss": 0.6265, |
|
"step": 2410 |
|
}, |
|
{ |
|
"epoch": 0.8986882500697739, |
|
"grad_norm": 0.013334677554666996, |
|
"learning_rate": 6.179742223574936e-07, |
|
"loss": 0.6261, |
|
"step": 2415 |
|
}, |
|
{ |
|
"epoch": 0.9005488882686762, |
|
"grad_norm": 0.013512643985450268, |
|
"learning_rate": 5.956896535441803e-07, |
|
"loss": 0.5797, |
|
"step": 2420 |
|
}, |
|
{ |
|
"epoch": 0.9024095264675783, |
|
"grad_norm": 0.01403987966477871, |
|
"learning_rate": 5.738019586741573e-07, |
|
"loss": 0.616, |
|
"step": 2425 |
|
}, |
|
{ |
|
"epoch": 0.9042701646664806, |
|
"grad_norm": 0.013224196620285511, |
|
"learning_rate": 5.523120614361821e-07, |
|
"loss": 0.608, |
|
"step": 2430 |
|
}, |
|
{ |
|
"epoch": 0.9061308028653828, |
|
"grad_norm": 0.01317799836397171, |
|
"learning_rate": 5.312208687314502e-07, |
|
"loss": 0.6206, |
|
"step": 2435 |
|
}, |
|
{ |
|
"epoch": 0.9079914410642851, |
|
"grad_norm": 0.01357815321534872, |
|
"learning_rate": 5.105292706353093e-07, |
|
"loss": 0.6759, |
|
"step": 2440 |
|
}, |
|
{ |
|
"epoch": 0.9098520792631872, |
|
"grad_norm": 0.012360199354588985, |
|
"learning_rate": 4.902381403597046e-07, |
|
"loss": 0.6182, |
|
"step": 2445 |
|
}, |
|
{ |
|
"epoch": 0.9117127174620895, |
|
"grad_norm": 0.012978832237422466, |
|
"learning_rate": 4.703483342163262e-07, |
|
"loss": 0.5892, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 0.9135733556609917, |
|
"grad_norm": 0.013323403894901276, |
|
"learning_rate": 4.5086069158047143e-07, |
|
"loss": 0.636, |
|
"step": 2455 |
|
}, |
|
{ |
|
"epoch": 0.915433993859894, |
|
"grad_norm": 0.013127562589943409, |
|
"learning_rate": 4.3177603485562327e-07, |
|
"loss": 0.5847, |
|
"step": 2460 |
|
}, |
|
{ |
|
"epoch": 0.9172946320587961, |
|
"grad_norm": 0.012560434639453888, |
|
"learning_rate": 4.1309516943874196e-07, |
|
"loss": 0.6073, |
|
"step": 2465 |
|
}, |
|
{ |
|
"epoch": 0.9191552702576984, |
|
"grad_norm": 0.01229447964578867, |
|
"learning_rate": 3.9481888368627764e-07, |
|
"loss": 0.5846, |
|
"step": 2470 |
|
}, |
|
{ |
|
"epoch": 0.9210159084566006, |
|
"grad_norm": 0.012554515153169632, |
|
"learning_rate": 3.7694794888090025e-07, |
|
"loss": 0.6186, |
|
"step": 2475 |
|
}, |
|
{ |
|
"epoch": 0.9228765466555028, |
|
"grad_norm": 0.011958773247897625, |
|
"learning_rate": 3.594831191989523e-07, |
|
"loss": 0.6217, |
|
"step": 2480 |
|
}, |
|
{ |
|
"epoch": 0.924737184854405, |
|
"grad_norm": 0.01367896143347025, |
|
"learning_rate": 3.424251316786165e-07, |
|
"loss": 0.6572, |
|
"step": 2485 |
|
}, |
|
{ |
|
"epoch": 0.9265978230533073, |
|
"grad_norm": 0.013063084334135056, |
|
"learning_rate": 3.2577470618881726e-07, |
|
"loss": 0.5973, |
|
"step": 2490 |
|
}, |
|
{ |
|
"epoch": 0.9284584612522095, |
|
"grad_norm": 0.012596881948411465, |
|
"learning_rate": 3.095325453988385e-07, |
|
"loss": 0.6153, |
|
"step": 2495 |
|
}, |
|
{ |
|
"epoch": 0.9303190994511117, |
|
"grad_norm": 0.013333328068256378, |
|
"learning_rate": 2.9369933474867496e-07, |
|
"loss": 0.6337, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 0.9303190994511117, |
|
"eval_loss": 0.6424322128295898, |
|
"eval_runtime": 26.5954, |
|
"eval_samples_per_second": 4.775, |
|
"eval_steps_per_second": 4.775, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 0.9321797376500139, |
|
"grad_norm": 0.012263627722859383, |
|
"learning_rate": 2.7827574242009434e-07, |
|
"loss": 0.6258, |
|
"step": 2505 |
|
}, |
|
{ |
|
"epoch": 0.9340403758489162, |
|
"grad_norm": 0.01295017171651125, |
|
"learning_rate": 2.632624193084499e-07, |
|
"loss": 0.6071, |
|
"step": 2510 |
|
}, |
|
{ |
|
"epoch": 0.9359010140478184, |
|
"grad_norm": 0.013952870853245258, |
|
"learning_rate": 2.48659998995211e-07, |
|
"loss": 0.6594, |
|
"step": 2515 |
|
}, |
|
{ |
|
"epoch": 0.9377616522467206, |
|
"grad_norm": 0.01351676881313324, |
|
"learning_rate": 2.344690977212205e-07, |
|
"loss": 0.6072, |
|
"step": 2520 |
|
}, |
|
{ |
|
"epoch": 0.9396222904456228, |
|
"grad_norm": 0.012661050073802471, |
|
"learning_rate": 2.2069031436068643e-07, |
|
"loss": 0.6152, |
|
"step": 2525 |
|
}, |
|
{ |
|
"epoch": 0.9414829286445251, |
|
"grad_norm": 0.013936568051576614, |
|
"learning_rate": 2.0732423039591998e-07, |
|
"loss": 0.6149, |
|
"step": 2530 |
|
}, |
|
{ |
|
"epoch": 0.9433435668434274, |
|
"grad_norm": 0.012601389549672604, |
|
"learning_rate": 1.9437140989278624e-07, |
|
"loss": 0.6205, |
|
"step": 2535 |
|
}, |
|
{ |
|
"epoch": 0.9452042050423295, |
|
"grad_norm": 0.011978083290159702, |
|
"learning_rate": 1.8183239947690112e-07, |
|
"loss": 0.6252, |
|
"step": 2540 |
|
}, |
|
{ |
|
"epoch": 0.9470648432412317, |
|
"grad_norm": 0.012075323611497879, |
|
"learning_rate": 1.6970772831056637e-07, |
|
"loss": 0.6438, |
|
"step": 2545 |
|
}, |
|
{ |
|
"epoch": 0.948925481440134, |
|
"grad_norm": 0.012837120331823826, |
|
"learning_rate": 1.5799790807043857e-07, |
|
"loss": 0.6304, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 0.9507861196390361, |
|
"grad_norm": 0.012269029393792152, |
|
"learning_rate": 1.467034329259287e-07, |
|
"loss": 0.5938, |
|
"step": 2555 |
|
}, |
|
{ |
|
"epoch": 0.9526467578379384, |
|
"grad_norm": 0.01306453812867403, |
|
"learning_rate": 1.358247795183587e-07, |
|
"loss": 0.5981, |
|
"step": 2560 |
|
}, |
|
{ |
|
"epoch": 0.9545073960368406, |
|
"grad_norm": 0.012171389535069466, |
|
"learning_rate": 1.2536240694083658e-07, |
|
"loss": 0.5599, |
|
"step": 2565 |
|
}, |
|
{ |
|
"epoch": 0.9563680342357429, |
|
"grad_norm": 0.013338044285774231, |
|
"learning_rate": 1.1531675671888621e-07, |
|
"loss": 0.6366, |
|
"step": 2570 |
|
}, |
|
{ |
|
"epoch": 0.958228672434645, |
|
"grad_norm": 0.012387475930154324, |
|
"learning_rate": 1.0568825279181572e-07, |
|
"loss": 0.6376, |
|
"step": 2575 |
|
}, |
|
{ |
|
"epoch": 0.9600893106335473, |
|
"grad_norm": 0.013054094277322292, |
|
"learning_rate": 9.647730149482614e-08, |
|
"loss": 0.6386, |
|
"step": 2580 |
|
}, |
|
{ |
|
"epoch": 0.9619499488324496, |
|
"grad_norm": 0.01319506112486124, |
|
"learning_rate": 8.768429154185853e-08, |
|
"loss": 0.6067, |
|
"step": 2585 |
|
}, |
|
{ |
|
"epoch": 0.9638105870313518, |
|
"grad_norm": 0.013576803728938103, |
|
"learning_rate": 7.930959400919924e-08, |
|
"loss": 0.6258, |
|
"step": 2590 |
|
}, |
|
{ |
|
"epoch": 0.965671225230254, |
|
"grad_norm": 0.013341645710170269, |
|
"learning_rate": 7.135356231981028e-08, |
|
"loss": 0.6344, |
|
"step": 2595 |
|
}, |
|
{ |
|
"epoch": 0.9675318634291562, |
|
"grad_norm": 0.013685373589396477, |
|
"learning_rate": 6.381653222842011e-08, |
|
"loss": 0.6144, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 0.9675318634291562, |
|
"eval_loss": 0.642424464225769, |
|
"eval_runtime": 26.5716, |
|
"eval_samples_per_second": 4.78, |
|
"eval_steps_per_second": 4.78, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 0.9693925016280585, |
|
"grad_norm": 0.018206071108579636, |
|
"learning_rate": 5.6698821807354975e-08, |
|
"loss": 0.5716, |
|
"step": 2605 |
|
}, |
|
{ |
|
"epoch": 0.9712531398269606, |
|
"grad_norm": 0.014082850888371468, |
|
"learning_rate": 5.000073143310969e-08, |
|
"loss": 0.641, |
|
"step": 2610 |
|
}, |
|
{ |
|
"epoch": 0.9731137780258629, |
|
"grad_norm": 0.01310745719820261, |
|
"learning_rate": 4.3722543773681016e-08, |
|
"loss": 0.6206, |
|
"step": 2615 |
|
}, |
|
{ |
|
"epoch": 0.9749744162247651, |
|
"grad_norm": 0.013652559369802475, |
|
"learning_rate": 3.7864523776628414e-08, |
|
"loss": 0.6405, |
|
"step": 2620 |
|
}, |
|
{ |
|
"epoch": 0.9768350544236674, |
|
"grad_norm": 0.01430275198072195, |
|
"learning_rate": 3.242691865790071e-08, |
|
"loss": 0.6191, |
|
"step": 2625 |
|
}, |
|
{ |
|
"epoch": 0.9786956926225695, |
|
"grad_norm": 0.013149100355803967, |
|
"learning_rate": 2.7409957891397775e-08, |
|
"loss": 0.6676, |
|
"step": 2630 |
|
}, |
|
{ |
|
"epoch": 0.9805563308214718, |
|
"grad_norm": 0.01429106667637825, |
|
"learning_rate": 2.2813853199292745e-08, |
|
"loss": 0.656, |
|
"step": 2635 |
|
}, |
|
{ |
|
"epoch": 0.982416969020374, |
|
"grad_norm": 0.012532561086118221, |
|
"learning_rate": 1.8638798543090253e-08, |
|
"loss": 0.6065, |
|
"step": 2640 |
|
}, |
|
{ |
|
"epoch": 0.9842776072192763, |
|
"grad_norm": 0.01296111661940813, |
|
"learning_rate": 1.4884970115444097e-08, |
|
"loss": 0.604, |
|
"step": 2645 |
|
}, |
|
{ |
|
"epoch": 0.9861382454181784, |
|
"grad_norm": 0.012804310768842697, |
|
"learning_rate": 1.1552526332723191e-08, |
|
"loss": 0.6236, |
|
"step": 2650 |
|
}, |
|
{ |
|
"epoch": 0.9879988836170807, |
|
"grad_norm": 0.012863220646977425, |
|
"learning_rate": 8.641607828324682e-09, |
|
"loss": 0.6245, |
|
"step": 2655 |
|
}, |
|
{ |
|
"epoch": 0.9898595218159829, |
|
"grad_norm": 0.013312343508005142, |
|
"learning_rate": 6.152337446736489e-09, |
|
"loss": 0.6561, |
|
"step": 2660 |
|
}, |
|
{ |
|
"epoch": 0.991720160014885, |
|
"grad_norm": 0.011470803059637547, |
|
"learning_rate": 4.0848202383581e-09, |
|
"loss": 0.5847, |
|
"step": 2665 |
|
}, |
|
{ |
|
"epoch": 0.9935807982137873, |
|
"grad_norm": 0.013984150253236294, |
|
"learning_rate": 2.4391434550652403e-09, |
|
"loss": 0.5881, |
|
"step": 2670 |
|
}, |
|
{ |
|
"epoch": 0.9954414364126896, |
|
"grad_norm": 0.012489933520555496, |
|
"learning_rate": 1.2153765465250378e-09, |
|
"loss": 0.6653, |
|
"step": 2675 |
|
}, |
|
{ |
|
"epoch": 0.9973020746115918, |
|
"grad_norm": 0.012180610559880733, |
|
"learning_rate": 4.1357115726947674e-10, |
|
"loss": 0.629, |
|
"step": 2680 |
|
}, |
|
{ |
|
"epoch": 0.999162712810494, |
|
"grad_norm": 0.014413848519325256, |
|
"learning_rate": 3.376112451158875e-11, |
|
"loss": 0.6525, |
|
"step": 2685 |
|
}, |
|
{ |
|
"epoch": 0.9999069680900549, |
|
"step": 2687, |
|
"total_flos": 8.247315480402985e+17, |
|
"train_loss": 0.638783668202896, |
|
"train_runtime": 14784.0018, |
|
"train_samples_per_second": 1.454, |
|
"train_steps_per_second": 0.182 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 2687, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 500, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": false, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 8.247315480402985e+17, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|