{
"best_metric": 3.185894727706909,
"best_model_checkpoint": "./output/models/parallel-mean-bottleneck-gpt2-medium-wikitext/checkpoint-8500",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 8910,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05611672278338945,
"grad_norm": 1.5436944961547852,
"learning_rate": 1.1223344556677892e-05,
"loss": 8.933,
"step": 100
},
{
"epoch": 0.1122334455667789,
"grad_norm": 1.0424357652664185,
"learning_rate": 2.2446689113355783e-05,
"loss": 7.3249,
"step": 200
},
{
"epoch": 0.16835016835016836,
"grad_norm": 1.080315351486206,
"learning_rate": 3.3670033670033675e-05,
"loss": 6.6032,
"step": 300
},
{
"epoch": 0.2244668911335578,
"grad_norm": 1.2949663400650024,
"learning_rate": 4.4893378226711566e-05,
"loss": 6.2747,
"step": 400
},
{
"epoch": 0.28058361391694725,
"grad_norm": 1.1373127698898315,
"learning_rate": 5.611672278338945e-05,
"loss": 6.0432,
"step": 500
},
{
"epoch": 0.28058361391694725,
"eval_accuracy": 0.1909473862768935,
"eval_bleu": 0.03783860296240099,
"eval_loss": 5.917980670928955,
"eval_perplexity": 371.66045085486866,
"eval_runtime": 11.0079,
"eval_samples_per_second": 103.653,
"eval_steps_per_second": 1.635,
"step": 500
},
{
"epoch": 0.3367003367003367,
"grad_norm": 1.0700047016143799,
"learning_rate": 6.734006734006735e-05,
"loss": 5.8439,
"step": 600
},
{
"epoch": 0.39281705948372614,
"grad_norm": 1.4188077449798584,
"learning_rate": 7.856341189674523e-05,
"loss": 5.6426,
"step": 700
},
{
"epoch": 0.4489337822671156,
"grad_norm": 1.1133205890655518,
"learning_rate": 8.978675645342313e-05,
"loss": 5.464,
"step": 800
},
{
"epoch": 0.5050505050505051,
"grad_norm": 1.3167998790740967,
"learning_rate": 9.988776655443322e-05,
"loss": 5.25,
"step": 900
},
{
"epoch": 0.5611672278338945,
"grad_norm": 1.0362534523010254,
"learning_rate": 9.864072827035791e-05,
"loss": 5.0476,
"step": 1000
},
{
"epoch": 0.5611672278338945,
"eval_accuracy": 0.26329735967574874,
"eval_bleu": 0.06118160815453846,
"eval_loss": 4.8985185623168945,
"eval_perplexity": 134.0909850321564,
"eval_runtime": 11.0771,
"eval_samples_per_second": 103.006,
"eval_steps_per_second": 1.625,
"step": 1000
},
{
"epoch": 0.6172839506172839,
"grad_norm": 0.9963632225990295,
"learning_rate": 9.73936899862826e-05,
"loss": 4.8705,
"step": 1100
},
{
"epoch": 0.6734006734006734,
"grad_norm": 0.9135327935218811,
"learning_rate": 9.614665170220725e-05,
"loss": 4.7183,
"step": 1200
},
{
"epoch": 0.7295173961840629,
"grad_norm": 0.9141886830329895,
"learning_rate": 9.489961341813194e-05,
"loss": 4.5877,
"step": 1300
},
{
"epoch": 0.7856341189674523,
"grad_norm": 0.8190032839775085,
"learning_rate": 9.365257513405662e-05,
"loss": 4.4743,
"step": 1400
},
{
"epoch": 0.8417508417508418,
"grad_norm": 0.8178410530090332,
"learning_rate": 9.24055368499813e-05,
"loss": 4.3528,
"step": 1500
},
{
"epoch": 0.8417508417508418,
"eval_accuracy": 0.3181762495041735,
"eval_bleu": 0.08335013046001274,
"eval_loss": 4.239789962768555,
"eval_perplexity": 69.39327513659308,
"eval_runtime": 11.1029,
"eval_samples_per_second": 102.766,
"eval_steps_per_second": 1.621,
"step": 1500
},
{
"epoch": 0.8978675645342312,
"grad_norm": 0.9094557166099548,
"learning_rate": 9.115849856590598e-05,
"loss": 4.2651,
"step": 1600
},
{
"epoch": 0.9539842873176206,
"grad_norm": 0.8006865382194519,
"learning_rate": 8.991146028183066e-05,
"loss": 4.1573,
"step": 1700
},
{
"epoch": 1.0101010101010102,
"grad_norm": 0.8540999293327332,
"learning_rate": 8.866442199775533e-05,
"loss": 4.0896,
"step": 1800
},
{
"epoch": 1.0662177328843996,
"grad_norm": 0.8293948173522949,
"learning_rate": 8.741738371368002e-05,
"loss": 4.0039,
"step": 1900
},
{
"epoch": 1.122334455667789,
"grad_norm": 0.7962952256202698,
"learning_rate": 8.617034542960469e-05,
"loss": 3.9497,
"step": 2000
},
{
"epoch": 1.122334455667789,
"eval_accuracy": 0.35195670481639213,
"eval_bleu": 0.10543076899138493,
"eval_loss": 3.8878896236419678,
"eval_perplexity": 48.807774985506946,
"eval_runtime": 11.1215,
"eval_samples_per_second": 102.594,
"eval_steps_per_second": 1.618,
"step": 2000
},
{
"epoch": 1.1784511784511784,
"grad_norm": 0.7807645797729492,
"learning_rate": 8.492330714552937e-05,
"loss": 3.9058,
"step": 2100
},
{
"epoch": 1.2345679012345678,
"grad_norm": 0.7973741292953491,
"learning_rate": 8.367626886145406e-05,
"loss": 3.859,
"step": 2200
},
{
"epoch": 1.2906846240179575,
"grad_norm": 0.7875047922134399,
"learning_rate": 8.242923057737873e-05,
"loss": 3.84,
"step": 2300
},
{
"epoch": 1.3468013468013469,
"grad_norm": 0.7630696296691895,
"learning_rate": 8.11821922933034e-05,
"loss": 3.7989,
"step": 2400
},
{
"epoch": 1.4029180695847363,
"grad_norm": 0.7279968857765198,
"learning_rate": 7.993515400922809e-05,
"loss": 3.7614,
"step": 2500
},
{
"epoch": 1.4029180695847363,
"eval_accuracy": 0.367412783799089,
"eval_bleu": 0.12071702163720974,
"eval_loss": 3.7127673625946045,
"eval_perplexity": 40.967020402690565,
"eval_runtime": 11.2307,
"eval_samples_per_second": 101.596,
"eval_steps_per_second": 1.603,
"step": 2500
},
{
"epoch": 1.4590347923681257,
"grad_norm": 0.7240808606147766,
"learning_rate": 7.868811572515277e-05,
"loss": 3.7429,
"step": 2600
},
{
"epoch": 1.5151515151515151,
"grad_norm": 0.7098649144172668,
"learning_rate": 7.744107744107744e-05,
"loss": 3.7126,
"step": 2700
},
{
"epoch": 1.5712682379349046,
"grad_norm": 0.7229514122009277,
"learning_rate": 7.619403915700213e-05,
"loss": 3.6816,
"step": 2800
},
{
"epoch": 1.627384960718294,
"grad_norm": 0.7334359288215637,
"learning_rate": 7.49470008729268e-05,
"loss": 3.6683,
"step": 2900
},
{
"epoch": 1.6835016835016834,
"grad_norm": 0.7653447389602661,
"learning_rate": 7.369996258885148e-05,
"loss": 3.6543,
"step": 3000
},
{
"epoch": 1.6835016835016834,
"eval_accuracy": 0.37795900253846026,
"eval_bleu": 0.13095688045902315,
"eval_loss": 3.590174913406372,
"eval_perplexity": 36.24041430644117,
"eval_runtime": 11.0101,
"eval_samples_per_second": 103.632,
"eval_steps_per_second": 1.635,
"step": 3000
},
{
"epoch": 1.7396184062850728,
"grad_norm": 0.7163519263267517,
"learning_rate": 7.245292430477615e-05,
"loss": 3.621,
"step": 3100
},
{
"epoch": 1.7957351290684624,
"grad_norm": 0.7292365431785583,
"learning_rate": 7.120588602070084e-05,
"loss": 3.6054,
"step": 3200
},
{
"epoch": 1.8518518518518519,
"grad_norm": 0.7313345670700073,
"learning_rate": 6.995884773662552e-05,
"loss": 3.578,
"step": 3300
},
{
"epoch": 1.9079685746352413,
"grad_norm": 0.7751216292381287,
"learning_rate": 6.871180945255021e-05,
"loss": 3.5709,
"step": 3400
},
{
"epoch": 1.964085297418631,
"grad_norm": 0.7018395066261292,
"learning_rate": 6.746477116847487e-05,
"loss": 3.5527,
"step": 3500
},
{
"epoch": 1.964085297418631,
"eval_accuracy": 0.38635314154807526,
"eval_bleu": 0.13373604633044894,
"eval_loss": 3.504826545715332,
"eval_perplexity": 33.2756715439979,
"eval_runtime": 11.1003,
"eval_samples_per_second": 102.79,
"eval_steps_per_second": 1.622,
"step": 3500
},
{
"epoch": 2.0202020202020203,
"grad_norm": 0.7037595510482788,
"learning_rate": 6.621773288439955e-05,
"loss": 3.5173,
"step": 3600
},
{
"epoch": 2.0763187429854097,
"grad_norm": 0.745313286781311,
"learning_rate": 6.497069460032424e-05,
"loss": 3.4633,
"step": 3700
},
{
"epoch": 2.132435465768799,
"grad_norm": 0.7386572360992432,
"learning_rate": 6.372365631624892e-05,
"loss": 3.4408,
"step": 3800
},
{
"epoch": 2.1885521885521886,
"grad_norm": 0.709396243095398,
"learning_rate": 6.247661803217359e-05,
"loss": 3.4414,
"step": 3900
},
{
"epoch": 2.244668911335578,
"grad_norm": 0.7127860188484192,
"learning_rate": 6.122957974809826e-05,
"loss": 3.4348,
"step": 4000
},
{
"epoch": 2.244668911335578,
"eval_accuracy": 0.39228335487983224,
"eval_bleu": 0.13612405667589095,
"eval_loss": 3.4400899410247803,
"eval_perplexity": 31.189763281432384,
"eval_runtime": 11.0954,
"eval_samples_per_second": 102.835,
"eval_steps_per_second": 1.622,
"step": 4000
},
{
"epoch": 2.3007856341189674,
"grad_norm": 0.7178986668586731,
"learning_rate": 5.998254146402295e-05,
"loss": 3.4225,
"step": 4100
},
{
"epoch": 2.356902356902357,
"grad_norm": 0.701989471912384,
"learning_rate": 5.8735503179947625e-05,
"loss": 3.4018,
"step": 4200
},
{
"epoch": 2.4130190796857462,
"grad_norm": 0.7325447797775269,
"learning_rate": 5.748846489587231e-05,
"loss": 3.3989,
"step": 4300
},
{
"epoch": 2.4691358024691357,
"grad_norm": 0.7156170010566711,
"learning_rate": 5.624142661179699e-05,
"loss": 3.3827,
"step": 4400
},
{
"epoch": 2.525252525252525,
"grad_norm": 0.7117893099784851,
"learning_rate": 5.4994388327721666e-05,
"loss": 3.3739,
"step": 4500
},
{
"epoch": 2.525252525252525,
"eval_accuracy": 0.3973971143969165,
"eval_bleu": 0.1418527614095323,
"eval_loss": 3.3868210315704346,
"eval_perplexity": 29.57179488564003,
"eval_runtime": 10.9961,
"eval_samples_per_second": 103.765,
"eval_steps_per_second": 1.637,
"step": 4500
},
{
"epoch": 2.581369248035915,
"grad_norm": 0.7047144770622253,
"learning_rate": 5.374735004364634e-05,
"loss": 3.3787,
"step": 4600
},
{
"epoch": 2.637485970819304,
"grad_norm": 0.6755483746528625,
"learning_rate": 5.250031175957102e-05,
"loss": 3.3641,
"step": 4700
},
{
"epoch": 2.6936026936026938,
"grad_norm": 0.7206361889839172,
"learning_rate": 5.12532734754957e-05,
"loss": 3.3589,
"step": 4800
},
{
"epoch": 2.749719416386083,
"grad_norm": 0.6900231838226318,
"learning_rate": 5.000623519142038e-05,
"loss": 3.3492,
"step": 4900
},
{
"epoch": 2.8058361391694726,
"grad_norm": 0.7102543115615845,
"learning_rate": 4.8759196907345056e-05,
"loss": 3.3441,
"step": 5000
},
{
"epoch": 2.8058361391694726,
"eval_accuracy": 0.4019574330280841,
"eval_bleu": 0.13938720153734416,
"eval_loss": 3.3418636322021484,
"eval_perplexity": 28.271765813386498,
"eval_runtime": 11.1267,
"eval_samples_per_second": 102.546,
"eval_steps_per_second": 1.618,
"step": 5000
},
{
"epoch": 2.861952861952862,
"grad_norm": 0.7073910236358643,
"learning_rate": 4.751215862326974e-05,
"loss": 3.3419,
"step": 5100
},
{
"epoch": 2.9180695847362514,
"grad_norm": 0.6984754800796509,
"learning_rate": 4.626512033919442e-05,
"loss": 3.3203,
"step": 5200
},
{
"epoch": 2.974186307519641,
"grad_norm": 0.6974130272865295,
"learning_rate": 4.5018082055119096e-05,
"loss": 3.3243,
"step": 5300
},
{
"epoch": 3.0303030303030303,
"grad_norm": 0.7236476540565491,
"learning_rate": 4.3771043771043774e-05,
"loss": 3.2725,
"step": 5400
},
{
"epoch": 3.0864197530864197,
"grad_norm": 0.7239139080047607,
"learning_rate": 4.252400548696845e-05,
"loss": 3.2252,
"step": 5500
},
{
"epoch": 3.0864197530864197,
"eval_accuracy": 0.4057432771068235,
"eval_bleu": 0.1432212835687173,
"eval_loss": 3.306666374206543,
"eval_perplexity": 27.29398570860935,
"eval_runtime": 11.1335,
"eval_samples_per_second": 102.483,
"eval_steps_per_second": 1.617,
"step": 5500
},
{
"epoch": 3.142536475869809,
"grad_norm": 0.737918496131897,
"learning_rate": 4.127696720289313e-05,
"loss": 3.2316,
"step": 5600
},
{
"epoch": 3.1986531986531985,
"grad_norm": 0.7476251125335693,
"learning_rate": 4.002992891881781e-05,
"loss": 3.2276,
"step": 5700
},
{
"epoch": 3.254769921436588,
"grad_norm": 0.7400563359260559,
"learning_rate": 3.8782890634742486e-05,
"loss": 3.224,
"step": 5800
},
{
"epoch": 3.3108866442199774,
"grad_norm": 0.7361284494400024,
"learning_rate": 3.7535852350667164e-05,
"loss": 3.2252,
"step": 5900
},
{
"epoch": 3.3670033670033668,
"grad_norm": 0.7335928082466125,
"learning_rate": 3.628881406659185e-05,
"loss": 3.2188,
"step": 6000
},
{
"epoch": 3.3670033670033668,
"eval_accuracy": 0.4087632138295111,
"eval_bleu": 0.14205763499672933,
"eval_loss": 3.2775487899780273,
"eval_perplexity": 26.510709673638566,
"eval_runtime": 11.1182,
"eval_samples_per_second": 102.625,
"eval_steps_per_second": 1.619,
"step": 6000
},
{
"epoch": 3.4231200897867566,
"grad_norm": 0.7380982637405396,
"learning_rate": 3.504177578251652e-05,
"loss": 3.1985,
"step": 6100
},
{
"epoch": 3.479236812570146,
"grad_norm": 0.7403559684753418,
"learning_rate": 3.3794737498441205e-05,
"loss": 3.2097,
"step": 6200
},
{
"epoch": 3.5353535353535355,
"grad_norm": 0.7356846928596497,
"learning_rate": 3.254769921436588e-05,
"loss": 3.1995,
"step": 6300
},
{
"epoch": 3.591470258136925,
"grad_norm": 0.731614887714386,
"learning_rate": 3.130066093029056e-05,
"loss": 3.201,
"step": 6400
},
{
"epoch": 3.6475869809203143,
"grad_norm": 0.7287305593490601,
"learning_rate": 3.0053622646215242e-05,
"loss": 3.1971,
"step": 6500
},
{
"epoch": 3.6475869809203143,
"eval_accuracy": 0.4115484093714848,
"eval_bleu": 0.14263540857108656,
"eval_loss": 3.250213146209717,
"eval_perplexity": 25.795837616279766,
"eval_runtime": 11.1156,
"eval_samples_per_second": 102.648,
"eval_steps_per_second": 1.619,
"step": 6500
},
{
"epoch": 3.7037037037037037,
"grad_norm": 0.730910062789917,
"learning_rate": 2.880658436213992e-05,
"loss": 3.1895,
"step": 6600
},
{
"epoch": 3.759820426487093,
"grad_norm": 0.7339698672294617,
"learning_rate": 2.7559546078064598e-05,
"loss": 3.1804,
"step": 6700
},
{
"epoch": 3.8159371492704826,
"grad_norm": 0.7487326860427856,
"learning_rate": 2.6312507793989276e-05,
"loss": 3.171,
"step": 6800
},
{
"epoch": 3.872053872053872,
"grad_norm": 0.7359170913696289,
"learning_rate": 2.5065469509913957e-05,
"loss": 3.1629,
"step": 6900
},
{
"epoch": 3.9281705948372614,
"grad_norm": 0.7311303019523621,
"learning_rate": 2.3818431225838632e-05,
"loss": 3.1722,
"step": 7000
},
{
"epoch": 3.9281705948372614,
"eval_accuracy": 0.41430276300650337,
"eval_bleu": 0.14455082637881378,
"eval_loss": 3.2265915870666504,
"eval_perplexity": 25.193640134914105,
"eval_runtime": 11.1079,
"eval_samples_per_second": 102.719,
"eval_steps_per_second": 1.62,
"step": 7000
},
{
"epoch": 3.984287317620651,
"grad_norm": 0.740738034248352,
"learning_rate": 2.2571392941763313e-05,
"loss": 3.1681,
"step": 7100
},
{
"epoch": 4.040404040404041,
"grad_norm": 0.7563683986663818,
"learning_rate": 2.132435465768799e-05,
"loss": 3.1033,
"step": 7200
},
{
"epoch": 4.09652076318743,
"grad_norm": 0.7448651194572449,
"learning_rate": 2.007731637361267e-05,
"loss": 3.0994,
"step": 7300
},
{
"epoch": 4.1526374859708195,
"grad_norm": 0.7695390582084656,
"learning_rate": 1.883027808953735e-05,
"loss": 3.1011,
"step": 7400
},
{
"epoch": 4.2087542087542085,
"grad_norm": 0.7607565522193909,
"learning_rate": 1.758323980546203e-05,
"loss": 3.1052,
"step": 7500
},
{
"epoch": 4.2087542087542085,
"eval_accuracy": 0.41628521224800663,
"eval_bleu": 0.14328695475901032,
"eval_loss": 3.210294723510742,
"eval_perplexity": 24.78639028015837,
"eval_runtime": 11.0972,
"eval_samples_per_second": 102.819,
"eval_steps_per_second": 1.622,
"step": 7500
},
{
"epoch": 4.264870931537598,
"grad_norm": 0.766854465007782,
"learning_rate": 1.6336201521386706e-05,
"loss": 3.0911,
"step": 7600
},
{
"epoch": 4.320987654320987,
"grad_norm": 0.7667710185050964,
"learning_rate": 1.5089163237311384e-05,
"loss": 3.0779,
"step": 7700
},
{
"epoch": 4.377104377104377,
"grad_norm": 0.7539135813713074,
"learning_rate": 1.3842124953236066e-05,
"loss": 3.0881,
"step": 7800
},
{
"epoch": 4.433221099887767,
"grad_norm": 0.768229603767395,
"learning_rate": 1.2595086669160744e-05,
"loss": 3.0859,
"step": 7900
},
{
"epoch": 4.489337822671156,
"grad_norm": 0.7591537833213806,
"learning_rate": 1.1348048385085423e-05,
"loss": 3.0672,
"step": 8000
},
{
"epoch": 4.489337822671156,
"eval_accuracy": 0.41795752898068356,
"eval_bleu": 0.1437527142843717,
"eval_loss": 3.196687936782837,
"eval_perplexity": 24.45141131580561,
"eval_runtime": 11.1317,
"eval_samples_per_second": 102.5,
"eval_steps_per_second": 1.617,
"step": 8000
},
{
"epoch": 4.545454545454545,
"grad_norm": 0.7729194760322571,
"learning_rate": 1.0101010101010101e-05,
"loss": 3.0826,
"step": 8100
},
{
"epoch": 4.601571268237935,
"grad_norm": 0.7760916948318481,
"learning_rate": 8.853971816934781e-06,
"loss": 3.0755,
"step": 8200
},
{
"epoch": 4.657687991021325,
"grad_norm": 0.7464035153388977,
"learning_rate": 7.606933532859459e-06,
"loss": 3.0829,
"step": 8300
},
{
"epoch": 4.713804713804714,
"grad_norm": 0.7666931748390198,
"learning_rate": 6.359895248784138e-06,
"loss": 3.0744,
"step": 8400
},
{
"epoch": 4.7699214365881035,
"grad_norm": 0.7630265355110168,
"learning_rate": 5.112856964708817e-06,
"loss": 3.0774,
"step": 8500
},
{
"epoch": 4.7699214365881035,
"eval_accuracy": 0.41937625670061845,
"eval_bleu": 0.14605654521068695,
"eval_loss": 3.185894727706909,
"eval_perplexity": 24.18892122468071,
"eval_runtime": 11.0781,
"eval_samples_per_second": 102.996,
"eval_steps_per_second": 1.625,
"step": 8500
},
{
"epoch": 4.8260381593714925,
"grad_norm": 0.7553364038467407,
"learning_rate": 3.865818680633495e-06,
"loss": 3.0679,
"step": 8600
},
{
"epoch": 4.882154882154882,
"grad_norm": 0.7630147933959961,
"learning_rate": 2.6187803965581742e-06,
"loss": 3.0784,
"step": 8700
},
{
"epoch": 4.938271604938271,
"grad_norm": 0.7633751034736633,
"learning_rate": 1.3717421124828533e-06,
"loss": 3.0748,
"step": 8800
},
{
"epoch": 4.994388327721661,
"grad_norm": 0.7511401772499084,
"learning_rate": 1.2470382840753213e-07,
"loss": 3.0648,
"step": 8900
},
{
"epoch": 5.0,
"step": 8910,
"total_flos": 1.0586630697202483e+18,
"train_loss": 1.7646983184129434,
"train_runtime": 7367.504,
"train_samples_per_second": 77.363,
"train_steps_per_second": 1.209
}
],
"logging_steps": 100,
"max_steps": 8910,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 2,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0586630697202483e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}