{
"best_metric": 3.1860642433166504,
"best_model_checkpoint": "./output/models/parallel-mean-bottleneck-gpt2-medium-wikitext/checkpoint-8500",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 8910,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05611672278338945,
"grad_norm": 1.54179847240448,
"learning_rate": 1.1223344556677892e-05,
"loss": 8.933,
"step": 100
},
{
"epoch": 0.1122334455667789,
"grad_norm": 1.036051869392395,
"learning_rate": 2.2446689113355783e-05,
"loss": 7.3249,
"step": 200
},
{
"epoch": 0.16835016835016836,
"grad_norm": 1.0635498762130737,
"learning_rate": 3.3670033670033675e-05,
"loss": 6.6032,
"step": 300
},
{
"epoch": 0.2244668911335578,
"grad_norm": 1.8017141819000244,
"learning_rate": 4.4893378226711566e-05,
"loss": 6.276,
"step": 400
},
{
"epoch": 0.28058361391694725,
"grad_norm": 1.3540623188018799,
"learning_rate": 5.611672278338945e-05,
"loss": 6.0438,
"step": 500
},
{
"epoch": 0.28058361391694725,
"eval_accuracy": 0.189721420475428,
"eval_bleu": 0.03593862692187448,
"eval_loss": 5.919970989227295,
"eval_perplexity": 372.40091008131225,
"eval_runtime": 11.0176,
"eval_samples_per_second": 103.562,
"eval_steps_per_second": 1.634,
"step": 500
},
{
"epoch": 0.3367003367003367,
"grad_norm": 1.158894658088684,
"learning_rate": 6.734006734006735e-05,
"loss": 5.8447,
"step": 600
},
{
"epoch": 0.39281705948372614,
"grad_norm": 1.1720908880233765,
"learning_rate": 7.856341189674523e-05,
"loss": 5.6413,
"step": 700
},
{
"epoch": 0.4489337822671156,
"grad_norm": 1.2613935470581055,
"learning_rate": 8.978675645342313e-05,
"loss": 5.4607,
"step": 800
},
{
"epoch": 0.5050505050505051,
"grad_norm": 1.0967986583709717,
"learning_rate": 9.988776655443322e-05,
"loss": 5.2463,
"step": 900
},
{
"epoch": 0.5611672278338945,
"grad_norm": 1.0036829710006714,
"learning_rate": 9.864072827035791e-05,
"loss": 5.0422,
"step": 1000
},
{
"epoch": 0.5611672278338945,
"eval_accuracy": 0.2635603726045048,
"eval_bleu": 0.06103039117966378,
"eval_loss": 4.893420696258545,
"eval_perplexity": 133.40914658800884,
"eval_runtime": 11.0697,
"eval_samples_per_second": 103.074,
"eval_steps_per_second": 1.626,
"step": 1000
},
{
"epoch": 0.6172839506172839,
"grad_norm": 0.9532070159912109,
"learning_rate": 9.73936899862826e-05,
"loss": 4.8675,
"step": 1100
},
{
"epoch": 0.6734006734006734,
"grad_norm": 0.9311050772666931,
"learning_rate": 9.614665170220725e-05,
"loss": 4.7157,
"step": 1200
},
{
"epoch": 0.7295173961840629,
"grad_norm": 0.9085267782211304,
"learning_rate": 9.489961341813194e-05,
"loss": 4.5856,
"step": 1300
},
{
"epoch": 0.7856341189674523,
"grad_norm": 0.8684507608413696,
"learning_rate": 9.365257513405662e-05,
"loss": 4.4711,
"step": 1400
},
{
"epoch": 0.8417508417508418,
"grad_norm": 0.8369265794754028,
"learning_rate": 9.24055368499813e-05,
"loss": 4.3494,
"step": 1500
},
{
"epoch": 0.8417508417508418,
"eval_accuracy": 0.318285052898154,
"eval_bleu": 0.08333640625132697,
"eval_loss": 4.238930702209473,
"eval_perplexity": 69.33367384239048,
"eval_runtime": 11.0928,
"eval_samples_per_second": 102.859,
"eval_steps_per_second": 1.623,
"step": 1500
},
{
"epoch": 0.8978675645342312,
"grad_norm": 0.882328987121582,
"learning_rate": 9.115849856590598e-05,
"loss": 4.2632,
"step": 1600
},
{
"epoch": 0.9539842873176206,
"grad_norm": 0.8303613662719727,
"learning_rate": 8.991146028183066e-05,
"loss": 4.1548,
"step": 1700
},
{
"epoch": 1.0101010101010102,
"grad_norm": 0.8696436285972595,
"learning_rate": 8.866442199775533e-05,
"loss": 4.0877,
"step": 1800
},
{
"epoch": 1.0662177328843996,
"grad_norm": 0.8291804194450378,
"learning_rate": 8.741738371368002e-05,
"loss": 4.0032,
"step": 1900
},
{
"epoch": 1.122334455667789,
"grad_norm": 0.7963048815727234,
"learning_rate": 8.617034542960469e-05,
"loss": 3.9486,
"step": 2000
},
{
"epoch": 1.122334455667789,
"eval_accuracy": 0.352146039856311,
"eval_bleu": 0.10372829354298996,
"eval_loss": 3.885582208633423,
"eval_perplexity": 48.695285023394725,
"eval_runtime": 11.0694,
"eval_samples_per_second": 103.077,
"eval_steps_per_second": 1.626,
"step": 2000
},
{
"epoch": 1.1784511784511784,
"grad_norm": 0.7901706099510193,
"learning_rate": 8.492330714552937e-05,
"loss": 3.9051,
"step": 2100
},
{
"epoch": 1.2345679012345678,
"grad_norm": 0.8067757487297058,
"learning_rate": 8.367626886145406e-05,
"loss": 3.8583,
"step": 2200
},
{
"epoch": 1.2906846240179575,
"grad_norm": 0.7848672866821289,
"learning_rate": 8.242923057737873e-05,
"loss": 3.8389,
"step": 2300
},
{
"epoch": 1.3468013468013469,
"grad_norm": 0.7620055675506592,
"learning_rate": 8.11821922933034e-05,
"loss": 3.7985,
"step": 2400
},
{
"epoch": 1.4029180695847363,
"grad_norm": 0.7321527600288391,
"learning_rate": 7.993515400922809e-05,
"loss": 3.7605,
"step": 2500
},
{
"epoch": 1.4029180695847363,
"eval_accuracy": 0.36705039139236645,
"eval_bleu": 0.12056364590808125,
"eval_loss": 3.7143070697784424,
"eval_perplexity": 41.03014620345847,
"eval_runtime": 11.0834,
"eval_samples_per_second": 102.947,
"eval_steps_per_second": 1.624,
"step": 2500
},
{
"epoch": 1.4590347923681257,
"grad_norm": 0.7496013641357422,
"learning_rate": 7.868811572515277e-05,
"loss": 3.7426,
"step": 2600
},
{
"epoch": 1.5151515151515151,
"grad_norm": 0.7082226872444153,
"learning_rate": 7.744107744107744e-05,
"loss": 3.7123,
"step": 2700
},
{
"epoch": 1.5712682379349046,
"grad_norm": 0.7311062812805176,
"learning_rate": 7.619403915700213e-05,
"loss": 3.6816,
"step": 2800
},
{
"epoch": 1.627384960718294,
"grad_norm": 0.726064920425415,
"learning_rate": 7.49470008729268e-05,
"loss": 3.6676,
"step": 2900
},
{
"epoch": 1.6835016835016834,
"grad_norm": 0.7564496994018555,
"learning_rate": 7.369996258885148e-05,
"loss": 3.6544,
"step": 3000
},
{
"epoch": 1.6835016835016834,
"eval_accuracy": 0.37813634350345215,
"eval_bleu": 0.13317535043978146,
"eval_loss": 3.589838981628418,
"eval_perplexity": 36.22824204426872,
"eval_runtime": 11.0756,
"eval_samples_per_second": 103.019,
"eval_steps_per_second": 1.625,
"step": 3000
},
{
"epoch": 1.7396184062850728,
"grad_norm": 0.7231422066688538,
"learning_rate": 7.245292430477615e-05,
"loss": 3.6204,
"step": 3100
},
{
"epoch": 1.7957351290684624,
"grad_norm": 0.7412242293357849,
"learning_rate": 7.120588602070084e-05,
"loss": 3.605,
"step": 3200
},
{
"epoch": 1.8518518518518519,
"grad_norm": 0.7535395622253418,
"learning_rate": 6.995884773662552e-05,
"loss": 3.5779,
"step": 3300
},
{
"epoch": 1.9079685746352413,
"grad_norm": 0.7545406222343445,
"learning_rate": 6.871180945255021e-05,
"loss": 3.571,
"step": 3400
},
{
"epoch": 1.964085297418631,
"grad_norm": 0.716988742351532,
"learning_rate": 6.746477116847487e-05,
"loss": 3.5527,
"step": 3500
},
{
"epoch": 1.964085297418631,
"eval_accuracy": 0.3861560960314176,
"eval_bleu": 0.13491971280952403,
"eval_loss": 3.505063772201538,
"eval_perplexity": 33.283566351026074,
"eval_runtime": 11.0804,
"eval_samples_per_second": 102.974,
"eval_steps_per_second": 1.624,
"step": 3500
},
{
"epoch": 2.0202020202020203,
"grad_norm": 0.6990429759025574,
"learning_rate": 6.621773288439955e-05,
"loss": 3.517,
"step": 3600
},
{
"epoch": 2.0763187429854097,
"grad_norm": 0.7404659390449524,
"learning_rate": 6.497069460032424e-05,
"loss": 3.4636,
"step": 3700
},
{
"epoch": 2.132435465768799,
"grad_norm": 0.7441074848175049,
"learning_rate": 6.372365631624892e-05,
"loss": 3.4411,
"step": 3800
},
{
"epoch": 2.1885521885521886,
"grad_norm": 0.701506495475769,
"learning_rate": 6.247661803217359e-05,
"loss": 3.4414,
"step": 3900
},
{
"epoch": 2.244668911335578,
"grad_norm": 0.7056393027305603,
"learning_rate": 6.122957974809826e-05,
"loss": 3.4346,
"step": 4000
},
{
"epoch": 2.244668911335578,
"eval_accuracy": 0.39188926384651696,
"eval_bleu": 0.1334943569993854,
"eval_loss": 3.4409983158111572,
"eval_perplexity": 31.218108147922273,
"eval_runtime": 11.0871,
"eval_samples_per_second": 102.912,
"eval_steps_per_second": 1.624,
"step": 4000
},
{
"epoch": 2.3007856341189674,
"grad_norm": 0.7123896479606628,
"learning_rate": 5.998254146402295e-05,
"loss": 3.4223,
"step": 4100
},
{
"epoch": 2.356902356902357,
"grad_norm": 0.7008007168769836,
"learning_rate": 5.8735503179947625e-05,
"loss": 3.4019,
"step": 4200
},
{
"epoch": 2.4130190796857462,
"grad_norm": 0.7202064394950867,
"learning_rate": 5.748846489587231e-05,
"loss": 3.3989,
"step": 4300
},
{
"epoch": 2.4691358024691357,
"grad_norm": 0.7087401151657104,
"learning_rate": 5.624142661179699e-05,
"loss": 3.3828,
"step": 4400
},
{
"epoch": 2.525252525252525,
"grad_norm": 0.7096906304359436,
"learning_rate": 5.4994388327721666e-05,
"loss": 3.374,
"step": 4500
},
{
"epoch": 2.525252525252525,
"eval_accuracy": 0.3972454750210539,
"eval_bleu": 0.13536753244366495,
"eval_loss": 3.3866658210754395,
"eval_perplexity": 29.56720538889618,
"eval_runtime": 11.1089,
"eval_samples_per_second": 102.71,
"eval_steps_per_second": 1.62,
"step": 4500
},
{
"epoch": 2.581369248035915,
"grad_norm": 0.7192590832710266,
"learning_rate": 5.374735004364634e-05,
"loss": 3.3785,
"step": 4600
},
{
"epoch": 2.637485970819304,
"grad_norm": 0.6848036646842957,
"learning_rate": 5.250031175957102e-05,
"loss": 3.3642,
"step": 4700
},
{
"epoch": 2.6936026936026938,
"grad_norm": 0.7250380516052246,
"learning_rate": 5.12532734754957e-05,
"loss": 3.3589,
"step": 4800
},
{
"epoch": 2.749719416386083,
"grad_norm": 0.6998625993728638,
"learning_rate": 5.000623519142038e-05,
"loss": 3.3491,
"step": 4900
},
{
"epoch": 2.8058361391694726,
"grad_norm": 0.7044927477836609,
"learning_rate": 4.8759196907345056e-05,
"loss": 3.3442,
"step": 5000
},
{
"epoch": 2.8058361391694726,
"eval_accuracy": 0.40168071258512583,
"eval_bleu": 0.14053869067204397,
"eval_loss": 3.340980052947998,
"eval_perplexity": 28.24679650044353,
"eval_runtime": 11.0994,
"eval_samples_per_second": 102.798,
"eval_steps_per_second": 1.622,
"step": 5000
},
{
"epoch": 2.861952861952862,
"grad_norm": 0.7032910585403442,
"learning_rate": 4.751215862326974e-05,
"loss": 3.342,
"step": 5100
},
{
"epoch": 2.9180695847362514,
"grad_norm": 0.6985570788383484,
"learning_rate": 4.626512033919442e-05,
"loss": 3.3205,
"step": 5200
},
{
"epoch": 2.974186307519641,
"grad_norm": 0.7079312801361084,
"learning_rate": 4.5018082055119096e-05,
"loss": 3.3244,
"step": 5300
},
{
"epoch": 3.0303030303030303,
"grad_norm": 0.7505896687507629,
"learning_rate": 4.3771043771043774e-05,
"loss": 3.2727,
"step": 5400
},
{
"epoch": 3.0864197530864197,
"grad_norm": 0.7251821756362915,
"learning_rate": 4.252400548696845e-05,
"loss": 3.2251,
"step": 5500
},
{
"epoch": 3.0864197530864197,
"eval_accuracy": 0.40551710312248607,
"eval_bleu": 0.14035240790883957,
"eval_loss": 3.3072268962860107,
"eval_perplexity": 27.3092888787174,
"eval_runtime": 11.1046,
"eval_samples_per_second": 102.75,
"eval_steps_per_second": 1.621,
"step": 5500
},
{
"epoch": 3.142536475869809,
"grad_norm": 0.7410485744476318,
"learning_rate": 4.127696720289313e-05,
"loss": 3.2314,
"step": 5600
},
{
"epoch": 3.1986531986531985,
"grad_norm": 0.7442547678947449,
"learning_rate": 4.002992891881781e-05,
"loss": 3.2277,
"step": 5700
},
{
"epoch": 3.254769921436588,
"grad_norm": 0.7330621480941772,
"learning_rate": 3.8782890634742486e-05,
"loss": 3.2246,
"step": 5800
},
{
"epoch": 3.3108866442199774,
"grad_norm": 0.7348044514656067,
"learning_rate": 3.7535852350667164e-05,
"loss": 3.2255,
"step": 5900
},
{
"epoch": 3.3670033670033668,
"grad_norm": 0.7259684801101685,
"learning_rate": 3.628881406659185e-05,
"loss": 3.2187,
"step": 6000
},
{
"epoch": 3.3670033670033668,
"eval_accuracy": 0.40877263774552514,
"eval_bleu": 0.14006482323692013,
"eval_loss": 3.278057813644409,
"eval_perplexity": 26.524207687387797,
"eval_runtime": 11.0977,
"eval_samples_per_second": 102.814,
"eval_steps_per_second": 1.622,
"step": 6000
},
{
"epoch": 3.4231200897867566,
"grad_norm": 0.7419958710670471,
"learning_rate": 3.504177578251652e-05,
"loss": 3.1992,
"step": 6100
},
{
"epoch": 3.479236812570146,
"grad_norm": 0.7455360889434814,
"learning_rate": 3.3794737498441205e-05,
"loss": 3.2099,
"step": 6200
},
{
"epoch": 3.5353535353535355,
"grad_norm": 0.7269027829170227,
"learning_rate": 3.254769921436588e-05,
"loss": 3.1998,
"step": 6300
},
{
"epoch": 3.591470258136925,
"grad_norm": 0.7311801910400391,
"learning_rate": 3.130066093029056e-05,
"loss": 3.2011,
"step": 6400
},
{
"epoch": 3.6475869809203143,
"grad_norm": 0.7334641218185425,
"learning_rate": 3.0053622646215242e-05,
"loss": 3.1975,
"step": 6500
},
{
"epoch": 3.6475869809203143,
"eval_accuracy": 0.41177629679509753,
"eval_bleu": 0.14331769757778526,
"eval_loss": 3.2494168281555176,
"eval_perplexity": 25.775304101751964,
"eval_runtime": 11.123,
"eval_samples_per_second": 102.581,
"eval_steps_per_second": 1.618,
"step": 6500
},
{
"epoch": 3.7037037037037037,
"grad_norm": 0.7255159616470337,
"learning_rate": 2.880658436213992e-05,
"loss": 3.1898,
"step": 6600
},
{
"epoch": 3.759820426487093,
"grad_norm": 0.730241060256958,
"learning_rate": 2.7559546078064598e-05,
"loss": 3.1806,
"step": 6700
},
{
"epoch": 3.8159371492704826,
"grad_norm": 0.7587730884552002,
"learning_rate": 2.6312507793989276e-05,
"loss": 3.1713,
"step": 6800
},
{
"epoch": 3.872053872053872,
"grad_norm": 0.7403008937835693,
"learning_rate": 2.5065469509913957e-05,
"loss": 3.1627,
"step": 6900
},
{
"epoch": 3.9281705948372614,
"grad_norm": 0.7402880191802979,
"learning_rate": 2.3818431225838632e-05,
"loss": 3.172,
"step": 7000
},
{
"epoch": 3.9281705948372614,
"eval_accuracy": 0.41417168490194417,
"eval_bleu": 0.14449203518513976,
"eval_loss": 3.227550506591797,
"eval_perplexity": 25.21781039516413,
"eval_runtime": 11.0586,
"eval_samples_per_second": 103.178,
"eval_steps_per_second": 1.628,
"step": 7000
},
{
"epoch": 3.984287317620651,
"grad_norm": 0.7368999123573303,
"learning_rate": 2.2571392941763313e-05,
"loss": 3.1682,
"step": 7100
},
{
"epoch": 4.040404040404041,
"grad_norm": 0.757147490978241,
"learning_rate": 2.132435465768799e-05,
"loss": 3.1035,
"step": 7200
},
{
"epoch": 4.09652076318743,
"grad_norm": 0.7439301609992981,
"learning_rate": 2.007731637361267e-05,
"loss": 3.0995,
"step": 7300
},
{
"epoch": 4.1526374859708195,
"grad_norm": 0.7797232866287231,
"learning_rate": 1.883027808953735e-05,
"loss": 3.1013,
"step": 7400
},
{
"epoch": 4.2087542087542085,
"grad_norm": 0.7685579657554626,
"learning_rate": 1.758323980546203e-05,
"loss": 3.1055,
"step": 7500
},
{
"epoch": 4.2087542087542085,
"eval_accuracy": 0.41630834367822295,
"eval_bleu": 0.1447371014434901,
"eval_loss": 3.2109010219573975,
"eval_perplexity": 24.801422786715616,
"eval_runtime": 11.144,
"eval_samples_per_second": 102.387,
"eval_steps_per_second": 1.615,
"step": 7500
},
{
"epoch": 4.264870931537598,
"grad_norm": 0.7712327241897583,
"learning_rate": 1.6336201521386706e-05,
"loss": 3.0911,
"step": 7600
},
{
"epoch": 4.320987654320987,
"grad_norm": 0.7560853958129883,
"learning_rate": 1.5089163237311384e-05,
"loss": 3.0781,
"step": 7700
},
{
"epoch": 4.377104377104377,
"grad_norm": 0.7597346901893616,
"learning_rate": 1.3842124953236066e-05,
"loss": 3.0883,
"step": 7800
},
{
"epoch": 4.433221099887767,
"grad_norm": 0.7666236162185669,
"learning_rate": 1.2595086669160744e-05,
"loss": 3.0862,
"step": 7900
},
{
"epoch": 4.489337822671156,
"grad_norm": 0.7614879608154297,
"learning_rate": 1.1348048385085423e-05,
"loss": 3.0676,
"step": 8000
},
{
"epoch": 4.489337822671156,
"eval_accuracy": 0.4178333046332255,
"eval_bleu": 0.14527019621911616,
"eval_loss": 3.197705030441284,
"eval_perplexity": 24.476293342725903,
"eval_runtime": 11.0951,
"eval_samples_per_second": 102.838,
"eval_steps_per_second": 1.622,
"step": 8000
},
{
"epoch": 4.545454545454545,
"grad_norm": 0.7722211480140686,
"learning_rate": 1.0101010101010101e-05,
"loss": 3.0831,
"step": 8100
},
{
"epoch": 4.601571268237935,
"grad_norm": 0.7750692963600159,
"learning_rate": 8.853971816934781e-06,
"loss": 3.0761,
"step": 8200
},
{
"epoch": 4.657687991021325,
"grad_norm": 0.7448268532752991,
"learning_rate": 7.606933532859459e-06,
"loss": 3.0831,
"step": 8300
},
{
"epoch": 4.713804713804714,
"grad_norm": 0.7668105959892273,
"learning_rate": 6.359895248784138e-06,
"loss": 3.0747,
"step": 8400
},
{
"epoch": 4.7699214365881035,
"grad_norm": 0.768974244594574,
"learning_rate": 5.112856964708817e-06,
"loss": 3.0779,
"step": 8500
},
{
"epoch": 4.7699214365881035,
"eval_accuracy": 0.41932913712054815,
"eval_bleu": 0.1439933070551526,
"eval_loss": 3.1860642433166504,
"eval_perplexity": 24.19302197197161,
"eval_runtime": 11.1043,
"eval_samples_per_second": 102.753,
"eval_steps_per_second": 1.621,
"step": 8500
},
{
"epoch": 4.8260381593714925,
"grad_norm": 0.7649372816085815,
"learning_rate": 3.865818680633495e-06,
"loss": 3.0682,
"step": 8600
},
{
"epoch": 4.882154882154882,
"grad_norm": 0.7638269066810608,
"learning_rate": 2.6187803965581742e-06,
"loss": 3.0788,
"step": 8700
},
{
"epoch": 4.938271604938271,
"grad_norm": 0.7597787380218506,
"learning_rate": 1.3717421124828533e-06,
"loss": 3.0747,
"step": 8800
},
{
"epoch": 4.994388327721661,
"grad_norm": 0.7531468272209167,
"learning_rate": 1.2470382840753213e-07,
"loss": 3.0651,
"step": 8900
},
{
"epoch": 5.0,
"step": 8910,
"total_flos": 1.0586630697202483e+18,
"train_loss": 3.7704881070840237,
"train_runtime": 13409.8958,
"train_samples_per_second": 42.504,
"train_steps_per_second": 0.664
}
],
"logging_steps": 100,
"max_steps": 8910,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 2,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0586630697202483e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
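
The JSON above appears to be the trainer_state.json that the Hugging Face Trainer writes next to each checkpoint: log_history holds the per-100-step training logs and the per-500-step evaluation logs, and best_metric / best_model_checkpoint record the lowest eval_loss seen (3.186 at checkpoint-8500). Below is a minimal sketch of how this file might be inspected; the local file path and the exp-of-loss check are assumptions inferred from the fields shown above, not part of the original file.

import json
import math

# Assumption: the JSON above has been saved locally as trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Split the log history into per-step training logs and evaluation logs.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

# In these logs, eval_perplexity tracks the exponential of eval_loss
# (e.g. exp(3.18606) ~= 24.19); verify that relationship holds throughout.
for entry in eval_logs:
    assert math.isclose(entry["eval_perplexity"],
                        math.exp(entry["eval_loss"]), rel_tol=1e-3)

print("training log entries:", len(train_logs))
print("final training loss: ", train_logs[-1]["loss"])
print("best eval loss:      ", state["best_metric"])
print("best checkpoint:     ", state["best_model_checkpoint"])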