{
"best_metric": 3.167205333709717,
"best_model_checkpoint": "./output/models/gpt2-wikitext/checkpoint-8500",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 8910,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05611672278338945,
"grad_norm": 1.7994316816329956,
"learning_rate": 1.1223344556677892e-05,
"loss": 8.9349,
"step": 100
},
{
"epoch": 0.1122334455667789,
"grad_norm": 1.2631176710128784,
"learning_rate": 2.2446689113355783e-05,
"loss": 7.3431,
"step": 200
},
{
"epoch": 0.16835016835016836,
"grad_norm": 1.3330615758895874,
"learning_rate": 3.3670033670033675e-05,
"loss": 6.6309,
"step": 300
},
{
"epoch": 0.2244668911335578,
"grad_norm": 1.3906015157699585,
"learning_rate": 4.4893378226711566e-05,
"loss": 6.3105,
"step": 400
},
{
"epoch": 0.28058361391694725,
"grad_norm": 1.6407803297042847,
"learning_rate": 5.611672278338945e-05,
"loss": 6.0797,
"step": 500
},
{
"epoch": 0.28058361391694725,
"eval_accuracy": 0.18855371160932213,
"eval_bleu": 0.03281611993021713,
"eval_loss": 5.951260566711426,
"eval_perplexity": 384.23739077136935,
"eval_runtime": 10.9097,
"eval_samples_per_second": 104.586,
"eval_steps_per_second": 1.65,
"step": 500
},
{
"epoch": 0.3367003367003367,
"grad_norm": 1.5170800685882568,
"learning_rate": 6.734006734006735e-05,
"loss": 5.8695,
"step": 600
},
{
"epoch": 0.39281705948372614,
"grad_norm": 1.4596922397613525,
"learning_rate": 7.856341189674523e-05,
"loss": 5.6562,
"step": 700
},
{
"epoch": 0.4489337822671156,
"grad_norm": 1.3204127550125122,
"learning_rate": 8.978675645342313e-05,
"loss": 5.477,
"step": 800
},
{
"epoch": 0.5050505050505051,
"grad_norm": 1.3912745714187622,
"learning_rate": 9.988776655443322e-05,
"loss": 5.2684,
"step": 900
},
{
"epoch": 0.5611672278338945,
"grad_norm": 1.1582123041152954,
"learning_rate": 9.864072827035791e-05,
"loss": 5.0682,
"step": 1000
},
{
"epoch": 0.5611672278338945,
"eval_accuracy": 0.26155564865242287,
"eval_bleu": 0.06397318988386062,
"eval_loss": 4.9242753982543945,
"eval_perplexity": 137.58960784355924,
"eval_runtime": 10.9977,
"eval_samples_per_second": 103.749,
"eval_steps_per_second": 1.637,
"step": 1000
},
{
"epoch": 0.6172839506172839,
"grad_norm": 1.1077975034713745,
"learning_rate": 9.73936899862826e-05,
"loss": 4.8918,
"step": 1100
},
{
"epoch": 0.6734006734006734,
"grad_norm": 1.0428259372711182,
"learning_rate": 9.614665170220725e-05,
"loss": 4.7372,
"step": 1200
},
{
"epoch": 0.7295173961840629,
"grad_norm": 1.0460771322250366,
"learning_rate": 9.489961341813194e-05,
"loss": 4.5964,
"step": 1300
},
{
"epoch": 0.7856341189674523,
"grad_norm": 0.9821613430976868,
"learning_rate": 9.365257513405662e-05,
"loss": 4.4684,
"step": 1400
},
{
"epoch": 0.8417508417508418,
"grad_norm": 1.0163836479187012,
"learning_rate": 9.24055368499813e-05,
"loss": 4.3376,
"step": 1500
},
{
"epoch": 0.8417508417508418,
"eval_accuracy": 0.32197580109711516,
"eval_bleu": 0.08373812406522842,
"eval_loss": 4.217575550079346,
"eval_perplexity": 67.86874030130332,
"eval_runtime": 10.9879,
"eval_samples_per_second": 103.842,
"eval_steps_per_second": 1.638,
"step": 1500
},
{
"epoch": 0.8978675645342312,
"grad_norm": 0.9277803897857666,
"learning_rate": 9.115849856590598e-05,
"loss": 4.2489,
"step": 1600
},
{
"epoch": 0.9539842873176206,
"grad_norm": 0.8715107440948486,
"learning_rate": 8.991146028183066e-05,
"loss": 4.1453,
"step": 1700
},
{
"epoch": 1.0101010101010102,
"grad_norm": 0.9395766854286194,
"learning_rate": 8.866442199775533e-05,
"loss": 4.0811,
"step": 1800
},
{
"epoch": 1.0662177328843996,
"grad_norm": 0.961445689201355,
"learning_rate": 8.741738371368002e-05,
"loss": 4.0001,
"step": 1900
},
{
"epoch": 1.122334455667789,
"grad_norm": 0.852159321308136,
"learning_rate": 8.617034542960469e-05,
"loss": 3.946,
"step": 2000
},
{
"epoch": 1.122334455667789,
"eval_accuracy": 0.3530944284951805,
"eval_bleu": 0.10624641853917971,
"eval_loss": 3.8834526538848877,
"eval_perplexity": 48.591696086246394,
"eval_runtime": 10.9772,
"eval_samples_per_second": 103.943,
"eval_steps_per_second": 1.64,
"step": 2000
},
{
"epoch": 1.1784511784511784,
"grad_norm": 0.8771251440048218,
"learning_rate": 8.492330714552937e-05,
"loss": 3.9036,
"step": 2100
},
{
"epoch": 1.2345679012345678,
"grad_norm": 0.8191014528274536,
"learning_rate": 8.367626886145406e-05,
"loss": 3.8563,
"step": 2200
},
{
"epoch": 1.2906846240179575,
"grad_norm": 0.764343798160553,
"learning_rate": 8.242923057737873e-05,
"loss": 3.8371,
"step": 2300
},
{
"epoch": 1.3468013468013469,
"grad_norm": 0.8013666272163391,
"learning_rate": 8.11821922933034e-05,
"loss": 3.7957,
"step": 2400
},
{
"epoch": 1.4029180695847363,
"grad_norm": 0.7934336066246033,
"learning_rate": 7.993515400922809e-05,
"loss": 3.7578,
"step": 2500
},
{
"epoch": 1.4029180695847363,
"eval_accuracy": 0.36828064079202016,
"eval_bleu": 0.123915580713345,
"eval_loss": 3.707221269607544,
"eval_perplexity": 40.74044239027585,
"eval_runtime": 11.022,
"eval_samples_per_second": 103.52,
"eval_steps_per_second": 1.633,
"step": 2500
},
{
"epoch": 1.4590347923681257,
"grad_norm": 0.7827987670898438,
"learning_rate": 7.868811572515277e-05,
"loss": 3.7391,
"step": 2600
},
{
"epoch": 1.5151515151515151,
"grad_norm": 0.7529945373535156,
"learning_rate": 7.744107744107744e-05,
"loss": 3.709,
"step": 2700
},
{
"epoch": 1.5712682379349046,
"grad_norm": 0.7922238111495972,
"learning_rate": 7.619403915700213e-05,
"loss": 3.6763,
"step": 2800
},
{
"epoch": 1.627384960718294,
"grad_norm": 0.7893389463424683,
"learning_rate": 7.49470008729268e-05,
"loss": 3.6622,
"step": 2900
},
{
"epoch": 1.6835016835016834,
"grad_norm": 0.7969167232513428,
"learning_rate": 7.369996258885148e-05,
"loss": 3.6484,
"step": 3000
},
{
"epoch": 1.6835016835016834,
"eval_accuracy": 0.37901191097312215,
"eval_bleu": 0.13115794166725414,
"eval_loss": 3.582406520843506,
"eval_perplexity": 35.959975232202034,
"eval_runtime": 10.9765,
"eval_samples_per_second": 103.95,
"eval_steps_per_second": 1.64,
"step": 3000
},
{
"epoch": 1.7396184062850728,
"grad_norm": 0.7236408591270447,
"learning_rate": 7.245292430477615e-05,
"loss": 3.6141,
"step": 3100
},
{
"epoch": 1.7957351290684624,
"grad_norm": 0.7503365278244019,
"learning_rate": 7.120588602070084e-05,
"loss": 3.598,
"step": 3200
},
{
"epoch": 1.8518518518518519,
"grad_norm": 0.7647961974143982,
"learning_rate": 6.995884773662552e-05,
"loss": 3.5699,
"step": 3300
},
{
"epoch": 1.9079685746352413,
"grad_norm": 0.8108649253845215,
"learning_rate": 6.871180945255021e-05,
"loss": 3.5629,
"step": 3400
},
{
"epoch": 1.964085297418631,
"grad_norm": 0.7428236603736877,
"learning_rate": 6.746477116847487e-05,
"loss": 3.5434,
"step": 3500
},
{
"epoch": 1.964085297418631,
"eval_accuracy": 0.3874129037398382,
"eval_bleu": 0.13535548737386568,
"eval_loss": 3.4942469596862793,
"eval_perplexity": 32.92548439803781,
"eval_runtime": 10.9847,
"eval_samples_per_second": 103.872,
"eval_steps_per_second": 1.639,
"step": 3500
},
{
"epoch": 2.0202020202020203,
"grad_norm": 0.727523148059845,
"learning_rate": 6.621773288439955e-05,
"loss": 3.5077,
"step": 3600
},
{
"epoch": 2.0763187429854097,
"grad_norm": 0.8040069937705994,
"learning_rate": 6.497069460032424e-05,
"loss": 3.453,
"step": 3700
},
{
"epoch": 2.132435465768799,
"grad_norm": 0.7441558241844177,
"learning_rate": 6.372365631624892e-05,
"loss": 3.4309,
"step": 3800
},
{
"epoch": 2.1885521885521886,
"grad_norm": 0.7570568323135376,
"learning_rate": 6.247661803217359e-05,
"loss": 3.431,
"step": 3900
},
{
"epoch": 2.244668911335578,
"grad_norm": 0.7841463088989258,
"learning_rate": 6.122957974809826e-05,
"loss": 3.4228,
"step": 4000
},
{
"epoch": 2.244668911335578,
"eval_accuracy": 0.3937826142457055,
"eval_bleu": 0.13319961357711083,
"eval_loss": 3.428004026412964,
"eval_perplexity": 30.81507525028359,
"eval_runtime": 10.9825,
"eval_samples_per_second": 103.893,
"eval_steps_per_second": 1.639,
"step": 4000
},
{
"epoch": 2.3007856341189674,
"grad_norm": 0.7675340175628662,
"learning_rate": 5.998254146402295e-05,
"loss": 3.4103,
"step": 4100
},
{
"epoch": 2.356902356902357,
"grad_norm": 0.7562149167060852,
"learning_rate": 5.8735503179947625e-05,
"loss": 3.389,
"step": 4200
},
{
"epoch": 2.4130190796857462,
"grad_norm": 0.7706029415130615,
"learning_rate": 5.748846489587231e-05,
"loss": 3.386,
"step": 4300
},
{
"epoch": 2.4691358024691357,
"grad_norm": 0.7378390431404114,
"learning_rate": 5.624142661179699e-05,
"loss": 3.3698,
"step": 4400
},
{
"epoch": 2.525252525252525,
"grad_norm": 0.7577961087226868,
"learning_rate": 5.4994388327721666e-05,
"loss": 3.3604,
"step": 4500
},
{
"epoch": 2.525252525252525,
"eval_accuracy": 0.39890065736097796,
"eval_bleu": 0.14087054617100203,
"eval_loss": 3.373204469680786,
"eval_perplexity": 29.171857773976008,
"eval_runtime": 11.067,
"eval_samples_per_second": 103.099,
"eval_steps_per_second": 1.626,
"step": 4500
},
{
"epoch": 2.581369248035915,
"grad_norm": 0.7494369745254517,
"learning_rate": 5.374735004364634e-05,
"loss": 3.3646,
"step": 4600
},
{
"epoch": 2.637485970819304,
"grad_norm": 0.7131938338279724,
"learning_rate": 5.250031175957102e-05,
"loss": 3.3498,
"step": 4700
},
{
"epoch": 2.6936026936026938,
"grad_norm": 0.7385434508323669,
"learning_rate": 5.12532734754957e-05,
"loss": 3.3446,
"step": 4800
},
{
"epoch": 2.749719416386083,
"grad_norm": 0.7146123051643372,
"learning_rate": 5.000623519142038e-05,
"loss": 3.3344,
"step": 4900
},
{
"epoch": 2.8058361391694726,
"grad_norm": 0.7290214896202087,
"learning_rate": 4.8759196907345056e-05,
"loss": 3.3288,
"step": 5000
},
{
"epoch": 2.8058361391694726,
"eval_accuracy": 0.4039090403626323,
"eval_bleu": 0.13955972441411385,
"eval_loss": 3.326815128326416,
"eval_perplexity": 27.849503214825326,
"eval_runtime": 10.9947,
"eval_samples_per_second": 103.777,
"eval_steps_per_second": 1.637,
"step": 5000
},
{
"epoch": 2.861952861952862,
"grad_norm": 0.7395574450492859,
"learning_rate": 4.751215862326974e-05,
"loss": 3.3267,
"step": 5100
},
{
"epoch": 2.9180695847362514,
"grad_norm": 0.742502748966217,
"learning_rate": 4.626512033919442e-05,
"loss": 3.3049,
"step": 5200
},
{
"epoch": 2.974186307519641,
"grad_norm": 0.736373782157898,
"learning_rate": 4.5018082055119096e-05,
"loss": 3.3088,
"step": 5300
},
{
"epoch": 3.0303030303030303,
"grad_norm": 0.7669495940208435,
"learning_rate": 4.3771043771043774e-05,
"loss": 3.2562,
"step": 5400
},
{
"epoch": 3.0864197530864197,
"grad_norm": 0.7537806034088135,
"learning_rate": 4.252400548696845e-05,
"loss": 3.2073,
"step": 5500
},
{
"epoch": 3.0864197530864197,
"eval_accuracy": 0.407890216518754,
"eval_bleu": 0.141043099350946,
"eval_loss": 3.288761615753174,
"eval_perplexity": 26.809642450961483,
"eval_runtime": 11.0034,
"eval_samples_per_second": 103.695,
"eval_steps_per_second": 1.636,
"step": 5500
},
{
"epoch": 3.142536475869809,
"grad_norm": 0.770527720451355,
"learning_rate": 4.127696720289313e-05,
"loss": 3.2138,
"step": 5600
},
{
"epoch": 3.1986531986531985,
"grad_norm": 0.7779518961906433,
"learning_rate": 4.002992891881781e-05,
"loss": 3.2101,
"step": 5700
},
{
"epoch": 3.254769921436588,
"grad_norm": 0.7703467607498169,
"learning_rate": 3.8782890634742486e-05,
"loss": 3.2071,
"step": 5800
},
{
"epoch": 3.3108866442199774,
"grad_norm": 0.7714940309524536,
"learning_rate": 3.7535852350667164e-05,
"loss": 3.2081,
"step": 5900
},
{
"epoch": 3.3670033670033668,
"grad_norm": 0.7608822584152222,
"learning_rate": 3.628881406659185e-05,
"loss": 3.2009,
"step": 6000
},
{
"epoch": 3.3670033670033668,
"eval_accuracy": 0.4109290010734697,
"eval_bleu": 0.14258105937027538,
"eval_loss": 3.2614388465881348,
"eval_perplexity": 26.087045408007207,
"eval_runtime": 10.961,
"eval_samples_per_second": 104.096,
"eval_steps_per_second": 1.642,
"step": 6000
},
{
"epoch": 3.4231200897867566,
"grad_norm": 0.7619470953941345,
"learning_rate": 3.504177578251652e-05,
"loss": 3.1806,
"step": 6100
},
{
"epoch": 3.479236812570146,
"grad_norm": 0.7633839845657349,
"learning_rate": 3.3794737498441205e-05,
"loss": 3.1914,
"step": 6200
},
{
"epoch": 3.5353535353535355,
"grad_norm": 0.7744373679161072,
"learning_rate": 3.254769921436588e-05,
"loss": 3.1814,
"step": 6300
},
{
"epoch": 3.591470258136925,
"grad_norm": 0.7579171061515808,
"learning_rate": 3.130066093029056e-05,
"loss": 3.183,
"step": 6400
},
{
"epoch": 3.6475869809203143,
"grad_norm": 0.7644525170326233,
"learning_rate": 3.0053622646215242e-05,
"loss": 3.1787,
"step": 6500
},
{
"epoch": 3.6475869809203143,
"eval_accuracy": 0.4140928666952811,
"eval_bleu": 0.13935180908429576,
"eval_loss": 3.232950210571289,
"eval_perplexity": 25.354347404276307,
"eval_runtime": 11.0007,
"eval_samples_per_second": 103.721,
"eval_steps_per_second": 1.636,
"step": 6500
},
{
"epoch": 3.7037037037037037,
"grad_norm": 0.7558349967002869,
"learning_rate": 2.880658436213992e-05,
"loss": 3.171,
"step": 6600
},
{
"epoch": 3.759820426487093,
"grad_norm": 0.749515950679779,
"learning_rate": 2.7559546078064598e-05,
"loss": 3.1616,
"step": 6700
},
{
"epoch": 3.8159371492704826,
"grad_norm": 0.7755558490753174,
"learning_rate": 2.6312507793989276e-05,
"loss": 3.1522,
"step": 6800
},
{
"epoch": 3.872053872053872,
"grad_norm": 0.7555833458900452,
"learning_rate": 2.5065469509913957e-05,
"loss": 3.1441,
"step": 6900
},
{
"epoch": 3.9281705948372614,
"grad_norm": 0.7530249357223511,
"learning_rate": 2.3818431225838632e-05,
"loss": 3.1528,
"step": 7000
},
{
"epoch": 3.9281705948372614,
"eval_accuracy": 0.41642143067039167,
"eval_bleu": 0.1459274363105479,
"eval_loss": 3.209371328353882,
"eval_perplexity": 24.763513211330427,
"eval_runtime": 10.9838,
"eval_samples_per_second": 103.88,
"eval_steps_per_second": 1.639,
"step": 7000
},
{
"epoch": 3.984287317620651,
"grad_norm": 0.7562186121940613,
"learning_rate": 2.2571392941763313e-05,
"loss": 3.1494,
"step": 7100
},
{
"epoch": 4.040404040404041,
"grad_norm": 0.7712120413780212,
"learning_rate": 2.132435465768799e-05,
"loss": 3.0834,
"step": 7200
},
{
"epoch": 4.09652076318743,
"grad_norm": 0.7722312808036804,
"learning_rate": 2.007731637361267e-05,
"loss": 3.0788,
"step": 7300
},
{
"epoch": 4.1526374859708195,
"grad_norm": 0.7912709712982178,
"learning_rate": 1.883027808953735e-05,
"loss": 3.0811,
"step": 7400
},
{
"epoch": 4.2087542087542085,
"grad_norm": 0.7807295918464661,
"learning_rate": 1.758323980546203e-05,
"loss": 3.0849,
"step": 7500
},
{
"epoch": 4.2087542087542085,
"eval_accuracy": 0.41817513576864457,
"eval_bleu": 0.14198542251109653,
"eval_loss": 3.1927247047424316,
"eval_perplexity": 24.35469647731234,
"eval_runtime": 10.9732,
"eval_samples_per_second": 103.98,
"eval_steps_per_second": 1.64,
"step": 7500
},
{
"epoch": 4.264870931537598,
"grad_norm": 0.7920138835906982,
"learning_rate": 1.6336201521386706e-05,
"loss": 3.0711,
"step": 7600
},
{
"epoch": 4.320987654320987,
"grad_norm": 0.7751849889755249,
"learning_rate": 1.5089163237311384e-05,
"loss": 3.0578,
"step": 7700
},
{
"epoch": 4.377104377104377,
"grad_norm": 0.780758261680603,
"learning_rate": 1.3842124953236066e-05,
"loss": 3.0678,
"step": 7800
},
{
"epoch": 4.433221099887767,
"grad_norm": 0.7852928638458252,
"learning_rate": 1.2595086669160744e-05,
"loss": 3.066,
"step": 7900
},
{
"epoch": 4.489337822671156,
"grad_norm": 0.7849876880645752,
"learning_rate": 1.1348048385085423e-05,
"loss": 3.0471,
"step": 8000
},
{
"epoch": 4.489337822671156,
"eval_accuracy": 0.4200282203448639,
"eval_bleu": 0.14760415379211805,
"eval_loss": 3.179920196533203,
"eval_perplexity": 24.04483461433579,
"eval_runtime": 10.9642,
"eval_samples_per_second": 104.066,
"eval_steps_per_second": 1.642,
"step": 8000
},
{
"epoch": 4.545454545454545,
"grad_norm": 0.8002632856369019,
"learning_rate": 1.0101010101010101e-05,
"loss": 3.0626,
"step": 8100
},
{
"epoch": 4.601571268237935,
"grad_norm": 0.7944212555885315,
"learning_rate": 8.853971816934781e-06,
"loss": 3.0554,
"step": 8200
},
{
"epoch": 4.657687991021325,
"grad_norm": 0.768937349319458,
"learning_rate": 7.606933532859459e-06,
"loss": 3.0624,
"step": 8300
},
{
"epoch": 4.713804713804714,
"grad_norm": 0.789129376411438,
"learning_rate": 6.359895248784138e-06,
"loss": 3.0546,
"step": 8400
},
{
"epoch": 4.7699214365881035,
"grad_norm": 0.7887497544288635,
"learning_rate": 5.112856964708817e-06,
"loss": 3.0571,
"step": 8500
},
{
"epoch": 4.7699214365881035,
"eval_accuracy": 0.4214486615040741,
"eval_bleu": 0.14741709794238567,
"eval_loss": 3.167205333709717,
"eval_perplexity": 23.741043266022093,
"eval_runtime": 11.017,
"eval_samples_per_second": 103.567,
"eval_steps_per_second": 1.634,
"step": 8500
},
{
"epoch": 4.8260381593714925,
"grad_norm": 0.7839872241020203,
"learning_rate": 3.865818680633495e-06,
"loss": 3.0474,
"step": 8600
},
{
"epoch": 4.882154882154882,
"grad_norm": 0.7838252782821655,
"learning_rate": 2.6187803965581742e-06,
"loss": 3.0579,
"step": 8700
},
{
"epoch": 4.938271604938271,
"grad_norm": 0.7778410911560059,
"learning_rate": 1.3717421124828533e-06,
"loss": 3.0544,
"step": 8800
},
{
"epoch": 4.994388327721661,
"grad_norm": 0.7718851566314697,
"learning_rate": 1.2470382840753213e-07,
"loss": 3.0444,
"step": 8900
},
{
"epoch": 5.0,
"step": 8910,
"total_flos": 1.0586630697202483e+18,
"train_loss": 3.7617398207436508,
"train_runtime": 13270.4719,
"train_samples_per_second": 42.95,
"train_steps_per_second": 0.671
}
],
"logging_steps": 100,
"max_steps": 8910,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 2,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0586630697202483e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
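
The JSON above is the standard trainer_state.json written by the Hugging Face transformers Trainer: run-level fields (best_metric, best_model_checkpoint, total_flos, train_batch_size, callback state) plus a log_history list that interleaves training-loss entries (every logging_steps = 100) with eval_* entries (every eval_steps = 500). Below is a minimal sketch, not part of the original file, of how such a state file could be loaded and its eval history summarized; the local path "trainer_state.json" and the printed summary format are assumptions for illustration only.

# Sketch: load a Trainer-produced trainer_state.json and summarize its eval history.
import json

with open("trainer_state.json") as f:  # hypothetical local path to the file above
    state = json.load(f)

# log_history entries either carry a training "loss" (every logging_steps)
# or eval_* metrics (every eval_steps); split them by key.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(eval_loss={state['best_metric']:.4f})")

for e in eval_logs:
    print(f"step {e['step']:>5}: eval_loss={e['eval_loss']:.3f}, "
          f"perplexity={e['eval_perplexity']:.2f}, "
          f"accuracy={e['eval_accuracy']:.3f}")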