CouplingSmells-Detection-Adpater / trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 100,
"global_step": 1728,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.017361111111111112,
"grad_norm": 2.007795810699463,
"learning_rate": 5.202312138728324e-06,
"loss": 1.463,
"step": 10
},
{
"epoch": 0.034722222222222224,
"grad_norm": 1.1297450065612793,
"learning_rate": 1.0982658959537573e-05,
"loss": 1.2884,
"step": 20
},
{
"epoch": 0.052083333333333336,
"grad_norm": 0.774868369102478,
"learning_rate": 1.676300578034682e-05,
"loss": 1.0796,
"step": 30
},
{
"epoch": 0.06944444444444445,
"grad_norm": 0.5096573233604431,
"learning_rate": 2.254335260115607e-05,
"loss": 0.8443,
"step": 40
},
{
"epoch": 0.08680555555555555,
"grad_norm": 0.6606957316398621,
"learning_rate": 2.832369942196532e-05,
"loss": 0.756,
"step": 50
},
{
"epoch": 0.10416666666666667,
"grad_norm": 0.8858688473701477,
"learning_rate": 3.410404624277457e-05,
"loss": 0.6759,
"step": 60
},
{
"epoch": 0.12152777777777778,
"grad_norm": 1.686498999595642,
"learning_rate": 3.988439306358382e-05,
"loss": 0.636,
"step": 70
},
{
"epoch": 0.1388888888888889,
"grad_norm": 0.681108295917511,
"learning_rate": 4.566473988439307e-05,
"loss": 0.5915,
"step": 80
},
{
"epoch": 0.15625,
"grad_norm": 0.657633900642395,
"learning_rate": 5.1445086705202317e-05,
"loss": 0.6144,
"step": 90
},
{
"epoch": 0.1736111111111111,
"grad_norm": 0.9662413001060486,
"learning_rate": 5.722543352601156e-05,
"loss": 0.656,
"step": 100
},
{
"epoch": 0.1736111111111111,
"eval_loss": 0.5766947269439697,
"eval_runtime": 513.1213,
"eval_samples_per_second": 2.247,
"eval_steps_per_second": 2.247,
"step": 100
},
{
"epoch": 0.1909722222222222,
"grad_norm": 0.6163507699966431,
"learning_rate": 6.300578034682081e-05,
"loss": 0.5819,
"step": 110
},
{
"epoch": 0.20833333333333334,
"grad_norm": 0.7107399106025696,
"learning_rate": 6.878612716763007e-05,
"loss": 0.5718,
"step": 120
},
{
"epoch": 0.22569444444444445,
"grad_norm": 0.6259875893592834,
"learning_rate": 7.456647398843931e-05,
"loss": 0.658,
"step": 130
},
{
"epoch": 0.24305555555555555,
"grad_norm": 0.5605979561805725,
"learning_rate": 8.034682080924855e-05,
"loss": 0.549,
"step": 140
},
{
"epoch": 0.2604166666666667,
"grad_norm": 0.5297986268997192,
"learning_rate": 8.612716763005781e-05,
"loss": 0.5671,
"step": 150
},
{
"epoch": 0.2777777777777778,
"grad_norm": 0.5555981397628784,
"learning_rate": 9.190751445086706e-05,
"loss": 0.5569,
"step": 160
},
{
"epoch": 0.2951388888888889,
"grad_norm": 0.5598844885826111,
"learning_rate": 9.768786127167631e-05,
"loss": 0.5862,
"step": 170
},
{
"epoch": 0.3125,
"grad_norm": 0.5559885501861572,
"learning_rate": 9.999632653293776e-05,
"loss": 0.5738,
"step": 180
},
{
"epoch": 0.3298611111111111,
"grad_norm": 0.4869542717933655,
"learning_rate": 9.99738795222662e-05,
"loss": 0.5789,
"step": 190
},
{
"epoch": 0.3472222222222222,
"grad_norm": 0.5302107334136963,
"learning_rate": 9.993103546669093e-05,
"loss": 0.5695,
"step": 200
},
{
"epoch": 0.3472222222222222,
"eval_loss": 0.5406240224838257,
"eval_runtime": 515.9751,
"eval_samples_per_second": 2.235,
"eval_steps_per_second": 2.235,
"step": 200
},
{
"epoch": 0.3645833333333333,
"grad_norm": 0.4969506561756134,
"learning_rate": 9.98678118531898e-05,
"loss": 0.5943,
"step": 210
},
{
"epoch": 0.3819444444444444,
"grad_norm": 0.5448970794677734,
"learning_rate": 9.97842344867413e-05,
"loss": 0.5719,
"step": 220
},
{
"epoch": 0.3993055555555556,
"grad_norm": 0.5558710098266602,
"learning_rate": 9.968033747979195e-05,
"loss": 0.5303,
"step": 230
},
{
"epoch": 0.4166666666666667,
"grad_norm": 0.5268853902816772,
"learning_rate": 9.955616323833343e-05,
"loss": 0.5646,
"step": 240
},
{
"epoch": 0.4340277777777778,
"grad_norm": 0.5985867381095886,
"learning_rate": 9.941176244459418e-05,
"loss": 0.5558,
"step": 250
},
{
"epoch": 0.4513888888888889,
"grad_norm": 0.37411603331565857,
"learning_rate": 9.924719403635333e-05,
"loss": 0.5372,
"step": 260
},
{
"epoch": 0.46875,
"grad_norm": 0.4203139841556549,
"learning_rate": 9.906252518288501e-05,
"loss": 0.5492,
"step": 270
},
{
"epoch": 0.4861111111111111,
"grad_norm": 0.5267091393470764,
"learning_rate": 9.885783125754291e-05,
"loss": 0.5233,
"step": 280
},
{
"epoch": 0.5034722222222222,
"grad_norm": 0.5201379656791687,
"learning_rate": 9.863319580699635e-05,
"loss": 0.5909,
"step": 290
},
{
"epoch": 0.5208333333333334,
"grad_norm": 0.419812947511673,
"learning_rate": 9.838871051713033e-05,
"loss": 0.5849,
"step": 300
},
{
"epoch": 0.5208333333333334,
"eval_loss": 0.4985196888446808,
"eval_runtime": 506.4652,
"eval_samples_per_second": 2.277,
"eval_steps_per_second": 2.277,
"step": 300
},
{
"epoch": 0.5381944444444444,
"grad_norm": 0.5255069136619568,
"learning_rate": 9.81244751756236e-05,
"loss": 0.5894,
"step": 310
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.5103104114532471,
"learning_rate": 9.784059763121993e-05,
"loss": 0.5187,
"step": 320
},
{
"epoch": 0.5729166666666666,
"grad_norm": 0.6287491321563721,
"learning_rate": 9.753719374970908e-05,
"loss": 0.5181,
"step": 330
},
{
"epoch": 0.5902777777777778,
"grad_norm": 0.8683649897575378,
"learning_rate": 9.721438736663585e-05,
"loss": 0.5413,
"step": 340
},
{
"epoch": 0.6076388888888888,
"grad_norm": 0.7942582964897156,
"learning_rate": 9.687231023675597e-05,
"loss": 0.4949,
"step": 350
},
{
"epoch": 0.625,
"grad_norm": 0.476675808429718,
"learning_rate": 9.651110198025997e-05,
"loss": 0.5139,
"step": 360
},
{
"epoch": 0.6423611111111112,
"grad_norm": 0.42043331265449524,
"learning_rate": 9.613091002578649e-05,
"loss": 0.5163,
"step": 370
},
{
"epoch": 0.6597222222222222,
"grad_norm": 0.4923442602157593,
"learning_rate": 9.573188955024882e-05,
"loss": 0.5453,
"step": 380
},
{
"epoch": 0.6770833333333334,
"grad_norm": 0.4754851758480072,
"learning_rate": 9.531420341549865e-05,
"loss": 0.5674,
"step": 390
},
{
"epoch": 0.6944444444444444,
"grad_norm": 0.49470463395118713,
"learning_rate": 9.48780221018535e-05,
"loss": 0.527,
"step": 400
},
{
"epoch": 0.6944444444444444,
"eval_loss": 0.48567065596580505,
"eval_runtime": 512.591,
"eval_samples_per_second": 2.249,
"eval_steps_per_second": 2.249,
"step": 400
},
{
"epoch": 0.7118055555555556,
"grad_norm": 0.5066745281219482,
"learning_rate": 9.442352363851428e-05,
"loss": 0.555,
"step": 410
},
{
"epoch": 0.7291666666666666,
"grad_norm": 0.3770899772644043,
"learning_rate": 9.395089353090213e-05,
"loss": 0.4918,
"step": 420
},
{
"epoch": 0.7465277777777778,
"grad_norm": 0.497141569852829,
"learning_rate": 9.346032468494353e-05,
"loss": 0.5761,
"step": 430
},
{
"epoch": 0.7638888888888888,
"grad_norm": 0.3677353858947754,
"learning_rate": 9.295201732833487e-05,
"loss": 0.5363,
"step": 440
},
{
"epoch": 0.78125,
"grad_norm": 0.5019080638885498,
"learning_rate": 9.242617892881889e-05,
"loss": 0.5385,
"step": 450
},
{
"epoch": 0.7986111111111112,
"grad_norm": 0.4239552617073059,
"learning_rate": 9.188302410950565e-05,
"loss": 0.5026,
"step": 460
},
{
"epoch": 0.8159722222222222,
"grad_norm": 0.4331854283809662,
"learning_rate": 9.13227745612734e-05,
"loss": 0.5322,
"step": 470
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.4505394399166107,
"learning_rate": 9.074565895228442e-05,
"loss": 0.5145,
"step": 480
},
{
"epoch": 0.8506944444444444,
"grad_norm": 0.4377307891845703,
"learning_rate": 9.015191283465319e-05,
"loss": 0.5339,
"step": 490
},
{
"epoch": 0.8680555555555556,
"grad_norm": 0.5486445426940918,
"learning_rate": 8.954177854830487e-05,
"loss": 0.4782,
"step": 500
},
{
"epoch": 0.8680555555555556,
"eval_loss": 0.4704386591911316,
"eval_runtime": 507.1518,
"eval_samples_per_second": 2.273,
"eval_steps_per_second": 2.273,
"step": 500
},
{
"epoch": 0.8854166666666666,
"grad_norm": 0.505261242389679,
"learning_rate": 8.891550512206322e-05,
"loss": 0.51,
"step": 510
},
{
"epoch": 0.9027777777777778,
"grad_norm": 0.4194414019584656,
"learning_rate": 8.827334817200845e-05,
"loss": 0.5049,
"step": 520
},
{
"epoch": 0.9201388888888888,
"grad_norm": 0.5332881212234497,
"learning_rate": 8.76155697971465e-05,
"loss": 0.5303,
"step": 530
},
{
"epoch": 0.9375,
"grad_norm": 0.4372180700302124,
"learning_rate": 8.694243847243224e-05,
"loss": 0.5237,
"step": 540
},
{
"epoch": 0.9548611111111112,
"grad_norm": 0.5241920948028564,
"learning_rate": 8.625422893919027e-05,
"loss": 0.5211,
"step": 550
},
{
"epoch": 0.9722222222222222,
"grad_norm": 0.4739208221435547,
"learning_rate": 8.555122209297817e-05,
"loss": 0.4999,
"step": 560
},
{
"epoch": 0.9895833333333334,
"grad_norm": 0.4937109649181366,
"learning_rate": 8.483370486893783e-05,
"loss": 0.5348,
"step": 570
},
{
"epoch": 1.0069444444444444,
"grad_norm": 0.40740323066711426,
"learning_rate": 8.410197012468156e-05,
"loss": 0.487,
"step": 580
},
{
"epoch": 1.0243055555555556,
"grad_norm": 0.4971655607223511,
"learning_rate": 8.335631652076125e-05,
"loss": 0.4467,
"step": 590
},
{
"epoch": 1.0416666666666667,
"grad_norm": 0.5765105485916138,
"learning_rate": 8.259704839876871e-05,
"loss": 0.4357,
"step": 600
},
{
"epoch": 1.0416666666666667,
"eval_loss": 0.4711858034133911,
"eval_runtime": 502.4975,
"eval_samples_per_second": 2.295,
"eval_steps_per_second": 2.295,
"step": 600
},
{
"epoch": 1.0590277777777777,
"grad_norm": 0.6608824133872986,
"learning_rate": 8.182447565711737e-05,
"loss": 0.4109,
"step": 610
},
{
"epoch": 1.0763888888888888,
"grad_norm": 0.7043329477310181,
"learning_rate": 8.103891362455602e-05,
"loss": 0.425,
"step": 620
},
{
"epoch": 1.09375,
"grad_norm": 0.6336780786514282,
"learning_rate": 8.024068293146612e-05,
"loss": 0.4188,
"step": 630
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.6143578886985779,
"learning_rate": 7.943010937899511e-05,
"loss": 0.4329,
"step": 640
},
{
"epoch": 1.1284722222222223,
"grad_norm": 0.4573313891887665,
"learning_rate": 7.860752380607936e-05,
"loss": 0.4484,
"step": 650
},
{
"epoch": 1.1458333333333333,
"grad_norm": 0.49324455857276917,
"learning_rate": 7.777326195441088e-05,
"loss": 0.4482,
"step": 660
},
{
"epoch": 1.1631944444444444,
"grad_norm": 0.5778242349624634,
"learning_rate": 7.692766433140302e-05,
"loss": 0.4013,
"step": 670
},
{
"epoch": 1.1805555555555556,
"grad_norm": 0.515394926071167,
"learning_rate": 7.607107607121086e-05,
"loss": 0.4149,
"step": 680
},
{
"epoch": 1.1979166666666667,
"grad_norm": 0.5518147349357605,
"learning_rate": 7.520384679386344e-05,
"loss": 0.4334,
"step": 690
},
{
"epoch": 1.2152777777777777,
"grad_norm": 0.6633225679397583,
"learning_rate": 7.432633046256472e-05,
"loss": 0.4033,
"step": 700
},
{
"epoch": 1.2152777777777777,
"eval_loss": 0.4653947055339813,
"eval_runtime": 508.2251,
"eval_samples_per_second": 2.269,
"eval_steps_per_second": 2.269,
"step": 700
},
{
"epoch": 1.2326388888888888,
"grad_norm": 0.48088809847831726,
"learning_rate": 7.343888523922219e-05,
"loss": 0.4328,
"step": 710
},
{
"epoch": 1.25,
"grad_norm": 0.6473700404167175,
"learning_rate": 7.25418733382615e-05,
"loss": 0.4019,
"step": 720
},
{
"epoch": 1.2673611111111112,
"grad_norm": 0.6023727059364319,
"learning_rate": 7.163566087878726e-05,
"loss": 0.4238,
"step": 730
},
{
"epoch": 1.2847222222222223,
"grad_norm": 0.5473999977111816,
"learning_rate": 7.072061773514991e-05,
"loss": 0.4204,
"step": 740
},
{
"epoch": 1.3020833333333333,
"grad_norm": 0.5065605640411377,
"learning_rate": 6.979711738598004e-05,
"loss": 0.4175,
"step": 750
},
{
"epoch": 1.3194444444444444,
"grad_norm": 0.48466363549232483,
"learning_rate": 6.886553676175155e-05,
"loss": 0.4559,
"step": 760
},
{
"epoch": 1.3368055555555556,
"grad_norm": 0.3937870264053345,
"learning_rate": 6.792625609093593e-05,
"loss": 0.4052,
"step": 770
},
{
"epoch": 1.3541666666666667,
"grad_norm": 0.5539367198944092,
"learning_rate": 6.697965874481038e-05,
"loss": 0.4268,
"step": 780
},
{
"epoch": 1.3715277777777777,
"grad_norm": 0.5883862376213074,
"learning_rate": 6.602613108098327e-05,
"loss": 0.3801,
"step": 790
},
{
"epoch": 1.3888888888888888,
"grad_norm": 0.5152338743209839,
"learning_rate": 6.506606228570077e-05,
"loss": 0.4438,
"step": 800
},
{
"epoch": 1.3888888888888888,
"eval_loss": 0.4593656063079834,
"eval_runtime": 507.6748,
"eval_samples_per_second": 2.271,
"eval_steps_per_second": 2.271,
"step": 800
},
{
"epoch": 1.40625,
"grad_norm": 0.5437167286872864,
"learning_rate": 6.409984421499877e-05,
"loss": 0.4497,
"step": 810
},
{
"epoch": 1.4236111111111112,
"grad_norm": 0.560701847076416,
"learning_rate": 6.312787123476523e-05,
"loss": 0.4215,
"step": 820
},
{
"epoch": 1.4409722222222223,
"grad_norm": 0.4475458860397339,
"learning_rate": 6.215054005977829e-05,
"loss": 0.417,
"step": 830
},
{
"epoch": 1.4583333333333333,
"grad_norm": 0.6264685988426208,
"learning_rate": 6.11682495917852e-05,
"loss": 0.3694,
"step": 840
},
{
"epoch": 1.4756944444444444,
"grad_norm": 0.4710831940174103,
"learning_rate": 6.0181400756689185e-05,
"loss": 0.4168,
"step": 850
},
{
"epoch": 1.4930555555555556,
"grad_norm": 0.4411803185939789,
"learning_rate": 5.919039634090979e-05,
"loss": 0.4061,
"step": 860
},
{
"epoch": 1.5104166666666665,
"grad_norm": 0.5613175630569458,
"learning_rate": 5.819564082698392e-05,
"loss": 0.4011,
"step": 870
},
{
"epoch": 1.5277777777777777,
"grad_norm": 0.5883941054344177,
"learning_rate": 5.7197540228474634e-05,
"loss": 0.4046,
"step": 880
},
{
"epoch": 1.5451388888888888,
"grad_norm": 0.7705323100090027,
"learning_rate": 5.6196501924255083e-05,
"loss": 0.4447,
"step": 890
},
{
"epoch": 1.5625,
"grad_norm": 0.4412948489189148,
"learning_rate": 5.5192934492235116e-05,
"loss": 0.4297,
"step": 900
},
{
"epoch": 1.5625,
"eval_loss": 0.45315322279930115,
"eval_runtime": 509.3304,
"eval_samples_per_second": 2.264,
"eval_steps_per_second": 2.264,
"step": 900
},
{
"epoch": 1.5798611111111112,
"grad_norm": 0.4683547914028168,
"learning_rate": 5.4187247542598495e-05,
"loss": 0.4094,
"step": 910
},
{
"epoch": 1.5972222222222223,
"grad_norm": 0.5470554232597351,
"learning_rate": 5.317985155061882e-05,
"loss": 0.413,
"step": 920
},
{
"epoch": 1.6145833333333335,
"grad_norm": 0.476554274559021,
"learning_rate": 5.217115768912242e-05,
"loss": 0.3978,
"step": 930
},
{
"epoch": 1.6319444444444444,
"grad_norm": 0.4833614230155945,
"learning_rate": 5.116157766066637e-05,
"loss": 0.4377,
"step": 940
},
{
"epoch": 1.6493055555555556,
"grad_norm": 0.5570272207260132,
"learning_rate": 5.015152352950045e-05,
"loss": 0.4299,
"step": 950
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.37459680438041687,
"learning_rate": 4.914140755338144e-05,
"loss": 0.4285,
"step": 960
},
{
"epoch": 1.6840277777777777,
"grad_norm": 0.462679922580719,
"learning_rate": 4.813164201530838e-05,
"loss": 0.4038,
"step": 970
},
{
"epoch": 1.7013888888888888,
"grad_norm": 0.5221256017684937,
"learning_rate": 4.712263905524754e-05,
"loss": 0.4016,
"step": 980
},
{
"epoch": 1.71875,
"grad_norm": 0.6251868009567261,
"learning_rate": 4.611481050191587e-05,
"loss": 0.4188,
"step": 990
},
{
"epoch": 1.7361111111111112,
"grad_norm": 0.5684912204742432,
"learning_rate": 4.5108567704691405e-05,
"loss": 0.4012,
"step": 1000
},
{
"epoch": 1.7361111111111112,
"eval_loss": 0.4480222463607788,
"eval_runtime": 505.0708,
"eval_samples_per_second": 2.283,
"eval_steps_per_second": 2.283,
"step": 1000
},
{
"epoch": 1.7534722222222223,
"grad_norm": 0.44916579127311707,
"learning_rate": 4.4104321365719216e-05,
"loss": 0.3071,
"step": 1010
},
{
"epoch": 1.7708333333333335,
"grad_norm": 0.4596291482448578,
"learning_rate": 4.310248137228176e-05,
"loss": 0.302,
"step": 1020
},
{
"epoch": 1.7881944444444444,
"grad_norm": 0.44650307297706604,
"learning_rate": 4.210345662950161e-05,
"loss": 0.2602,
"step": 1030
},
{
"epoch": 1.8055555555555556,
"grad_norm": 0.4998283088207245,
"learning_rate": 4.11076548934452e-05,
"loss": 0.3086,
"step": 1040
},
{
"epoch": 1.8229166666666665,
"grad_norm": 0.4060577154159546,
"learning_rate": 4.011548260469552e-05,
"loss": 0.2801,
"step": 1050
},
{
"epoch": 1.8402777777777777,
"grad_norm": 0.8512741327285767,
"learning_rate": 3.9127344722461615e-05,
"loss": 0.3004,
"step": 1060
},
{
"epoch": 1.8576388888888888,
"grad_norm": 0.6027821898460388,
"learning_rate": 3.8143644559292927e-05,
"loss": 0.2837,
"step": 1070
},
{
"epoch": 1.875,
"grad_norm": 0.5069570541381836,
"learning_rate": 3.716478361646555e-05,
"loss": 0.2604,
"step": 1080
},
{
"epoch": 1.8923611111111112,
"grad_norm": 0.56328284740448,
"learning_rate": 3.6191161420107864e-05,
"loss": 0.304,
"step": 1090
},
{
"epoch": 1.9097222222222223,
"grad_norm": 0.5753824710845947,
"learning_rate": 3.5223175358132256e-05,
"loss": 0.2866,
"step": 1100
},
{
"epoch": 1.9097222222222223,
"eval_loss": 0.48055773973464966,
"eval_runtime": 491.0229,
"eval_samples_per_second": 2.348,
"eval_steps_per_second": 2.348,
"step": 1100
},
{
"epoch": 1.9270833333333335,
"grad_norm": 0.6144798398017883,
"learning_rate": 3.426122051803969e-05,
"loss": 0.2847,
"step": 1110
},
{
"epoch": 1.9444444444444444,
"grad_norm": 0.7388680577278137,
"learning_rate": 3.330568952566302e-05,
"loss": 0.278,
"step": 1120
},
{
"epoch": 1.9618055555555556,
"grad_norm": 0.7205446362495422,
"learning_rate": 3.235697238491516e-05,
"loss": 0.2883,
"step": 1130
},
{
"epoch": 1.9791666666666665,
"grad_norm": 0.6098833680152893,
"learning_rate": 3.141545631860746e-05,
"loss": 0.2924,
"step": 1140
},
{
"epoch": 1.9965277777777777,
"grad_norm": 0.5834055542945862,
"learning_rate": 3.0481525610402978e-05,
"loss": 0.3073,
"step": 1150
},
{
"epoch": 2.013888888888889,
"grad_norm": 0.6533185839653015,
"learning_rate": 2.9555561447969683e-05,
"loss": 0.304,
"step": 1160
},
{
"epoch": 2.03125,
"grad_norm": 0.6724961996078491,
"learning_rate": 2.8637941767396954e-05,
"loss": 0.2801,
"step": 1170
},
{
"epoch": 2.048611111111111,
"grad_norm": 0.771094024181366,
"learning_rate": 2.7729041098939585e-05,
"loss": 0.287,
"step": 1180
},
{
"epoch": 2.0659722222222223,
"grad_norm": 0.6162275075912476,
"learning_rate": 2.682923041415163e-05,
"loss": 0.3276,
"step": 1190
},
{
"epoch": 2.0833333333333335,
"grad_norm": 0.7060827016830444,
"learning_rate": 2.5938876974472802e-05,
"loss": 0.2718,
"step": 1200
},
{
"epoch": 2.0833333333333335,
"eval_loss": 0.47958508133888245,
"eval_runtime": 491.4469,
"eval_samples_per_second": 2.346,
"eval_steps_per_second": 2.346,
"step": 1200
},
{
"epoch": 2.1006944444444446,
"grad_norm": 0.7561212182044983,
"learning_rate": 2.5058344181329273e-05,
"loss": 0.2929,
"step": 1210
},
{
"epoch": 2.1180555555555554,
"grad_norm": 0.5527466535568237,
"learning_rate": 2.418799142780982e-05,
"loss": 0.3054,
"step": 1220
},
{
"epoch": 2.1354166666666665,
"grad_norm": 0.6940686702728271,
"learning_rate": 2.332817395197813e-05,
"loss": 0.3087,
"step": 1230
},
{
"epoch": 2.1527777777777777,
"grad_norm": 0.7222234010696411,
"learning_rate": 2.2479242691880887e-05,
"loss": 0.286,
"step": 1240
},
{
"epoch": 2.170138888888889,
"grad_norm": 0.6128625273704529,
"learning_rate": 2.164154414231091e-05,
"loss": 0.2902,
"step": 1250
},
{
"epoch": 2.1875,
"grad_norm": 0.672198474407196,
"learning_rate": 2.0815420213383936e-05,
"loss": 0.2631,
"step": 1260
},
{
"epoch": 2.204861111111111,
"grad_norm": 0.5412243008613586,
"learning_rate": 2.0001208090986557e-05,
"loss": 0.3163,
"step": 1270
},
{
"epoch": 2.2222222222222223,
"grad_norm": 0.8546826839447021,
"learning_rate": 1.9199240099152498e-05,
"loss": 0.3257,
"step": 1280
},
{
"epoch": 2.2395833333333335,
"grad_norm": 0.7093095183372498,
"learning_rate": 1.8409843564423056e-05,
"loss": 0.2952,
"step": 1290
},
{
"epoch": 2.2569444444444446,
"grad_norm": 0.6084073781967163,
"learning_rate": 1.7633340682247558e-05,
"loss": 0.3169,
"step": 1300
},
{
"epoch": 2.2569444444444446,
"eval_loss": 0.468641072511673,
"eval_runtime": 486.238,
"eval_samples_per_second": 2.371,
"eval_steps_per_second": 2.371,
"step": 1300
},
{
"epoch": 2.2743055555555554,
"grad_norm": 0.7556958198547363,
"learning_rate": 1.6870048385477848e-05,
"loss": 0.3121,
"step": 1310
},
{
"epoch": 2.2916666666666665,
"grad_norm": 0.6000959873199463,
"learning_rate": 1.6120278215010886e-05,
"loss": 0.2928,
"step": 1320
},
{
"epoch": 2.3090277777777777,
"grad_norm": 0.6133815050125122,
"learning_rate": 1.5384336192632104e-05,
"loss": 0.2578,
"step": 1330
},
{
"epoch": 2.326388888888889,
"grad_norm": 0.7813517451286316,
"learning_rate": 1.4662522696111225e-05,
"loss": 0.2856,
"step": 1340
},
{
"epoch": 2.34375,
"grad_norm": 0.738869309425354,
"learning_rate": 1.395513233660199e-05,
"loss": 0.2829,
"step": 1350
},
{
"epoch": 2.361111111111111,
"grad_norm": 0.7364549040794373,
"learning_rate": 1.3262453838395205e-05,
"loss": 0.3188,
"step": 1360
},
{
"epoch": 2.3784722222222223,
"grad_norm": 0.5563517212867737,
"learning_rate": 1.2584769921074962e-05,
"loss": 0.295,
"step": 1370
},
{
"epoch": 2.3958333333333335,
"grad_norm": 0.6138696074485779,
"learning_rate": 1.1922357184125327e-05,
"loss": 0.3112,
"step": 1380
},
{
"epoch": 2.4131944444444446,
"grad_norm": 0.6921333074569702,
"learning_rate": 1.1275485994035107e-05,
"loss": 0.2837,
"step": 1390
},
{
"epoch": 2.4305555555555554,
"grad_norm": 0.6158069968223572,
"learning_rate": 1.064442037394679e-05,
"loss": 0.276,
"step": 1400
},
{
"epoch": 2.4305555555555554,
"eval_loss": 0.46923181414604187,
"eval_runtime": 491.6516,
"eval_samples_per_second": 2.345,
"eval_steps_per_second": 2.345,
"step": 1400
},
{
"epoch": 2.4479166666666665,
"grad_norm": 0.5451982021331787,
"learning_rate": 1.0029417895894211e-05,
"loss": 0.305,
"step": 1410
},
{
"epoch": 2.4652777777777777,
"grad_norm": 0.5262075662612915,
"learning_rate": 9.430729575673658e-06,
"loss": 0.263,
"step": 1420
},
{
"epoch": 2.482638888888889,
"grad_norm": 0.5978283286094666,
"learning_rate": 8.848599770390653e-06,
"loss": 0.2709,
"step": 1430
},
{
"epoch": 2.5,
"grad_norm": 0.7257421612739563,
"learning_rate": 8.283266078724571e-06,
"loss": 0.2834,
"step": 1440
},
{
"epoch": 2.517361111111111,
"grad_norm": 0.682624101638794,
"learning_rate": 7.734959243951783e-06,
"loss": 0.2785,
"step": 1450
},
{
"epoch": 2.5347222222222223,
"grad_norm": 0.5846174955368042,
"learning_rate": 7.203903059766848e-06,
"loss": 0.2671,
"step": 1460
},
{
"epoch": 2.5520833333333335,
"grad_norm": 0.7986878752708435,
"learning_rate": 6.690314278940207e-06,
"loss": 0.2646,
"step": 1470
},
{
"epoch": 2.5694444444444446,
"grad_norm": 0.6198819875717163,
"learning_rate": 6.194402524849585e-06,
"loss": 0.2905,
"step": 1480
},
{
"epoch": 2.5868055555555554,
"grad_norm": 0.5760586261749268,
"learning_rate": 5.716370205921434e-06,
"loss": 0.2948,
"step": 1490
},
{
"epoch": 2.6041666666666665,
"grad_norm": 0.707608163356781,
"learning_rate": 5.256412433017005e-06,
"loss": 0.2616,
"step": 1500
},
{
"epoch": 2.6041666666666665,
"eval_loss": 0.4693165123462677,
"eval_runtime": 489.2257,
"eval_samples_per_second": 2.357,
"eval_steps_per_second": 2.357,
"step": 1500
},
{
"epoch": 2.6215277777777777,
"grad_norm": 0.43746083974838257,
"learning_rate": 4.814716939797109e-06,
"loss": 0.2411,
"step": 1510
},
{
"epoch": 2.638888888888889,
"grad_norm": 0.6086592674255371,
"learning_rate": 4.391464006097845e-06,
"loss": 0.2909,
"step": 1520
},
{
"epoch": 2.65625,
"grad_norm": 0.6783342957496643,
"learning_rate": 3.986826384348608e-06,
"loss": 0.2828,
"step": 1530
},
{
"epoch": 2.673611111111111,
"grad_norm": 2.342499017715454,
"learning_rate": 3.600969229062523e-06,
"loss": 0.3004,
"step": 1540
},
{
"epoch": 2.6909722222222223,
"grad_norm": 0.7391747832298279,
"learning_rate": 3.2340500294278876e-06,
"loss": 0.3031,
"step": 1550
},
{
"epoch": 2.7083333333333335,
"grad_norm": 0.8236761093139648,
"learning_rate": 2.8862185450283572e-06,
"loss": 0.2654,
"step": 1560
},
{
"epoch": 2.7256944444444446,
"grad_norm": 0.5708310604095459,
"learning_rate": 2.55761674471795e-06,
"loss": 0.2812,
"step": 1570
},
{
"epoch": 2.7430555555555554,
"grad_norm": 0.6445372104644775,
"learning_rate": 2.24837874867585e-06,
"loss": 0.2854,
"step": 1580
},
{
"epoch": 2.7604166666666665,
"grad_norm": 0.7138336300849915,
"learning_rate": 1.9586307736647323e-06,
"loss": 0.2975,
"step": 1590
},
{
"epoch": 2.7777777777777777,
"grad_norm": 0.7076891660690308,
"learning_rate": 1.6884910815148425e-06,
"loss": 0.2545,
"step": 1600
},
{
"epoch": 2.7777777777777777,
"eval_loss": 0.4670693278312683,
"eval_runtime": 490.7784,
"eval_samples_per_second": 2.349,
"eval_steps_per_second": 2.349,
"step": 1600
},
{
"epoch": 2.795138888888889,
"grad_norm": 0.6267815828323364,
"learning_rate": 1.4380699308549572e-06,
"loss": 0.2886,
"step": 1610
},
{
"epoch": 2.8125,
"grad_norm": 0.6017192602157593,
"learning_rate": 1.2074695321098895e-06,
"loss": 0.2791,
"step": 1620
},
{
"epoch": 2.829861111111111,
"grad_norm": 0.6211732029914856,
"learning_rate": 9.96784005782836e-07,
"loss": 0.2814,
"step": 1630
},
{
"epoch": 2.8472222222222223,
"grad_norm": 0.6479988694190979,
"learning_rate": 8.060993440397624e-07,
"loss": 0.2814,
"step": 1640
},
{
"epoch": 2.8645833333333335,
"grad_norm": 0.6914340257644653,
"learning_rate": 6.354933756112902e-07,
"loss": 0.2736,
"step": 1650
},
{
"epoch": 2.8819444444444446,
"grad_norm": 0.5987943410873413,
"learning_rate": 4.850357340266231e-07,
"loss": 0.286,
"step": 1660
},
{
"epoch": 2.8993055555555554,
"grad_norm": 0.8642510771751404,
"learning_rate": 3.5478782919230457e-07,
"loss": 0.2604,
"step": 1670
},
{
"epoch": 2.9166666666666665,
"grad_norm": 0.6662091016769409,
"learning_rate": 2.4480282232754445e-07,
"loss": 0.288,
"step": 1680
},
{
"epoch": 2.9340277777777777,
"grad_norm": 0.6560271978378296,
"learning_rate": 1.5512560426621526e-07,
"loss": 0.2588,
"step": 1690
},
{
"epoch": 2.951388888888889,
"grad_norm": 0.5207160115242004,
"learning_rate": 8.579277713448574e-08,
"loss": 0.2673,
"step": 1700
},
{
"epoch": 2.951388888888889,
"eval_loss": 0.46653616428375244,
"eval_runtime": 493.8981,
"eval_samples_per_second": 2.334,
"eval_steps_per_second": 2.334,
"step": 1700
},
{
"epoch": 2.96875,
"grad_norm": 0.6704832315444946,
"learning_rate": 3.6832639411521485e-08,
"loss": 0.2919,
"step": 1710
},
{
"epoch": 2.986111111111111,
"grad_norm": 0.6270363330841064,
"learning_rate": 8.26517437933494e-09,
"loss": 0.2505,
"step": 1720
},
{
"epoch": 3.0,
"step": 1728,
"total_flos": 2.0991618521684705e+18,
"train_loss": 0.12052025701160785,
"train_runtime": 11479.8644,
"train_samples_per_second": 1.204,
"train_steps_per_second": 0.151
}
],
"logging_steps": 10,
"max_steps": 1728,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.0991618521684705e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
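
Note: the "log_history" array above interleaves training entries (every 10 steps, per "logging_steps") with evaluation entries (every 100 steps, per "eval_steps"), plus one final summary record. Below is a minimal sketch, not part of the checkpoint itself, of how one might load this file with Python's standard library and separate the two series; the relative path "trainer_state.json" is an assumption for illustration.

import json

# Load the trainer state exported by the Hugging Face Trainer.
# NOTE: the path is an assumption; point it at this trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
# The final summary record uses "train_loss" and is excluded by these filters.
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"logged {len(train_log)} training points and {len(eval_log)} eval points")
print("lowest eval_loss:", min(loss for _, loss in eval_log))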