{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 26,
"global_step": 253,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003952569169960474,
"grad_norm": 3.0407819747924805,
"learning_rate": 3.3333333333333333e-06,
"loss": 2.3652,
"step": 1
},
{
"epoch": 0.007905138339920948,
"grad_norm": 3.093733310699463,
"learning_rate": 6.666666666666667e-06,
"loss": 2.1458,
"step": 2
},
{
"epoch": 0.011857707509881422,
"grad_norm": 3.0263988971710205,
"learning_rate": 1e-05,
"loss": 2.2628,
"step": 3
},
{
"epoch": 0.015810276679841896,
"grad_norm": 2.6713979244232178,
"learning_rate": 9.999605221019082e-06,
"loss": 2.0308,
"step": 4
},
{
"epoch": 0.019762845849802372,
"grad_norm": 2.4664857387542725,
"learning_rate": 9.9984209464165e-06,
"loss": 1.6917,
"step": 5
},
{
"epoch": 0.023715415019762844,
"grad_norm": 2.100984811782837,
"learning_rate": 9.996447363202947e-06,
"loss": 1.8489,
"step": 6
},
{
"epoch": 0.02766798418972332,
"grad_norm": 1.7837852239608765,
"learning_rate": 9.99368478303009e-06,
"loss": 1.9894,
"step": 7
},
{
"epoch": 0.03162055335968379,
"grad_norm": 1.6338787078857422,
"learning_rate": 9.990133642141359e-06,
"loss": 1.8366,
"step": 8
},
{
"epoch": 0.03557312252964427,
"grad_norm": 1.7102547883987427,
"learning_rate": 9.98579450130307e-06,
"loss": 1.9013,
"step": 9
},
{
"epoch": 0.039525691699604744,
"grad_norm": 1.8172574043273926,
"learning_rate": 9.980668045715864e-06,
"loss": 2.034,
"step": 10
},
{
"epoch": 0.043478260869565216,
"grad_norm": 1.7970800399780273,
"learning_rate": 9.974755084906503e-06,
"loss": 1.8565,
"step": 11
},
{
"epoch": 0.04743083003952569,
"grad_norm": 1.9662507772445679,
"learning_rate": 9.968056552600043e-06,
"loss": 1.8154,
"step": 12
},
{
"epoch": 0.05138339920948617,
"grad_norm": 1.6226023435592651,
"learning_rate": 9.960573506572391e-06,
"loss": 1.7205,
"step": 13
},
{
"epoch": 0.05533596837944664,
"grad_norm": 1.4996356964111328,
"learning_rate": 9.952307128483257e-06,
"loss": 1.8481,
"step": 14
},
{
"epoch": 0.05928853754940711,
"grad_norm": 1.2719931602478027,
"learning_rate": 9.94325872368957e-06,
"loss": 1.5792,
"step": 15
},
{
"epoch": 0.06324110671936758,
"grad_norm": 1.2413394451141357,
"learning_rate": 9.93342972103934e-06,
"loss": 1.6527,
"step": 16
},
{
"epoch": 0.06719367588932806,
"grad_norm": 1.1712374687194824,
"learning_rate": 9.922821672646028e-06,
"loss": 1.3883,
"step": 17
},
{
"epoch": 0.07114624505928854,
"grad_norm": 1.1695728302001953,
"learning_rate": 9.911436253643445e-06,
"loss": 1.5297,
"step": 18
},
{
"epoch": 0.07509881422924901,
"grad_norm": 1.1794648170471191,
"learning_rate": 9.899275261921236e-06,
"loss": 1.5525,
"step": 19
},
{
"epoch": 0.07905138339920949,
"grad_norm": 1.131502628326416,
"learning_rate": 9.886340617840968e-06,
"loss": 1.5752,
"step": 20
},
{
"epoch": 0.08300395256916997,
"grad_norm": 1.1554540395736694,
"learning_rate": 9.872634363932887e-06,
"loss": 1.5944,
"step": 21
},
{
"epoch": 0.08695652173913043,
"grad_norm": 1.1099374294281006,
"learning_rate": 9.85815866457337e-06,
"loss": 1.4019,
"step": 22
},
{
"epoch": 0.09090909090909091,
"grad_norm": 1.2356349229812622,
"learning_rate": 9.842915805643156e-06,
"loss": 1.706,
"step": 23
},
{
"epoch": 0.09486166007905138,
"grad_norm": 1.1689324378967285,
"learning_rate": 9.82690819416637e-06,
"loss": 1.6249,
"step": 24
},
{
"epoch": 0.09881422924901186,
"grad_norm": 1.2375783920288086,
"learning_rate": 9.81013835793043e-06,
"loss": 1.8374,
"step": 25
},
{
"epoch": 0.10276679841897234,
"grad_norm": 1.1797913312911987,
"learning_rate": 9.79260894508688e-06,
"loss": 1.6161,
"step": 26
},
{
"epoch": 0.10276679841897234,
"eval_loss": 1.4920315742492676,
"eval_runtime": 0.6276,
"eval_samples_per_second": 33.462,
"eval_steps_per_second": 4.78,
"step": 26
},
{
"epoch": 0.1067193675889328,
"grad_norm": 1.1279163360595703,
"learning_rate": 9.774322723733216e-06,
"loss": 1.6593,
"step": 27
},
{
"epoch": 0.11067193675889328,
"grad_norm": 1.1455029249191284,
"learning_rate": 9.755282581475769e-06,
"loss": 1.6982,
"step": 28
},
{
"epoch": 0.11462450592885376,
"grad_norm": 1.0429457426071167,
"learning_rate": 9.735491524973723e-06,
"loss": 1.4423,
"step": 29
},
{
"epoch": 0.11857707509881422,
"grad_norm": 0.9471132755279541,
"learning_rate": 9.714952679464324e-06,
"loss": 1.3055,
"step": 30
},
{
"epoch": 0.1225296442687747,
"grad_norm": 1.0985954999923706,
"learning_rate": 9.693669288269371e-06,
"loss": 1.6302,
"step": 31
},
{
"epoch": 0.12648221343873517,
"grad_norm": 1.11372709274292,
"learning_rate": 9.671644712283061e-06,
"loss": 1.7975,
"step": 32
},
{
"epoch": 0.13043478260869565,
"grad_norm": 0.9115369915962219,
"learning_rate": 9.648882429441258e-06,
"loss": 1.2709,
"step": 33
},
{
"epoch": 0.13438735177865613,
"grad_norm": 1.0038697719573975,
"learning_rate": 9.62538603417229e-06,
"loss": 1.4833,
"step": 34
},
{
"epoch": 0.1383399209486166,
"grad_norm": 1.0109611749649048,
"learning_rate": 9.601159236829353e-06,
"loss": 1.2978,
"step": 35
},
{
"epoch": 0.1422924901185771,
"grad_norm": 0.966667115688324,
"learning_rate": 9.576205863104588e-06,
"loss": 1.5622,
"step": 36
},
{
"epoch": 0.14624505928853754,
"grad_norm": 1.0922729969024658,
"learning_rate": 9.550529853424979e-06,
"loss": 1.7766,
"step": 37
},
{
"epoch": 0.15019762845849802,
"grad_norm": 1.010776162147522,
"learning_rate": 9.524135262330098e-06,
"loss": 1.5206,
"step": 38
},
{
"epoch": 0.1541501976284585,
"grad_norm": 0.9982693791389465,
"learning_rate": 9.497026257831856e-06,
"loss": 1.5341,
"step": 39
},
{
"epoch": 0.15810276679841898,
"grad_norm": 0.9735735058784485,
"learning_rate": 9.46920712075632e-06,
"loss": 1.4157,
"step": 40
},
{
"epoch": 0.16205533596837945,
"grad_norm": 0.9151104092597961,
"learning_rate": 9.440682244067724e-06,
"loss": 1.2846,
"step": 41
},
{
"epoch": 0.16600790513833993,
"grad_norm": 0.9795275330543518,
"learning_rate": 9.411456132174768e-06,
"loss": 1.4727,
"step": 42
},
{
"epoch": 0.16996047430830039,
"grad_norm": 1.1112526655197144,
"learning_rate": 9.381533400219319e-06,
"loss": 1.6619,
"step": 43
},
{
"epoch": 0.17391304347826086,
"grad_norm": 0.9755843877792358,
"learning_rate": 9.35091877334763e-06,
"loss": 1.4687,
"step": 44
},
{
"epoch": 0.17786561264822134,
"grad_norm": 0.9589361548423767,
"learning_rate": 9.319617085964177e-06,
"loss": 1.4871,
"step": 45
},
{
"epoch": 0.18181818181818182,
"grad_norm": 1.105637788772583,
"learning_rate": 9.287633280968263e-06,
"loss": 1.2694,
"step": 46
},
{
"epoch": 0.1857707509881423,
"grad_norm": 0.9662937521934509,
"learning_rate": 9.25497240897346e-06,
"loss": 1.2148,
"step": 47
},
{
"epoch": 0.18972332015810275,
"grad_norm": 1.0539385080337524,
"learning_rate": 9.221639627510076e-06,
"loss": 1.5932,
"step": 48
},
{
"epoch": 0.19367588932806323,
"grad_norm": 0.9249448180198669,
"learning_rate": 9.18764020021071e-06,
"loss": 1.3074,
"step": 49
},
{
"epoch": 0.1976284584980237,
"grad_norm": 1.0217876434326172,
"learning_rate": 9.152979495979064e-06,
"loss": 1.5327,
"step": 50
},
{
"epoch": 0.2015810276679842,
"grad_norm": 1.070178747177124,
"learning_rate": 9.117662988142138e-06,
"loss": 1.6561,
"step": 51
},
{
"epoch": 0.20553359683794467,
"grad_norm": 0.9971439242362976,
"learning_rate": 9.08169625358592e-06,
"loss": 1.3683,
"step": 52
},
{
"epoch": 0.20553359683794467,
"eval_loss": 1.325599193572998,
"eval_runtime": 0.6316,
"eval_samples_per_second": 33.25,
"eval_steps_per_second": 4.75,
"step": 52
},
{
"epoch": 0.20948616600790515,
"grad_norm": 0.9724494814872742,
"learning_rate": 9.045084971874738e-06,
"loss": 1.5192,
"step": 53
},
{
"epoch": 0.2134387351778656,
"grad_norm": 0.9346773624420166,
"learning_rate": 9.007834924354384e-06,
"loss": 1.4107,
"step": 54
},
{
"epoch": 0.21739130434782608,
"grad_norm": 0.9584831595420837,
"learning_rate": 8.969951993239177e-06,
"loss": 1.392,
"step": 55
},
{
"epoch": 0.22134387351778656,
"grad_norm": 0.8788594603538513,
"learning_rate": 8.931442160683094e-06,
"loss": 1.2833,
"step": 56
},
{
"epoch": 0.22529644268774704,
"grad_norm": 0.9413896203041077,
"learning_rate": 8.892311507835118e-06,
"loss": 1.26,
"step": 57
},
{
"epoch": 0.22924901185770752,
"grad_norm": 0.944340169429779,
"learning_rate": 8.852566213878947e-06,
"loss": 1.3223,
"step": 58
},
{
"epoch": 0.233201581027668,
"grad_norm": 0.8935737609863281,
"learning_rate": 8.81221255505724e-06,
"loss": 1.3826,
"step": 59
},
{
"epoch": 0.23715415019762845,
"grad_norm": 0.9506601095199585,
"learning_rate": 8.77125690368052e-06,
"loss": 1.4478,
"step": 60
},
{
"epoch": 0.24110671936758893,
"grad_norm": 0.9523410201072693,
"learning_rate": 8.729705727120911e-06,
"loss": 1.3503,
"step": 61
},
{
"epoch": 0.2450592885375494,
"grad_norm": 0.8316011428833008,
"learning_rate": 8.68756558679087e-06,
"loss": 1.041,
"step": 62
},
{
"epoch": 0.2490118577075099,
"grad_norm": 0.9092923998832703,
"learning_rate": 8.644843137107058e-06,
"loss": 1.323,
"step": 63
},
{
"epoch": 0.25296442687747034,
"grad_norm": 0.9320958852767944,
"learning_rate": 8.601545124439535e-06,
"loss": 1.3562,
"step": 64
},
{
"epoch": 0.25691699604743085,
"grad_norm": 0.9427369832992554,
"learning_rate": 8.557678386046429e-06,
"loss": 1.5575,
"step": 65
},
{
"epoch": 0.2608695652173913,
"grad_norm": 0.9649813175201416,
"learning_rate": 8.513249848994248e-06,
"loss": 1.2068,
"step": 66
},
{
"epoch": 0.2648221343873518,
"grad_norm": 0.9272534251213074,
"learning_rate": 8.468266529064025e-06,
"loss": 1.2409,
"step": 67
},
{
"epoch": 0.26877470355731226,
"grad_norm": 0.952318549156189,
"learning_rate": 8.422735529643445e-06,
"loss": 1.4648,
"step": 68
},
{
"epoch": 0.2727272727272727,
"grad_norm": 0.7945717573165894,
"learning_rate": 8.376664040605122e-06,
"loss": 1.142,
"step": 69
},
{
"epoch": 0.2766798418972332,
"grad_norm": 0.8417496085166931,
"learning_rate": 8.33005933717126e-06,
"loss": 1.179,
"step": 70
},
{
"epoch": 0.28063241106719367,
"grad_norm": 0.8613891005516052,
"learning_rate": 8.282928778764783e-06,
"loss": 1.258,
"step": 71
},
{
"epoch": 0.2845849802371542,
"grad_norm": 0.8771772980690002,
"learning_rate": 8.235279807847223e-06,
"loss": 1.3277,
"step": 72
},
{
"epoch": 0.2885375494071146,
"grad_norm": 0.9778826832771301,
"learning_rate": 8.18711994874345e-06,
"loss": 1.1869,
"step": 73
},
{
"epoch": 0.2924901185770751,
"grad_norm": 0.9443055391311646,
"learning_rate": 8.138456806453503e-06,
"loss": 1.37,
"step": 74
},
{
"epoch": 0.2964426877470356,
"grad_norm": 0.8966163992881775,
"learning_rate": 8.089298065451673e-06,
"loss": 1.2171,
"step": 75
},
{
"epoch": 0.30039525691699603,
"grad_norm": 0.9195836782455444,
"learning_rate": 8.039651488473028e-06,
"loss": 1.3983,
"step": 76
},
{
"epoch": 0.30434782608695654,
"grad_norm": 0.952008843421936,
"learning_rate": 7.989524915287595e-06,
"loss": 1.4224,
"step": 77
},
{
"epoch": 0.308300395256917,
"grad_norm": 1.0114339590072632,
"learning_rate": 7.938926261462366e-06,
"loss": 1.409,
"step": 78
},
{
"epoch": 0.308300395256917,
"eval_loss": 1.2486090660095215,
"eval_runtime": 0.6312,
"eval_samples_per_second": 33.269,
"eval_steps_per_second": 4.753,
"step": 78
},
{
"epoch": 0.31225296442687744,
"grad_norm": 0.8975977301597595,
"learning_rate": 7.887863517111337e-06,
"loss": 1.3565,
"step": 79
},
{
"epoch": 0.31620553359683795,
"grad_norm": 0.9322842955589294,
"learning_rate": 7.836344745633785e-06,
"loss": 1.336,
"step": 80
},
{
"epoch": 0.3201581027667984,
"grad_norm": 0.934016227722168,
"learning_rate": 7.78437808244094e-06,
"loss": 1.318,
"step": 81
},
{
"epoch": 0.3241106719367589,
"grad_norm": 0.8984368443489075,
"learning_rate": 7.731971733671347e-06,
"loss": 1.264,
"step": 82
},
{
"epoch": 0.32806324110671936,
"grad_norm": 0.9175249338150024,
"learning_rate": 7.679133974894984e-06,
"loss": 1.2758,
"step": 83
},
{
"epoch": 0.33201581027667987,
"grad_norm": 1.0880227088928223,
"learning_rate": 7.6258731498064796e-06,
"loss": 1.1448,
"step": 84
},
{
"epoch": 0.3359683794466403,
"grad_norm": 0.8632190823554993,
"learning_rate": 7.572197668907533e-06,
"loss": 1.2644,
"step": 85
},
{
"epoch": 0.33992094861660077,
"grad_norm": 0.8895804286003113,
"learning_rate": 7.518116008178805e-06,
"loss": 1.2656,
"step": 86
},
{
"epoch": 0.3438735177865613,
"grad_norm": 0.9582334756851196,
"learning_rate": 7.463636707741458e-06,
"loss": 1.2453,
"step": 87
},
{
"epoch": 0.34782608695652173,
"grad_norm": 0.8999531865119934,
"learning_rate": 7.408768370508577e-06,
"loss": 1.1815,
"step": 88
},
{
"epoch": 0.35177865612648224,
"grad_norm": 0.8813087940216064,
"learning_rate": 7.353519660826665e-06,
"loss": 1.2232,
"step": 89
},
{
"epoch": 0.3557312252964427,
"grad_norm": 0.8202010989189148,
"learning_rate": 7.297899303107441e-06,
"loss": 1.1386,
"step": 90
},
{
"epoch": 0.35968379446640314,
"grad_norm": 0.9317258596420288,
"learning_rate": 7.241916080450163e-06,
"loss": 1.2962,
"step": 91
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.8814587593078613,
"learning_rate": 7.185578833254665e-06,
"loss": 1.3109,
"step": 92
},
{
"epoch": 0.3675889328063241,
"grad_norm": 0.9203696846961975,
"learning_rate": 7.128896457825364e-06,
"loss": 1.4728,
"step": 93
},
{
"epoch": 0.3715415019762846,
"grad_norm": 0.842402994632721,
"learning_rate": 7.071877904966422e-06,
"loss": 1.2597,
"step": 94
},
{
"epoch": 0.37549407114624506,
"grad_norm": 0.8974599838256836,
"learning_rate": 7.014532178568314e-06,
"loss": 1.2041,
"step": 95
},
{
"epoch": 0.3794466403162055,
"grad_norm": 0.8709033727645874,
"learning_rate": 6.9568683341860135e-06,
"loss": 1.1369,
"step": 96
},
{
"epoch": 0.383399209486166,
"grad_norm": 0.8162269592285156,
"learning_rate": 6.898895477609007e-06,
"loss": 1.2103,
"step": 97
},
{
"epoch": 0.38735177865612647,
"grad_norm": 0.8966283798217773,
"learning_rate": 6.840622763423391e-06,
"loss": 1.2609,
"step": 98
},
{
"epoch": 0.391304347826087,
"grad_norm": 0.8919047117233276,
"learning_rate": 6.782059393566254e-06,
"loss": 1.1807,
"step": 99
},
{
"epoch": 0.3952569169960474,
"grad_norm": 0.9296970367431641,
"learning_rate": 6.723214615872585e-06,
"loss": 1.3748,
"step": 100
},
{
"epoch": 0.39920948616600793,
"grad_norm": 0.8833282589912415,
"learning_rate": 6.664097722614934e-06,
"loss": 1.271,
"step": 101
},
{
"epoch": 0.4031620553359684,
"grad_norm": 0.9111071228981018,
"learning_rate": 6.604718049036047e-06,
"loss": 1.3016,
"step": 102
},
{
"epoch": 0.40711462450592883,
"grad_norm": 0.9141144156455994,
"learning_rate": 6.545084971874738e-06,
"loss": 1.3072,
"step": 103
},
{
"epoch": 0.41106719367588934,
"grad_norm": 0.8589788675308228,
"learning_rate": 6.485207907885175e-06,
"loss": 1.1914,
"step": 104
},
{
"epoch": 0.41106719367588934,
"eval_loss": 1.2048839330673218,
"eval_runtime": 0.6355,
"eval_samples_per_second": 33.045,
"eval_steps_per_second": 4.721,
"step": 104
},
{
"epoch": 0.4150197628458498,
"grad_norm": 0.9179502725601196,
"learning_rate": 6.425096312349881e-06,
"loss": 1.3336,
"step": 105
},
{
"epoch": 0.4189723320158103,
"grad_norm": 0.8490063548088074,
"learning_rate": 6.364759677586627e-06,
"loss": 1.275,
"step": 106
},
{
"epoch": 0.42292490118577075,
"grad_norm": 0.8626274466514587,
"learning_rate": 6.304207531449486e-06,
"loss": 1.237,
"step": 107
},
{
"epoch": 0.4268774703557312,
"grad_norm": 0.855128824710846,
"learning_rate": 6.243449435824276e-06,
"loss": 1.1993,
"step": 108
},
{
"epoch": 0.4308300395256917,
"grad_norm": 0.8959077000617981,
"learning_rate": 6.182494985118625e-06,
"loss": 1.3898,
"step": 109
},
{
"epoch": 0.43478260869565216,
"grad_norm": 0.9411128163337708,
"learning_rate": 6.121353804746907e-06,
"loss": 1.1805,
"step": 110
},
{
"epoch": 0.43873517786561267,
"grad_norm": 0.8071564435958862,
"learning_rate": 6.060035549610275e-06,
"loss": 0.8793,
"step": 111
},
{
"epoch": 0.4426877470355731,
"grad_norm": 0.873630702495575,
"learning_rate": 5.9985499025720354e-06,
"loss": 1.3551,
"step": 112
},
{
"epoch": 0.44664031620553357,
"grad_norm": 0.7491222620010376,
"learning_rate": 5.936906572928625e-06,
"loss": 1.0515,
"step": 113
},
{
"epoch": 0.4505928853754941,
"grad_norm": 0.8194934129714966,
"learning_rate": 5.8751152948763815e-06,
"loss": 1.0884,
"step": 114
},
{
"epoch": 0.45454545454545453,
"grad_norm": 0.9355419874191284,
"learning_rate": 5.813185825974419e-06,
"loss": 1.4432,
"step": 115
},
{
"epoch": 0.45849802371541504,
"grad_norm": 0.760794460773468,
"learning_rate": 5.751127945603786e-06,
"loss": 1.0239,
"step": 116
},
{
"epoch": 0.4624505928853755,
"grad_norm": 0.9574670791625977,
"learning_rate": 5.68895145342319e-06,
"loss": 1.5178,
"step": 117
},
{
"epoch": 0.466403162055336,
"grad_norm": 0.9131107330322266,
"learning_rate": 5.626666167821522e-06,
"loss": 1.3872,
"step": 118
},
{
"epoch": 0.47035573122529645,
"grad_norm": 0.9126591682434082,
"learning_rate": 5.5642819243674085e-06,
"loss": 1.3254,
"step": 119
},
{
"epoch": 0.4743083003952569,
"grad_norm": 0.8607676029205322,
"learning_rate": 5.5018085742560745e-06,
"loss": 1.1636,
"step": 120
},
{
"epoch": 0.4782608695652174,
"grad_norm": 0.8300676345825195,
"learning_rate": 5.439255982753717e-06,
"loss": 1.0787,
"step": 121
},
{
"epoch": 0.48221343873517786,
"grad_norm": 0.8748376965522766,
"learning_rate": 5.376634027639664e-06,
"loss": 1.3438,
"step": 122
},
{
"epoch": 0.48616600790513836,
"grad_norm": 0.8148254156112671,
"learning_rate": 5.3139525976465675e-06,
"loss": 1.2148,
"step": 123
},
{
"epoch": 0.4901185770750988,
"grad_norm": 0.7850709557533264,
"learning_rate": 5.251221590898848e-06,
"loss": 1.1715,
"step": 124
},
{
"epoch": 0.49407114624505927,
"grad_norm": 0.8794577717781067,
"learning_rate": 5.188450913349674e-06,
"loss": 1.1467,
"step": 125
},
{
"epoch": 0.4980237154150198,
"grad_norm": 0.8375518321990967,
"learning_rate": 5.1256504772166885e-06,
"loss": 1.2558,
"step": 126
},
{
"epoch": 0.5019762845849802,
"grad_norm": 0.7941477298736572,
"learning_rate": 5.062830199416764e-06,
"loss": 1.0128,
"step": 127
},
{
"epoch": 0.5059288537549407,
"grad_norm": 0.9250499606132507,
"learning_rate": 5e-06,
"loss": 1.2622,
"step": 128
},
{
"epoch": 0.5098814229249012,
"grad_norm": 0.9209011793136597,
"learning_rate": 4.937169800583237e-06,
"loss": 1.3989,
"step": 129
},
{
"epoch": 0.5138339920948617,
"grad_norm": 0.8219020962715149,
"learning_rate": 4.874349522783313e-06,
"loss": 1.1264,
"step": 130
},
{
"epoch": 0.5138339920948617,
"eval_loss": 1.1763263940811157,
"eval_runtime": 0.6328,
"eval_samples_per_second": 33.184,
"eval_steps_per_second": 4.741,
"step": 130
},
{
"epoch": 0.5177865612648221,
"grad_norm": 0.9172279238700867,
"learning_rate": 4.811549086650327e-06,
"loss": 1.2868,
"step": 131
},
{
"epoch": 0.5217391304347826,
"grad_norm": 0.8297377228736877,
"learning_rate": 4.748778409101153e-06,
"loss": 1.221,
"step": 132
},
{
"epoch": 0.525691699604743,
"grad_norm": 0.8089572787284851,
"learning_rate": 4.686047402353433e-06,
"loss": 1.2466,
"step": 133
},
{
"epoch": 0.5296442687747036,
"grad_norm": 0.8571174144744873,
"learning_rate": 4.6233659723603374e-06,
"loss": 1.3095,
"step": 134
},
{
"epoch": 0.5335968379446641,
"grad_norm": 0.9645123481750488,
"learning_rate": 4.560744017246284e-06,
"loss": 1.4281,
"step": 135
},
{
"epoch": 0.5375494071146245,
"grad_norm": 0.8215659856796265,
"learning_rate": 4.4981914257439254e-06,
"loss": 1.1636,
"step": 136
},
{
"epoch": 0.541501976284585,
"grad_norm": 0.8466573357582092,
"learning_rate": 4.4357180756325915e-06,
"loss": 1.2409,
"step": 137
},
{
"epoch": 0.5454545454545454,
"grad_norm": 0.8330338597297668,
"learning_rate": 4.373333832178478e-06,
"loss": 1.1857,
"step": 138
},
{
"epoch": 0.549407114624506,
"grad_norm": 0.8727869987487793,
"learning_rate": 4.31104854657681e-06,
"loss": 1.1947,
"step": 139
},
{
"epoch": 0.5533596837944664,
"grad_norm": 0.8822157979011536,
"learning_rate": 4.248872054396215e-06,
"loss": 1.1367,
"step": 140
},
{
"epoch": 0.5573122529644269,
"grad_norm": 0.876277506351471,
"learning_rate": 4.186814174025582e-06,
"loss": 1.1754,
"step": 141
},
{
"epoch": 0.5612648221343873,
"grad_norm": 0.8091803789138794,
"learning_rate": 4.124884705123619e-06,
"loss": 1.1789,
"step": 142
},
{
"epoch": 0.5652173913043478,
"grad_norm": 0.9175235629081726,
"learning_rate": 4.063093427071376e-06,
"loss": 1.2189,
"step": 143
},
{
"epoch": 0.5691699604743083,
"grad_norm": 0.8601986765861511,
"learning_rate": 4.001450097427965e-06,
"loss": 1.3021,
"step": 144
},
{
"epoch": 0.5731225296442688,
"grad_norm": 0.821972668170929,
"learning_rate": 3.939964450389728e-06,
"loss": 1.2536,
"step": 145
},
{
"epoch": 0.5770750988142292,
"grad_norm": 0.8965577483177185,
"learning_rate": 3.8786461952530955e-06,
"loss": 1.2669,
"step": 146
},
{
"epoch": 0.5810276679841897,
"grad_norm": 0.8901940584182739,
"learning_rate": 3.817505014881378e-06,
"loss": 1.2246,
"step": 147
},
{
"epoch": 0.5849802371541502,
"grad_norm": 0.8637762069702148,
"learning_rate": 3.756550564175727e-06,
"loss": 1.2705,
"step": 148
},
{
"epoch": 0.5889328063241107,
"grad_norm": 0.8907751441001892,
"learning_rate": 3.695792468550517e-06,
"loss": 1.3191,
"step": 149
},
{
"epoch": 0.5928853754940712,
"grad_norm": 0.9000714421272278,
"learning_rate": 3.635240322413375e-06,
"loss": 1.3388,
"step": 150
},
{
"epoch": 0.5968379446640316,
"grad_norm": 0.8116331696510315,
"learning_rate": 3.5749036876501196e-06,
"loss": 1.2169,
"step": 151
},
{
"epoch": 0.6007905138339921,
"grad_norm": 0.9376166462898254,
"learning_rate": 3.5147920921148267e-06,
"loss": 1.4159,
"step": 152
},
{
"epoch": 0.6047430830039525,
"grad_norm": 0.8815961480140686,
"learning_rate": 3.4549150281252635e-06,
"loss": 1.3918,
"step": 153
},
{
"epoch": 0.6086956521739131,
"grad_norm": 0.8155955672264099,
"learning_rate": 3.3952819509639534e-06,
"loss": 1.0842,
"step": 154
},
{
"epoch": 0.6126482213438735,
"grad_norm": 0.8554919958114624,
"learning_rate": 3.3359022773850673e-06,
"loss": 1.3536,
"step": 155
},
{
"epoch": 0.616600790513834,
"grad_norm": 0.8661870956420898,
"learning_rate": 3.2767853841274154e-06,
"loss": 1.1721,
"step": 156
},
{
"epoch": 0.616600790513834,
"eval_loss": 1.1584229469299316,
"eval_runtime": 0.6329,
"eval_samples_per_second": 33.182,
"eval_steps_per_second": 4.74,
"step": 156
},
{
"epoch": 0.6205533596837944,
"grad_norm": 0.8114671111106873,
"learning_rate": 3.217940606433747e-06,
"loss": 1.0664,
"step": 157
},
{
"epoch": 0.6245059288537549,
"grad_norm": 0.863220751285553,
"learning_rate": 3.1593772365766107e-06,
"loss": 1.2761,
"step": 158
},
{
"epoch": 0.6284584980237155,
"grad_norm": 0.8586646318435669,
"learning_rate": 3.1011045223909954e-06,
"loss": 1.2633,
"step": 159
},
{
"epoch": 0.6324110671936759,
"grad_norm": 0.8601418137550354,
"learning_rate": 3.043131665813988e-06,
"loss": 1.2919,
"step": 160
},
{
"epoch": 0.6363636363636364,
"grad_norm": 0.8035183548927307,
"learning_rate": 2.9854678214316875e-06,
"loss": 1.1463,
"step": 161
},
{
"epoch": 0.6403162055335968,
"grad_norm": 0.8140465021133423,
"learning_rate": 2.92812209503358e-06,
"loss": 1.0966,
"step": 162
},
{
"epoch": 0.6442687747035574,
"grad_norm": 0.8538705706596375,
"learning_rate": 2.871103542174637e-06,
"loss": 1.2748,
"step": 163
},
{
"epoch": 0.6482213438735178,
"grad_norm": 0.8856031894683838,
"learning_rate": 2.814421166745337e-06,
"loss": 1.3632,
"step": 164
},
{
"epoch": 0.6521739130434783,
"grad_norm": 0.8147869110107422,
"learning_rate": 2.7580839195498397e-06,
"loss": 1.1984,
"step": 165
},
{
"epoch": 0.6561264822134387,
"grad_norm": 0.9556436538696289,
"learning_rate": 2.7021006968925613e-06,
"loss": 1.5325,
"step": 166
},
{
"epoch": 0.6600790513833992,
"grad_norm": 0.8936392664909363,
"learning_rate": 2.646480339173337e-06,
"loss": 1.4192,
"step": 167
},
{
"epoch": 0.6640316205533597,
"grad_norm": 0.8458386063575745,
"learning_rate": 2.5912316294914232e-06,
"loss": 1.2821,
"step": 168
},
{
"epoch": 0.6679841897233202,
"grad_norm": 0.9311079382896423,
"learning_rate": 2.536363292258543e-06,
"loss": 1.3371,
"step": 169
},
{
"epoch": 0.6719367588932806,
"grad_norm": 0.8807729482650757,
"learning_rate": 2.4818839918211963e-06,
"loss": 1.2789,
"step": 170
},
{
"epoch": 0.6758893280632411,
"grad_norm": 0.8326642513275146,
"learning_rate": 2.4278023310924676e-06,
"loss": 1.2545,
"step": 171
},
{
"epoch": 0.6798418972332015,
"grad_norm": 0.7561216950416565,
"learning_rate": 2.3741268501935212e-06,
"loss": 0.9872,
"step": 172
},
{
"epoch": 0.6837944664031621,
"grad_norm": 0.9049252867698669,
"learning_rate": 2.320866025105016e-06,
"loss": 1.0876,
"step": 173
},
{
"epoch": 0.6877470355731226,
"grad_norm": 0.8050791621208191,
"learning_rate": 2.268028266328655e-06,
"loss": 1.2257,
"step": 174
},
{
"epoch": 0.691699604743083,
"grad_norm": 0.7893306016921997,
"learning_rate": 2.2156219175590623e-06,
"loss": 1.1499,
"step": 175
},
{
"epoch": 0.6956521739130435,
"grad_norm": 0.8402442336082458,
"learning_rate": 2.1636552543662187e-06,
"loss": 1.3324,
"step": 176
},
{
"epoch": 0.6996047430830039,
"grad_norm": 0.847458004951477,
"learning_rate": 2.112136482888663e-06,
"loss": 1.3478,
"step": 177
},
{
"epoch": 0.7035573122529645,
"grad_norm": 0.8522107601165771,
"learning_rate": 2.061073738537635e-06,
"loss": 1.2586,
"step": 178
},
{
"epoch": 0.7075098814229249,
"grad_norm": 0.8222721219062805,
"learning_rate": 2.0104750847124075e-06,
"loss": 1.2455,
"step": 179
},
{
"epoch": 0.7114624505928854,
"grad_norm": 0.8545589447021484,
"learning_rate": 1.9603485115269743e-06,
"loss": 1.2892,
"step": 180
},
{
"epoch": 0.7154150197628458,
"grad_norm": 0.7990689873695374,
"learning_rate": 1.910701934548329e-06,
"loss": 1.1216,
"step": 181
},
{
"epoch": 0.7193675889328063,
"grad_norm": 0.8381747007369995,
"learning_rate": 1.8615431935464984e-06,
"loss": 1.2738,
"step": 182
},
{
"epoch": 0.7193675889328063,
"eval_loss": 1.1467477083206177,
"eval_runtime": 0.6308,
"eval_samples_per_second": 33.29,
"eval_steps_per_second": 4.756,
"step": 182
},
{
"epoch": 0.7233201581027668,
"grad_norm": 0.8528211712837219,
"learning_rate": 1.8128800512565514e-06,
"loss": 1.3237,
"step": 183
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.8386929035186768,
"learning_rate": 1.7647201921527802e-06,
"loss": 1.1454,
"step": 184
},
{
"epoch": 0.7312252964426877,
"grad_norm": 0.9228261709213257,
"learning_rate": 1.7170712212352187e-06,
"loss": 1.5128,
"step": 185
},
{
"epoch": 0.7351778656126482,
"grad_norm": 0.763982892036438,
"learning_rate": 1.6699406628287423e-06,
"loss": 0.9779,
"step": 186
},
{
"epoch": 0.7391304347826086,
"grad_norm": 0.9443770051002502,
"learning_rate": 1.6233359593948777e-06,
"loss": 0.9658,
"step": 187
},
{
"epoch": 0.7430830039525692,
"grad_norm": 0.8337656855583191,
"learning_rate": 1.5772644703565564e-06,
"loss": 1.2752,
"step": 188
},
{
"epoch": 0.7470355731225297,
"grad_norm": 0.8052669763565063,
"learning_rate": 1.531733470935976e-06,
"loss": 1.1755,
"step": 189
},
{
"epoch": 0.7509881422924901,
"grad_norm": 0.8237268328666687,
"learning_rate": 1.4867501510057548e-06,
"loss": 1.1529,
"step": 190
},
{
"epoch": 0.7549407114624506,
"grad_norm": 0.8448726534843445,
"learning_rate": 1.4423216139535735e-06,
"loss": 1.2361,
"step": 191
},
{
"epoch": 0.758893280632411,
"grad_norm": 0.8243027329444885,
"learning_rate": 1.3984548755604655e-06,
"loss": 1.2842,
"step": 192
},
{
"epoch": 0.7628458498023716,
"grad_norm": 0.8658849000930786,
"learning_rate": 1.3551568628929434e-06,
"loss": 1.256,
"step": 193
},
{
"epoch": 0.766798418972332,
"grad_norm": 0.8200253248214722,
"learning_rate": 1.312434413209131e-06,
"loss": 1.1356,
"step": 194
},
{
"epoch": 0.7707509881422925,
"grad_norm": 0.8177831768989563,
"learning_rate": 1.2702942728790897e-06,
"loss": 1.151,
"step": 195
},
{
"epoch": 0.7747035573122529,
"grad_norm": 0.8590410351753235,
"learning_rate": 1.2287430963194807e-06,
"loss": 1.3267,
"step": 196
},
{
"epoch": 0.7786561264822134,
"grad_norm": 0.8247159719467163,
"learning_rate": 1.18778744494276e-06,
"loss": 1.1943,
"step": 197
},
{
"epoch": 0.782608695652174,
"grad_norm": 0.9330562949180603,
"learning_rate": 1.1474337861210543e-06,
"loss": 1.4392,
"step": 198
},
{
"epoch": 0.7865612648221344,
"grad_norm": 0.8507537841796875,
"learning_rate": 1.1076884921648834e-06,
"loss": 1.2678,
"step": 199
},
{
"epoch": 0.7905138339920948,
"grad_norm": 0.8290367126464844,
"learning_rate": 1.0685578393169054e-06,
"loss": 1.2738,
"step": 200
},
{
"epoch": 0.7944664031620553,
"grad_norm": 0.8783449530601501,
"learning_rate": 1.0300480067608232e-06,
"loss": 1.4097,
"step": 201
},
{
"epoch": 0.7984189723320159,
"grad_norm": 0.8614851236343384,
"learning_rate": 9.921650756456164e-07,
"loss": 1.1463,
"step": 202
},
{
"epoch": 0.8023715415019763,
"grad_norm": 0.8946048617362976,
"learning_rate": 9.549150281252633e-07,
"loss": 1.3288,
"step": 203
},
{
"epoch": 0.8063241106719368,
"grad_norm": 0.8253918290138245,
"learning_rate": 9.183037464140804e-07,
"loss": 1.1852,
"step": 204
},
{
"epoch": 0.8102766798418972,
"grad_norm": 0.8394426107406616,
"learning_rate": 8.823370118578628e-07,
"loss": 1.2328,
"step": 205
},
{
"epoch": 0.8142292490118577,
"grad_norm": 0.7891396880149841,
"learning_rate": 8.470205040209362e-07,
"loss": 1.2409,
"step": 206
},
{
"epoch": 0.8181818181818182,
"grad_norm": 0.8969730138778687,
"learning_rate": 8.123597997892918e-07,
"loss": 1.4195,
"step": 207
},
{
"epoch": 0.8221343873517787,
"grad_norm": 0.7859252095222473,
"learning_rate": 7.783603724899258e-07,
"loss": 1.1729,
"step": 208
},
{
"epoch": 0.8221343873517787,
"eval_loss": 1.1414591073989868,
"eval_runtime": 0.63,
"eval_samples_per_second": 33.334,
"eval_steps_per_second": 4.762,
"step": 208
},
{
"epoch": 0.8260869565217391,
"grad_norm": 0.8004709482192993,
"learning_rate": 7.450275910265415e-07,
"loss": 1.1787,
"step": 209
},
{
"epoch": 0.8300395256916996,
"grad_norm": 0.8863723874092102,
"learning_rate": 7.123667190317396e-07,
"loss": 1.3486,
"step": 210
},
{
"epoch": 0.83399209486166,
"grad_norm": 0.7677893042564392,
"learning_rate": 6.803829140358237e-07,
"loss": 1.2067,
"step": 211
},
{
"epoch": 0.8379446640316206,
"grad_norm": 0.7935658097267151,
"learning_rate": 6.490812266523716e-07,
"loss": 1.0929,
"step": 212
},
{
"epoch": 0.841897233201581,
"grad_norm": 0.7727168798446655,
"learning_rate": 6.184665997806832e-07,
"loss": 1.0544,
"step": 213
},
{
"epoch": 0.8458498023715415,
"grad_norm": 0.8302650451660156,
"learning_rate": 5.885438678252342e-07,
"loss": 1.2409,
"step": 214
},
{
"epoch": 0.849802371541502,
"grad_norm": 0.8888508081436157,
"learning_rate": 5.593177559322776e-07,
"loss": 1.3816,
"step": 215
},
{
"epoch": 0.8537549407114624,
"grad_norm": 0.76105135679245,
"learning_rate": 5.307928792436812e-07,
"loss": 1.0972,
"step": 216
},
{
"epoch": 0.857707509881423,
"grad_norm": 0.8869984149932861,
"learning_rate": 5.029737421681446e-07,
"loss": 1.3786,
"step": 217
},
{
"epoch": 0.8616600790513834,
"grad_norm": 0.8762255907058716,
"learning_rate": 4.758647376699033e-07,
"loss": 1.3592,
"step": 218
},
{
"epoch": 0.8656126482213439,
"grad_norm": 0.8182483911514282,
"learning_rate": 4.494701465750217e-07,
"loss": 1.3265,
"step": 219
},
{
"epoch": 0.8695652173913043,
"grad_norm": 0.8035054206848145,
"learning_rate": 4.237941368954124e-07,
"loss": 1.1705,
"step": 220
},
{
"epoch": 0.8735177865612648,
"grad_norm": 0.8334324359893799,
"learning_rate": 3.9884076317064813e-07,
"loss": 1.2725,
"step": 221
},
{
"epoch": 0.8774703557312253,
"grad_norm": 0.8486202955245972,
"learning_rate": 3.7461396582771035e-07,
"loss": 1.2205,
"step": 222
},
{
"epoch": 0.8814229249011858,
"grad_norm": 0.8365004658699036,
"learning_rate": 3.511175705587433e-07,
"loss": 1.2246,
"step": 223
},
{
"epoch": 0.8853754940711462,
"grad_norm": 0.7247976660728455,
"learning_rate": 3.283552877169399e-07,
"loss": 1.0818,
"step": 224
},
{
"epoch": 0.8893280632411067,
"grad_norm": 0.9177906513214111,
"learning_rate": 3.0633071173062966e-07,
"loss": 1.328,
"step": 225
},
{
"epoch": 0.8932806324110671,
"grad_norm": 0.8105781078338623,
"learning_rate": 2.850473205356774e-07,
"loss": 1.0848,
"step": 226
},
{
"epoch": 0.8972332015810277,
"grad_norm": 0.8612114191055298,
"learning_rate": 2.6450847502627883e-07,
"loss": 1.3192,
"step": 227
},
{
"epoch": 0.9011857707509882,
"grad_norm": 0.8288993239402771,
"learning_rate": 2.447174185242324e-07,
"loss": 1.2458,
"step": 228
},
{
"epoch": 0.9051383399209486,
"grad_norm": 0.8506494760513306,
"learning_rate": 2.2567727626678527e-07,
"loss": 1.2568,
"step": 229
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.9084119200706482,
"learning_rate": 2.0739105491312028e-07,
"loss": 1.4233,
"step": 230
},
{
"epoch": 0.9130434782608695,
"grad_norm": 0.9303674101829529,
"learning_rate": 1.8986164206957037e-07,
"loss": 1.3435,
"step": 231
},
{
"epoch": 0.9169960474308301,
"grad_norm": 0.8007397055625916,
"learning_rate": 1.7309180583363062e-07,
"loss": 1.1919,
"step": 232
},
{
"epoch": 0.9209486166007905,
"grad_norm": 0.7730187177658081,
"learning_rate": 1.5708419435684463e-07,
"loss": 1.0833,
"step": 233
},
{
"epoch": 0.924901185770751,
"grad_norm": 0.9434295296669006,
"learning_rate": 1.4184133542663014e-07,
"loss": 1.4446,
"step": 234
},
{
"epoch": 0.924901185770751,
"eval_loss": 1.1398719549179077,
"eval_runtime": 0.6371,
"eval_samples_per_second": 32.96,
"eval_steps_per_second": 4.709,
"step": 234
},
{
"epoch": 0.9288537549407114,
"grad_norm": 0.8378840684890747,
"learning_rate": 1.2736563606711384e-07,
"loss": 1.3058,
"step": 235
},
{
"epoch": 0.932806324110672,
"grad_norm": 0.7894282341003418,
"learning_rate": 1.136593821590326e-07,
"loss": 1.1822,
"step": 236
},
{
"epoch": 0.9367588932806324,
"grad_norm": 0.8614757657051086,
"learning_rate": 1.007247380787657e-07,
"loss": 1.2477,
"step": 237
},
{
"epoch": 0.9407114624505929,
"grad_norm": 0.7437376976013184,
"learning_rate": 8.856374635655696e-08,
"loss": 0.9579,
"step": 238
},
{
"epoch": 0.9446640316205533,
"grad_norm": 0.8177477717399597,
"learning_rate": 7.717832735397335e-08,
"loss": 1.2895,
"step": 239
},
{
"epoch": 0.9486166007905138,
"grad_norm": 0.8371565937995911,
"learning_rate": 6.657027896065982e-08,
"loss": 1.2901,
"step": 240
},
{
"epoch": 0.9525691699604744,
"grad_norm": 0.8062224388122559,
"learning_rate": 5.674127631043025e-08,
"loss": 1.1185,
"step": 241
},
{
"epoch": 0.9565217391304348,
"grad_norm": 0.782124936580658,
"learning_rate": 4.769287151674407e-08,
"loss": 1.1315,
"step": 242
},
{
"epoch": 0.9604743083003953,
"grad_norm": 0.8169525265693665,
"learning_rate": 3.9426493427611177e-08,
"loss": 1.3511,
"step": 243
},
{
"epoch": 0.9644268774703557,
"grad_norm": 0.791191577911377,
"learning_rate": 3.194344739995803e-08,
"loss": 1.2501,
"step": 244
},
{
"epoch": 0.9683794466403162,
"grad_norm": 0.7813810706138611,
"learning_rate": 2.5244915093499134e-08,
"loss": 1.1408,
"step": 245
},
{
"epoch": 0.9723320158102767,
"grad_norm": 0.8077353239059448,
"learning_rate": 1.9331954284137476e-08,
"loss": 1.2169,
"step": 246
},
{
"epoch": 0.9762845849802372,
"grad_norm": 0.8798792362213135,
"learning_rate": 1.4205498696930332e-08,
"loss": 1.4156,
"step": 247
},
{
"epoch": 0.9802371541501976,
"grad_norm": 0.7928077578544617,
"learning_rate": 9.866357858642206e-09,
"loss": 1.1922,
"step": 248
},
{
"epoch": 0.9841897233201581,
"grad_norm": 0.8758398294448853,
"learning_rate": 6.315216969912663e-09,
"loss": 1.4098,
"step": 249
},
{
"epoch": 0.9881422924901185,
"grad_norm": 0.8929165005683899,
"learning_rate": 3.5526367970539765e-09,
"loss": 1.3601,
"step": 250
},
{
"epoch": 0.9920948616600791,
"grad_norm": 0.9001562595367432,
"learning_rate": 1.5790535835003006e-09,
"loss": 1.2039,
"step": 251
},
{
"epoch": 0.9960474308300395,
"grad_norm": 0.8405194878578186,
"learning_rate": 3.9477898091944135e-10,
"loss": 1.1514,
"step": 252
},
{
"epoch": 1.0,
"grad_norm": 0.8833491206169128,
"learning_rate": 0.0,
"loss": 1.2887,
"step": 253
},
{
"epoch": 1.0,
"step": 253,
"total_flos": 8.443588829301965e+16,
"train_loss": 1.3333067849219553,
"train_runtime": 1520.8803,
"train_samples_per_second": 1.329,
"train_steps_per_second": 0.166
}
],
"logging_steps": 1,
"max_steps": 253,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 253,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.443588829301965e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}