{
  "best_metric": 0.8196816444396973,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.8281573498964804,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008281573498964804,
      "grad_norm": 0.8388795256614685,
      "learning_rate": 7e-06,
      "loss": 0.804,
      "step": 1
    },
    {
      "epoch": 0.008281573498964804,
      "eval_loss": 1.1692047119140625,
      "eval_runtime": 32.2894,
      "eval_samples_per_second": 6.318,
      "eval_steps_per_second": 1.579,
      "step": 1
    },
    {
      "epoch": 0.016563146997929608,
      "grad_norm": 1.023823618888855,
      "learning_rate": 1.4e-05,
      "loss": 0.8544,
      "step": 2
    },
    {
      "epoch": 0.024844720496894408,
      "grad_norm": 1.0270742177963257,
      "learning_rate": 2.1e-05,
      "loss": 0.8755,
      "step": 3
    },
    {
      "epoch": 0.033126293995859216,
      "grad_norm": 0.9506105780601501,
      "learning_rate": 2.8e-05,
      "loss": 0.8441,
      "step": 4
    },
    {
      "epoch": 0.041407867494824016,
      "grad_norm": 0.7932412028312683,
      "learning_rate": 3.5e-05,
      "loss": 0.7846,
      "step": 5
    },
    {
      "epoch": 0.049689440993788817,
      "grad_norm": 0.7128869295120239,
      "learning_rate": 4.2e-05,
      "loss": 0.9147,
      "step": 6
    },
    {
      "epoch": 0.057971014492753624,
      "grad_norm": 0.4176234304904938,
      "learning_rate": 4.899999999999999e-05,
      "loss": 0.7784,
      "step": 7
    },
    {
      "epoch": 0.06625258799171843,
      "grad_norm": 0.6762263178825378,
      "learning_rate": 5.6e-05,
      "loss": 0.8248,
      "step": 8
    },
    {
      "epoch": 0.07453416149068323,
      "grad_norm": 0.8291305899620056,
      "learning_rate": 6.3e-05,
      "loss": 0.8014,
      "step": 9
    },
    {
      "epoch": 0.08281573498964803,
      "grad_norm": 0.8136356472969055,
      "learning_rate": 7e-05,
      "loss": 0.9253,
      "step": 10
    },
    {
      "epoch": 0.09109730848861283,
      "grad_norm": 0.4319281578063965,
      "learning_rate": 6.999521567473641e-05,
      "loss": 0.8555,
      "step": 11
    },
    {
      "epoch": 0.09937888198757763,
      "grad_norm": 0.36292925477027893,
      "learning_rate": 6.998086400693241e-05,
      "loss": 0.7425,
      "step": 12
    },
    {
      "epoch": 0.10766045548654245,
      "grad_norm": 0.3616969585418701,
      "learning_rate": 6.995694892019065e-05,
      "loss": 0.9021,
      "step": 13
    },
    {
      "epoch": 0.11594202898550725,
      "grad_norm": 0.3888731002807617,
      "learning_rate": 6.99234769526571e-05,
      "loss": 0.7912,
      "step": 14
    },
    {
      "epoch": 0.12422360248447205,
      "grad_norm": 0.3677065968513489,
      "learning_rate": 6.988045725523343e-05,
      "loss": 0.6922,
      "step": 15
    },
    {
      "epoch": 0.13250517598343686,
      "grad_norm": 0.38920730352401733,
      "learning_rate": 6.982790158907539e-05,
      "loss": 0.8843,
      "step": 16
    },
    {
      "epoch": 0.14078674948240166,
      "grad_norm": 0.4462592601776123,
      "learning_rate": 6.976582432237733e-05,
      "loss": 0.9304,
      "step": 17
    },
    {
      "epoch": 0.14906832298136646,
      "grad_norm": 0.40514039993286133,
      "learning_rate": 6.969424242644413e-05,
      "loss": 0.8461,
      "step": 18
    },
    {
      "epoch": 0.15734989648033126,
      "grad_norm": 0.44129592180252075,
      "learning_rate": 6.961317547105138e-05,
      "loss": 0.8518,
      "step": 19
    },
    {
      "epoch": 0.16563146997929606,
      "grad_norm": 0.4335060715675354,
      "learning_rate": 6.952264561909527e-05,
      "loss": 0.8793,
      "step": 20
    },
    {
      "epoch": 0.17391304347826086,
      "grad_norm": 0.543445885181427,
      "learning_rate": 6.942267762053337e-05,
      "loss": 0.9113,
      "step": 21
    },
    {
      "epoch": 0.18219461697722567,
      "grad_norm": 0.5542380809783936,
      "learning_rate": 6.931329880561832e-05,
      "loss": 0.9433,
      "step": 22
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 0.516521692276001,
      "learning_rate": 6.919453907742597e-05,
      "loss": 0.9016,
      "step": 23
    },
    {
      "epoch": 0.19875776397515527,
      "grad_norm": 0.5378891825675964,
      "learning_rate": 6.90664309036802e-05,
      "loss": 0.7877,
      "step": 24
    },
    {
      "epoch": 0.2070393374741201,
      "grad_norm": 0.577548086643219,
      "learning_rate": 6.892900930787656e-05,
      "loss": 0.8454,
      "step": 25
    },
    {
      "epoch": 0.2153209109730849,
      "grad_norm": 0.5783238410949707,
      "learning_rate": 6.87823118597072e-05,
      "loss": 0.8776,
      "step": 26
    },
    {
      "epoch": 0.2236024844720497,
      "grad_norm": 0.5544853210449219,
      "learning_rate": 6.862637866478969e-05,
      "loss": 0.8123,
      "step": 27
    },
    {
      "epoch": 0.2318840579710145,
      "grad_norm": 0.7183583974838257,
      "learning_rate": 6.846125235370252e-05,
      "loss": 0.941,
      "step": 28
    },
    {
      "epoch": 0.2401656314699793,
      "grad_norm": 0.7497084140777588,
      "learning_rate": 6.828697807033038e-05,
      "loss": 0.9533,
      "step": 29
    },
    {
      "epoch": 0.2484472049689441,
      "grad_norm": 1.0239611864089966,
      "learning_rate": 6.81036034595222e-05,
      "loss": 1.0226,
      "step": 30
    },
    {
      "epoch": 0.2567287784679089,
      "grad_norm": 0.215080127120018,
      "learning_rate": 6.791117865406564e-05,
      "loss": 0.5555,
      "step": 31
    },
    {
      "epoch": 0.2650103519668737,
      "grad_norm": 0.32441794872283936,
      "learning_rate": 6.770975626098112e-05,
      "loss": 0.7501,
      "step": 32
    },
    {
      "epoch": 0.2732919254658385,
      "grad_norm": 0.309184730052948,
      "learning_rate": 6.749939134713974e-05,
      "loss": 0.7853,
      "step": 33
    },
    {
      "epoch": 0.2815734989648033,
      "grad_norm": 0.3206530511379242,
      "learning_rate": 6.728014142420846e-05,
      "loss": 0.659,
      "step": 34
    },
    {
      "epoch": 0.2898550724637681,
      "grad_norm": 0.2530266046524048,
      "learning_rate": 6.7052066432927e-05,
      "loss": 0.651,
      "step": 35
    },
    {
      "epoch": 0.2981366459627329,
      "grad_norm": 0.27744585275650024,
      "learning_rate": 6.681522872672069e-05,
      "loss": 0.6502,
      "step": 36
    },
    {
      "epoch": 0.3064182194616977,
      "grad_norm": 0.2372923493385315,
      "learning_rate": 6.656969305465356e-05,
      "loss": 0.7619,
      "step": 37
    },
    {
      "epoch": 0.3146997929606625,
      "grad_norm": 0.2609351575374603,
      "learning_rate": 6.631552654372672e-05,
      "loss": 0.6668,
      "step": 38
    },
    {
      "epoch": 0.32298136645962733,
      "grad_norm": 0.2953057885169983,
      "learning_rate": 6.60527986805264e-05,
      "loss": 0.7653,
      "step": 39
    },
    {
      "epoch": 0.33126293995859213,
      "grad_norm": 0.32191261649131775,
      "learning_rate": 6.578158129222711e-05,
      "loss": 0.8575,
      "step": 40
    },
    {
      "epoch": 0.33954451345755693,
      "grad_norm": 0.2882176339626312,
      "learning_rate": 6.550194852695469e-05,
      "loss": 0.7252,
      "step": 41
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 0.27066636085510254,
      "learning_rate": 6.521397683351509e-05,
      "loss": 0.7575,
      "step": 42
    },
    {
      "epoch": 0.35610766045548653,
      "grad_norm": 0.3208519518375397,
      "learning_rate": 6.491774494049386e-05,
      "loss": 0.8635,
      "step": 43
    },
    {
      "epoch": 0.36438923395445133,
      "grad_norm": 0.35946550965309143,
      "learning_rate": 6.461333383473272e-05,
      "loss": 0.8081,
      "step": 44
    },
    {
      "epoch": 0.37267080745341613,
      "grad_norm": 0.3770899176597595,
      "learning_rate": 6.430082673918849e-05,
      "loss": 0.8199,
      "step": 45
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 0.36672505736351013,
      "learning_rate": 6.398030909018069e-05,
      "loss": 0.8569,
      "step": 46
    },
    {
      "epoch": 0.38923395445134573,
      "grad_norm": 0.3554442226886749,
      "learning_rate": 6.365186851403423e-05,
      "loss": 0.8098,
      "step": 47
    },
    {
      "epoch": 0.39751552795031053,
      "grad_norm": 0.3235284388065338,
      "learning_rate": 6.331559480312315e-05,
      "loss": 0.791,
      "step": 48
    },
    {
      "epoch": 0.4057971014492754,
      "grad_norm": 0.3624836206436157,
      "learning_rate": 6.297157989132236e-05,
      "loss": 0.7369,
      "step": 49
    },
    {
      "epoch": 0.4140786749482402,
      "grad_norm": 0.3535378873348236,
      "learning_rate": 6.261991782887377e-05,
      "loss": 0.6616,
      "step": 50
    },
    {
      "epoch": 0.4140786749482402,
      "eval_loss": 0.8644523024559021,
      "eval_runtime": 32.248,
      "eval_samples_per_second": 6.326,
      "eval_steps_per_second": 1.581,
      "step": 50
    },
    {
      "epoch": 0.422360248447205,
      "grad_norm": 0.3799665868282318,
      "learning_rate": 6.226070475667393e-05,
      "loss": 0.7939,
      "step": 51
    },
    {
      "epoch": 0.4306418219461698,
      "grad_norm": 0.4117141664028168,
      "learning_rate": 6.189403887999006e-05,
      "loss": 0.7855,
      "step": 52
    },
    {
      "epoch": 0.4389233954451346,
      "grad_norm": 0.4020320177078247,
      "learning_rate": 6.152002044161171e-05,
      "loss": 0.8022,
      "step": 53
    },
    {
      "epoch": 0.4472049689440994,
      "grad_norm": 0.46013712882995605,
      "learning_rate": 6.113875169444539e-05,
      "loss": 0.8572,
      "step": 54
    },
    {
      "epoch": 0.4554865424430642,
      "grad_norm": 0.45274391770362854,
      "learning_rate": 6.0750336873559605e-05,
      "loss": 0.7123,
      "step": 55
    },
    {
      "epoch": 0.463768115942029,
      "grad_norm": 0.5365428328514099,
      "learning_rate": 6.035488216768811e-05,
      "loss": 0.8758,
      "step": 56
    },
    {
      "epoch": 0.4720496894409938,
      "grad_norm": 0.641646146774292,
      "learning_rate": 5.9952495690198894e-05,
      "loss": 0.9564,
      "step": 57
    },
    {
      "epoch": 0.4803312629399586,
      "grad_norm": 0.508011519908905,
      "learning_rate": 5.954328744953709e-05,
      "loss": 0.8731,
      "step": 58
    },
    {
      "epoch": 0.4886128364389234,
      "grad_norm": 0.8867300152778625,
      "learning_rate": 5.91273693191498e-05,
      "loss": 0.9009,
      "step": 59
    },
    {
      "epoch": 0.4968944099378882,
      "grad_norm": 0.9791533946990967,
      "learning_rate": 5.870485500690094e-05,
      "loss": 1.1371,
      "step": 60
    },
    {
      "epoch": 0.505175983436853,
      "grad_norm": 0.24218077957630157,
      "learning_rate": 5.827586002398468e-05,
      "loss": 0.6875,
      "step": 61
    },
    {
      "epoch": 0.5134575569358178,
      "grad_norm": 0.22737136483192444,
      "learning_rate": 5.784050165334589e-05,
      "loss": 0.7116,
      "step": 62
    },
    {
      "epoch": 0.5217391304347826,
      "grad_norm": 0.2559671103954315,
      "learning_rate": 5.739889891761608e-05,
      "loss": 0.7719,
      "step": 63
    },
    {
      "epoch": 0.5300207039337475,
      "grad_norm": 0.24487318098545074,
      "learning_rate": 5.6951172546573794e-05,
      "loss": 0.7212,
      "step": 64
    },
    {
      "epoch": 0.5383022774327122,
      "grad_norm": 0.23421718180179596,
      "learning_rate": 5.6497444944138376e-05,
      "loss": 0.7471,
      "step": 65
    },
    {
      "epoch": 0.546583850931677,
      "grad_norm": 0.21821844577789307,
      "learning_rate": 5.603784015490587e-05,
      "loss": 0.6552,
      "step": 66
    },
    {
      "epoch": 0.5548654244306418,
      "grad_norm": 0.26210862398147583,
      "learning_rate": 5.557248383023655e-05,
      "loss": 0.774,
      "step": 67
    },
    {
      "epoch": 0.5631469979296067,
      "grad_norm": 0.2591906785964966,
      "learning_rate": 5.510150319390302e-05,
      "loss": 0.687,
      "step": 68
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.26978063583374023,
      "learning_rate": 5.4625027007308546e-05,
      "loss": 0.7278,
      "step": 69
    },
    {
      "epoch": 0.5797101449275363,
      "grad_norm": 0.24709314107894897,
      "learning_rate": 5.414318553428494e-05,
      "loss": 0.5976,
      "step": 70
    },
    {
      "epoch": 0.587991718426501,
      "grad_norm": 0.30029296875,
      "learning_rate": 5.3656110505479776e-05,
      "loss": 0.7129,
      "step": 71
    },
    {
      "epoch": 0.5962732919254659,
      "grad_norm": 0.32972994446754456,
      "learning_rate": 5.316393508234253e-05,
      "loss": 0.8332,
      "step": 72
    },
    {
      "epoch": 0.6045548654244306,
      "grad_norm": 0.3365096151828766,
      "learning_rate": 5.266679382071953e-05,
      "loss": 0.8121,
      "step": 73
    },
    {
      "epoch": 0.6128364389233955,
      "grad_norm": 0.31908273696899414,
      "learning_rate": 5.216482263406778e-05,
      "loss": 0.8543,
      "step": 74
    },
    {
      "epoch": 0.6211180124223602,
      "grad_norm": 0.3485449552536011,
      "learning_rate": 5.1658158756297576e-05,
      "loss": 0.8116,
      "step": 75
    },
    {
      "epoch": 0.629399585921325,
      "grad_norm": 0.33620256185531616,
      "learning_rate": 5.114694070425407e-05,
      "loss": 0.7493,
      "step": 76
    },
    {
      "epoch": 0.6376811594202898,
      "grad_norm": 0.35500848293304443,
      "learning_rate": 5.063130823984823e-05,
      "loss": 0.7026,
      "step": 77
    },
    {
      "epoch": 0.6459627329192547,
      "grad_norm": 0.4022809565067291,
      "learning_rate": 5.011140233184724e-05,
      "loss": 0.7928,
      "step": 78
    },
    {
      "epoch": 0.6542443064182195,
      "grad_norm": 0.41914957761764526,
      "learning_rate": 4.958736511733516e-05,
      "loss": 0.8219,
      "step": 79
    },
    {
      "epoch": 0.6625258799171843,
      "grad_norm": 0.4002333879470825,
      "learning_rate": 4.905933986285393e-05,
      "loss": 0.8019,
      "step": 80
    },
    {
      "epoch": 0.6708074534161491,
      "grad_norm": 0.46458199620246887,
      "learning_rate": 4.8527470925235824e-05,
      "loss": 0.7353,
      "step": 81
    },
    {
      "epoch": 0.6790890269151139,
      "grad_norm": 0.5049306154251099,
      "learning_rate": 4.799190371213772e-05,
      "loss": 0.935,
      "step": 82
    },
    {
      "epoch": 0.6873706004140787,
      "grad_norm": 0.48192158341407776,
      "learning_rate": 4.745278464228808e-05,
      "loss": 0.8568,
      "step": 83
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 0.5385396480560303,
      "learning_rate": 4.69102611054575e-05,
      "loss": 0.8613,
      "step": 84
    },
    {
      "epoch": 0.7039337474120083,
      "grad_norm": 0.4860864579677582,
      "learning_rate": 4.6364481422163926e-05,
      "loss": 0.8288,
      "step": 85
    },
    {
      "epoch": 0.7122153209109731,
      "grad_norm": 0.4691753387451172,
      "learning_rate": 4.581559480312316e-05,
      "loss": 0.7301,
      "step": 86
    },
    {
      "epoch": 0.7204968944099379,
      "grad_norm": 0.6713128685951233,
      "learning_rate": 4.526375130845627e-05,
      "loss": 1.0118,
      "step": 87
    },
    {
      "epoch": 0.7287784679089027,
      "grad_norm": 0.8337951302528381,
      "learning_rate": 4.4709101806664554e-05,
      "loss": 0.8735,
      "step": 88
    },
    {
      "epoch": 0.7370600414078675,
      "grad_norm": 0.7612811326980591,
      "learning_rate": 4.4151797933383685e-05,
      "loss": 0.9304,
      "step": 89
    },
    {
      "epoch": 0.7453416149068323,
      "grad_norm": 0.8643386960029602,
      "learning_rate": 4.359199204992797e-05,
      "loss": 1.0096,
      "step": 90
    },
    {
      "epoch": 0.7536231884057971,
      "grad_norm": 0.20685485005378723,
      "learning_rate": 4.30298372016363e-05,
      "loss": 0.6227,
      "step": 91
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 0.25332385301589966,
      "learning_rate": 4.246548707603114e-05,
      "loss": 0.7315,
      "step": 92
    },
    {
      "epoch": 0.7701863354037267,
      "grad_norm": 0.24437353014945984,
      "learning_rate": 4.1899095960801805e-05,
      "loss": 0.6921,
      "step": 93
    },
    {
      "epoch": 0.7784679089026915,
      "grad_norm": 0.2593900263309479,
      "learning_rate": 4.133081870162385e-05,
      "loss": 0.7514,
      "step": 94
    },
    {
      "epoch": 0.7867494824016563,
      "grad_norm": 0.2574782371520996,
      "learning_rate": 4.076081065982569e-05,
      "loss": 0.6447,
      "step": 95
    },
    {
      "epoch": 0.7950310559006211,
      "grad_norm": 0.3053584396839142,
      "learning_rate": 4.018922766991447e-05,
      "loss": 0.6437,
      "step": 96
    },
    {
      "epoch": 0.8033126293995859,
      "grad_norm": 0.2642808258533478,
      "learning_rate": 3.961622599697241e-05,
      "loss": 0.7539,
      "step": 97
    },
    {
      "epoch": 0.8115942028985508,
      "grad_norm": 0.2618858814239502,
      "learning_rate": 3.9041962293935516e-05,
      "loss": 0.6641,
      "step": 98
    },
    {
      "epoch": 0.8198757763975155,
      "grad_norm": 0.2602124512195587,
      "learning_rate": 3.84665935587662e-05,
      "loss": 0.6166,
      "step": 99
    },
    {
      "epoch": 0.8281573498964804,
      "grad_norm": 0.2887020707130432,
      "learning_rate": 3.7890277091531636e-05,
      "loss": 0.6502,
      "step": 100
    },
    {
      "epoch": 0.8281573498964804,
      "eval_loss": 0.8196816444396973,
      "eval_runtime": 32.2536,
      "eval_samples_per_second": 6.325,
      "eval_steps_per_second": 1.581,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 4,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.373606698567598e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}