{
  "best_metric": 0.635071337223053,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.10085728693898134,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0010085728693898135,
      "grad_norm": 1.311654806137085,
      "learning_rate": 1.0018000000000001e-05,
      "loss": 0.9276,
      "step": 1
    },
    {
      "epoch": 0.0010085728693898135,
      "eval_loss": 1.104080319404602,
      "eval_runtime": 47.2809,
      "eval_samples_per_second": 8.841,
      "eval_steps_per_second": 2.221,
      "step": 1
    },
    {
      "epoch": 0.002017145738779627,
      "grad_norm": 1.4768002033233643,
      "learning_rate": 2.0036000000000003e-05,
      "loss": 1.1293,
      "step": 2
    },
    {
      "epoch": 0.0030257186081694403,
      "grad_norm": 1.863478422164917,
      "learning_rate": 3.0054e-05,
      "loss": 1.095,
      "step": 3
    },
    {
      "epoch": 0.004034291477559254,
      "grad_norm": 1.4832903146743774,
      "learning_rate": 4.0072000000000005e-05,
      "loss": 1.0675,
      "step": 4
    },
    {
      "epoch": 0.005042864346949067,
      "grad_norm": 1.516528606414795,
      "learning_rate": 5.009e-05,
      "loss": 1.1571,
      "step": 5
    },
    {
      "epoch": 0.006051437216338881,
      "grad_norm": 1.0894883871078491,
      "learning_rate": 6.0108e-05,
      "loss": 0.8794,
      "step": 6
    },
    {
      "epoch": 0.0070600100857286935,
      "grad_norm": 1.1469849348068237,
      "learning_rate": 7.0126e-05,
      "loss": 0.7944,
      "step": 7
    },
    {
      "epoch": 0.008068582955118508,
      "grad_norm": 0.9738489389419556,
      "learning_rate": 8.014400000000001e-05,
      "loss": 0.7899,
      "step": 8
    },
    {
      "epoch": 0.009077155824508321,
      "grad_norm": 1.1924914121627808,
      "learning_rate": 9.016200000000001e-05,
      "loss": 0.8915,
      "step": 9
    },
    {
      "epoch": 0.010085728693898134,
      "grad_norm": 0.986112117767334,
      "learning_rate": 0.00010018,
      "loss": 0.735,
      "step": 10
    },
    {
      "epoch": 0.011094301563287948,
      "grad_norm": 0.9808599948883057,
      "learning_rate": 9.965273684210526e-05,
      "loss": 0.7869,
      "step": 11
    },
    {
      "epoch": 0.012102874432677761,
      "grad_norm": 0.8853185176849365,
      "learning_rate": 9.912547368421053e-05,
      "loss": 0.7262,
      "step": 12
    },
    {
      "epoch": 0.013111447302067574,
      "grad_norm": 0.7452360987663269,
      "learning_rate": 9.859821052631579e-05,
      "loss": 0.679,
      "step": 13
    },
    {
      "epoch": 0.014120020171457387,
      "grad_norm": 0.7864087224006653,
      "learning_rate": 9.807094736842106e-05,
      "loss": 0.6494,
      "step": 14
    },
    {
      "epoch": 0.015128593040847202,
      "grad_norm": 0.7922771573066711,
      "learning_rate": 9.754368421052633e-05,
      "loss": 0.6502,
      "step": 15
    },
    {
      "epoch": 0.016137165910237016,
      "grad_norm": 0.7772244215011597,
      "learning_rate": 9.701642105263158e-05,
      "loss": 0.5793,
      "step": 16
    },
    {
      "epoch": 0.01714573877962683,
      "grad_norm": 0.7786775827407837,
      "learning_rate": 9.648915789473685e-05,
      "loss": 0.5646,
      "step": 17
    },
    {
      "epoch": 0.018154311649016642,
      "grad_norm": 0.9989523887634277,
      "learning_rate": 9.596189473684211e-05,
      "loss": 0.8072,
      "step": 18
    },
    {
      "epoch": 0.019162884518406455,
      "grad_norm": 0.8197798728942871,
      "learning_rate": 9.543463157894737e-05,
      "loss": 0.549,
      "step": 19
    },
    {
      "epoch": 0.020171457387796268,
      "grad_norm": 0.727142333984375,
      "learning_rate": 9.490736842105264e-05,
      "loss": 0.6405,
      "step": 20
    },
    {
      "epoch": 0.02118003025718608,
      "grad_norm": 0.7259921431541443,
      "learning_rate": 9.43801052631579e-05,
      "loss": 0.6329,
      "step": 21
    },
    {
      "epoch": 0.022188603126575897,
      "grad_norm": 0.7431148886680603,
      "learning_rate": 9.385284210526316e-05,
      "loss": 0.522,
      "step": 22
    },
    {
      "epoch": 0.02319717599596571,
      "grad_norm": 0.8169230818748474,
      "learning_rate": 9.332557894736843e-05,
      "loss": 0.7403,
      "step": 23
    },
    {
      "epoch": 0.024205748865355523,
      "grad_norm": 0.9084967970848083,
      "learning_rate": 9.279831578947369e-05,
      "loss": 0.6538,
      "step": 24
    },
    {
      "epoch": 0.025214321734745335,
      "grad_norm": 0.8007498383522034,
      "learning_rate": 9.227105263157896e-05,
      "loss": 0.6519,
      "step": 25
    },
    {
      "epoch": 0.026222894604135148,
      "grad_norm": 0.7854844927787781,
      "learning_rate": 9.174378947368421e-05,
      "loss": 0.6472,
      "step": 26
    },
    {
      "epoch": 0.02723146747352496,
      "grad_norm": 0.8193365335464478,
      "learning_rate": 9.121652631578948e-05,
      "loss": 0.5204,
      "step": 27
    },
    {
      "epoch": 0.028240040342914774,
      "grad_norm": 0.8219320178031921,
      "learning_rate": 9.068926315789475e-05,
      "loss": 0.6833,
      "step": 28
    },
    {
      "epoch": 0.02924861321230459,
      "grad_norm": 0.8295514583587646,
      "learning_rate": 9.016200000000001e-05,
      "loss": 0.6379,
      "step": 29
    },
    {
      "epoch": 0.030257186081694403,
      "grad_norm": 0.9452921748161316,
      "learning_rate": 8.963473684210526e-05,
      "loss": 0.7919,
      "step": 30
    },
    {
      "epoch": 0.031265758951084216,
      "grad_norm": 0.893158495426178,
      "learning_rate": 8.910747368421053e-05,
      "loss": 0.8002,
      "step": 31
    },
    {
      "epoch": 0.03227433182047403,
      "grad_norm": 0.7392338514328003,
      "learning_rate": 8.858021052631579e-05,
      "loss": 0.6503,
      "step": 32
    },
    {
      "epoch": 0.03328290468986384,
      "grad_norm": 0.8364588618278503,
      "learning_rate": 8.805294736842106e-05,
      "loss": 0.7389,
      "step": 33
    },
    {
      "epoch": 0.03429147755925366,
      "grad_norm": 0.8333988189697266,
      "learning_rate": 8.752568421052633e-05,
      "loss": 0.718,
      "step": 34
    },
    {
      "epoch": 0.03530005042864347,
      "grad_norm": 0.8555812835693359,
      "learning_rate": 8.699842105263159e-05,
      "loss": 0.8115,
      "step": 35
    },
    {
      "epoch": 0.036308623298033284,
      "grad_norm": 0.909781277179718,
      "learning_rate": 8.647115789473686e-05,
      "loss": 0.7834,
      "step": 36
    },
    {
      "epoch": 0.03731719616742309,
      "grad_norm": 0.8471423387527466,
      "learning_rate": 8.594389473684211e-05,
      "loss": 0.7363,
      "step": 37
    },
    {
      "epoch": 0.03832576903681291,
      "grad_norm": 0.7747591137886047,
      "learning_rate": 8.541663157894737e-05,
      "loss": 0.7223,
      "step": 38
    },
    {
      "epoch": 0.039334341906202726,
      "grad_norm": 0.7503566145896912,
      "learning_rate": 8.488936842105264e-05,
      "loss": 0.6464,
      "step": 39
    },
    {
      "epoch": 0.040342914775592535,
      "grad_norm": 0.7485514283180237,
      "learning_rate": 8.43621052631579e-05,
      "loss": 0.624,
      "step": 40
    },
    {
      "epoch": 0.04135148764498235,
      "grad_norm": 0.8754268884658813,
      "learning_rate": 8.383484210526316e-05,
      "loss": 0.7319,
      "step": 41
    },
    {
      "epoch": 0.04236006051437216,
      "grad_norm": 0.9374569058418274,
      "learning_rate": 8.330757894736843e-05,
      "loss": 0.8252,
      "step": 42
    },
    {
      "epoch": 0.04336863338376198,
      "grad_norm": 1.0073909759521484,
      "learning_rate": 8.278031578947369e-05,
      "loss": 0.8029,
      "step": 43
    },
    {
      "epoch": 0.044377206253151794,
      "grad_norm": 0.9302268624305725,
      "learning_rate": 8.225305263157896e-05,
      "loss": 0.7655,
      "step": 44
    },
    {
      "epoch": 0.0453857791225416,
      "grad_norm": 0.9419411420822144,
      "learning_rate": 8.172578947368422e-05,
      "loss": 0.7218,
      "step": 45
    },
    {
      "epoch": 0.04639435199193142,
      "grad_norm": 0.936607301235199,
      "learning_rate": 8.119852631578947e-05,
      "loss": 0.7549,
      "step": 46
    },
    {
      "epoch": 0.04740292486132123,
      "grad_norm": 0.8890344500541687,
      "learning_rate": 8.067126315789474e-05,
      "loss": 0.7809,
      "step": 47
    },
    {
      "epoch": 0.048411497730711045,
      "grad_norm": 1.0612900257110596,
      "learning_rate": 8.014400000000001e-05,
      "loss": 0.7987,
      "step": 48
    },
    {
      "epoch": 0.049420070600100854,
      "grad_norm": 0.9946021437644958,
      "learning_rate": 7.961673684210527e-05,
      "loss": 0.8006,
      "step": 49
    },
    {
      "epoch": 0.05042864346949067,
      "grad_norm": 1.1615980863571167,
      "learning_rate": 7.908947368421054e-05,
      "loss": 0.8517,
      "step": 50
    },
    {
      "epoch": 0.05042864346949067,
      "eval_loss": 0.6663289666175842,
      "eval_runtime": 47.2665,
      "eval_samples_per_second": 8.843,
      "eval_steps_per_second": 2.221,
      "step": 50
    },
    {
      "epoch": 0.05143721633888049,
      "grad_norm": 0.8349344730377197,
      "learning_rate": 7.856221052631579e-05,
      "loss": 0.4587,
      "step": 51
    },
    {
      "epoch": 0.052445789208270296,
      "grad_norm": 0.6953268647193909,
      "learning_rate": 7.803494736842106e-05,
      "loss": 0.666,
      "step": 52
    },
    {
      "epoch": 0.05345436207766011,
      "grad_norm": 0.7224613428115845,
      "learning_rate": 7.750768421052632e-05,
      "loss": 0.7536,
      "step": 53
    },
    {
      "epoch": 0.05446293494704992,
      "grad_norm": 0.6757810115814209,
      "learning_rate": 7.698042105263157e-05,
      "loss": 0.6169,
      "step": 54
    },
    {
      "epoch": 0.05547150781643974,
      "grad_norm": 0.7635074257850647,
      "learning_rate": 7.645315789473686e-05,
      "loss": 0.6307,
      "step": 55
    },
    {
      "epoch": 0.05648008068582955,
      "grad_norm": 0.6975710988044739,
      "learning_rate": 7.592589473684211e-05,
      "loss": 0.6233,
      "step": 56
    },
    {
      "epoch": 0.057488653555219364,
      "grad_norm": 0.9252186417579651,
      "learning_rate": 7.539863157894737e-05,
      "loss": 0.6551,
      "step": 57
    },
    {
      "epoch": 0.05849722642460918,
      "grad_norm": 0.8248884677886963,
      "learning_rate": 7.487136842105264e-05,
      "loss": 0.5789,
      "step": 58
    },
    {
      "epoch": 0.05950579929399899,
      "grad_norm": 0.6074532866477966,
      "learning_rate": 7.43441052631579e-05,
      "loss": 0.5004,
      "step": 59
    },
    {
      "epoch": 0.060514372163388806,
      "grad_norm": 0.6752936244010925,
      "learning_rate": 7.381684210526315e-05,
      "loss": 0.4884,
      "step": 60
    },
    {
      "epoch": 0.061522945032778616,
      "grad_norm": 0.7720556259155273,
      "learning_rate": 7.328957894736844e-05,
      "loss": 0.7758,
      "step": 61
    },
    {
      "epoch": 0.06253151790216843,
      "grad_norm": 0.699116051197052,
      "learning_rate": 7.276231578947369e-05,
      "loss": 0.5716,
      "step": 62
    },
    {
      "epoch": 0.06354009077155824,
      "grad_norm": 0.7762227058410645,
      "learning_rate": 7.223505263157895e-05,
      "loss": 0.6935,
      "step": 63
    },
    {
      "epoch": 0.06454866364094806,
      "grad_norm": 0.6913344264030457,
      "learning_rate": 7.170778947368422e-05,
      "loss": 0.6068,
      "step": 64
    },
    {
      "epoch": 0.06555723651033787,
      "grad_norm": 0.7144531011581421,
      "learning_rate": 7.118052631578947e-05,
      "loss": 0.6431,
      "step": 65
    },
    {
      "epoch": 0.06656580937972768,
      "grad_norm": 0.6667282581329346,
      "learning_rate": 7.065326315789474e-05,
      "loss": 0.5421,
      "step": 66
    },
    {
      "epoch": 0.06757438224911749,
      "grad_norm": 0.6969847083091736,
      "learning_rate": 7.0126e-05,
      "loss": 0.6785,
      "step": 67
    },
    {
      "epoch": 0.06858295511850732,
      "grad_norm": 0.6973515748977661,
      "learning_rate": 6.959873684210527e-05,
      "loss": 0.5455,
      "step": 68
    },
    {
      "epoch": 0.06959152798789713,
      "grad_norm": 0.7713499069213867,
      "learning_rate": 6.907147368421054e-05,
      "loss": 0.7372,
      "step": 69
    },
    {
      "epoch": 0.07060010085728693,
      "grad_norm": 0.5957585573196411,
      "learning_rate": 6.85442105263158e-05,
      "loss": 0.4954,
      "step": 70
    },
    {
      "epoch": 0.07160867372667676,
      "grad_norm": 0.7025538086891174,
      "learning_rate": 6.801694736842105e-05,
      "loss": 0.5496,
      "step": 71
    },
    {
      "epoch": 0.07261724659606657,
      "grad_norm": 0.7217448353767395,
      "learning_rate": 6.748968421052632e-05,
      "loss": 0.5567,
      "step": 72
    },
    {
      "epoch": 0.07362581946545638,
      "grad_norm": 0.7021524906158447,
      "learning_rate": 6.696242105263158e-05,
      "loss": 0.5291,
      "step": 73
    },
    {
      "epoch": 0.07463439233484619,
      "grad_norm": 0.7426285743713379,
      "learning_rate": 6.643515789473685e-05,
      "loss": 0.6815,
      "step": 74
    },
    {
      "epoch": 0.07564296520423601,
      "grad_norm": 0.7391223311424255,
      "learning_rate": 6.590789473684212e-05,
      "loss": 0.6689,
      "step": 75
    },
    {
      "epoch": 0.07665153807362582,
      "grad_norm": 0.6848605871200562,
      "learning_rate": 6.538063157894737e-05,
      "loss": 0.5782,
      "step": 76
    },
    {
      "epoch": 0.07766011094301563,
      "grad_norm": 0.711951732635498,
      "learning_rate": 6.485336842105264e-05,
      "loss": 0.5801,
      "step": 77
    },
    {
      "epoch": 0.07866868381240545,
      "grad_norm": 0.5852221846580505,
      "learning_rate": 6.43261052631579e-05,
      "loss": 0.4995,
      "step": 78
    },
    {
      "epoch": 0.07967725668179526,
      "grad_norm": 0.9040377736091614,
      "learning_rate": 6.379884210526315e-05,
      "loss": 0.5318,
      "step": 79
    },
    {
      "epoch": 0.08068582955118507,
      "grad_norm": 0.756641149520874,
      "learning_rate": 6.327157894736842e-05,
      "loss": 0.6616,
      "step": 80
    },
    {
      "epoch": 0.08169440242057488,
      "grad_norm": 0.7158970832824707,
      "learning_rate": 6.274431578947368e-05,
      "loss": 0.6113,
      "step": 81
    },
    {
      "epoch": 0.0827029752899647,
      "grad_norm": 0.6931173205375671,
      "learning_rate": 6.221705263157895e-05,
      "loss": 0.5882,
      "step": 82
    },
    {
      "epoch": 0.08371154815935451,
      "grad_norm": 0.7512569427490234,
      "learning_rate": 6.168978947368422e-05,
      "loss": 0.6451,
      "step": 83
    },
    {
      "epoch": 0.08472012102874432,
      "grad_norm": 0.7793223261833191,
      "learning_rate": 6.116252631578948e-05,
      "loss": 0.6186,
      "step": 84
    },
    {
      "epoch": 0.08572869389813415,
      "grad_norm": 0.7280353307723999,
      "learning_rate": 6.063526315789474e-05,
      "loss": 0.6657,
      "step": 85
    },
    {
      "epoch": 0.08673726676752395,
      "grad_norm": 0.7213184833526611,
      "learning_rate": 6.0108e-05,
      "loss": 0.6312,
      "step": 86
    },
    {
      "epoch": 0.08774583963691376,
      "grad_norm": 0.7809893488883972,
      "learning_rate": 5.9580736842105264e-05,
      "loss": 0.6928,
      "step": 87
    },
    {
      "epoch": 0.08875441250630359,
      "grad_norm": 0.8184586763381958,
      "learning_rate": 5.905347368421053e-05,
      "loss": 0.6551,
      "step": 88
    },
    {
      "epoch": 0.0897629853756934,
      "grad_norm": 0.8529835939407349,
      "learning_rate": 5.85262105263158e-05,
      "loss": 0.7481,
      "step": 89
    },
    {
      "epoch": 0.0907715582450832,
      "grad_norm": 0.8931265473365784,
      "learning_rate": 5.799894736842106e-05,
      "loss": 0.7928,
      "step": 90
    },
    {
      "epoch": 0.09178013111447302,
      "grad_norm": 0.7958138585090637,
      "learning_rate": 5.747168421052632e-05,
      "loss": 0.6151,
      "step": 91
    },
    {
      "epoch": 0.09278870398386284,
      "grad_norm": 0.7931579351425171,
      "learning_rate": 5.694442105263158e-05,
      "loss": 0.7141,
      "step": 92
    },
    {
      "epoch": 0.09379727685325265,
      "grad_norm": 0.8281200528144836,
      "learning_rate": 5.641715789473684e-05,
      "loss": 0.6538,
      "step": 93
    },
    {
      "epoch": 0.09480584972264246,
      "grad_norm": 0.8042008280754089,
      "learning_rate": 5.5889894736842104e-05,
      "loss": 0.6661,
      "step": 94
    },
    {
      "epoch": 0.09581442259203228,
      "grad_norm": 0.8040047287940979,
      "learning_rate": 5.5362631578947374e-05,
      "loss": 0.5659,
      "step": 95
    },
    {
      "epoch": 0.09682299546142209,
      "grad_norm": 1.044537901878357,
      "learning_rate": 5.483536842105264e-05,
      "loss": 0.835,
      "step": 96
    },
    {
      "epoch": 0.0978315683308119,
      "grad_norm": 0.9413779377937317,
      "learning_rate": 5.43081052631579e-05,
      "loss": 0.7605,
      "step": 97
    },
    {
      "epoch": 0.09884014120020171,
      "grad_norm": 0.9656868577003479,
      "learning_rate": 5.378084210526316e-05,
      "loss": 0.7144,
      "step": 98
    },
    {
      "epoch": 0.09984871406959153,
      "grad_norm": 1.0694280862808228,
      "learning_rate": 5.3253578947368426e-05,
      "loss": 0.8615,
      "step": 99
    },
    {
      "epoch": 0.10085728693898134,
      "grad_norm": 1.2606571912765503,
      "learning_rate": 5.272631578947368e-05,
      "loss": 0.8549,
      "step": 100
    },
    {
      "epoch": 0.10085728693898134,
      "eval_loss": 0.635071337223053,
      "eval_runtime": 47.0624,
      "eval_samples_per_second": 8.882,
      "eval_steps_per_second": 2.231,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.648580773235917e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}