|
{ |
|
"best_metric": null,
|
"best_model_checkpoint": "miner_id_24/checkpoint-50", |
|
"epoch": 0.21499596882558453, |
|
"eval_steps": 50, |
|
"global_step": 100, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.002149959688255845, |
|
"grad_norm": null,
|
"learning_rate": 1.5000000000000002e-07, |
|
"loss": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.002149959688255845, |
|
"eval_loss": null,
|
"eval_runtime": 25.4332, |
|
"eval_samples_per_second": 15.413, |
|
"eval_steps_per_second": 15.413, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.00429991937651169, |
|
"grad_norm": null,
|
"learning_rate": 3.0000000000000004e-07, |
|
"loss": 0.0, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.0064498790647675355, |
|
"grad_norm": null,
|
"learning_rate": 4.5e-07, |
|
"loss": 0.0, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.00859983875302338, |
|
"grad_norm": null,
|
"learning_rate": 6.000000000000001e-07, |
|
"loss": 0.0, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.010749798441279226, |
|
"grad_norm": null,
|
"learning_rate": 7.5e-07, |
|
"loss": 0.0, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.012899758129535071, |
|
"grad_norm": null,
|
"learning_rate": 9e-07, |
|
"loss": 0.0, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.015049717817790917, |
|
"grad_norm": null,
|
"learning_rate": 1.0500000000000001e-06, |
|
"loss": 0.0, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.01719967750604676, |
|
"grad_norm": null,
|
"learning_rate": 1.2000000000000002e-06, |
|
"loss": 0.0, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.019349637194302608, |
|
"grad_norm": null,
|
"learning_rate": 1.35e-06, |
|
"loss": 0.0, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.021499596882558453, |
|
"grad_norm": null,
|
"learning_rate": 1.5e-06, |
|
"loss": 0.0, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.023649556570814297, |
|
"grad_norm": null,
|
"learning_rate": 1.65e-06, |
|
"loss": 0.0, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.025799516259070142, |
|
"grad_norm": null,
|
"learning_rate": 1.8e-06, |
|
"loss": 0.0, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.027949475947325986, |
|
"grad_norm": null,
|
"learning_rate": 1.95e-06, |
|
"loss": 0.0, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.030099435635581834, |
|
"grad_norm": null,
|
"learning_rate": 2.1000000000000002e-06, |
|
"loss": 0.0, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.03224939532383768, |
|
"grad_norm": null,
|
"learning_rate": 2.25e-06, |
|
"loss": 0.0, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.03439935501209352, |
|
"grad_norm": null,
|
"learning_rate": 2.4000000000000003e-06, |
|
"loss": 0.0, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.03654931470034937, |
|
"grad_norm": null,
|
"learning_rate": 2.55e-06, |
|
"loss": 0.0, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.038699274388605216, |
|
"grad_norm": null,
|
"learning_rate": 2.7e-06, |
|
"loss": 0.0, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.04084923407686106, |
|
"grad_norm": null,
|
"learning_rate": 2.8500000000000002e-06, |
|
"loss": 0.0, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.042999193765116905, |
|
"grad_norm": null,
|
"learning_rate": 3e-06, |
|
"loss": 0.0, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.045149153453372746, |
|
"grad_norm": null,
|
"learning_rate": 3.15e-06, |
|
"loss": 0.0, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.047299113141628595, |
|
"grad_norm": null,
|
"learning_rate": 3.3e-06, |
|
"loss": 0.0, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.04944907282988444, |
|
"grad_norm": null,
|
"learning_rate": 3.4500000000000004e-06, |
|
"loss": 0.0, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.051599032518140284, |
|
"grad_norm": null,
|
"learning_rate": 3.6e-06, |
|
"loss": 0.0, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.05374899220639613, |
|
"grad_norm": null,
|
"learning_rate": 3.75e-06, |
|
"loss": 0.0, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.05589895189465197, |
|
"grad_norm": null,
|
"learning_rate": 3.9e-06, |
|
"loss": 0.0, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.05804891158290782, |
|
"grad_norm": null,
|
"learning_rate": 4.05e-06, |
|
"loss": 0.0, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.06019887127116367, |
|
"grad_norm": null,
|
"learning_rate": 4.2000000000000004e-06, |
|
"loss": 0.0, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.06234883095941951, |
|
"grad_norm": null,
|
"learning_rate": 4.35e-06, |
|
"loss": 0.0, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.06449879064767536, |
|
"grad_norm": null,
|
"learning_rate": 4.5e-06, |
|
"loss": 0.0, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.0666487503359312, |
|
"grad_norm": null,
|
"learning_rate": 4.65e-06, |
|
"loss": 0.0, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.06879871002418704, |
|
"grad_norm": null,
|
"learning_rate": 4.800000000000001e-06, |
|
"loss": 0.0, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.07094866971244289, |
|
"grad_norm": null,
|
"learning_rate": 4.95e-06, |
|
"loss": 0.0, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.07309862940069874, |
|
"grad_norm": null,
|
"learning_rate": 5.1e-06, |
|
"loss": 0.0, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.07524858908895458, |
|
"grad_norm": null,
|
"learning_rate": 5.25e-06, |
|
"loss": 0.0, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.07739854877721043, |
|
"grad_norm": null,
|
"learning_rate": 5.4e-06, |
|
"loss": 0.0, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.07954850846546627, |
|
"grad_norm": null,
|
"learning_rate": 5.55e-06, |
|
"loss": 0.0, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.08169846815372211, |
|
"grad_norm": null,
|
"learning_rate": 5.7000000000000005e-06, |
|
"loss": 0.0, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.08384842784197796, |
|
"grad_norm": null,
|
"learning_rate": 5.850000000000001e-06, |
|
"loss": 0.0, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.08599838753023381, |
|
"grad_norm": null,
|
"learning_rate": 6e-06, |
|
"loss": 0.0, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.08814834721848966, |
|
"grad_norm": null,
|
"learning_rate": 6.1499999999999996e-06, |
|
"loss": 0.0, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.09029830690674549, |
|
"grad_norm": null,
|
"learning_rate": 6.3e-06, |
|
"loss": 0.0, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.09244826659500134, |
|
"grad_norm": null,
|
"learning_rate": 6.45e-06, |
|
"loss": 0.0, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.09459822628325719, |
|
"grad_norm": null,
|
"learning_rate": 6.6e-06, |
|
"loss": 0.0, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.09674818597151304, |
|
"grad_norm": null,
|
"learning_rate": 6.750000000000001e-06, |
|
"loss": 0.0, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.09889814565976889, |
|
"grad_norm": null,
|
"learning_rate": 6.900000000000001e-06, |
|
"loss": 0.0, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.10104810534802472, |
|
"grad_norm": null,
|
"learning_rate": 7.049999999999999e-06, |
|
"loss": 0.0, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.10319806503628057, |
|
"grad_norm": null,
|
"learning_rate": 7.2e-06, |
|
"loss": 0.0, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.10534802472453642, |
|
"grad_norm": null,
|
"learning_rate": 7.35e-06, |
|
"loss": 0.0, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.10749798441279226, |
|
"grad_norm": null,
|
"learning_rate": 7.5e-06, |
|
"loss": 0.0, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.10749798441279226, |
|
"eval_loss": null,
|
"eval_runtime": 25.4707, |
|
"eval_samples_per_second": 15.39, |
|
"eval_steps_per_second": 15.39, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.10964794410104811, |
|
"grad_norm": null,
|
"learning_rate": 7.65e-06, |
|
"loss": 0.0, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.11179790378930395, |
|
"grad_norm": null,
|
"learning_rate": 7.8e-06, |
|
"loss": 0.0, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.1139478634775598, |
|
"grad_norm": null,
|
"learning_rate": 7.95e-06, |
|
"loss": 0.0, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.11609782316581564, |
|
"grad_norm": null,
|
"learning_rate": 8.1e-06, |
|
"loss": 0.0, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.11824778285407149, |
|
"grad_norm": null,
|
"learning_rate": 8.25e-06, |
|
"loss": 0.0, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.12039774254232734, |
|
"grad_norm": null,
|
"learning_rate": 8.400000000000001e-06, |
|
"loss": 0.0, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.12254770223058317, |
|
"grad_norm": null,
|
"learning_rate": 8.55e-06, |
|
"loss": 0.0, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.12469766191883902, |
|
"grad_norm": null,
|
"learning_rate": 8.7e-06, |
|
"loss": 0.0, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.12684762160709487, |
|
"grad_norm": null,
|
"learning_rate": 8.85e-06, |
|
"loss": 0.0, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.12899758129535072, |
|
"grad_norm": null,
|
"learning_rate": 9e-06, |
|
"loss": 0.0, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.13114754098360656, |
|
"grad_norm": null,
|
"learning_rate": 9.15e-06, |
|
"loss": 0.0, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.1332975006718624, |
|
"grad_norm": null,
|
"learning_rate": 9.3e-06, |
|
"loss": 0.0, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.13544746036011826, |
|
"grad_norm": null,
|
"learning_rate": 9.450000000000001e-06, |
|
"loss": 0.0, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.13759742004837408, |
|
"grad_norm": null,
|
"learning_rate": 9.600000000000001e-06, |
|
"loss": 0.0, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.13974737973662993, |
|
"grad_norm": null,
|
"learning_rate": 9.75e-06, |
|
"loss": 0.0, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.14189733942488578, |
|
"grad_norm": null,
|
"learning_rate": 9.9e-06, |
|
"loss": 0.0, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.14404729911314162, |
|
"grad_norm": null,
|
"learning_rate": 1.005e-05, |
|
"loss": 0.0, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.14619725880139747, |
|
"grad_norm": null,
|
"learning_rate": 1.02e-05, |
|
"loss": 0.0, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.14834721848965332, |
|
"grad_norm": null,
|
"learning_rate": 1.035e-05, |
|
"loss": 0.0, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.15049717817790917, |
|
"grad_norm": null,
|
"learning_rate": 1.05e-05, |
|
"loss": 0.0, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.15264713786616502, |
|
"grad_norm": null,
|
"learning_rate": 1.065e-05, |
|
"loss": 0.0, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.15479709755442086, |
|
"grad_norm": null,
|
"learning_rate": 1.08e-05, |
|
"loss": 0.0, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.1569470572426767, |
|
"grad_norm": null,
|
"learning_rate": 1.095e-05, |
|
"loss": 0.0, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.15909701693093253, |
|
"grad_norm": null,
|
"learning_rate": 1.11e-05, |
|
"loss": 0.0, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.16124697661918838, |
|
"grad_norm": null,
|
"learning_rate": 1.125e-05, |
|
"loss": 0.0, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.16339693630744423, |
|
"grad_norm": null,
|
"learning_rate": 1.1400000000000001e-05, |
|
"loss": 0.0, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.16554689599570008, |
|
"grad_norm": null,
|
"learning_rate": 1.1550000000000001e-05, |
|
"loss": 0.0, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.16769685568395593, |
|
"grad_norm": null,
|
"learning_rate": 1.1700000000000001e-05, |
|
"loss": 0.0, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.16984681537221177, |
|
"grad_norm": null,
|
"learning_rate": 1.185e-05, |
|
"loss": 0.0, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.17199677506046762, |
|
"grad_norm": null,
|
"learning_rate": 1.2e-05, |
|
"loss": 0.0, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.17414673474872347, |
|
"grad_norm": null,
|
"learning_rate": 1.215e-05, |
|
"loss": 0.0, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.17629669443697932, |
|
"grad_norm": null,
|
"learning_rate": 1.2299999999999999e-05, |
|
"loss": 0.0, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.17844665412523517, |
|
"grad_norm": null,
|
"learning_rate": 1.245e-05, |
|
"loss": 0.0, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.18059661381349099, |
|
"grad_norm": null,
|
"learning_rate": 1.26e-05, |
|
"loss": 0.0, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.18274657350174683, |
|
"grad_norm": null,
|
"learning_rate": 1.275e-05, |
|
"loss": 0.0, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.18489653319000268, |
|
"grad_norm": null,
|
"learning_rate": 1.29e-05, |
|
"loss": 0.0, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.18704649287825853, |
|
"grad_norm": null,
|
"learning_rate": 1.305e-05, |
|
"loss": 0.0, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.18919645256651438, |
|
"grad_norm": null,
|
"learning_rate": 1.32e-05, |
|
"loss": 0.0, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.19134641225477023, |
|
"grad_norm": null,
|
"learning_rate": 1.3350000000000001e-05, |
|
"loss": 0.0, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.19349637194302607, |
|
"grad_norm": null,
|
"learning_rate": 1.3500000000000001e-05, |
|
"loss": 0.0, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.19564633163128192, |
|
"grad_norm": null,
|
"learning_rate": 1.3650000000000001e-05, |
|
"loss": 0.0, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.19779629131953777, |
|
"grad_norm": null,
|
"learning_rate": 1.3800000000000002e-05, |
|
"loss": 0.0, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.1999462510077936, |
|
"grad_norm": null,
|
"learning_rate": 1.395e-05, |
|
"loss": 0.0, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.20209621069604944, |
|
"grad_norm": null,
|
"learning_rate": 1.4099999999999999e-05, |
|
"loss": 0.0, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.2042461703843053, |
|
"grad_norm": null,
|
"learning_rate": 1.4249999999999999e-05, |
|
"loss": 0.0, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.20639613007256113, |
|
"grad_norm": null,
|
"learning_rate": 1.44e-05, |
|
"loss": 0.0, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.20854608976081698, |
|
"grad_norm": null,
|
"learning_rate": 1.455e-05, |
|
"loss": 0.0, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.21069604944907283, |
|
"grad_norm": null,
|
"learning_rate": 1.47e-05, |
|
"loss": 0.0, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.21284600913732868, |
|
"grad_norm": null,
|
"learning_rate": 1.485e-05, |
|
"loss": 0.0, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.21499596882558453, |
|
"grad_norm": null,
|
"learning_rate": 1.5e-05, |
|
"loss": 0.0, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.21499596882558453, |
|
"eval_loss": null,
|
"eval_runtime": 25.4729, |
|
"eval_samples_per_second": 15.389, |
|
"eval_steps_per_second": 15.389, |
|
"step": 100 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 100, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 50, |
|
"stateful_callbacks": { |
|
"EarlyStoppingCallback": { |
|
"args": { |
|
"early_stopping_patience": 2, |
|
"early_stopping_threshold": 0.0 |
|
}, |
|
"attributes": { |
|
"early_stopping_patience_counter": 1 |
|
} |
|
}, |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 3.17940289241088e+16, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|