{ "best_metric": 0.8644523024559021, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.4140786749482402, "eval_steps": 50, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.008281573498964804, "grad_norm": 0.8388795256614685, "learning_rate": 7e-06, "loss": 0.804, "step": 1 }, { "epoch": 0.008281573498964804, "eval_loss": 1.1692047119140625, "eval_runtime": 32.2894, "eval_samples_per_second": 6.318, "eval_steps_per_second": 1.579, "step": 1 }, { "epoch": 0.016563146997929608, "grad_norm": 1.023823618888855, "learning_rate": 1.4e-05, "loss": 0.8544, "step": 2 }, { "epoch": 0.024844720496894408, "grad_norm": 1.0270742177963257, "learning_rate": 2.1e-05, "loss": 0.8755, "step": 3 }, { "epoch": 0.033126293995859216, "grad_norm": 0.9506105780601501, "learning_rate": 2.8e-05, "loss": 0.8441, "step": 4 }, { "epoch": 0.041407867494824016, "grad_norm": 0.7932412028312683, "learning_rate": 3.5e-05, "loss": 0.7846, "step": 5 }, { "epoch": 0.049689440993788817, "grad_norm": 0.7128869295120239, "learning_rate": 4.2e-05, "loss": 0.9147, "step": 6 }, { "epoch": 0.057971014492753624, "grad_norm": 0.4176234304904938, "learning_rate": 4.899999999999999e-05, "loss": 0.7784, "step": 7 }, { "epoch": 0.06625258799171843, "grad_norm": 0.6762263178825378, "learning_rate": 5.6e-05, "loss": 0.8248, "step": 8 }, { "epoch": 0.07453416149068323, "grad_norm": 0.8291305899620056, "learning_rate": 6.3e-05, "loss": 0.8014, "step": 9 }, { "epoch": 0.08281573498964803, "grad_norm": 0.8136356472969055, "learning_rate": 7e-05, "loss": 0.9253, "step": 10 }, { "epoch": 0.09109730848861283, "grad_norm": 0.4319281578063965, "learning_rate": 6.999521567473641e-05, "loss": 0.8555, "step": 11 }, { "epoch": 0.09937888198757763, "grad_norm": 0.36292925477027893, "learning_rate": 6.998086400693241e-05, "loss": 0.7425, "step": 12 }, { "epoch": 0.10766045548654245, "grad_norm": 0.3616969585418701, "learning_rate": 6.995694892019065e-05, "loss": 0.9021, "step": 13 }, { "epoch": 0.11594202898550725, "grad_norm": 0.3888731002807617, "learning_rate": 6.99234769526571e-05, "loss": 0.7912, "step": 14 }, { "epoch": 0.12422360248447205, "grad_norm": 0.3677065968513489, "learning_rate": 6.988045725523343e-05, "loss": 0.6922, "step": 15 }, { "epoch": 0.13250517598343686, "grad_norm": 0.38920730352401733, "learning_rate": 6.982790158907539e-05, "loss": 0.8843, "step": 16 }, { "epoch": 0.14078674948240166, "grad_norm": 0.4462592601776123, "learning_rate": 6.976582432237733e-05, "loss": 0.9304, "step": 17 }, { "epoch": 0.14906832298136646, "grad_norm": 0.40514039993286133, "learning_rate": 6.969424242644413e-05, "loss": 0.8461, "step": 18 }, { "epoch": 0.15734989648033126, "grad_norm": 0.44129592180252075, "learning_rate": 6.961317547105138e-05, "loss": 0.8518, "step": 19 }, { "epoch": 0.16563146997929606, "grad_norm": 0.4335060715675354, "learning_rate": 6.952264561909527e-05, "loss": 0.8793, "step": 20 }, { "epoch": 0.17391304347826086, "grad_norm": 0.543445885181427, "learning_rate": 6.942267762053337e-05, "loss": 0.9113, "step": 21 }, { "epoch": 0.18219461697722567, "grad_norm": 0.5542380809783936, "learning_rate": 6.931329880561832e-05, "loss": 0.9433, "step": 22 }, { "epoch": 0.19047619047619047, "grad_norm": 0.516521692276001, "learning_rate": 6.919453907742597e-05, "loss": 0.9016, "step": 23 }, { "epoch": 0.19875776397515527, "grad_norm": 0.5378891825675964, "learning_rate": 6.90664309036802e-05, "loss": 0.7877, "step": 24 }, { "epoch": 
0.2070393374741201, "grad_norm": 0.577548086643219, "learning_rate": 6.892900930787656e-05, "loss": 0.8454, "step": 25 }, { "epoch": 0.2153209109730849, "grad_norm": 0.5783238410949707, "learning_rate": 6.87823118597072e-05, "loss": 0.8776, "step": 26 }, { "epoch": 0.2236024844720497, "grad_norm": 0.5544853210449219, "learning_rate": 6.862637866478969e-05, "loss": 0.8123, "step": 27 }, { "epoch": 0.2318840579710145, "grad_norm": 0.7183583974838257, "learning_rate": 6.846125235370252e-05, "loss": 0.941, "step": 28 }, { "epoch": 0.2401656314699793, "grad_norm": 0.7497084140777588, "learning_rate": 6.828697807033038e-05, "loss": 0.9533, "step": 29 }, { "epoch": 0.2484472049689441, "grad_norm": 1.0239611864089966, "learning_rate": 6.81036034595222e-05, "loss": 1.0226, "step": 30 }, { "epoch": 0.2567287784679089, "grad_norm": 0.215080127120018, "learning_rate": 6.791117865406564e-05, "loss": 0.5555, "step": 31 }, { "epoch": 0.2650103519668737, "grad_norm": 0.32441794872283936, "learning_rate": 6.770975626098112e-05, "loss": 0.7501, "step": 32 }, { "epoch": 0.2732919254658385, "grad_norm": 0.309184730052948, "learning_rate": 6.749939134713974e-05, "loss": 0.7853, "step": 33 }, { "epoch": 0.2815734989648033, "grad_norm": 0.3206530511379242, "learning_rate": 6.728014142420846e-05, "loss": 0.659, "step": 34 }, { "epoch": 0.2898550724637681, "grad_norm": 0.2530266046524048, "learning_rate": 6.7052066432927e-05, "loss": 0.651, "step": 35 }, { "epoch": 0.2981366459627329, "grad_norm": 0.27744585275650024, "learning_rate": 6.681522872672069e-05, "loss": 0.6502, "step": 36 }, { "epoch": 0.3064182194616977, "grad_norm": 0.2372923493385315, "learning_rate": 6.656969305465356e-05, "loss": 0.7619, "step": 37 }, { "epoch": 0.3146997929606625, "grad_norm": 0.2609351575374603, "learning_rate": 6.631552654372672e-05, "loss": 0.6668, "step": 38 }, { "epoch": 0.32298136645962733, "grad_norm": 0.2953057885169983, "learning_rate": 6.60527986805264e-05, "loss": 0.7653, "step": 39 }, { "epoch": 0.33126293995859213, "grad_norm": 0.32191261649131775, "learning_rate": 6.578158129222711e-05, "loss": 0.8575, "step": 40 }, { "epoch": 0.33954451345755693, "grad_norm": 0.2882176339626312, "learning_rate": 6.550194852695469e-05, "loss": 0.7252, "step": 41 }, { "epoch": 0.34782608695652173, "grad_norm": 0.27066636085510254, "learning_rate": 6.521397683351509e-05, "loss": 0.7575, "step": 42 }, { "epoch": 0.35610766045548653, "grad_norm": 0.3208519518375397, "learning_rate": 6.491774494049386e-05, "loss": 0.8635, "step": 43 }, { "epoch": 0.36438923395445133, "grad_norm": 0.35946550965309143, "learning_rate": 6.461333383473272e-05, "loss": 0.8081, "step": 44 }, { "epoch": 0.37267080745341613, "grad_norm": 0.3770899176597595, "learning_rate": 6.430082673918849e-05, "loss": 0.8199, "step": 45 }, { "epoch": 0.38095238095238093, "grad_norm": 0.36672505736351013, "learning_rate": 6.398030909018069e-05, "loss": 0.8569, "step": 46 }, { "epoch": 0.38923395445134573, "grad_norm": 0.3554442226886749, "learning_rate": 6.365186851403423e-05, "loss": 0.8098, "step": 47 }, { "epoch": 0.39751552795031053, "grad_norm": 0.3235284388065338, "learning_rate": 6.331559480312315e-05, "loss": 0.791, "step": 48 }, { "epoch": 0.4057971014492754, "grad_norm": 0.3624836206436157, "learning_rate": 6.297157989132236e-05, "loss": 0.7369, "step": 49 }, { "epoch": 0.4140786749482402, "grad_norm": 0.3535378873348236, "learning_rate": 6.261991782887377e-05, "loss": 0.6616, "step": 50 }, { "epoch": 0.4140786749482402, "eval_loss": 0.8644523024559021, 
"eval_runtime": 32.248, "eval_samples_per_second": 6.326, "eval_steps_per_second": 1.581, "step": 50 } ], "logging_steps": 1, "max_steps": 200, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 50, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 4, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1.696706304367657e+17, "train_batch_size": 8, "trial_name": null, "trial_params": null }