{
  "best_metric": 3.0577471256256104,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.5154639175257731,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010309278350515464,
      "grad_norm": 2.0397210121154785,
      "learning_rate": 5e-06,
      "loss": 3.2464,
      "step": 1
    },
    {
      "epoch": 0.010309278350515464,
      "eval_loss": 3.466643810272217,
      "eval_runtime": 26.5692,
      "eval_samples_per_second": 6.135,
      "eval_steps_per_second": 1.543,
      "step": 1
    },
    {
      "epoch": 0.020618556701030927,
      "grad_norm": 1.9331371784210205,
      "learning_rate": 1e-05,
      "loss": 3.1738,
      "step": 2
    },
    {
      "epoch": 0.030927835051546393,
      "grad_norm": 2.05216908454895,
      "learning_rate": 1.5e-05,
      "loss": 3.3049,
      "step": 3
    },
    {
      "epoch": 0.041237113402061855,
      "grad_norm": 1.8277063369750977,
      "learning_rate": 2e-05,
      "loss": 3.193,
      "step": 4
    },
    {
      "epoch": 0.05154639175257732,
      "grad_norm": 1.5371441841125488,
      "learning_rate": 2.5e-05,
      "loss": 3.0383,
      "step": 5
    },
    {
      "epoch": 0.061855670103092786,
      "grad_norm": 1.2227097749710083,
      "learning_rate": 3e-05,
      "loss": 3.089,
      "step": 6
    },
    {
      "epoch": 0.07216494845360824,
      "grad_norm": 0.9845421314239502,
      "learning_rate": 3.5e-05,
      "loss": 2.9222,
      "step": 7
    },
    {
      "epoch": 0.08247422680412371,
      "grad_norm": 1.217247724533081,
      "learning_rate": 4e-05,
      "loss": 2.9749,
      "step": 8
    },
    {
      "epoch": 0.09278350515463918,
      "grad_norm": 1.2879574298858643,
      "learning_rate": 4.5e-05,
      "loss": 3.0741,
      "step": 9
    },
    {
      "epoch": 0.10309278350515463,
      "grad_norm": 1.1864397525787354,
      "learning_rate": 5e-05,
      "loss": 3.1363,
      "step": 10
    },
    {
      "epoch": 0.1134020618556701,
      "grad_norm": 0.7237774729728699,
      "learning_rate": 4.9996582624811725e-05,
      "loss": 3.0243,
      "step": 11
    },
    {
      "epoch": 0.12371134020618557,
      "grad_norm": 0.7130429744720459,
      "learning_rate": 4.9986331433523156e-05,
      "loss": 3.0367,
      "step": 12
    },
    {
      "epoch": 0.13402061855670103,
      "grad_norm": 0.721259593963623,
      "learning_rate": 4.996924922870762e-05,
      "loss": 2.9687,
      "step": 13
    },
    {
      "epoch": 0.14432989690721648,
      "grad_norm": 0.7355589270591736,
      "learning_rate": 4.994534068046937e-05,
      "loss": 3.1342,
      "step": 14
    },
    {
      "epoch": 0.15463917525773196,
      "grad_norm": 0.7934995293617249,
      "learning_rate": 4.991461232516675e-05,
      "loss": 3.115,
      "step": 15
    },
    {
      "epoch": 0.16494845360824742,
      "grad_norm": 0.7838003039360046,
      "learning_rate": 4.9877072563625285e-05,
      "loss": 3.1102,
      "step": 16
    },
    {
      "epoch": 0.17525773195876287,
      "grad_norm": 0.7115891575813293,
      "learning_rate": 4.9832731658840956e-05,
      "loss": 3.0109,
      "step": 17
    },
    {
      "epoch": 0.18556701030927836,
      "grad_norm": 0.8016122579574585,
      "learning_rate": 4.978160173317438e-05,
      "loss": 3.2599,
      "step": 18
    },
    {
      "epoch": 0.1958762886597938,
      "grad_norm": 0.7975648045539856,
      "learning_rate": 4.972369676503672e-05,
      "loss": 3.2147,
      "step": 19
    },
    {
      "epoch": 0.20618556701030927,
      "grad_norm": 0.8431238532066345,
      "learning_rate": 4.965903258506806e-05,
      "loss": 3.0223,
      "step": 20
    },
    {
      "epoch": 0.21649484536082475,
      "grad_norm": 0.9466091394424438,
      "learning_rate": 4.958762687180956e-05,
      "loss": 3.2875,
      "step": 21
    },
    {
      "epoch": 0.2268041237113402,
      "grad_norm": 1.138825535774231,
      "learning_rate": 4.9509499146870236e-05,
      "loss": 3.2387,
      "step": 22
    },
    {
      "epoch": 0.23711340206185566,
      "grad_norm": 1.6336590051651,
      "learning_rate": 4.9424670769589984e-05,
      "loss": 3.5127,
      "step": 23
    },
    {
      "epoch": 0.24742268041237114,
      "grad_norm": 3.3279550075531006,
      "learning_rate": 4.933316493120015e-05,
      "loss": 4.1843,
      "step": 24
    },
    {
      "epoch": 0.25773195876288657,
      "grad_norm": 0.5221712589263916,
      "learning_rate": 4.923500664848326e-05,
      "loss": 2.9478,
      "step": 25
    },
    {
      "epoch": 0.26804123711340205,
      "grad_norm": 0.5041414499282837,
      "learning_rate": 4.913022275693372e-05,
      "loss": 2.9723,
      "step": 26
    },
    {
      "epoch": 0.27835051546391754,
      "grad_norm": 0.5228736400604248,
      "learning_rate": 4.901884190342121e-05,
      "loss": 2.8516,
      "step": 27
    },
    {
      "epoch": 0.28865979381443296,
      "grad_norm": 0.44938141107559204,
      "learning_rate": 4.8900894538358944e-05,
      "loss": 2.9021,
      "step": 28
    },
    {
      "epoch": 0.29896907216494845,
      "grad_norm": 0.3917822241783142,
      "learning_rate": 4.877641290737884e-05,
      "loss": 2.8034,
      "step": 29
    },
    {
      "epoch": 0.30927835051546393,
      "grad_norm": 0.4111269414424896,
      "learning_rate": 4.864543104251587e-05,
      "loss": 2.9712,
      "step": 30
    },
    {
      "epoch": 0.31958762886597936,
      "grad_norm": 0.4035302698612213,
      "learning_rate": 4.850798475290403e-05,
      "loss": 2.952,
      "step": 31
    },
    {
      "epoch": 0.32989690721649484,
      "grad_norm": 0.4234113395214081,
      "learning_rate": 4.8364111614986527e-05,
      "loss": 2.9556,
      "step": 32
    },
    {
      "epoch": 0.3402061855670103,
      "grad_norm": 0.41241785883903503,
      "learning_rate": 4.821385096224268e-05,
      "loss": 2.9139,
      "step": 33
    },
    {
      "epoch": 0.35051546391752575,
      "grad_norm": 0.4428929090499878,
      "learning_rate": 4.805724387443462e-05,
      "loss": 2.9375,
      "step": 34
    },
    {
      "epoch": 0.36082474226804123,
      "grad_norm": 0.4585018455982208,
      "learning_rate": 4.789433316637644e-05,
      "loss": 2.9829,
      "step": 35
    },
    {
      "epoch": 0.3711340206185567,
      "grad_norm": 0.47150719165802,
      "learning_rate": 4.7725163376229064e-05,
      "loss": 2.996,
      "step": 36
    },
    {
      "epoch": 0.38144329896907214,
      "grad_norm": 0.4896671772003174,
      "learning_rate": 4.754978075332398e-05,
      "loss": 2.9585,
      "step": 37
    },
    {
      "epoch": 0.3917525773195876,
      "grad_norm": 0.5090042948722839,
      "learning_rate": 4.736823324551909e-05,
      "loss": 2.9878,
      "step": 38
    },
    {
      "epoch": 0.4020618556701031,
      "grad_norm": 0.5601177215576172,
      "learning_rate": 4.71805704860903e-05,
      "loss": 3.0062,
      "step": 39
    },
    {
      "epoch": 0.41237113402061853,
      "grad_norm": 0.5434714555740356,
      "learning_rate": 4.698684378016222e-05,
      "loss": 3.0068,
      "step": 40
    },
    {
      "epoch": 0.422680412371134,
      "grad_norm": 0.584433913230896,
      "learning_rate": 4.678710609068193e-05,
      "loss": 3.0006,
      "step": 41
    },
    {
      "epoch": 0.4329896907216495,
      "grad_norm": 0.6252883076667786,
      "learning_rate": 4.6581412023939354e-05,
      "loss": 3.1496,
      "step": 42
    },
    {
      "epoch": 0.44329896907216493,
      "grad_norm": 0.7323458194732666,
      "learning_rate": 4.6369817814638475e-05,
      "loss": 3.0819,
      "step": 43
    },
    {
      "epoch": 0.4536082474226804,
      "grad_norm": 0.8921486735343933,
      "learning_rate": 4.6152381310523387e-05,
      "loss": 3.3431,
      "step": 44
    },
    {
      "epoch": 0.4639175257731959,
      "grad_norm": 0.9650557637214661,
      "learning_rate": 4.592916195656322e-05,
      "loss": 3.1395,
      "step": 45
    },
    {
      "epoch": 0.4742268041237113,
      "grad_norm": 1.141668438911438,
      "learning_rate": 4.5700220778700504e-05,
      "loss": 3.236,
      "step": 46
    },
    {
      "epoch": 0.4845360824742268,
      "grad_norm": 1.7221248149871826,
      "learning_rate": 4.546562036716732e-05,
      "loss": 3.2945,
      "step": 47
    },
    {
      "epoch": 0.4948453608247423,
      "grad_norm": 3.100931167602539,
      "learning_rate": 4.522542485937369e-05,
      "loss": 3.7196,
      "step": 48
    },
    {
      "epoch": 0.5051546391752577,
      "grad_norm": 0.5098997950553894,
      "learning_rate": 4.497969992237312e-05,
      "loss": 2.8551,
      "step": 49
    },
    {
      "epoch": 0.5154639175257731,
      "grad_norm": 0.5181565284729004,
      "learning_rate": 4.4728512734909844e-05,
      "loss": 2.8939,
      "step": 50
    },
    {
      "epoch": 0.5154639175257731,
      "eval_loss": 3.0577471256256104,
      "eval_runtime": 26.5905,
      "eval_samples_per_second": 6.13,
      "eval_steps_per_second": 1.542,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 4,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7165122145353728e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}