{
  "best_metric": NaN,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.2164892657405737,
  "eval_steps": 100,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00036081544290095615,
      "eval_loss": NaN,
      "eval_runtime": 196.169,
      "eval_samples_per_second": 23.796,
      "eval_steps_per_second": 5.949,
      "step": 1
    },
    {
      "epoch": 0.0036081544290095615,
      "grad_norm": 0.0,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.007216308858019123,
      "grad_norm": 0.0,
      "learning_rate": 0.0001998582695676762,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 0.010824463287028685,
      "grad_norm": 0.0,
      "learning_rate": 0.00019943348002101371,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.014432617716038246,
      "grad_norm": 0.0,
      "learning_rate": 0.00019872683547213446,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.018040772145047807,
      "grad_norm": 0.0,
      "learning_rate": 0.00019774033898178667,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 0.02164892657405737,
      "grad_norm": 0.0,
      "learning_rate": 0.0001964767868814516,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.025257081003066933,
      "grad_norm": 0.0,
      "learning_rate": 0.00019493976084683813,
      "loss": 0.0,
      "step": 70
    },
    {
      "epoch": 0.028865235432076492,
      "grad_norm": 0.0,
      "learning_rate": 0.00019313361774523385,
      "loss": 0.0,
      "step": 80
    },
    {
      "epoch": 0.032473389861086055,
      "grad_norm": 0.0,
      "learning_rate": 0.00019106347728549135,
      "loss": 0.0,
      "step": 90
    },
    {
      "epoch": 0.036081544290095614,
      "grad_norm": 0.0,
      "learning_rate": 0.00018873520750565718,
      "loss": 0.0,
      "step": 100
    },
    {
      "epoch": 0.036081544290095614,
      "eval_loss": NaN,
      "eval_runtime": 197.2789,
      "eval_samples_per_second": 23.662,
      "eval_steps_per_second": 5.915,
      "step": 100
    },
    {
      "epoch": 0.03968969871910518,
      "grad_norm": 0.0,
      "learning_rate": 0.0001861554081393806,
      "loss": 0.0,
      "step": 110
    },
    {
      "epoch": 0.04329785314811474,
      "grad_norm": 0.0,
      "learning_rate": 0.0001833313919082515,
      "loss": 0.0,
      "step": 120
    },
    {
      "epoch": 0.0469060075771243,
      "grad_norm": 0.0,
      "learning_rate": 0.00018027116379309638,
      "loss": 0.0,
      "step": 130
    },
    {
      "epoch": 0.050514162006133866,
      "grad_norm": 0.0,
      "learning_rate": 0.00017698339834299061,
      "loss": 0.0,
      "step": 140
    },
    {
      "epoch": 0.054122316435143425,
      "grad_norm": 0.0,
      "learning_rate": 0.00017347741508630672,
      "loss": 0.0,
      "step": 150
    },
    {
      "epoch": 0.057730470864152984,
      "grad_norm": 0.0,
      "learning_rate": 0.0001697631521134985,
      "loss": 0.0,
      "step": 160
    },
    {
      "epoch": 0.06133862529316255,
      "grad_norm": 0.0,
      "learning_rate": 0.00016585113790650388,
      "loss": 0.0,
      "step": 170
    },
    {
      "epoch": 0.06494677972217211,
      "grad_norm": 0.0,
      "learning_rate": 0.0001617524614946192,
      "loss": 0.0,
      "step": 180
    },
    {
      "epoch": 0.06855493415118168,
      "grad_norm": 0.0,
      "learning_rate": 0.0001574787410214407,
      "loss": 0.0,
      "step": 190
    },
    {
      "epoch": 0.07216308858019123,
      "grad_norm": 0.0,
      "learning_rate": 0.00015304209081197425,
      "loss": 0.0,
      "step": 200
    },
    {
      "epoch": 0.07216308858019123,
      "eval_loss": NaN,
      "eval_runtime": 197.0795,
      "eval_samples_per_second": 23.686,
      "eval_steps_per_second": 5.921,
      "step": 200
    },
    {
      "epoch": 0.0757712430092008,
      "grad_norm": 0.0,
      "learning_rate": 0.00014845508703326504,
      "loss": 0.0,
      "step": 210
    },
    {
      "epoch": 0.07937939743821036,
      "grad_norm": 0.0,
      "learning_rate": 0.00014373073204588556,
      "loss": 0.0,
      "step": 220
    },
    {
      "epoch": 0.08298755186721991,
      "grad_norm": 0.0,
      "learning_rate": 0.00013888241754733208,
      "loss": 0.0,
      "step": 230
    },
    {
      "epoch": 0.08659570629622948,
      "grad_norm": 0.0,
      "learning_rate": 0.00013392388661180303,
      "loss": 0.0,
      "step": 240
    },
    {
      "epoch": 0.09020386072523905,
      "grad_norm": 0.0,
      "learning_rate": 0.0001288691947339621,
      "loss": 0.0,
      "step": 250
    },
    {
      "epoch": 0.0938120151542486,
      "grad_norm": 0.0,
      "learning_rate": 0.0001237326699871115,
      "loss": 0.0,
      "step": 260
    },
    {
      "epoch": 0.09742016958325816,
      "grad_norm": 0.0,
      "learning_rate": 0.00011852887240871145,
      "loss": 0.0,
      "step": 270
    },
    {
      "epoch": 0.10102832401226773,
      "grad_norm": 0.0,
      "learning_rate": 0.00011327255272837221,
      "loss": 0.0,
      "step": 280
    },
    {
      "epoch": 0.10463647844127728,
      "grad_norm": 0.0,
      "learning_rate": 0.00010797861055530831,
      "loss": 0.0,
      "step": 290
    },
    {
      "epoch": 0.10824463287028685,
      "grad_norm": 0.0,
      "learning_rate": 0.00010266205214377748,
      "loss": 0.0,
      "step": 300
    },
    {
      "epoch": 0.10824463287028685,
      "eval_loss": NaN,
      "eval_runtime": 197.2517,
      "eval_samples_per_second": 23.665,
      "eval_steps_per_second": 5.916,
      "step": 300
    },
    {
      "epoch": 0.11185278729929642,
      "grad_norm": 0.0,
      "learning_rate": 9.733794785622253e-05,
      "loss": 0.0,
      "step": 310
    },
    {
      "epoch": 0.11546094172830597,
      "grad_norm": 0.0,
      "learning_rate": 9.202138944469168e-05,
      "loss": 0.0,
      "step": 320
    },
    {
      "epoch": 0.11906909615731553,
      "grad_norm": 0.0,
      "learning_rate": 8.672744727162781e-05,
      "loss": 0.0,
      "step": 330
    },
    {
      "epoch": 0.1226772505863251,
      "grad_norm": 0.0,
      "learning_rate": 8.147112759128859e-05,
      "loss": 0.0,
      "step": 340
    },
    {
      "epoch": 0.12628540501533467,
      "grad_norm": 0.0,
      "learning_rate": 7.626733001288851e-05,
      "loss": 0.0,
      "step": 350
    },
    {
      "epoch": 0.12989355944434422,
      "grad_norm": 0.0,
      "learning_rate": 7.113080526603792e-05,
      "loss": 0.0,
      "step": 360
    },
    {
      "epoch": 0.13350171387335377,
      "grad_norm": 0.0,
      "learning_rate": 6.607611338819697e-05,
      "loss": 0.0,
      "step": 370
    },
    {
      "epoch": 0.13710986830236335,
      "grad_norm": 0.0,
      "learning_rate": 6.111758245266794e-05,
      "loss": 0.0,
      "step": 380
    },
    {
      "epoch": 0.1407180227313729,
      "grad_norm": 0.0,
      "learning_rate": 5.626926795411447e-05,
      "loss": 0.0,
      "step": 390
    },
    {
      "epoch": 0.14432617716038246,
      "grad_norm": 0.0,
      "learning_rate": 5.1544912966734994e-05,
      "loss": 0.0,
      "step": 400
    },
    {
      "epoch": 0.14432617716038246,
      "eval_loss": NaN,
      "eval_runtime": 197.4311,
      "eval_samples_per_second": 23.644,
      "eval_steps_per_second": 5.911,
      "step": 400
    },
    {
      "epoch": 0.14793433158939204,
      "grad_norm": 0.0,
      "learning_rate": 4.695790918802576e-05,
      "loss": 0.0,
      "step": 410
    },
    {
      "epoch": 0.1515424860184016,
      "grad_norm": 0.0,
      "learning_rate": 4.252125897855932e-05,
      "loss": 0.0,
      "step": 420
    },
    {
      "epoch": 0.15515064044741114,
      "grad_norm": 0.0,
      "learning_rate": 3.824753850538082e-05,
      "loss": 0.0,
      "step": 430
    },
    {
      "epoch": 0.15875879487642072,
      "grad_norm": 0.0,
      "learning_rate": 3.414886209349615e-05,
      "loss": 0.0,
      "step": 440
    },
    {
      "epoch": 0.16236694930543027,
      "grad_norm": 0.0,
      "learning_rate": 3.0236847886501542e-05,
      "loss": 0.0,
      "step": 450
    },
    {
      "epoch": 0.16597510373443983,
      "grad_norm": 0.0,
      "learning_rate": 2.6522584913693294e-05,
      "loss": 0.0,
      "step": 460
    },
    {
      "epoch": 0.1695832581634494,
      "grad_norm": 0.0,
      "learning_rate": 2.301660165700936e-05,
      "loss": 0.0,
      "step": 470
    },
    {
      "epoch": 0.17319141259245896,
      "grad_norm": 0.0,
      "learning_rate": 1.9728836206903656e-05,
      "loss": 0.0,
      "step": 480
    },
    {
      "epoch": 0.1767995670214685,
      "grad_norm": 0.0,
      "learning_rate": 1.6668608091748495e-05,
      "loss": 0.0,
      "step": 490
    },
    {
      "epoch": 0.1804077214504781,
      "grad_norm": 0.0,
      "learning_rate": 1.3844591860619383e-05,
      "loss": 0.0,
      "step": 500
    },
    {
      "epoch": 0.1804077214504781,
      "eval_loss": NaN,
      "eval_runtime": 197.1433,
      "eval_samples_per_second": 23.678,
      "eval_steps_per_second": 5.92,
      "step": 500
    },
    {
      "epoch": 0.18401587587948764,
      "grad_norm": 0.0,
      "learning_rate": 1.1264792494342857e-05,
      "loss": 0.0,
      "step": 510
    },
    {
      "epoch": 0.1876240303084972,
      "grad_norm": 0.0,
      "learning_rate": 8.936522714508678e-06,
      "loss": 0.0,
      "step": 520
    },
    {
      "epoch": 0.19123218473750678,
      "grad_norm": 0.0,
      "learning_rate": 6.866382254766157e-06,
      "loss": 0.0,
      "step": 530
    },
    {
      "epoch": 0.19484033916651633,
      "grad_norm": 0.0,
      "learning_rate": 5.060239153161872e-06,
      "loss": 0.0,
      "step": 540
    },
    {
      "epoch": 0.19844849359552588,
      "grad_norm": 0.0,
      "learning_rate": 3.5232131185484076e-06,
      "loss": 0.0,
      "step": 550
    },
    {
      "epoch": 0.20205664802453546,
      "grad_norm": 0.0,
      "learning_rate": 2.259661018213333e-06,
      "loss": 0.0,
      "step": 560
    },
    {
      "epoch": 0.20566480245354501,
      "grad_norm": 0.0,
      "learning_rate": 1.2731645278655445e-06,
      "loss": 0.0,
      "step": 570
    },
    {
      "epoch": 0.20927295688255457,
      "grad_norm": 0.0,
      "learning_rate": 5.665199789862907e-07,
      "loss": 0.0,
      "step": 580
    },
    {
      "epoch": 0.21288111131156415,
      "grad_norm": 0.0,
      "learning_rate": 1.4173043232380557e-07,
      "loss": 0.0,
      "step": 590
    },
    {
      "epoch": 0.2164892657405737,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 600
    },
    {
      "epoch": 0.2164892657405737,
      "eval_loss": NaN,
      "eval_runtime": 197.2007,
      "eval_samples_per_second": 23.671,
      "eval_steps_per_second": 5.918,
      "step": 600
    }
  ],
  "logging_steps": 10,
  "max_steps": 600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.98122404544512e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}