{
  "best_metric": 0.4868355989456177,
  "best_model_checkpoint": "miner_id_24/checkpoint-150",
  "epoch": 1.0038412291933418,
  "eval_steps": 50,
  "global_step": 196,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005121638924455826,
      "eval_loss": 8.380269050598145,
      "eval_runtime": 6.2344,
      "eval_samples_per_second": 52.771,
      "eval_steps_per_second": 13.313,
      "step": 1
    },
    {
      "epoch": 0.05121638924455826,
      "grad_norm": 9.538023948669434,
      "learning_rate": 0.0002,
      "loss": 5.2433,
      "step": 10
    },
    {
      "epoch": 0.10243277848911651,
      "grad_norm": 5.801913261413574,
      "learning_rate": 0.00019857697953148037,
      "loss": 1.1184,
      "step": 20
    },
    {
      "epoch": 0.15364916773367476,
      "grad_norm": 3.538983106613159,
      "learning_rate": 0.00019434841787099803,
      "loss": 0.6127,
      "step": 30
    },
    {
      "epoch": 0.20486555697823303,
      "grad_norm": 13.218347549438477,
      "learning_rate": 0.00018743466161445823,
      "loss": 0.6928,
      "step": 40
    },
    {
      "epoch": 0.2560819462227913,
      "grad_norm": 5.4068732261657715,
      "learning_rate": 0.0001780324790952092,
      "loss": 2.6809,
      "step": 50
    },
    {
      "epoch": 0.2560819462227913,
      "eval_loss": 0.8460147976875305,
      "eval_runtime": 6.2948,
      "eval_samples_per_second": 52.265,
      "eval_steps_per_second": 13.185,
      "step": 50
    },
    {
      "epoch": 0.3072983354673495,
      "grad_norm": 2.373478889465332,
      "learning_rate": 0.00016640946027672392,
      "loss": 0.4578,
      "step": 60
    },
    {
      "epoch": 0.3585147247119078,
      "grad_norm": 2.6782474517822266,
      "learning_rate": 0.00015289640103269625,
      "loss": 0.3475,
      "step": 70
    },
    {
      "epoch": 0.40973111395646605,
      "grad_norm": 2.94343638420105,
      "learning_rate": 0.0001378778885610576,
      "loss": 0.2493,
      "step": 80
    },
    {
      "epoch": 0.46094750320102434,
      "grad_norm": 10.372923851013184,
      "learning_rate": 0.00012178135587488515,
      "loss": 0.8931,
      "step": 90
    },
    {
      "epoch": 0.5121638924455826,
      "grad_norm": 1.2253535985946655,
      "learning_rate": 0.00010506491688387127,
      "loss": 1.492,
      "step": 100
    },
    {
      "epoch": 0.5121638924455826,
      "eval_loss": 0.6194465160369873,
      "eval_runtime": 6.3051,
      "eval_samples_per_second": 52.18,
      "eval_steps_per_second": 13.164,
      "step": 100
    },
    {
      "epoch": 0.5633802816901409,
      "grad_norm": 1.6862285137176514,
      "learning_rate": 8.820432828491542e-05,
      "loss": 0.2869,
      "step": 110
    },
    {
      "epoch": 0.614596670934699,
      "grad_norm": 1.361668348312378,
      "learning_rate": 7.16794493317696e-05,
      "loss": 0.2391,
      "step": 120
    },
    {
      "epoch": 0.6658130601792573,
      "grad_norm": 2.5609822273254395,
      "learning_rate": 5.596058484423656e-05,
      "loss": 0.158,
      "step": 130
    },
    {
      "epoch": 0.7170294494238156,
      "grad_norm": 5.137864112854004,
      "learning_rate": 4.149510014046922e-05,
      "loss": 1.0696,
      "step": 140
    },
    {
      "epoch": 0.7682458386683739,
      "grad_norm": 2.4468164443969727,
      "learning_rate": 2.869468883687798e-05,
      "loss": 0.9942,
      "step": 150
    },
    {
      "epoch": 0.7682458386683739,
      "eval_loss": 0.4868355989456177,
      "eval_runtime": 6.3284,
      "eval_samples_per_second": 51.988,
      "eval_steps_per_second": 13.116,
      "step": 150
    },
    {
      "epoch": 0.8194622279129321,
      "grad_norm": 2.048182487487793,
      "learning_rate": 1.7923655879272393e-05,
      "loss": 0.2675,
      "step": 160
    },
    {
      "epoch": 0.8706786171574904,
      "grad_norm": 1.122640609741211,
      "learning_rate": 9.488549274967872e-06,
      "loss": 0.1,
      "step": 170
    },
    {
      "epoch": 0.9218950064020487,
      "grad_norm": 2.5161125659942627,
      "learning_rate": 3.6294356110059157e-06,
      "loss": 0.1207,
      "step": 180
    },
    {
      "epoch": 0.9731113956466069,
      "grad_norm": 6.927064895629883,
      "learning_rate": 5.130676608104845e-07,
      "loss": 1.1711,
      "step": 190
    }
  ],
  "logging_steps": 10,
  "max_steps": 196,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.853325055924634e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}