{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.766417290108063,
  "eval_steps": 500,
  "global_step": 8500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10390689941812137,
      "grad_norm": 1.8855979442596436,
      "learning_rate": 1.9310058187863676e-05,
      "loss": 0.8022,
      "step": 500
    },
    {
      "epoch": 0.20781379883624274,
      "grad_norm": 0.8622527122497559,
      "learning_rate": 1.86173455250762e-05,
      "loss": 0.5316,
      "step": 1000
    },
    {
      "epoch": 0.3117206982543641,
      "grad_norm": 1.404678225517273,
      "learning_rate": 1.7924632862288724e-05,
      "loss": 0.4832,
      "step": 1500
    },
    {
      "epoch": 0.41562759767248547,
      "grad_norm": 1.3559819459915161,
      "learning_rate": 1.7233305624826823e-05,
      "loss": 0.4518,
      "step": 2000
    },
    {
      "epoch": 0.5195344970906068,
      "grad_norm": 0.8163271546363831,
      "learning_rate": 1.6540592962039347e-05,
      "loss": 0.4389,
      "step": 2500
    },
    {
      "epoch": 0.6234413965087282,
      "grad_norm": 0.8109046816825867,
      "learning_rate": 1.584788029925187e-05,
      "loss": 0.4193,
      "step": 3000
    },
    {
      "epoch": 0.7273482959268496,
      "grad_norm": 1.0217444896697998,
      "learning_rate": 1.5155167636464397e-05,
      "loss": 0.4215,
      "step": 3500
    },
    {
      "epoch": 0.8312551953449709,
      "grad_norm": 1.6476292610168457,
      "learning_rate": 1.446245497367692e-05,
      "loss": 0.4062,
      "step": 4000
    },
    {
      "epoch": 0.9351620947630923,
      "grad_norm": 1.4694277048110962,
      "learning_rate": 1.3769742310889445e-05,
      "loss": 0.4029,
      "step": 4500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.3212089240550995,
      "eval_runtime": 31.4577,
      "eval_samples_per_second": 15.704,
      "eval_steps_per_second": 7.852,
      "step": 4812
    },
    {
      "epoch": 1.0390689941812137,
      "grad_norm": 0.6561925411224365,
      "learning_rate": 1.3077029648101969e-05,
      "loss": 0.4059,
      "step": 5000
    },
    {
      "epoch": 1.142975893599335,
      "grad_norm": 0.6741281747817993,
      "learning_rate": 1.2384316985314493e-05,
      "loss": 0.3594,
      "step": 5500
    },
    {
      "epoch": 1.2468827930174564,
      "grad_norm": 0.48619207739830017,
      "learning_rate": 1.1691604322527017e-05,
      "loss": 0.3736,
      "step": 6000
    },
    {
      "epoch": 1.3507896924355778,
      "grad_norm": 1.1009119749069214,
      "learning_rate": 1.099889165973954e-05,
      "loss": 0.3624,
      "step": 6500
    },
    {
      "epoch": 1.4546965918536992,
      "grad_norm": 0.3497615456581116,
      "learning_rate": 1.0306178996952066e-05,
      "loss": 0.3516,
      "step": 7000
    },
    {
      "epoch": 1.5586034912718203,
      "grad_norm": 1.4209001064300537,
      "learning_rate": 9.61346633416459e-06,
      "loss": 0.3565,
      "step": 7500
    },
    {
      "epoch": 1.6625103906899419,
      "grad_norm": 0.8116744160652161,
      "learning_rate": 8.920753671377114e-06,
      "loss": 0.3635,
      "step": 8000
    },
    {
      "epoch": 1.766417290108063,
      "grad_norm": 0.8015578985214233,
      "learning_rate": 8.228041008589638e-06,
      "loss": 0.3549,
      "step": 8500
    }
  ],
  "logging_steps": 500,
  "max_steps": 14436,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.035167518162944e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}