{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.001867505176490911,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 9.337525882454556e-06,
      "eval_loss": 1.8007404804229736,
      "eval_runtime": 1408.1902,
      "eval_samples_per_second": 32.022,
      "eval_steps_per_second": 16.011,
      "step": 1
    },
    {
      "epoch": 9.337525882454556e-05,
      "grad_norm": 16.06702423095703,
      "learning_rate": 0.00019967573081342103,
      "loss": 4.4126,
      "step": 10
    },
    {
      "epoch": 0.0001867505176490911,
      "grad_norm": 20.935449600219727,
      "learning_rate": 0.0001970941817426052,
      "loss": 1.0184,
      "step": 20
    },
    {
      "epoch": 0.0002801257764736367,
      "grad_norm": 15.171615600585938,
      "learning_rate": 0.00019199794436588243,
      "loss": 0.8537,
      "step": 30
    },
    {
      "epoch": 0.0003735010352981822,
      "grad_norm": 10.143608093261719,
      "learning_rate": 0.0001845190085543795,
      "loss": 0.8855,
      "step": 40
    },
    {
      "epoch": 0.00046687629412272777,
      "grad_norm": 11.451573371887207,
      "learning_rate": 0.00017485107481711012,
      "loss": 0.6427,
      "step": 50
    },
    {
      "epoch": 0.00046687629412272777,
      "eval_loss": 0.1703098565340042,
      "eval_runtime": 1404.613,
      "eval_samples_per_second": 32.104,
      "eval_steps_per_second": 16.052,
      "step": 50
    },
    {
      "epoch": 0.0005602515529472734,
      "grad_norm": 6.204837799072266,
      "learning_rate": 0.00016324453755953773,
      "loss": 0.8176,
      "step": 60
    },
    {
      "epoch": 0.0006536268117718189,
      "grad_norm": 4.421334743499756,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.6348,
      "step": 70
    },
    {
      "epoch": 0.0007470020705963644,
      "grad_norm": 5.26912260055542,
      "learning_rate": 0.00013546048870425356,
      "loss": 0.5701,
      "step": 80
    },
    {
      "epoch": 0.0008403773294209099,
      "grad_norm": 12.316139221191406,
      "learning_rate": 0.00012000256937760445,
      "loss": 0.4992,
      "step": 90
    },
    {
      "epoch": 0.0009337525882454555,
      "grad_norm": 7.5016350746154785,
      "learning_rate": 0.00010402659401094152,
      "loss": 0.4787,
      "step": 100
    },
    {
      "epoch": 0.0009337525882454555,
      "eval_loss": 0.13443197309970856,
      "eval_runtime": 1402.4134,
      "eval_samples_per_second": 32.154,
      "eval_steps_per_second": 16.077,
      "step": 100
    },
    {
      "epoch": 0.001027127847070001,
      "grad_norm": 7.565667629241943,
      "learning_rate": 8.79463319744677e-05,
      "loss": 0.4692,
      "step": 110
    },
    {
      "epoch": 0.0011205031058945467,
      "grad_norm": 7.207690238952637,
      "learning_rate": 7.217825360835473e-05,
      "loss": 0.494,
      "step": 120
    },
    {
      "epoch": 0.0012138783647190922,
      "grad_norm": 20.8629093170166,
      "learning_rate": 5.713074385969457e-05,
      "loss": 0.5744,
      "step": 130
    },
    {
      "epoch": 0.0013072536235436377,
      "grad_norm": 6.1960625648498535,
      "learning_rate": 4.3193525326884435e-05,
      "loss": 0.3863,
      "step": 140
    },
    {
      "epoch": 0.0014006288823681834,
      "grad_norm": 4.858926296234131,
      "learning_rate": 3.072756464904006e-05,
      "loss": 0.3115,
      "step": 150
    },
    {
      "epoch": 0.0014006288823681834,
      "eval_loss": 0.10407046973705292,
      "eval_runtime": 1402.3656,
      "eval_samples_per_second": 32.155,
      "eval_steps_per_second": 16.078,
      "step": 150
    },
    {
      "epoch": 0.001494004141192729,
      "grad_norm": 6.014227390289307,
      "learning_rate": 2.0055723659649904e-05,
      "loss": 0.4751,
      "step": 160
    },
    {
      "epoch": 0.0015873794000172744,
      "grad_norm": 6.426490306854248,
      "learning_rate": 1.1454397434679021e-05,
      "loss": 0.317,
      "step": 170
    },
    {
      "epoch": 0.0016807546588418199,
      "grad_norm": 6.19764518737793,
      "learning_rate": 5.146355805285452e-06,
      "loss": 0.3526,
      "step": 180
    },
    {
      "epoch": 0.0017741299176663656,
      "grad_norm": 4.254812240600586,
      "learning_rate": 1.2949737362087156e-06,
      "loss": 0.278,
      "step": 190
    },
    {
      "epoch": 0.001867505176490911,
      "grad_norm": 5.536872863769531,
      "learning_rate": 0.0,
      "loss": 0.2585,
      "step": 200
    },
    {
      "epoch": 0.001867505176490911,
      "eval_loss": 0.09946458041667938,
      "eval_runtime": 1400.9431,
      "eval_samples_per_second": 32.188,
      "eval_steps_per_second": 16.094,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.50534158516224e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}