{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 28,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 0.018473914863755117,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.1596,
      "step": 1
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.01958436031595016,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.1592,
      "step": 2
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 0.018997349295490185,
      "learning_rate": 5e-05,
      "loss": 0.1581,
      "step": 3
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.019098244798499944,
      "learning_rate": 4.980286753286195e-05,
      "loss": 0.1618,
      "step": 4
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.01986014761183824,
      "learning_rate": 4.9214579028215776e-05,
      "loss": 0.1597,
      "step": 5
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.026066687910980477,
      "learning_rate": 4.8244412147206284e-05,
      "loss": 0.1632,
      "step": 6
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.033262521534120314,
      "learning_rate": 4.690766700109659e-05,
      "loss": 0.1501,
      "step": 7
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.034303061725366304,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.1453,
      "step": 8
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 0.028929042166310412,
      "learning_rate": 4.3224215685535294e-05,
      "loss": 0.1429,
      "step": 9
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.024596477972911673,
      "learning_rate": 4.093559974371725e-05,
      "loss": 0.1389,
      "step": 10
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 0.02095523145172841,
      "learning_rate": 3.8395669874474915e-05,
      "loss": 0.1403,
      "step": 11
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.018521244081732214,
      "learning_rate": 3.564448228912682e-05,
      "loss": 0.131,
      "step": 12
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 0.019389700999808326,
      "learning_rate": 3.272542485937369e-05,
      "loss": 0.1275,
      "step": 13
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.018717414460688725,
      "learning_rate": 2.9684532864643122e-05,
      "loss": 0.1267,
      "step": 14
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": 0.20910676825639798,
      "learning_rate": 2.656976298823284e-05,
      "loss": 0.1499,
      "step": 15
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.17013123112157438,
      "learning_rate": 2.3430237011767167e-05,
      "loss": 0.143,
      "step": 16
    },
    {
      "epoch": 1.2142857142857142,
      "grad_norm": 0.15401200255113412,
      "learning_rate": 2.031546713535688e-05,
      "loss": 0.1358,
      "step": 17
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.10539625737893336,
      "learning_rate": 1.7274575140626318e-05,
      "loss": 0.1238,
      "step": 18
    },
    {
      "epoch": 1.3571428571428572,
      "grad_norm": 0.0851408863484842,
      "learning_rate": 1.4355517710873184e-05,
      "loss": 0.1161,
      "step": 19
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.06421811377219377,
      "learning_rate": 1.1604330125525079e-05,
      "loss": 0.1107,
      "step": 20
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.04679299115412706,
      "learning_rate": 9.064400256282757e-06,
      "loss": 0.1091,
      "step": 21
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 0.04496230116925608,
      "learning_rate": 6.775784314464717e-06,
      "loss": 0.1136,
      "step": 22
    },
    {
      "epoch": 1.6428571428571428,
      "grad_norm": 0.03819673601799427,
      "learning_rate": 4.7745751406263165e-06,
      "loss": 0.1082,
      "step": 23
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.0347997372216549,
      "learning_rate": 3.092332998903416e-06,
      "loss": 0.1125,
      "step": 24
    },
    {
      "epoch": 1.7857142857142856,
      "grad_norm": 0.03227529902193574,
      "learning_rate": 1.7555878527937164e-06,
      "loss": 0.1064,
      "step": 25
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.029351357581022388,
      "learning_rate": 7.854209717842231e-07,
      "loss": 0.1041,
      "step": 26
    },
    {
      "epoch": 1.9285714285714286,
      "grad_norm": 0.0326274405504393,
      "learning_rate": 1.9713246713805588e-07,
      "loss": 0.1043,
      "step": 27
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.02926719898005176,
      "learning_rate": 0.0,
      "loss": 0.1103,
      "step": 28
    },
    {
      "epoch": 2.0,
      "step": 28,
      "total_flos": 490074470875136.0,
      "train_loss": 0.058849748490112166,
      "train_runtime": 478.1215,
      "train_samples_per_second": 0.904,
      "train_steps_per_second": 0.059
    }
  ],
  "logging_steps": 1,
  "max_steps": 28,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 490074470875136.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}