{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.4444444444444446,
  "eval_steps": 9,
  "global_step": 99,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.024691358024691357,
      "eval_loss": 2.1480655670166016,
      "eval_runtime": 1.0117,
      "eval_samples_per_second": 68.202,
      "eval_steps_per_second": 8.896,
      "step": 1
    },
    {
      "epoch": 0.07407407407407407,
      "grad_norm": 0.17830029129981995,
      "learning_rate": 1.5e-05,
      "loss": 2.2572,
      "step": 3
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": 0.21468417346477509,
      "learning_rate": 3e-05,
      "loss": 2.2267,
      "step": 6
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 0.22635942697525024,
      "learning_rate": 4.5e-05,
      "loss": 2.271,
      "step": 9
    },
    {
      "epoch": 0.2222222222222222,
      "eval_loss": 2.143685817718506,
      "eval_runtime": 1.0061,
      "eval_samples_per_second": 68.58,
      "eval_steps_per_second": 8.945,
      "step": 9
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.1852104514837265,
      "learning_rate": 4.993910125649561e-05,
      "loss": 2.2203,
      "step": 12
    },
    {
      "epoch": 0.37037037037037035,
      "grad_norm": 0.24263235926628113,
      "learning_rate": 4.962019382530521e-05,
      "loss": 2.165,
      "step": 15
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.24341049790382385,
      "learning_rate": 4.9031542398457974e-05,
      "loss": 2.2557,
      "step": 18
    },
    {
      "epoch": 0.4444444444444444,
      "eval_loss": 2.1180965900421143,
      "eval_runtime": 1.0063,
      "eval_samples_per_second": 68.568,
      "eval_steps_per_second": 8.944,
      "step": 18
    },
    {
      "epoch": 0.5185185185185185,
      "grad_norm": 0.2104165405035019,
      "learning_rate": 4.817959636416969e-05,
      "loss": 2.2173,
      "step": 21
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.1979183554649353,
      "learning_rate": 4.707368982147318e-05,
      "loss": 2.1982,
      "step": 24
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.23016861081123352,
      "learning_rate": 4.572593931387604e-05,
      "loss": 2.2124,
      "step": 27
    },
    {
      "epoch": 0.6666666666666666,
      "eval_loss": 2.072659730911255,
      "eval_runtime": 1.0059,
      "eval_samples_per_second": 68.595,
      "eval_steps_per_second": 8.947,
      "step": 27
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 0.14867456257343292,
      "learning_rate": 4.415111107797445e-05,
      "loss": 2.1073,
      "step": 30
    },
    {
      "epoch": 0.8148148148148148,
      "grad_norm": 0.20560601353645325,
      "learning_rate": 4.2366459261474933e-05,
      "loss": 2.1231,
      "step": 33
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.20878741145133972,
      "learning_rate": 4.039153688314145e-05,
      "loss": 2.1412,
      "step": 36
    },
    {
      "epoch": 0.8888888888888888,
      "eval_loss": 2.027235746383667,
      "eval_runtime": 1.0148,
      "eval_samples_per_second": 67.995,
      "eval_steps_per_second": 8.869,
      "step": 36
    },
    {
      "epoch": 0.9629629629629629,
      "grad_norm": 0.24395641684532166,
      "learning_rate": 3.824798160583012e-05,
      "loss": 2.1021,
      "step": 39
    },
    {
      "epoch": 1.037037037037037,
      "grad_norm": 0.20625323057174683,
      "learning_rate": 3.5959278669726935e-05,
      "loss": 2.5074,
      "step": 42
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 0.23216880857944489,
      "learning_rate": 3.355050358314172e-05,
      "loss": 2.0798,
      "step": 45
    },
    {
      "epoch": 1.1111111111111112,
      "eval_loss": 1.9839798212051392,
      "eval_runtime": 1.0128,
      "eval_samples_per_second": 68.128,
      "eval_steps_per_second": 8.886,
      "step": 45
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 0.20464646816253662,
      "learning_rate": 3.104804738999169e-05,
      "loss": 2.0576,
      "step": 48
    },
    {
      "epoch": 1.2592592592592593,
      "grad_norm": 0.20127850770950317,
      "learning_rate": 2.8479327524001636e-05,
      "loss": 1.9593,
      "step": 51
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.2141118198633194,
      "learning_rate": 2.587248741756253e-05,
      "loss": 2.1897,
      "step": 54
    },
    {
      "epoch": 1.3333333333333333,
      "eval_loss": 1.9443175792694092,
      "eval_runtime": 1.0093,
      "eval_samples_per_second": 68.361,
      "eval_steps_per_second": 8.917,
      "step": 54
    },
    {
      "epoch": 1.4074074074074074,
      "grad_norm": 0.31057700514793396,
      "learning_rate": 2.3256088156396868e-05,
      "loss": 1.9642,
      "step": 57
    },
    {
      "epoch": 1.4814814814814814,
      "grad_norm": 0.30521175265312195,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 2.0191,
      "step": 60
    },
    {
      "epoch": 1.5555555555555556,
      "grad_norm": 0.1954805850982666,
      "learning_rate": 1.8109066104575023e-05,
      "loss": 2.1129,
      "step": 63
    },
    {
      "epoch": 1.5555555555555556,
      "eval_loss": 1.9135476350784302,
      "eval_runtime": 1.0142,
      "eval_samples_per_second": 68.033,
      "eval_steps_per_second": 8.874,
      "step": 63
    },
    {
      "epoch": 1.6296296296296298,
      "grad_norm": 0.18209777772426605,
      "learning_rate": 1.56348351646022e-05,
      "loss": 1.8804,
      "step": 66
    },
    {
      "epoch": 1.7037037037037037,
      "grad_norm": 0.19758297502994537,
      "learning_rate": 1.3263210930352737e-05,
      "loss": 1.8818,
      "step": 69
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.19676625728607178,
      "learning_rate": 1.1020177413231334e-05,
      "loss": 2.0798,
      "step": 72
    },
    {
      "epoch": 1.7777777777777777,
      "eval_loss": 1.9025213718414307,
      "eval_runtime": 1.0144,
      "eval_samples_per_second": 68.024,
      "eval_steps_per_second": 8.873,
      "step": 72
    },
    {
      "epoch": 1.8518518518518519,
      "grad_norm": 0.24667955935001373,
      "learning_rate": 8.930309757836517e-06,
      "loss": 1.8911,
      "step": 75
    },
    {
      "epoch": 1.925925925925926,
      "grad_norm": 0.20230413973331451,
      "learning_rate": 7.016504991533726e-06,
      "loss": 2.0511,
      "step": 78
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.2845350503921509,
      "learning_rate": 5.299731159831953e-06,
      "loss": 2.298,
      "step": 81
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.8977771997451782,
      "eval_runtime": 1.0089,
      "eval_samples_per_second": 68.393,
      "eval_steps_per_second": 8.921,
      "step": 81
    },
    {
      "epoch": 2.074074074074074,
      "grad_norm": 0.20115576684474945,
      "learning_rate": 3.798797596089351e-06,
      "loss": 2.0542,
      "step": 84
    },
    {
      "epoch": 2.148148148148148,
      "grad_norm": 0.1773402839899063,
      "learning_rate": 2.5301488425208296e-06,
      "loss": 2.0123,
      "step": 87
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 0.20624512434005737,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 1.9569,
      "step": 90
    },
    {
      "epoch": 2.2222222222222223,
      "eval_loss": 1.8960562944412231,
      "eval_runtime": 1.0098,
      "eval_samples_per_second": 68.33,
      "eval_steps_per_second": 8.913,
      "step": 90
    },
    {
      "epoch": 2.2962962962962963,
      "grad_norm": 0.20254096388816833,
      "learning_rate": 7.426068431000882e-07,
      "loss": 1.8373,
      "step": 93
    },
    {
      "epoch": 2.3703703703703702,
      "grad_norm": 0.20453697443008423,
      "learning_rate": 2.4329828146074095e-07,
      "loss": 1.9392,
      "step": 96
    },
    {
      "epoch": 2.4444444444444446,
      "grad_norm": 0.1803455650806427,
      "learning_rate": 1.522932452260595e-08,
      "loss": 2.0326,
      "step": 99
    },
    {
      "epoch": 2.4444444444444446,
      "eval_loss": 1.8957442045211792,
      "eval_runtime": 1.0119,
      "eval_samples_per_second": 68.192,
      "eval_steps_per_second": 8.895,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6289164702842880.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}