{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9893390191897654,
  "eval_steps": 100,
  "global_step": 58,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 614.0942222595215,
      "epoch": 0.08528784648187633,
      "grad_norm": 39.48369216918945,
      "kl": 0.002408742904663086,
      "learning_rate": 2.5e-06,
      "loss": 0.0001,
      "reward": 0.6303571693599224,
      "reward_std": 0.3278109859675169,
      "rewards/accuracy_reward": 0.6299107424914837,
      "rewards/format_reward": 0.00044642859138548373,
      "step": 5
    },
    {
      "completion_length": 618.1955627441406,
      "epoch": 0.17057569296375266,
      "grad_norm": 0.26418083906173706,
      "kl": 0.0033366203308105467,
      "learning_rate": 2.956412726139078e-06,
      "loss": 0.0001,
      "reward": 0.6823660999536514,
      "reward_std": 0.2967432256788015,
      "rewards/accuracy_reward": 0.6821428865194321,
      "rewards/format_reward": 0.00022321429569274187,
      "step": 10
    },
    {
      "completion_length": 615.4616325378418,
      "epoch": 0.255863539445629,
      "grad_norm": 0.29952046275138855,
      "kl": 0.004865837097167969,
      "learning_rate": 2.7836719084521715e-06,
      "loss": 0.0002,
      "reward": 0.7544643208384514,
      "reward_std": 0.22131893783807755,
      "rewards/accuracy_reward": 0.7544643208384514,
      "rewards/format_reward": 0.0,
      "step": 15
    },
    {
      "completion_length": 594.9377471923829,
      "epoch": 0.3411513859275053,
      "grad_norm": 0.26904380321502686,
      "kl": 0.0036653518676757813,
      "learning_rate": 2.4946839873611927e-06,
      "loss": 0.0001,
      "reward": 0.7558036029338837,
      "reward_std": 0.20889290906488894,
      "rewards/accuracy_reward": 0.7558036029338837,
      "rewards/format_reward": 0.0,
      "step": 20
    },
    {
      "completion_length": 605.4913223266601,
      "epoch": 0.42643923240938164,
      "grad_norm": 0.12021861970424652,
      "kl": 0.003992271423339844,
      "learning_rate": 2.1156192081791355e-06,
      "loss": 0.0002,
      "reward": 0.7569196805357933,
      "reward_std": 0.19887337032705546,
      "rewards/accuracy_reward": 0.7569196805357933,
      "rewards/format_reward": 0.0,
      "step": 25
    },
    {
      "completion_length": 609.8823944091797,
      "epoch": 0.511727078891258,
      "grad_norm": 0.09571157395839691,
      "kl": 0.0034656524658203125,
      "learning_rate": 1.6808050203829845e-06,
      "loss": 0.0001,
      "reward": 0.7562500357627868,
      "reward_std": 0.1893269034102559,
      "rewards/accuracy_reward": 0.7562500357627868,
      "rewards/format_reward": 0.0,
      "step": 30
    },
    {
      "completion_length": 594.9317237854004,
      "epoch": 0.5970149253731343,
      "grad_norm": 0.17406129837036133,
      "kl": 0.01944389343261719,
      "learning_rate": 1.2296174432791415e-06,
      "loss": 0.0008,
      "reward": 0.7457589656114578,
      "reward_std": 0.1772445771843195,
      "rewards/accuracy_reward": 0.7457589656114578,
      "rewards/format_reward": 0.0,
      "step": 35
    },
    {
      "completion_length": 587.7542655944824,
      "epoch": 0.6823027718550106,
      "grad_norm": 0.3893604278564453,
      "kl": 0.0033367156982421877,
      "learning_rate": 8.029152419343472e-07,
      "loss": 0.0001,
      "reward": 0.7665178909897804,
      "reward_std": 0.17746288534253835,
      "rewards/accuracy_reward": 0.7665178909897804,
      "rewards/format_reward": 0.0,
      "step": 40
    },
    {
      "completion_length": 608.1580612182618,
      "epoch": 0.767590618336887,
      "grad_norm": 0.13640980422496796,
      "kl": 0.0030500411987304686,
      "learning_rate": 4.3933982822017883e-07,
      "loss": 0.0001,
      "reward": 0.7549107506871223,
      "reward_std": 0.18457430368289351,
      "rewards/accuracy_reward": 0.7549107506871223,
      "rewards/format_reward": 0.0,
      "step": 45
    },
    {
      "completion_length": 602.2951156616211,
      "epoch": 0.8528784648187633,
      "grad_norm": 0.217549666762352,
      "kl": 0.003106689453125,
      "learning_rate": 1.718159615201853e-07,
      "loss": 0.0001,
      "reward": 0.7488839671015739,
      "reward_std": 0.18157500196248294,
      "rewards/accuracy_reward": 0.7488839671015739,
      "rewards/format_reward": 0.0,
      "step": 50
    },
    {
      "completion_length": 599.9350708007812,
      "epoch": 0.9381663113006397,
      "grad_norm": 0.23885449767112732,
      "kl": 0.0033361434936523436,
      "learning_rate": 2.4570139579284723e-08,
      "loss": 0.0001,
      "reward": 0.776116107404232,
      "reward_std": 0.19774878825992345,
      "rewards/accuracy_reward": 0.776116107404232,
      "rewards/format_reward": 0.0,
      "step": 55
    },
    {
      "completion_length": 583.1644630432129,
      "epoch": 0.9893390191897654,
      "kl": 0.0035022099812825522,
      "reward": 0.7760417039195696,
      "reward_std": 0.16163485000530878,
      "rewards/accuracy_reward": 0.7760417039195696,
      "rewards/format_reward": 0.0,
      "step": 58,
      "total_flos": 0.0,
      "train_loss": -0.00014772719968559928,
      "train_runtime": 12261.9552,
      "train_samples_per_second": 0.612,
      "train_steps_per_second": 0.005
    }
  ],
  "logging_steps": 5,
  "max_steps": 58,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}