{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4925373134328357,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07462686567164178,
      "grad_norm": 63.20117586062747,
      "learning_rate": 5e-07,
      "logits/chosen": -2.727349281311035,
      "logits/rejected": -2.7335803508758545,
      "logps/chosen": -270.8079833984375,
      "logps/rejected": -222.7043914794922,
      "loss": 0.6897,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": 0.013827224262058735,
      "rewards/margins": 0.000779601454269141,
      "rewards/rejected": 0.013047623448073864,
      "step": 5
    },
    {
      "epoch": 0.14925373134328357,
      "grad_norm": 50.76880252527562,
      "learning_rate": 1e-06,
      "logits/chosen": -2.686920166015625,
      "logits/rejected": -2.6769723892211914,
      "logps/chosen": -249.0262451171875,
      "logps/rejected": -231.17623901367188,
      "loss": 0.6468,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.47842222452163696,
      "rewards/margins": 0.08185982704162598,
      "rewards/rejected": 0.396562397480011,
      "step": 10
    },
    {
      "epoch": 0.22388059701492538,
      "grad_norm": 44.786597014715575,
      "learning_rate": 9.983100718730718e-07,
      "logits/chosen": -2.4991869926452637,
      "logits/rejected": -2.519619941711426,
      "logps/chosen": -257.5209045410156,
      "logps/rejected": -217.60446166992188,
      "loss": 0.645,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 1.3132820129394531,
      "rewards/margins": 0.6335776448249817,
      "rewards/rejected": 0.6797044277191162,
      "step": 15
    },
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 88.88932587574585,
      "learning_rate": 9.932517109205849e-07,
      "logits/chosen": -2.3716320991516113,
      "logits/rejected": -2.350722551345825,
      "logps/chosen": -275.037353515625,
      "logps/rejected": -229.4193878173828,
      "loss": 0.639,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 1.544551134109497,
      "rewards/margins": 0.876867949962616,
      "rewards/rejected": 0.6676831245422363,
      "step": 20
    },
    {
      "epoch": 0.373134328358209,
      "grad_norm": 42.980070421826184,
      "learning_rate": 9.848591102083375e-07,
      "logits/chosen": -2.2988502979278564,
      "logits/rejected": -2.288430690765381,
      "logps/chosen": -244.6092987060547,
      "logps/rejected": -221.5340118408203,
      "loss": 0.617,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 1.3750474452972412,
      "rewards/margins": 1.164804220199585,
      "rewards/rejected": 0.21024329960346222,
      "step": 25
    },
    {
      "epoch": 0.44776119402985076,
      "grad_norm": 37.576258990861845,
      "learning_rate": 9.731890013043367e-07,
      "logits/chosen": -2.3538706302642822,
      "logits/rejected": -2.3402347564697266,
      "logps/chosen": -244.2089080810547,
      "logps/rejected": -220.82180786132812,
      "loss": 0.5935,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 1.0666203498840332,
      "rewards/margins": 1.0009348392486572,
      "rewards/rejected": 0.06568538397550583,
      "step": 30
    },
    {
      "epoch": 0.5223880597014925,
      "grad_norm": 48.27486571141399,
      "learning_rate": 9.583202707897073e-07,
      "logits/chosen": -2.4315402507781982,
      "logits/rejected": -2.4107561111450195,
      "logps/chosen": -258.6941223144531,
      "logps/rejected": -200.2947998046875,
      "loss": 0.5815,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.854897677898407,
      "rewards/margins": 1.0769257545471191,
      "rewards/rejected": -0.2220279723405838,
      "step": 35
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 34.10499734328659,
      "learning_rate": 9.403534270080829e-07,
      "logits/chosen": -2.478212833404541,
      "logits/rejected": -2.4579110145568848,
      "logps/chosen": -247.2701416015625,
      "logps/rejected": -204.61563110351562,
      "loss": 0.5553,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.8650096654891968,
      "rewards/margins": 1.3998459577560425,
      "rewards/rejected": -0.5348363518714905,
      "step": 40
    },
    {
      "epoch": 0.6716417910447762,
      "grad_norm": 54.84196205672111,
      "learning_rate": 9.19409920658098e-07,
      "logits/chosen": -2.4562790393829346,
      "logits/rejected": -2.406428575515747,
      "logps/chosen": -251.5320587158203,
      "logps/rejected": -196.53280639648438,
      "loss": 0.6172,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.724992573261261,
      "rewards/margins": 0.909608006477356,
      "rewards/rejected": -0.18461540341377258,
      "step": 45
    },
    {
      "epoch": 0.746268656716418,
      "grad_norm": 44.60950151362463,
      "learning_rate": 8.956313238215823e-07,
      "logits/chosen": -2.429295063018799,
      "logits/rejected": -2.4014017581939697,
      "logps/chosen": -267.9967041015625,
      "logps/rejected": -231.6461639404297,
      "loss": 0.5811,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 1.032130241394043,
      "rewards/margins": 1.233904242515564,
      "rewards/rejected": -0.20177388191223145,
      "step": 50
    },
    {
      "epoch": 0.746268656716418,
      "eval_logits/chosen": -2.379634141921997,
      "eval_logits/rejected": -2.3887269496917725,
      "eval_logps/chosen": -262.3047790527344,
      "eval_logps/rejected": -217.30076599121094,
      "eval_loss": 0.5699393153190613,
      "eval_rewards/accuracies": 0.7333333492279053,
      "eval_rewards/chosen": 0.5697149634361267,
      "eval_rewards/margins": 1.181886076927185,
      "eval_rewards/rejected": -0.6121711730957031,
      "eval_runtime": 125.8925,
      "eval_samples_per_second": 15.092,
      "eval_steps_per_second": 0.238,
      "step": 50
    },
    {
      "epoch": 0.8208955223880597,
      "grad_norm": 38.86022913172134,
      "learning_rate": 8.691783729769873e-07,
      "logits/chosen": -2.384519338607788,
      "logits/rejected": -2.369540214538574,
      "logps/chosen": -268.18524169921875,
      "logps/rejected": -256.36614990234375,
      "loss": 0.5328,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.911398708820343,
      "rewards/margins": 1.7182573080062866,
      "rewards/rejected": -0.8068588376045227,
      "step": 55
    },
    {
      "epoch": 0.8955223880597015,
      "grad_norm": 35.804216796075785,
      "learning_rate": 8.402298824670029e-07,
      "logits/chosen": -2.4195339679718018,
      "logits/rejected": -2.3873202800750732,
      "logps/chosen": -259.75775146484375,
      "logps/rejected": -212.31201171875,
      "loss": 0.533,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 1.215578317642212,
      "rewards/margins": 1.4803569316864014,
      "rewards/rejected": -0.2647787034511566,
      "step": 60
    },
    {
      "epoch": 0.9701492537313433,
      "grad_norm": 38.53205216239837,
      "learning_rate": 8.089815357650089e-07,
      "logits/chosen": -2.5004165172576904,
      "logits/rejected": -2.459744930267334,
      "logps/chosen": -245.0717315673828,
      "logps/rejected": -219.3514862060547,
      "loss": 0.5168,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 1.0596349239349365,
      "rewards/margins": 1.2952834367752075,
      "rewards/rejected": -0.23564846813678741,
      "step": 65
    },
    {
      "epoch": 1.044776119402985,
      "grad_norm": 18.93894037354211,
      "learning_rate": 7.756445627110522e-07,
      "logits/chosen": -2.5253801345825195,
      "logits/rejected": -2.498584032058716,
      "logps/chosen": -256.0594482421875,
      "logps/rejected": -230.0350799560547,
      "loss": 0.3642,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": 1.1649268865585327,
      "rewards/margins": 2.232837200164795,
      "rewards/rejected": -1.067910075187683,
      "step": 70
    },
    {
      "epoch": 1.1194029850746268,
      "grad_norm": 20.704224755579,
      "learning_rate": 7.404443116588547e-07,
      "logits/chosen": -2.500047206878662,
      "logits/rejected": -2.477970838546753,
      "logps/chosen": -232.2460479736328,
      "logps/rejected": -218.29568481445312,
      "loss": 0.2398,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": 1.6713672876358032,
      "rewards/margins": 2.5339531898498535,
      "rewards/rejected": -0.8625856637954712,
      "step": 75
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 18.90792590059534,
      "learning_rate": 7.036187261857288e-07,
      "logits/chosen": -2.4836063385009766,
      "logits/rejected": -2.4460768699645996,
      "logps/chosen": -245.5286407470703,
      "logps/rejected": -243.6932830810547,
      "loss": 0.2302,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 2.2828805446624756,
      "rewards/margins": 3.4864933490753174,
      "rewards/rejected": -1.2036125659942627,
      "step": 80
    },
    {
      "epoch": 1.2686567164179103,
      "grad_norm": 20.90479576440656,
      "learning_rate": 6.654167366624008e-07,
      "logits/chosen": -2.427332878112793,
      "logits/rejected": -2.425985813140869,
      "logps/chosen": -233.1042938232422,
      "logps/rejected": -215.51437377929688,
      "loss": 0.2332,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 1.7251182794570923,
      "rewards/margins": 3.2692294120788574,
      "rewards/rejected": -1.5441116094589233,
      "step": 85
    },
    {
      "epoch": 1.3432835820895521,
      "grad_norm": 23.387687179198938,
      "learning_rate": 6.260965775552713e-07,
      "logits/chosen": -2.4304587841033936,
      "logits/rejected": -2.4162423610687256,
      "logps/chosen": -247.0415802001953,
      "logps/rejected": -230.84005737304688,
      "loss": 0.253,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 1.8513774871826172,
      "rewards/margins": 3.377821445465088,
      "rewards/rejected": -1.5264440774917603,
      "step": 90
    },
    {
      "epoch": 1.417910447761194,
      "grad_norm": 25.290227374387985,
      "learning_rate": 5.859240418356614e-07,
      "logits/chosen": -2.4067845344543457,
      "logits/rejected": -2.403015375137329,
      "logps/chosen": -229.62283325195312,
      "logps/rejected": -227.48825073242188,
      "loss": 0.2631,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 1.354150652885437,
      "rewards/margins": 3.5103206634521484,
      "rewards/rejected": -2.156169891357422,
      "step": 95
    },
    {
      "epoch": 1.4925373134328357,
      "grad_norm": 24.465863734128728,
      "learning_rate": 5.451706842957421e-07,
      "logits/chosen": -2.4527347087860107,
      "logits/rejected": -2.454359292984009,
      "logps/chosen": -250.5390625,
      "logps/rejected": -231.2834930419922,
      "loss": 0.2898,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 2.1465234756469727,
      "rewards/margins": 3.222960948944092,
      "rewards/rejected": -1.076436996459961,
      "step": 100
    },
    {
      "epoch": 1.4925373134328357,
      "eval_logits/chosen": -2.4716811180114746,
      "eval_logits/rejected": -2.4747350215911865,
      "eval_logps/chosen": -255.55557250976562,
      "eval_logps/rejected": -216.7303009033203,
      "eval_loss": 0.5633274912834167,
      "eval_rewards/accuracies": 0.7583333253860474,
      "eval_rewards/chosen": 1.2446327209472656,
      "eval_rewards/margins": 1.799760103225708,
      "eval_rewards/rejected": -0.5551272034645081,
      "eval_runtime": 125.768,
      "eval_samples_per_second": 15.107,
      "eval_steps_per_second": 0.239,
      "step": 100
    }
  ],
  "logging_steps": 5,
  "max_steps": 201,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1178822762299392.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}