{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9874476987447699,
"eval_steps": 500,
"global_step": 59,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016736401673640166,
"grad_norm": 58.266818387163894,
"learning_rate": 8.333333333333333e-08,
"logits/chosen": -2.640246868133545,
"logits/rejected": -2.6412415504455566,
"logps/chosen": -221.59217834472656,
"logps/pi_response": -146.05226135253906,
"logps/ref_response": -146.05226135253906,
"logps/rejected": -459.28009033203125,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.16736401673640167,
"grad_norm": 41.65988474449752,
"learning_rate": 4.930057285201027e-07,
"logits/chosen": -2.632171630859375,
"logits/rejected": -2.585049629211426,
"logps/chosen": -243.53781127929688,
"logps/pi_response": -126.54959106445312,
"logps/ref_response": -117.57322692871094,
"logps/rejected": -443.431884765625,
"loss": 0.6436,
"rewards/accuracies": 0.5868055820465088,
"rewards/chosen": -0.20465856790542603,
"rewards/margins": 0.1644359976053238,
"rewards/rejected": -0.3690946102142334,
"step": 10
},
{
"epoch": 0.33472803347280333,
"grad_norm": 40.52264024295099,
"learning_rate": 4.187457503795526e-07,
"logits/chosen": -2.519150972366333,
"logits/rejected": -2.4623703956604004,
"logps/chosen": -364.3970031738281,
"logps/pi_response": -168.98965454101562,
"logps/ref_response": -125.69271087646484,
"logps/rejected": -663.7247314453125,
"loss": 0.5606,
"rewards/accuracies": 0.75,
"rewards/chosen": -1.2418674230575562,
"rewards/margins": 1.2894560098648071,
"rewards/rejected": -2.5313236713409424,
"step": 20
},
{
"epoch": 0.502092050209205,
"grad_norm": 27.07562924150943,
"learning_rate": 2.8691164100062034e-07,
"logits/chosen": -2.4185967445373535,
"logits/rejected": -2.362191677093506,
"logps/chosen": -338.4853820800781,
"logps/pi_response": -144.1025848388672,
"logps/ref_response": -121.75166320800781,
"logps/rejected": -581.2408447265625,
"loss": 0.4885,
"rewards/accuracies": 0.7406250238418579,
"rewards/chosen": -0.8758844137191772,
"rewards/margins": 1.0427850484848022,
"rewards/rejected": -1.9186694622039795,
"step": 30
},
{
"epoch": 0.6694560669456067,
"grad_norm": 29.889956038106074,
"learning_rate": 1.4248369943086995e-07,
"logits/chosen": -2.4027583599090576,
"logits/rejected": -2.3214104175567627,
"logps/chosen": -301.30023193359375,
"logps/pi_response": -138.3297576904297,
"logps/ref_response": -122.65152740478516,
"logps/rejected": -591.5069580078125,
"loss": 0.4636,
"rewards/accuracies": 0.793749988079071,
"rewards/chosen": -0.6572309136390686,
"rewards/margins": 1.041191816329956,
"rewards/rejected": -1.6984226703643799,
"step": 40
},
{
"epoch": 0.8368200836820083,
"grad_norm": 32.7260861871924,
"learning_rate": 3.473909705816111e-08,
"logits/chosen": -2.377892255783081,
"logits/rejected": -2.300891160964966,
"logps/chosen": -314.6452331542969,
"logps/pi_response": -141.83682250976562,
"logps/ref_response": -123.07878112792969,
"logps/rejected": -590.2091674804688,
"loss": 0.4642,
"rewards/accuracies": 0.784375011920929,
"rewards/chosen": -0.699453592300415,
"rewards/margins": 1.0583473443984985,
"rewards/rejected": -1.7578010559082031,
"step": 50
},
{
"epoch": 0.9874476987447699,
"step": 59,
"total_flos": 0.0,
"train_loss": 0.5172020217119637,
"train_runtime": 2584.7224,
"train_samples_per_second": 5.913,
"train_steps_per_second": 0.023
}
],
"logging_steps": 10,
"max_steps": 59,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}