{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984301412872841,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.8296937942504883,
      "logits/rejected": -2.7712650299072266,
      "logps/chosen": -209.29501342773438,
      "logps/pi_response": -161.2981719970703,
      "logps/ref_response": -161.2981719970703,
      "logps/rejected": -243.91220092773438,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.6473469734191895,
      "logits/rejected": -2.65586256980896,
      "logps/chosen": -233.16639709472656,
      "logps/pi_response": -126.42855072021484,
      "logps/ref_response": -126.3212890625,
      "logps/rejected": -237.84884643554688,
      "loss": 0.6915,
      "rewards/accuracies": 0.4861111044883728,
      "rewards/chosen": -0.0029828613623976707,
      "rewards/margins": 0.0017328110989183187,
      "rewards/rejected": -0.004715672694146633,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.6891844272613525,
      "logits/rejected": -2.646394968032837,
      "logps/chosen": -241.78857421875,
      "logps/pi_response": -133.52012634277344,
      "logps/ref_response": -132.0270538330078,
      "logps/rejected": -248.91781616210938,
      "loss": 0.6813,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -0.05102919414639473,
      "rewards/margins": 0.06945384293794632,
      "rewards/rejected": -0.12048304080963135,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.495587110519409,
      "logits/rejected": -2.4219412803649902,
      "logps/chosen": -261.9192810058594,
      "logps/pi_response": -143.7517852783203,
      "logps/ref_response": -128.5669403076172,
      "logps/rejected": -254.8279266357422,
      "loss": 0.6587,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.1558101624250412,
      "rewards/margins": 0.13714733719825745,
      "rewards/rejected": -0.2929575443267822,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.368725299835205,
      "logits/rejected": -2.335207462310791,
      "logps/chosen": -250.52261352539062,
      "logps/pi_response": -155.18409729003906,
      "logps/ref_response": -116.94285583496094,
      "logps/rejected": -280.72857666015625,
      "loss": 0.6403,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.32246842980384827,
      "rewards/margins": 0.1960955113172531,
      "rewards/rejected": -0.5185639262199402,
      "step": 40
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.2041923999786377,
      "logits/rejected": -2.2023794651031494,
      "logps/chosen": -318.76263427734375,
      "logps/pi_response": -205.65676879882812,
      "logps/ref_response": -120.89216613769531,
      "logps/rejected": -388.4427490234375,
      "loss": 0.6537,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.8351813554763794,
      "rewards/margins": 0.3358566462993622,
      "rewards/rejected": -1.1710379123687744,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.2313358783721924,
      "logits/rejected": -2.1953959465026855,
      "logps/chosen": -280.07379150390625,
      "logps/pi_response": -200.27957153320312,
      "logps/ref_response": -128.31727600097656,
      "logps/rejected": -318.95355224609375,
      "loss": 0.6357,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.47581368684768677,
      "rewards/margins": 0.3573099970817566,
      "rewards/rejected": -0.8331238031387329,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.1807610988616943,
      "logits/rejected": -2.155255079269409,
      "logps/chosen": -308.809814453125,
      "logps/pi_response": -214.41006469726562,
      "logps/ref_response": -129.27540588378906,
      "logps/rejected": -313.05169677734375,
      "loss": 0.6289,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.5900571942329407,
      "rewards/margins": 0.38541507720947266,
      "rewards/rejected": -0.9754722714424133,
      "step": 70
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.2518563270568848,
      "logits/rejected": -2.181877613067627,
      "logps/chosen": -287.37249755859375,
      "logps/pi_response": -214.14730834960938,
      "logps/ref_response": -122.84080505371094,
      "logps/rejected": -332.5687561035156,
      "loss": 0.6102,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.6444031596183777,
      "rewards/margins": 0.41357699036598206,
      "rewards/rejected": -1.0579801797866821,
      "step": 80
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -2.225679636001587,
      "logits/rejected": -2.1895217895507812,
      "logps/chosen": -316.6213684082031,
      "logps/pi_response": -198.88323974609375,
      "logps/ref_response": -130.94314575195312,
      "logps/rejected": -309.0672912597656,
      "loss": 0.6179,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.5348471403121948,
      "rewards/margins": 0.31216517090797424,
      "rewards/rejected": -0.8470123410224915,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -2.1740920543670654,
      "logits/rejected": -2.1656651496887207,
      "logps/chosen": -319.45831298828125,
      "logps/pi_response": -214.01913452148438,
      "logps/ref_response": -109.1845932006836,
      "logps/rejected": -362.14593505859375,
      "loss": 0.6081,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.9864041209220886,
      "rewards/margins": 0.3350769579410553,
      "rewards/rejected": -1.3214809894561768,
      "step": 100
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -2.152390956878662,
      "logits/rejected": -2.129937171936035,
      "logps/chosen": -318.823486328125,
      "logps/pi_response": -224.39791870117188,
      "logps/ref_response": -120.67756652832031,
      "logps/rejected": -327.2312927246094,
      "loss": 0.6116,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.8843333125114441,
      "rewards/margins": 0.3031155467033386,
      "rewards/rejected": -1.1874487400054932,
      "step": 110
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -2.2765345573425293,
      "logits/rejected": -2.207855701446533,
      "logps/chosen": -342.8375549316406,
      "logps/pi_response": -240.87997436523438,
      "logps/ref_response": -135.39627075195312,
      "logps/rejected": -374.40643310546875,
      "loss": 0.5961,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.8819538950920105,
      "rewards/margins": 0.286579430103302,
      "rewards/rejected": -1.1685333251953125,
      "step": 120
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -2.1735401153564453,
      "logits/rejected": -2.1397767066955566,
      "logps/chosen": -333.1070556640625,
      "logps/pi_response": -216.6471405029297,
      "logps/ref_response": -120.5396499633789,
      "logps/rejected": -351.8336181640625,
      "loss": 0.5822,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.8667758703231812,
      "rewards/margins": 0.2806575298309326,
      "rewards/rejected": -1.1474335193634033,
      "step": 130
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -2.1801669597625732,
      "logits/rejected": -2.146042585372925,
      "logps/chosen": -293.39892578125,
      "logps/pi_response": -224.3563995361328,
      "logps/ref_response": -119.8243637084961,
      "logps/rejected": -390.6162414550781,
      "loss": 0.5935,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.8484286069869995,
      "rewards/margins": 0.4271484315395355,
      "rewards/rejected": -1.275577187538147,
      "step": 140
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -2.1090188026428223,
      "logits/rejected": -2.1141250133514404,
      "logps/chosen": -330.9354553222656,
      "logps/pi_response": -228.4915008544922,
      "logps/ref_response": -127.62400817871094,
      "logps/rejected": -327.726318359375,
      "loss": 0.586,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.8448723554611206,
      "rewards/margins": 0.3227764666080475,
      "rewards/rejected": -1.1676489114761353,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.624936523677418,
      "train_runtime": 5215.8044,
      "train_samples_per_second": 3.907,
      "train_steps_per_second": 0.03
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}