{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9952941176470587,
"eval_steps": 500,
"global_step": 212,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.047058823529411764,
"grad_norm": 1.3170621395111084,
"learning_rate": 4.9931407070965254e-05,
"loss": 0.8347,
"num_input_tokens_seen": 116560,
"step": 5
},
{
"epoch": 0.09411764705882353,
"grad_norm": 0.7480270862579346,
"learning_rate": 4.97260046830541e-05,
"loss": 0.545,
"num_input_tokens_seen": 229632,
"step": 10
},
{
"epoch": 0.1411764705882353,
"grad_norm": 0.7734505534172058,
"learning_rate": 4.9384919968379945e-05,
"loss": 0.4738,
"num_input_tokens_seen": 345008,
"step": 15
},
{
"epoch": 0.18823529411764706,
"grad_norm": 0.6531177163124084,
"learning_rate": 4.891002460691306e-05,
"loss": 0.4494,
"num_input_tokens_seen": 464176,
"step": 20
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.6236650943756104,
"learning_rate": 4.83039245557597e-05,
"loss": 0.4115,
"num_input_tokens_seen": 579536,
"step": 25
},
{
"epoch": 0.2823529411764706,
"grad_norm": 0.5742521286010742,
"learning_rate": 4.756994574914359e-05,
"loss": 0.3727,
"num_input_tokens_seen": 695664,
"step": 30
},
{
"epoch": 0.32941176470588235,
"grad_norm": 0.6050655841827393,
"learning_rate": 4.6712115847560355e-05,
"loss": 0.3611,
"num_input_tokens_seen": 816160,
"step": 35
},
{
"epoch": 0.3764705882352941,
"grad_norm": 0.610315203666687,
"learning_rate": 4.573514213625505e-05,
"loss": 0.3559,
"num_input_tokens_seen": 933856,
"step": 40
},
{
"epoch": 0.4235294117647059,
"grad_norm": 0.6431180834770203,
"learning_rate": 4.464438569430354e-05,
"loss": 0.3541,
"num_input_tokens_seen": 1056032,
"step": 45
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.6149665117263794,
"learning_rate": 4.344583197604318e-05,
"loss": 0.3322,
"num_input_tokens_seen": 1175184,
"step": 50
},
{
"epoch": 0.5176470588235295,
"grad_norm": 0.6773995757102966,
"learning_rate": 4.214605796628527e-05,
"loss": 0.3391,
"num_input_tokens_seen": 1296368,
"step": 55
},
{
"epoch": 0.5647058823529412,
"grad_norm": 0.8067043423652649,
"learning_rate": 4.075219608954278e-05,
"loss": 0.3238,
"num_input_tokens_seen": 1416656,
"step": 60
},
{
"epoch": 0.611764705882353,
"grad_norm": 0.6966668367385864,
"learning_rate": 3.927189507131938e-05,
"loss": 0.3191,
"num_input_tokens_seen": 1533136,
"step": 65
},
{
"epoch": 0.6588235294117647,
"grad_norm": 0.6485809087753296,
"learning_rate": 3.7713277966230514e-05,
"loss": 0.3192,
"num_input_tokens_seen": 1647680,
"step": 70
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.6206268072128296,
"learning_rate": 3.608489758327472e-05,
"loss": 0.324,
"num_input_tokens_seen": 1770192,
"step": 75
},
{
"epoch": 0.7529411764705882,
"grad_norm": 0.5658767819404602,
"learning_rate": 3.4395689552855955e-05,
"loss": 0.3092,
"num_input_tokens_seen": 1887696,
"step": 80
},
{
"epoch": 0.8,
"grad_norm": 0.6363285779953003,
"learning_rate": 3.265492329309867e-05,
"loss": 0.3074,
"num_input_tokens_seen": 2007312,
"step": 85
},
{
"epoch": 0.8470588235294118,
"grad_norm": 0.6739779710769653,
"learning_rate": 3.0872151144524595e-05,
"loss": 0.3092,
"num_input_tokens_seen": 2117952,
"step": 90
},
{
"epoch": 0.8941176470588236,
"grad_norm": 0.6705839037895203,
"learning_rate": 2.9057155952211502e-05,
"loss": 0.3015,
"num_input_tokens_seen": 2231696,
"step": 95
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.6125645041465759,
"learning_rate": 2.7219897383073373e-05,
"loss": 0.2964,
"num_input_tokens_seen": 2348320,
"step": 100
},
{
"epoch": 0.9882352941176471,
"grad_norm": 0.6411484479904175,
"learning_rate": 2.537045727284232e-05,
"loss": 0.2759,
"num_input_tokens_seen": 2462112,
"step": 105
},
{
"epoch": 1.035294117647059,
"grad_norm": 0.6348647475242615,
"learning_rate": 2.3518984302657146e-05,
"loss": 0.2772,
"num_input_tokens_seen": 2583648,
"step": 110
},
{
"epoch": 1.0823529411764705,
"grad_norm": 0.659324049949646,
"learning_rate": 2.1675638308842145e-05,
"loss": 0.2834,
"num_input_tokens_seen": 2704112,
"step": 115
},
{
"epoch": 1.1294117647058823,
"grad_norm": 0.7502089738845825,
"learning_rate": 1.9850534531472546e-05,
"loss": 0.2764,
"num_input_tokens_seen": 2823088,
"step": 120
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.6629338264465332,
"learning_rate": 1.8053688107658908e-05,
"loss": 0.2842,
"num_input_tokens_seen": 2940800,
"step": 125
},
{
"epoch": 1.223529411764706,
"grad_norm": 0.7144020199775696,
"learning_rate": 1.6294959114140034e-05,
"loss": 0.2712,
"num_input_tokens_seen": 3058864,
"step": 130
},
{
"epoch": 1.2705882352941176,
"grad_norm": 0.717128574848175,
"learning_rate": 1.4583998460759424e-05,
"loss": 0.2638,
"num_input_tokens_seen": 3174128,
"step": 135
},
{
"epoch": 1.3176470588235294,
"grad_norm": 0.7881118059158325,
"learning_rate": 1.2930194931731382e-05,
"loss": 0.2754,
"num_input_tokens_seen": 3289712,
"step": 140
},
{
"epoch": 1.3647058823529412,
"grad_norm": 0.738702654838562,
"learning_rate": 1.1342623665304209e-05,
"loss": 0.2844,
"num_input_tokens_seen": 3410480,
"step": 145
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.722718358039856,
"learning_rate": 9.829996354535172e-06,
"loss": 0.2745,
"num_input_tokens_seen": 3526224,
"step": 150
},
{
"epoch": 1.4588235294117646,
"grad_norm": 0.7094296216964722,
"learning_rate": 8.400613442446948e-06,
"loss": 0.2772,
"num_input_tokens_seen": 3644464,
"step": 155
},
{
"epoch": 1.5058823529411764,
"grad_norm": 0.7090117335319519,
"learning_rate": 7.062318573891716e-06,
"loss": 0.2777,
"num_input_tokens_seen": 3760976,
"step": 160
},
{
"epoch": 1.5529411764705883,
"grad_norm": 0.6970705389976501,
"learning_rate": 5.822455554065217e-06,
"loss": 0.2766,
"num_input_tokens_seen": 3880608,
"step": 165
},
{
"epoch": 1.6,
"grad_norm": 0.7649534344673157,
"learning_rate": 4.687828049857967e-06,
"loss": 0.2599,
"num_input_tokens_seen": 3993216,
"step": 170
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.6839666366577148,
"learning_rate": 3.6646622551801345e-06,
"loss": 0.2535,
"num_input_tokens_seen": 4111136,
"step": 175
},
{
"epoch": 1.6941176470588235,
"grad_norm": 0.7336522340774536,
"learning_rate": 2.75857272513132e-06,
"loss": 0.2741,
"num_input_tokens_seen": 4229136,
"step": 180
},
{
"epoch": 1.7411764705882353,
"grad_norm": 0.6339069604873657,
"learning_rate": 1.9745315664982276e-06,
"loss": 0.2601,
"num_input_tokens_seen": 4344928,
"step": 185
},
{
"epoch": 1.788235294117647,
"grad_norm": 0.6466521620750427,
"learning_rate": 1.3168411536452152e-06,
"loss": 0.2536,
"num_input_tokens_seen": 4462192,
"step": 190
},
{
"epoch": 1.835294117647059,
"grad_norm": 0.7034646272659302,
"learning_rate": 7.891105195175358e-07,
"loss": 0.2678,
"num_input_tokens_seen": 4584528,
"step": 195
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.6738657355308533,
"learning_rate": 3.9423555131007925e-07,
"loss": 0.2693,
"num_input_tokens_seen": 4701808,
"step": 200
},
{
"epoch": 1.9294117647058824,
"grad_norm": 0.6806563138961792,
"learning_rate": 1.343830994765982e-07,
"loss": 0.2545,
"num_input_tokens_seen": 4817760,
"step": 205
},
{
"epoch": 1.9764705882352942,
"grad_norm": 0.7523545622825623,
"learning_rate": 1.0979087280141298e-08,
"loss": 0.2693,
"num_input_tokens_seen": 4932544,
"step": 210
},
{
"epoch": 1.9952941176470587,
"num_input_tokens_seen": 4979584,
"step": 212,
"total_flos": 2.1307535061837414e+17,
"train_loss": 0.32363298928962564,
"train_runtime": 2717.673,
"train_samples_per_second": 2.502,
"train_steps_per_second": 0.078
}
],
"logging_steps": 5,
"max_steps": 212,
"num_input_tokens_seen": 4979584,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.1307535061837414e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}