{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.926829268292683,
"eval_steps": 500,
"global_step": 60,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04878048780487805,
"grad_norm": 6.836372375488281,
"learning_rate": 1.6666666666666667e-06,
"loss": 1.1182,
"step": 1
},
{
"epoch": 0.0975609756097561,
"grad_norm": 6.894412517547607,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.1051,
"step": 2
},
{
"epoch": 0.14634146341463414,
"grad_norm": 6.724112033843994,
"learning_rate": 5e-06,
"loss": 1.1203,
"step": 3
},
{
"epoch": 0.1951219512195122,
"grad_norm": 5.394062042236328,
"learning_rate": 6.666666666666667e-06,
"loss": 1.0799,
"step": 4
},
{
"epoch": 0.24390243902439024,
"grad_norm": 2.9904468059539795,
"learning_rate": 8.333333333333334e-06,
"loss": 0.9942,
"step": 5
},
{
"epoch": 0.2926829268292683,
"grad_norm": 4.739986896514893,
"learning_rate": 1e-05,
"loss": 0.9869,
"step": 6
},
{
"epoch": 0.34146341463414637,
"grad_norm": 4.689507961273193,
"learning_rate": 9.991540791356342e-06,
"loss": 0.9747,
"step": 7
},
{
"epoch": 0.3902439024390244,
"grad_norm": 4.10852575302124,
"learning_rate": 9.966191788709716e-06,
"loss": 1.0225,
"step": 8
},
{
"epoch": 0.43902439024390244,
"grad_norm": 3.6707763671875,
"learning_rate": 9.924038765061042e-06,
"loss": 0.9272,
"step": 9
},
{
"epoch": 0.4878048780487805,
"grad_norm": 2.4826507568359375,
"learning_rate": 9.86522435289912e-06,
"loss": 0.9513,
"step": 10
},
{
"epoch": 0.5365853658536586,
"grad_norm": 2.0593159198760986,
"learning_rate": 9.789947561577445e-06,
"loss": 0.8729,
"step": 11
},
{
"epoch": 0.5853658536585366,
"grad_norm": 2.3102424144744873,
"learning_rate": 9.698463103929542e-06,
"loss": 0.9028,
"step": 12
},
{
"epoch": 0.6341463414634146,
"grad_norm": 1.8726847171783447,
"learning_rate": 9.591080534401371e-06,
"loss": 0.8811,
"step": 13
},
{
"epoch": 0.6829268292682927,
"grad_norm": 1.3534939289093018,
"learning_rate": 9.468163201617063e-06,
"loss": 0.9254,
"step": 14
},
{
"epoch": 0.7317073170731707,
"grad_norm": 1.3493455648422241,
"learning_rate": 9.330127018922195e-06,
"loss": 0.8082,
"step": 15
},
{
"epoch": 0.7804878048780488,
"grad_norm": 1.103345274925232,
"learning_rate": 9.177439057064684e-06,
"loss": 0.8552,
"step": 16
},
{
"epoch": 0.8292682926829268,
"grad_norm": 0.8746293783187866,
"learning_rate": 9.01061596377522e-06,
"loss": 0.8405,
"step": 17
},
{
"epoch": 0.8780487804878049,
"grad_norm": 0.923000156879425,
"learning_rate": 8.83022221559489e-06,
"loss": 0.8543,
"step": 18
},
{
"epoch": 0.926829268292683,
"grad_norm": 0.7458917498588562,
"learning_rate": 8.636868207865244e-06,
"loss": 0.9075,
"step": 19
},
{
"epoch": 0.975609756097561,
"grad_norm": 0.7327495813369751,
"learning_rate": 8.43120818934367e-06,
"loss": 0.8463,
"step": 20
},
{
"epoch": 1.024390243902439,
"grad_norm": 1.1537771224975586,
"learning_rate": 8.213938048432697e-06,
"loss": 1.1651,
"step": 21
},
{
"epoch": 1.0731707317073171,
"grad_norm": 0.6341186761856079,
"learning_rate": 7.985792958513932e-06,
"loss": 0.7399,
"step": 22
},
{
"epoch": 1.1219512195121952,
"grad_norm": 0.7606744170188904,
"learning_rate": 7.747544890354031e-06,
"loss": 0.8688,
"step": 23
},
{
"epoch": 1.170731707317073,
"grad_norm": 0.6201728582382202,
"learning_rate": 7.500000000000001e-06,
"loss": 0.7049,
"step": 24
},
{
"epoch": 1.2195121951219512,
"grad_norm": 0.8632344007492065,
"learning_rate": 7.243995901002312e-06,
"loss": 0.92,
"step": 25
},
{
"epoch": 1.2682926829268293,
"grad_norm": 0.6163485050201416,
"learning_rate": 6.980398830195785e-06,
"loss": 0.6655,
"step": 26
},
{
"epoch": 1.3170731707317074,
"grad_norm": 0.5234319567680359,
"learning_rate": 6.710100716628345e-06,
"loss": 0.7905,
"step": 27
},
{
"epoch": 1.3658536585365852,
"grad_norm": 0.5779823660850525,
"learning_rate": 6.434016163555452e-06,
"loss": 0.8514,
"step": 28
},
{
"epoch": 1.4146341463414633,
"grad_norm": 0.6601042747497559,
"learning_rate": 6.153079353712201e-06,
"loss": 0.88,
"step": 29
},
{
"epoch": 1.4634146341463414,
"grad_norm": 0.5179601311683655,
"learning_rate": 5.8682408883346535e-06,
"loss": 0.6719,
"step": 30
},
{
"epoch": 1.5121951219512195,
"grad_norm": 0.5515910983085632,
"learning_rate": 5.5804645706261515e-06,
"loss": 0.9152,
"step": 31
},
{
"epoch": 1.5609756097560976,
"grad_norm": 0.49588778614997864,
"learning_rate": 5.290724144552379e-06,
"loss": 0.6975,
"step": 32
},
{
"epoch": 1.6097560975609757,
"grad_norm": 0.5540623068809509,
"learning_rate": 5e-06,
"loss": 0.8428,
"step": 33
},
{
"epoch": 1.6585365853658538,
"grad_norm": 0.5262721180915833,
"learning_rate": 4.7092758554476215e-06,
"loss": 0.7701,
"step": 34
},
{
"epoch": 1.7073170731707317,
"grad_norm": 0.499987930059433,
"learning_rate": 4.4195354293738484e-06,
"loss": 0.684,
"step": 35
},
{
"epoch": 1.7560975609756098,
"grad_norm": 0.5329548716545105,
"learning_rate": 4.131759111665349e-06,
"loss": 0.811,
"step": 36
},
{
"epoch": 1.8048780487804879,
"grad_norm": 0.516512393951416,
"learning_rate": 3.8469206462878e-06,
"loss": 0.8534,
"step": 37
},
{
"epoch": 1.8536585365853657,
"grad_norm": 0.5150623917579651,
"learning_rate": 3.5659838364445505e-06,
"loss": 0.7364,
"step": 38
},
{
"epoch": 1.9024390243902438,
"grad_norm": 0.5092661380767822,
"learning_rate": 3.289899283371657e-06,
"loss": 0.7937,
"step": 39
},
{
"epoch": 1.951219512195122,
"grad_norm": 0.4906715452671051,
"learning_rate": 3.019601169804216e-06,
"loss": 0.7559,
"step": 40
},
{
"epoch": 2.0,
"grad_norm": 0.7150355577468872,
"learning_rate": 2.7560040989976894e-06,
"loss": 1.254,
"step": 41
},
{
"epoch": 2.048780487804878,
"grad_norm": 0.4212687015533447,
"learning_rate": 2.5000000000000015e-06,
"loss": 0.7906,
"step": 42
},
{
"epoch": 2.097560975609756,
"grad_norm": 0.45231571793556213,
"learning_rate": 2.2524551096459703e-06,
"loss": 0.7495,
"step": 43
},
{
"epoch": 2.1463414634146343,
"grad_norm": 0.47202828526496887,
"learning_rate": 2.0142070414860704e-06,
"loss": 0.7846,
"step": 44
},
{
"epoch": 2.1951219512195124,
"grad_norm": 0.49245500564575195,
"learning_rate": 1.7860619515673034e-06,
"loss": 0.7664,
"step": 45
},
{
"epoch": 2.2439024390243905,
"grad_norm": 0.4255480170249939,
"learning_rate": 1.5687918106563326e-06,
"loss": 0.7178,
"step": 46
},
{
"epoch": 2.292682926829268,
"grad_norm": 0.42227357625961304,
"learning_rate": 1.3631317921347564e-06,
"loss": 0.7867,
"step": 47
},
{
"epoch": 2.341463414634146,
"grad_norm": 0.4211116135120392,
"learning_rate": 1.1697777844051105e-06,
"loss": 0.7419,
"step": 48
},
{
"epoch": 2.3902439024390243,
"grad_norm": 0.38902539014816284,
"learning_rate": 9.893840362247809e-07,
"loss": 0.7224,
"step": 49
},
{
"epoch": 2.4390243902439024,
"grad_norm": 0.473481684923172,
"learning_rate": 8.225609429353187e-07,
"loss": 0.7914,
"step": 50
},
{
"epoch": 2.4878048780487805,
"grad_norm": 0.40908774733543396,
"learning_rate": 6.698729810778065e-07,
"loss": 0.7448,
"step": 51
},
{
"epoch": 2.5365853658536586,
"grad_norm": 0.4036145806312561,
"learning_rate": 5.318367983829393e-07,
"loss": 0.7486,
"step": 52
},
{
"epoch": 2.5853658536585367,
"grad_norm": 0.4587302803993225,
"learning_rate": 4.089194655986306e-07,
"loss": 0.7421,
"step": 53
},
{
"epoch": 2.6341463414634148,
"grad_norm": 0.42735883593559265,
"learning_rate": 3.015368960704584e-07,
"loss": 0.8132,
"step": 54
},
{
"epoch": 2.682926829268293,
"grad_norm": 0.3429286777973175,
"learning_rate": 2.1005243842255552e-07,
"loss": 0.7491,
"step": 55
},
{
"epoch": 2.7317073170731705,
"grad_norm": 0.3881649971008301,
"learning_rate": 1.3477564710088097e-07,
"loss": 0.7919,
"step": 56
},
{
"epoch": 2.7804878048780486,
"grad_norm": 0.36438366770744324,
"learning_rate": 7.59612349389599e-08,
"loss": 0.7485,
"step": 57
},
{
"epoch": 2.8292682926829267,
"grad_norm": 0.4045783579349518,
"learning_rate": 3.3808211290284886e-08,
"loss": 0.734,
"step": 58
},
{
"epoch": 2.8780487804878048,
"grad_norm": 0.3840378522872925,
"learning_rate": 8.459208643659122e-09,
"loss": 0.7751,
"step": 59
},
{
"epoch": 2.926829268292683,
"grad_norm": 0.41218218207359314,
"learning_rate": 0.0,
"loss": 0.7135,
"step": 60
},
{
"epoch": 2.926829268292683,
"step": 60,
"total_flos": 56084346568704.0,
"train_loss": 0.8459723273913066,
"train_runtime": 3353.8656,
"train_samples_per_second": 1.756,
"train_steps_per_second": 0.018
}
],
"logging_steps": 1.0,
"max_steps": 60,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 56084346568704.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}