{
  "best_metric": 0.016363559290766716,
  "best_model_checkpoint": "saves/psy-course-doc/Llama-3.1-8B-Instruct/train/fold3/checkpoint-100",
  "epoch": 4.938271604938271,
  "eval_steps": 10,
  "global_step": 125,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03950617283950617,
      "grad_norm": 0.11982958763837814,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.1009,
      "step": 1
    },
    {
      "epoch": 0.07901234567901234,
      "grad_norm": 0.18020769953727722,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.1311,
      "step": 2
    },
    {
      "epoch": 0.11851851851851852,
      "grad_norm": 0.17260421812534332,
      "learning_rate": 2.307692307692308e-05,
      "loss": 0.1305,
      "step": 3
    },
    {
      "epoch": 0.1580246913580247,
      "grad_norm": 0.1354549378156662,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.1094,
      "step": 4
    },
    {
      "epoch": 0.19753086419753085,
      "grad_norm": 0.10506410896778107,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.0996,
      "step": 5
    },
    {
      "epoch": 0.23703703703703705,
      "grad_norm": 0.14751164615154266,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.11,
      "step": 6
    },
    {
      "epoch": 0.2765432098765432,
      "grad_norm": 0.135710671544075,
      "learning_rate": 5.384615384615385e-05,
      "loss": 0.1059,
      "step": 7
    },
    {
      "epoch": 0.3160493827160494,
      "grad_norm": 0.13157124817371368,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.097,
      "step": 8
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 0.13530097901821136,
      "learning_rate": 6.923076923076924e-05,
      "loss": 0.1037,
      "step": 9
    },
    {
      "epoch": 0.3950617283950617,
      "grad_norm": 0.12781627476215363,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.0899,
      "step": 10
    },
    {
      "epoch": 0.3950617283950617,
      "eval_loss": 0.08468911796808243,
      "eval_runtime": 29.6804,
      "eval_samples_per_second": 1.516,
      "eval_steps_per_second": 1.516,
      "step": 10
    },
    {
      "epoch": 0.4345679012345679,
      "grad_norm": 0.11392076313495636,
      "learning_rate": 8.461538461538461e-05,
      "loss": 0.0682,
      "step": 11
    },
    {
      "epoch": 0.4740740740740741,
      "grad_norm": 0.1307547390460968,
      "learning_rate": 9.230769230769232e-05,
      "loss": 0.0791,
      "step": 12
    },
    {
      "epoch": 0.5135802469135803,
      "grad_norm": 0.1341903805732727,
      "learning_rate": 0.0001,
      "loss": 0.0647,
      "step": 13
    },
    {
      "epoch": 0.5530864197530864,
      "grad_norm": 0.12305284291505814,
      "learning_rate": 9.998033131915266e-05,
      "loss": 0.0521,
      "step": 14
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.14141477644443512,
      "learning_rate": 9.992134075089084e-05,
      "loss": 0.0561,
      "step": 15
    },
    {
      "epoch": 0.6320987654320988,
      "grad_norm": 0.1190507784485817,
      "learning_rate": 9.982307470588098e-05,
      "loss": 0.0482,
      "step": 16
    },
    {
      "epoch": 0.671604938271605,
      "grad_norm": 0.12106993794441223,
      "learning_rate": 9.968561049466214e-05,
      "loss": 0.0462,
      "step": 17
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.13914185762405396,
      "learning_rate": 9.950905626682228e-05,
      "loss": 0.0434,
      "step": 18
    },
    {
      "epoch": 0.7506172839506173,
      "grad_norm": 0.0968879908323288,
      "learning_rate": 9.92935509259118e-05,
      "loss": 0.0377,
      "step": 19
    },
    {
      "epoch": 0.7901234567901234,
      "grad_norm": 0.07901491969823837,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.0334,
      "step": 20
    },
    {
      "epoch": 0.7901234567901234,
      "eval_loss": 0.03550420328974724,
      "eval_runtime": 29.5606,
      "eval_samples_per_second": 1.522,
      "eval_steps_per_second": 1.522,
      "step": 20
    },
    {
      "epoch": 0.8296296296296296,
      "grad_norm": 0.07772660255432129,
      "learning_rate": 9.874639560909117e-05,
      "loss": 0.0321,
      "step": 21
    },
    {
      "epoch": 0.8691358024691358,
      "grad_norm": 0.08289165794849396,
      "learning_rate": 9.841517610611309e-05,
      "loss": 0.0341,
      "step": 22
    },
    {
      "epoch": 0.908641975308642,
      "grad_norm": 0.06809127330780029,
      "learning_rate": 9.804586609725499e-05,
      "loss": 0.0245,
      "step": 23
    },
    {
      "epoch": 0.9481481481481482,
      "grad_norm": 0.08088243007659912,
      "learning_rate": 9.763875613614482e-05,
      "loss": 0.0298,
      "step": 24
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 0.09069989621639252,
      "learning_rate": 9.719416651541839e-05,
      "loss": 0.0291,
      "step": 25
    },
    {
      "epoch": 1.0271604938271606,
      "grad_norm": 0.14729762077331543,
      "learning_rate": 9.671244701472999e-05,
      "loss": 0.0465,
      "step": 26
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.06314294785261154,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.0233,
      "step": 27
    },
    {
      "epoch": 1.106172839506173,
      "grad_norm": 0.07074397057294846,
      "learning_rate": 9.563916325306594e-05,
      "loss": 0.0269,
      "step": 28
    },
    {
      "epoch": 1.145679012345679,
      "grad_norm": 0.0783693790435791,
      "learning_rate": 9.504844339512095e-05,
      "loss": 0.0264,
      "step": 29
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 0.07551830261945724,
      "learning_rate": 9.442228179894362e-05,
      "loss": 0.0204,
      "step": 30
    },
    {
      "epoch": 1.1851851851851851,
      "eval_loss": 0.024371623992919922,
      "eval_runtime": 29.5682,
      "eval_samples_per_second": 1.522,
      "eval_steps_per_second": 1.522,
      "step": 30
    },
    {
      "epoch": 1.2246913580246914,
      "grad_norm": 0.07203515619039536,
      "learning_rate": 9.376117109543769e-05,
      "loss": 0.0225,
      "step": 31
    },
    {
      "epoch": 1.2641975308641975,
      "grad_norm": 0.082725889980793,
      "learning_rate": 9.306563141162046e-05,
      "loss": 0.0252,
      "step": 32
    },
    {
      "epoch": 1.3037037037037038,
      "grad_norm": 0.060164906084537506,
      "learning_rate": 9.233620996141421e-05,
      "loss": 0.0188,
      "step": 33
    },
    {
      "epoch": 1.34320987654321,
      "grad_norm": 0.07446308434009552,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.0236,
      "step": 34
    },
    {
      "epoch": 1.382716049382716,
      "grad_norm": 0.06977657973766327,
      "learning_rate": 9.077804344796302e-05,
      "loss": 0.0225,
      "step": 35
    },
    {
      "epoch": 1.4222222222222223,
      "grad_norm": 0.06203348562121391,
      "learning_rate": 8.995052426791247e-05,
      "loss": 0.0181,
      "step": 36
    },
    {
      "epoch": 1.4617283950617284,
      "grad_norm": 0.07342709600925446,
      "learning_rate": 8.90915741234015e-05,
      "loss": 0.0221,
      "step": 37
    },
    {
      "epoch": 1.5012345679012347,
      "grad_norm": 0.07633589953184128,
      "learning_rate": 8.820186879108038e-05,
      "loss": 0.0177,
      "step": 38
    },
    {
      "epoch": 1.5407407407407407,
      "grad_norm": 0.056108083575963974,
      "learning_rate": 8.728210824415827e-05,
      "loss": 0.0175,
      "step": 39
    },
    {
      "epoch": 1.5802469135802468,
      "grad_norm": 0.06389579176902771,
      "learning_rate": 8.633301610170135e-05,
      "loss": 0.0215,
      "step": 40
    },
    {
      "epoch": 1.5802469135802468,
      "eval_loss": 0.020678821951150894,
      "eval_runtime": 29.5921,
      "eval_samples_per_second": 1.521,
      "eval_steps_per_second": 1.521,
      "step": 40
    },
    {
      "epoch": 1.6197530864197531,
      "grad_norm": 0.0723925456404686,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.015,
      "step": 41
    },
    {
      "epoch": 1.6592592592592592,
      "grad_norm": 0.05432877689599991,
      "learning_rate": 8.434984630174509e-05,
      "loss": 0.0158,
      "step": 42
    },
    {
      "epoch": 1.6987654320987655,
      "grad_norm": 0.05594627931714058,
      "learning_rate": 8.33173288976002e-05,
      "loss": 0.0164,
      "step": 43
    },
    {
      "epoch": 1.7382716049382716,
      "grad_norm": 0.05370336398482323,
      "learning_rate": 8.225859917710439e-05,
      "loss": 0.0226,
      "step": 44
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.07103245705366135,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.0176,
      "step": 45
    },
    {
      "epoch": 1.817283950617284,
      "grad_norm": 0.06544952839612961,
      "learning_rate": 8.006585456492029e-05,
      "loss": 0.0186,
      "step": 46
    },
    {
      "epoch": 1.8567901234567903,
      "grad_norm": 0.05212434381246567,
      "learning_rate": 7.89335648089903e-05,
      "loss": 0.0158,
      "step": 47
    },
    {
      "epoch": 1.8962962962962964,
      "grad_norm": 0.06580112874507904,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.0181,
      "step": 48
    },
    {
      "epoch": 1.9358024691358025,
      "grad_norm": 0.060175977647304535,
      "learning_rate": 7.660160382576683e-05,
      "loss": 0.0169,
      "step": 49
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 0.05927619710564613,
      "learning_rate": 7.540376726232648e-05,
      "loss": 0.0173,
      "step": 50
    },
    {
      "epoch": 1.9753086419753085,
      "eval_loss": 0.018583130091428757,
      "eval_runtime": 29.5769,
      "eval_samples_per_second": 1.521,
      "eval_steps_per_second": 1.521,
      "step": 50
    },
    {
      "epoch": 2.0148148148148146,
      "grad_norm": 0.11036009341478348,
      "learning_rate": 7.4185944355262e-05,
      "loss": 0.0289,
      "step": 51
    },
    {
      "epoch": 2.054320987654321,
      "grad_norm": 0.05153071880340576,
      "learning_rate": 7.294909322337689e-05,
      "loss": 0.016,
      "step": 52
    },
    {
      "epoch": 2.093827160493827,
      "grad_norm": 0.06391610205173492,
      "learning_rate": 7.169418695587791e-05,
      "loss": 0.0139,
      "step": 53
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 0.07181736081838608,
      "learning_rate": 7.042221284679982e-05,
      "loss": 0.014,
      "step": 54
    },
    {
      "epoch": 2.1728395061728394,
      "grad_norm": 0.04309231787919998,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.0123,
      "step": 55
    },
    {
      "epoch": 2.212345679012346,
      "grad_norm": 0.053510796278715134,
      "learning_rate": 6.783107663311565e-05,
      "loss": 0.0143,
      "step": 56
    },
    {
      "epoch": 2.251851851851852,
      "grad_norm": 0.06065421551465988,
      "learning_rate": 6.651395309775837e-05,
      "loss": 0.019,
      "step": 57
    },
    {
      "epoch": 2.291358024691358,
      "grad_norm": 0.06207943707704544,
      "learning_rate": 6.518383725548074e-05,
      "loss": 0.0158,
      "step": 58
    },
    {
      "epoch": 2.330864197530864,
      "grad_norm": 0.06607165187597275,
      "learning_rate": 6.384177557124247e-05,
      "loss": 0.0185,
      "step": 59
    },
    {
      "epoch": 2.3703703703703702,
      "grad_norm": 0.06022383272647858,
      "learning_rate": 6.248882390836135e-05,
      "loss": 0.014,
      "step": 60
    },
    {
      "epoch": 2.3703703703703702,
      "eval_loss": 0.01788869872689247,
      "eval_runtime": 29.5983,
      "eval_samples_per_second": 1.52,
      "eval_steps_per_second": 1.52,
      "step": 60
    },
    {
      "epoch": 2.4098765432098768,
      "grad_norm": 0.0726022720336914,
      "learning_rate": 6.112604669781572e-05,
      "loss": 0.017,
      "step": 61
    },
    {
      "epoch": 2.449382716049383,
      "grad_norm": 0.058323588222265244,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.0119,
      "step": 62
    },
    {
      "epoch": 2.488888888888889,
      "grad_norm": 0.06410640478134155,
      "learning_rate": 5.837531116523682e-05,
      "loss": 0.0147,
      "step": 63
    },
    {
      "epoch": 2.528395061728395,
      "grad_norm": 0.053326427936553955,
      "learning_rate": 5.698951697677498e-05,
      "loss": 0.0137,
      "step": 64
    },
    {
      "epoch": 2.567901234567901,
      "grad_norm": 0.06104976311326027,
      "learning_rate": 5.559822380516539e-05,
      "loss": 0.0139,
      "step": 65
    },
    {
      "epoch": 2.6074074074074076,
      "grad_norm": 0.06476392596960068,
      "learning_rate": 5.420252624646238e-05,
      "loss": 0.0167,
      "step": 66
    },
    {
      "epoch": 2.6469135802469137,
      "grad_norm": 0.06204933673143387,
      "learning_rate": 5.2803522361859594e-05,
      "loss": 0.0137,
      "step": 67
    },
    {
      "epoch": 2.68641975308642,
      "grad_norm": 0.058300163596868515,
      "learning_rate": 5.140231281379345e-05,
      "loss": 0.0137,
      "step": 68
    },
    {
      "epoch": 2.725925925925926,
      "grad_norm": 0.058873895555734634,
      "learning_rate": 5e-05,
      "loss": 0.0169,
      "step": 69
    },
    {
      "epoch": 2.765432098765432,
      "grad_norm": 0.07083891332149506,
      "learning_rate": 4.859768718620656e-05,
      "loss": 0.0131,
      "step": 70
    },
    {
      "epoch": 2.765432098765432,
      "eval_loss": 0.01707731932401657,
      "eval_runtime": 29.5547,
      "eval_samples_per_second": 1.523,
      "eval_steps_per_second": 1.523,
      "step": 70
    },
    {
      "epoch": 2.8049382716049385,
      "grad_norm": 0.06999720633029938,
      "learning_rate": 4.7196477638140404e-05,
      "loss": 0.0141,
      "step": 71
    },
    {
      "epoch": 2.8444444444444446,
      "grad_norm": 0.06281733512878418,
      "learning_rate": 4.579747375353763e-05,
      "loss": 0.0129,
      "step": 72
    },
    {
      "epoch": 2.8839506172839506,
      "grad_norm": 0.061437204480171204,
      "learning_rate": 4.4401776194834613e-05,
      "loss": 0.0106,
      "step": 73
    },
    {
      "epoch": 2.9234567901234567,
      "grad_norm": 0.05753050744533539,
      "learning_rate": 4.3010483023225045e-05,
      "loss": 0.0183,
      "step": 74
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 0.07456520944833755,
      "learning_rate": 4.162468883476319e-05,
      "loss": 0.0162,
      "step": 75
    },
    {
      "epoch": 3.0024691358024693,
      "grad_norm": 0.09577728062868118,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.0246,
      "step": 76
    },
    {
      "epoch": 3.0419753086419754,
      "grad_norm": 0.05529274418950081,
      "learning_rate": 3.887395330218429e-05,
      "loss": 0.0119,
      "step": 77
    },
    {
      "epoch": 3.0814814814814815,
      "grad_norm": 0.039042335003614426,
      "learning_rate": 3.7511176091638653e-05,
      "loss": 0.0137,
      "step": 78
    },
    {
      "epoch": 3.1209876543209876,
      "grad_norm": 0.04296581447124481,
      "learning_rate": 3.6158224428757535e-05,
      "loss": 0.0109,
      "step": 79
    },
    {
      "epoch": 3.1604938271604937,
      "grad_norm": 0.06321784853935242,
      "learning_rate": 3.4816162744519263e-05,
      "loss": 0.0152,
      "step": 80
    },
    {
      "epoch": 3.1604938271604937,
      "eval_loss": 0.016468888148665428,
      "eval_runtime": 29.5492,
      "eval_samples_per_second": 1.523,
      "eval_steps_per_second": 1.523,
      "step": 80
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.05038125813007355,
      "learning_rate": 3.3486046902241664e-05,
      "loss": 0.0128,
      "step": 81
    },
    {
      "epoch": 3.2395061728395063,
      "grad_norm": 0.046389661729335785,
      "learning_rate": 3.216892336688435e-05,
      "loss": 0.0128,
      "step": 82
    },
    {
      "epoch": 3.2790123456790123,
      "grad_norm": 0.06602297723293304,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0116,
      "step": 83
    },
    {
      "epoch": 3.3185185185185184,
      "grad_norm": 0.047070205211639404,
      "learning_rate": 2.9577787153200197e-05,
      "loss": 0.0111,
      "step": 84
    },
    {
      "epoch": 3.3580246913580245,
      "grad_norm": 0.04912222921848297,
      "learning_rate": 2.8305813044122097e-05,
      "loss": 0.0106,
      "step": 85
    },
    {
      "epoch": 3.397530864197531,
      "grad_norm": 0.04519559070467949,
      "learning_rate": 2.705090677662311e-05,
      "loss": 0.0141,
      "step": 86
    },
    {
      "epoch": 3.437037037037037,
      "grad_norm": 0.05045366659760475,
      "learning_rate": 2.581405564473801e-05,
      "loss": 0.0096,
      "step": 87
    },
    {
      "epoch": 3.476543209876543,
      "grad_norm": 0.070041723549366,
      "learning_rate": 2.459623273767354e-05,
      "loss": 0.0125,
      "step": 88
    },
    {
      "epoch": 3.5160493827160493,
      "grad_norm": 0.05587673932313919,
      "learning_rate": 2.3398396174233178e-05,
      "loss": 0.0107,
      "step": 89
    },
    {
      "epoch": 3.5555555555555554,
      "grad_norm": 0.05657823756337166,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.014,
      "step": 90
    },
    {
      "epoch": 3.5555555555555554,
      "eval_loss": 0.017023487016558647,
      "eval_runtime": 29.5202,
      "eval_samples_per_second": 1.524,
      "eval_steps_per_second": 1.524,
      "step": 90
    },
    {
      "epoch": 3.595061728395062,
      "grad_norm": 0.0667024478316307,
      "learning_rate": 2.1066435191009715e-05,
      "loss": 0.0143,
      "step": 91
    },
    {
      "epoch": 3.634567901234568,
      "grad_norm": 0.04744872823357582,
      "learning_rate": 1.9934145435079702e-05,
      "loss": 0.0118,
      "step": 92
    },
    {
      "epoch": 3.674074074074074,
      "grad_norm": 0.07030303031206131,
      "learning_rate": 1.8825509907063327e-05,
      "loss": 0.0121,
      "step": 93
    },
    {
      "epoch": 3.71358024691358,
      "grad_norm": 0.05097898840904236,
      "learning_rate": 1.774140082289563e-05,
      "loss": 0.0141,
      "step": 94
    },
    {
      "epoch": 3.753086419753086,
      "grad_norm": 0.06667334586381912,
      "learning_rate": 1.6682671102399805e-05,
      "loss": 0.0151,
      "step": 95
    },
    {
      "epoch": 3.7925925925925927,
      "grad_norm": 0.06494573503732681,
      "learning_rate": 1.5650153698254916e-05,
      "loss": 0.0108,
      "step": 96
    },
    {
      "epoch": 3.832098765432099,
      "grad_norm": 0.0436541885137558,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.0108,
      "step": 97
    },
    {
      "epoch": 3.871604938271605,
      "grad_norm": 0.06152384355664253,
      "learning_rate": 1.3666983898298657e-05,
      "loss": 0.0125,
      "step": 98
    },
    {
      "epoch": 3.911111111111111,
      "grad_norm": 0.05768159404397011,
      "learning_rate": 1.2717891755841722e-05,
      "loss": 0.0126,
      "step": 99
    },
    {
      "epoch": 3.950617283950617,
      "grad_norm": 0.05780719220638275,
      "learning_rate": 1.1798131208919627e-05,
      "loss": 0.0106,
      "step": 100
    },
    {
      "epoch": 3.950617283950617,
      "eval_loss": 0.016363559290766716,
      "eval_runtime": 29.4826,
      "eval_samples_per_second": 1.526,
      "eval_steps_per_second": 1.526,
      "step": 100
    },
    {
      "epoch": 3.9901234567901236,
      "grad_norm": 0.061449840664863586,
      "learning_rate": 1.090842587659851e-05,
      "loss": 0.0151,
      "step": 101
    },
    {
      "epoch": 4.029629629629629,
      "grad_norm": 0.08285302668809891,
      "learning_rate": 1.004947573208756e-05,
      "loss": 0.019,
      "step": 102
    },
    {
      "epoch": 4.069135802469136,
      "grad_norm": 0.039812907576560974,
      "learning_rate": 9.221956552036992e-06,
      "loss": 0.0105,
      "step": 103
    },
    {
      "epoch": 4.108641975308642,
      "grad_norm": 0.04446830600500107,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.0095,
      "step": 104
    },
    {
      "epoch": 4.148148148148148,
      "grad_norm": 0.04810008034110069,
      "learning_rate": 7.663790038585793e-06,
      "loss": 0.0139,
      "step": 105
    },
    {
      "epoch": 4.187654320987654,
      "grad_norm": 0.04866350069642067,
      "learning_rate": 6.934368588379553e-06,
      "loss": 0.0097,
      "step": 106
    },
    {
      "epoch": 4.22716049382716,
      "grad_norm": 0.04433601349592209,
      "learning_rate": 6.238828904562316e-06,
      "loss": 0.0102,
      "step": 107
    },
    {
      "epoch": 4.266666666666667,
      "grad_norm": 0.04722043126821518,
      "learning_rate": 5.577718201056392e-06,
      "loss": 0.015,
      "step": 108
    },
    {
      "epoch": 4.306172839506173,
      "grad_norm": 0.0726417750120163,
      "learning_rate": 4.951556604879048e-06,
      "loss": 0.015,
      "step": 109
    },
    {
      "epoch": 4.345679012345679,
      "grad_norm": 0.0651179701089859,
      "learning_rate": 4.360836746934055e-06,
      "loss": 0.0133,
      "step": 110
    },
    {
      "epoch": 4.345679012345679,
      "eval_loss": 0.01652962900698185,
      "eval_runtime": 29.4288,
      "eval_samples_per_second": 1.529,
      "eval_steps_per_second": 1.529,
      "step": 110
    },
    {
      "epoch": 4.385185185185185,
      "grad_norm": 0.046997252851724625,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.0082,
      "step": 111
    },
    {
      "epoch": 4.424691358024692,
      "grad_norm": 0.05112281069159508,
      "learning_rate": 3.2875529852700147e-06,
      "loss": 0.0084,
      "step": 112
    },
    {
      "epoch": 4.4641975308641975,
      "grad_norm": 0.05031479522585869,
      "learning_rate": 2.8058334845816213e-06,
      "loss": 0.0122,
      "step": 113
    },
    {
      "epoch": 4.503703703703704,
      "grad_norm": 0.05058969557285309,
      "learning_rate": 2.361243863855184e-06,
      "loss": 0.0142,
      "step": 114
    },
    {
      "epoch": 4.54320987654321,
      "grad_norm": 0.047341667115688324,
      "learning_rate": 1.9541339027450256e-06,
      "loss": 0.0112,
      "step": 115
    },
    {
      "epoch": 4.582716049382716,
      "grad_norm": 0.03792574256658554,
      "learning_rate": 1.584823893886933e-06,
      "loss": 0.01,
      "step": 116
    },
    {
      "epoch": 4.622222222222222,
      "grad_norm": 0.04679827392101288,
      "learning_rate": 1.2536043909088191e-06,
      "loss": 0.0119,
      "step": 117
    },
    {
      "epoch": 4.661728395061728,
      "grad_norm": 0.042794372886419296,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.0095,
      "step": 118
    },
    {
      "epoch": 4.701234567901235,
      "grad_norm": 0.05135497450828552,
      "learning_rate": 7.064490740882057e-07,
      "loss": 0.0134,
      "step": 119
    },
    {
      "epoch": 4.7407407407407405,
      "grad_norm": 0.04792887717485428,
      "learning_rate": 4.909437331777179e-07,
      "loss": 0.013,
      "step": 120
    },
    {
      "epoch": 4.7407407407407405,
      "eval_loss": 0.01643712818622589,
      "eval_runtime": 29.3583,
      "eval_samples_per_second": 1.533,
      "eval_steps_per_second": 1.533,
      "step": 120
    },
    {
      "epoch": 4.780246913580247,
      "grad_norm": 0.054526232182979584,
      "learning_rate": 3.143895053378698e-07,
      "loss": 0.01,
      "step": 121
    },
    {
      "epoch": 4.8197530864197535,
      "grad_norm": 0.043358638882637024,
      "learning_rate": 1.7692529411904578e-07,
      "loss": 0.0108,
      "step": 122
    },
    {
      "epoch": 4.859259259259259,
      "grad_norm": 0.05204227939248085,
      "learning_rate": 7.865924910916977e-08,
      "loss": 0.0111,
      "step": 123
    },
    {
      "epoch": 4.898765432098766,
      "grad_norm": 0.040465980768203735,
      "learning_rate": 1.9668680847356735e-08,
      "loss": 0.0091,
      "step": 124
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 0.048052042722702026,
      "learning_rate": 0.0,
      "loss": 0.0129,
      "step": 125
    },
    {
      "epoch": 4.938271604938271,
      "step": 125,
      "total_flos": 1.9234926316191744e+17,
      "train_loss": 0.026285644583404065,
      "train_runtime": 5836.1157,
      "train_samples_per_second": 0.347,
      "train_steps_per_second": 0.021
    }
  ],
  "logging_steps": 1,
  "max_steps": 125,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9234926316191744e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}