{
  "best_metric": 0.06251987814903259,
  "best_model_checkpoint": "saves/psy-course-doc/Llama-3.1-8B-Instruct/train/fold9/checkpoint-100",
  "epoch": 4.938271604938271,
  "eval_steps": 10,
  "global_step": 125,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03950617283950617,
      "grad_norm": 0.16012704372406006,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.2188,
      "step": 1
    },
    {
      "epoch": 0.07901234567901234,
      "grad_norm": 0.1626744419336319,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.2227,
      "step": 2
    },
    {
      "epoch": 0.11851851851851852,
      "grad_norm": 0.18549920618534088,
      "learning_rate": 2.307692307692308e-05,
      "loss": 0.1686,
      "step": 3
    },
    {
      "epoch": 0.1580246913580247,
      "grad_norm": 0.17375493049621582,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.2938,
      "step": 4
    },
    {
      "epoch": 0.19753086419753085,
      "grad_norm": 0.17902612686157227,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.2091,
      "step": 5
    },
    {
      "epoch": 0.23703703703703705,
      "grad_norm": 0.18505369126796722,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.1754,
      "step": 6
    },
    {
      "epoch": 0.2765432098765432,
      "grad_norm": 0.19533436000347137,
      "learning_rate": 5.384615384615385e-05,
      "loss": 0.1805,
      "step": 7
    },
    {
      "epoch": 0.3160493827160494,
      "grad_norm": 0.18160583078861237,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.2426,
      "step": 8
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 0.16058214008808136,
      "learning_rate": 6.923076923076924e-05,
      "loss": 0.1368,
      "step": 9
    },
    {
      "epoch": 0.3950617283950617,
      "grad_norm": 0.14139407873153687,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.2259,
      "step": 10
    },
    {
      "epoch": 0.3950617283950617,
      "eval_loss": 0.14175167679786682,
      "eval_runtime": 22.9155,
      "eval_samples_per_second": 1.964,
      "eval_steps_per_second": 1.964,
      "step": 10
    },
    {
      "epoch": 0.4345679012345679,
      "grad_norm": 0.15317194163799286,
      "learning_rate": 8.461538461538461e-05,
      "loss": 0.1597,
      "step": 11
    },
    {
      "epoch": 0.4740740740740741,
      "grad_norm": 0.17080363631248474,
      "learning_rate": 9.230769230769232e-05,
      "loss": 0.1835,
      "step": 12
    },
    {
      "epoch": 0.5135802469135803,
      "grad_norm": 0.16679717600345612,
      "learning_rate": 0.0001,
      "loss": 0.139,
      "step": 13
    },
    {
      "epoch": 0.5530864197530864,
      "grad_norm": 0.16384510695934296,
      "learning_rate": 9.998033131915266e-05,
      "loss": 0.1513,
      "step": 14
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.1557767689228058,
      "learning_rate": 9.992134075089084e-05,
      "loss": 0.1007,
      "step": 15
    },
    {
      "epoch": 0.6320987654320988,
      "grad_norm": 0.14455045759677887,
      "learning_rate": 9.982307470588098e-05,
      "loss": 0.1663,
      "step": 16
    },
    {
      "epoch": 0.671604938271605,
      "grad_norm": 0.1448976993560791,
      "learning_rate": 9.968561049466214e-05,
      "loss": 0.1246,
      "step": 17
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.12634430825710297,
      "learning_rate": 9.950905626682228e-05,
      "loss": 0.0858,
      "step": 18
    },
    {
      "epoch": 0.7506172839506173,
      "grad_norm": 0.11505317687988281,
      "learning_rate": 9.92935509259118e-05,
      "loss": 0.1494,
      "step": 19
    },
    {
      "epoch": 0.7901234567901234,
      "grad_norm": 0.10868946462869644,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.172,
      "step": 20
    },
    {
      "epoch": 0.7901234567901234,
      "eval_loss": 0.08925747126340866,
      "eval_runtime": 22.8471,
      "eval_samples_per_second": 1.97,
      "eval_steps_per_second": 1.97,
      "step": 20
    },
    {
      "epoch": 0.8296296296296296,
      "grad_norm": 0.09257800132036209,
      "learning_rate": 9.874639560909117e-05,
      "loss": 0.0908,
      "step": 21
    },
    {
      "epoch": 0.8691358024691358,
      "grad_norm": 0.09200786799192429,
      "learning_rate": 9.841517610611309e-05,
      "loss": 0.0843,
      "step": 22
    },
    {
      "epoch": 0.908641975308642,
      "grad_norm": 0.09939802438020706,
      "learning_rate": 9.804586609725499e-05,
      "loss": 0.1106,
      "step": 23
    },
    {
      "epoch": 0.9481481481481482,
      "grad_norm": 0.10130990296602249,
      "learning_rate": 9.763875613614482e-05,
      "loss": 0.1202,
      "step": 24
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 0.09145660698413849,
      "learning_rate": 9.719416651541839e-05,
      "loss": 0.1106,
      "step": 25
    },
    {
      "epoch": 1.0271604938271606,
      "grad_norm": 0.20054078102111816,
      "learning_rate": 9.671244701472999e-05,
      "loss": 0.1968,
      "step": 26
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.10766153782606125,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.102,
      "step": 27
    },
    {
      "epoch": 1.106172839506173,
      "grad_norm": 0.10508635640144348,
      "learning_rate": 9.563916325306594e-05,
      "loss": 0.1394,
      "step": 28
    },
    {
      "epoch": 1.145679012345679,
      "grad_norm": 0.10992046445608139,
      "learning_rate": 9.504844339512095e-05,
      "loss": 0.1035,
      "step": 29
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 0.09355330467224121,
      "learning_rate": 9.442228179894362e-05,
      "loss": 0.0791,
      "step": 30
    },
    {
      "epoch": 1.1851851851851851,
      "eval_loss": 0.07540746033191681,
      "eval_runtime": 22.8428,
      "eval_samples_per_second": 1.97,
      "eval_steps_per_second": 1.97,
      "step": 30
    },
    {
      "epoch": 1.2246913580246914,
      "grad_norm": 0.11442509293556213,
      "learning_rate": 9.376117109543769e-05,
      "loss": 0.1317,
      "step": 31
    },
    {
      "epoch": 1.2641975308641975,
      "grad_norm": 0.11588981002569199,
      "learning_rate": 9.306563141162046e-05,
      "loss": 0.124,
      "step": 32
    },
    {
      "epoch": 1.3037037037037038,
      "grad_norm": 0.10404524207115173,
      "learning_rate": 9.233620996141421e-05,
      "loss": 0.1212,
      "step": 33
    },
    {
      "epoch": 1.34320987654321,
      "grad_norm": 0.09653621166944504,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.1008,
      "step": 34
    },
    {
      "epoch": 1.382716049382716,
      "grad_norm": 0.07260455191135406,
      "learning_rate": 9.077804344796302e-05,
      "loss": 0.0485,
      "step": 35
    },
    {
      "epoch": 1.4222222222222223,
      "grad_norm": 0.09970170259475708,
      "learning_rate": 8.995052426791247e-05,
      "loss": 0.1211,
      "step": 36
    },
    {
      "epoch": 1.4617283950617284,
      "grad_norm": 0.07373712211847305,
      "learning_rate": 8.90915741234015e-05,
      "loss": 0.0547,
      "step": 37
    },
    {
      "epoch": 1.5012345679012347,
      "grad_norm": 0.0925954282283783,
      "learning_rate": 8.820186879108038e-05,
      "loss": 0.1459,
      "step": 38
    },
    {
      "epoch": 1.5407407407407407,
      "grad_norm": 0.09566950052976608,
      "learning_rate": 8.728210824415827e-05,
      "loss": 0.122,
      "step": 39
    },
    {
      "epoch": 1.5802469135802468,
      "grad_norm": 0.08995156735181808,
      "learning_rate": 8.633301610170135e-05,
      "loss": 0.0414,
      "step": 40
    },
    {
      "epoch": 1.5802469135802468,
      "eval_loss": 0.06949092447757721,
      "eval_runtime": 22.9091,
      "eval_samples_per_second": 1.964,
      "eval_steps_per_second": 1.964,
      "step": 40
    },
    {
      "epoch": 1.6197530864197531,
      "grad_norm": 0.07674718648195267,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.0266,
      "step": 41
    },
    {
      "epoch": 1.6592592592592592,
      "grad_norm": 0.08287996053695679,
      "learning_rate": 8.434984630174509e-05,
      "loss": 0.0589,
      "step": 42
    },
    {
      "epoch": 1.6987654320987655,
      "grad_norm": 0.0897073820233345,
      "learning_rate": 8.33173288976002e-05,
      "loss": 0.0993,
      "step": 43
    },
    {
      "epoch": 1.7382716049382716,
      "grad_norm": 0.10945425927639008,
      "learning_rate": 8.225859917710439e-05,
      "loss": 0.1523,
      "step": 44
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.07625408470630646,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.0327,
      "step": 45
    },
    {
      "epoch": 1.817283950617284,
      "grad_norm": 0.08506359905004501,
      "learning_rate": 8.006585456492029e-05,
      "loss": 0.0575,
      "step": 46
    },
    {
      "epoch": 1.8567901234567903,
      "grad_norm": 0.08949103206396103,
      "learning_rate": 7.89335648089903e-05,
      "loss": 0.1164,
      "step": 47
    },
    {
      "epoch": 1.8962962962962964,
      "grad_norm": 0.09872898459434509,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.1056,
      "step": 48
    },
    {
      "epoch": 1.9358024691358025,
      "grad_norm": 0.07700581848621368,
      "learning_rate": 7.660160382576683e-05,
      "loss": 0.0655,
      "step": 49
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 0.10579069703817368,
      "learning_rate": 7.540376726232648e-05,
      "loss": 0.0704,
      "step": 50
    },
    {
      "epoch": 1.9753086419753085,
      "eval_loss": 0.06626862287521362,
      "eval_runtime": 22.8666,
      "eval_samples_per_second": 1.968,
      "eval_steps_per_second": 1.968,
      "step": 50
    },
    {
      "epoch": 2.0148148148148146,
      "grad_norm": 0.16567502915859222,
      "learning_rate": 7.4185944355262e-05,
      "loss": 0.1483,
      "step": 51
    },
    {
      "epoch": 2.054320987654321,
      "grad_norm": 0.07487107068300247,
      "learning_rate": 7.294909322337689e-05,
      "loss": 0.0686,
      "step": 52
    },
    {
      "epoch": 2.093827160493827,
      "grad_norm": 0.0772494301199913,
      "learning_rate": 7.169418695587791e-05,
      "loss": 0.097,
      "step": 53
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 0.08777953684329987,
      "learning_rate": 7.042221284679982e-05,
      "loss": 0.0962,
      "step": 54
    },
    {
      "epoch": 2.1728395061728394,
      "grad_norm": 0.0702255591750145,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.0511,
      "step": 55
    },
    {
      "epoch": 2.212345679012346,
      "grad_norm": 0.09427014738321304,
      "learning_rate": 6.783107663311565e-05,
      "loss": 0.1301,
      "step": 56
    },
    {
      "epoch": 2.251851851851852,
      "grad_norm": 0.09751700609922409,
      "learning_rate": 6.651395309775837e-05,
      "loss": 0.0769,
      "step": 57
    },
    {
      "epoch": 2.291358024691358,
      "grad_norm": 0.09331370890140533,
      "learning_rate": 6.518383725548074e-05,
      "loss": 0.0894,
      "step": 58
    },
    {
      "epoch": 2.330864197530864,
      "grad_norm": 0.10984798520803452,
      "learning_rate": 6.384177557124247e-05,
      "loss": 0.0949,
      "step": 59
    },
    {
      "epoch": 2.3703703703703702,
      "grad_norm": 0.08716970682144165,
      "learning_rate": 6.248882390836135e-05,
      "loss": 0.0784,
      "step": 60
    },
    {
      "epoch": 2.3703703703703702,
      "eval_loss": 0.0646897703409195,
      "eval_runtime": 22.843,
      "eval_samples_per_second": 1.97,
      "eval_steps_per_second": 1.97,
      "step": 60
    },
    {
      "epoch": 2.4098765432098768,
      "grad_norm": 0.08923397958278656,
      "learning_rate": 6.112604669781572e-05,
      "loss": 0.0378,
      "step": 61
    },
    {
      "epoch": 2.449382716049383,
      "grad_norm": 0.09308641403913498,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.0806,
      "step": 62
    },
    {
      "epoch": 2.488888888888889,
      "grad_norm": 0.07544505596160889,
      "learning_rate": 5.837531116523682e-05,
      "loss": 0.0507,
      "step": 63
    },
    {
      "epoch": 2.528395061728395,
      "grad_norm": 0.11075833439826965,
      "learning_rate": 5.698951697677498e-05,
      "loss": 0.0507,
      "step": 64
    },
    {
      "epoch": 2.567901234567901,
      "grad_norm": 0.1257985383272171,
      "learning_rate": 5.559822380516539e-05,
      "loss": 0.0972,
      "step": 65
    },
    {
      "epoch": 2.6074074074074076,
      "grad_norm": 0.0933673232793808,
      "learning_rate": 5.420252624646238e-05,
      "loss": 0.0677,
      "step": 66
    },
    {
      "epoch": 2.6469135802469137,
      "grad_norm": 0.09619047492742538,
      "learning_rate": 5.2803522361859594e-05,
      "loss": 0.1156,
      "step": 67
    },
    {
      "epoch": 2.68641975308642,
      "grad_norm": 0.11217456310987473,
      "learning_rate": 5.140231281379345e-05,
      "loss": 0.1447,
      "step": 68
    },
    {
      "epoch": 2.725925925925926,
      "grad_norm": 0.11414963006973267,
      "learning_rate": 5e-05,
      "loss": 0.0922,
      "step": 69
    },
    {
      "epoch": 2.765432098765432,
      "grad_norm": 0.10665580630302429,
      "learning_rate": 4.859768718620656e-05,
      "loss": 0.1002,
      "step": 70
    },
    {
      "epoch": 2.765432098765432,
      "eval_loss": 0.06409724801778793,
      "eval_runtime": 22.8363,
      "eval_samples_per_second": 1.971,
      "eval_steps_per_second": 1.971,
      "step": 70
    },
    {
      "epoch": 2.8049382716049385,
      "grad_norm": 0.09320751577615738,
      "learning_rate": 4.7196477638140404e-05,
      "loss": 0.0748,
      "step": 71
    },
    {
      "epoch": 2.8444444444444446,
      "grad_norm": 0.09927953779697418,
      "learning_rate": 4.579747375353763e-05,
      "loss": 0.1017,
      "step": 72
    },
    {
      "epoch": 2.8839506172839506,
      "grad_norm": 0.1172712966799736,
      "learning_rate": 4.4401776194834613e-05,
      "loss": 0.1052,
      "step": 73
    },
    {
      "epoch": 2.9234567901234567,
      "grad_norm": 0.0946318581700325,
      "learning_rate": 4.3010483023225045e-05,
      "loss": 0.0497,
      "step": 74
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 0.09094702452421188,
      "learning_rate": 4.162468883476319e-05,
      "loss": 0.0543,
      "step": 75
    },
    {
      "epoch": 3.0024691358024693,
      "grad_norm": 0.16316764056682587,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.1286,
      "step": 76
    },
    {
      "epoch": 3.0419753086419754,
      "grad_norm": 0.07997765392065048,
      "learning_rate": 3.887395330218429e-05,
      "loss": 0.0485,
      "step": 77
    },
    {
      "epoch": 3.0814814814814815,
      "grad_norm": 0.1100013479590416,
      "learning_rate": 3.7511176091638653e-05,
      "loss": 0.1499,
      "step": 78
    },
    {
      "epoch": 3.1209876543209876,
      "grad_norm": 0.08684566617012024,
      "learning_rate": 3.6158224428757535e-05,
      "loss": 0.0454,
      "step": 79
    },
    {
      "epoch": 3.1604938271604937,
      "grad_norm": 0.10784677416086197,
      "learning_rate": 3.4816162744519263e-05,
      "loss": 0.1164,
      "step": 80
    },
    {
      "epoch": 3.1604938271604937,
      "eval_loss": 0.06294846534729004,
      "eval_runtime": 22.8349,
      "eval_samples_per_second": 1.971,
      "eval_steps_per_second": 1.971,
      "step": 80
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.08588241040706635,
      "learning_rate": 3.3486046902241664e-05,
      "loss": 0.0732,
      "step": 81
    },
    {
      "epoch": 3.2395061728395063,
      "grad_norm": 0.0709327757358551,
      "learning_rate": 3.216892336688435e-05,
      "loss": 0.0297,
      "step": 82
    },
    {
      "epoch": 3.2790123456790123,
      "grad_norm": 0.09673380851745605,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.1105,
      "step": 83
    },
    {
      "epoch": 3.3185185185185184,
      "grad_norm": 0.09051958471536636,
      "learning_rate": 2.9577787153200197e-05,
      "loss": 0.0403,
      "step": 84
    },
    {
      "epoch": 3.3580246913580245,
      "grad_norm": 0.09911269694566727,
      "learning_rate": 2.8305813044122097e-05,
      "loss": 0.0781,
      "step": 85
    },
    {
      "epoch": 3.397530864197531,
      "grad_norm": 0.07569113373756409,
      "learning_rate": 2.705090677662311e-05,
      "loss": 0.0444,
      "step": 86
    },
    {
      "epoch": 3.437037037037037,
      "grad_norm": 0.11394467204809189,
      "learning_rate": 2.581405564473801e-05,
      "loss": 0.0689,
      "step": 87
    },
    {
      "epoch": 3.476543209876543,
      "grad_norm": 0.11975882947444916,
      "learning_rate": 2.459623273767354e-05,
      "loss": 0.156,
      "step": 88
    },
    {
      "epoch": 3.5160493827160493,
      "grad_norm": 0.1024811640381813,
      "learning_rate": 2.3398396174233178e-05,
      "loss": 0.1163,
      "step": 89
    },
    {
      "epoch": 3.5555555555555554,
      "grad_norm": 0.10779350996017456,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.0822,
      "step": 90
    },
    {
      "epoch": 3.5555555555555554,
      "eval_loss": 0.06290505826473236,
      "eval_runtime": 22.8598,
      "eval_samples_per_second": 1.969,
      "eval_steps_per_second": 1.969,
      "step": 90
    },
    {
      "epoch": 3.595061728395062,
      "grad_norm": 0.13391198217868805,
      "learning_rate": 2.1066435191009715e-05,
      "loss": 0.1135,
      "step": 91
    },
    {
      "epoch": 3.634567901234568,
      "grad_norm": 0.09439925849437714,
      "learning_rate": 1.9934145435079702e-05,
      "loss": 0.0635,
      "step": 92
    },
    {
      "epoch": 3.674074074074074,
      "grad_norm": 0.07956387847661972,
      "learning_rate": 1.8825509907063327e-05,
      "loss": 0.0431,
      "step": 93
    },
    {
      "epoch": 3.71358024691358,
      "grad_norm": 0.10458944737911224,
      "learning_rate": 1.774140082289563e-05,
      "loss": 0.0868,
      "step": 94
    },
    {
      "epoch": 3.753086419753086,
      "grad_norm": 0.0939633771777153,
      "learning_rate": 1.6682671102399805e-05,
      "loss": 0.0522,
      "step": 95
    },
    {
      "epoch": 3.7925925925925927,
      "grad_norm": 0.09950806945562363,
      "learning_rate": 1.5650153698254916e-05,
      "loss": 0.054,
      "step": 96
    },
    {
      "epoch": 3.832098765432099,
      "grad_norm": 0.11619756370782852,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.0962,
      "step": 97
    },
    {
      "epoch": 3.871604938271605,
      "grad_norm": 0.10245565325021744,
      "learning_rate": 1.3666983898298657e-05,
      "loss": 0.0676,
      "step": 98
    },
    {
      "epoch": 3.911111111111111,
      "grad_norm": 0.10659981518983841,
      "learning_rate": 1.2717891755841722e-05,
      "loss": 0.0655,
      "step": 99
    },
    {
      "epoch": 3.950617283950617,
      "grad_norm": 0.09897475689649582,
      "learning_rate": 1.1798131208919627e-05,
      "loss": 0.0583,
      "step": 100
    },
    {
      "epoch": 3.950617283950617,
      "eval_loss": 0.06251987814903259,
      "eval_runtime": 22.8549,
      "eval_samples_per_second": 1.969,
      "eval_steps_per_second": 1.969,
      "step": 100
    },
    {
      "epoch": 3.9901234567901236,
      "grad_norm": 0.07174082100391388,
      "learning_rate": 1.090842587659851e-05,
      "loss": 0.025,
      "step": 101
    },
    {
      "epoch": 4.029629629629629,
      "grad_norm": 0.14929655194282532,
      "learning_rate": 1.004947573208756e-05,
      "loss": 0.0668,
      "step": 102
    },
    {
      "epoch": 4.069135802469136,
      "grad_norm": 0.10811996459960938,
      "learning_rate": 9.221956552036992e-06,
      "loss": 0.0871,
      "step": 103
    },
    {
      "epoch": 4.108641975308642,
      "grad_norm": 0.08908949047327042,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.0341,
      "step": 104
    },
    {
      "epoch": 4.148148148148148,
      "grad_norm": 0.1073828712105751,
      "learning_rate": 7.663790038585793e-06,
      "loss": 0.0994,
      "step": 105
    },
    {
      "epoch": 4.187654320987654,
      "grad_norm": 0.0953696072101593,
      "learning_rate": 6.934368588379553e-06,
      "loss": 0.0749,
      "step": 106
    },
    {
      "epoch": 4.22716049382716,
      "grad_norm": 0.09153662621974945,
      "learning_rate": 6.238828904562316e-06,
      "loss": 0.0479,
      "step": 107
    },
    {
      "epoch": 4.266666666666667,
      "grad_norm": 0.0921344980597496,
      "learning_rate": 5.577718201056392e-06,
      "loss": 0.0713,
      "step": 108
    },
    {
      "epoch": 4.306172839506173,
      "grad_norm": 0.10496306419372559,
      "learning_rate": 4.951556604879048e-06,
      "loss": 0.0785,
      "step": 109
    },
    {
      "epoch": 4.345679012345679,
      "grad_norm": 0.07941649854183197,
      "learning_rate": 4.360836746934055e-06,
      "loss": 0.0488,
      "step": 110
    },
    {
      "epoch": 4.345679012345679,
      "eval_loss": 0.06252405047416687,
      "eval_runtime": 22.8465,
      "eval_samples_per_second": 1.97,
      "eval_steps_per_second": 1.97,
      "step": 110
    },
    {
      "epoch": 4.385185185185185,
      "grad_norm": 0.09306307137012482,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.0711,
      "step": 111
    },
    {
      "epoch": 4.424691358024692,
      "grad_norm": 0.10954549908638,
      "learning_rate": 3.2875529852700147e-06,
      "loss": 0.0794,
      "step": 112
    },
    {
      "epoch": 4.4641975308641975,
      "grad_norm": 0.10445868223905563,
      "learning_rate": 2.8058334845816213e-06,
      "loss": 0.0809,
      "step": 113
    },
    {
      "epoch": 4.503703703703704,
      "grad_norm": 0.09497830271720886,
      "learning_rate": 2.361243863855184e-06,
      "loss": 0.052,
      "step": 114
    },
    {
      "epoch": 4.54320987654321,
      "grad_norm": 0.09065257012844086,
      "learning_rate": 1.9541339027450256e-06,
      "loss": 0.0602,
      "step": 115
    },
    {
      "epoch": 4.582716049382716,
      "grad_norm": 0.09563349932432175,
      "learning_rate": 1.584823893886933e-06,
      "loss": 0.0427,
      "step": 116
    },
    {
      "epoch": 4.622222222222222,
      "grad_norm": 0.08353404700756073,
      "learning_rate": 1.2536043909088191e-06,
      "loss": 0.0449,
      "step": 117
    },
    {
      "epoch": 4.661728395061728,
      "grad_norm": 0.10227520763874054,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.0745,
      "step": 118
    },
    {
      "epoch": 4.701234567901235,
      "grad_norm": 0.10729608684778214,
      "learning_rate": 7.064490740882057e-07,
      "loss": 0.0839,
      "step": 119
    },
    {
      "epoch": 4.7407407407407405,
      "grad_norm": 0.10004584491252899,
      "learning_rate": 4.909437331777179e-07,
      "loss": 0.0822,
      "step": 120
    },
    {
      "epoch": 4.7407407407407405,
      "eval_loss": 0.0625731498003006,
      "eval_runtime": 22.8373,
      "eval_samples_per_second": 1.97,
      "eval_steps_per_second": 1.97,
      "step": 120
    },
    {
      "epoch": 4.780246913580247,
      "grad_norm": 0.11356940865516663,
      "learning_rate": 3.143895053378698e-07,
      "loss": 0.12,
      "step": 121
    },
    {
      "epoch": 4.8197530864197535,
      "grad_norm": 0.09182242304086685,
      "learning_rate": 1.7692529411904578e-07,
      "loss": 0.0783,
      "step": 122
    },
    {
      "epoch": 4.859259259259259,
      "grad_norm": 0.09101668745279312,
      "learning_rate": 7.865924910916977e-08,
      "loss": 0.08,
      "step": 123
    },
    {
      "epoch": 4.898765432098766,
      "grad_norm": 0.09921537339687347,
      "learning_rate": 1.9668680847356735e-08,
      "loss": 0.0667,
      "step": 124
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 0.09743266552686691,
      "learning_rate": 0.0,
      "loss": 0.0659,
      "step": 125
    },
    {
      "epoch": 4.938271604938271,
      "step": 125,
      "total_flos": 1.4942709034234675e+17,
      "train_loss": 0.09839795534312724,
      "train_runtime": 3683.0658,
      "train_samples_per_second": 0.55,
      "train_steps_per_second": 0.034
    }
  ],
  "logging_steps": 1,
  "max_steps": 125,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4942709034234675e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}