{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9956331877729258,
  "eval_steps": 500,
  "global_step": 57,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.017467248908296942,
      "grad_norm": 0.12968653440475464,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.3378,
      "step": 1
    },
    {
      "epoch": 0.034934497816593885,
      "grad_norm": 0.1782296746969223,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.2777,
      "step": 2
    },
    {
      "epoch": 0.05240174672489083,
      "grad_norm": 0.1216743066906929,
      "learning_rate": 3e-06,
      "loss": 1.2269,
      "step": 3
    },
    {
      "epoch": 0.06986899563318777,
      "grad_norm": 0.13824662566184998,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.2621,
      "step": 4
    },
    {
      "epoch": 0.08733624454148471,
      "grad_norm": 0.15446175634860992,
      "learning_rate": 5e-06,
      "loss": 1.2518,
      "step": 5
    },
    {
      "epoch": 0.10480349344978165,
      "grad_norm": 0.13843178749084473,
      "learning_rate": 6e-06,
      "loss": 1.2438,
      "step": 6
    },
    {
      "epoch": 0.1222707423580786,
      "grad_norm": 0.15130339562892914,
      "learning_rate": 7e-06,
      "loss": 1.2331,
      "step": 7
    },
    {
      "epoch": 0.13973799126637554,
      "grad_norm": 0.15838982164859772,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.2703,
      "step": 8
    },
    {
      "epoch": 0.1572052401746725,
      "grad_norm": 0.15755866467952728,
      "learning_rate": 9e-06,
      "loss": 1.2603,
      "step": 9
    },
    {
      "epoch": 0.17467248908296942,
      "grad_norm": 0.16667789220809937,
      "learning_rate": 1e-05,
      "loss": 1.2428,
      "step": 10
    },
    {
      "epoch": 0.19213973799126638,
      "grad_norm": 0.1794327050447464,
      "learning_rate": 9.988834393115768e-06,
      "loss": 1.2442,
      "step": 11
    },
    {
      "epoch": 0.2096069868995633,
      "grad_norm": 0.18840420246124268,
      "learning_rate": 9.955387440773902e-06,
      "loss": 1.2562,
      "step": 12
    },
    {
      "epoch": 0.22707423580786026,
      "grad_norm": 0.24771647155284882,
      "learning_rate": 9.899808525182935e-06,
      "loss": 1.2056,
      "step": 13
    },
    {
      "epoch": 0.2445414847161572,
      "grad_norm": 0.2212584763765335,
      "learning_rate": 9.822345875271884e-06,
      "loss": 1.2462,
      "step": 14
    },
    {
      "epoch": 0.26200873362445415,
      "grad_norm": 0.2274945080280304,
      "learning_rate": 9.723345458039595e-06,
      "loss": 1.262,
      "step": 15
    },
    {
      "epoch": 0.2794759825327511,
      "grad_norm": 0.22981807589530945,
      "learning_rate": 9.603249433382145e-06,
      "loss": 1.2918,
      "step": 16
    },
    {
      "epoch": 0.29694323144104806,
      "grad_norm": 0.27434590458869934,
      "learning_rate": 9.462594179299408e-06,
      "loss": 1.2858,
      "step": 17
    },
    {
      "epoch": 0.314410480349345,
      "grad_norm": 0.23107963800430298,
      "learning_rate": 9.302007896300697e-06,
      "loss": 1.1679,
      "step": 18
    },
    {
      "epoch": 0.3318777292576419,
      "grad_norm": 0.23975740373134613,
      "learning_rate": 9.122207801708802e-06,
      "loss": 1.2119,
      "step": 19
    },
    {
      "epoch": 0.34934497816593885,
      "grad_norm": 0.21921472251415253,
      "learning_rate": 8.923996926393306e-06,
      "loss": 1.1735,
      "step": 20
    },
    {
      "epoch": 0.36681222707423583,
      "grad_norm": 0.22271591424942017,
      "learning_rate": 8.708260528239788e-06,
      "loss": 1.2749,
      "step": 21
    },
    {
      "epoch": 0.38427947598253276,
      "grad_norm": 0.20965439081192017,
      "learning_rate": 8.475962138373212e-06,
      "loss": 1.25,
      "step": 22
    },
    {
      "epoch": 0.4017467248908297,
      "grad_norm": 0.17370395362377167,
      "learning_rate": 8.228139257794012e-06,
      "loss": 1.1968,
      "step": 23
    },
    {
      "epoch": 0.4192139737991266,
      "grad_norm": 0.19179144501686096,
      "learning_rate": 7.965898723646777e-06,
      "loss": 1.169,
      "step": 24
    },
    {
      "epoch": 0.4366812227074236,
      "grad_norm": 0.1869051307439804,
      "learning_rate": 7.690411765816864e-06,
      "loss": 1.199,
      "step": 25
    },
    {
      "epoch": 0.45414847161572053,
      "grad_norm": 0.17581412196159363,
      "learning_rate": 7.402908775933419e-06,
      "loss": 1.126,
      "step": 26
    },
    {
      "epoch": 0.47161572052401746,
      "grad_norm": 0.1623380184173584,
      "learning_rate": 7.104673812141676e-06,
      "loss": 1.1392,
      "step": 27
    },
    {
      "epoch": 0.4890829694323144,
      "grad_norm": 0.19948747754096985,
      "learning_rate": 6.797038864187564e-06,
      "loss": 1.0956,
      "step": 28
    },
    {
      "epoch": 0.5065502183406113,
      "grad_norm": 0.1480165272951126,
      "learning_rate": 6.481377904428171e-06,
      "loss": 1.1984,
      "step": 29
    },
    {
      "epoch": 0.5240174672489083,
      "grad_norm": 0.15460729598999023,
      "learning_rate": 6.1591007513376425e-06,
      "loss": 1.1759,
      "step": 30
    },
    {
      "epoch": 0.5414847161572053,
      "grad_norm": 0.13603241741657257,
      "learning_rate": 5.831646772915651e-06,
      "loss": 1.182,
      "step": 31
    },
    {
      "epoch": 0.5589519650655022,
      "grad_norm": 0.15064558386802673,
      "learning_rate": 5.500478458120493e-06,
      "loss": 1.1191,
      "step": 32
    },
    {
      "epoch": 0.5764192139737991,
      "grad_norm": 0.1394384205341339,
      "learning_rate": 5.1670748850383734e-06,
      "loss": 1.1359,
      "step": 33
    },
    {
      "epoch": 0.5938864628820961,
      "grad_norm": 0.12493956089019775,
      "learning_rate": 4.832925114961629e-06,
      "loss": 1.1747,
      "step": 34
    },
    {
      "epoch": 0.611353711790393,
      "grad_norm": 0.14052054286003113,
      "learning_rate": 4.499521541879508e-06,
      "loss": 1.1116,
      "step": 35
    },
    {
      "epoch": 0.62882096069869,
      "grad_norm": 0.1254061758518219,
      "learning_rate": 4.1683532270843505e-06,
      "loss": 1.1836,
      "step": 36
    },
    {
      "epoch": 0.6462882096069869,
      "grad_norm": 0.1256641447544098,
      "learning_rate": 3.840899248662358e-06,
      "loss": 1.1166,
      "step": 37
    },
    {
      "epoch": 0.6637554585152838,
      "grad_norm": 0.22417481243610382,
      "learning_rate": 3.518622095571831e-06,
      "loss": 1.1714,
      "step": 38
    },
    {
      "epoch": 0.6812227074235808,
      "grad_norm": 0.11602552980184555,
      "learning_rate": 3.202961135812437e-06,
      "loss": 1.1894,
      "step": 39
    },
    {
      "epoch": 0.6986899563318777,
      "grad_norm": 0.19411428272724152,
      "learning_rate": 2.8953261878583263e-06,
      "loss": 1.1708,
      "step": 40
    },
    {
      "epoch": 0.7161572052401747,
      "grad_norm": 0.13553348183631897,
      "learning_rate": 2.5970912240665815e-06,
      "loss": 1.0575,
      "step": 41
    },
    {
      "epoch": 0.7336244541484717,
      "grad_norm": 0.10409089922904968,
      "learning_rate": 2.309588234183137e-06,
      "loss": 1.1015,
      "step": 42
    },
    {
      "epoch": 0.7510917030567685,
      "grad_norm": 0.11063861846923828,
      "learning_rate": 2.0341012763532243e-06,
      "loss": 1.1157,
      "step": 43
    },
    {
      "epoch": 0.7685589519650655,
      "grad_norm": 0.11219683289527893,
      "learning_rate": 1.771860742205988e-06,
      "loss": 1.182,
      "step": 44
    },
    {
      "epoch": 0.7860262008733624,
      "grad_norm": 0.11221789568662643,
      "learning_rate": 1.5240378616267887e-06,
      "loss": 1.0891,
      "step": 45
    },
    {
      "epoch": 0.8034934497816594,
      "grad_norm": 0.11372566968202591,
      "learning_rate": 1.2917394717602123e-06,
      "loss": 1.141,
      "step": 46
    },
    {
      "epoch": 0.8209606986899564,
      "grad_norm": 0.10632464289665222,
      "learning_rate": 1.0760030736066952e-06,
      "loss": 1.0804,
      "step": 47
    },
    {
      "epoch": 0.8384279475982532,
      "grad_norm": 0.11523567885160446,
      "learning_rate": 8.777921982911996e-07,
      "loss": 1.0865,
      "step": 48
    },
    {
      "epoch": 0.8558951965065502,
      "grad_norm": 0.13787008821964264,
      "learning_rate": 6.979921036993042e-07,
      "loss": 1.1934,
      "step": 49
    },
    {
      "epoch": 0.8733624454148472,
      "grad_norm": 0.11613141745328903,
      "learning_rate": 5.374058207005945e-07,
      "loss": 1.1211,
      "step": 50
    },
    {
      "epoch": 0.8908296943231441,
      "grad_norm": 0.11686104536056519,
      "learning_rate": 3.9675056661785563e-07,
      "loss": 1.1523,
      "step": 51
    },
    {
      "epoch": 0.9082969432314411,
      "grad_norm": 0.11762232333421707,
      "learning_rate": 2.7665454196040665e-07,
      "loss": 1.132,
      "step": 52
    },
    {
      "epoch": 0.925764192139738,
      "grad_norm": 0.12294066697359085,
      "learning_rate": 1.776541247281177e-07,
      "loss": 1.1347,
      "step": 53
    },
    {
      "epoch": 0.9432314410480349,
      "grad_norm": 0.13556340336799622,
      "learning_rate": 1.0019147481706626e-07,
      "loss": 1.1571,
      "step": 54
    },
    {
      "epoch": 0.9606986899563319,
      "grad_norm": 0.11323779076337814,
      "learning_rate": 4.461255922609986e-08,
      "loss": 1.1618,
      "step": 55
    },
    {
      "epoch": 0.9781659388646288,
      "grad_norm": 0.1132877767086029,
      "learning_rate": 1.1165606884234182e-08,
      "loss": 1.1674,
      "step": 56
    },
    {
      "epoch": 0.9956331877729258,
      "grad_norm": 0.10288415104150772,
      "learning_rate": 0.0,
      "loss": 1.1306,
      "step": 57
    }
  ],
  "logging_steps": 1,
  "max_steps": 57,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 19,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.90947182212599e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}