|
{
  "best_metric": 1.6226980686187744,
  "best_model_checkpoint": "/data/user_data/gonilude/cpp_and_text_pythia_410m/checkpoint-200",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 237,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_accuracy": 0.3,
      "eval_loss": 2.5906739234924316,
      "eval_runtime": 0.536,
      "eval_samples_per_second": 130.596,
      "eval_steps_per_second": 16.791,
      "num_input_tokens_seen": 0,
      "step": 0
    },
    {
      "epoch": 0.012658227848101266,
      "grad_norm": NaN,
      "learning_rate": 0.0,
      "loss": 2.2691,
      "num_input_tokens_seen": 8192,
      "step": 1
    },
    {
      "epoch": 0.06329113924050633,
      "grad_norm": Infinity,
      "learning_rate": 0.0,
      "loss": 2.9877,
      "num_input_tokens_seen": 40960,
      "step": 5
    },
    {
      "epoch": 0.12658227848101267,
      "grad_norm": Infinity,
      "learning_rate": 1e-05,
      "loss": 2.2275,
      "num_input_tokens_seen": 81920,
      "step": 10
    },
    {
      "epoch": 0.189873417721519,
      "grad_norm": 274.08111572265625,
      "learning_rate": 1.9999058994907564e-05,
      "loss": 3.2099,
      "num_input_tokens_seen": 122880,
      "step": 15
    },
    {
      "epoch": 0.25316455696202533,
      "grad_norm": 166.55935668945312,
      "learning_rate": 1.99661424082419e-05,
      "loss": 2.5884,
      "num_input_tokens_seen": 163840,
      "step": 20
    },
    {
      "epoch": 0.31645569620253167,
      "grad_norm": 110.16832733154297,
      "learning_rate": 1.9886352515311134e-05,
      "loss": 2.4323,
      "num_input_tokens_seen": 204800,
      "step": 25
    },
    {
      "epoch": 0.379746835443038,
      "grad_norm": 82.22343444824219,
      "learning_rate": 1.9760064588305347e-05,
      "loss": 2.3275,
      "num_input_tokens_seen": 245760,
      "step": 30
    },
    {
      "epoch": 0.4430379746835443,
      "grad_norm": 56.37868118286133,
      "learning_rate": 1.9587872591512583e-05,
      "loss": 1.8227,
      "num_input_tokens_seen": 286720,
      "step": 35
    },
    {
      "epoch": 0.5063291139240507,
      "grad_norm": 54.27399444580078,
      "learning_rate": 1.9370586387753532e-05,
      "loss": 1.998,
      "num_input_tokens_seen": 327680,
      "step": 40
    },
    {
      "epoch": 0.569620253164557,
      "grad_norm": 44.79275894165039,
      "learning_rate": 1.9109227929390378e-05,
      "loss": 1.972,
      "num_input_tokens_seen": 368640,
      "step": 45
    },
    {
      "epoch": 0.6329113924050633,
      "grad_norm": 46.56148910522461,
      "learning_rate": 1.8805026451824547e-05,
      "loss": 1.9728,
      "num_input_tokens_seen": 409600,
      "step": 50
    },
    {
      "epoch": 0.6329113924050633,
      "eval_accuracy": 0.18571428571428572,
      "eval_loss": 2.392421245574951,
      "eval_runtime": 0.367,
      "eval_samples_per_second": 190.718,
      "eval_steps_per_second": 24.521,
      "num_input_tokens_seen": 409600,
      "step": 50
    },
    {
      "epoch": 0.6962025316455697,
      "grad_norm": 35.83555221557617,
      "learning_rate": 1.8459412692089497e-05,
      "loss": 2.219,
      "num_input_tokens_seen": 450560,
      "step": 55
    },
    {
      "epoch": 0.759493670886076,
      "grad_norm": 33.77033996582031,
      "learning_rate": 1.8074012159730034e-05,
      "loss": 1.8699,
      "num_input_tokens_seen": 491520,
      "step": 60
    },
    {
      "epoch": 0.8227848101265823,
      "grad_norm": 51.212223052978516,
      "learning_rate": 1.765063749161688e-05,
      "loss": 1.9769,
      "num_input_tokens_seen": 532480,
      "step": 65
    },
    {
      "epoch": 0.8860759493670886,
      "grad_norm": 25.946151733398438,
      "learning_rate": 1.719127992665376e-05,
      "loss": 1.8649,
      "num_input_tokens_seen": 573440,
      "step": 70
    },
    {
      "epoch": 0.9493670886075949,
      "grad_norm": 34.178245544433594,
      "learning_rate": 1.6698099940473644e-05,
      "loss": 1.8005,
      "num_input_tokens_seen": 614400,
      "step": 75
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 26.01968765258789,
      "learning_rate": 1.6173417084171537e-05,
      "loss": 1.6863,
      "num_input_tokens_seen": 655360,
      "step": 80
    },
    {
      "epoch": 1.0759493670886076,
      "grad_norm": 17.587852478027344,
      "learning_rate": 1.5619699074864864e-05,
      "loss": 1.6306,
      "num_input_tokens_seen": 696320,
      "step": 85
    },
    {
      "epoch": 1.139240506329114,
      "grad_norm": 25.147842407226562,
      "learning_rate": 1.50395501893913e-05,
      "loss": 1.5408,
      "num_input_tokens_seen": 737280,
      "step": 90
    },
    {
      "epoch": 1.2025316455696202,
      "grad_norm": 32.01517868041992,
      "learning_rate": 1.4435699015731449e-05,
      "loss": 1.9078,
      "num_input_tokens_seen": 778240,
      "step": 95
    },
    {
      "epoch": 1.2658227848101267,
      "grad_norm": 22.242891311645508,
      "learning_rate": 1.3810985619764573e-05,
      "loss": 1.6016,
      "num_input_tokens_seen": 819200,
      "step": 100
    },
    {
      "epoch": 1.2658227848101267,
      "eval_accuracy": 0.14285714285714285,
      "eval_loss": 1.9393833875656128,
      "eval_runtime": 0.4244,
      "eval_samples_per_second": 164.94,
      "eval_steps_per_second": 21.207,
      "num_input_tokens_seen": 819200,
      "step": 100
    },
    {
      "epoch": 1.3291139240506329,
      "grad_norm": 36.45238494873047,
      "learning_rate": 1.3168348187715353e-05,
      "loss": 1.7416,
      "num_input_tokens_seen": 860160,
      "step": 105
    },
    {
      "epoch": 1.3924050632911391,
      "grad_norm": 14.048996925354004,
      "learning_rate": 1.2510809207115666e-05,
      "loss": 1.6521,
      "num_input_tokens_seen": 901120,
      "step": 110
    },
    {
      "epoch": 1.4556962025316456,
      "grad_norm": 52.22154235839844,
      "learning_rate": 1.1841461251275868e-05,
      "loss": 1.7971,
      "num_input_tokens_seen": 942080,
      "step": 115
    },
    {
      "epoch": 1.518987341772152,
      "grad_norm": 48.804534912109375,
      "learning_rate": 1.1163452434124773e-05,
      "loss": 1.7401,
      "num_input_tokens_seen": 983040,
      "step": 120
    },
    {
      "epoch": 1.5822784810126582,
      "grad_norm": 31.043750762939453,
      "learning_rate": 1.0479971603828001e-05,
      "loss": 1.6436,
      "num_input_tokens_seen": 1024000,
      "step": 125
    },
    {
      "epoch": 1.6455696202531644,
      "grad_norm": 28.90459632873535,
      "learning_rate": 9.79423334482279e-06,
      "loss": 1.6048,
      "num_input_tokens_seen": 1064960,
      "step": 130
    },
    {
      "epoch": 1.7088607594936709,
      "grad_norm": 33.064090728759766,
      "learning_rate": 9.109462858808586e-06,
      "loss": 1.6833,
      "num_input_tokens_seen": 1105920,
      "step": 135
    },
    {
      "epoch": 1.7721518987341773,
      "grad_norm": 37.580997467041016,
      "learning_rate": 8.428880795801965e-06,
      "loss": 1.5788,
      "num_input_tokens_seen": 1146880,
      "step": 140
    },
    {
      "epoch": 1.8354430379746836,
      "grad_norm": 36.66419219970703,
      "learning_rate": 7.75568810659924e-06,
      "loss": 1.7446,
      "num_input_tokens_seen": 1187840,
      "step": 145
    },
    {
      "epoch": 1.8987341772151898,
      "grad_norm": 35.965087890625,
      "learning_rate": 7.093050987889547e-06,
      "loss": 1.7223,
      "num_input_tokens_seen": 1228800,
      "step": 150
    },
    {
      "epoch": 1.8987341772151898,
      "eval_accuracy": 0.14285714285714285,
      "eval_loss": 1.6300362348556519,
      "eval_runtime": 0.3719,
      "eval_samples_per_second": 188.224,
      "eval_steps_per_second": 24.2,
      "num_input_tokens_seen": 1228800,
      "step": 150
    },
    {
      "epoch": 1.9620253164556962,
      "grad_norm": 36.96841812133789,
      "learning_rate": 6.444085990825338e-06,
      "loss": 1.628,
      "num_input_tokens_seen": 1269760,
      "step": 155
    },
    {
      "epoch": 2.0253164556962027,
      "grad_norm": 20.582347869873047,
      "learning_rate": 5.811845363088477e-06,
      "loss": 1.6389,
      "num_input_tokens_seen": 1310720,
      "step": 160
    },
    {
      "epoch": 2.088607594936709,
      "grad_norm": 40.09123229980469,
      "learning_rate": 5.199302693391958e-06,
      "loss": 1.5496,
      "num_input_tokens_seen": 1351680,
      "step": 165
    },
    {
      "epoch": 2.151898734177215,
      "grad_norm": 21.198436737060547,
      "learning_rate": 4.609338925934743e-06,
      "loss": 1.5789,
      "num_input_tokens_seen": 1392640,
      "step": 170
    },
    {
      "epoch": 2.2151898734177213,
      "grad_norm": 42.79294967651367,
      "learning_rate": 4.044728810587406e-06,
      "loss": 1.7237,
      "num_input_tokens_seen": 1433600,
      "step": 175
    },
    {
      "epoch": 2.278481012658228,
      "grad_norm": 25.977487564086914,
      "learning_rate": 3.508127852536698e-06,
      "loss": 1.6412,
      "num_input_tokens_seen": 1474560,
      "step": 180
    },
    {
      "epoch": 2.3417721518987342,
      "grad_norm": 12.27081298828125,
      "learning_rate": 3.0020598227682794e-06,
      "loss": 1.5877,
      "num_input_tokens_seen": 1515520,
      "step": 185
    },
    {
      "epoch": 2.4050632911392404,
      "grad_norm": 29.93262481689453,
      "learning_rate": 2.5289048881289256e-06,
      "loss": 1.5655,
      "num_input_tokens_seen": 1556480,
      "step": 190
    },
    {
      "epoch": 2.4683544303797467,
      "grad_norm": 11.503835678100586,
      "learning_rate": 2.090888416795582e-06,
      "loss": 1.6816,
      "num_input_tokens_seen": 1597440,
      "step": 195
    },
    {
      "epoch": 2.5316455696202533,
      "grad_norm": 32.70045852661133,
      "learning_rate": 1.69007051180199e-06,
      "loss": 1.6031,
      "num_input_tokens_seen": 1638400,
      "step": 200
    },
    {
      "epoch": 2.5316455696202533,
      "eval_accuracy": 0.15714285714285714,
      "eval_loss": 1.6226980686187744,
      "eval_runtime": 0.3713,
      "eval_samples_per_second": 188.508,
      "eval_steps_per_second": 24.237,
      "num_input_tokens_seen": 1638400,
      "step": 200
    },
    {
      "epoch": 2.5949367088607596,
      "grad_norm": 25.854286193847656,
      "learning_rate": 1.3283363218493962e-06,
      "loss": 1.5904,
      "num_input_tokens_seen": 1679360,
      "step": 205
    },
    {
      "epoch": 2.6582278481012658,
      "grad_norm": 19.382221221923828,
      "learning_rate": 1.0073871749720221e-06,
      "loss": 1.5788,
      "num_input_tokens_seen": 1720320,
      "step": 210
    },
    {
      "epoch": 2.721518987341772,
      "grad_norm": 22.248796463012695,
      "learning_rate": 7.287325767579756e-07,
      "loss": 1.6045,
      "num_input_tokens_seen": 1761280,
      "step": 215
    },
    {
      "epoch": 2.7848101265822782,
      "grad_norm": 23.392297744750977,
      "learning_rate": 4.936831107599749e-07,
      "loss": 1.5288,
      "num_input_tokens_seen": 1802240,
      "step": 220
    },
    {
      "epoch": 2.848101265822785,
      "grad_norm": 22.441410064697266,
      "learning_rate": 3.033442744870685e-07,
      "loss": 1.5869,
      "num_input_tokens_seen": 1843200,
      "step": 225
    },
    {
      "epoch": 2.911392405063291,
      "grad_norm": 31.935455322265625,
      "learning_rate": 1.5861127996827597e-07,
      "loss": 1.5976,
      "num_input_tokens_seen": 1884160,
      "step": 230
    },
    {
      "epoch": 2.9746835443037973,
      "grad_norm": 13.112102508544922,
      "learning_rate": 6.016484334238515e-08,
      "loss": 1.613,
      "num_input_tokens_seen": 1925120,
      "step": 235
    },
    {
      "epoch": 3.0,
      "num_input_tokens_seen": 1941504,
      "step": 237,
      "total_flos": 3521692676653056.0,
      "train_loss": 1.8215426835329724,
      "train_runtime": 107.3388,
      "train_samples_per_second": 17.552,
      "train_steps_per_second": 2.208
    }
  ],
  "logging_steps": 5,
  "max_steps": 237,
  "num_input_tokens_seen": 1941504,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3521692676653056.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|