{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 30.76923076923077,
  "eval_steps": 50,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07692307692307693,
      "eval_loss": 10.376392364501953,
      "eval_runtime": 5.3202,
      "eval_samples_per_second": 282.131,
      "eval_steps_per_second": 4.511,
      "step": 1
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 0.09619140625,
      "learning_rate": 6.666666666666667e-05,
      "loss": 10.378,
      "step": 10
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.09716796875,
      "learning_rate": 0.00013333333333333334,
      "loss": 10.3754,
      "step": 20
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 0.11279296875,
      "learning_rate": 0.0002,
      "loss": 10.3683,
      "step": 30
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 0.201171875,
      "learning_rate": 0.00019984815164333163,
      "loss": 10.3522,
      "step": 40
    },
    {
      "epoch": 3.8461538461538463,
      "grad_norm": 0.3203125,
      "learning_rate": 0.00019939306773179497,
      "loss": 10.3159,
      "step": 50
    },
    {
      "epoch": 3.8461538461538463,
      "eval_loss": 10.285228729248047,
      "eval_runtime": 5.2726,
      "eval_samples_per_second": 284.681,
      "eval_steps_per_second": 4.552,
      "step": 50
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 0.32421875,
      "learning_rate": 0.00019863613034027224,
      "loss": 10.2518,
      "step": 60
    },
    {
      "epoch": 5.384615384615385,
      "grad_norm": 0.326171875,
      "learning_rate": 0.00019757963826274357,
      "loss": 10.1828,
      "step": 70
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 0.330078125,
      "learning_rate": 0.00019622680003092503,
      "loss": 10.1177,
      "step": 80
    },
    {
      "epoch": 6.923076923076923,
      "grad_norm": 0.33203125,
      "learning_rate": 0.00019458172417006347,
      "loss": 10.0565,
      "step": 90
    },
    {
      "epoch": 7.6923076923076925,
      "grad_norm": 0.341796875,
      "learning_rate": 0.00019264940672148018,
      "loss": 9.998,
      "step": 100
    },
    {
      "epoch": 7.6923076923076925,
      "eval_loss": 9.973793029785156,
      "eval_runtime": 5.2822,
      "eval_samples_per_second": 284.164,
      "eval_steps_per_second": 4.544,
      "step": 100
    },
    {
      "epoch": 8.461538461538462,
      "grad_norm": 0.345703125,
      "learning_rate": 0.00019043571606975777,
      "loss": 9.942,
      "step": 110
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 0.34765625,
      "learning_rate": 0.0001879473751206489,
      "loss": 9.8887,
      "step": 120
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.349609375,
      "learning_rate": 0.00018519194088383273,
      "loss": 9.836,
      "step": 130
    },
    {
      "epoch": 10.76923076923077,
      "grad_norm": 0.357421875,
      "learning_rate": 0.0001821777815225245,
      "loss": 9.7845,
      "step": 140
    },
    {
      "epoch": 11.538461538461538,
      "grad_norm": 0.361328125,
      "learning_rate": 0.00017891405093963938,
      "loss": 9.7359,
      "step": 150
    },
    {
      "epoch": 11.538461538461538,
      "eval_loss": 9.719038963317871,
      "eval_runtime": 5.2736,
      "eval_samples_per_second": 284.626,
      "eval_steps_per_second": 4.551,
      "step": 150
    },
    {
      "epoch": 12.307692307692308,
      "grad_norm": 0.369140625,
      "learning_rate": 0.00017541066097768963,
      "loss": 9.688,
      "step": 160
    },
    {
      "epoch": 13.076923076923077,
      "grad_norm": 0.3671875,
      "learning_rate": 0.00017167825131684513,
      "loss": 9.6416,
      "step": 170
    },
    {
      "epoch": 13.846153846153847,
      "grad_norm": 0.375,
      "learning_rate": 0.00016772815716257412,
      "loss": 9.597,
      "step": 180
    },
    {
      "epoch": 14.615384615384615,
      "grad_norm": 0.380859375,
      "learning_rate": 0.00016357237482099684,
      "loss": 9.5534,
      "step": 190
    },
    {
      "epoch": 15.384615384615385,
      "grad_norm": 0.37890625,
      "learning_rate": 0.00015922352526649803,
      "loss": 9.5151,
      "step": 200
    },
    {
      "epoch": 15.384615384615385,
      "eval_loss": 9.504176139831543,
      "eval_runtime": 5.2799,
      "eval_samples_per_second": 284.283,
      "eval_steps_per_second": 4.545,
      "step": 200
    },
    {
      "epoch": 16.153846153846153,
      "grad_norm": 0.3828125,
      "learning_rate": 0.00015469481581224272,
      "loss": 9.4734,
      "step": 210
    },
    {
      "epoch": 16.923076923076923,
      "grad_norm": 0.388671875,
      "learning_rate": 0.00015000000000000001,
      "loss": 9.4381,
      "step": 220
    },
    {
      "epoch": 17.692307692307693,
      "grad_norm": 0.39453125,
      "learning_rate": 0.00014515333583108896,
      "loss": 9.4021,
      "step": 230
    },
    {
      "epoch": 18.46153846153846,
      "grad_norm": 0.396484375,
      "learning_rate": 0.00014016954246529696,
      "loss": 9.3725,
      "step": 240
    },
    {
      "epoch": 19.23076923076923,
      "grad_norm": 0.396484375,
      "learning_rate": 0.00013506375551927547,
      "loss": 9.3407,
      "step": 250
    },
    {
      "epoch": 19.23076923076923,
      "eval_loss": 9.341134071350098,
      "eval_runtime": 5.9586,
      "eval_samples_per_second": 251.904,
      "eval_steps_per_second": 4.028,
      "step": 250
    },
    {
      "epoch": 20.0,
      "grad_norm": 0.400390625,
      "learning_rate": 0.00012985148110016947,
      "loss": 9.3153,
      "step": 260
    },
    {
      "epoch": 20.76923076923077,
      "grad_norm": 0.40234375,
      "learning_rate": 0.00012454854871407994,
      "loss": 9.2905,
      "step": 270
    },
    {
      "epoch": 21.53846153846154,
      "grad_norm": 0.408203125,
      "learning_rate": 0.00011917106319237386,
      "loss": 9.2678,
      "step": 280
    },
    {
      "epoch": 22.307692307692307,
      "grad_norm": 0.404296875,
      "learning_rate": 0.00011373535578184082,
      "loss": 9.2496,
      "step": 290
    },
    {
      "epoch": 23.076923076923077,
      "grad_norm": 0.40625,
      "learning_rate": 0.00010825793454723325,
      "loss": 9.2338,
      "step": 300
    },
    {
      "epoch": 23.076923076923077,
      "eval_loss": 9.241479873657227,
      "eval_runtime": 5.2815,
      "eval_samples_per_second": 284.199,
      "eval_steps_per_second": 4.544,
      "step": 300
    },
    {
      "epoch": 23.846153846153847,
      "grad_norm": 0.412109375,
      "learning_rate": 0.00010275543423681621,
      "loss": 9.2185,
      "step": 310
    },
    {
      "epoch": 24.615384615384617,
      "grad_norm": 0.4140625,
      "learning_rate": 9.724456576318381e-05,
      "loss": 9.2101,
      "step": 320
    },
    {
      "epoch": 25.384615384615383,
      "grad_norm": 0.412109375,
      "learning_rate": 9.174206545276677e-05,
      "loss": 9.2002,
      "step": 330
    },
    {
      "epoch": 26.153846153846153,
      "grad_norm": 0.416015625,
      "learning_rate": 8.626464421815919e-05,
      "loss": 9.193,
      "step": 340
    },
    {
      "epoch": 26.923076923076923,
      "grad_norm": 0.41015625,
      "learning_rate": 8.082893680762619e-05,
      "loss": 9.1896,
      "step": 350
    },
    {
      "epoch": 26.923076923076923,
      "eval_loss": 9.20389461517334,
      "eval_runtime": 5.2643,
      "eval_samples_per_second": 285.129,
      "eval_steps_per_second": 4.559,
      "step": 350
    },
    {
      "epoch": 27.692307692307693,
      "grad_norm": 0.416015625,
      "learning_rate": 7.54514512859201e-05,
      "loss": 9.186,
      "step": 360
    },
    {
      "epoch": 28.46153846153846,
      "grad_norm": 0.412109375,
      "learning_rate": 7.014851889983057e-05,
      "loss": 9.182,
      "step": 370
    },
    {
      "epoch": 29.23076923076923,
      "grad_norm": 0.416015625,
      "learning_rate": 6.493624448072457e-05,
      "loss": 9.1823,
      "step": 380
    },
    {
      "epoch": 30.0,
      "grad_norm": 0.4140625,
      "learning_rate": 5.983045753470308e-05,
      "loss": 9.1806,
      "step": 390
    },
    {
      "epoch": 30.76923076923077,
      "grad_norm": 0.419921875,
      "learning_rate": 5.484666416891109e-05,
      "loss": 9.18,
      "step": 400
    },
    {
      "epoch": 30.76923076923077,
      "eval_loss": 9.195965766906738,
      "eval_runtime": 5.2708,
      "eval_samples_per_second": 284.775,
      "eval_steps_per_second": 4.553,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 47,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 163663419801600.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
|