{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08333333333333333,
      "grad_norm": 1.9183009817804988,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 3.4904,
      "step": 1
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 1.4669764740494382,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 3.6113,
      "step": 5
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 2.3102441270892657,
      "learning_rate": 1.973044870579824e-05,
      "loss": 3.5363,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.6004714965820312,
      "eval_runtime": 0.3916,
      "eval_samples_per_second": 45.968,
      "eval_steps_per_second": 7.661,
      "step": 12
    },
    {
      "epoch": 1.25,
      "grad_norm": 1.2536552391599605,
      "learning_rate": 1.866025403784439e-05,
      "loss": 3.4578,
      "step": 15
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.8379504173528296,
      "learning_rate": 1.686241637868734e-05,
      "loss": 3.516,
      "step": 20
    },
    {
      "epoch": 2.0,
      "eval_loss": 3.560063362121582,
      "eval_runtime": 0.395,
      "eval_samples_per_second": 45.564,
      "eval_steps_per_second": 7.594,
      "step": 24
    },
    {
      "epoch": 2.0833333333333335,
      "grad_norm": 0.8826743196650887,
      "learning_rate": 1.4487991802004625e-05,
      "loss": 3.429,
      "step": 25
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.9643551070955534,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 3.4933,
      "step": 30
    },
    {
      "epoch": 2.9166666666666665,
      "grad_norm": 0.800770231101502,
      "learning_rate": 8.839070858747697e-06,
      "loss": 3.4836,
      "step": 35
    },
    {
      "epoch": 3.0,
      "eval_loss": 3.534764289855957,
      "eval_runtime": 0.5846,
      "eval_samples_per_second": 30.789,
      "eval_steps_per_second": 5.131,
      "step": 36
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 0.8243232107653643,
      "learning_rate": 6.039202339608432e-06,
      "loss": 3.4287,
      "step": 40
    },
    {
      "epoch": 3.75,
      "grad_norm": 1.0751974758407474,
      "learning_rate": 3.5721239031346067e-06,
      "loss": 3.5007,
      "step": 45
    },
    {
      "epoch": 4.0,
      "eval_loss": 3.523860454559326,
      "eval_runtime": 0.3897,
      "eval_samples_per_second": 46.188,
      "eval_steps_per_second": 7.698,
      "step": 48
    },
    {
      "epoch": 4.166666666666667,
      "grad_norm": 0.7780096021946287,
      "learning_rate": 1.6451218858706374e-06,
      "loss": 3.4041,
      "step": 50
    },
    {
      "epoch": 4.583333333333333,
      "grad_norm": 0.9901654284407495,
      "learning_rate": 4.2010487684511105e-07,
      "loss": 3.4807,
      "step": 55
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.3948344306556273,
      "learning_rate": 0.0,
      "loss": 3.4187,
      "step": 60
    },
    {
      "epoch": 5.0,
      "eval_loss": 3.5221145153045654,
      "eval_runtime": 0.3973,
      "eval_samples_per_second": 45.3,
      "eval_steps_per_second": 7.55,
      "step": 60
    },
    {
      "epoch": 5.0,
      "step": 60,
      "total_flos": 18203285913600.0,
      "train_loss": 3.477997573216756,
      "train_runtime": 36.1944,
      "train_samples_per_second": 12.295,
      "train_steps_per_second": 1.658
    }
  ],
  "logging_steps": 5,
  "max_steps": 60,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 18203285913600.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}