{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 18,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 8.6195098709927,
      "learning_rate": 5e-06,
      "loss": 1.2879,
      "step": 1
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 9.18504010868732,
      "learning_rate": 1e-05,
      "loss": 1.3268,
      "step": 2
    },
    {
      "epoch": 0.5,
      "grad_norm": 6.218768040825584,
      "learning_rate": 9.903926402016153e-06,
      "loss": 1.2078,
      "step": 3
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 3.0252982804718846,
      "learning_rate": 9.619397662556434e-06,
      "loss": 1.1302,
      "step": 4
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 6.088666166550111,
      "learning_rate": 9.157348061512728e-06,
      "loss": 1.1449,
      "step": 5
    },
    {
      "epoch": 1.0,
      "grad_norm": 6.6511885203086525,
      "learning_rate": 8.535533905932739e-06,
      "loss": 1.085,
      "step": 6
    },
    {
      "epoch": 1.1666666666666667,
      "grad_norm": 6.827809589776133,
      "learning_rate": 7.777851165098012e-06,
      "loss": 1.07,
      "step": 7
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 4.930048078986181,
      "learning_rate": 6.913417161825449e-06,
      "loss": 1.0365,
      "step": 8
    },
    {
      "epoch": 1.5,
      "grad_norm": 3.183540927096083,
      "learning_rate": 5.975451610080643e-06,
      "loss": 1.0098,
      "step": 9
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 2.0474463937732112,
      "learning_rate": 5e-06,
      "loss": 0.9637,
      "step": 10
    },
    {
      "epoch": 1.8333333333333335,
      "grad_norm": 2.1859506115193605,
      "learning_rate": 4.02454838991936e-06,
      "loss": 0.9329,
      "step": 11
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.000136935221579,
      "learning_rate": 3.0865828381745515e-06,
      "loss": 0.9329,
      "step": 12
    },
    {
      "epoch": 2.1666666666666665,
      "grad_norm": 1.6376864195190064,
      "learning_rate": 2.2221488349019903e-06,
      "loss": 0.9097,
      "step": 13
    },
    {
      "epoch": 2.3333333333333335,
      "grad_norm": 1.3140675009197313,
      "learning_rate": 1.4644660940672628e-06,
      "loss": 0.8905,
      "step": 14
    },
    {
      "epoch": 2.5,
      "grad_norm": 1.1399610334807526,
      "learning_rate": 8.426519384872733e-07,
      "loss": 0.9138,
      "step": 15
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.9070029152199568,
      "learning_rate": 3.8060233744356634e-07,
      "loss": 0.9051,
      "step": 16
    },
    {
      "epoch": 2.8333333333333335,
      "grad_norm": 0.7711669705895277,
      "learning_rate": 9.607359798384785e-08,
      "loss": 0.902,
      "step": 17
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.6997339491210084,
      "learning_rate": 0.0,
      "loss": 0.8931,
      "step": 18
    },
    {
      "epoch": 3.0,
      "step": 18,
      "total_flos": 6.177445717318042e+16,
      "train_loss": 1.0301417509714763,
      "train_runtime": 973.1743,
      "train_samples_per_second": 1.773,
      "train_steps_per_second": 0.018
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 18,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.177445717318042e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}