{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.998109640831758,
"eval_steps": 500,
"global_step": 396,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02520478890989288,
"grad_norm": 8.663247629605545,
"learning_rate": 5e-06,
"loss": 0.9445,
"step": 10
},
{
"epoch": 0.05040957781978576,
"grad_norm": 3.604310550347369,
"learning_rate": 5e-06,
"loss": 0.8102,
"step": 20
},
{
"epoch": 0.07561436672967864,
"grad_norm": 2.0617190728114823,
"learning_rate": 5e-06,
"loss": 0.7619,
"step": 30
},
{
"epoch": 0.10081915563957151,
"grad_norm": 1.18844546620169,
"learning_rate": 5e-06,
"loss": 0.7359,
"step": 40
},
{
"epoch": 0.1260239445494644,
"grad_norm": 0.9098677466338352,
"learning_rate": 5e-06,
"loss": 0.7192,
"step": 50
},
{
"epoch": 0.15122873345935728,
"grad_norm": 1.1179673603795768,
"learning_rate": 5e-06,
"loss": 0.7017,
"step": 60
},
{
"epoch": 0.17643352236925017,
"grad_norm": 0.6728813590124636,
"learning_rate": 5e-06,
"loss": 0.6883,
"step": 70
},
{
"epoch": 0.20163831127914303,
"grad_norm": 0.616114295888885,
"learning_rate": 5e-06,
"loss": 0.676,
"step": 80
},
{
"epoch": 0.22684310018903592,
"grad_norm": 0.6244841168422012,
"learning_rate": 5e-06,
"loss": 0.6753,
"step": 90
},
{
"epoch": 0.2520478890989288,
"grad_norm": 0.6316192036087691,
"learning_rate": 5e-06,
"loss": 0.6557,
"step": 100
},
{
"epoch": 0.2772526780088217,
"grad_norm": 0.6364791638345465,
"learning_rate": 5e-06,
"loss": 0.6644,
"step": 110
},
{
"epoch": 0.30245746691871456,
"grad_norm": 0.5617775979562007,
"learning_rate": 5e-06,
"loss": 0.6625,
"step": 120
},
{
"epoch": 0.3276622558286074,
"grad_norm": 0.567108836097158,
"learning_rate": 5e-06,
"loss": 0.654,
"step": 130
},
{
"epoch": 0.35286704473850034,
"grad_norm": 0.5642557336913758,
"learning_rate": 5e-06,
"loss": 0.6512,
"step": 140
},
{
"epoch": 0.3780718336483932,
"grad_norm": 0.6891460217415533,
"learning_rate": 5e-06,
"loss": 0.6517,
"step": 150
},
{
"epoch": 0.40327662255828606,
"grad_norm": 0.5762478394818821,
"learning_rate": 5e-06,
"loss": 0.6409,
"step": 160
},
{
"epoch": 0.428481411468179,
"grad_norm": 0.5764786797811281,
"learning_rate": 5e-06,
"loss": 0.6457,
"step": 170
},
{
"epoch": 0.45368620037807184,
"grad_norm": 0.5790937278606256,
"learning_rate": 5e-06,
"loss": 0.6447,
"step": 180
},
{
"epoch": 0.4788909892879647,
"grad_norm": 0.6151732237689204,
"learning_rate": 5e-06,
"loss": 0.639,
"step": 190
},
{
"epoch": 0.5040957781978576,
"grad_norm": 0.6117049362959994,
"learning_rate": 5e-06,
"loss": 0.6376,
"step": 200
},
{
"epoch": 0.5293005671077504,
"grad_norm": 0.5265553452792068,
"learning_rate": 5e-06,
"loss": 0.6366,
"step": 210
},
{
"epoch": 0.5545053560176434,
"grad_norm": 0.5227752714039351,
"learning_rate": 5e-06,
"loss": 0.638,
"step": 220
},
{
"epoch": 0.5797101449275363,
"grad_norm": 0.6921733773658189,
"learning_rate": 5e-06,
"loss": 0.6345,
"step": 230
},
{
"epoch": 0.6049149338374291,
"grad_norm": 0.6523199668192793,
"learning_rate": 5e-06,
"loss": 0.6337,
"step": 240
},
{
"epoch": 0.630119722747322,
"grad_norm": 0.6158625325468641,
"learning_rate": 5e-06,
"loss": 0.633,
"step": 250
},
{
"epoch": 0.6553245116572148,
"grad_norm": 0.5223527686271148,
"learning_rate": 5e-06,
"loss": 0.6266,
"step": 260
},
{
"epoch": 0.6805293005671077,
"grad_norm": 0.5817620101876396,
"learning_rate": 5e-06,
"loss": 0.6245,
"step": 270
},
{
"epoch": 0.7057340894770007,
"grad_norm": 0.5130963229148819,
"learning_rate": 5e-06,
"loss": 0.6309,
"step": 280
},
{
"epoch": 0.7309388783868935,
"grad_norm": 0.5250410285378843,
"learning_rate": 5e-06,
"loss": 0.623,
"step": 290
},
{
"epoch": 0.7561436672967864,
"grad_norm": 0.528760930353754,
"learning_rate": 5e-06,
"loss": 0.621,
"step": 300
},
{
"epoch": 0.7813484562066793,
"grad_norm": 0.48737586255544146,
"learning_rate": 5e-06,
"loss": 0.6141,
"step": 310
},
{
"epoch": 0.8065532451165721,
"grad_norm": 0.6021703382001868,
"learning_rate": 5e-06,
"loss": 0.6199,
"step": 320
},
{
"epoch": 0.831758034026465,
"grad_norm": 0.6054382209455371,
"learning_rate": 5e-06,
"loss": 0.6245,
"step": 330
},
{
"epoch": 0.856962822936358,
"grad_norm": 0.5202200830394619,
"learning_rate": 5e-06,
"loss": 0.6224,
"step": 340
},
{
"epoch": 0.8821676118462508,
"grad_norm": 0.6290990280129226,
"learning_rate": 5e-06,
"loss": 0.6153,
"step": 350
},
{
"epoch": 0.9073724007561437,
"grad_norm": 0.5370989167044362,
"learning_rate": 5e-06,
"loss": 0.6104,
"step": 360
},
{
"epoch": 0.9325771896660365,
"grad_norm": 0.6295835561355619,
"learning_rate": 5e-06,
"loss": 0.614,
"step": 370
},
{
"epoch": 0.9577819785759294,
"grad_norm": 0.4795035908507123,
"learning_rate": 5e-06,
"loss": 0.6144,
"step": 380
},
{
"epoch": 0.9829867674858223,
"grad_norm": 0.5341688832960285,
"learning_rate": 5e-06,
"loss": 0.6119,
"step": 390
},
{
"epoch": 0.998109640831758,
"eval_loss": 0.6133832335472107,
"eval_runtime": 304.1094,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 0.552,
"step": 396
}
],
"logging_steps": 10,
"max_steps": 1188,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 664361647472640.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}