{
"best_metric": 0.6198433637619019,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.06686726847208292,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0013373453694416582,
"grad_norm": 7.7768025398254395,
"learning_rate": 1.0100000000000002e-05,
"loss": 6.4221,
"step": 1
},
{
"epoch": 0.0013373453694416582,
"eval_loss": 6.176359176635742,
"eval_runtime": 163.9132,
"eval_samples_per_second": 61.465,
"eval_steps_per_second": 1.922,
"step": 1
},
{
"epoch": 0.0026746907388833165,
"grad_norm": 5.993926048278809,
"learning_rate": 2.0200000000000003e-05,
"loss": 6.4108,
"step": 2
},
{
"epoch": 0.004012036108324975,
"grad_norm": 5.60136079788208,
"learning_rate": 3.0299999999999998e-05,
"loss": 6.3333,
"step": 3
},
{
"epoch": 0.005349381477766633,
"grad_norm": 5.272766590118408,
"learning_rate": 4.0400000000000006e-05,
"loss": 5.6085,
"step": 4
},
{
"epoch": 0.006686726847208292,
"grad_norm": 4.917640686035156,
"learning_rate": 5.05e-05,
"loss": 5.1171,
"step": 5
},
{
"epoch": 0.00802407221664995,
"grad_norm": 4.776335716247559,
"learning_rate": 6.0599999999999996e-05,
"loss": 4.6322,
"step": 6
},
{
"epoch": 0.009361417586091608,
"grad_norm": 5.44158935546875,
"learning_rate": 7.07e-05,
"loss": 3.9832,
"step": 7
},
{
"epoch": 0.010698762955533266,
"grad_norm": 6.46093225479126,
"learning_rate": 8.080000000000001e-05,
"loss": 3.51,
"step": 8
},
{
"epoch": 0.012036108324974924,
"grad_norm": 4.540277004241943,
"learning_rate": 9.09e-05,
"loss": 2.9562,
"step": 9
},
{
"epoch": 0.013373453694416584,
"grad_norm": 4.324646472930908,
"learning_rate": 0.000101,
"loss": 2.6557,
"step": 10
},
{
"epoch": 0.014710799063858242,
"grad_norm": 2.8748514652252197,
"learning_rate": 0.00010046842105263158,
"loss": 2.1475,
"step": 11
},
{
"epoch": 0.0160481444332999,
"grad_norm": 6.15751314163208,
"learning_rate": 9.993684210526315e-05,
"loss": 2.1734,
"step": 12
},
{
"epoch": 0.017385489802741558,
"grad_norm": 2.7471158504486084,
"learning_rate": 9.940526315789473e-05,
"loss": 1.8299,
"step": 13
},
{
"epoch": 0.018722835172183216,
"grad_norm": 1.9818578958511353,
"learning_rate": 9.887368421052632e-05,
"loss": 1.6142,
"step": 14
},
{
"epoch": 0.020060180541624874,
"grad_norm": 1.5324410200119019,
"learning_rate": 9.83421052631579e-05,
"loss": 1.5587,
"step": 15
},
{
"epoch": 0.021397525911066532,
"grad_norm": 1.4006189107894897,
"learning_rate": 9.781052631578948e-05,
"loss": 1.3915,
"step": 16
},
{
"epoch": 0.02273487128050819,
"grad_norm": 1.3229491710662842,
"learning_rate": 9.727894736842106e-05,
"loss": 1.2889,
"step": 17
},
{
"epoch": 0.024072216649949848,
"grad_norm": 1.2081401348114014,
"learning_rate": 9.674736842105263e-05,
"loss": 1.3103,
"step": 18
},
{
"epoch": 0.02540956201939151,
"grad_norm": 1.85019052028656,
"learning_rate": 9.621578947368421e-05,
"loss": 1.3369,
"step": 19
},
{
"epoch": 0.026746907388833167,
"grad_norm": 1.4622437953948975,
"learning_rate": 9.568421052631578e-05,
"loss": 1.2271,
"step": 20
},
{
"epoch": 0.028084252758274825,
"grad_norm": 1.6035629510879517,
"learning_rate": 9.515263157894737e-05,
"loss": 1.1791,
"step": 21
},
{
"epoch": 0.029421598127716483,
"grad_norm": 1.191978096961975,
"learning_rate": 9.462105263157895e-05,
"loss": 1.149,
"step": 22
},
{
"epoch": 0.03075894349715814,
"grad_norm": 1.1039379835128784,
"learning_rate": 9.408947368421054e-05,
"loss": 1.0236,
"step": 23
},
{
"epoch": 0.0320962888665998,
"grad_norm": 0.9501120448112488,
"learning_rate": 9.355789473684211e-05,
"loss": 1.0568,
"step": 24
},
{
"epoch": 0.03343363423604146,
"grad_norm": 0.9906852841377258,
"learning_rate": 9.302631578947369e-05,
"loss": 1.1361,
"step": 25
},
{
"epoch": 0.034770979605483116,
"grad_norm": 1.1276156902313232,
"learning_rate": 9.249473684210526e-05,
"loss": 0.8694,
"step": 26
},
{
"epoch": 0.03610832497492478,
"grad_norm": 0.9503394365310669,
"learning_rate": 9.196315789473685e-05,
"loss": 0.9323,
"step": 27
},
{
"epoch": 0.03744567034436643,
"grad_norm": 0.9411821961402893,
"learning_rate": 9.143157894736843e-05,
"loss": 0.955,
"step": 28
},
{
"epoch": 0.03878301571380809,
"grad_norm": 0.8490228056907654,
"learning_rate": 9.09e-05,
"loss": 0.8836,
"step": 29
},
{
"epoch": 0.04012036108324975,
"grad_norm": 0.8191907405853271,
"learning_rate": 9.036842105263158e-05,
"loss": 0.8396,
"step": 30
},
{
"epoch": 0.04145770645269141,
"grad_norm": 0.7862714529037476,
"learning_rate": 8.983684210526316e-05,
"loss": 0.9116,
"step": 31
},
{
"epoch": 0.042795051822133064,
"grad_norm": 1.3398971557617188,
"learning_rate": 8.930526315789474e-05,
"loss": 0.8548,
"step": 32
},
{
"epoch": 0.044132397191574725,
"grad_norm": 1.1809728145599365,
"learning_rate": 8.877368421052632e-05,
"loss": 0.7964,
"step": 33
},
{
"epoch": 0.04546974256101638,
"grad_norm": 1.0573207139968872,
"learning_rate": 8.82421052631579e-05,
"loss": 0.721,
"step": 34
},
{
"epoch": 0.04680708793045804,
"grad_norm": 0.7660430669784546,
"learning_rate": 8.771052631578948e-05,
"loss": 0.7354,
"step": 35
},
{
"epoch": 0.048144433299899696,
"grad_norm": 0.8423962593078613,
"learning_rate": 8.717894736842105e-05,
"loss": 0.7304,
"step": 36
},
{
"epoch": 0.04948177866934136,
"grad_norm": 0.7409595847129822,
"learning_rate": 8.664736842105263e-05,
"loss": 0.8138,
"step": 37
},
{
"epoch": 0.05081912403878302,
"grad_norm": 1.0868232250213623,
"learning_rate": 8.61157894736842e-05,
"loss": 0.8021,
"step": 38
},
{
"epoch": 0.05215646940822467,
"grad_norm": 1.0219075679779053,
"learning_rate": 8.55842105263158e-05,
"loss": 0.7564,
"step": 39
},
{
"epoch": 0.053493814777666335,
"grad_norm": 0.8560124635696411,
"learning_rate": 8.505263157894737e-05,
"loss": 0.6875,
"step": 40
},
{
"epoch": 0.05483116014710799,
"grad_norm": 0.7004531025886536,
"learning_rate": 8.452105263157896e-05,
"loss": 0.6683,
"step": 41
},
{
"epoch": 0.05616850551654965,
"grad_norm": 0.6894235610961914,
"learning_rate": 8.398947368421053e-05,
"loss": 0.6691,
"step": 42
},
{
"epoch": 0.057505850885991305,
"grad_norm": 0.7334969639778137,
"learning_rate": 8.345789473684211e-05,
"loss": 0.7808,
"step": 43
},
{
"epoch": 0.05884319625543297,
"grad_norm": 0.8385999798774719,
"learning_rate": 8.292631578947368e-05,
"loss": 0.7513,
"step": 44
},
{
"epoch": 0.06018054162487462,
"grad_norm": 0.7268967032432556,
"learning_rate": 8.239473684210526e-05,
"loss": 0.6678,
"step": 45
},
{
"epoch": 0.06151788699431628,
"grad_norm": 1.0294888019561768,
"learning_rate": 8.186315789473683e-05,
"loss": 0.7087,
"step": 46
},
{
"epoch": 0.06285523236375794,
"grad_norm": 0.6869120001792908,
"learning_rate": 8.133157894736842e-05,
"loss": 0.6256,
"step": 47
},
{
"epoch": 0.0641925777331996,
"grad_norm": 0.5973199605941772,
"learning_rate": 8.080000000000001e-05,
"loss": 0.6142,
"step": 48
},
{
"epoch": 0.06552992310264126,
"grad_norm": 0.6814255118370056,
"learning_rate": 8.026842105263159e-05,
"loss": 0.7083,
"step": 49
},
{
"epoch": 0.06686726847208292,
"grad_norm": 0.803034245967865,
"learning_rate": 7.973684210526316e-05,
"loss": 0.7366,
"step": 50
},
{
"epoch": 0.06686726847208292,
"eval_loss": 0.6198433637619019,
"eval_runtime": 161.5386,
"eval_samples_per_second": 62.369,
"eval_steps_per_second": 1.95,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0576356021507195e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}