{
"best_metric": 0.4201970100402832,
"best_model_checkpoint": "/workspace/plateer_classifier_v0.1_result/checkpoint-15000",
"epoch": 0.08784117144986245,
"eval_steps": 5000,
"global_step": 15000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0014640195241643742,
"grad_norm": 50.05304718017578,
"learning_rate": 4.880000000000001e-06,
"loss": 4.3958,
"step": 250
},
{
"epoch": 0.0029280390483287485,
"grad_norm": 48.363304138183594,
"learning_rate": 9.88e-06,
"loss": 1.6496,
"step": 500
},
{
"epoch": 0.004392058572493123,
"grad_norm": 54.546974182128906,
"learning_rate": 1.488e-05,
"loss": 0.8787,
"step": 750
},
{
"epoch": 0.005856078096657497,
"grad_norm": 50.317874908447266,
"learning_rate": 1.9880000000000003e-05,
"loss": 0.7721,
"step": 1000
},
{
"epoch": 0.007320097620821872,
"grad_norm": 62.48823928833008,
"learning_rate": 2.488e-05,
"loss": 0.7047,
"step": 1250
},
{
"epoch": 0.008784117144986246,
"grad_norm": 44.35001754760742,
"learning_rate": 2.9880000000000002e-05,
"loss": 0.6749,
"step": 1500
},
{
"epoch": 0.01024813666915062,
"grad_norm": 36.486793518066406,
"learning_rate": 3.4880000000000005e-05,
"loss": 0.6409,
"step": 1750
},
{
"epoch": 0.011712156193314994,
"grad_norm": 47.03588104248047,
"learning_rate": 3.988e-05,
"loss": 0.6406,
"step": 2000
},
{
"epoch": 0.013176175717479368,
"grad_norm": 31.227832794189453,
"learning_rate": 4.488e-05,
"loss": 0.6149,
"step": 2250
},
{
"epoch": 0.014640195241643743,
"grad_norm": 39.8408317565918,
"learning_rate": 4.9880000000000004e-05,
"loss": 0.5956,
"step": 2500
},
{
"epoch": 0.016104214765808117,
"grad_norm": 41.118736267089844,
"learning_rate": 5.4879999999999996e-05,
"loss": 0.5905,
"step": 2750
},
{
"epoch": 0.017568234289972492,
"grad_norm": 29.624338150024414,
"learning_rate": 5.988e-05,
"loss": 0.5608,
"step": 3000
},
{
"epoch": 0.019032253814136865,
"grad_norm": 22.993818283081055,
"learning_rate": 6.488e-05,
"loss": 0.5614,
"step": 3250
},
{
"epoch": 0.02049627333830124,
"grad_norm": 19.964269638061523,
"learning_rate": 6.988e-05,
"loss": 0.5569,
"step": 3500
},
{
"epoch": 0.021960292862465612,
"grad_norm": 36.538047790527344,
"learning_rate": 7.488e-05,
"loss": 0.5316,
"step": 3750
},
{
"epoch": 0.023424312386629988,
"grad_norm": 37.63505935668945,
"learning_rate": 7.988e-05,
"loss": 0.5364,
"step": 4000
},
{
"epoch": 0.024888331910794363,
"grad_norm": 25.934967041015625,
"learning_rate": 8.486000000000001e-05,
"loss": 0.5234,
"step": 4250
},
{
"epoch": 0.026352351434958735,
"grad_norm": 24.810028076171875,
"learning_rate": 8.986e-05,
"loss": 0.5155,
"step": 4500
},
{
"epoch": 0.02781637095912311,
"grad_norm": 32.76811981201172,
"learning_rate": 9.484e-05,
"loss": 0.5022,
"step": 4750
},
{
"epoch": 0.029280390483287486,
"grad_norm": 27.094772338867188,
"learning_rate": 9.984e-05,
"loss": 0.5023,
"step": 5000
},
{
"epoch": 0.029280390483287486,
"eval_accuracy": 0.8572352668691132,
"eval_loss": 0.5044249296188354,
"eval_runtime": 11541.1431,
"eval_samples_per_second": 210.432,
"eval_steps_per_second": 6.576,
"step": 5000
},
{
"epoch": 0.03074441000745186,
"grad_norm": 24.74563217163086,
"learning_rate": 0.00010484,
"loss": 0.5073,
"step": 5250
},
{
"epoch": 0.032208429531616234,
"grad_norm": 17.229019165039062,
"learning_rate": 0.00010984,
"loss": 0.4932,
"step": 5500
},
{
"epoch": 0.03367244905578061,
"grad_norm": 23.318979263305664,
"learning_rate": 0.00011484000000000002,
"loss": 0.504,
"step": 5750
},
{
"epoch": 0.035136468579944985,
"grad_norm": 22.271846771240234,
"learning_rate": 0.00011983999999999999,
"loss": 0.4817,
"step": 6000
},
{
"epoch": 0.036600488104109354,
"grad_norm": 24.304887771606445,
"learning_rate": 0.00012484,
"loss": 0.4966,
"step": 6250
},
{
"epoch": 0.03806450762827373,
"grad_norm": 23.76158905029297,
"learning_rate": 0.00012984000000000002,
"loss": 0.4899,
"step": 6500
},
{
"epoch": 0.039528527152438105,
"grad_norm": 20.765274047851562,
"learning_rate": 0.00013484,
"loss": 0.4773,
"step": 6750
},
{
"epoch": 0.04099254667660248,
"grad_norm": 12.793950080871582,
"learning_rate": 0.00013982000000000003,
"loss": 0.4781,
"step": 7000
},
{
"epoch": 0.042456566200766856,
"grad_norm": 14.128210067749023,
"learning_rate": 0.00014482,
"loss": 0.4687,
"step": 7250
},
{
"epoch": 0.043920585724931224,
"grad_norm": 22.348928451538086,
"learning_rate": 0.00014982,
"loss": 0.4722,
"step": 7500
},
{
"epoch": 0.0453846052490956,
"grad_norm": 17.29800796508789,
"learning_rate": 0.00015480000000000002,
"loss": 0.4692,
"step": 7750
},
{
"epoch": 0.046848624773259975,
"grad_norm": 11.0147066116333,
"learning_rate": 0.0001598,
"loss": 0.4689,
"step": 8000
},
{
"epoch": 0.04831264429742435,
"grad_norm": 11.713265419006348,
"learning_rate": 0.0001648,
"loss": 0.4788,
"step": 8250
},
{
"epoch": 0.049776663821588726,
"grad_norm": 12.367693901062012,
"learning_rate": 0.0001698,
"loss": 0.4697,
"step": 8500
},
{
"epoch": 0.0512406833457531,
"grad_norm": 8.11889934539795,
"learning_rate": 0.00017480000000000002,
"loss": 0.4696,
"step": 8750
},
{
"epoch": 0.05270470286991747,
"grad_norm": 12.321019172668457,
"learning_rate": 0.0001798,
"loss": 0.461,
"step": 9000
},
{
"epoch": 0.054168722394081846,
"grad_norm": 15.612183570861816,
"learning_rate": 0.00018480000000000002,
"loss": 0.4646,
"step": 9250
},
{
"epoch": 0.05563274191824622,
"grad_norm": 10.72978687286377,
"learning_rate": 0.0001898,
"loss": 0.4673,
"step": 9500
},
{
"epoch": 0.0570967614424106,
"grad_norm": 8.815441131591797,
"learning_rate": 0.0001948,
"loss": 0.4472,
"step": 9750
},
{
"epoch": 0.05856078096657497,
"grad_norm": 8.681705474853516,
"learning_rate": 0.0001998,
"loss": 0.4629,
"step": 10000
},
{
"epoch": 0.05856078096657497,
"eval_accuracy": 0.8688706572649133,
"eval_loss": 0.457188218832016,
"eval_runtime": 11537.8227,
"eval_samples_per_second": 210.492,
"eval_steps_per_second": 6.578,
"step": 10000
},
{
"epoch": 0.06002480049073934,
"grad_norm": 13.643828392028809,
"learning_rate": 0.0001997014219778306,
"loss": 0.456,
"step": 10250
},
{
"epoch": 0.06148882001490372,
"grad_norm": 13.211404800415039,
"learning_rate": 0.00019939040320473745,
"loss": 0.4666,
"step": 10500
},
{
"epoch": 0.06295283953906809,
"grad_norm": 11.1001615524292,
"learning_rate": 0.00019907938443164432,
"loss": 0.4495,
"step": 10750
},
{
"epoch": 0.06441685906323247,
"grad_norm": 8.222249984741211,
"learning_rate": 0.00019876836565855117,
"loss": 0.4483,
"step": 11000
},
{
"epoch": 0.06588087858739684,
"grad_norm": 13.589752197265625,
"learning_rate": 0.0001984585909605504,
"loss": 0.4438,
"step": 11250
},
{
"epoch": 0.06734489811156122,
"grad_norm": 9.988068580627441,
"learning_rate": 0.00019814757218745724,
"loss": 0.447,
"step": 11500
},
{
"epoch": 0.0688089176357256,
"grad_norm": 8.311960220336914,
"learning_rate": 0.0001978365534143641,
"loss": 0.4476,
"step": 11750
},
{
"epoch": 0.07027293715988997,
"grad_norm": 8.099685668945312,
"learning_rate": 0.00019752553464127094,
"loss": 0.4477,
"step": 12000
},
{
"epoch": 0.07173695668405435,
"grad_norm": 8.23130989074707,
"learning_rate": 0.00019721451586817782,
"loss": 0.4385,
"step": 12250
},
{
"epoch": 0.07320097620821871,
"grad_norm": 10.875362396240234,
"learning_rate": 0.00019690349709508467,
"loss": 0.4345,
"step": 12500
},
{
"epoch": 0.07466499573238308,
"grad_norm": 9.479572296142578,
"learning_rate": 0.00019659247832199152,
"loss": 0.4345,
"step": 12750
},
{
"epoch": 0.07612901525654746,
"grad_norm": 11.883151054382324,
"learning_rate": 0.0001962814595488984,
"loss": 0.4241,
"step": 13000
},
{
"epoch": 0.07759303478071183,
"grad_norm": 8.15208911895752,
"learning_rate": 0.00019597044077580524,
"loss": 0.4335,
"step": 13250
},
{
"epoch": 0.07905705430487621,
"grad_norm": 9.323240280151367,
"learning_rate": 0.0001956594220027121,
"loss": 0.4396,
"step": 13500
},
{
"epoch": 0.08052107382904058,
"grad_norm": 7.250824928283691,
"learning_rate": 0.00019534840322961897,
"loss": 0.4376,
"step": 13750
},
{
"epoch": 0.08198509335320496,
"grad_norm": 12.220071792602539,
"learning_rate": 0.0001950373844565258,
"loss": 0.4323,
"step": 14000
},
{
"epoch": 0.08344911287736934,
"grad_norm": 8.460916519165039,
"learning_rate": 0.00019472636568343266,
"loss": 0.4271,
"step": 14250
},
{
"epoch": 0.08491313240153371,
"grad_norm": 6.110500812530518,
"learning_rate": 0.0001944153469103395,
"loss": 0.4253,
"step": 14500
},
{
"epoch": 0.08637715192569809,
"grad_norm": 10.618386268615723,
"learning_rate": 0.00019410432813724636,
"loss": 0.427,
"step": 14750
},
{
"epoch": 0.08784117144986245,
"grad_norm": 9.827556610107422,
"learning_rate": 0.00019379330936415324,
"loss": 0.4254,
"step": 15000
},
{
"epoch": 0.08784117144986245,
"eval_accuracy": 0.877075711565186,
"eval_loss": 0.4201970100402832,
"eval_runtime": 11537.2443,
"eval_samples_per_second": 210.503,
"eval_steps_per_second": 6.578,
"step": 15000
}
],
"logging_steps": 250,
"max_steps": 170762,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.8871669372931277e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}