Training in progress, step 500, checkpoint
{
"best_metric": 11.918383598327637,
"best_model_checkpoint": "miner_id_24/checkpoint-500",
"epoch": 0.11757789535567313,
"eval_steps": 50,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00023515579071134627,
"eval_loss": 11.93261432647705,
"eval_runtime": 5.8878,
"eval_samples_per_second": 304.189,
"eval_steps_per_second": 76.09,
"step": 1
},
{
"epoch": 0.0023515579071134627,
"grad_norm": 0.0224897600710392,
"learning_rate": 4.0600000000000004e-05,
"loss": 11.9324,
"step": 10
},
{
"epoch": 0.004703115814226925,
"grad_norm": 0.02093578316271305,
"learning_rate": 8.120000000000001e-05,
"loss": 11.9322,
"step": 20
},
{
"epoch": 0.007054673721340388,
"grad_norm": 0.035910580307245255,
"learning_rate": 0.00012179999999999999,
"loss": 11.932,
"step": 30
},
{
"epoch": 0.00940623162845385,
"grad_norm": 0.0722586065530777,
"learning_rate": 0.00016240000000000002,
"loss": 11.9307,
"step": 40
},
{
"epoch": 0.011757789535567314,
"grad_norm": 0.18456719815731049,
"learning_rate": 0.000203,
"loss": 11.9259,
"step": 50
},
{
"epoch": 0.011757789535567314,
"eval_loss": 11.92370891571045,
"eval_runtime": 5.9809,
"eval_samples_per_second": 299.453,
"eval_steps_per_second": 74.905,
"step": 50
},
{
"epoch": 0.014109347442680775,
"grad_norm": 0.0920960083603859,
"learning_rate": 0.00020275275110137215,
"loss": 11.923,
"step": 60
},
{
"epoch": 0.01646090534979424,
"grad_norm": 0.054449375718832016,
"learning_rate": 0.00020201220897726938,
"loss": 11.9227,
"step": 70
},
{
"epoch": 0.0188124632569077,
"grad_norm": 0.05156860873103142,
"learning_rate": 0.00020078198147448128,
"loss": 11.9227,
"step": 80
},
{
"epoch": 0.021164021164021163,
"grad_norm": 0.07899127155542374,
"learning_rate": 0.00019906806213773937,
"loss": 11.9202,
"step": 90
},
{
"epoch": 0.023515579071134628,
"grad_norm": 0.21356940269470215,
"learning_rate": 0.0001968788010097697,
"loss": 11.9206,
"step": 100
},
{
"epoch": 0.023515579071134628,
"eval_loss": 11.921173095703125,
"eval_runtime": 5.9782,
"eval_samples_per_second": 299.59,
"eval_steps_per_second": 74.939,
"step": 100
},
{
"epoch": 0.02586713697824809,
"grad_norm": 0.03613104671239853,
"learning_rate": 0.00019422486395072398,
"loss": 11.9222,
"step": 110
},
{
"epoch": 0.02821869488536155,
"grad_norm": 0.07303137332201004,
"learning_rate": 0.0001911191806751811,
"loss": 11.921,
"step": 120
},
{
"epoch": 0.030570252792475015,
"grad_norm": 0.0452888086438179,
"learning_rate": 0.00018757688175987723,
"loss": 11.9211,
"step": 130
},
{
"epoch": 0.03292181069958848,
"grad_norm": 0.06807375699281693,
"learning_rate": 0.00018361522492905716,
"loss": 11.9201,
"step": 140
},
{
"epoch": 0.03527336860670194,
"grad_norm": 0.07713644951581955,
"learning_rate": 0.00017925351097657625,
"loss": 11.9201,
"step": 150
},
{
"epoch": 0.03527336860670194,
"eval_loss": 11.920866012573242,
"eval_runtime": 5.9178,
"eval_samples_per_second": 302.648,
"eval_steps_per_second": 75.704,
"step": 150
},
{
"epoch": 0.0376249265138154,
"grad_norm": 0.031196460127830505,
"learning_rate": 0.00017451298973437308,
"loss": 11.922,
"step": 160
},
{
"epoch": 0.03997648442092887,
"grad_norm": 0.12291362881660461,
"learning_rate": 0.0001694167565454241,
"loss": 11.9212,
"step": 170
},
{
"epoch": 0.042328042328042326,
"grad_norm": 0.025286966934800148,
"learning_rate": 0.0001639896397455543,
"loss": 11.9208,
"step": 180
},
{
"epoch": 0.04467960023515579,
"grad_norm": 0.04822389781475067,
"learning_rate": 0.0001582580797022808,
"loss": 11.9206,
"step": 190
},
{
"epoch": 0.047031158142269255,
"grad_norm": 0.14918547868728638,
"learning_rate": 0.00015225,
"loss": 11.9218,
"step": 200
},
{
"epoch": 0.047031158142269255,
"eval_loss": 11.920147895812988,
"eval_runtime": 5.9709,
"eval_samples_per_second": 299.954,
"eval_steps_per_second": 75.03,
"step": 200
},
{
"epoch": 0.04938271604938271,
"grad_norm": 0.03527095913887024,
"learning_rate": 0.00014599467139909136,
"loss": 11.9203,
"step": 210
},
{
"epoch": 0.05173427395649618,
"grad_norm": 0.0468476302921772,
"learning_rate": 0.0001395225692317151,
"loss": 11.9197,
"step": 220
},
{
"epoch": 0.05408583186360964,
"grad_norm": 0.03908485919237137,
"learning_rate": 0.00013286522492905717,
"loss": 11.9189,
"step": 230
},
{
"epoch": 0.0564373897707231,
"grad_norm": 0.06336972862482071,
"learning_rate": 0.00012605507240336626,
"loss": 11.9198,
"step": 240
},
{
"epoch": 0.058788947677836566,
"grad_norm": 0.11935758590698242,
"learning_rate": 0.00011912529003319345,
"loss": 11.9193,
"step": 250
},
{
"epoch": 0.058788947677836566,
"eval_loss": 11.919634819030762,
"eval_runtime": 5.9117,
"eval_samples_per_second": 302.961,
"eval_steps_per_second": 75.783,
"step": 250
},
{
"epoch": 0.06114050558495003,
"grad_norm": 0.0363902673125267,
"learning_rate": 0.00011210963902166683,
"loss": 11.921,
"step": 260
},
{
"epoch": 0.06349206349206349,
"grad_norm": 0.06434895843267441,
"learning_rate": 0.00010504229891530386,
"loss": 11.92,
"step": 270
},
{
"epoch": 0.06584362139917696,
"grad_norm": 0.04380424693226814,
"learning_rate": 9.795770108469618e-05,
"loss": 11.9174,
"step": 280
},
{
"epoch": 0.06819517930629042,
"grad_norm": 0.06086306646466255,
"learning_rate": 9.08903609783332e-05,
"loss": 11.9192,
"step": 290
},
{
"epoch": 0.07054673721340388,
"grad_norm": 0.11999793350696564,
"learning_rate": 8.387470996680658e-05,
"loss": 11.9197,
"step": 300
},
{
"epoch": 0.07054673721340388,
"eval_loss": 11.919013023376465,
"eval_runtime": 5.9553,
"eval_samples_per_second": 300.74,
"eval_steps_per_second": 75.227,
"step": 300
},
{
"epoch": 0.07289829512051735,
"grad_norm": 0.047770265489816666,
"learning_rate": 7.694492759663374e-05,
"loss": 11.9195,
"step": 310
},
{
"epoch": 0.0752498530276308,
"grad_norm": 0.06404738128185272,
"learning_rate": 7.013477507094284e-05,
"loss": 11.9201,
"step": 320
},
{
"epoch": 0.07760141093474426,
"grad_norm": 0.05701601132750511,
"learning_rate": 6.347743076828492e-05,
"loss": 11.9192,
"step": 330
},
{
"epoch": 0.07995296884185774,
"grad_norm": 0.05881758779287338,
"learning_rate": 5.700532860090863e-05,
"loss": 11.9182,
"step": 340
},
{
"epoch": 0.0823045267489712,
"grad_norm": 0.12579509615898132,
"learning_rate": 5.075000000000002e-05,
"loss": 11.9186,
"step": 350
},
{
"epoch": 0.0823045267489712,
"eval_loss": 11.918659210205078,
"eval_runtime": 5.9277,
"eval_samples_per_second": 302.14,
"eval_steps_per_second": 75.577,
"step": 350
},
{
"epoch": 0.08465608465608465,
"grad_norm": 0.050394270569086075,
"learning_rate": 4.4741920297719214e-05,
"loss": 11.9199,
"step": 360
},
{
"epoch": 0.08700764256319812,
"grad_norm": 0.12194881588220596,
"learning_rate": 3.901036025444568e-05,
"loss": 11.9181,
"step": 370
},
{
"epoch": 0.08935920047031158,
"grad_norm": 0.0636436864733696,
"learning_rate": 3.358324345457592e-05,
"loss": 11.9169,
"step": 380
},
{
"epoch": 0.09171075837742504,
"grad_norm": 0.08077479898929596,
"learning_rate": 2.8487010265626928e-05,
"loss": 11.9168,
"step": 390
},
{
"epoch": 0.09406231628453851,
"grad_norm": 0.16011905670166016,
"learning_rate": 2.3746489023423744e-05,
"loss": 11.9207,
"step": 400
},
{
"epoch": 0.09406231628453851,
"eval_loss": 11.918496131896973,
"eval_runtime": 5.892,
"eval_samples_per_second": 303.971,
"eval_steps_per_second": 76.035,
"step": 400
},
{
"epoch": 0.09641387419165197,
"grad_norm": 0.060036178678274155,
"learning_rate": 1.9384775070942844e-05,
"loss": 11.9204,
"step": 410
},
{
"epoch": 0.09876543209876543,
"grad_norm": 0.09110881388187408,
"learning_rate": 1.5423118240122765e-05,
"loss": 11.9203,
"step": 420
},
{
"epoch": 0.1011169900058789,
"grad_norm": 0.05156229808926582,
"learning_rate": 1.188081932481891e-05,
"loss": 11.9167,
"step": 430
},
{
"epoch": 0.10346854791299236,
"grad_norm": 0.05308821424841881,
"learning_rate": 8.775136049276001e-06,
"loss": 11.9178,
"step": 440
},
{
"epoch": 0.10582010582010581,
"grad_norm": 0.20463146269321442,
"learning_rate": 6.121198990230306e-06,
"loss": 11.9188,
"step": 450
},
{
"epoch": 0.10582010582010581,
"eval_loss": 11.918533325195312,
"eval_runtime": 5.9192,
"eval_samples_per_second": 302.573,
"eval_steps_per_second": 75.685,
"step": 450
},
{
"epoch": 0.10817166372721929,
"grad_norm": 0.03463096171617508,
"learning_rate": 3.931937862260632e-06,
"loss": 11.9197,
"step": 460
},
{
"epoch": 0.11052322163433274,
"grad_norm": 0.0692448616027832,
"learning_rate": 2.2180185255187225e-06,
"loss": 11.9187,
"step": 470
},
{
"epoch": 0.1128747795414462,
"grad_norm": 0.07259061187505722,
"learning_rate": 9.877910227306082e-07,
"loss": 11.9176,
"step": 480
},
{
"epoch": 0.11522633744855967,
"grad_norm": 0.04710669815540314,
"learning_rate": 2.472488986278439e-07,
"loss": 11.9172,
"step": 490
},
{
"epoch": 0.11757789535567313,
"grad_norm": 0.13223369419574738,
"learning_rate": 0.0,
"loss": 11.9168,
"step": 500
},
{
"epoch": 0.11757789535567313,
"eval_loss": 11.918383598327637,
"eval_runtime": 5.9937,
"eval_samples_per_second": 298.813,
"eval_steps_per_second": 74.745,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 435983646720.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
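
The JSON above is the standard `trainer_state.json` written by the Hugging Face Transformers `Trainer` at each checkpoint. As a quick way to inspect it, here is a minimal sketch (not part of the checkpoint itself) that loads the state with only the standard library and prints the evaluation-loss curve; the `STATE_PATH` below is an assumption about where the file is saved and should be adjusted to the actual checkpoint directory.

```python
import json

# Assumed location of this checkpoint's state file; adjust as needed.
STATE_PATH = "miner_id_24/checkpoint-500/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

print(f"best_metric (eval_loss): {state['best_metric']}")
print(f"best_model_checkpoint:   {state['best_model_checkpoint']}")
print(f"global_step / max_steps: {state['global_step']} / {state['max_steps']}")

# log_history mixes training entries (which have "loss") and
# evaluation entries (which have "eval_loss"); split them apart.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print("\nstep  eval_loss")
for entry in eval_logs:
    print(f"{entry['step']:>4}  {entry['eval_loss']:.6f}")

# Last training loss logged before this checkpoint was saved.
last = train_logs[-1]
print(f"\nlast train loss at step {last['step']}: {last['loss']}")
```

Running this against the state shown here would list the eval loss decreasing from about 11.9326 at step 1 to 11.9184 at step 500, matching `best_metric`.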