{
"best_metric": 3.989581823348999,
"best_model_checkpoint": "miner_id_24/checkpoint-600",
"epoch": 1.1320754716981132,
"eval_steps": 50,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0018867924528301887,
"eval_loss": 4.477752685546875,
"eval_runtime": 40.7428,
"eval_samples_per_second": 21.918,
"eval_steps_per_second": 5.498,
"step": 1
},
{
"epoch": 0.018867924528301886,
"grad_norm": 15.21529483795166,
"learning_rate": 0.0002,
"loss": 4.2379,
"step": 10
},
{
"epoch": 0.03773584905660377,
"grad_norm": 54.98305130004883,
"learning_rate": 0.0001998582695676762,
"loss": 27.8114,
"step": 20
},
{
"epoch": 0.05660377358490566,
"grad_norm": 16.903823852539062,
"learning_rate": 0.00019943348002101371,
"loss": 25.2762,
"step": 30
},
{
"epoch": 0.07547169811320754,
"grad_norm": 172.9484405517578,
"learning_rate": 0.00019872683547213446,
"loss": 23.4547,
"step": 40
},
{
"epoch": 0.09433962264150944,
"grad_norm": 164.54025268554688,
"learning_rate": 0.00019774033898178667,
"loss": 8.2119,
"step": 50
},
{
"epoch": 0.09433962264150944,
"eval_loss": 7.419450759887695,
"eval_runtime": 41.389,
"eval_samples_per_second": 21.576,
"eval_steps_per_second": 5.412,
"step": 50
},
{
"epoch": 0.11320754716981132,
"grad_norm": 44.91653060913086,
"learning_rate": 0.0001964767868814516,
"loss": 30.0243,
"step": 60
},
{
"epoch": 0.1320754716981132,
"grad_norm": 9.489733695983887,
"learning_rate": 0.00019493976084683813,
"loss": 25.1776,
"step": 70
},
{
"epoch": 0.1509433962264151,
"grad_norm": 47.95170211791992,
"learning_rate": 0.00019313361774523385,
"loss": 24.4991,
"step": 80
},
{
"epoch": 0.16981132075471697,
"grad_norm": 166.98104858398438,
"learning_rate": 0.00019106347728549135,
"loss": 16.642,
"step": 90
},
{
"epoch": 0.18867924528301888,
"grad_norm": 155.18824768066406,
"learning_rate": 0.00018873520750565718,
"loss": 6.9458,
"step": 100
},
{
"epoch": 0.18867924528301888,
"eval_loss": 14.900463104248047,
"eval_runtime": 41.4137,
"eval_samples_per_second": 21.563,
"eval_steps_per_second": 5.409,
"step": 100
},
{
"epoch": 0.20754716981132076,
"grad_norm": 31.870784759521484,
"learning_rate": 0.0001861554081393806,
"loss": 55.9772,
"step": 110
},
{
"epoch": 0.22641509433962265,
"grad_norm": 19.77384376525879,
"learning_rate": 0.0001833313919082515,
"loss": 25.2894,
"step": 120
},
{
"epoch": 0.24528301886792453,
"grad_norm": 71.0095443725586,
"learning_rate": 0.00018027116379309638,
"loss": 24.5163,
"step": 130
},
{
"epoch": 0.2641509433962264,
"grad_norm": 328.7481384277344,
"learning_rate": 0.00017698339834299061,
"loss": 15.8981,
"step": 140
},
{
"epoch": 0.2830188679245283,
"grad_norm": 242.60081481933594,
"learning_rate": 0.00017347741508630672,
"loss": 6.2023,
"step": 150
},
{
"epoch": 0.2830188679245283,
"eval_loss": 11.450271606445312,
"eval_runtime": 41.4022,
"eval_samples_per_second": 21.569,
"eval_steps_per_second": 5.41,
"step": 150
},
{
"epoch": 0.3018867924528302,
"grad_norm": 31.868478775024414,
"learning_rate": 0.0001697631521134985,
"loss": 38.3331,
"step": 160
},
{
"epoch": 0.32075471698113206,
"grad_norm": 25.038724899291992,
"learning_rate": 0.00016585113790650388,
"loss": 24.7657,
"step": 170
},
{
"epoch": 0.33962264150943394,
"grad_norm": 53.333683013916016,
"learning_rate": 0.0001617524614946192,
"loss": 23.8673,
"step": 180
},
{
"epoch": 0.3584905660377358,
"grad_norm": 183.1505889892578,
"learning_rate": 0.0001574787410214407,
"loss": 12.851,
"step": 190
},
{
"epoch": 0.37735849056603776,
"grad_norm": 199.62698364257812,
"learning_rate": 0.00015304209081197425,
"loss": 3.7041,
"step": 200
},
{
"epoch": 0.37735849056603776,
"eval_loss": 10.561553001403809,
"eval_runtime": 41.4742,
"eval_samples_per_second": 21.531,
"eval_steps_per_second": 5.401,
"step": 200
},
{
"epoch": 0.39622641509433965,
"grad_norm": 30.381412506103516,
"learning_rate": 0.00014845508703326504,
"loss": 36.4437,
"step": 210
},
{
"epoch": 0.41509433962264153,
"grad_norm": 25.108572006225586,
"learning_rate": 0.00014373073204588556,
"loss": 24.811,
"step": 220
},
{
"epoch": 0.4339622641509434,
"grad_norm": 25.86770248413086,
"learning_rate": 0.00013888241754733208,
"loss": 22.9303,
"step": 230
},
{
"epoch": 0.4528301886792453,
"grad_norm": 83.9566650390625,
"learning_rate": 0.00013392388661180303,
"loss": 14.6497,
"step": 240
},
{
"epoch": 0.4716981132075472,
"grad_norm": 46.80604934692383,
"learning_rate": 0.0001288691947339621,
"loss": 3.5489,
"step": 250
},
{
"epoch": 0.4716981132075472,
"eval_loss": 13.115553855895996,
"eval_runtime": 41.4443,
"eval_samples_per_second": 21.547,
"eval_steps_per_second": 5.405,
"step": 250
},
{
"epoch": 0.49056603773584906,
"grad_norm": 60.517303466796875,
"learning_rate": 0.0001237326699871115,
"loss": 52.7818,
"step": 260
},
{
"epoch": 0.5094339622641509,
"grad_norm": 88.43193817138672,
"learning_rate": 0.00011852887240871145,
"loss": 28.1595,
"step": 270
},
{
"epoch": 0.5283018867924528,
"grad_norm": 39.37873458862305,
"learning_rate": 0.00011327255272837221,
"loss": 23.8529,
"step": 280
},
{
"epoch": 0.5471698113207547,
"grad_norm": 118.77758026123047,
"learning_rate": 0.00010797861055530831,
"loss": 14.4378,
"step": 290
},
{
"epoch": 0.5660377358490566,
"grad_norm": 136.2257537841797,
"learning_rate": 0.00010266205214377748,
"loss": 4.6485,
"step": 300
},
{
"epoch": 0.5660377358490566,
"eval_loss": 15.816683769226074,
"eval_runtime": 41.4418,
"eval_samples_per_second": 21.548,
"eval_steps_per_second": 5.405,
"step": 300
},
{
"epoch": 0.5849056603773585,
"grad_norm": 54.89567947387695,
"learning_rate": 9.733794785622253e-05,
"loss": 53.7256,
"step": 310
},
{
"epoch": 0.6037735849056604,
"grad_norm": 32.46332931518555,
"learning_rate": 9.202138944469168e-05,
"loss": 24.7777,
"step": 320
},
{
"epoch": 0.6226415094339622,
"grad_norm": 23.78044319152832,
"learning_rate": 8.672744727162781e-05,
"loss": 23.3428,
"step": 330
},
{
"epoch": 0.6415094339622641,
"grad_norm": 223.9010467529297,
"learning_rate": 8.147112759128859e-05,
"loss": 17.2469,
"step": 340
},
{
"epoch": 0.660377358490566,
"grad_norm": 55.03976821899414,
"learning_rate": 7.626733001288851e-05,
"loss": 1.8853,
"step": 350
},
{
"epoch": 0.660377358490566,
"eval_loss": 12.180185317993164,
"eval_runtime": 41.5042,
"eval_samples_per_second": 21.516,
"eval_steps_per_second": 5.397,
"step": 350
},
{
"epoch": 0.6792452830188679,
"grad_norm": 707.2769775390625,
"learning_rate": 7.113080526603792e-05,
"loss": 68.5596,
"step": 360
},
{
"epoch": 0.6981132075471698,
"grad_norm": 32.202911376953125,
"learning_rate": 6.607611338819697e-05,
"loss": 27.5274,
"step": 370
},
{
"epoch": 0.7169811320754716,
"grad_norm": 32.115299224853516,
"learning_rate": 6.111758245266794e-05,
"loss": 22.6553,
"step": 380
},
{
"epoch": 0.7358490566037735,
"grad_norm": 260.97906494140625,
"learning_rate": 5.626926795411447e-05,
"loss": 17.7957,
"step": 390
},
{
"epoch": 0.7547169811320755,
"grad_norm": 19.01742935180664,
"learning_rate": 5.1544912966734994e-05,
"loss": 1.821,
"step": 400
},
{
"epoch": 0.7547169811320755,
"eval_loss": 10.776637077331543,
"eval_runtime": 41.3976,
"eval_samples_per_second": 21.571,
"eval_steps_per_second": 5.411,
"step": 400
},
{
"epoch": 0.7735849056603774,
"grad_norm": 814.9197998046875,
"learning_rate": 4.695790918802576e-05,
"loss": 66.7377,
"step": 410
},
{
"epoch": 0.7924528301886793,
"grad_norm": 30.05072784423828,
"learning_rate": 4.252125897855932e-05,
"loss": 30.2499,
"step": 420
},
{
"epoch": 0.8113207547169812,
"grad_norm": 24.943675994873047,
"learning_rate": 3.824753850538082e-05,
"loss": 23.6266,
"step": 430
},
{
"epoch": 0.8301886792452831,
"grad_norm": 178.87713623046875,
"learning_rate": 3.414886209349615e-05,
"loss": 11.4625,
"step": 440
},
{
"epoch": 0.8490566037735849,
"grad_norm": 0.000857223873026669,
"learning_rate": 3.0236847886501542e-05,
"loss": 1.0218,
"step": 450
},
{
"epoch": 0.8490566037735849,
"eval_loss": 20.48834991455078,
"eval_runtime": 41.5021,
"eval_samples_per_second": 21.517,
"eval_steps_per_second": 5.397,
"step": 450
},
{
"epoch": 0.8679245283018868,
"grad_norm": 695.1412353515625,
"learning_rate": 2.6522584913693294e-05,
"loss": 73.7925,
"step": 460
},
{
"epoch": 0.8867924528301887,
"grad_norm": 118.63365936279297,
"learning_rate": 2.301660165700936e-05,
"loss": 34.6368,
"step": 470
},
{
"epoch": 0.9056603773584906,
"grad_norm": 50.097171783447266,
"learning_rate": 1.9728836206903656e-05,
"loss": 23.91,
"step": 480
},
{
"epoch": 0.9245283018867925,
"grad_norm": 127.65890502929688,
"learning_rate": 1.6668608091748495e-05,
"loss": 9.4513,
"step": 490
},
{
"epoch": 0.9433962264150944,
"grad_norm": 3.7929797172546387,
"learning_rate": 1.3844591860619383e-05,
"loss": 1.6077,
"step": 500
},
{
"epoch": 0.9433962264150944,
"eval_loss": 4.622971534729004,
"eval_runtime": 41.3713,
"eval_samples_per_second": 21.585,
"eval_steps_per_second": 5.414,
"step": 500
},
{
"epoch": 0.9622641509433962,
"grad_norm": 320.5762634277344,
"learning_rate": 1.1264792494342857e-05,
"loss": 31.4437,
"step": 510
},
{
"epoch": 0.9811320754716981,
"grad_norm": 46.00068664550781,
"learning_rate": 8.936522714508678e-06,
"loss": 23.0031,
"step": 520
},
{
"epoch": 1.0,
"grad_norm": 4.2149858474731445,
"learning_rate": 6.866382254766157e-06,
"loss": 0.5552,
"step": 530
},
{
"epoch": 1.0188679245283019,
"grad_norm": 88.85747528076172,
"learning_rate": 5.060239153161872e-06,
"loss": 27.1639,
"step": 540
},
{
"epoch": 1.0377358490566038,
"grad_norm": 45.806617736816406,
"learning_rate": 3.5232131185484076e-06,
"loss": 24.8611,
"step": 550
},
{
"epoch": 1.0377358490566038,
"eval_loss": 3.9979488849639893,
"eval_runtime": 41.4471,
"eval_samples_per_second": 21.546,
"eval_steps_per_second": 5.404,
"step": 550
},
{
"epoch": 1.0566037735849056,
"grad_norm": 22.6519832611084,
"learning_rate": 2.259661018213333e-06,
"loss": 22.9177,
"step": 560
},
{
"epoch": 1.0754716981132075,
"grad_norm": 48.2337760925293,
"learning_rate": 1.2731645278655445e-06,
"loss": 3.576,
"step": 570
},
{
"epoch": 1.0943396226415094,
"grad_norm": 13.664042472839355,
"learning_rate": 5.665199789862907e-07,
"loss": 0.382,
"step": 580
},
{
"epoch": 1.1132075471698113,
"grad_norm": 96.89643096923828,
"learning_rate": 1.4173043232380557e-07,
"loss": 25.7203,
"step": 590
},
{
"epoch": 1.1320754716981132,
"grad_norm": 19.166614532470703,
"learning_rate": 0.0,
"loss": 24.3324,
"step": 600
},
{
"epoch": 1.1320754716981132,
"eval_loss": 3.989581823348999,
"eval_runtime": 41.5204,
"eval_samples_per_second": 21.507,
"eval_steps_per_second": 5.395,
"step": 600
}
],
"logging_steps": 10,
"max_steps": 600,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 150,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.6255532515157606e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}