{
"best_metric": 1.7455496788024902,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 3.0797872340425534,
"eval_steps": 25,
"global_step": 36,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0851063829787234,
"grad_norm": 0.337377667427063,
"learning_rate": 5e-05,
"loss": 1.543,
"step": 1
},
{
"epoch": 0.0851063829787234,
"eval_loss": 2.8193228244781494,
"eval_runtime": 1.9155,
"eval_samples_per_second": 26.103,
"eval_steps_per_second": 6.787,
"step": 1
},
{
"epoch": 0.1702127659574468,
"grad_norm": 1.0103968381881714,
"learning_rate": 0.0001,
"loss": 2.3898,
"step": 2
},
{
"epoch": 0.2553191489361702,
"grad_norm": 0.40538832545280457,
"learning_rate": 9.980803793327656e-05,
"loss": 1.3125,
"step": 3
},
{
"epoch": 0.3404255319148936,
"grad_norm": 0.30337655544281006,
"learning_rate": 9.923378948577559e-05,
"loss": 1.6227,
"step": 4
},
{
"epoch": 0.425531914893617,
"grad_norm": 2.6769063472747803,
"learning_rate": 9.828215394277687e-05,
"loss": 2.5671,
"step": 5
},
{
"epoch": 0.5106382978723404,
"grad_norm": 0.2441849261522293,
"learning_rate": 9.6961250323196e-05,
"loss": 1.1992,
"step": 6
},
{
"epoch": 0.5957446808510638,
"grad_norm": 0.3681361973285675,
"learning_rate": 9.528234811097782e-05,
"loss": 1.7804,
"step": 7
},
{
"epoch": 0.6808510638297872,
"grad_norm": 1.3270093202590942,
"learning_rate": 9.325977110783264e-05,
"loss": 2.2679,
"step": 8
},
{
"epoch": 0.7659574468085106,
"grad_norm": 0.2342531681060791,
"learning_rate": 9.091077522761079e-05,
"loss": 1.1517,
"step": 9
},
{
"epoch": 0.851063829787234,
"grad_norm": 1.1225703954696655,
"learning_rate": 8.825540127492967e-05,
"loss": 1.8651,
"step": 10
},
{
"epoch": 0.9361702127659575,
"grad_norm": 1.0222384929656982,
"learning_rate": 8.531630396409507e-05,
"loss": 2.0007,
"step": 11
},
{
"epoch": 1.0265957446808511,
"grad_norm": 0.36375167965888977,
"learning_rate": 8.211855863706654e-05,
"loss": 1.9288,
"step": 12
},
{
"epoch": 1.1117021276595744,
"grad_norm": 0.33239567279815674,
"learning_rate": 7.868944732948101e-05,
"loss": 1.6168,
"step": 13
},
{
"epoch": 1.196808510638298,
"grad_norm": 0.26805180311203003,
"learning_rate": 7.505822600994424e-05,
"loss": 1.0579,
"step": 14
},
{
"epoch": 1.2819148936170213,
"grad_norm": 0.3009487986564636,
"learning_rate": 7.12558749784219e-05,
"loss": 1.5312,
"step": 15
},
{
"epoch": 1.3670212765957448,
"grad_norm": 0.39399126172065735,
"learning_rate": 6.731483455324374e-05,
"loss": 1.8618,
"step": 16
},
{
"epoch": 1.452127659574468,
"grad_norm": 0.34218811988830566,
"learning_rate": 6.326872830174567e-05,
"loss": 0.9687,
"step": 17
},
{
"epoch": 1.5372340425531914,
"grad_norm": 0.3016256093978882,
"learning_rate": 5.9152076175848594e-05,
"loss": 1.8985,
"step": 18
},
{
"epoch": 1.622340425531915,
"grad_norm": 0.38652870059013367,
"learning_rate": 5.500000000000001e-05,
"loss": 1.827,
"step": 19
},
{
"epoch": 1.7074468085106385,
"grad_norm": 0.4191890060901642,
"learning_rate": 5.0847923824151424e-05,
"loss": 1.2199,
"step": 20
},
{
"epoch": 1.7925531914893615,
"grad_norm": 0.278137743473053,
"learning_rate": 4.673127169825433e-05,
"loss": 1.7785,
"step": 21
},
{
"epoch": 1.877659574468085,
"grad_norm": 0.4404103755950928,
"learning_rate": 4.268516544675628e-05,
"loss": 2.0553,
"step": 22
},
{
"epoch": 1.9627659574468086,
"grad_norm": 0.537523627281189,
"learning_rate": 3.8744125021578126e-05,
"loss": 2.2151,
"step": 23
},
{
"epoch": 2.0531914893617023,
"grad_norm": 0.16800740361213684,
"learning_rate": 3.494177399005578e-05,
"loss": 1.1205,
"step": 24
},
{
"epoch": 2.1382978723404253,
"grad_norm": 0.34991270303726196,
"learning_rate": 3.1310552670518986e-05,
"loss": 1.9167,
"step": 25
},
{
"epoch": 2.1382978723404253,
"eval_loss": 1.7455496788024902,
"eval_runtime": 0.9406,
"eval_samples_per_second": 53.157,
"eval_steps_per_second": 13.821,
"step": 25
},
{
"epoch": 2.223404255319149,
"grad_norm": 0.18386214971542358,
"learning_rate": 2.7881441362933468e-05,
"loss": 0.4926,
"step": 26
},
{
"epoch": 2.3085106382978724,
"grad_norm": 0.2846694588661194,
"learning_rate": 2.4683696035904928e-05,
"loss": 1.9249,
"step": 27
},
{
"epoch": 2.393617021276596,
"grad_norm": 0.40197649598121643,
"learning_rate": 2.1744598725070347e-05,
"loss": 2.0293,
"step": 28
},
{
"epoch": 2.478723404255319,
"grad_norm": 0.21188139915466309,
"learning_rate": 1.9089224772389225e-05,
"loss": 0.7225,
"step": 29
},
{
"epoch": 2.5638297872340425,
"grad_norm": 0.25818443298339844,
"learning_rate": 1.674022889216737e-05,
"loss": 1.9215,
"step": 30
},
{
"epoch": 2.648936170212766,
"grad_norm": 0.4542253315448761,
"learning_rate": 1.4717651889022202e-05,
"loss": 1.9208,
"step": 31
},
{
"epoch": 2.7340425531914896,
"grad_norm": 0.29611721634864807,
"learning_rate": 1.3038749676803994e-05,
"loss": 1.176,
"step": 32
},
{
"epoch": 2.8191489361702127,
"grad_norm": 0.27275022864341736,
"learning_rate": 1.1717846057223144e-05,
"loss": 1.8955,
"step": 33
},
{
"epoch": 2.904255319148936,
"grad_norm": 0.6470306515693665,
"learning_rate": 1.076621051422442e-05,
"loss": 2.0274,
"step": 34
},
{
"epoch": 2.9893617021276597,
"grad_norm": 0.5650705695152283,
"learning_rate": 1.019196206672345e-05,
"loss": 1.638,
"step": 35
},
{
"epoch": 3.0797872340425534,
"grad_norm": 0.15843090415000916,
"learning_rate": 1e-05,
"loss": 1.2924,
"step": 36
}
],
"logging_steps": 1,
"max_steps": 36,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.637908650681958e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
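
For reference, a trainer state file like the one above can be inspected with a few lines of standard-library Python. This is only a minimal sketch: the local filename "trainer_state.json" is an assumption, and the script simply separates the per-step training entries from the evaluation entries recorded in log_history before printing the best metric and checkpoint.

```python
import json

# Assumes the file shown above has been saved locally as "trainer_state.json".
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (with "loss") and evaluation entries
# (with "eval_loss"); split them for separate reporting.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for entry in train_logs:
    print(f"step {entry['step']:>3}  lr={entry['learning_rate']:.2e}  loss={entry['loss']:.4f}")

for entry in eval_logs:
    print(f"eval @ step {entry['step']}: eval_loss={entry['eval_loss']:.4f}")

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])
```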