{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.001505253334136135,
"eval_steps": 13,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.0105066682722702e-05,
"grad_norm": NaN,
"learning_rate": 5e-06,
"loss": 0.0,
"step": 1
},
{
"epoch": 3.0105066682722702e-05,
"eval_loss": NaN,
"eval_runtime": 766.2942,
"eval_samples_per_second": 18.253,
"eval_steps_per_second": 9.127,
"step": 1
},
{
"epoch": 6.0210133365445404e-05,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 2
},
{
"epoch": 9.031520004816811e-05,
"grad_norm": NaN,
"learning_rate": 1.5e-05,
"loss": 0.0,
"step": 3
},
{
"epoch": 0.00012042026673089081,
"grad_norm": NaN,
"learning_rate": 2e-05,
"loss": 0.0,
"step": 4
},
{
"epoch": 0.00015052533341361352,
"grad_norm": NaN,
"learning_rate": 2.5e-05,
"loss": 0.0,
"step": 5
},
{
"epoch": 0.00018063040009633623,
"grad_norm": NaN,
"learning_rate": 3e-05,
"loss": 0.0,
"step": 6
},
{
"epoch": 0.0002107354667790589,
"grad_norm": NaN,
"learning_rate": 3.5e-05,
"loss": 0.0,
"step": 7
},
{
"epoch": 0.00024084053346178162,
"grad_norm": NaN,
"learning_rate": 4e-05,
"loss": 0.0,
"step": 8
},
{
"epoch": 0.0002709456001445043,
"grad_norm": NaN,
"learning_rate": 4.5e-05,
"loss": 0.0,
"step": 9
},
{
"epoch": 0.00030105066682722703,
"grad_norm": NaN,
"learning_rate": 5e-05,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.00033115573350994974,
"grad_norm": NaN,
"learning_rate": 4.99229333433282e-05,
"loss": 0.0,
"step": 11
},
{
"epoch": 0.00036126080019267245,
"grad_norm": NaN,
"learning_rate": 4.9692208514878444e-05,
"loss": 0.0,
"step": 12
},
{
"epoch": 0.0003913658668753951,
"grad_norm": NaN,
"learning_rate": 4.9309248009941914e-05,
"loss": 0.0,
"step": 13
},
{
"epoch": 0.0003913658668753951,
"eval_loss": NaN,
"eval_runtime": 764.7494,
"eval_samples_per_second": 18.29,
"eval_steps_per_second": 9.145,
"step": 13
},
{
"epoch": 0.0004214709335581178,
"grad_norm": NaN,
"learning_rate": 4.877641290737884e-05,
"loss": 0.0,
"step": 14
},
{
"epoch": 0.0004515760002408405,
"grad_norm": NaN,
"learning_rate": 4.8096988312782174e-05,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.00048168106692356323,
"grad_norm": NaN,
"learning_rate": 4.72751631047092e-05,
"loss": 0.0,
"step": 16
},
{
"epoch": 0.0005117861336062859,
"grad_norm": NaN,
"learning_rate": 4.6316004108852305e-05,
"loss": 0.0,
"step": 17
},
{
"epoch": 0.0005418912002890087,
"grad_norm": NaN,
"learning_rate": 4.522542485937369e-05,
"loss": 0.0,
"step": 18
},
{
"epoch": 0.0005719962669717314,
"grad_norm": NaN,
"learning_rate": 4.401014914000078e-05,
"loss": 0.0,
"step": 19
},
{
"epoch": 0.0006021013336544541,
"grad_norm": NaN,
"learning_rate": 4.267766952966369e-05,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.0006322064003371768,
"grad_norm": NaN,
"learning_rate": 4.123620120825459e-05,
"loss": 0.0,
"step": 21
},
{
"epoch": 0.0006623114670198995,
"grad_norm": NaN,
"learning_rate": 3.969463130731183e-05,
"loss": 0.0,
"step": 22
},
{
"epoch": 0.0006924165337026222,
"grad_norm": NaN,
"learning_rate": 3.8062464117898724e-05,
"loss": 0.0,
"step": 23
},
{
"epoch": 0.0007225216003853449,
"grad_norm": NaN,
"learning_rate": 3.634976249348867e-05,
"loss": 0.0,
"step": 24
},
{
"epoch": 0.0007526266670680675,
"grad_norm": NaN,
"learning_rate": 3.456708580912725e-05,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.0007827317337507902,
"grad_norm": NaN,
"learning_rate": 3.272542485937369e-05,
"loss": 0.0,
"step": 26
},
{
"epoch": 0.0007827317337507902,
"eval_loss": NaN,
"eval_runtime": 764.8818,
"eval_samples_per_second": 18.286,
"eval_steps_per_second": 9.144,
"step": 26
},
{
"epoch": 0.0008128368004335129,
"grad_norm": NaN,
"learning_rate": 3.083613409639764e-05,
"loss": 0.0,
"step": 27
},
{
"epoch": 0.0008429418671162356,
"grad_norm": NaN,
"learning_rate": 2.8910861626005776e-05,
"loss": 0.0,
"step": 28
},
{
"epoch": 0.0008730469337989583,
"grad_norm": NaN,
"learning_rate": 2.6961477393196126e-05,
"loss": 0.0,
"step": 29
},
{
"epoch": 0.000903152000481681,
"grad_norm": NaN,
"learning_rate": 2.5e-05,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.0009332570671644038,
"grad_norm": NaN,
"learning_rate": 2.303852260680388e-05,
"loss": 0.0,
"step": 31
},
{
"epoch": 0.0009633621338471265,
"grad_norm": NaN,
"learning_rate": 2.1089138373994223e-05,
"loss": 0.0,
"step": 32
},
{
"epoch": 0.000993467200529849,
"grad_norm": NaN,
"learning_rate": 1.9163865903602374e-05,
"loss": 0.0,
"step": 33
},
{
"epoch": 0.0010235722672125719,
"grad_norm": NaN,
"learning_rate": 1.7274575140626318e-05,
"loss": 0.0,
"step": 34
},
{
"epoch": 0.0010536773338952945,
"grad_norm": NaN,
"learning_rate": 1.5432914190872757e-05,
"loss": 0.0,
"step": 35
},
{
"epoch": 0.0010837824005780173,
"grad_norm": NaN,
"learning_rate": 1.3650237506511331e-05,
"loss": 0.0,
"step": 36
},
{
"epoch": 0.00111388746726074,
"grad_norm": NaN,
"learning_rate": 1.1937535882101281e-05,
"loss": 0.0,
"step": 37
},
{
"epoch": 0.0011439925339434627,
"grad_norm": NaN,
"learning_rate": 1.0305368692688174e-05,
"loss": 0.0,
"step": 38
},
{
"epoch": 0.0011740976006261853,
"grad_norm": NaN,
"learning_rate": 8.763798791745411e-06,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.0011740976006261853,
"eval_loss": NaN,
"eval_runtime": 765.0262,
"eval_samples_per_second": 18.283,
"eval_steps_per_second": 9.142,
"step": 39
},
{
"epoch": 0.0012042026673089081,
"grad_norm": NaN,
"learning_rate": 7.3223304703363135e-06,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.0012343077339916307,
"grad_norm": NaN,
"learning_rate": 5.989850859999227e-06,
"loss": 0.0,
"step": 41
},
{
"epoch": 0.0012644128006743536,
"grad_norm": NaN,
"learning_rate": 4.7745751406263165e-06,
"loss": 0.0,
"step": 42
},
{
"epoch": 0.0012945178673570762,
"grad_norm": NaN,
"learning_rate": 3.6839958911476957e-06,
"loss": 0.0,
"step": 43
},
{
"epoch": 0.001324622934039799,
"grad_norm": NaN,
"learning_rate": 2.7248368952908053e-06,
"loss": 0.0,
"step": 44
},
{
"epoch": 0.0013547280007225216,
"grad_norm": NaN,
"learning_rate": 1.9030116872178316e-06,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.0013848330674052444,
"grad_norm": NaN,
"learning_rate": 1.2235870926211619e-06,
"loss": 0.0,
"step": 46
},
{
"epoch": 0.001414938134087967,
"grad_norm": NaN,
"learning_rate": 6.907519900580861e-07,
"loss": 0.0,
"step": 47
},
{
"epoch": 0.0014450432007706898,
"grad_norm": NaN,
"learning_rate": 3.077914851215585e-07,
"loss": 0.0,
"step": 48
},
{
"epoch": 0.0014751482674534124,
"grad_norm": NaN,
"learning_rate": 7.706665667180091e-08,
"loss": 0.0,
"step": 49
},
{
"epoch": 0.001505253334136135,
"grad_norm": NaN,
"learning_rate": 0.0,
"loss": 0.0,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 13,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.7220000427278336e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
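
The JSON above is the trainer state that the Hugging Face Trainer writes alongside each checkpoint. Notably, every logged grad_norm and eval_loss is NaN and every training loss is exactly 0.0 over all 50 steps, which usually points at a numerical problem (for example an overflow early in training) rather than real progress. Below is a minimal sketch, assuming the file is saved locally as trainer_state.json (the standard name for this file; the printed summary is purely illustrative), that loads the state and counts the affected steps:

import json
import math

# Load the checkpoint's trainer state (assumed to be saved as trainer_state.json,
# the file the Trainer stores inside each checkpoint directory).
with open("trainer_state.json") as f:
    state = json.load(f)  # json.load parses the bare NaN tokens to float("nan")

# Training-step entries carry "loss"/"grad_norm"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Flag steps whose gradient norm or eval loss is NaN, and steps whose
# training loss collapsed to exactly 0.0.
nan_grad_steps = [e["step"] for e in train_logs if math.isnan(e.get("grad_norm", 0.0))]
zero_loss_steps = [e["step"] for e in train_logs if e["loss"] == 0.0]
nan_eval_steps = [e["step"] for e in eval_logs if math.isnan(e["eval_loss"])]

print(f"train steps logged:       {len(train_logs)} (every {state['logging_steps']} step)")
print(f"eval points logged:       {len(eval_logs)} (every {state['eval_steps']} steps)")
print(f"steps with NaN grad_norm: {len(nan_grad_steps)}")
print(f"steps with loss == 0.0:   {len(zero_loss_steps)}")
print(f"evals with NaN eval_loss: {len(nan_eval_steps)}")

For this file the script would report 50 training steps, 4 evaluation points (steps 1, 13, 26, 39), and NaN/zero values at every one of them.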