{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.08437038599451592,
  "eval_steps": 500,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0016874077198903185,
      "grad_norm": 65.32470703125,
      "learning_rate": 0.0001,
      "loss": 25.2321,
      "step": 1
    },
    {
      "epoch": 0.003374815439780637,
      "grad_norm": 70.6180419921875,
      "learning_rate": 0.0002,
      "loss": 25.3485,
      "step": 2
    },
    {
      "epoch": 0.0050622231596709555,
      "grad_norm": 51.371097564697266,
      "learning_rate": 0.00019978589232386035,
      "loss": 21.7018,
      "step": 3
    },
    {
      "epoch": 0.006749630879561274,
      "grad_norm": 590.1074829101562,
      "learning_rate": 0.00019914448613738106,
      "loss": 54.5225,
      "step": 4
    },
    {
      "epoch": 0.008437038599451592,
      "grad_norm": 209.5045623779297,
      "learning_rate": 0.00019807852804032305,
      "loss": 31.0961,
      "step": 5
    },
    {
      "epoch": 0.010124446319341911,
      "grad_norm": 58.842044830322266,
      "learning_rate": 0.00019659258262890683,
      "loss": 24.73,
      "step": 6
    },
    {
      "epoch": 0.011811854039232229,
      "grad_norm": 71.44770812988281,
      "learning_rate": 0.0001946930129495106,
      "loss": 23.8403,
      "step": 7
    },
    {
      "epoch": 0.013499261759122548,
      "grad_norm": 106.37022399902344,
      "learning_rate": 0.0001923879532511287,
      "loss": 27.1841,
      "step": 8
    },
    {
      "epoch": 0.015186669479012866,
      "grad_norm": 45.72689437866211,
      "learning_rate": 0.00018968727415326884,
      "loss": 19.8725,
      "step": 9
    },
    {
      "epoch": 0.016874077198903183,
      "grad_norm": 37.188865661621094,
      "learning_rate": 0.00018660254037844388,
      "loss": 21.7315,
      "step": 10
    },
    {
      "epoch": 0.018561484918793503,
      "grad_norm": 45.79566192626953,
      "learning_rate": 0.00018314696123025454,
      "loss": 22.6619,
      "step": 11
    },
    {
      "epoch": 0.020248892638683822,
      "grad_norm": 55.1727180480957,
      "learning_rate": 0.00017933533402912354,
      "loss": 21.1365,
      "step": 12
    },
    {
      "epoch": 0.02193630035857414,
      "grad_norm": 41.42280578613281,
      "learning_rate": 0.00017518398074789775,
      "loss": 22.4909,
      "step": 13
    },
    {
      "epoch": 0.023623708078464457,
      "grad_norm": 45.82218551635742,
      "learning_rate": 0.00017071067811865476,
      "loss": 18.2044,
      "step": 14
    },
    {
      "epoch": 0.025311115798354777,
      "grad_norm": 37.43087387084961,
      "learning_rate": 0.00016593458151000688,
      "loss": 20.438,
      "step": 15
    },
    {
      "epoch": 0.026998523518245096,
      "grad_norm": 114.20457458496094,
      "learning_rate": 0.00016087614290087208,
      "loss": 23.5819,
      "step": 16
    },
    {
      "epoch": 0.028685931238135415,
      "grad_norm": 67.7957992553711,
      "learning_rate": 0.00015555702330196023,
      "loss": 21.258,
      "step": 17
    },
    {
      "epoch": 0.03037333895802573,
      "grad_norm": 33.19402313232422,
      "learning_rate": 0.00015000000000000001,
      "loss": 20.2971,
      "step": 18
    },
    {
      "epoch": 0.03206074667791605,
      "grad_norm": 40.10993194580078,
      "learning_rate": 0.00014422886902190014,
      "loss": 19.7575,
      "step": 19
    },
    {
      "epoch": 0.03374815439780637,
      "grad_norm": 49.910343170166016,
      "learning_rate": 0.000138268343236509,
      "loss": 18.6194,
      "step": 20
    },
    {
      "epoch": 0.03543556211769669,
      "grad_norm": 38.23054504394531,
      "learning_rate": 0.00013214394653031616,
      "loss": 20.5953,
      "step": 21
    },
    {
      "epoch": 0.037122969837587005,
      "grad_norm": 56.188289642333984,
      "learning_rate": 0.00012588190451025207,
      "loss": 20.2318,
      "step": 22
    },
    {
      "epoch": 0.03881037755747733,
      "grad_norm": 44.83216857910156,
      "learning_rate": 0.00011950903220161285,
      "loss": 19.2367,
      "step": 23
    },
    {
      "epoch": 0.040497785277367644,
      "grad_norm": 48.0411262512207,
      "learning_rate": 0.00011305261922200519,
      "loss": 20.9958,
      "step": 24
    },
    {
      "epoch": 0.04218519299725796,
      "grad_norm": 52.180850982666016,
      "learning_rate": 0.00010654031292301432,
      "loss": 20.8292,
      "step": 25
    },
    {
      "epoch": 0.04387260071714828,
      "grad_norm": 46.67422103881836,
      "learning_rate": 0.0001,
      "loss": 23.289,
      "step": 26
    },
    {
      "epoch": 0.0455600084370386,
      "grad_norm": 38.597694396972656,
      "learning_rate": 9.345968707698569e-05,
      "loss": 19.8904,
      "step": 27
    },
    {
      "epoch": 0.047247416156928915,
      "grad_norm": 47.50412368774414,
      "learning_rate": 8.694738077799488e-05,
      "loss": 19.5885,
      "step": 28
    },
    {
      "epoch": 0.04893482387681924,
      "grad_norm": 37.85395431518555,
      "learning_rate": 8.049096779838719e-05,
      "loss": 21.6278,
      "step": 29
    },
    {
      "epoch": 0.050622231596709553,
      "grad_norm": 33.44654083251953,
      "learning_rate": 7.411809548974792e-05,
      "loss": 21.5126,
      "step": 30
    },
    {
      "epoch": 0.052309639316599876,
      "grad_norm": 37.50754165649414,
      "learning_rate": 6.785605346968386e-05,
      "loss": 20.6339,
      "step": 31
    },
    {
      "epoch": 0.05399704703649019,
      "grad_norm": 43.75811767578125,
      "learning_rate": 6.173165676349103e-05,
      "loss": 19.563,
      "step": 32
    },
    {
      "epoch": 0.05568445475638051,
      "grad_norm": 40.8135986328125,
      "learning_rate": 5.577113097809989e-05,
      "loss": 17.5488,
      "step": 33
    },
    {
      "epoch": 0.05737186247627083,
      "grad_norm": 31.21071434020996,
      "learning_rate": 5.000000000000002e-05,
      "loss": 17.7708,
      "step": 34
    },
    {
      "epoch": 0.05905927019616115,
      "grad_norm": 29.12379264831543,
      "learning_rate": 4.444297669803981e-05,
      "loss": 16.4581,
      "step": 35
    },
    {
      "epoch": 0.06074667791605146,
      "grad_norm": 44.50994873046875,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 17.5144,
      "step": 36
    },
    {
      "epoch": 0.062434085635941786,
      "grad_norm": 41.542293548583984,
      "learning_rate": 3.406541848999312e-05,
      "loss": 21.657,
      "step": 37
    },
    {
      "epoch": 0.0641214933558321,
      "grad_norm": 37.01569747924805,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 18.6949,
      "step": 38
    },
    {
      "epoch": 0.06580890107572242,
      "grad_norm": 31.14872932434082,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 17.1006,
      "step": 39
    },
    {
      "epoch": 0.06749630879561273,
      "grad_norm": 37.147621154785156,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 21.1422,
      "step": 40
    },
    {
      "epoch": 0.06918371651550306,
      "grad_norm": 38.87228775024414,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 20.6313,
      "step": 41
    },
    {
      "epoch": 0.07087112423539338,
      "grad_norm": 28.88852882385254,
      "learning_rate": 1.339745962155613e-05,
      "loss": 16.6456,
      "step": 42
    },
    {
      "epoch": 0.0725585319552837,
      "grad_norm": 31.49118995666504,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 20.5823,
      "step": 43
    },
    {
      "epoch": 0.07424593967517401,
      "grad_norm": 26.752351760864258,
      "learning_rate": 7.612046748871327e-06,
      "loss": 16.891,
      "step": 44
    },
    {
      "epoch": 0.07593334739506433,
      "grad_norm": 30.51628303527832,
      "learning_rate": 5.306987050489442e-06,
      "loss": 16.887,
      "step": 45
    },
    {
      "epoch": 0.07762075511495466,
      "grad_norm": 37.371604919433594,
      "learning_rate": 3.40741737109318e-06,
      "loss": 18.2874,
      "step": 46
    },
    {
      "epoch": 0.07930816283484497,
      "grad_norm": 32.94172286987305,
      "learning_rate": 1.921471959676957e-06,
      "loss": 20.0595,
      "step": 47
    },
    {
      "epoch": 0.08099557055473529,
      "grad_norm": 33.590179443359375,
      "learning_rate": 8.555138626189618e-07,
      "loss": 18.6175,
      "step": 48
    },
    {
      "epoch": 0.0826829782746256,
      "grad_norm": 34.63749694824219,
      "learning_rate": 2.141076761396521e-07,
      "loss": 18.973,
      "step": 49
    },
    {
      "epoch": 0.08437038599451592,
      "grad_norm": 32.18989944458008,
      "learning_rate": 0.0,
      "loss": 19.8617,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.03130303987712e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}