{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.7777777777777777, "eval_steps": 50, "global_step": 200, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.06944444444444445, "grad_norm": 28.03362215670935, "learning_rate": 5e-07, "loss": 1.6525, "step": 5 }, { "epoch": 0.1388888888888889, "grad_norm": 15.104101542150465, "learning_rate": 1e-06, "loss": 1.5085, "step": 10 }, { "epoch": 0.20833333333333334, "grad_norm": 8.033686730596932, "learning_rate": 9.985471028179154e-07, "loss": 1.2944, "step": 15 }, { "epoch": 0.2777777777777778, "grad_norm": 5.527358615393283, "learning_rate": 9.94196854912548e-07, "loss": 1.2274, "step": 20 }, { "epoch": 0.3472222222222222, "grad_norm": 5.639639905741326, "learning_rate": 9.869745381355905e-07, "loss": 1.1796, "step": 25 }, { "epoch": 0.4166666666666667, "grad_norm": 4.841439267473979, "learning_rate": 9.769221256218162e-07, "loss": 1.162, "step": 30 }, { "epoch": 0.4861111111111111, "grad_norm": 5.031380897521579, "learning_rate": 9.64098037858483e-07, "loss": 1.1511, "step": 35 }, { "epoch": 0.5555555555555556, "grad_norm": 4.623507128763141, "learning_rate": 9.485768031694871e-07, "loss": 1.1321, "step": 40 }, { "epoch": 0.625, "grad_norm": 4.438078790320084, "learning_rate": 9.304486245873971e-07, "loss": 1.141, "step": 45 }, { "epoch": 0.6944444444444444, "grad_norm": 4.553312192573781, "learning_rate": 9.098188556305262e-07, "loss": 1.1353, "step": 50 }, { "epoch": 0.6944444444444444, "eval_loss": 1.1089767217636108, "eval_runtime": 36.7554, "eval_samples_per_second": 55.584, "eval_steps_per_second": 0.871, "step": 50 }, { "epoch": 0.7638888888888888, "grad_norm": 4.657055083923456, "learning_rate": 8.868073880316123e-07, "loss": 1.1085, "step": 55 }, { "epoch": 0.8333333333333334, "grad_norm": 4.899356041473821, "learning_rate": 8.615479549763755e-07, "loss": 1.1058, "step": 60 }, { "epoch": 0.9027777777777778, "grad_norm": 4.752445427346823, "learning_rate": 8.341873539012443e-07, "loss": 1.1054, "step": 65 }, { "epoch": 0.9722222222222222, "grad_norm": 4.9284072371353504, "learning_rate": 8.048845933670271e-07, "loss": 1.1129, "step": 70 }, { "epoch": 1.0416666666666667, "grad_norm": 4.916541328032108, "learning_rate": 7.738099689665539e-07, "loss": 1.0225, "step": 75 }, { "epoch": 1.1111111111111112, "grad_norm": 4.755137577065741, "learning_rate": 7.41144073636728e-07, "loss": 0.9791, "step": 80 }, { "epoch": 1.1805555555555556, "grad_norm": 5.211754164119227, "learning_rate": 7.070767481266492e-07, "loss": 0.9602, "step": 85 }, { "epoch": 1.25, "grad_norm": 5.068955447006314, "learning_rate": 6.718059777212565e-07, "loss": 0.9586, "step": 90 }, { "epoch": 1.3194444444444444, "grad_norm": 5.0242480045435896, "learning_rate": 6.355367416322778e-07, "loss": 0.9528, "step": 95 }, { "epoch": 1.3888888888888888, "grad_norm": 5.100859792567275, "learning_rate": 5.984798217433531e-07, "loss": 0.9543, "step": 100 }, { "epoch": 1.3888888888888888, "eval_loss": 1.0777673721313477, "eval_runtime": 36.2431, "eval_samples_per_second": 56.369, "eval_steps_per_second": 0.883, "step": 100 }, { "epoch": 1.4583333333333333, "grad_norm": 5.158449874528905, "learning_rate": 5.608505776324157e-07, "loss": 0.9546, "step": 105 }, { "epoch": 1.5277777777777777, "grad_norm": 5.0308140222864886, "learning_rate": 5.228676949903973e-07, "loss": 0.9602, "step": 110 }, { "epoch": 1.5972222222222223, "grad_norm": 4.902344567141826, "learning_rate": 4.847519147099294e-07, "loss": 
0.9547, "step": 115 }, { "epoch": 1.6666666666666665, "grad_norm": 5.1345646964010605, "learning_rate": 4.46724750030062e-07, "loss": 0.9369, "step": 120 }, { "epoch": 1.7361111111111112, "grad_norm": 5.243970140246485, "learning_rate": 4.0900719919241935e-07, "loss": 0.9455, "step": 125 }, { "epoch": 1.8055555555555556, "grad_norm": 4.923533966119208, "learning_rate": 3.7181846109031e-07, "loss": 0.9392, "step": 130 }, { "epoch": 1.875, "grad_norm": 4.905055116114625, "learning_rate": 3.353746613749093e-07, "loss": 0.9439, "step": 135 }, { "epoch": 1.9444444444444444, "grad_norm": 4.947955537361124, "learning_rate": 2.9988759642186093e-07, "loss": 0.9396, "step": 140 }, { "epoch": 2.013888888888889, "grad_norm": 5.937445383303241, "learning_rate": 2.655635024578483e-07, "loss": 0.9073, "step": 145 }, { "epoch": 2.0833333333333335, "grad_norm": 5.340090037535277, "learning_rate": 2.3260185700046292e-07, "loss": 0.8627, "step": 150 }, { "epoch": 2.0833333333333335, "eval_loss": 1.0668742656707764, "eval_runtime": 36.2443, "eval_samples_per_second": 56.367, "eval_steps_per_second": 0.883, "step": 150 }, { "epoch": 2.1527777777777777, "grad_norm": 5.393268774778699, "learning_rate": 2.0119421957691218e-07, "loss": 0.8666, "step": 155 }, { "epoch": 2.2222222222222223, "grad_norm": 5.388556278114857, "learning_rate": 1.7152311845883094e-07, "loss": 0.8574, "step": 160 }, { "epoch": 2.2916666666666665, "grad_norm": 5.732293363878604, "learning_rate": 1.4376098988303404e-07, "loss": 0.8581, "step": 165 }, { "epoch": 2.361111111111111, "grad_norm": 5.45272302861646, "learning_rate": 1.1806917592302761e-07, "loss": 0.8505, "step": 170 }, { "epoch": 2.4305555555555554, "grad_norm": 5.302389477646773, "learning_rate": 9.459698683523204e-08, "loss": 0.847, "step": 175 }, { "epoch": 2.5, "grad_norm": 5.366246438508766, "learning_rate": 7.348083332917926e-08, "loss": 0.8505, "step": 180 }, { "epoch": 2.5694444444444446, "grad_norm": 5.427530127252247, "learning_rate": 5.484343380457124e-08, "loss": 0.844, "step": 185 }, { "epoch": 2.638888888888889, "grad_norm": 5.2796646154487865, "learning_rate": 3.879310116241041e-08, "loss": 0.8417, "step": 190 }, { "epoch": 2.7083333333333335, "grad_norm": 5.36802540722628, "learning_rate": 2.5423113334966218e-08, "loss": 0.8382, "step": 195 }, { "epoch": 2.7777777777777777, "grad_norm": 5.431008385979752, "learning_rate": 1.4811171192794624e-08, "loss": 0.8551, "step": 200 }, { "epoch": 2.7777777777777777, "eval_loss": 1.0724252462387085, "eval_runtime": 36.3038, "eval_samples_per_second": 56.275, "eval_steps_per_second": 0.881, "step": 200 } ], "logging_steps": 5, "max_steps": 216, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1179056703799296.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }