{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.8021265092809515, "eval_steps": 500, "global_step": 20000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.04505316273202379, "grad_norm": 0.0, "learning_rate": 1.25e-05, "loss": 1347.2, "step": 500 }, { "epoch": 0.09010632546404758, "grad_norm": 3153.3505859375, "learning_rate": 2.5e-05, "loss": 477.367, "step": 1000 }, { "epoch": 0.13515948819607138, "grad_norm": 1858.1285400390625, "learning_rate": 3.7500000000000003e-05, "loss": 442.957, "step": 1500 }, { "epoch": 0.18021265092809516, "grad_norm": 2545.223388671875, "learning_rate": 5e-05, "loss": 433.9167, "step": 2000 }, { "epoch": 0.22526581366011894, "grad_norm": 2296.039794921875, "learning_rate": 4.990486745229364e-05, "loss": 418.3493, "step": 2500 }, { "epoch": 0.27031897639214275, "grad_norm": 2642.889404296875, "learning_rate": 4.962019382530521e-05, "loss": 407.9998, "step": 3000 }, { "epoch": 0.3153721391241665, "grad_norm": 0.0, "learning_rate": 4.914814565722671e-05, "loss": 410.2617, "step": 3500 }, { "epoch": 0.3604253018561903, "grad_norm": 2563.190673828125, "learning_rate": 4.849231551964771e-05, "loss": 397.9934, "step": 4000 }, { "epoch": 0.40547846458821407, "grad_norm": 3283.779052734375, "learning_rate": 4.765769467591625e-05, "loss": 417.8361, "step": 4500 }, { "epoch": 0.4505316273202379, "grad_norm": 2174.240234375, "learning_rate": 4.665063509461097e-05, "loss": 375.8966, "step": 5000 }, { "epoch": 0.4955847900522617, "grad_norm": 2514.682373046875, "learning_rate": 4.54788011072248e-05, "loss": 388.2395, "step": 5500 }, { "epoch": 0.5406379527842855, "grad_norm": 3136.03759765625, "learning_rate": 4.415111107797445e-05, "loss": 394.8071, "step": 6000 }, { "epoch": 0.5856911155163093, "grad_norm": 1837.290283203125, "learning_rate": 4.267766952966369e-05, "loss": 366.3096, "step": 6500 }, { "epoch": 0.630744278248333, "grad_norm": 2366.833740234375, "learning_rate": 4.1069690242163484e-05, "loss": 367.1677, "step": 7000 }, { "epoch": 0.6757974409803568, "grad_norm": 2263.440673828125, "learning_rate": 3.933941090877615e-05, "loss": 372.6355, "step": 7500 }, { "epoch": 0.7208506037123806, "grad_norm": 2522.50244140625, "learning_rate": 3.7500000000000003e-05, "loss": 367.763, "step": 8000 }, { "epoch": 0.7659037664444044, "grad_norm": 2936.105712890625, "learning_rate": 3.556545654351749e-05, "loss": 354.5393, "step": 8500 }, { "epoch": 0.8109569291764281, "grad_norm": 0.0, "learning_rate": 3.355050358314172e-05, "loss": 362.7008, "step": 9000 }, { "epoch": 0.856010091908452, "grad_norm": 2301.9541015625, "learning_rate": 3.147047612756302e-05, "loss": 339.9951, "step": 9500 }, { "epoch": 0.9010632546404758, "grad_norm": 2941.145751953125, "learning_rate": 2.9341204441673266e-05, "loss": 357.0448, "step": 10000 }, { "epoch": 0.9461164173724995, "grad_norm": 2575.2666015625, "learning_rate": 2.717889356869146e-05, "loss": 356.5437, "step": 10500 }, { "epoch": 0.9911695801045234, "grad_norm": 3181.543212890625, "learning_rate": 2.5e-05, "loss": 350.985, "step": 11000 }, { "epoch": 1.0, "eval_loss": 382.581298828125, "eval_runtime": 227.6629, "eval_samples_per_second": 43.332, "eval_steps_per_second": 5.42, "step": 11098 }, { "epoch": 1.0362227428365471, "grad_norm": 2919.064208984375, "learning_rate": 2.2821106431308544e-05, "loss": 328.9697, "step": 11500 }, { "epoch": 1.081275905568571, "grad_norm": 1792.9368896484375, "learning_rate": 2.0658795558326743e-05, "loss": 
316.0124, "step": 12000 }, { "epoch": 1.1263290683005946, "grad_norm": 3145.044921875, "learning_rate": 1.852952387243698e-05, "loss": 318.4672, "step": 12500 }, { "epoch": 1.1713822310326185, "grad_norm": 2122.60693359375, "learning_rate": 1.6449496416858284e-05, "loss": 310.955, "step": 13000 }, { "epoch": 1.2164353937646424, "grad_norm": 0.0, "learning_rate": 1.443454345648252e-05, "loss": 308.6115, "step": 13500 }, { "epoch": 1.261488556496666, "grad_norm": 2367.4453125, "learning_rate": 1.2500000000000006e-05, "loss": 308.7791, "step": 14000 }, { "epoch": 1.30654171922869, "grad_norm": 3921.755126953125, "learning_rate": 1.0660589091223855e-05, "loss": 314.4502, "step": 14500 }, { "epoch": 1.3515948819607138, "grad_norm": 3169.499267578125, "learning_rate": 8.930309757836517e-06, "loss": 299.5019, "step": 15000 }, { "epoch": 1.3966480446927374, "grad_norm": 1866.3717041015625, "learning_rate": 7.3223304703363135e-06, "loss": 303.1934, "step": 15500 }, { "epoch": 1.4417012074247613, "grad_norm": 2293.599365234375, "learning_rate": 5.848888922025553e-06, "loss": 299.5156, "step": 16000 }, { "epoch": 1.4867543701567851, "grad_norm": 2268.71142578125, "learning_rate": 4.521198892775203e-06, "loss": 307.7869, "step": 16500 }, { "epoch": 1.5318075328888088, "grad_norm": 3546.200439453125, "learning_rate": 3.3493649053890326e-06, "loss": 290.4306, "step": 17000 }, { "epoch": 1.5768606956208324, "grad_norm": 2096.766845703125, "learning_rate": 2.3423053240837515e-06, "loss": 299.7911, "step": 17500 }, { "epoch": 1.6219138583528565, "grad_norm": 0.0, "learning_rate": 1.5076844803522922e-06, "loss": 302.4306, "step": 18000 }, { "epoch": 1.6669670210848802, "grad_norm": 1852.839111328125, "learning_rate": 8.51854342773295e-07, "loss": 305.6073, "step": 18500 }, { "epoch": 1.7120201838169038, "grad_norm": 2610.0205078125, "learning_rate": 3.7980617469479953e-07, "loss": 308.6735, "step": 19000 }, { "epoch": 1.7570733465489279, "grad_norm": 1428.6187744140625, "learning_rate": 9.513254770636137e-08, "loss": 296.7373, "step": 19500 }, { "epoch": 1.8021265092809515, "grad_norm": 0.0, "learning_rate": 0.0, "loss": 290.4464, "step": 20000 } ], "logging_steps": 500, "max_steps": 20000, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 10000, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 0.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }
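
Note: the object above is the trainer state that the Hugging Face Transformers Trainer writes alongside a checkpoint (train loss and learning rate every 500 steps, plus one evaluation at epoch 1.0 / step 11098). The following is a minimal sketch, not part of the saved state itself, of how these curves could be inspected offline; it assumes the file is stored under the Trainer's usual name "trainer_state.json" and that matplotlib is installed, and the output filename is illustrative.

import json
import matplotlib.pyplot as plt

# Assumed path: the standard name used inside a Trainer checkpoint folder.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training log entries carry "loss"; the end-of-epoch evaluation carries "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

steps = [e["step"] for e in train_logs]

fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True)

# Top panel: training loss, with the recorded eval loss shown as a reference line.
ax_loss.plot(steps, [e["loss"] for e in train_logs], label="train loss")
for e in eval_logs:
    ax_loss.axhline(e["eval_loss"], linestyle="--", alpha=0.5,
                    label=f'eval loss @ step {e["step"]}')
ax_loss.set_ylabel("loss")
ax_loss.legend()

# Bottom panel: the learning-rate schedule (warmup to 5e-05, then decay to 0).
ax_lr.plot(steps, [e["learning_rate"] for e in train_logs], label="learning rate")
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
ax_lr.legend()

fig.tight_layout()
fig.savefig("training_curves.png")  # Illustrative output name.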