{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.8021265092809515,
  "eval_steps": 500,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04505316273202379,
      "grad_norm": 5023.73388671875,
      "learning_rate": 1.25e-05,
      "loss": 2425.6955,
      "step": 500
    },
    {
      "epoch": 0.09010632546404758,
      "grad_norm": 4085.17724609375,
      "learning_rate": 2.5e-05,
      "loss": 633.1036,
      "step": 1000
    },
    {
      "epoch": 0.13515948819607138,
      "grad_norm": 3114.9833984375,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 586.7181,
      "step": 1500
    },
    {
      "epoch": 0.18021265092809516,
      "grad_norm": 3313.659423828125,
      "learning_rate": 5e-05,
      "loss": 565.9839,
      "step": 2000
    },
    {
      "epoch": 0.22526581366011894,
      "grad_norm": 2783.360107421875,
      "learning_rate": 4.990486745229364e-05,
      "loss": 558.0812,
      "step": 2500
    },
    {
      "epoch": 0.27031897639214275,
      "grad_norm": 3958.57763671875,
      "learning_rate": 4.962019382530521e-05,
      "loss": 542.2432,
      "step": 3000
    },
    {
      "epoch": 0.3153721391241665,
      "grad_norm": 2197.1630859375,
      "learning_rate": 4.914814565722671e-05,
      "loss": 521.7359,
      "step": 3500
    },
    {
      "epoch": 0.3604253018561903,
      "grad_norm": 2326.522705078125,
      "learning_rate": 4.849231551964771e-05,
      "loss": 522.5125,
      "step": 4000
    },
    {
      "epoch": 0.40547846458821407,
      "grad_norm": 2209.858642578125,
      "learning_rate": 4.765769467591625e-05,
      "loss": 514.8256,
      "step": 4500
    },
    {
      "epoch": 0.4505316273202379,
      "grad_norm": 2931.34130859375,
      "learning_rate": 4.665063509461097e-05,
      "loss": 500.6537,
      "step": 5000
    },
    {
      "epoch": 0.4955847900522617,
      "grad_norm": 2907.71484375,
      "learning_rate": 4.54788011072248e-05,
      "loss": 497.3337,
      "step": 5500
    },
    {
      "epoch": 0.5406379527842855,
      "grad_norm": 1822.9918212890625,
      "learning_rate": 4.415111107797445e-05,
      "loss": 484.2432,
      "step": 6000
    },
    {
      "epoch": 0.5856911155163093,
      "grad_norm": 2211.70458984375,
      "learning_rate": 4.267766952966369e-05,
      "loss": 489.1086,
      "step": 6500
    },
    {
      "epoch": 0.630744278248333,
      "grad_norm": 1908.6292724609375,
      "learning_rate": 4.1069690242163484e-05,
      "loss": 487.3377,
      "step": 7000
    },
    {
      "epoch": 0.6757974409803568,
      "grad_norm": 3507.5302734375,
      "learning_rate": 3.933941090877615e-05,
      "loss": 479.4596,
      "step": 7500
    },
    {
      "epoch": 0.7208506037123806,
      "grad_norm": 2594.98291015625,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 487.1899,
      "step": 8000
    },
    {
      "epoch": 0.7659037664444044,
      "grad_norm": 2933.35595703125,
      "learning_rate": 3.556545654351749e-05,
      "loss": 482.0614,
      "step": 8500
    },
    {
      "epoch": 0.8109569291764281,
      "grad_norm": 2508.786376953125,
      "learning_rate": 3.355050358314172e-05,
      "loss": 469.1376,
      "step": 9000
    },
    {
      "epoch": 0.856010091908452,
      "grad_norm": 2515.1328125,
      "learning_rate": 3.147047612756302e-05,
      "loss": 462.4763,
      "step": 9500
    },
    {
      "epoch": 0.9010632546404758,
      "grad_norm": 2034.9927978515625,
      "learning_rate": 2.9341204441673266e-05,
      "loss": 459.6087,
      "step": 10000
    },
    {
      "epoch": 0.9461164173724995,
      "grad_norm": 1929.3106689453125,
      "learning_rate": 2.717889356869146e-05,
      "loss": 453.5379,
      "step": 10500
    },
    {
      "epoch": 0.9911695801045234,
      "grad_norm": 2851.670654296875,
      "learning_rate": 2.5e-05,
      "loss": 453.0242,
      "step": 11000
    },
    {
      "epoch": 1.0,
      "eval_loss": 428.85467529296875,
      "eval_runtime": 57.9798,
      "eval_samples_per_second": 170.145,
      "eval_steps_per_second": 21.283,
      "step": 11098
    },
    {
      "epoch": 1.0362227428365471,
      "grad_norm": 2632.15185546875,
      "learning_rate": 2.2821106431308544e-05,
      "loss": 437.7418,
      "step": 11500
    },
    {
      "epoch": 1.081275905568571,
      "grad_norm": 2240.501220703125,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 438.0496,
      "step": 12000
    },
    {
      "epoch": 1.1263290683005946,
      "grad_norm": 1960.9959716796875,
      "learning_rate": 1.852952387243698e-05,
      "loss": 427.4685,
      "step": 12500
    },
    {
      "epoch": 1.1713822310326185,
      "grad_norm": 2526.84228515625,
      "learning_rate": 1.6449496416858284e-05,
      "loss": 429.6636,
      "step": 13000
    },
    {
      "epoch": 1.2164353937646424,
      "grad_norm": 2094.966552734375,
      "learning_rate": 1.443454345648252e-05,
      "loss": 429.0512,
      "step": 13500
    },
    {
      "epoch": 1.261488556496666,
      "grad_norm": 1433.662109375,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 436.467,
      "step": 14000
    },
    {
      "epoch": 1.30654171922869,
      "grad_norm": 1956.2445068359375,
      "learning_rate": 1.0660589091223855e-05,
      "loss": 422.1393,
      "step": 14500
    },
    {
      "epoch": 1.3515948819607138,
      "grad_norm": 2243.373779296875,
      "learning_rate": 8.930309757836517e-06,
      "loss": 429.4757,
      "step": 15000
    },
    {
      "epoch": 1.3966480446927374,
      "grad_norm": 2793.48095703125,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 422.5827,
      "step": 15500
    },
    {
      "epoch": 1.4417012074247613,
      "grad_norm": 2228.3212890625,
      "learning_rate": 5.848888922025553e-06,
      "loss": 430.6597,
      "step": 16000
    },
    {
      "epoch": 1.4867543701567851,
      "grad_norm": 3403.7626953125,
      "learning_rate": 4.521198892775203e-06,
      "loss": 426.2394,
      "step": 16500
    },
    {
      "epoch": 1.5318075328888088,
      "grad_norm": 1795.3551025390625,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 422.1375,
      "step": 17000
    },
    {
      "epoch": 1.5768606956208324,
      "grad_norm": 3233.425537109375,
      "learning_rate": 2.3423053240837515e-06,
      "loss": 426.7428,
      "step": 17500
    },
    {
      "epoch": 1.6219138583528565,
      "grad_norm": 2393.4365234375,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 422.9055,
      "step": 18000
    },
    {
      "epoch": 1.6669670210848802,
      "grad_norm": 2255.08837890625,
      "learning_rate": 8.51854342773295e-07,
      "loss": 418.1114,
      "step": 18500
    },
    {
      "epoch": 1.7120201838169038,
      "grad_norm": 1896.0711669921875,
      "learning_rate": 3.7980617469479953e-07,
      "loss": 419.0474,
      "step": 19000
    },
    {
      "epoch": 1.7570733465489279,
      "grad_norm": 2811.251953125,
      "learning_rate": 9.513254770636137e-08,
      "loss": 424.493,
      "step": 19500
    },
    {
      "epoch": 1.8021265092809515,
      "grad_norm": 2713.771484375,
      "learning_rate": 0.0,
      "loss": 423.2755,
      "step": 20000
    }
  ],
  "logging_steps": 500,
  "max_steps": 20000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}