{
  "best_metric": 1.661128044128418,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.19083969465648856,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003816793893129771,
      "grad_norm": 1.1639742851257324,
      "learning_rate": 1.6666666666666668e-07,
      "loss": 1.7599,
      "step": 1
    },
    {
      "epoch": 0.003816793893129771,
      "eval_loss": 1.690576195716858,
      "eval_runtime": 10.0391,
      "eval_samples_per_second": 10.957,
      "eval_steps_per_second": 1.395,
      "step": 1
    },
    {
      "epoch": 0.007633587786259542,
      "grad_norm": 0.9837440848350525,
      "learning_rate": 3.3333333333333335e-07,
      "loss": 1.7152,
      "step": 2
    },
    {
      "epoch": 0.011450381679389313,
      "grad_norm": 1.191903829574585,
      "learning_rate": 5.000000000000001e-07,
      "loss": 1.8818,
      "step": 3
    },
    {
      "epoch": 0.015267175572519083,
      "grad_norm": 1.2343465089797974,
      "learning_rate": 6.666666666666667e-07,
      "loss": 1.9395,
      "step": 4
    },
    {
      "epoch": 0.019083969465648856,
      "grad_norm": 1.1668846607208252,
      "learning_rate": 8.333333333333333e-07,
      "loss": 1.9077,
      "step": 5
    },
    {
      "epoch": 0.022900763358778626,
      "grad_norm": 1.2681121826171875,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 2.25,
      "step": 6
    },
    {
      "epoch": 0.026717557251908396,
      "grad_norm": 1.410936951637268,
      "learning_rate": 1.1666666666666668e-06,
      "loss": 2.1375,
      "step": 7
    },
    {
      "epoch": 0.030534351145038167,
      "grad_norm": 1.2283226251602173,
      "learning_rate": 1.3333333333333334e-06,
      "loss": 2.0093,
      "step": 8
    },
    {
      "epoch": 0.03435114503816794,
      "grad_norm": 1.3780529499053955,
      "learning_rate": 1.5e-06,
      "loss": 2.2188,
      "step": 9
    },
    {
      "epoch": 0.03816793893129771,
      "grad_norm": 1.2002503871917725,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 2.01,
      "step": 10
    },
    {
      "epoch": 0.04198473282442748,
      "grad_norm": 1.1847559213638306,
      "learning_rate": 1.8333333333333333e-06,
      "loss": 1.9388,
      "step": 11
    },
    {
      "epoch": 0.04580152671755725,
      "grad_norm": 1.1541310548782349,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.9568,
      "step": 12
    },
    {
      "epoch": 0.04961832061068702,
      "grad_norm": 1.1507000923156738,
      "learning_rate": 2.166666666666667e-06,
      "loss": 1.9077,
      "step": 13
    },
    {
      "epoch": 0.05343511450381679,
      "grad_norm": 1.1916567087173462,
      "learning_rate": 2.3333333333333336e-06,
      "loss": 1.9743,
      "step": 14
    },
    {
      "epoch": 0.05725190839694656,
      "grad_norm": 1.2536592483520508,
      "learning_rate": 2.5e-06,
      "loss": 2.0885,
      "step": 15
    },
    {
      "epoch": 0.061068702290076333,
      "grad_norm": 1.296707034111023,
      "learning_rate": 2.666666666666667e-06,
      "loss": 2.3976,
      "step": 16
    },
    {
      "epoch": 0.0648854961832061,
      "grad_norm": 1.2552423477172852,
      "learning_rate": 2.8333333333333335e-06,
      "loss": 1.9453,
      "step": 17
    },
    {
      "epoch": 0.06870229007633588,
      "grad_norm": 1.0085850954055786,
      "learning_rate": 3e-06,
      "loss": 1.1812,
      "step": 18
    },
    {
      "epoch": 0.07251908396946564,
      "grad_norm": 1.083329200744629,
      "learning_rate": 3.1666666666666667e-06,
      "loss": 1.5001,
      "step": 19
    },
    {
      "epoch": 0.07633587786259542,
      "grad_norm": 1.091967225074768,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.5606,
      "step": 20
    },
    {
      "epoch": 0.08015267175572519,
      "grad_norm": 1.0827946662902832,
      "learning_rate": 3.5e-06,
      "loss": 1.5546,
      "step": 21
    },
    {
      "epoch": 0.08396946564885496,
      "grad_norm": 1.137137532234192,
      "learning_rate": 3.6666666666666666e-06,
      "loss": 1.6243,
      "step": 22
    },
    {
      "epoch": 0.08778625954198473,
      "grad_norm": 1.0543386936187744,
      "learning_rate": 3.833333333333334e-06,
      "loss": 1.4913,
      "step": 23
    },
    {
      "epoch": 0.0916030534351145,
      "grad_norm": 1.092016577720642,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.4779,
      "step": 24
    },
    {
      "epoch": 0.09541984732824428,
      "grad_norm": 1.1408716440200806,
      "learning_rate": 4.166666666666667e-06,
      "loss": 1.7049,
      "step": 25
    },
    {
      "epoch": 0.09923664122137404,
      "grad_norm": 1.2250564098358154,
      "learning_rate": 4.333333333333334e-06,
      "loss": 1.6621,
      "step": 26
    },
    {
      "epoch": 0.10305343511450382,
      "grad_norm": 1.0289937257766724,
      "learning_rate": 4.5e-06,
      "loss": 1.5643,
      "step": 27
    },
    {
      "epoch": 0.10687022900763359,
      "grad_norm": 1.1041069030761719,
      "learning_rate": 4.666666666666667e-06,
      "loss": 1.4019,
      "step": 28
    },
    {
      "epoch": 0.11068702290076336,
      "grad_norm": 1.0776981115341187,
      "learning_rate": 4.833333333333333e-06,
      "loss": 1.3278,
      "step": 29
    },
    {
      "epoch": 0.11450381679389313,
      "grad_norm": 1.094165325164795,
      "learning_rate": 5e-06,
      "loss": 1.3775,
      "step": 30
    },
    {
      "epoch": 0.1183206106870229,
      "grad_norm": 1.0802825689315796,
      "learning_rate": 4.997482666353287e-06,
      "loss": 1.3885,
      "step": 31
    },
    {
      "epoch": 0.12213740458015267,
      "grad_norm": 1.177674651145935,
      "learning_rate": 4.989935734988098e-06,
      "loss": 1.4748,
      "step": 32
    },
    {
      "epoch": 0.12595419847328243,
      "grad_norm": 1.188259482383728,
      "learning_rate": 4.977374404419838e-06,
      "loss": 1.5043,
      "step": 33
    },
    {
      "epoch": 0.1297709923664122,
      "grad_norm": 1.1320470571517944,
      "learning_rate": 4.959823971496575e-06,
      "loss": 1.5926,
      "step": 34
    },
    {
      "epoch": 0.13358778625954199,
      "grad_norm": 1.0940425395965576,
      "learning_rate": 4.937319780454559e-06,
      "loss": 1.3909,
      "step": 35
    },
    {
      "epoch": 0.13740458015267176,
      "grad_norm": 1.0931893587112427,
      "learning_rate": 4.909907151739634e-06,
      "loss": 1.5625,
      "step": 36
    },
    {
      "epoch": 0.14122137404580154,
      "grad_norm": 1.0794775485992432,
      "learning_rate": 4.8776412907378845e-06,
      "loss": 1.4332,
      "step": 37
    },
    {
      "epoch": 0.1450381679389313,
      "grad_norm": 1.135691523551941,
      "learning_rate": 4.8405871765993435e-06,
      "loss": 1.4607,
      "step": 38
    },
    {
      "epoch": 0.14885496183206107,
      "grad_norm": 1.143457055091858,
      "learning_rate": 4.7988194313786275e-06,
      "loss": 1.5162,
      "step": 39
    },
    {
      "epoch": 0.15267175572519084,
      "grad_norm": 1.1650444269180298,
      "learning_rate": 4.752422169756048e-06,
      "loss": 1.5281,
      "step": 40
    },
    {
      "epoch": 0.15648854961832062,
      "grad_norm": 1.0388603210449219,
      "learning_rate": 4.701488829641845e-06,
      "loss": 1.2957,
      "step": 41
    },
    {
      "epoch": 0.16030534351145037,
      "grad_norm": 1.0882115364074707,
      "learning_rate": 4.646121984004666e-06,
      "loss": 1.4225,
      "step": 42
    },
    {
      "epoch": 0.16412213740458015,
      "grad_norm": 1.115893840789795,
      "learning_rate": 4.586433134303257e-06,
      "loss": 1.3005,
      "step": 43
    },
    {
      "epoch": 0.16793893129770993,
      "grad_norm": 1.2251698970794678,
      "learning_rate": 4.522542485937369e-06,
      "loss": 1.3565,
      "step": 44
    },
    {
      "epoch": 0.1717557251908397,
      "grad_norm": 1.0498777627944946,
      "learning_rate": 4.454578706170075e-06,
      "loss": 1.1824,
      "step": 45
    },
    {
      "epoch": 0.17557251908396945,
      "grad_norm": 1.0242071151733398,
      "learning_rate": 4.382678665009028e-06,
      "loss": 1.2668,
      "step": 46
    },
    {
      "epoch": 0.17938931297709923,
      "grad_norm": 1.2367092370986938,
      "learning_rate": 4.3069871595684795e-06,
      "loss": 1.2987,
      "step": 47
    },
    {
      "epoch": 0.183206106870229,
      "grad_norm": 1.0739256143569946,
      "learning_rate": 4.227656622467162e-06,
      "loss": 1.3642,
      "step": 48
    },
    {
      "epoch": 0.18702290076335878,
      "grad_norm": 1.1677788496017456,
      "learning_rate": 4.144846814849282e-06,
      "loss": 1.4148,
      "step": 49
    },
    {
      "epoch": 0.19083969465648856,
      "grad_norm": 0.8875530958175659,
      "learning_rate": 4.058724504646834e-06,
      "loss": 1.2016,
      "step": 50
    },
    {
      "epoch": 0.19083969465648856,
      "eval_loss": 1.661128044128418,
      "eval_runtime": 9.665,
      "eval_samples_per_second": 11.381,
      "eval_steps_per_second": 1.449,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7101382096060416.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}