{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9998338042213728,
  "eval_steps": 500,
  "global_step": 47,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.021273059664284527,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.4212,
      "step": 1
    },
    {
      "epoch": 0.042546119328569054,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.4212,
      "step": 2
    },
    {
      "epoch": 0.06381917899285358,
      "learning_rate": 1e-05,
      "loss": 0.4147,
      "step": 3
    },
    {
      "epoch": 0.08509223865713811,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.3917,
      "step": 4
    },
    {
      "epoch": 0.10636529832142264,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.3838,
      "step": 5
    },
    {
      "epoch": 0.12763835798570716,
      "learning_rate": 2e-05,
      "loss": 0.3703,
      "step": 6
    },
    {
      "epoch": 0.1489114176499917,
      "learning_rate": 2.3333333333333336e-05,
      "loss": 0.3615,
      "step": 7
    },
    {
      "epoch": 0.17018447731427622,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.3562,
      "step": 8
    },
    {
      "epoch": 0.19145753697856074,
      "learning_rate": 3e-05,
      "loss": 0.3718,
      "step": 9
    },
    {
      "epoch": 0.21273059664284527,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.404,
      "step": 10
    },
    {
      "epoch": 0.2340036563071298,
      "learning_rate": 3.6666666666666666e-05,
      "loss": 0.38,
      "step": 11
    },
    {
      "epoch": 0.2552767159714143,
      "learning_rate": 4e-05,
      "loss": 0.5008,
      "step": 12
    },
    {
      "epoch": 0.2765497756356989,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.4064,
      "step": 13
    },
    {
      "epoch": 0.2978228352999834,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.3862,
      "step": 14
    },
    {
      "epoch": 0.31909589496426793,
      "learning_rate": 5e-05,
      "loss": 0.4611,
      "step": 15
    },
    {
      "epoch": 0.34036895462855243,
      "learning_rate": 4.8437500000000005e-05,
      "loss": 0.3895,
      "step": 16
    },
    {
      "epoch": 0.361642014292837,
      "learning_rate": 4.6875e-05,
      "loss": 0.3724,
      "step": 17
    },
    {
      "epoch": 0.3829150739571215,
      "learning_rate": 4.5312500000000004e-05,
      "loss": 0.3525,
      "step": 18
    },
    {
      "epoch": 0.40418813362140604,
      "learning_rate": 4.375e-05,
      "loss": 0.345,
      "step": 19
    },
    {
      "epoch": 0.42546119328569054,
      "learning_rate": 4.21875e-05,
      "loss": 0.3351,
      "step": 20
    },
    {
      "epoch": 0.4467342529499751,
      "learning_rate": 4.0625000000000005e-05,
      "loss": 0.3275,
      "step": 21
    },
    {
      "epoch": 0.4680073126142596,
      "learning_rate": 3.90625e-05,
      "loss": 0.3232,
      "step": 22
    },
    {
      "epoch": 0.48928037227854415,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.3338,
      "step": 23
    },
    {
      "epoch": 0.5105534319428287,
      "learning_rate": 3.59375e-05,
      "loss": 0.3233,
      "step": 24
    },
    {
      "epoch": 0.5318264916071131,
      "learning_rate": 3.4375e-05,
      "loss": 0.3256,
      "step": 25
    },
    {
      "epoch": 0.5530995512713978,
      "learning_rate": 3.2812500000000005e-05,
      "loss": 0.3273,
      "step": 26
    },
    {
      "epoch": 0.5743726109356823,
      "learning_rate": 3.125e-05,
      "loss": 0.3289,
      "step": 27
    },
    {
      "epoch": 0.5956456705999668,
      "learning_rate": 2.96875e-05,
      "loss": 0.3188,
      "step": 28
    },
    {
      "epoch": 0.6169187302642513,
      "learning_rate": 2.8125000000000003e-05,
      "loss": 0.318,
      "step": 29
    },
    {
      "epoch": 0.6381917899285359,
      "learning_rate": 2.6562500000000002e-05,
      "loss": 0.318,
      "step": 30
    },
    {
      "epoch": 0.6594648495928204,
      "learning_rate": 2.5e-05,
      "loss": 0.3218,
      "step": 31
    },
    {
      "epoch": 0.6807379092571049,
      "learning_rate": 2.34375e-05,
      "loss": 0.3187,
      "step": 32
    },
    {
      "epoch": 0.7020109689213894,
      "learning_rate": 2.1875e-05,
      "loss": 0.3213,
      "step": 33
    },
    {
      "epoch": 0.723284028585674,
      "learning_rate": 2.0312500000000002e-05,
      "loss": 0.3152,
      "step": 34
    },
    {
      "epoch": 0.7445570882499585,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 0.3173,
      "step": 35
    },
    {
      "epoch": 0.765830147914243,
      "learning_rate": 1.71875e-05,
      "loss": 0.311,
      "step": 36
    },
    {
      "epoch": 0.7871032075785275,
      "learning_rate": 1.5625e-05,
      "loss": 0.3177,
      "step": 37
    },
    {
      "epoch": 0.8083762672428121,
      "learning_rate": 1.4062500000000001e-05,
      "loss": 0.318,
      "step": 38
    },
    {
      "epoch": 0.8296493269070966,
      "learning_rate": 1.25e-05,
      "loss": 0.3116,
      "step": 39
    },
    {
      "epoch": 0.8509223865713811,
      "learning_rate": 1.09375e-05,
      "loss": 0.3077,
      "step": 40
    },
    {
      "epoch": 0.8721954462356656,
      "learning_rate": 9.375000000000001e-06,
      "loss": 0.3127,
      "step": 41
    },
    {
      "epoch": 0.8934685058999502,
      "learning_rate": 7.8125e-06,
      "loss": 0.3209,
      "step": 42
    },
    {
      "epoch": 0.9147415655642347,
      "learning_rate": 6.25e-06,
      "loss": 0.3118,
      "step": 43
    },
    {
      "epoch": 0.9360146252285192,
      "learning_rate": 4.6875000000000004e-06,
      "loss": 0.3174,
      "step": 44
    },
    {
      "epoch": 0.9572876848928037,
      "learning_rate": 3.125e-06,
      "loss": 0.3233,
      "step": 45
    },
    {
      "epoch": 0.9785607445570883,
      "learning_rate": 1.5625e-06,
      "loss": 0.3267,
      "step": 46
    },
    {
      "epoch": 0.9998338042213728,
      "learning_rate": 0.0,
      "loss": 0.3177,
      "step": 47
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 47,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1220801562030899e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}