{
  "best_metric": 0.6090161800384521,
  "best_model_checkpoint": "autotrain-k2nef-2mp1e/checkpoint-300",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 2.0926642417907715,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.7016,
      "step": 5
    },
    {
      "epoch": 0.1,
      "grad_norm": 4.052657127380371,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.7067,
      "step": 10
    },
    {
      "epoch": 0.15,
      "grad_norm": 5.750674724578857,
      "learning_rate": 2.5e-05,
      "loss": 0.6934,
      "step": 15
    },
    {
      "epoch": 0.2,
      "grad_norm": 3.0751872062683105,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.7022,
      "step": 20
    },
    {
      "epoch": 0.25,
      "grad_norm": 7.624610900878906,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.6635,
      "step": 25
    },
    {
      "epoch": 0.3,
      "grad_norm": 6.501454830169678,
      "learning_rate": 5e-05,
      "loss": 0.7008,
      "step": 30
    },
    {
      "epoch": 0.35,
      "grad_norm": 1.1909492015838623,
      "learning_rate": 4.9074074074074075e-05,
      "loss": 0.7069,
      "step": 35
    },
    {
      "epoch": 0.4,
      "grad_norm": 4.995346546173096,
      "learning_rate": 4.814814814814815e-05,
      "loss": 0.6667,
      "step": 40
    },
    {
      "epoch": 0.45,
      "grad_norm": 4.344069004058838,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.6798,
      "step": 45
    },
    {
      "epoch": 0.5,
      "grad_norm": 2.269436836242676,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.7114,
      "step": 50
    },
    {
      "epoch": 0.55,
      "grad_norm": 3.151176929473877,
      "learning_rate": 4.5370370370370374e-05,
      "loss": 0.6304,
      "step": 55
    },
    {
      "epoch": 0.6,
      "grad_norm": 2.426318407058716,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.7266,
      "step": 60
    },
    {
      "epoch": 0.65,
      "grad_norm": 2.212985038757324,
      "learning_rate": 4.351851851851852e-05,
      "loss": 0.7392,
      "step": 65
    },
    {
      "epoch": 0.7,
      "grad_norm": 1.5283116102218628,
      "learning_rate": 4.259259259259259e-05,
      "loss": 0.7266,
      "step": 70
    },
    {
      "epoch": 0.75,
      "grad_norm": 2.971235752105713,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.6728,
      "step": 75
    },
    {
      "epoch": 0.8,
      "grad_norm": 3.4530913829803467,
      "learning_rate": 4.074074074074074e-05,
      "loss": 0.7161,
      "step": 80
    },
    {
      "epoch": 0.85,
      "grad_norm": 4.985769271850586,
      "learning_rate": 3.981481481481482e-05,
      "loss": 0.6379,
      "step": 85
    },
    {
      "epoch": 0.9,
      "grad_norm": 1.9118386507034302,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.6737,
      "step": 90
    },
    {
      "epoch": 0.95,
      "grad_norm": 3.0985093116760254,
      "learning_rate": 3.7962962962962964e-05,
      "loss": 0.6806,
      "step": 95
    },
    {
      "epoch": 1.0,
      "grad_norm": 8.572888374328613,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.8093,
      "step": 100
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.525,
      "eval_auc": 0.6939348370927318,
      "eval_f1": 0.6885245901639344,
      "eval_loss": 0.6925095319747925,
      "eval_precision": 0.525,
      "eval_recall": 1.0,
      "eval_runtime": 35.807,
      "eval_samples_per_second": 5.585,
      "eval_steps_per_second": 0.363,
      "step": 100
    },
    {
      "epoch": 1.05,
      "grad_norm": 1.2586256265640259,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.716,
      "step": 105
    },
    {
      "epoch": 1.1,
      "grad_norm": 1.7259572744369507,
      "learning_rate": 3.518518518518519e-05,
      "loss": 0.6773,
      "step": 110
    },
    {
      "epoch": 1.15,
      "grad_norm": 1.5281599760055542,
      "learning_rate": 3.425925925925926e-05,
      "loss": 0.7073,
      "step": 115
    },
    {
      "epoch": 1.2,
      "grad_norm": 2.143573760986328,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.6748,
      "step": 120
    },
    {
      "epoch": 1.25,
      "grad_norm": 2.464813709259033,
      "learning_rate": 3.240740740740741e-05,
      "loss": 0.7014,
      "step": 125
    },
    {
      "epoch": 1.3,
      "grad_norm": 3.4055287837982178,
      "learning_rate": 3.148148148148148e-05,
      "loss": 0.6871,
      "step": 130
    },
    {
      "epoch": 1.35,
      "grad_norm": 2.0612735748291016,
      "learning_rate": 3.055555555555556e-05,
      "loss": 0.6959,
      "step": 135
    },
    {
      "epoch": 1.4,
      "grad_norm": 4.995764255523682,
      "learning_rate": 2.962962962962963e-05,
      "loss": 0.6947,
      "step": 140
    },
    {
      "epoch": 1.45,
      "grad_norm": 5.578982830047607,
      "learning_rate": 2.8703703703703706e-05,
      "loss": 0.6397,
      "step": 145
    },
    {
      "epoch": 1.5,
      "grad_norm": 13.800985336303711,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.7913,
      "step": 150
    },
    {
      "epoch": 1.55,
      "grad_norm": 6.1693806648254395,
      "learning_rate": 2.6851851851851855e-05,
      "loss": 0.7603,
      "step": 155
    },
    {
      "epoch": 1.6,
      "grad_norm": 2.4309499263763428,
      "learning_rate": 2.5925925925925925e-05,
      "loss": 0.6809,
      "step": 160
    },
    {
      "epoch": 1.65,
      "grad_norm": 3.3699936866760254,
      "learning_rate": 2.5e-05,
      "loss": 0.702,
      "step": 165
    },
    {
      "epoch": 1.7,
      "grad_norm": 7.083240032196045,
      "learning_rate": 2.4074074074074074e-05,
      "loss": 0.7349,
      "step": 170
    },
    {
      "epoch": 1.75,
      "grad_norm": 3.991142988204956,
      "learning_rate": 2.314814814814815e-05,
      "loss": 0.6858,
      "step": 175
    },
    {
      "epoch": 1.8,
      "grad_norm": 2.0945520401000977,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.7108,
      "step": 180
    },
    {
      "epoch": 1.85,
      "grad_norm": 4.659959316253662,
      "learning_rate": 2.1296296296296296e-05,
      "loss": 0.6984,
      "step": 185
    },
    {
      "epoch": 1.9,
      "grad_norm": 5.092476844787598,
      "learning_rate": 2.037037037037037e-05,
      "loss": 0.7022,
      "step": 190
    },
    {
      "epoch": 1.95,
      "grad_norm": 3.881072521209717,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 0.6897,
      "step": 195
    },
    {
      "epoch": 2.0,
      "grad_norm": 2.4203667640686035,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.7064,
      "step": 200
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.53,
      "eval_auc": 0.7506766917293233,
      "eval_f1": 0.6907894736842105,
      "eval_loss": 0.6854195594787598,
      "eval_precision": 0.5276381909547738,
      "eval_recall": 1.0,
      "eval_runtime": 35.8907,
      "eval_samples_per_second": 5.572,
      "eval_steps_per_second": 0.362,
      "step": 200
    },
    {
      "epoch": 2.05,
      "grad_norm": 4.770285606384277,
      "learning_rate": 1.7592592592592595e-05,
      "loss": 0.688,
      "step": 205
    },
    {
      "epoch": 2.1,
      "grad_norm": 2.5155324935913086,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.6474,
      "step": 210
    },
    {
      "epoch": 2.15,
      "grad_norm": 2.7012557983398438,
      "learning_rate": 1.574074074074074e-05,
      "loss": 0.7223,
      "step": 215
    },
    {
      "epoch": 2.2,
      "grad_norm": 4.726611614227295,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 0.6941,
      "step": 220
    },
    {
      "epoch": 2.25,
      "grad_norm": 1.7803250551223755,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.6845,
      "step": 225
    },
    {
      "epoch": 2.3,
      "grad_norm": 11.724671363830566,
      "learning_rate": 1.2962962962962962e-05,
      "loss": 0.6677,
      "step": 230
    },
    {
      "epoch": 2.35,
      "grad_norm": 5.12057638168335,
      "learning_rate": 1.2037037037037037e-05,
      "loss": 0.6699,
      "step": 235
    },
    {
      "epoch": 2.4,
      "grad_norm": 4.75656270980835,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.7428,
      "step": 240
    },
    {
      "epoch": 2.45,
      "grad_norm": 7.563472747802734,
      "learning_rate": 1.0185185185185185e-05,
      "loss": 0.6592,
      "step": 245
    },
    {
      "epoch": 2.5,
      "grad_norm": 1.8900893926620483,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.6835,
      "step": 250
    },
    {
      "epoch": 2.55,
      "grad_norm": 6.5432024002075195,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.6682,
      "step": 255
    },
    {
      "epoch": 2.6,
      "grad_norm": 3.4254777431488037,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.7187,
      "step": 260
    },
    {
      "epoch": 2.65,
      "grad_norm": 5.301848411560059,
      "learning_rate": 6.481481481481481e-06,
      "loss": 0.6392,
      "step": 265
    },
    {
      "epoch": 2.7,
      "grad_norm": 2.3317384719848633,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.6316,
      "step": 270
    },
    {
      "epoch": 2.75,
      "grad_norm": 4.123948574066162,
      "learning_rate": 4.6296296296296296e-06,
      "loss": 0.6764,
      "step": 275
    },
    {
      "epoch": 2.8,
      "grad_norm": 6.580291748046875,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 0.6914,
      "step": 280
    },
    {
      "epoch": 2.85,
      "grad_norm": 6.623916149139404,
      "learning_rate": 2.777777777777778e-06,
      "loss": 0.5946,
      "step": 285
    },
    {
      "epoch": 2.9,
      "grad_norm": 13.349700927734375,
      "learning_rate": 1.8518518518518519e-06,
      "loss": 0.6564,
      "step": 290
    },
    {
      "epoch": 2.95,
      "grad_norm": 29.791109085083008,
      "learning_rate": 9.259259259259259e-07,
      "loss": 0.613,
      "step": 295
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.5324182510375977,
      "learning_rate": 0.0,
      "loss": 0.5895,
      "step": 300
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.74,
      "eval_auc": 0.8025062656641604,
      "eval_f1": 0.7886178861788617,
      "eval_loss": 0.6090161800384521,
      "eval_precision": 0.6879432624113475,
      "eval_recall": 0.9238095238095239,
      "eval_runtime": 40.0605,
      "eval_samples_per_second": 4.992,
      "eval_steps_per_second": 0.325,
      "step": 300
    }
  ],
  "logging_steps": 5,
  "max_steps": 300,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 157866633216000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}