{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 100,
  "global_step": 621,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04830917874396135,
      "grad_norm": 30.896953582763672,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 0.6247,
      "step": 10
    },
    {
      "epoch": 0.0966183574879227,
      "grad_norm": 21.597583770751953,
      "learning_rate": 8.000000000000001e-07,
      "loss": 0.5939,
      "step": 20
    },
    {
      "epoch": 0.14492753623188406,
      "grad_norm": 24.991493225097656,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 0.5012,
      "step": 30
    },
    {
      "epoch": 0.1932367149758454,
      "grad_norm": 12.620665550231934,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.3985,
      "step": 40
    },
    {
      "epoch": 0.24154589371980675,
      "grad_norm": 13.614625930786133,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.2872,
      "step": 50
    },
    {
      "epoch": 0.2898550724637681,
      "grad_norm": 8.550847053527832,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.2276,
      "step": 60
    },
    {
      "epoch": 0.33816425120772947,
      "grad_norm": 4.672711372375488,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 0.163,
      "step": 70
    },
    {
      "epoch": 0.3864734299516908,
      "grad_norm": 2.859722137451172,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.116,
      "step": 80
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 1.972380518913269,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.0859,
      "step": 90
    },
    {
      "epoch": 0.4830917874396135,
      "grad_norm": 1.3796651363372803,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0546,
      "step": 100
    },
    {
      "epoch": 0.4830917874396135,
      "eval_accuracy": 0.9902200488997555,
      "eval_accuracy_label_Clickbait": 0.9866220735785953,
      "eval_accuracy_label_Factual": 0.9922928709055877,
      "eval_f1": 0.9902200488997555,
      "eval_loss": 0.05041695758700371,
      "eval_precision": 0.9902200488997555,
      "eval_recall": 0.9902200488997555,
      "eval_runtime": 7.1469,
      "eval_samples_per_second": 114.455,
      "eval_steps_per_second": 7.276,
      "step": 100
    },
    {
      "epoch": 0.5314009661835749,
      "grad_norm": 0.7740299105644226,
      "learning_rate": 4.4e-06,
      "loss": 0.0426,
      "step": 110
    },
    {
      "epoch": 0.5797101449275363,
      "grad_norm": 0.6256905794143677,
      "learning_rate": 4.800000000000001e-06,
      "loss": 0.0456,
      "step": 120
    },
    {
      "epoch": 0.6280193236714976,
      "grad_norm": 9.98182201385498,
      "learning_rate": 5.2e-06,
      "loss": 0.0372,
      "step": 130
    },
    {
      "epoch": 0.6763285024154589,
      "grad_norm": 0.35714802145957947,
      "learning_rate": 5.600000000000001e-06,
      "loss": 0.0328,
      "step": 140
    },
    {
      "epoch": 0.7246376811594203,
      "grad_norm": 0.33720284700393677,
      "learning_rate": 6e-06,
      "loss": 0.0302,
      "step": 150
    },
    {
      "epoch": 0.7729468599033816,
      "grad_norm": 0.48025211691856384,
      "learning_rate": 6.4000000000000006e-06,
      "loss": 0.0284,
      "step": 160
    },
    {
      "epoch": 0.821256038647343,
      "grad_norm": 0.233476921916008,
      "learning_rate": 6.800000000000001e-06,
      "loss": 0.0049,
      "step": 170
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 9.039719581604004,
      "learning_rate": 7.2000000000000005e-06,
      "loss": 0.0278,
      "step": 180
    },
    {
      "epoch": 0.9178743961352657,
      "grad_norm": 0.14749686419963837,
      "learning_rate": 7.600000000000001e-06,
      "loss": 0.0186,
      "step": 190
    },
    {
      "epoch": 0.966183574879227,
      "grad_norm": 0.09157315641641617,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.0071,
      "step": 200
    },
    {
      "epoch": 0.966183574879227,
      "eval_accuracy": 0.9987775061124694,
      "eval_accuracy_label_Clickbait": 0.9966555183946488,
      "eval_accuracy_label_Factual": 1.0,
      "eval_f1": 0.998777070551364,
      "eval_loss": 0.006007679738104343,
      "eval_precision": 0.9987798570622531,
      "eval_recall": 0.9987775061124694,
      "eval_runtime": 6.8277,
      "eval_samples_per_second": 119.807,
      "eval_steps_per_second": 7.616,
      "step": 200
    },
    {
      "epoch": 1.0144927536231885,
      "grad_norm": 3.1745381355285645,
      "learning_rate": 8.400000000000001e-06,
      "loss": 0.0201,
      "step": 210
    },
    {
      "epoch": 1.0628019323671498,
      "grad_norm": 0.12533485889434814,
      "learning_rate": 8.8e-06,
      "loss": 0.0205,
      "step": 220
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 0.12013684958219528,
      "learning_rate": 9.200000000000002e-06,
      "loss": 0.0016,
      "step": 230
    },
    {
      "epoch": 1.1594202898550725,
      "grad_norm": 0.05909249931573868,
      "learning_rate": 9.600000000000001e-06,
      "loss": 0.0138,
      "step": 240
    },
    {
      "epoch": 1.2077294685990339,
      "grad_norm": 0.05546995997428894,
      "learning_rate": 1e-05,
      "loss": 0.0013,
      "step": 250
    },
    {
      "epoch": 1.2560386473429952,
      "grad_norm": 0.06852398812770844,
      "learning_rate": 1.04e-05,
      "loss": 0.0146,
      "step": 260
    },
    {
      "epoch": 1.3043478260869565,
      "grad_norm": 0.040230970829725266,
      "learning_rate": 1.0800000000000002e-05,
      "loss": 0.0012,
      "step": 270
    },
    {
      "epoch": 1.3526570048309179,
      "grad_norm": 0.8520398139953613,
      "learning_rate": 1.1200000000000001e-05,
      "loss": 0.0051,
      "step": 280
    },
    {
      "epoch": 1.4009661835748792,
      "grad_norm": 0.039729055017232895,
      "learning_rate": 1.16e-05,
      "loss": 0.0221,
      "step": 290
    },
    {
      "epoch": 1.4492753623188406,
      "grad_norm": 0.0512034147977829,
      "learning_rate": 1.2e-05,
      "loss": 0.0008,
      "step": 300
    },
    {
      "epoch": 1.4492753623188406,
      "eval_accuracy": 0.9975550122249389,
      "eval_accuracy_label_Clickbait": 0.9933110367892977,
      "eval_accuracy_label_Factual": 1.0,
      "eval_f1": 0.997553260846241,
      "eval_loss": 0.008844349533319473,
      "eval_precision": 0.9975643979745553,
      "eval_recall": 0.9975550122249389,
      "eval_runtime": 6.9281,
      "eval_samples_per_second": 118.07,
      "eval_steps_per_second": 7.506,
      "step": 300
    },
    {
      "epoch": 1.497584541062802,
      "grad_norm": 0.028753971680998802,
      "learning_rate": 1.2400000000000002e-05,
      "loss": 0.0068,
      "step": 310
    },
    {
      "epoch": 1.5458937198067633,
      "grad_norm": 0.022516731172800064,
      "learning_rate": 1.2800000000000001e-05,
      "loss": 0.0149,
      "step": 320
    },
    {
      "epoch": 1.5942028985507246,
      "grad_norm": 0.04537273570895195,
      "learning_rate": 1.3200000000000002e-05,
      "loss": 0.0009,
      "step": 330
    },
    {
      "epoch": 1.642512077294686,
      "grad_norm": 0.03621845692396164,
      "learning_rate": 1.3600000000000002e-05,
      "loss": 0.0352,
      "step": 340
    },
    {
      "epoch": 1.6908212560386473,
      "grad_norm": 0.03534127026796341,
      "learning_rate": 1.4e-05,
      "loss": 0.0212,
      "step": 350
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 0.16322049498558044,
      "learning_rate": 1.4400000000000001e-05,
      "loss": 0.0064,
      "step": 360
    },
    {
      "epoch": 1.78743961352657,
      "grad_norm": 0.024673329666256905,
      "learning_rate": 1.48e-05,
      "loss": 0.0083,
      "step": 370
    },
    {
      "epoch": 1.8357487922705316,
      "grad_norm": 0.021423442289233208,
      "learning_rate": 1.5200000000000002e-05,
      "loss": 0.0005,
      "step": 380
    },
    {
      "epoch": 1.8840579710144927,
      "grad_norm": 5.73327112197876,
      "learning_rate": 1.5600000000000003e-05,
      "loss": 0.0498,
      "step": 390
    },
    {
      "epoch": 1.9323671497584543,
      "grad_norm": 0.03400229662656784,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.0006,
      "step": 400
    },
    {
      "epoch": 1.9323671497584543,
      "eval_accuracy": 0.9938875305623472,
      "eval_accuracy_label_Clickbait": 0.9832775919732442,
      "eval_accuracy_label_Factual": 1.0,
      "eval_f1": 0.9938764124714249,
      "eval_loss": 0.030972076579928398,
      "eval_precision": 0.9939458556524011,
      "eval_recall": 0.9938875305623472,
      "eval_runtime": 6.6447,
      "eval_samples_per_second": 123.106,
      "eval_steps_per_second": 7.826,
      "step": 400
    },
    {
      "epoch": 1.9806763285024154,
      "grad_norm": 0.08127538114786148,
      "learning_rate": 1.64e-05,
      "loss": 0.0331,
      "step": 410
    },
    {
      "epoch": 2.028985507246377,
      "grad_norm": 0.015063919126987457,
      "learning_rate": 1.6800000000000002e-05,
      "loss": 0.0005,
      "step": 420
    },
    {
      "epoch": 2.077294685990338,
      "grad_norm": 0.017816560342907906,
      "learning_rate": 1.72e-05,
      "loss": 0.0004,
      "step": 430
    },
    {
      "epoch": 2.1256038647342996,
      "grad_norm": 71.2354736328125,
      "learning_rate": 1.76e-05,
      "loss": 0.0167,
      "step": 440
    },
    {
      "epoch": 2.1739130434782608,
      "grad_norm": 1.164095401763916,
      "learning_rate": 1.8e-05,
      "loss": 0.0085,
      "step": 450
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 0.012879505753517151,
      "learning_rate": 1.8400000000000003e-05,
      "loss": 0.0071,
      "step": 460
    },
    {
      "epoch": 2.2705314009661834,
      "grad_norm": 0.011784674599766731,
      "learning_rate": 1.88e-05,
      "loss": 0.0003,
      "step": 470
    },
    {
      "epoch": 2.318840579710145,
      "grad_norm": 0.01087210513651371,
      "learning_rate": 1.9200000000000003e-05,
      "loss": 0.0063,
      "step": 480
    },
    {
      "epoch": 2.367149758454106,
      "grad_norm": 0.007153944578021765,
      "learning_rate": 1.9600000000000002e-05,
      "loss": 0.0005,
      "step": 490
    },
    {
      "epoch": 2.4154589371980677,
      "grad_norm": 0.009129374288022518,
      "learning_rate": 2e-05,
      "loss": 0.0007,
      "step": 500
    },
    {
      "epoch": 2.4154589371980677,
      "eval_accuracy": 1.0,
      "eval_accuracy_label_Clickbait": 1.0,
      "eval_accuracy_label_Factual": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 0.00023450622393283993,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 7.0062,
      "eval_samples_per_second": 116.753,
      "eval_steps_per_second": 7.422,
      "step": 500
    },
    {
      "epoch": 2.463768115942029,
      "grad_norm": 0.01728486455976963,
      "learning_rate": 1.834710743801653e-05,
      "loss": 0.0773,
      "step": 510
    },
    {
      "epoch": 2.5120772946859904,
      "grad_norm": 0.01601503975689411,
      "learning_rate": 1.669421487603306e-05,
      "loss": 0.0174,
      "step": 520
    },
    {
      "epoch": 2.5603864734299515,
      "grad_norm": 7.45504903793335,
      "learning_rate": 1.504132231404959e-05,
      "loss": 0.024,
      "step": 530
    },
    {
      "epoch": 2.608695652173913,
      "grad_norm": 0.12504085898399353,
      "learning_rate": 1.3388429752066117e-05,
      "loss": 0.0664,
      "step": 540
    },
    {
      "epoch": 2.6570048309178746,
      "grad_norm": 0.02562067098915577,
      "learning_rate": 1.1735537190082646e-05,
      "loss": 0.001,
      "step": 550
    },
    {
      "epoch": 2.7053140096618358,
      "grad_norm": 0.01268259808421135,
      "learning_rate": 1.0082644628099174e-05,
      "loss": 0.0003,
      "step": 560
    },
    {
      "epoch": 2.753623188405797,
      "grad_norm": 0.00972472783178091,
      "learning_rate": 8.429752066115703e-06,
      "loss": 0.0134,
      "step": 570
    },
    {
      "epoch": 2.8019323671497585,
      "grad_norm": 0.009192025288939476,
      "learning_rate": 6.776859504132232e-06,
      "loss": 0.0002,
      "step": 580
    },
    {
      "epoch": 2.85024154589372,
      "grad_norm": 0.009301126934587955,
      "learning_rate": 5.12396694214876e-06,
      "loss": 0.0005,
      "step": 590
    },
    {
      "epoch": 2.898550724637681,
      "grad_norm": 0.007780455518513918,
      "learning_rate": 3.4710743801652895e-06,
      "loss": 0.0009,
      "step": 600
    },
    {
      "epoch": 2.898550724637681,
      "eval_accuracy": 0.9987775061124694,
      "eval_accuracy_label_Clickbait": 0.9966555183946488,
      "eval_accuracy_label_Factual": 1.0,
      "eval_f1": 0.998777070551364,
      "eval_loss": 0.007947328500449657,
      "eval_precision": 0.9987798570622531,
      "eval_recall": 0.9987775061124694,
      "eval_runtime": 7.418,
      "eval_samples_per_second": 110.273,
      "eval_steps_per_second": 7.01,
      "step": 600
    },
    {
      "epoch": 2.9468599033816423,
      "grad_norm": 0.008009534329175949,
      "learning_rate": 1.8181818181818183e-06,
      "loss": 0.0002,
      "step": 610
    },
    {
      "epoch": 2.995169082125604,
      "grad_norm": 0.01077974308282137,
      "learning_rate": 1.6528925619834713e-07,
      "loss": 0.0002,
      "step": 620
    },
    {
      "epoch": 3.0,
      "step": 621,
      "total_flos": 29633646182400.0,
      "train_loss": 0.06199527310998867,
      "train_runtime": 500.1208,
      "train_samples_per_second": 39.674,
      "train_steps_per_second": 1.242
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9975550122249389,
      "eval_accuracy_label_Clickbait": 0.9933110367892977,
      "eval_accuracy_label_Factual": 1.0,
      "eval_f1": 0.997553260846241,
      "eval_loss": 0.012968887574970722,
      "eval_precision": 0.9975643979745553,
      "eval_recall": 0.9975550122249389,
      "eval_runtime": 7.9435,
      "eval_samples_per_second": 102.978,
      "eval_steps_per_second": 6.546,
      "step": 621
    }
  ],
  "logging_steps": 10,
  "max_steps": 621,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 29633646182400.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}