{
"best_metric": 0.8695652173913043,
"best_model_checkpoint": "videomae-base-finetuned-ucf101-subset/checkpoint-1620",
"epoch": 5.166666666666667,
"eval_steps": 500,
"global_step": 1620,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 3.0864197530864196e-06,
"loss": 2.1068,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 6.172839506172839e-06,
"loss": 2.274,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 9.259259259259259e-06,
"loss": 2.2436,
"step": 30
},
{
"epoch": 0.02,
"learning_rate": 1.2345679012345678e-05,
"loss": 2.2524,
"step": 40
},
{
"epoch": 0.03,
"learning_rate": 1.54320987654321e-05,
"loss": 2.1734,
"step": 50
},
{
"epoch": 0.04,
"learning_rate": 1.8518518518518518e-05,
"loss": 2.3132,
"step": 60
},
{
"epoch": 0.04,
"learning_rate": 2.1604938271604937e-05,
"loss": 2.1998,
"step": 70
},
{
"epoch": 0.05,
"learning_rate": 2.4691358024691357e-05,
"loss": 2.0898,
"step": 80
},
{
"epoch": 0.06,
"learning_rate": 2.777777777777778e-05,
"loss": 2.148,
"step": 90
},
{
"epoch": 0.06,
"learning_rate": 3.08641975308642e-05,
"loss": 2.1234,
"step": 100
},
{
"epoch": 0.07,
"learning_rate": 3.395061728395062e-05,
"loss": 2.2654,
"step": 110
},
{
"epoch": 0.07,
"learning_rate": 3.7037037037037037e-05,
"loss": 2.0256,
"step": 120
},
{
"epoch": 0.08,
"learning_rate": 4.012345679012346e-05,
"loss": 2.2243,
"step": 130
},
{
"epoch": 0.09,
"learning_rate": 4.3209876543209875e-05,
"loss": 2.095,
"step": 140
},
{
"epoch": 0.09,
"learning_rate": 4.62962962962963e-05,
"loss": 2.0638,
"step": 150
},
{
"epoch": 0.1,
"learning_rate": 4.938271604938271e-05,
"loss": 1.7901,
"step": 160
},
{
"epoch": 0.1,
"learning_rate": 4.972565157750343e-05,
"loss": 2.0289,
"step": 170
},
{
"epoch": 0.11,
"learning_rate": 4.938271604938271e-05,
"loss": 2.1525,
"step": 180
},
{
"epoch": 0.12,
"learning_rate": 4.9039780521262005e-05,
"loss": 1.8115,
"step": 190
},
{
"epoch": 0.12,
"learning_rate": 4.86968449931413e-05,
"loss": 2.3977,
"step": 200
},
{
"epoch": 0.13,
"learning_rate": 4.835390946502058e-05,
"loss": 1.9927,
"step": 210
},
{
"epoch": 0.14,
"learning_rate": 4.801097393689987e-05,
"loss": 1.7865,
"step": 220
},
{
"epoch": 0.14,
"learning_rate": 4.766803840877915e-05,
"loss": 2.5667,
"step": 230
},
{
"epoch": 0.15,
"learning_rate": 4.732510288065844e-05,
"loss": 2.239,
"step": 240
},
{
"epoch": 0.15,
"learning_rate": 4.6982167352537723e-05,
"loss": 1.7814,
"step": 250
},
{
"epoch": 0.16,
"learning_rate": 4.6639231824417016e-05,
"loss": 1.9021,
"step": 260
},
{
"epoch": 0.17,
"learning_rate": 4.62962962962963e-05,
"loss": 1.6087,
"step": 270
},
{
"epoch": 0.17,
"eval_accuracy": 0.3719806763285024,
"eval_loss": 1.6755967140197754,
"eval_runtime": 23.2696,
"eval_samples_per_second": 8.896,
"eval_steps_per_second": 8.896,
"step": 270
},
{
"epoch": 1.01,
"learning_rate": 4.5953360768175586e-05,
"loss": 1.7744,
"step": 280
},
{
"epoch": 1.01,
"learning_rate": 4.561042524005487e-05,
"loss": 1.6711,
"step": 290
},
{
"epoch": 1.02,
"learning_rate": 4.5267489711934157e-05,
"loss": 2.2113,
"step": 300
},
{
"epoch": 1.02,
"learning_rate": 4.492455418381344e-05,
"loss": 1.2746,
"step": 310
},
{
"epoch": 1.03,
"learning_rate": 4.4581618655692734e-05,
"loss": 1.8943,
"step": 320
},
{
"epoch": 1.04,
"learning_rate": 4.423868312757202e-05,
"loss": 2.0154,
"step": 330
},
{
"epoch": 1.04,
"learning_rate": 4.3895747599451304e-05,
"loss": 1.4239,
"step": 340
},
{
"epoch": 1.05,
"learning_rate": 4.355281207133059e-05,
"loss": 1.7982,
"step": 350
},
{
"epoch": 1.06,
"learning_rate": 4.3209876543209875e-05,
"loss": 1.3468,
"step": 360
},
{
"epoch": 1.06,
"learning_rate": 4.286694101508916e-05,
"loss": 1.9243,
"step": 370
},
{
"epoch": 1.07,
"learning_rate": 4.252400548696845e-05,
"loss": 1.7117,
"step": 380
},
{
"epoch": 1.07,
"learning_rate": 4.2181069958847744e-05,
"loss": 1.9821,
"step": 390
},
{
"epoch": 1.08,
"learning_rate": 4.183813443072703e-05,
"loss": 1.3613,
"step": 400
},
{
"epoch": 1.09,
"learning_rate": 4.1495198902606315e-05,
"loss": 1.3157,
"step": 410
},
{
"epoch": 1.09,
"learning_rate": 4.11522633744856e-05,
"loss": 2.9511,
"step": 420
},
{
"epoch": 1.1,
"learning_rate": 4.0809327846364885e-05,
"loss": 2.1262,
"step": 430
},
{
"epoch": 1.1,
"learning_rate": 4.046639231824417e-05,
"loss": 1.7427,
"step": 440
},
{
"epoch": 1.11,
"learning_rate": 4.012345679012346e-05,
"loss": 1.4527,
"step": 450
},
{
"epoch": 1.12,
"learning_rate": 3.978052126200275e-05,
"loss": 1.4661,
"step": 460
},
{
"epoch": 1.12,
"learning_rate": 3.943758573388203e-05,
"loss": 1.7663,
"step": 470
},
{
"epoch": 1.13,
"learning_rate": 3.909465020576132e-05,
"loss": 1.5297,
"step": 480
},
{
"epoch": 1.14,
"learning_rate": 3.8751714677640603e-05,
"loss": 1.3894,
"step": 490
},
{
"epoch": 1.14,
"learning_rate": 3.840877914951989e-05,
"loss": 1.3816,
"step": 500
},
{
"epoch": 1.15,
"learning_rate": 3.806584362139918e-05,
"loss": 1.2794,
"step": 510
},
{
"epoch": 1.15,
"learning_rate": 3.7722908093278466e-05,
"loss": 1.616,
"step": 520
},
{
"epoch": 1.16,
"learning_rate": 3.737997256515775e-05,
"loss": 1.9916,
"step": 530
},
{
"epoch": 1.17,
"learning_rate": 3.7037037037037037e-05,
"loss": 1.1954,
"step": 540
},
{
"epoch": 1.17,
"eval_accuracy": 0.5990338164251208,
"eval_loss": 1.100610375404358,
"eval_runtime": 23.1997,
"eval_samples_per_second": 8.923,
"eval_steps_per_second": 8.923,
"step": 540
},
{
"epoch": 2.01,
"learning_rate": 3.669410150891632e-05,
"loss": 1.2202,
"step": 550
},
{
"epoch": 2.01,
"learning_rate": 3.635116598079561e-05,
"loss": 0.502,
"step": 560
},
{
"epoch": 2.02,
"learning_rate": 3.60082304526749e-05,
"loss": 1.3205,
"step": 570
},
{
"epoch": 2.02,
"learning_rate": 3.566529492455419e-05,
"loss": 0.7886,
"step": 580
},
{
"epoch": 2.03,
"learning_rate": 3.5322359396433476e-05,
"loss": 1.1766,
"step": 590
},
{
"epoch": 2.04,
"learning_rate": 3.497942386831276e-05,
"loss": 0.7688,
"step": 600
},
{
"epoch": 2.04,
"learning_rate": 3.463648834019205e-05,
"loss": 1.3003,
"step": 610
},
{
"epoch": 2.05,
"learning_rate": 3.429355281207133e-05,
"loss": 0.7499,
"step": 620
},
{
"epoch": 2.06,
"learning_rate": 3.395061728395062e-05,
"loss": 0.4675,
"step": 630
},
{
"epoch": 2.06,
"learning_rate": 3.360768175582991e-05,
"loss": 1.2491,
"step": 640
},
{
"epoch": 2.07,
"learning_rate": 3.3264746227709195e-05,
"loss": 1.1091,
"step": 650
},
{
"epoch": 2.07,
"learning_rate": 3.292181069958848e-05,
"loss": 0.4996,
"step": 660
},
{
"epoch": 2.08,
"learning_rate": 3.2578875171467765e-05,
"loss": 1.112,
"step": 670
},
{
"epoch": 2.09,
"learning_rate": 3.223593964334705e-05,
"loss": 0.6491,
"step": 680
},
{
"epoch": 2.09,
"learning_rate": 3.1893004115226336e-05,
"loss": 0.5949,
"step": 690
},
{
"epoch": 2.1,
"learning_rate": 3.155006858710563e-05,
"loss": 1.13,
"step": 700
},
{
"epoch": 2.1,
"learning_rate": 3.120713305898491e-05,
"loss": 0.2593,
"step": 710
},
{
"epoch": 2.11,
"learning_rate": 3.08641975308642e-05,
"loss": 0.157,
"step": 720
},
{
"epoch": 2.12,
"learning_rate": 3.0521262002743484e-05,
"loss": 0.6718,
"step": 730
},
{
"epoch": 2.12,
"learning_rate": 3.017832647462277e-05,
"loss": 1.2776,
"step": 740
},
{
"epoch": 2.13,
"learning_rate": 2.9835390946502057e-05,
"loss": 0.8115,
"step": 750
},
{
"epoch": 2.14,
"learning_rate": 2.949245541838135e-05,
"loss": 0.5362,
"step": 760
},
{
"epoch": 2.14,
"learning_rate": 2.9149519890260635e-05,
"loss": 0.5839,
"step": 770
},
{
"epoch": 2.15,
"learning_rate": 2.880658436213992e-05,
"loss": 1.6678,
"step": 780
},
{
"epoch": 2.15,
"learning_rate": 2.846364883401921e-05,
"loss": 0.2518,
"step": 790
},
{
"epoch": 2.16,
"learning_rate": 2.8120713305898494e-05,
"loss": 0.805,
"step": 800
},
{
"epoch": 2.17,
"learning_rate": 2.777777777777778e-05,
"loss": 0.9847,
"step": 810
},
{
"epoch": 2.17,
"eval_accuracy": 0.6714975845410628,
"eval_loss": 0.9659531712532043,
"eval_runtime": 23.2172,
"eval_samples_per_second": 8.916,
"eval_steps_per_second": 8.916,
"step": 810
},
{
"epoch": 3.01,
"learning_rate": 2.7434842249657068e-05,
"loss": 0.5909,
"step": 820
},
{
"epoch": 3.01,
"learning_rate": 2.7091906721536353e-05,
"loss": 0.4683,
"step": 830
},
{
"epoch": 3.02,
"learning_rate": 2.6748971193415638e-05,
"loss": 0.767,
"step": 840
},
{
"epoch": 3.02,
"learning_rate": 2.6406035665294927e-05,
"loss": 0.8964,
"step": 850
},
{
"epoch": 3.03,
"learning_rate": 2.6063100137174212e-05,
"loss": 0.1575,
"step": 860
},
{
"epoch": 3.04,
"learning_rate": 2.5720164609053497e-05,
"loss": 0.4401,
"step": 870
},
{
"epoch": 3.04,
"learning_rate": 2.5377229080932786e-05,
"loss": 0.8977,
"step": 880
},
{
"epoch": 3.05,
"learning_rate": 2.503429355281207e-05,
"loss": 0.9708,
"step": 890
},
{
"epoch": 3.06,
"learning_rate": 2.4691358024691357e-05,
"loss": 0.2658,
"step": 900
},
{
"epoch": 3.06,
"learning_rate": 2.434842249657065e-05,
"loss": 0.052,
"step": 910
},
{
"epoch": 3.07,
"learning_rate": 2.4005486968449934e-05,
"loss": 0.0205,
"step": 920
},
{
"epoch": 3.07,
"learning_rate": 2.366255144032922e-05,
"loss": 0.7582,
"step": 930
},
{
"epoch": 3.08,
"learning_rate": 2.3319615912208508e-05,
"loss": 0.0451,
"step": 940
},
{
"epoch": 3.09,
"learning_rate": 2.2976680384087793e-05,
"loss": 0.8926,
"step": 950
},
{
"epoch": 3.09,
"learning_rate": 2.2633744855967078e-05,
"loss": 0.6986,
"step": 960
},
{
"epoch": 3.1,
"learning_rate": 2.2290809327846367e-05,
"loss": 0.5859,
"step": 970
},
{
"epoch": 3.1,
"learning_rate": 2.1947873799725652e-05,
"loss": 0.3363,
"step": 980
},
{
"epoch": 3.11,
"learning_rate": 2.1604938271604937e-05,
"loss": 0.2691,
"step": 990
},
{
"epoch": 3.12,
"learning_rate": 2.1262002743484226e-05,
"loss": 0.495,
"step": 1000
},
{
"epoch": 3.12,
"learning_rate": 2.0919067215363515e-05,
"loss": 0.6676,
"step": 1010
},
{
"epoch": 3.13,
"learning_rate": 2.05761316872428e-05,
"loss": 1.4811,
"step": 1020
},
{
"epoch": 3.14,
"learning_rate": 2.0233196159122085e-05,
"loss": 0.564,
"step": 1030
},
{
"epoch": 3.14,
"learning_rate": 1.9890260631001374e-05,
"loss": 0.6851,
"step": 1040
},
{
"epoch": 3.15,
"learning_rate": 1.954732510288066e-05,
"loss": 0.883,
"step": 1050
},
{
"epoch": 3.15,
"learning_rate": 1.9204389574759944e-05,
"loss": 0.7177,
"step": 1060
},
{
"epoch": 3.16,
"learning_rate": 1.8861454046639233e-05,
"loss": 0.0267,
"step": 1070
},
{
"epoch": 3.17,
"learning_rate": 1.8518518518518518e-05,
"loss": 0.0737,
"step": 1080
},
{
"epoch": 3.17,
"eval_accuracy": 0.8309178743961353,
"eval_loss": 0.6721560955047607,
"eval_runtime": 22.244,
"eval_samples_per_second": 9.306,
"eval_steps_per_second": 9.306,
"step": 1080
},
{
"epoch": 4.01,
"learning_rate": 1.8175582990397804e-05,
"loss": 0.0189,
"step": 1090
},
{
"epoch": 4.01,
"learning_rate": 1.7832647462277096e-05,
"loss": 0.9507,
"step": 1100
},
{
"epoch": 4.02,
"learning_rate": 1.748971193415638e-05,
"loss": 0.0717,
"step": 1110
},
{
"epoch": 4.02,
"learning_rate": 1.7146776406035666e-05,
"loss": 0.2791,
"step": 1120
},
{
"epoch": 4.03,
"learning_rate": 1.6803840877914955e-05,
"loss": 0.2997,
"step": 1130
},
{
"epoch": 4.04,
"learning_rate": 1.646090534979424e-05,
"loss": 0.1088,
"step": 1140
},
{
"epoch": 4.04,
"learning_rate": 1.6117969821673525e-05,
"loss": 0.8001,
"step": 1150
},
{
"epoch": 4.05,
"learning_rate": 1.5775034293552814e-05,
"loss": 0.2641,
"step": 1160
},
{
"epoch": 4.06,
"learning_rate": 1.54320987654321e-05,
"loss": 0.0067,
"step": 1170
},
{
"epoch": 4.06,
"learning_rate": 1.5089163237311384e-05,
"loss": 0.0054,
"step": 1180
},
{
"epoch": 4.07,
"learning_rate": 1.4746227709190675e-05,
"loss": 0.7073,
"step": 1190
},
{
"epoch": 4.07,
"learning_rate": 1.440329218106996e-05,
"loss": 0.2777,
"step": 1200
},
{
"epoch": 4.08,
"learning_rate": 1.4060356652949247e-05,
"loss": 0.014,
"step": 1210
},
{
"epoch": 4.09,
"learning_rate": 1.3717421124828534e-05,
"loss": 0.5616,
"step": 1220
},
{
"epoch": 4.09,
"learning_rate": 1.3374485596707819e-05,
"loss": 0.5849,
"step": 1230
},
{
"epoch": 4.1,
"learning_rate": 1.3031550068587106e-05,
"loss": 0.4199,
"step": 1240
},
{
"epoch": 4.1,
"learning_rate": 1.2688614540466393e-05,
"loss": 1.4511,
"step": 1250
},
{
"epoch": 4.11,
"learning_rate": 1.2345679012345678e-05,
"loss": 0.0497,
"step": 1260
},
{
"epoch": 4.12,
"learning_rate": 1.2002743484224967e-05,
"loss": 0.0345,
"step": 1270
},
{
"epoch": 4.12,
"learning_rate": 1.1659807956104254e-05,
"loss": 0.025,
"step": 1280
},
{
"epoch": 4.13,
"learning_rate": 1.1316872427983539e-05,
"loss": 0.1749,
"step": 1290
},
{
"epoch": 4.14,
"learning_rate": 1.0973936899862826e-05,
"loss": 0.737,
"step": 1300
},
{
"epoch": 4.14,
"learning_rate": 1.0631001371742113e-05,
"loss": 0.0137,
"step": 1310
},
{
"epoch": 4.15,
"learning_rate": 1.02880658436214e-05,
"loss": 0.2798,
"step": 1320
},
{
"epoch": 4.15,
"learning_rate": 9.945130315500687e-06,
"loss": 0.6646,
"step": 1330
},
{
"epoch": 4.16,
"learning_rate": 9.602194787379972e-06,
"loss": 0.4453,
"step": 1340
},
{
"epoch": 4.17,
"learning_rate": 9.259259259259259e-06,
"loss": 0.0054,
"step": 1350
},
{
"epoch": 4.17,
"eval_accuracy": 0.8502415458937198,
"eval_loss": 0.528615415096283,
"eval_runtime": 23.4632,
"eval_samples_per_second": 8.822,
"eval_steps_per_second": 8.822,
"step": 1350
},
{
"epoch": 5.01,
"learning_rate": 8.916323731138548e-06,
"loss": 0.4661,
"step": 1360
},
{
"epoch": 5.01,
"learning_rate": 8.573388203017833e-06,
"loss": 0.004,
"step": 1370
},
{
"epoch": 5.02,
"learning_rate": 8.23045267489712e-06,
"loss": 0.0031,
"step": 1380
},
{
"epoch": 5.02,
"learning_rate": 7.887517146776407e-06,
"loss": 0.5041,
"step": 1390
},
{
"epoch": 5.03,
"learning_rate": 7.544581618655692e-06,
"loss": 0.1095,
"step": 1400
},
{
"epoch": 5.04,
"learning_rate": 7.20164609053498e-06,
"loss": 0.0438,
"step": 1410
},
{
"epoch": 5.04,
"learning_rate": 6.858710562414267e-06,
"loss": 0.4582,
"step": 1420
},
{
"epoch": 5.05,
"learning_rate": 6.515775034293553e-06,
"loss": 0.0087,
"step": 1430
},
{
"epoch": 5.06,
"learning_rate": 6.172839506172839e-06,
"loss": 0.003,
"step": 1440
},
{
"epoch": 5.06,
"learning_rate": 5.829903978052127e-06,
"loss": 0.0177,
"step": 1450
},
{
"epoch": 5.07,
"learning_rate": 5.486968449931413e-06,
"loss": 0.5798,
"step": 1460
},
{
"epoch": 5.07,
"learning_rate": 5.1440329218107e-06,
"loss": 0.0867,
"step": 1470
},
{
"epoch": 5.08,
"learning_rate": 4.801097393689986e-06,
"loss": 0.66,
"step": 1480
},
{
"epoch": 5.09,
"learning_rate": 4.458161865569274e-06,
"loss": 0.003,
"step": 1490
},
{
"epoch": 5.09,
"learning_rate": 4.11522633744856e-06,
"loss": 0.4618,
"step": 1500
},
{
"epoch": 5.1,
"learning_rate": 3.772290809327846e-06,
"loss": 0.2452,
"step": 1510
},
{
"epoch": 5.1,
"learning_rate": 3.4293552812071335e-06,
"loss": 0.0234,
"step": 1520
},
{
"epoch": 5.11,
"learning_rate": 3.0864197530864196e-06,
"loss": 0.007,
"step": 1530
},
{
"epoch": 5.12,
"learning_rate": 2.7434842249657065e-06,
"loss": 0.0066,
"step": 1540
},
{
"epoch": 5.12,
"learning_rate": 2.400548696844993e-06,
"loss": 0.6313,
"step": 1550
},
{
"epoch": 5.13,
"learning_rate": 2.05761316872428e-06,
"loss": 0.0198,
"step": 1560
},
{
"epoch": 5.14,
"learning_rate": 1.7146776406035667e-06,
"loss": 0.5825,
"step": 1570
},
{
"epoch": 5.14,
"learning_rate": 1.3717421124828533e-06,
"loss": 0.6457,
"step": 1580
},
{
"epoch": 5.15,
"learning_rate": 1.02880658436214e-06,
"loss": 0.0335,
"step": 1590
},
{
"epoch": 5.15,
"learning_rate": 6.858710562414266e-07,
"loss": 0.0737,
"step": 1600
},
{
"epoch": 5.16,
"learning_rate": 3.429355281207133e-07,
"loss": 0.7907,
"step": 1610
},
{
"epoch": 5.17,
"learning_rate": 0.0,
"loss": 0.1586,
"step": 1620
},
{
"epoch": 5.17,
"eval_accuracy": 0.8695652173913043,
"eval_loss": 0.5369048118591309,
"eval_runtime": 23.2611,
"eval_samples_per_second": 8.899,
"eval_steps_per_second": 8.899,
"step": 1620
},
{
"epoch": 5.17,
"step": 1620,
"total_flos": 2.0187506515909018e+18,
"train_loss": 0.9597556127292782,
"train_runtime": 491.4225,
"train_samples_per_second": 3.297,
"train_steps_per_second": 3.297
},
{
"epoch": 5.17,
"eval_accuracy": 0.8601036269430051,
"eval_loss": 0.552123486995697,
"eval_runtime": 21.5343,
"eval_samples_per_second": 8.962,
"eval_steps_per_second": 8.962,
"step": 1620
},
{
"epoch": 5.17,
"eval_accuracy": 0.8601036269430051,
"eval_loss": 0.552123486995697,
"eval_runtime": 21.6981,
"eval_samples_per_second": 8.895,
"eval_steps_per_second": 8.895,
"step": 1620
}
],
"logging_steps": 10,
"max_steps": 1620,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 2.0187506515909018e+18,
"trial_name": null,
"trial_params": null
}