PersianSentiment / checkpoint-3576 / trainer_state.json
{
"best_metric": 0.8825053995680345,
"best_model_checkpoint": "./sentiment_model/checkpoint-3129",
"epoch": 8.0,
"eval_steps": 500,
"global_step": 3576,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11185682326621924,
"grad_norm": 18.68804168701172,
"learning_rate": 5.3691275167785235e-06,
"loss": 4.4686,
"step": 50
},
{
"epoch": 0.22371364653243847,
"grad_norm": 33.30501937866211,
"learning_rate": 1.0738255033557047e-05,
"loss": 4.0139,
"step": 100
},
{
"epoch": 0.33557046979865773,
"grad_norm": 48.02063751220703,
"learning_rate": 1.633109619686801e-05,
"loss": 2.9933,
"step": 150
},
{
"epoch": 0.44742729306487694,
"grad_norm": 35.39257049560547,
"learning_rate": 2.192393736017897e-05,
"loss": 2.5147,
"step": 200
},
{
"epoch": 0.5592841163310962,
"grad_norm": 39.318363189697266,
"learning_rate": 2.7404921700223713e-05,
"loss": 2.4221,
"step": 250
},
{
"epoch": 0.6711409395973155,
"grad_norm": 50.962833404541016,
"learning_rate": 3.2997762863534674e-05,
"loss": 2.3124,
"step": 300
},
{
"epoch": 0.7829977628635347,
"grad_norm": 37.32981491088867,
"learning_rate": 3.859060402684564e-05,
"loss": 2.4092,
"step": 350
},
{
"epoch": 0.8948545861297539,
"grad_norm": 32.42803955078125,
"learning_rate": 4.4183445190156604e-05,
"loss": 2.4183,
"step": 400
},
{
"epoch": 1.0,
"eval_accuracy": 0.7844492440604751,
"eval_loss": 0.5040740370750427,
"eval_runtime": 19.6714,
"eval_samples_per_second": 117.683,
"eval_steps_per_second": 3.711,
"step": 447
},
{
"epoch": 1.0067114093959733,
"grad_norm": 20.918420791625977,
"learning_rate": 4.977628635346757e-05,
"loss": 2.3532,
"step": 450
},
{
"epoch": 1.1185682326621924,
"grad_norm": 28.06084442138672,
"learning_rate": 4.94034302759135e-05,
"loss": 2.1244,
"step": 500
},
{
"epoch": 1.2304250559284116,
"grad_norm": 40.768096923828125,
"learning_rate": 4.8782003479990054e-05,
"loss": 2.3004,
"step": 550
},
{
"epoch": 1.342281879194631,
"grad_norm": 19.545913696289062,
"learning_rate": 4.816057668406662e-05,
"loss": 2.2399,
"step": 600
},
{
"epoch": 1.45413870246085,
"grad_norm": 69.29216766357422,
"learning_rate": 4.753914988814318e-05,
"loss": 2.1458,
"step": 650
},
{
"epoch": 1.5659955257270695,
"grad_norm": 29.00174903869629,
"learning_rate": 4.691772309221974e-05,
"loss": 2.0778,
"step": 700
},
{
"epoch": 1.6778523489932886,
"grad_norm": 28.77162742614746,
"learning_rate": 4.62962962962963e-05,
"loss": 2.1317,
"step": 750
},
{
"epoch": 1.7897091722595078,
"grad_norm": 36.04485321044922,
"learning_rate": 4.5674869500372856e-05,
"loss": 2.1345,
"step": 800
},
{
"epoch": 1.901565995525727,
"grad_norm": 51.31829071044922,
"learning_rate": 4.505344270444942e-05,
"loss": 2.2048,
"step": 850
},
{
"epoch": 2.0,
"eval_accuracy": 0.8138228941684665,
"eval_loss": 0.43246370553970337,
"eval_runtime": 19.9941,
"eval_samples_per_second": 115.784,
"eval_steps_per_second": 3.651,
"step": 894
},
{
"epoch": 2.0134228187919465,
"grad_norm": 19.70203971862793,
"learning_rate": 4.443201590852597e-05,
"loss": 2.0482,
"step": 900
},
{
"epoch": 2.1252796420581657,
"grad_norm": 44.575721740722656,
"learning_rate": 4.381058911260254e-05,
"loss": 1.8304,
"step": 950
},
{
"epoch": 2.237136465324385,
"grad_norm": 48.66209411621094,
"learning_rate": 4.31891623166791e-05,
"loss": 1.7784,
"step": 1000
},
{
"epoch": 2.348993288590604,
"grad_norm": 19.984678268432617,
"learning_rate": 4.256773552075566e-05,
"loss": 1.8457,
"step": 1050
},
{
"epoch": 2.460850111856823,
"grad_norm": 64.9332275390625,
"learning_rate": 4.194630872483222e-05,
"loss": 1.7747,
"step": 1100
},
{
"epoch": 2.5727069351230423,
"grad_norm": 20.591524124145508,
"learning_rate": 4.1324881928908774e-05,
"loss": 1.9082,
"step": 1150
},
{
"epoch": 2.684563758389262,
"grad_norm": 24.512027740478516,
"learning_rate": 4.0703455132985336e-05,
"loss": 1.8399,
"step": 1200
},
{
"epoch": 2.796420581655481,
"grad_norm": 18.300504684448242,
"learning_rate": 4.00820283370619e-05,
"loss": 1.7937,
"step": 1250
},
{
"epoch": 2.9082774049217,
"grad_norm": 40.87727737426758,
"learning_rate": 3.946060154113845e-05,
"loss": 1.8174,
"step": 1300
},
{
"epoch": 3.0,
"eval_accuracy": 0.8535637149028078,
"eval_loss": 0.37263187766075134,
"eval_runtime": 19.6701,
"eval_samples_per_second": 117.691,
"eval_steps_per_second": 3.711,
"step": 1341
},
{
"epoch": 3.0201342281879193,
"grad_norm": 29.54434585571289,
"learning_rate": 3.883917474521502e-05,
"loss": 1.8262,
"step": 1350
},
{
"epoch": 3.131991051454139,
"grad_norm": 19.222951889038086,
"learning_rate": 3.8217747949291576e-05,
"loss": 1.5486,
"step": 1400
},
{
"epoch": 3.243847874720358,
"grad_norm": 36.32904052734375,
"learning_rate": 3.759632115336814e-05,
"loss": 1.5273,
"step": 1450
},
{
"epoch": 3.3557046979865772,
"grad_norm": 27.05373764038086,
"learning_rate": 3.697489435744469e-05,
"loss": 1.5796,
"step": 1500
},
{
"epoch": 3.4675615212527964,
"grad_norm": 50.08060836791992,
"learning_rate": 3.6353467561521254e-05,
"loss": 1.5104,
"step": 1550
},
{
"epoch": 3.5794183445190155,
"grad_norm": 29.98794174194336,
"learning_rate": 3.5732040765597816e-05,
"loss": 1.6298,
"step": 1600
},
{
"epoch": 3.6912751677852347,
"grad_norm": 33.08045196533203,
"learning_rate": 3.511061396967437e-05,
"loss": 1.4769,
"step": 1650
},
{
"epoch": 3.8031319910514543,
"grad_norm": 30.475862503051758,
"learning_rate": 3.448918717375094e-05,
"loss": 1.547,
"step": 1700
},
{
"epoch": 3.9149888143176734,
"grad_norm": 29.52466583251953,
"learning_rate": 3.3867760377827495e-05,
"loss": 1.4918,
"step": 1750
},
{
"epoch": 4.0,
"eval_accuracy": 0.8501079913606912,
"eval_loss": 0.38409608602523804,
"eval_runtime": 19.6128,
"eval_samples_per_second": 118.035,
"eval_steps_per_second": 3.722,
"step": 1788
},
{
"epoch": 4.026845637583893,
"grad_norm": 27.849456787109375,
"learning_rate": 3.3246333581904056e-05,
"loss": 1.4653,
"step": 1800
},
{
"epoch": 4.138702460850112,
"grad_norm": 39.9719123840332,
"learning_rate": 3.262490678598061e-05,
"loss": 1.2247,
"step": 1850
},
{
"epoch": 4.250559284116331,
"grad_norm": 43.72661590576172,
"learning_rate": 3.200347999005717e-05,
"loss": 1.2065,
"step": 1900
},
{
"epoch": 4.3624161073825505,
"grad_norm": 29.79233169555664,
"learning_rate": 3.1382053194133735e-05,
"loss": 1.2585,
"step": 1950
},
{
"epoch": 4.47427293064877,
"grad_norm": 30.871849060058594,
"learning_rate": 3.076062639821029e-05,
"loss": 1.2602,
"step": 2000
},
{
"epoch": 4.586129753914989,
"grad_norm": 30.835567474365234,
"learning_rate": 3.0139199602286848e-05,
"loss": 1.2781,
"step": 2050
},
{
"epoch": 4.697986577181208,
"grad_norm": 37.51513671875,
"learning_rate": 2.9517772806363413e-05,
"loss": 1.228,
"step": 2100
},
{
"epoch": 4.809843400447427,
"grad_norm": 35.483665466308594,
"learning_rate": 2.889634601043997e-05,
"loss": 1.2876,
"step": 2150
},
{
"epoch": 4.921700223713646,
"grad_norm": 21.087987899780273,
"learning_rate": 2.8274919214516533e-05,
"loss": 1.2457,
"step": 2200
},
{
"epoch": 5.0,
"eval_accuracy": 0.8660907127429806,
"eval_loss": 0.3854082524776459,
"eval_runtime": 19.9697,
"eval_samples_per_second": 115.926,
"eval_steps_per_second": 3.656,
"step": 2235
},
{
"epoch": 5.033557046979865,
"grad_norm": 32.84519577026367,
"learning_rate": 2.765349241859309e-05,
"loss": 1.1247,
"step": 2250
},
{
"epoch": 5.145413870246085,
"grad_norm": 42.308837890625,
"learning_rate": 2.703206562266965e-05,
"loss": 0.9676,
"step": 2300
},
{
"epoch": 5.257270693512305,
"grad_norm": 32.68018341064453,
"learning_rate": 2.6410638826746208e-05,
"loss": 0.9049,
"step": 2350
},
{
"epoch": 5.369127516778524,
"grad_norm": 40.90736770629883,
"learning_rate": 2.5789212030822766e-05,
"loss": 0.8973,
"step": 2400
},
{
"epoch": 5.480984340044743,
"grad_norm": 42.41788864135742,
"learning_rate": 2.516778523489933e-05,
"loss": 0.972,
"step": 2450
},
{
"epoch": 5.592841163310962,
"grad_norm": 61.866180419921875,
"learning_rate": 2.454635843897589e-05,
"loss": 0.9687,
"step": 2500
},
{
"epoch": 5.704697986577181,
"grad_norm": 54.289512634277344,
"learning_rate": 2.392493164305245e-05,
"loss": 0.986,
"step": 2550
},
{
"epoch": 5.8165548098434,
"grad_norm": 32.10563659667969,
"learning_rate": 2.330350484712901e-05,
"loss": 0.981,
"step": 2600
},
{
"epoch": 5.9284116331096195,
"grad_norm": 34.02315139770508,
"learning_rate": 2.2682078051205568e-05,
"loss": 0.9679,
"step": 2650
},
{
"epoch": 6.0,
"eval_accuracy": 0.8678185745140389,
"eval_loss": 0.3706440329551697,
"eval_runtime": 19.6479,
"eval_samples_per_second": 117.824,
"eval_steps_per_second": 3.715,
"step": 2682
},
{
"epoch": 6.040268456375839,
"grad_norm": 36.86701965332031,
"learning_rate": 2.2060651255282127e-05,
"loss": 0.9318,
"step": 2700
},
{
"epoch": 6.152125279642058,
"grad_norm": 42.711204528808594,
"learning_rate": 2.145165299527716e-05,
"loss": 0.703,
"step": 2750
},
{
"epoch": 6.263982102908278,
"grad_norm": 38.12577819824219,
"learning_rate": 2.0830226199353717e-05,
"loss": 0.764,
"step": 2800
},
{
"epoch": 6.375838926174497,
"grad_norm": 32.06464767456055,
"learning_rate": 2.0208799403430276e-05,
"loss": 0.739,
"step": 2850
},
{
"epoch": 6.487695749440716,
"grad_norm": 36.63414001464844,
"learning_rate": 1.9587372607506838e-05,
"loss": 0.7173,
"step": 2900
},
{
"epoch": 6.599552572706935,
"grad_norm": 61.28206253051758,
"learning_rate": 1.8965945811583396e-05,
"loss": 0.7164,
"step": 2950
},
{
"epoch": 6.7114093959731544,
"grad_norm": 27.83030891418457,
"learning_rate": 1.8344519015659954e-05,
"loss": 0.7629,
"step": 3000
},
{
"epoch": 6.823266219239374,
"grad_norm": 46.308189392089844,
"learning_rate": 1.7723092219736516e-05,
"loss": 0.7298,
"step": 3050
},
{
"epoch": 6.935123042505593,
"grad_norm": 63.58484649658203,
"learning_rate": 1.7101665423813078e-05,
"loss": 0.6876,
"step": 3100
},
{
"epoch": 7.0,
"eval_accuracy": 0.8825053995680345,
"eval_loss": 0.3750421702861786,
"eval_runtime": 19.8931,
"eval_samples_per_second": 116.372,
"eval_steps_per_second": 3.67,
"step": 3129
},
{
"epoch": 7.046979865771812,
"grad_norm": 44.923614501953125,
"learning_rate": 1.6480238627889636e-05,
"loss": 0.6694,
"step": 3150
},
{
"epoch": 7.158836689038031,
"grad_norm": 55.236083984375,
"learning_rate": 1.5858811831966194e-05,
"loss": 0.5705,
"step": 3200
},
{
"epoch": 7.27069351230425,
"grad_norm": 57.69952392578125,
"learning_rate": 1.5237385036042756e-05,
"loss": 0.5369,
"step": 3250
},
{
"epoch": 7.382550335570469,
"grad_norm": 48.969078063964844,
"learning_rate": 1.4615958240119314e-05,
"loss": 0.5266,
"step": 3300
},
{
"epoch": 7.494407158836689,
"grad_norm": 30.38613510131836,
"learning_rate": 1.3994531444195874e-05,
"loss": 0.6426,
"step": 3350
},
{
"epoch": 7.6062639821029085,
"grad_norm": 32.06161880493164,
"learning_rate": 1.3385533184190904e-05,
"loss": 0.5366,
"step": 3400
},
{
"epoch": 7.718120805369128,
"grad_norm": 41.513572692871094,
"learning_rate": 1.2764106388267464e-05,
"loss": 0.536,
"step": 3450
},
{
"epoch": 7.829977628635347,
"grad_norm": 64.65313720703125,
"learning_rate": 1.2142679592344022e-05,
"loss": 0.5542,
"step": 3500
},
{
"epoch": 7.941834451901566,
"grad_norm": 59.38785934448242,
"learning_rate": 1.1521252796420582e-05,
"loss": 0.5386,
"step": 3550
},
{
"epoch": 8.0,
"eval_accuracy": 0.8825053995680345,
"eval_loss": 0.42663174867630005,
"eval_runtime": 19.6465,
"eval_samples_per_second": 117.833,
"eval_steps_per_second": 3.716,
"step": 3576
}
],
"logging_steps": 50,
"max_steps": 4470,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.013346575910502e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
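The file above is the standard trainer_state.json that the Hugging Face Trainer writes into each checkpoint folder: log_history holds the per-50-step training records plus one evaluation record per epoch, and best_metric / best_model_checkpoint track the best eval result seen so far. A minimal sketch of how such a state file could be inspected with the standard library is shown below; the local path "checkpoint-3576/trainer_state.json" is illustrative only and assumes the checkpoint folder has been downloaded to the working directory.

import json

# Load the trainer state (path is an assumed local copy of this file).
with open("checkpoint-3576/trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Per-epoch evaluation records are the log_history entries carrying eval_accuracy.
evals = [e for e in state["log_history"] if "eval_accuracy" in e]
for e in evals:
    print(f"epoch {e['epoch']:.0f}  step {e['step']:>4}  "
          f"eval_accuracy {e['eval_accuracy']:.4f}  eval_loss {e['eval_loss']:.4f}")

# The best epoch by accuracy should agree with the recorded best_metric
# and best_model_checkpoint fields (here: epoch 7, step 3129).
best = max(evals, key=lambda e: e["eval_accuracy"])
print("best eval_accuracy:", best["eval_accuracy"], "at step", best["step"])
print("recorded best_metric:", state["best_metric"])
print("recorded best_model_checkpoint:", state["best_model_checkpoint"])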