Geodeberta2/checkpoint-126/trainer_state.json
{
"best_metric": 0.9531590342521667,
"best_model_checkpoint": "Geodeberta2/checkpoint-126",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 126,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.031746031746031744,
"grad_norm": 3.382016897201538,
"learning_rate": 1.5873015873015873e-06,
"loss": 1.2687,
"step": 2
},
{
"epoch": 0.06349206349206349,
"grad_norm": 3.2359426021575928,
"learning_rate": 3.1746031746031746e-06,
"loss": 1.2944,
"step": 4
},
{
"epoch": 0.09523809523809523,
"grad_norm": 2.8667497634887695,
"learning_rate": 4.7619047619047615e-06,
"loss": 1.2725,
"step": 6
},
{
"epoch": 0.12698412698412698,
"grad_norm": 1.7016966342926025,
"learning_rate": 6.349206349206349e-06,
"loss": 1.3089,
"step": 8
},
{
"epoch": 0.15873015873015872,
"grad_norm": 2.4599409103393555,
"learning_rate": 7.936507936507936e-06,
"loss": 1.2902,
"step": 10
},
{
"epoch": 0.19047619047619047,
"grad_norm": 2.53178334236145,
"learning_rate": 9.523809523809523e-06,
"loss": 1.2766,
"step": 12
},
{
"epoch": 0.2222222222222222,
"grad_norm": 3.3443331718444824,
"learning_rate": 1.1111111111111112e-05,
"loss": 1.2274,
"step": 14
},
{
"epoch": 0.25396825396825395,
"grad_norm": 2.784144878387451,
"learning_rate": 1.2698412698412699e-05,
"loss": 1.2141,
"step": 16
},
{
"epoch": 0.2857142857142857,
"grad_norm": 3.0724332332611084,
"learning_rate": 1.4285714285714285e-05,
"loss": 1.2465,
"step": 18
},
{
"epoch": 0.31746031746031744,
"grad_norm": 1.5863182544708252,
"learning_rate": 1.5873015873015872e-05,
"loss": 1.2876,
"step": 20
},
{
"epoch": 0.3492063492063492,
"grad_norm": 1.5585477352142334,
"learning_rate": 1.746031746031746e-05,
"loss": 1.2536,
"step": 22
},
{
"epoch": 0.38095238095238093,
"grad_norm": 2.9539477825164795,
"learning_rate": 1.9047619047619046e-05,
"loss": 1.1878,
"step": 24
},
{
"epoch": 0.4126984126984127,
"grad_norm": 1.7762205600738525,
"learning_rate": 2.0634920634920636e-05,
"loss": 1.2631,
"step": 26
},
{
"epoch": 0.4444444444444444,
"grad_norm": 3.783928155899048,
"learning_rate": 2.2222222222222223e-05,
"loss": 1.0052,
"step": 28
},
{
"epoch": 0.47619047619047616,
"grad_norm": 1.4549317359924316,
"learning_rate": 2.380952380952381e-05,
"loss": 1.1053,
"step": 30
},
{
"epoch": 0.5079365079365079,
"grad_norm": 2.450103521347046,
"learning_rate": 2.5396825396825397e-05,
"loss": 1.0221,
"step": 32
},
{
"epoch": 0.5396825396825397,
"grad_norm": 3.955317258834839,
"learning_rate": 2.6984126984126984e-05,
"loss": 1.1519,
"step": 34
},
{
"epoch": 0.5714285714285714,
"grad_norm": 3.0787367820739746,
"learning_rate": 2.857142857142857e-05,
"loss": 0.8755,
"step": 36
},
{
"epoch": 0.6031746031746031,
"grad_norm": 2.1669228076934814,
"learning_rate": 3.0158730158730158e-05,
"loss": 1.0728,
"step": 38
},
{
"epoch": 0.6349206349206349,
"grad_norm": 2.1683146953582764,
"learning_rate": 3.1746031746031745e-05,
"loss": 1.0817,
"step": 40
},
{
"epoch": 0.6666666666666666,
"grad_norm": 2.4933903217315674,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.0527,
"step": 42
},
{
"epoch": 0.6984126984126984,
"grad_norm": 2.5503716468811035,
"learning_rate": 3.492063492063492e-05,
"loss": 1.1201,
"step": 44
},
{
"epoch": 0.7301587301587301,
"grad_norm": 2.326674461364746,
"learning_rate": 3.650793650793651e-05,
"loss": 0.8091,
"step": 46
},
{
"epoch": 0.7619047619047619,
"grad_norm": 2.899217128753662,
"learning_rate": 3.809523809523809e-05,
"loss": 0.851,
"step": 48
},
{
"epoch": 0.7936507936507936,
"grad_norm": 2.776383399963379,
"learning_rate": 3.968253968253968e-05,
"loss": 1.0539,
"step": 50
},
{
"epoch": 0.8253968253968254,
"grad_norm": 3.472299814224243,
"learning_rate": 4.126984126984127e-05,
"loss": 0.9422,
"step": 52
},
{
"epoch": 0.8571428571428571,
"grad_norm": 4.994964122772217,
"learning_rate": 4.2857142857142856e-05,
"loss": 1.0585,
"step": 54
},
{
"epoch": 0.8888888888888888,
"grad_norm": 7.897761821746826,
"learning_rate": 4.4444444444444447e-05,
"loss": 1.1766,
"step": 56
},
{
"epoch": 0.9206349206349206,
"grad_norm": 3.434422731399536,
"learning_rate": 4.603174603174603e-05,
"loss": 1.0149,
"step": 58
},
{
"epoch": 0.9523809523809523,
"grad_norm": 4.816616535186768,
"learning_rate": 4.761904761904762e-05,
"loss": 0.8752,
"step": 60
},
{
"epoch": 0.9841269841269841,
"grad_norm": 2.6246337890625,
"learning_rate": 4.9206349206349204e-05,
"loss": 0.9825,
"step": 62
},
{
"epoch": 1.0,
"eval_accuracy": 0.5271966527196653,
"eval_f1_macro": 0.29448106332528445,
"eval_f1_micro": 0.5271966527196653,
"eval_f1_weighted": 0.47887320097744607,
"eval_loss": 1.0690832138061523,
"eval_precision_macro": 0.2979542433081334,
"eval_precision_micro": 0.5271966527196653,
"eval_precision_weighted": 0.5181255807964261,
"eval_recall_macro": 0.35484535475707013,
"eval_recall_micro": 0.5271966527196653,
"eval_recall_weighted": 0.5271966527196653,
"eval_runtime": 1.6359,
"eval_samples_per_second": 292.199,
"eval_steps_per_second": 4.89,
"step": 63
},
{
"epoch": 1.0158730158730158,
"grad_norm": 4.054161071777344,
"learning_rate": 4.991181657848325e-05,
"loss": 1.0958,
"step": 64
},
{
"epoch": 1.0476190476190477,
"grad_norm": 2.788057565689087,
"learning_rate": 4.973544973544973e-05,
"loss": 1.1205,
"step": 66
},
{
"epoch": 1.0793650793650793,
"grad_norm": 4.333444595336914,
"learning_rate": 4.955908289241622e-05,
"loss": 0.9343,
"step": 68
},
{
"epoch": 1.1111111111111112,
"grad_norm": 2.873152732849121,
"learning_rate": 4.938271604938271e-05,
"loss": 0.8455,
"step": 70
},
{
"epoch": 1.1428571428571428,
"grad_norm": 3.179229736328125,
"learning_rate": 4.9206349206349204e-05,
"loss": 0.905,
"step": 72
},
{
"epoch": 1.1746031746031746,
"grad_norm": 6.202491760253906,
"learning_rate": 4.9029982363315695e-05,
"loss": 0.7701,
"step": 74
},
{
"epoch": 1.2063492063492063,
"grad_norm": 7.958104610443115,
"learning_rate": 4.8853615520282185e-05,
"loss": 0.9304,
"step": 76
},
{
"epoch": 1.2380952380952381,
"grad_norm": 6.333682537078857,
"learning_rate": 4.8677248677248676e-05,
"loss": 1.0795,
"step": 78
},
{
"epoch": 1.2698412698412698,
"grad_norm": 5.300474166870117,
"learning_rate": 4.850088183421517e-05,
"loss": 0.9147,
"step": 80
},
{
"epoch": 1.3015873015873016,
"grad_norm": 3.8622822761535645,
"learning_rate": 4.832451499118166e-05,
"loss": 0.7233,
"step": 82
},
{
"epoch": 1.3333333333333333,
"grad_norm": 3.2566518783569336,
"learning_rate": 4.814814814814815e-05,
"loss": 0.9643,
"step": 84
},
{
"epoch": 1.3650793650793651,
"grad_norm": 3.669085741043091,
"learning_rate": 4.797178130511464e-05,
"loss": 0.5868,
"step": 86
},
{
"epoch": 1.3968253968253967,
"grad_norm": 5.4853363037109375,
"learning_rate": 4.779541446208113e-05,
"loss": 0.9615,
"step": 88
},
{
"epoch": 1.4285714285714286,
"grad_norm": 4.062696933746338,
"learning_rate": 4.761904761904762e-05,
"loss": 0.827,
"step": 90
},
{
"epoch": 1.4603174603174602,
"grad_norm": 2.2320899963378906,
"learning_rate": 4.744268077601411e-05,
"loss": 0.8415,
"step": 92
},
{
"epoch": 1.492063492063492,
"grad_norm": 5.46465539932251,
"learning_rate": 4.72663139329806e-05,
"loss": 0.7415,
"step": 94
},
{
"epoch": 1.5238095238095237,
"grad_norm": 2.736971139907837,
"learning_rate": 4.708994708994709e-05,
"loss": 0.8833,
"step": 96
},
{
"epoch": 1.5555555555555556,
"grad_norm": 2.601299524307251,
"learning_rate": 4.691358024691358e-05,
"loss": 0.7582,
"step": 98
},
{
"epoch": 1.5873015873015874,
"grad_norm": 6.022972106933594,
"learning_rate": 4.673721340388007e-05,
"loss": 0.7518,
"step": 100
},
{
"epoch": 1.619047619047619,
"grad_norm": 6.7905378341674805,
"learning_rate": 4.656084656084656e-05,
"loss": 1.0717,
"step": 102
},
{
"epoch": 1.6507936507936507,
"grad_norm": 3.2041382789611816,
"learning_rate": 4.638447971781305e-05,
"loss": 0.6562,
"step": 104
},
{
"epoch": 1.6825396825396826,
"grad_norm": 6.4817681312561035,
"learning_rate": 4.620811287477954e-05,
"loss": 0.9219,
"step": 106
},
{
"epoch": 1.7142857142857144,
"grad_norm": 4.928455829620361,
"learning_rate": 4.603174603174603e-05,
"loss": 0.8101,
"step": 108
},
{
"epoch": 1.746031746031746,
"grad_norm": 4.112485885620117,
"learning_rate": 4.585537918871252e-05,
"loss": 0.8257,
"step": 110
},
{
"epoch": 1.7777777777777777,
"grad_norm": 6.042239189147949,
"learning_rate": 4.567901234567901e-05,
"loss": 0.6938,
"step": 112
},
{
"epoch": 1.8095238095238095,
"grad_norm": 2.9191532135009766,
"learning_rate": 4.55026455026455e-05,
"loss": 0.8586,
"step": 114
},
{
"epoch": 1.8412698412698414,
"grad_norm": 2.405008554458618,
"learning_rate": 4.532627865961199e-05,
"loss": 0.6218,
"step": 116
},
{
"epoch": 1.873015873015873,
"grad_norm": 3.959949016571045,
"learning_rate": 4.5149911816578484e-05,
"loss": 0.7355,
"step": 118
},
{
"epoch": 1.9047619047619047,
"grad_norm": 3.4744226932525635,
"learning_rate": 4.4973544973544974e-05,
"loss": 0.7225,
"step": 120
},
{
"epoch": 1.9365079365079365,
"grad_norm": 3.96034836769104,
"learning_rate": 4.4797178130511465e-05,
"loss": 0.7276,
"step": 122
},
{
"epoch": 1.9682539682539684,
"grad_norm": 4.284174919128418,
"learning_rate": 4.4620811287477956e-05,
"loss": 0.6909,
"step": 124
},
{
"epoch": 2.0,
"grad_norm": 11.619244575500488,
"learning_rate": 4.4444444444444447e-05,
"loss": 0.7797,
"step": 126
},
{
"epoch": 2.0,
"eval_accuracy": 0.6548117154811716,
"eval_f1_macro": 0.4940971741477817,
"eval_f1_micro": 0.6548117154811716,
"eval_f1_weighted": 0.628735443682528,
"eval_loss": 0.9531590342521667,
"eval_precision_macro": 0.6898690717515761,
"eval_precision_micro": 0.6548117154811716,
"eval_precision_weighted": 0.6835107777099136,
"eval_recall_macro": 0.47932568993970953,
"eval_recall_micro": 0.6548117154811716,
"eval_recall_weighted": 0.6548117154811716,
"eval_runtime": 1.6407,
"eval_samples_per_second": 291.337,
"eval_steps_per_second": 4.876,
"step": 126
}
],
"logging_steps": 2,
"max_steps": 630,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.01
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 525977876877312.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}