{
  "best_metric": 0.8140808343887329,
  "best_model_checkpoint": "autotrain-g8xiw-ebx5n/checkpoint-80",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 18.177003860473633,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.7215,
      "step": 4
    },
    {
      "epoch": 0.1,
      "grad_norm": 25.83134651184082,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.9735,
      "step": 8
    },
    {
      "epoch": 0.15,
      "grad_norm": 28.273202896118164,
      "learning_rate": 2.5e-05,
      "loss": 0.9053,
      "step": 12
    },
    {
      "epoch": 0.2,
      "grad_norm": 77.66250610351562,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.7484,
      "step": 16
    },
    {
      "epoch": 0.25,
      "grad_norm": 78.92176055908203,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.8162,
      "step": 20
    },
    {
      "epoch": 0.3,
      "grad_norm": 24.40717315673828,
      "learning_rate": 5e-05,
      "loss": 1.2395,
      "step": 24
    },
    {
      "epoch": 0.35,
      "grad_norm": 20.296764373779297,
      "learning_rate": 4.9074074074074075e-05,
      "loss": 1.2468,
      "step": 28
    },
    {
      "epoch": 0.4,
      "grad_norm": 17.20810890197754,
      "learning_rate": 4.814814814814815e-05,
      "loss": 0.7842,
      "step": 32
    },
    {
      "epoch": 0.45,
      "grad_norm": 13.720386505126953,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.8349,
      "step": 36
    },
    {
      "epoch": 0.5,
      "grad_norm": 19.7871150970459,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.6975,
      "step": 40
    },
    {
      "epoch": 0.55,
      "grad_norm": 8.968467712402344,
      "learning_rate": 4.5370370370370374e-05,
      "loss": 0.6712,
      "step": 44
    },
    {
      "epoch": 0.6,
      "grad_norm": 14.593846321105957,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.6436,
      "step": 48
    },
    {
      "epoch": 0.65,
      "grad_norm": 32.06483459472656,
      "learning_rate": 4.351851851851852e-05,
      "loss": 0.5649,
      "step": 52
    },
    {
      "epoch": 0.7,
      "grad_norm": 13.875551223754883,
      "learning_rate": 4.259259259259259e-05,
      "loss": 0.5657,
      "step": 56
    },
    {
      "epoch": 0.75,
      "grad_norm": 15.4502534866333,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.6948,
      "step": 60
    },
    {
      "epoch": 0.8,
      "grad_norm": 42.757694244384766,
      "learning_rate": 4.074074074074074e-05,
      "loss": 0.9738,
      "step": 64
    },
    {
      "epoch": 0.85,
      "grad_norm": 21.160280227661133,
      "learning_rate": 3.981481481481482e-05,
      "loss": 0.6472,
      "step": 68
    },
    {
      "epoch": 0.9,
      "grad_norm": 11.125871658325195,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.9644,
      "step": 72
    },
    {
      "epoch": 0.95,
      "grad_norm": 11.052087783813477,
      "learning_rate": 3.7962962962962964e-05,
      "loss": 0.5686,
      "step": 76
    },
    {
      "epoch": 1.0,
      "grad_norm": 8.721762657165527,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.3979,
      "step": 80
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.71875,
      "eval_f1_macro": 0.7169680062645076,
      "eval_f1_micro": 0.71875,
      "eval_f1_weighted": 0.719803137322762,
      "eval_loss": 0.8140808343887329,
      "eval_precision_macro": 0.7124963414309661,
      "eval_precision_micro": 0.71875,
      "eval_precision_weighted": 0.7473707776787377,
      "eval_recall_macro": 0.7500480606863587,
      "eval_recall_micro": 0.71875,
      "eval_recall_weighted": 0.71875,
      "eval_runtime": 27.1736,
      "eval_samples_per_second": 5.888,
      "eval_steps_per_second": 0.368,
      "step": 80
    }
  ],
  "logging_steps": 4,
  "max_steps": 240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 41835033418752.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}