{
  "best_metric": 0.9192785620689392,
  "best_model_checkpoint": "facial_emotions_image_detection_rafdb_microsoft_vit/checkpoint-5016",
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 5016,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.8,
      "grad_norm": 14.677521705627441,
      "learning_rate": 2.7281514297221103e-06,
      "loss": 1.7706,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.45075587486903157,
      "eval_loss": 1.4894078969955444,
      "eval_runtime": 69.9167,
      "eval_samples_per_second": 191.113,
      "eval_steps_per_second": 11.957,
      "step": 627
    },
    {
      "epoch": 1.59,
      "grad_norm": 17.35078239440918,
      "learning_rate": 2.4260974627466776e-06,
      "loss": 1.4285,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5424337674000899,
      "eval_loss": 1.2471543550491333,
      "eval_runtime": 65.1157,
      "eval_samples_per_second": 205.204,
      "eval_steps_per_second": 12.839,
      "step": 1254
    },
    {
      "epoch": 2.39,
      "grad_norm": 15.841980934143066,
      "learning_rate": 2.1240434957712445e-06,
      "loss": 1.2563,
      "step": 1500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.5873372249663225,
      "eval_loss": 1.1151762008666992,
      "eval_runtime": 65.1132,
      "eval_samples_per_second": 205.212,
      "eval_steps_per_second": 12.839,
      "step": 1881
    },
    {
      "epoch": 3.19,
      "grad_norm": 24.84258270263672,
      "learning_rate": 1.8219895287958114e-06,
      "loss": 1.1575,
      "step": 2000
    },
    {
      "epoch": 3.99,
      "grad_norm": 14.571057319641113,
      "learning_rate": 1.5199355618203787e-06,
      "loss": 1.0741,
      "step": 2500
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6200419098937285,
      "eval_loss": 1.0384080410003662,
      "eval_runtime": 65.4313,
      "eval_samples_per_second": 204.214,
      "eval_steps_per_second": 12.777,
      "step": 2508
    },
    {
      "epoch": 4.78,
      "grad_norm": 19.834932327270508,
      "learning_rate": 1.2178815948449456e-06,
      "loss": 1.0238,
      "step": 3000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6430923514443946,
      "eval_loss": 0.981325626373291,
      "eval_runtime": 66.0473,
      "eval_samples_per_second": 202.31,
      "eval_steps_per_second": 12.658,
      "step": 3135
    },
    {
      "epoch": 5.58,
      "grad_norm": 17.73134994506836,
      "learning_rate": 9.158276278695127e-07,
      "loss": 0.9837,
      "step": 3500
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6525969166292471,
      "eval_loss": 0.9464946985244751,
      "eval_runtime": 66.383,
      "eval_samples_per_second": 201.286,
      "eval_steps_per_second": 12.594,
      "step": 3762
    },
    {
      "epoch": 6.38,
      "grad_norm": 14.271998405456543,
      "learning_rate": 6.137736608940798e-07,
      "loss": 0.9694,
      "step": 4000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.6600059871276756,
      "eval_loss": 0.9257609248161316,
      "eval_runtime": 66.6168,
      "eval_samples_per_second": 200.58,
      "eval_steps_per_second": 12.549,
      "step": 4389
    },
    {
      "epoch": 7.18,
      "grad_norm": 26.607189178466797,
      "learning_rate": 3.117196939186468e-07,
      "loss": 0.9391,
      "step": 4500
    },
    {
      "epoch": 7.97,
      "grad_norm": 20.43568992614746,
      "learning_rate": 9.665726943213855e-09,
      "loss": 0.9235,
      "step": 5000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.663897620116749,
      "eval_loss": 0.9192785620689392,
      "eval_runtime": 64.6255,
      "eval_samples_per_second": 206.761,
      "eval_steps_per_second": 12.936,
      "step": 5016
    }
  ],
  "logging_steps": 500,
  "max_steps": 5016,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "total_flos": 3.98586664581061e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}