|
{
  "best_metric": 0.9142857142857143,
  "best_model_checkpoint": "videomae-base-finetuned-ucf101-subset/checkpoint-148",
  "epoch": 7.101351351351352,
  "eval_steps": 500,
  "global_step": 148,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06756756756756757,
      "grad_norm": 6.34252405166626,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.0466,
      "step": 10
    },
    {
      "epoch": 0.12837837837837837,
      "eval_accuracy": 0.6142857142857143,
      "eval_loss": 1.6348506212234497,
      "eval_runtime": 6.1663,
      "eval_samples_per_second": 11.352,
      "eval_steps_per_second": 0.811,
      "step": 19
    },
    {
      "epoch": 1.0067567567567568,
      "grad_norm": 7.780186176300049,
      "learning_rate": 4.81203007518797e-05,
      "loss": 1.8425,
      "step": 20
    },
    {
      "epoch": 1.0743243243243243,
      "grad_norm": 7.907790184020996,
      "learning_rate": 4.43609022556391e-05,
      "loss": 1.348,
      "step": 30
    },
    {
      "epoch": 1.1283783783783783,
      "eval_accuracy": 0.8428571428571429,
      "eval_loss": 0.804053783416748,
      "eval_runtime": 6.6922,
      "eval_samples_per_second": 10.46,
      "eval_steps_per_second": 0.747,
      "step": 38
    },
    {
      "epoch": 2.0135135135135136,
      "grad_norm": 5.862756729125977,
      "learning_rate": 4.0601503759398494e-05,
      "loss": 0.8369,
      "step": 40
    },
    {
      "epoch": 2.081081081081081,
      "grad_norm": 5.44201135635376,
      "learning_rate": 3.6842105263157895e-05,
      "loss": 0.6208,
      "step": 50
    },
    {
      "epoch": 2.1283783783783785,
      "eval_accuracy": 0.7285714285714285,
      "eval_loss": 0.7582912445068359,
      "eval_runtime": 6.1646,
      "eval_samples_per_second": 11.355,
      "eval_steps_per_second": 0.811,
      "step": 57
    },
    {
      "epoch": 3.02027027027027,
      "grad_norm": 12.206607818603516,
      "learning_rate": 3.3082706766917295e-05,
      "loss": 0.5199,
      "step": 60
    },
    {
      "epoch": 3.0878378378378377,
      "grad_norm": 5.501271724700928,
      "learning_rate": 2.9323308270676693e-05,
      "loss": 0.332,
      "step": 70
    },
    {
      "epoch": 3.1283783783783785,
      "eval_accuracy": 0.8285714285714286,
      "eval_loss": 0.45565441250801086,
      "eval_runtime": 6.0418,
      "eval_samples_per_second": 11.586,
      "eval_steps_per_second": 0.828,
      "step": 76
    },
    {
      "epoch": 4.027027027027027,
      "grad_norm": 7.0609965324401855,
      "learning_rate": 2.556390977443609e-05,
      "loss": 0.2761,
      "step": 80
    },
    {
      "epoch": 4.094594594594595,
      "grad_norm": 3.5807061195373535,
      "learning_rate": 2.1804511278195487e-05,
      "loss": 0.2229,
      "step": 90
    },
    {
      "epoch": 4.128378378378378,
      "eval_accuracy": 0.8857142857142857,
      "eval_loss": 0.3132798373699188,
      "eval_runtime": 6.754,
      "eval_samples_per_second": 10.364,
      "eval_steps_per_second": 0.74,
      "step": 95
    },
    {
      "epoch": 5.033783783783784,
      "grad_norm": 1.8226597309112549,
      "learning_rate": 1.8045112781954888e-05,
      "loss": 0.2292,
      "step": 100
    },
    {
      "epoch": 5.101351351351352,
      "grad_norm": 6.401218414306641,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.1479,
      "step": 110
    },
    {
      "epoch": 5.128378378378378,
      "eval_accuracy": 0.9,
      "eval_loss": 0.2872016131877899,
      "eval_runtime": 5.5369,
      "eval_samples_per_second": 12.643,
      "eval_steps_per_second": 0.903,
      "step": 114
    },
    {
      "epoch": 6.04054054054054,
      "grad_norm": 4.436432361602783,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 0.0848,
      "step": 120
    },
    {
      "epoch": 6.108108108108108,
      "grad_norm": 1.9256197214126587,
      "learning_rate": 6.766917293233083e-06,
      "loss": 0.0761,
      "step": 130
    },
    {
      "epoch": 6.128378378378378,
      "eval_accuracy": 0.9,
      "eval_loss": 0.28876611590385437,
      "eval_runtime": 6.7507,
      "eval_samples_per_second": 10.369,
      "eval_steps_per_second": 0.741,
      "step": 133
    },
    {
      "epoch": 7.047297297297297,
      "grad_norm": 7.098940372467041,
      "learning_rate": 3.007518796992481e-06,
      "loss": 0.0696,
      "step": 140
    },
    {
      "epoch": 7.101351351351352,
      "eval_accuracy": 0.9142857142857143,
      "eval_loss": 0.26643699407577515,
      "eval_runtime": 6.7738,
      "eval_samples_per_second": 10.334,
      "eval_steps_per_second": 0.738,
      "step": 148
    },
    {
      "epoch": 7.101351351351352,
      "step": 148,
      "total_flos": 2.915999166844109e+18,
      "train_loss": 0.5872005439690642,
      "train_runtime": 676.4093,
      "train_samples_per_second": 3.501,
      "train_steps_per_second": 0.219
    },
    {
      "epoch": 7.101351351351352,
      "eval_accuracy": 0.9290322580645162,
      "eval_loss": 0.2252088487148285,
      "eval_runtime": 13.824,
      "eval_samples_per_second": 11.212,
      "eval_steps_per_second": 0.723,
      "step": 148
    },
    {
      "epoch": 7.101351351351352,
      "eval_accuracy": 0.9290322580645162,
      "eval_loss": 0.2266511768102646,
      "eval_runtime": 13.6046,
      "eval_samples_per_second": 11.393,
      "eval_steps_per_second": 0.735,
      "step": 148
    }
  ],
  "logging_steps": 10,
  "max_steps": 148,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.915999166844109e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
|
|