Qwen2.5-1.5B-Open-R1-Distill / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 169,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.029585798816568046,
      "grad_norm": 2.342892423396605,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.0992,
      "step": 5
    },
    {
      "epoch": 0.05917159763313609,
      "grad_norm": 1.6368371604135565,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 1.0402,
      "step": 10
    },
    {
      "epoch": 0.08875739644970414,
      "grad_norm": 0.8989769231311087,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.9533,
      "step": 15
    },
    {
      "epoch": 0.11834319526627218,
      "grad_norm": 0.6137256595940042,
      "learning_rate": 1.9980782984658682e-05,
      "loss": 0.8922,
      "step": 20
    },
    {
      "epoch": 0.14792899408284024,
      "grad_norm": 0.5492009965568149,
      "learning_rate": 1.9863613034027224e-05,
      "loss": 0.8551,
      "step": 25
    },
    {
      "epoch": 0.17751479289940827,
      "grad_norm": 0.4349573184458919,
      "learning_rate": 1.9641197940012136e-05,
      "loss": 0.8283,
      "step": 30
    },
    {
      "epoch": 0.20710059171597633,
      "grad_norm": 0.38659663740930433,
      "learning_rate": 1.9315910880512792e-05,
      "loss": 0.8229,
      "step": 35
    },
    {
      "epoch": 0.23668639053254437,
      "grad_norm": 0.4119151414508529,
      "learning_rate": 1.8891222681391853e-05,
      "loss": 0.8225,
      "step": 40
    },
    {
      "epoch": 0.26627218934911245,
      "grad_norm": 0.35458480206676984,
      "learning_rate": 1.8371664782625287e-05,
      "loss": 0.8073,
      "step": 45
    },
    {
      "epoch": 0.2958579881656805,
      "grad_norm": 0.34430708054184417,
      "learning_rate": 1.7762780887657576e-05,
      "loss": 0.7977,
      "step": 50
    },
    {
      "epoch": 0.3254437869822485,
      "grad_norm": 0.3838154059326915,
      "learning_rate": 1.7071067811865477e-05,
      "loss": 0.7877,
      "step": 55
    },
    {
      "epoch": 0.35502958579881655,
      "grad_norm": 0.383532491946636,
      "learning_rate": 1.6303906161279554e-05,
      "loss": 0.7981,
      "step": 60
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 0.35083975989542854,
      "learning_rate": 1.5469481581224274e-05,
      "loss": 0.7722,
      "step": 65
    },
    {
      "epoch": 0.41420118343195267,
      "grad_norm": 0.35395203723670016,
      "learning_rate": 1.4576697415156818e-05,
      "loss": 0.7741,
      "step": 70
    },
    {
      "epoch": 0.4437869822485207,
      "grad_norm": 0.34555629817939354,
      "learning_rate": 1.3635079705638298e-05,
      "loss": 0.7852,
      "step": 75
    },
    {
      "epoch": 0.47337278106508873,
      "grad_norm": 0.35805756615265727,
      "learning_rate": 1.2654675551080724e-05,
      "loss": 0.7667,
      "step": 80
    },
    {
      "epoch": 0.5029585798816568,
      "grad_norm": 0.3781865827387411,
      "learning_rate": 1.164594590280734e-05,
      "loss": 0.7706,
      "step": 85
    },
    {
      "epoch": 0.5325443786982249,
      "grad_norm": 0.36455865865784565,
      "learning_rate": 1.0619653946285948e-05,
      "loss": 0.7701,
      "step": 90
    },
    {
      "epoch": 0.5621301775147929,
      "grad_norm": 0.37271134883355717,
      "learning_rate": 9.586750257511868e-06,
      "loss": 0.7772,
      "step": 95
    },
    {
      "epoch": 0.591715976331361,
      "grad_norm": 0.3429534883848641,
      "learning_rate": 8.558255959926533e-06,
      "loss": 0.753,
      "step": 100
    },
    {
      "epoch": 0.591715976331361,
      "eval_loss": 0.7881382703781128,
      "eval_runtime": 1.6822,
      "eval_samples_per_second": 76.093,
      "eval_steps_per_second": 2.378,
      "step": 100
    },
    {
      "epoch": 0.621301775147929,
      "grad_norm": 0.34272683406564375,
      "learning_rate": 7.545145128592009e-06,
      "loss": 0.757,
      "step": 105
    },
    {
      "epoch": 0.650887573964497,
      "grad_norm": 0.35326011905545884,
      "learning_rate": 6.558227696373617e-06,
      "loss": 0.7682,
      "step": 110
    },
    {
      "epoch": 0.6804733727810651,
      "grad_norm": 0.3175593796339319,
      "learning_rate": 5.608034111526298e-06,
      "loss": 0.7623,
      "step": 115
    },
    {
      "epoch": 0.7100591715976331,
      "grad_norm": 0.30113559390386235,
      "learning_rate": 4.704702977392914e-06,
      "loss": 0.7514,
      "step": 120
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.3514515497713594,
      "learning_rate": 3.857872873103322e-06,
      "loss": 0.7538,
      "step": 125
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 0.32077249449415507,
      "learning_rate": 3.0765795095517026e-06,
      "loss": 0.7555,
      "step": 130
    },
    {
      "epoch": 0.7988165680473372,
      "grad_norm": 0.32980480359237835,
      "learning_rate": 2.369159318001937e-06,
      "loss": 0.7585,
      "step": 135
    },
    {
      "epoch": 0.8284023668639053,
      "grad_norm": 0.30552938976565036,
      "learning_rate": 1.743160500034443e-06,
      "loss": 0.7498,
      "step": 140
    },
    {
      "epoch": 0.8579881656804734,
      "grad_norm": 0.3199948980261291,
      "learning_rate": 1.2052624879351105e-06,
      "loss": 0.7566,
      "step": 145
    },
    {
      "epoch": 0.8875739644970414,
      "grad_norm": 0.3029762272084133,
      "learning_rate": 7.612046748871327e-07,
      "loss": 0.7666,
      "step": 150
    },
    {
      "epoch": 0.9171597633136095,
      "grad_norm": 0.30367592763134704,
      "learning_rate": 4.1572517541747294e-07,
      "loss": 0.7613,
      "step": 155
    },
    {
      "epoch": 0.9467455621301775,
      "grad_norm": 0.29238016566832153,
      "learning_rate": 1.7251026952640583e-07,
      "loss": 0.7607,
      "step": 160
    },
    {
      "epoch": 0.9763313609467456,
      "grad_norm": 0.2814543563664885,
      "learning_rate": 3.4155069933301535e-08,
      "loss": 0.7446,
      "step": 165
    },
    {
      "epoch": 1.0,
      "step": 169,
      "total_flos": 76916824473600.0,
      "train_loss": 0.8026671296746067,
      "train_runtime": 1041.4464,
      "train_samples_per_second": 20.75,
      "train_steps_per_second": 0.162
    }
  ],
  "logging_steps": 5,
  "max_steps": 169,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 76916824473600.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
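
The file above is the trainer state that the Hugging Face Trainer writes at the end of a run. As a minimal sketch (not part of the original repository), the Python snippet below loads trainer_state.json, splits log_history into training and evaluation entries, and prints a per-step summary; the file path is an assumption and should point at the checkpoint directory that actually contains this file.

import json

# Minimal sketch: load the saved trainer state and summarize the logged metrics.
# "trainer_state.json" is an assumed path; adjust it to the checkpoint directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training logs (keyed by "loss") and eval logs (keyed by "eval_loss").
# The final summary entry uses "train_loss"/"train_runtime" and is skipped by both filters.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"{state['global_step']} steps over {state['epoch']} epoch(s)")
for e in train_logs:
    print(f"step {e['step']:>4}  loss {e['loss']:.4f}  "
          f"lr {e['learning_rate']:.2e}  grad_norm {e['grad_norm']:.3f}")
for e in eval_logs:
    print(f"eval @ step {e['step']}: eval_loss {e['eval_loss']:.4f}")

Against this log, the printout shows the training loss falling from 1.0992 at step 5 to 0.7446 at step 165, with a single evaluation at step 100 reporting an eval_loss of 0.7881.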