dora-demo-31 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 16.0,
  "eval_steps": 500,
  "global_step": 208,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 1.4849790334701538,
      "learning_rate": 4.761904761904762e-05,
      "loss": 1.4899,
      "step": 10
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 1.4494893550872803,
      "learning_rate": 9.523809523809524e-05,
      "loss": 1.4047,
      "step": 20
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 1.1887791156768799,
      "learning_rate": 9.94295546780682e-05,
      "loss": 1.1335,
      "step": 30
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 2.6606452465057373,
      "learning_rate": 9.747435008912438e-05,
      "loss": 0.758,
      "step": 40
    },
    {
      "epoch": 3.8461538461538463,
      "grad_norm": 2.238654136657715,
      "learning_rate": 9.418238419956484e-05,
      "loss": 0.6976,
      "step": 50
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 2.202763080596924,
      "learning_rate": 8.964635069757802e-05,
      "loss": 0.5176,
      "step": 60
    },
    {
      "epoch": 5.384615384615385,
      "grad_norm": 1.5672999620437622,
      "learning_rate": 8.399397316510596e-05,
      "loss": 0.5374,
      "step": 70
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 1.7563928365707397,
      "learning_rate": 7.738440869493018e-05,
      "loss": 0.4344,
      "step": 80
    },
    {
      "epoch": 6.923076923076923,
      "grad_norm": 1.8594719171524048,
      "learning_rate": 7.000376641716133e-05,
      "loss": 0.3991,
      "step": 90
    },
    {
      "epoch": 7.6923076923076925,
      "grad_norm": 1.8146908283233643,
      "learning_rate": 6.205986712243875e-05,
      "loss": 0.3497,
      "step": 100
    },
    {
      "epoch": 8.461538461538462,
      "grad_norm": 1.8267848491668701,
      "learning_rate": 5.377639153800229e-05,
      "loss": 0.3176,
      "step": 110
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 1.6012214422225952,
      "learning_rate": 4.5386582026834906e-05,
      "loss": 0.2394,
      "step": 120
    },
    {
      "epoch": 10.0,
      "grad_norm": 2.043828010559082,
      "learning_rate": 3.712667505458622e-05,
      "loss": 0.2328,
      "step": 130
    },
    {
      "epoch": 10.76923076923077,
      "grad_norm": 2.1970419883728027,
      "learning_rate": 2.9229249349905684e-05,
      "loss": 0.1849,
      "step": 140
    },
    {
      "epoch": 11.538461538461538,
      "grad_norm": 1.2817729711532593,
      "learning_rate": 2.1916677057681785e-05,
      "loss": 0.1416,
      "step": 150
    },
    {
      "epoch": 12.307692307692308,
      "grad_norm": 1.8149962425231934,
      "learning_rate": 1.5394862284655264e-05,
      "loss": 0.1323,
      "step": 160
    },
    {
      "epoch": 13.076923076923077,
      "grad_norm": 2.4009556770324707,
      "learning_rate": 9.847443344610297e-06,
      "loss": 0.1315,
      "step": 170
    },
    {
      "epoch": 13.846153846153847,
      "grad_norm": 1.9544780254364014,
      "learning_rate": 5.430621953703785e-06,
      "loss": 0.1065,
      "step": 180
    },
    {
      "epoch": 14.615384615384615,
      "grad_norm": 1.0407898426055908,
      "learning_rate": 2.268764973114684e-06,
      "loss": 0.1174,
      "step": 190
    },
    {
      "epoch": 15.384615384615385,
      "grad_norm": 3.4905996322631836,
      "learning_rate": 4.5090254315662826e-07,
      "loss": 0.0959,
      "step": 200
    },
    {
      "epoch": 16.0,
      "step": 208,
      "total_flos": 3.485945501879501e+16,
      "train_loss": 0.457215073876656,
      "train_runtime": 4064.4346,
      "train_samples_per_second": 1.638,
      "train_steps_per_second": 0.051
    }
  ],
  "logging_steps": 10,
  "max_steps": 208,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 16,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.485945501879501e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
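
The "log_history" list above holds one entry every 10 steps ("logging_steps" is 10) plus a final summary entry that carries "train_loss" and runtime totals instead of a per-step "loss". A minimal sketch of how the loss curve could be read out of this file, assuming it is saved locally as trainer_state.json and that matplotlib is available (neither is part of the upload itself):

import json

import matplotlib.pyplot as plt

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the final summary entry has no "loss" key.
logs = [entry for entry in state["log_history"] if "loss" in entry]
steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]

plt.plot(steps, losses, marker="o")
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("dora-demo-31 training loss")
plt.show()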