{
  "best_metric": 2.1078147888183594,
  "best_model_checkpoint": "miner_id_24/checkpoint-150",
  "epoch": 0.08522727272727272,
  "eval_steps": 50,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0005681818181818182,
      "eval_loss": 6.134354591369629,
      "eval_runtime": 47.1556,
      "eval_samples_per_second": 15.714,
      "eval_steps_per_second": 3.944,
      "step": 1
    },
    {
      "epoch": 0.005681818181818182,
      "grad_norm": 10.32190227508545,
      "learning_rate": 5.05e-06,
      "loss": 4.6056,
      "step": 10
    },
    {
      "epoch": 0.011363636363636364,
      "grad_norm": 8.61007022857666,
      "learning_rate": 1.01e-05,
      "loss": 4.191,
      "step": 20
    },
    {
      "epoch": 0.017045454545454544,
      "grad_norm": 8.55370044708252,
      "learning_rate": 9.323076923076924e-06,
      "loss": 3.2708,
      "step": 30
    },
    {
      "epoch": 0.022727272727272728,
      "grad_norm": 8.809952735900879,
      "learning_rate": 8.546153846153847e-06,
      "loss": 2.3297,
      "step": 40
    },
    {
      "epoch": 0.028409090909090908,
      "grad_norm": 19.515151977539062,
      "learning_rate": 7.76923076923077e-06,
      "loss": 1.6582,
      "step": 50
    },
    {
      "epoch": 0.028409090909090908,
      "eval_loss": 2.4909262657165527,
      "eval_runtime": 47.5356,
      "eval_samples_per_second": 15.588,
      "eval_steps_per_second": 3.913,
      "step": 50
    },
    {
      "epoch": 0.03409090909090909,
      "grad_norm": 6.00586462020874,
      "learning_rate": 6.992307692307692e-06,
      "loss": 2.9064,
      "step": 60
    },
    {
      "epoch": 0.03977272727272727,
      "grad_norm": 6.616552352905273,
      "learning_rate": 6.215384615384615e-06,
      "loss": 2.5966,
      "step": 70
    },
    {
      "epoch": 0.045454545454545456,
      "grad_norm": 7.654419898986816,
      "learning_rate": 5.438461538461538e-06,
      "loss": 2.2871,
      "step": 80
    },
    {
      "epoch": 0.05113636363636364,
      "grad_norm": 6.925200939178467,
      "learning_rate": 4.661538461538462e-06,
      "loss": 1.8554,
      "step": 90
    },
    {
      "epoch": 0.056818181818181816,
      "grad_norm": 13.769781112670898,
      "learning_rate": 3.884615384615385e-06,
      "loss": 1.2905,
      "step": 100
    },
    {
      "epoch": 0.056818181818181816,
      "eval_loss": 2.180443048477173,
      "eval_runtime": 47.2352,
      "eval_samples_per_second": 15.687,
      "eval_steps_per_second": 3.938,
      "step": 100
    },
    {
      "epoch": 0.0625,
      "grad_norm": 6.1928181648254395,
      "learning_rate": 3.1076923076923076e-06,
      "loss": 2.8003,
      "step": 110
    },
    {
      "epoch": 0.06818181818181818,
      "grad_norm": 6.664099216461182,
      "learning_rate": 2.330769230769231e-06,
      "loss": 2.417,
      "step": 120
    },
    {
      "epoch": 0.07386363636363637,
      "grad_norm": 7.768091201782227,
      "learning_rate": 1.5538461538461538e-06,
      "loss": 1.9968,
      "step": 130
    },
    {
      "epoch": 0.07954545454545454,
      "grad_norm": 10.426623344421387,
      "learning_rate": 7.769230769230769e-07,
      "loss": 1.6976,
      "step": 140
    },
    {
      "epoch": 0.08522727272727272,
      "grad_norm": 14.00831413269043,
      "learning_rate": 0.0,
      "loss": 1.2394,
      "step": 150
    },
    {
      "epoch": 0.08522727272727272,
      "eval_loss": 2.1078147888183594,
      "eval_runtime": 47.3252,
      "eval_samples_per_second": 15.658,
      "eval_steps_per_second": 3.93,
      "step": 150
    }
  ],
  "logging_steps": 10,
  "max_steps": 150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.979554657914061e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}