{
  "best_metric": 3.6801536083221436,
  "best_model_checkpoint": "/content/drive/MyDrive/models/finetuned/meta-llama_Meta-Llama-3.1-8B-Instruct/checkpoint-100",
  "epoch": 10.0,
  "eval_steps": 50,
  "global_step": 110,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 6.6702094078063965,
      "learning_rate": 9.900412805461967e-05,
      "loss": 11.2023,
      "step": 10
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 8.338553428649902,
      "learning_rate": 9.422164654989072e-05,
      "loss": 9.4483,
      "step": 20
    },
    {
      "epoch": 2.7272727272727275,
      "grad_norm": 18.10846519470215,
      "learning_rate": 8.585659023794818e-05,
      "loss": 7.6668,
      "step": 30
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 16.715173721313477,
      "learning_rate": 7.45866462322802e-05,
      "loss": 6.1567,
      "step": 40
    },
    {
      "epoch": 4.545454545454545,
      "grad_norm": 22.686342239379883,
      "learning_rate": 6.132483837128823e-05,
      "loss": 4.9888,
      "step": 50
    },
    {
      "epoch": 4.545454545454545,
      "eval_loss": 4.827907562255859,
      "eval_runtime": 3.1939,
      "eval_samples_per_second": 3.444,
      "eval_steps_per_second": 0.939,
      "step": 50
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 18.679607391357422,
      "learning_rate": 4.71455594568616e-05,
      "loss": 4.3176,
      "step": 60
    },
    {
      "epoch": 6.363636363636363,
      "grad_norm": 17.530826568603516,
      "learning_rate": 3.3197530339228487e-05,
      "loss": 3.9827,
      "step": 70
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 13.915569305419922,
      "learning_rate": 2.061073738537635e-05,
      "loss": 3.5305,
      "step": 80
    },
    {
      "epoch": 8.181818181818182,
      "grad_norm": 14.944436073303223,
      "learning_rate": 1.0404887703886251e-05,
      "loss": 3.4139,
      "step": 90
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 15.253994941711426,
      "learning_rate": 3.406798539427386e-06,
      "loss": 3.326,
      "step": 100
    },
    {
      "epoch": 9.090909090909092,
      "eval_loss": 3.6801536083221436,
      "eval_runtime": 3.2002,
      "eval_samples_per_second": 3.437,
      "eval_steps_per_second": 0.937,
      "step": 100
    },
    {
      "epoch": 10.0,
      "grad_norm": 15.849159240722656,
      "learning_rate": 1.8341345686543332e-07,
      "loss": 3.2823,
      "step": 110
    }
  ],
  "logging_steps": 10,
  "max_steps": 110,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.014886293307392e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}