{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.23668639053254437,
  "eval_steps": 20,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001183431952662722,
      "eval_loss": 6.91423225402832,
      "eval_runtime": 2.3165,
      "eval_samples_per_second": 648.398,
      "eval_steps_per_second": 40.579,
      "step": 1
    },
    {
      "epoch": 0.011834319526627219,
      "grad_norm": 8640.0,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 6.7118,
      "step": 10
    },
    {
      "epoch": 0.023668639053254437,
      "grad_norm": 2368.0,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 6.7944,
      "step": 20
    },
    {
      "epoch": 0.023668639053254437,
      "eval_loss": 6.8260087966918945,
      "eval_runtime": 2.3051,
      "eval_samples_per_second": 651.607,
      "eval_steps_per_second": 40.78,
      "step": 20
    },
    {
      "epoch": 0.03550295857988166,
      "grad_norm": 8096.0,
      "learning_rate": 4.8e-05,
      "loss": 6.762,
      "step": 30
    },
    {
      "epoch": 0.047337278106508875,
      "grad_norm": 4160.0,
      "learning_rate": 6.400000000000001e-05,
      "loss": 6.5961,
      "step": 40
    },
    {
      "epoch": 0.047337278106508875,
      "eval_loss": 6.617987632751465,
      "eval_runtime": 2.3313,
      "eval_samples_per_second": 644.27,
      "eval_steps_per_second": 40.321,
      "step": 40
    },
    {
      "epoch": 0.05917159763313609,
      "grad_norm": 17920.0,
      "learning_rate": 8e-05,
      "loss": 6.4182,
      "step": 50
    },
    {
      "epoch": 0.07100591715976332,
      "grad_norm": 13632.0,
      "learning_rate": 9.6e-05,
      "loss": 6.5851,
      "step": 60
    },
    {
      "epoch": 0.07100591715976332,
      "eval_loss": 6.849647045135498,
      "eval_runtime": 2.5266,
      "eval_samples_per_second": 594.476,
      "eval_steps_per_second": 37.204,
      "step": 60
    },
    {
      "epoch": 0.08284023668639054,
      "grad_norm": 11136.0,
      "learning_rate": 0.00011200000000000001,
      "loss": 7.1215,
      "step": 70
    },
    {
      "epoch": 0.09467455621301775,
      "grad_norm": 13568.0,
      "learning_rate": 0.00012800000000000002,
      "loss": 6.9277,
      "step": 80
    },
    {
      "epoch": 0.09467455621301775,
      "eval_loss": 7.1117143630981445,
      "eval_runtime": 2.3547,
      "eval_samples_per_second": 637.865,
      "eval_steps_per_second": 39.92,
      "step": 80
    },
    {
      "epoch": 0.10650887573964497,
      "grad_norm": 7520.0,
      "learning_rate": 0.000144,
      "loss": 6.9477,
      "step": 90
    },
    {
      "epoch": 0.11834319526627218,
      "grad_norm": 5344.0,
      "learning_rate": 0.00016,
      "loss": 7.4897,
      "step": 100
    },
    {
      "epoch": 0.11834319526627218,
      "eval_loss": 7.819643497467041,
      "eval_runtime": 2.3681,
      "eval_samples_per_second": 634.257,
      "eval_steps_per_second": 39.694,
      "step": 100
    },
    {
      "epoch": 0.1301775147928994,
      "grad_norm": 1944.0,
      "learning_rate": 0.00017600000000000002,
      "loss": 8.0353,
      "step": 110
    },
    {
      "epoch": 0.14201183431952663,
      "grad_norm": 386.0,
      "learning_rate": 0.000192,
      "loss": 8.3949,
      "step": 120
    },
    {
      "epoch": 0.14201183431952663,
      "eval_loss": 8.92322063446045,
      "eval_runtime": 2.2886,
      "eval_samples_per_second": 656.309,
      "eval_steps_per_second": 41.074,
      "step": 120
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 2752.0,
      "learning_rate": 0.0001999978128380225,
      "loss": 8.8096,
      "step": 130
    },
    {
      "epoch": 0.16568047337278108,
      "grad_norm": 2208.0,
      "learning_rate": 0.0001999803161162393,
      "loss": 9.3893,
      "step": 140
    },
    {
      "epoch": 0.16568047337278108,
      "eval_loss": 10.95390510559082,
      "eval_runtime": 2.3845,
      "eval_samples_per_second": 629.893,
      "eval_steps_per_second": 39.421,
      "step": 140
    },
    {
      "epoch": 0.17751479289940827,
      "grad_norm": 1104.0,
      "learning_rate": 0.00019994532573409262,
      "loss": 10.2318,
      "step": 150
    },
    {
      "epoch": 0.1893491124260355,
      "grad_norm": 664.0,
      "learning_rate": 0.00019989284781388617,
      "loss": 8.9254,
      "step": 160
    },
    {
      "epoch": 0.1893491124260355,
      "eval_loss": 9.343338966369629,
      "eval_runtime": 2.3475,
      "eval_samples_per_second": 639.824,
      "eval_steps_per_second": 40.042,
      "step": 160
    },
    {
      "epoch": 0.20118343195266272,
      "grad_norm": 576.0,
      "learning_rate": 0.00019982289153773646,
      "loss": 8.9456,
      "step": 170
    },
    {
      "epoch": 0.21301775147928995,
      "grad_norm": 3888.0,
      "learning_rate": 0.00019973546914596623,
      "loss": 9.1233,
      "step": 180
    },
    {
      "epoch": 0.21301775147928995,
      "eval_loss": 9.207473754882812,
      "eval_runtime": 2.8413,
      "eval_samples_per_second": 528.635,
      "eval_steps_per_second": 33.084,
      "step": 180
    },
    {
      "epoch": 0.22485207100591717,
      "grad_norm": 884.0,
      "learning_rate": 0.00019963059593496268,
      "loss": 9.3116,
      "step": 190
    },
    {
      "epoch": 0.23668639053254437,
      "grad_norm": 410.0,
      "learning_rate": 0.00019950829025450114,
      "loss": 9.2642,
      "step": 200
    },
    {
      "epoch": 0.23668639053254437,
      "eval_loss": 9.775616645812988,
      "eval_runtime": 2.3679,
      "eval_samples_per_second": 634.329,
      "eval_steps_per_second": 39.698,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 40,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 300220566994944.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}