{
  "best_metric": 1.2476589679718018,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.03395585738539898,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00016977928692699492,
      "eval_loss": 1.4329637289047241,
      "eval_runtime": 234.0581,
      "eval_samples_per_second": 5.298,
      "eval_steps_per_second": 1.324,
      "step": 1
    },
    {
      "epoch": 0.001697792869269949,
      "grad_norm": 1.311955213546753,
      "learning_rate": 4.1400000000000003e-05,
      "loss": 1.4203,
      "step": 10
    },
    {
      "epoch": 0.003395585738539898,
      "grad_norm": 0.7833012342453003,
      "learning_rate": 8.280000000000001e-05,
      "loss": 1.338,
      "step": 20
    },
    {
      "epoch": 0.0050933786078098476,
      "grad_norm": 0.7402932047843933,
      "learning_rate": 0.00012419999999999998,
      "loss": 1.2575,
      "step": 30
    },
    {
      "epoch": 0.006791171477079796,
      "grad_norm": 0.7024871110916138,
      "learning_rate": 0.00016560000000000001,
      "loss": 1.2019,
      "step": 40
    },
    {
      "epoch": 0.008488964346349746,
      "grad_norm": 0.804834246635437,
      "learning_rate": 0.000207,
      "loss": 1.2279,
      "step": 50
    },
    {
      "epoch": 0.008488964346349746,
      "eval_loss": 1.2476589679718018,
      "eval_runtime": 235.3963,
      "eval_samples_per_second": 5.268,
      "eval_steps_per_second": 1.317,
      "step": 50
    },
    {
      "epoch": 0.010186757215619695,
      "grad_norm": 0.7924010753631592,
      "learning_rate": 0.00020674787920189178,
      "loss": 1.222,
      "step": 60
    },
    {
      "epoch": 0.011884550084889643,
      "grad_norm": 0.8740193247795105,
      "learning_rate": 0.00020599274511475253,
      "loss": 1.2045,
      "step": 70
    },
    {
      "epoch": 0.013582342954159592,
      "grad_norm": 0.8409563899040222,
      "learning_rate": 0.00020473827667594888,
      "loss": 1.2077,
      "step": 80
    },
    {
      "epoch": 0.015280135823429542,
      "grad_norm": 1.0255435705184937,
      "learning_rate": 0.00020299058552961598,
      "loss": 1.2556,
      "step": 90
    },
    {
      "epoch": 0.01697792869269949,
      "grad_norm": 1.3903775215148926,
      "learning_rate": 0.00020075818625134152,
      "loss": 1.303,
      "step": 100
    },
    {
      "epoch": 0.01697792869269949,
      "eval_loss": 1.3105939626693726,
      "eval_runtime": 235.3286,
      "eval_samples_per_second": 5.269,
      "eval_steps_per_second": 1.317,
      "step": 100
    },
    {
      "epoch": 0.01867572156196944,
      "grad_norm": 0.6778289675712585,
      "learning_rate": 0.00019805195486600916,
      "loss": 1.3131,
      "step": 110
    },
    {
      "epoch": 0.02037351443123939,
      "grad_norm": 0.7167655825614929,
      "learning_rate": 0.00019488507586089894,
      "loss": 1.1878,
      "step": 120
    },
    {
      "epoch": 0.022071307300509338,
      "grad_norm": 0.786454975605011,
      "learning_rate": 0.00019127297795219008,
      "loss": 1.2166,
      "step": 130
    },
    {
      "epoch": 0.023769100169779286,
      "grad_norm": 0.8451493382453918,
      "learning_rate": 0.00018723325891780706,
      "loss": 1.2909,
      "step": 140
    },
    {
      "epoch": 0.025466893039049237,
      "grad_norm": 0.7425501942634583,
      "learning_rate": 0.0001827855998628142,
      "loss": 1.1985,
      "step": 150
    },
    {
      "epoch": 0.025466893039049237,
      "eval_loss": 1.248077630996704,
      "eval_runtime": 235.4875,
      "eval_samples_per_second": 5.266,
      "eval_steps_per_second": 1.316,
      "step": 150
    },
    {
      "epoch": 0.027164685908319185,
      "grad_norm": 0.8560181856155396,
      "learning_rate": 0.0001779516693350504,
      "loss": 1.1788,
      "step": 160
    },
    {
      "epoch": 0.028862478777589132,
      "grad_norm": 0.7847142815589905,
      "learning_rate": 0.00017275501775814182,
      "loss": 1.2282,
      "step": 170
    },
    {
      "epoch": 0.030560271646859084,
      "grad_norm": 0.8509446382522583,
      "learning_rate": 0.00016722096269620562,
      "loss": 1.2029,
      "step": 180
    },
    {
      "epoch": 0.03225806451612903,
      "grad_norm": 0.9557679891586304,
      "learning_rate": 0.00016137646550922228,
      "loss": 1.2601,
      "step": 190
    },
    {
      "epoch": 0.03395585738539898,
      "grad_norm": 1.3051382303237915,
      "learning_rate": 0.00015525,
      "loss": 1.2807,
      "step": 200
    },
    {
      "epoch": 0.03395585738539898,
      "eval_loss": 1.330949306488037,
      "eval_runtime": 236.0189,
      "eval_samples_per_second": 5.254,
      "eval_steps_per_second": 1.313,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 3
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.387406029088358e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}