{
  "best_metric": 10.372808456420898,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.06038191561627292,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0012076383123254585,
      "grad_norm": 0.020386414602398872,
      "learning_rate": 5e-05,
      "loss": 10.377,
      "step": 1
    },
    {
      "epoch": 0.0012076383123254585,
      "eval_loss": 10.376673698425293,
      "eval_runtime": 9.1376,
      "eval_samples_per_second": 610.554,
      "eval_steps_per_second": 76.388,
      "step": 1
    },
    {
      "epoch": 0.002415276624650917,
      "grad_norm": 0.021383745595812798,
      "learning_rate": 0.0001,
      "loss": 10.3772,
      "step": 2
    },
    {
      "epoch": 0.0036229149369763755,
      "grad_norm": 0.021724741905927658,
      "learning_rate": 9.989294616193017e-05,
      "loss": 10.3769,
      "step": 3
    },
    {
      "epoch": 0.004830553249301834,
      "grad_norm": 0.02241404540836811,
      "learning_rate": 9.957224306869053e-05,
      "loss": 10.3762,
      "step": 4
    },
    {
      "epoch": 0.0060381915616272925,
      "grad_norm": 0.02375703491270542,
      "learning_rate": 9.903926402016153e-05,
      "loss": 10.3757,
      "step": 5
    },
    {
      "epoch": 0.007245829873952751,
      "grad_norm": 0.022783808410167694,
      "learning_rate": 9.829629131445342e-05,
      "loss": 10.3758,
      "step": 6
    },
    {
      "epoch": 0.00845346818627821,
      "grad_norm": 0.02458474412560463,
      "learning_rate": 9.73465064747553e-05,
      "loss": 10.3752,
      "step": 7
    },
    {
      "epoch": 0.009661106498603668,
      "grad_norm": 0.024050414562225342,
      "learning_rate": 9.619397662556435e-05,
      "loss": 10.3766,
      "step": 8
    },
    {
      "epoch": 0.010868744810929127,
      "grad_norm": 0.02697114460170269,
      "learning_rate": 9.484363707663442e-05,
      "loss": 10.3753,
      "step": 9
    },
    {
      "epoch": 0.012076383123254585,
      "grad_norm": 0.027155030518770218,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.3766,
      "step": 10
    },
    {
      "epoch": 0.013284021435580044,
      "grad_norm": 0.02795632928609848,
      "learning_rate": 9.157348061512727e-05,
      "loss": 10.3749,
      "step": 11
    },
    {
      "epoch": 0.014491659747905502,
      "grad_norm": 0.0274726040661335,
      "learning_rate": 8.966766701456177e-05,
      "loss": 10.3751,
      "step": 12
    },
    {
      "epoch": 0.01569929806023096,
      "grad_norm": 0.022757207974791527,
      "learning_rate": 8.759199037394887e-05,
      "loss": 10.3763,
      "step": 13
    },
    {
      "epoch": 0.01690693637255642,
      "grad_norm": 0.0227514561265707,
      "learning_rate": 8.535533905932738e-05,
      "loss": 10.3763,
      "step": 14
    },
    {
      "epoch": 0.018114574684881878,
      "grad_norm": 0.02591072767972946,
      "learning_rate": 8.296729075500344e-05,
      "loss": 10.375,
      "step": 15
    },
    {
      "epoch": 0.019322212997207336,
      "grad_norm": 0.029294557869434357,
      "learning_rate": 8.043807145043604e-05,
      "loss": 10.3743,
      "step": 16
    },
    {
      "epoch": 0.020529851309532793,
      "grad_norm": 0.027765313163399696,
      "learning_rate": 7.777851165098012e-05,
      "loss": 10.3754,
      "step": 17
    },
    {
      "epoch": 0.021737489621858255,
      "grad_norm": 0.027076933532953262,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.3743,
      "step": 18
    },
    {
      "epoch": 0.022945127934183712,
      "grad_norm": 0.02902560494840145,
      "learning_rate": 7.211443451095007e-05,
      "loss": 10.3754,
      "step": 19
    },
    {
      "epoch": 0.02415276624650917,
      "grad_norm": 0.03063380718231201,
      "learning_rate": 6.91341716182545e-05,
      "loss": 10.3743,
      "step": 20
    },
    {
      "epoch": 0.025360404558834627,
      "grad_norm": 0.033042989671230316,
      "learning_rate": 6.607197326515808e-05,
      "loss": 10.3739,
      "step": 21
    },
    {
      "epoch": 0.02656804287116009,
      "grad_norm": 0.030266039073467255,
      "learning_rate": 6.294095225512603e-05,
      "loss": 10.3745,
      "step": 22
    },
    {
      "epoch": 0.027775681183485546,
      "grad_norm": 0.03352139890193939,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 10.3736,
      "step": 23
    },
    {
      "epoch": 0.028983319495811004,
      "grad_norm": 0.03301949426531792,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 10.3734,
      "step": 24
    },
    {
      "epoch": 0.03019095780813646,
      "grad_norm": 0.038696322590112686,
      "learning_rate": 5.327015646150716e-05,
      "loss": 10.3731,
      "step": 25
    },
    {
      "epoch": 0.03019095780813646,
      "eval_loss": 10.373895645141602,
      "eval_runtime": 9.18,
      "eval_samples_per_second": 607.736,
      "eval_steps_per_second": 76.035,
      "step": 25
    },
    {
      "epoch": 0.03139859612046192,
      "grad_norm": 0.02821424789726734,
      "learning_rate": 5e-05,
      "loss": 10.3751,
      "step": 26
    },
    {
      "epoch": 0.03260623443278738,
      "grad_norm": 0.03072463348507881,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 10.3737,
      "step": 27
    },
    {
      "epoch": 0.03381387274511284,
      "grad_norm": 0.03460706025362015,
      "learning_rate": 4.347369038899744e-05,
      "loss": 10.3738,
      "step": 28
    },
    {
      "epoch": 0.035021511057438295,
      "grad_norm": 0.038342833518981934,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 10.3741,
      "step": 29
    },
    {
      "epoch": 0.036229149369763756,
      "grad_norm": 0.03766153007745743,
      "learning_rate": 3.705904774487396e-05,
      "loss": 10.3733,
      "step": 30
    },
    {
      "epoch": 0.03743678768208922,
      "grad_norm": 0.03494866192340851,
      "learning_rate": 3.392802673484193e-05,
      "loss": 10.3735,
      "step": 31
    },
    {
      "epoch": 0.03864442599441467,
      "grad_norm": 0.040823884308338165,
      "learning_rate": 3.086582838174551e-05,
      "loss": 10.3736,
      "step": 32
    },
    {
      "epoch": 0.03985206430674013,
      "grad_norm": 0.03876008465886116,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 10.3731,
      "step": 33
    },
    {
      "epoch": 0.04105970261906559,
      "grad_norm": 0.04227786511182785,
      "learning_rate": 2.500000000000001e-05,
      "loss": 10.3729,
      "step": 34
    },
    {
      "epoch": 0.04226734093139105,
      "grad_norm": 0.04130228981375694,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 10.3725,
      "step": 35
    },
    {
      "epoch": 0.04347497924371651,
      "grad_norm": 0.04358939826488495,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 10.3733,
      "step": 36
    },
    {
      "epoch": 0.04468261755604196,
      "grad_norm": 0.04161683842539787,
      "learning_rate": 1.703270924499656e-05,
      "loss": 10.3719,
      "step": 37
    },
    {
      "epoch": 0.045890255868367424,
      "grad_norm": 0.03958718478679657,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 10.3736,
      "step": 38
    },
    {
      "epoch": 0.047097894180692886,
      "grad_norm": 0.035522960126399994,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 10.3745,
      "step": 39
    },
    {
      "epoch": 0.04830553249301834,
      "grad_norm": 0.03779358044266701,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 10.3737,
      "step": 40
    },
    {
      "epoch": 0.0495131708053438,
      "grad_norm": 0.040716782212257385,
      "learning_rate": 8.426519384872733e-06,
      "loss": 10.3729,
      "step": 41
    },
    {
      "epoch": 0.050720809117669255,
      "grad_norm": 0.042443014681339264,
      "learning_rate": 6.698729810778065e-06,
      "loss": 10.3738,
      "step": 42
    },
    {
      "epoch": 0.051928447429994716,
      "grad_norm": 0.041323788464069366,
      "learning_rate": 5.156362923365588e-06,
      "loss": 10.373,
      "step": 43
    },
    {
      "epoch": 0.05313608574232018,
      "grad_norm": 0.04533396661281586,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 10.3722,
      "step": 44
    },
    {
      "epoch": 0.05434372405464563,
      "grad_norm": 0.04312126711010933,
      "learning_rate": 2.653493525244721e-06,
      "loss": 10.3719,
      "step": 45
    },
    {
      "epoch": 0.05555136236697109,
      "grad_norm": 0.04327261447906494,
      "learning_rate": 1.70370868554659e-06,
      "loss": 10.3726,
      "step": 46
    },
    {
      "epoch": 0.05675900067929655,
      "grad_norm": 0.04568940028548241,
      "learning_rate": 9.607359798384785e-07,
      "loss": 10.3721,
      "step": 47
    },
    {
      "epoch": 0.05796663899162201,
      "grad_norm": 0.046326469630002975,
      "learning_rate": 4.277569313094809e-07,
      "loss": 10.372,
      "step": 48
    },
    {
      "epoch": 0.05917427730394747,
      "grad_norm": 0.04663577303290367,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 10.3722,
      "step": 49
    },
    {
      "epoch": 0.06038191561627292,
      "grad_norm": 0.05151713266968727,
      "learning_rate": 0.0,
      "loss": 10.3723,
      "step": 50
    },
    {
      "epoch": 0.06038191561627292,
      "eval_loss": 10.372808456420898,
      "eval_runtime": 9.1415,
      "eval_samples_per_second": 610.293,
      "eval_steps_per_second": 76.355,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 42781424615424.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}