{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.16181229773462782,
  "eval_steps": 100,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 5e-05,
      "loss": 1.9981,
      "step": 1
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.9999675930251536e-05,
      "loss": 2.0613,
      "step": 2
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.99987037294078e-05,
      "loss": 1.8228,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.99970834226737e-05,
      "loss": 1.707,
      "step": 4
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.999481505205661e-05,
      "loss": 1.5271,
      "step": 5
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.999189867636535e-05,
      "loss": 1.4562,
      "step": 6
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.998833437120866e-05,
      "loss": 1.3805,
      "step": 7
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.998412222899321e-05,
      "loss": 1.2998,
      "step": 8
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.997926235892124e-05,
      "loss": 1.4383,
      "step": 9
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.997375488698769e-05,
      "loss": 1.2441,
      "step": 10
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.996759995597697e-05,
      "loss": 1.2275,
      "step": 11
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.996079772545923e-05,
      "loss": 1.2233,
      "step": 12
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.995334837178625e-05,
      "loss": 1.1886,
      "step": 13
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.9945252088086825e-05,
      "loss": 1.1779,
      "step": 14
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.993650908426182e-05,
      "loss": 1.1122,
      "step": 15
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.992711958697868e-05,
      "loss": 1.0766,
      "step": 16
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.991708383966556e-05,
      "loss": 1.0836,
      "step": 17
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.9906402102505026e-05,
      "loss": 0.9728,
      "step": 18
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.989507465242732e-05,
      "loss": 0.9883,
      "step": 19
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.988310178310315e-05,
      "loss": 1.0307,
      "step": 20
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.9870483804936084e-05,
      "loss": 0.9613,
      "step": 21
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9857221045054535e-05,
      "loss": 1.1038,
      "step": 22
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9843313847303246e-05,
      "loss": 0.9206,
      "step": 23
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9828762572234374e-05,
      "loss": 0.9893,
      "step": 24
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9813567597098166e-05,
      "loss": 0.8328,
      "step": 25
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.979772931583317e-05,
      "loss": 0.8712,
      "step": 26
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.978124813905599e-05,
      "loss": 0.8031,
      "step": 27
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.976412449405072e-05,
      "loss": 0.7675,
      "step": 28
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.974635882475778e-05,
      "loss": 0.7851,
      "step": 29
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.972795159176243e-05,
      "loss": 0.7447,
      "step": 30
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.9708903272282884e-05,
      "loss": 0.7719,
      "step": 31
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.9689214360157844e-05,
      "loss": 0.7142,
      "step": 32
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.9668885365833795e-05,
      "loss": 0.8649,
      "step": 33
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.96479168163517e-05,
      "loss": 0.7574,
      "step": 34
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.9626309255333346e-05,
      "loss": 0.9205,
      "step": 35
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.9604063242967315e-05,
      "loss": 0.7987,
      "step": 36
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.958117935599434e-05,
      "loss": 0.7626,
      "step": 37
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.955765818769249e-05,
      "loss": 0.8332,
      "step": 38
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.9533500347861675e-05,
      "loss": 0.9178,
      "step": 39
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.950870646280791e-05,
      "loss": 0.9919,
      "step": 40
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.948327717532705e-05,
      "loss": 0.7098,
      "step": 41
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.9457213144688095e-05,
      "loss": 0.7552,
      "step": 42
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.9430515046616175e-05,
      "loss": 0.7496,
      "step": 43
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.940318357327495e-05,
      "loss": 0.7667,
      "step": 44
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.937521943324873e-05,
      "loss": 0.8685,
      "step": 45
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.934662335152405e-05,
      "loss": 0.6715,
      "step": 46
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.931739606947091e-05,
      "loss": 0.8149,
      "step": 47
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.9287538344823544e-05,
      "loss": 0.7346,
      "step": 48
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.925705095166079e-05,
      "loss": 0.7803,
      "step": 49
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.922593468038599e-05,
      "loss": 0.7451,
      "step": 50
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.919419033770652e-05,
      "loss": 0.8402,
      "step": 51
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.916181874661286e-05,
      "loss": 0.7988,
      "step": 52
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.91288207463573e-05,
      "loss": 0.7444,
      "step": 53
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.9095197192432105e-05,
      "loss": 0.8545,
      "step": 54
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.906094895654744e-05,
      "loss": 0.761,
      "step": 55
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.902607692660865e-05,
      "loss": 0.6741,
      "step": 56
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.8990582006693365e-05,
      "loss": 0.8457,
      "step": 57
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.895446511702793e-05,
      "loss": 0.8096,
      "step": 58
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.891772719396369e-05,
      "loss": 0.7989,
      "step": 59
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.888036918995258e-05,
      "loss": 0.7683,
      "step": 60
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.884239207352252e-05,
      "loss": 0.7912,
      "step": 61
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.880379682925228e-05,
      "loss": 0.7417,
      "step": 62
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.876458445774594e-05,
      "loss": 0.7511,
      "step": 63
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.872475597560699e-05,
      "loss": 0.8021,
      "step": 64
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.8684312415411897e-05,
      "loss": 0.8154,
      "step": 65
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.864325482568344e-05,
      "loss": 0.7109,
      "step": 66
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.860158427086341e-05,
      "loss": 0.7915,
      "step": 67
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.855930183128513e-05,
      "loss": 0.6363,
      "step": 68
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.851640860314536e-05,
      "loss": 0.6987,
      "step": 69
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.8472905698475906e-05,
      "loss": 0.6498,
      "step": 70
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.84287942451148e-05,
      "loss": 0.7768,
      "step": 71
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.8384075386677054e-05,
      "loss": 0.7979,
      "step": 72
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.833875028252499e-05,
      "loss": 0.7611,
      "step": 73
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.8292820107738235e-05,
      "loss": 0.7889,
      "step": 74
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.824628605308319e-05,
      "loss": 0.6706,
      "step": 75
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.819914932498222e-05,
      "loss": 0.7762,
      "step": 76
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.815141114548232e-05,
      "loss": 0.7517,
      "step": 77
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.8103072752223486e-05,
      "loss": 0.7793,
      "step": 78
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.805413539840659e-05,
      "loss": 0.7306,
      "step": 79
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.80046003527609e-05,
      "loss": 0.834,
      "step": 80
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.7954468899511215e-05,
      "loss": 0.8076,
      "step": 81
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.790374233834452e-05,
      "loss": 0.8375,
      "step": 82
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.7852421984376324e-05,
      "loss": 0.7839,
      "step": 83
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.780050916811658e-05,
      "loss": 0.6221,
      "step": 84
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.7748005235435137e-05,
      "loss": 0.7212,
      "step": 85
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.76949115475269e-05,
      "loss": 0.6369,
      "step": 86
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.7641229480876515e-05,
      "loss": 0.7167,
      "step": 87
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.758696042722269e-05,
      "loss": 0.6908,
      "step": 88
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.753210579352211e-05,
      "loss": 0.6681,
      "step": 89
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.747666700191297e-05,
      "loss": 0.6566,
      "step": 90
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.7420645489678076e-05,
      "loss": 0.6869,
      "step": 91
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.7364042709207626e-05,
      "loss": 0.7106,
      "step": 92
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.730686012796153e-05,
      "loss": 0.6782,
      "step": 93
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.724909922843136e-05,
      "loss": 0.7148,
      "step": 94
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.719076150810193e-05,
      "loss": 0.8887,
      "step": 95
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.7131848479412476e-05,
      "loss": 0.7408,
      "step": 96
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.707236166971742e-05,
      "loss": 0.7046,
      "step": 97
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.7012302621246804e-05,
      "loss": 0.7657,
      "step": 98
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.695167289106629e-05,
      "loss": 0.8138,
      "step": 99
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.689047405103678e-05,
      "loss": 0.6964,
      "step": 100
    },
    {
      "epoch": 0.16,
      "eval_loss": 0.796372652053833,
      "eval_runtime": 5.3558,
      "eval_samples_per_second": 1.867,
      "eval_steps_per_second": 0.373,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 618,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 2.520597053571072e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}