{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.3318777292576419,
"eval_steps": 500,
"global_step": 19,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.017467248908296942,
"grad_norm": 0.12968653440475464,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.3378,
"step": 1
},
{
"epoch": 0.034934497816593885,
"grad_norm": 0.1782296746969223,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.2777,
"step": 2
},
{
"epoch": 0.05240174672489083,
"grad_norm": 0.1216743066906929,
"learning_rate": 3e-06,
"loss": 1.2269,
"step": 3
},
{
"epoch": 0.06986899563318777,
"grad_norm": 0.13824662566184998,
"learning_rate": 4.000000000000001e-06,
"loss": 1.2621,
"step": 4
},
{
"epoch": 0.08733624454148471,
"grad_norm": 0.15446175634860992,
"learning_rate": 5e-06,
"loss": 1.2518,
"step": 5
},
{
"epoch": 0.10480349344978165,
"grad_norm": 0.13843178749084473,
"learning_rate": 6e-06,
"loss": 1.2438,
"step": 6
},
{
"epoch": 0.1222707423580786,
"grad_norm": 0.15130339562892914,
"learning_rate": 7e-06,
"loss": 1.2331,
"step": 7
},
{
"epoch": 0.13973799126637554,
"grad_norm": 0.15838982164859772,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2703,
"step": 8
},
{
"epoch": 0.1572052401746725,
"grad_norm": 0.15755866467952728,
"learning_rate": 9e-06,
"loss": 1.2603,
"step": 9
},
{
"epoch": 0.17467248908296942,
"grad_norm": 0.16667789220809937,
"learning_rate": 1e-05,
"loss": 1.2428,
"step": 10
},
{
"epoch": 0.19213973799126638,
"grad_norm": 0.1794327050447464,
"learning_rate": 9.988834393115768e-06,
"loss": 1.2442,
"step": 11
},
{
"epoch": 0.2096069868995633,
"grad_norm": 0.18840420246124268,
"learning_rate": 9.955387440773902e-06,
"loss": 1.2562,
"step": 12
},
{
"epoch": 0.22707423580786026,
"grad_norm": 0.24771647155284882,
"learning_rate": 9.899808525182935e-06,
"loss": 1.2056,
"step": 13
},
{
"epoch": 0.2445414847161572,
"grad_norm": 0.2212584763765335,
"learning_rate": 9.822345875271884e-06,
"loss": 1.2462,
"step": 14
},
{
"epoch": 0.26200873362445415,
"grad_norm": 0.2274945080280304,
"learning_rate": 9.723345458039595e-06,
"loss": 1.262,
"step": 15
},
{
"epoch": 0.2794759825327511,
"grad_norm": 0.22981807589530945,
"learning_rate": 9.603249433382145e-06,
"loss": 1.2918,
"step": 16
},
{
"epoch": 0.29694323144104806,
"grad_norm": 0.27434590458869934,
"learning_rate": 9.462594179299408e-06,
"loss": 1.2858,
"step": 17
},
{
"epoch": 0.314410480349345,
"grad_norm": 0.23107963800430298,
"learning_rate": 9.302007896300697e-06,
"loss": 1.1679,
"step": 18
},
{
"epoch": 0.3318777292576419,
"grad_norm": 0.23975740373134613,
"learning_rate": 9.122207801708802e-06,
"loss": 1.2119,
"step": 19
}
],
"logging_steps": 1,
"max_steps": 57,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 19,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.9698239407086633e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}