masatochi committed
Commit 231e3c9 (verified) · 1 Parent(s): dde7b3d

Training in progress, step 30, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fffe989d2ee97d3d62cc7091b870276d764db85c56a2a741a7b91ad41e5e6b72
+oid sha256:0488ffe2817b80ca4606a42889a735911c4effb49520ec0a230c66e9b727c4ea
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:95b7874d29fe785653c7c3aacea25164cb32403cde9cf5f8e12a60a07f9155a4
+oid sha256:cbf817b27e890b9ab6bc9ea279859d4decd38a42b821989b4e1f510978bb734b
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc695d50fa9c7a7b39e7508c0827b13b4400b02508b8aa483545d12dba8b4308
+oid sha256:42700a3931f169d1a7cb24ca5c5bfdf8c30401d4efdc7d55be9e5bed753e25b1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7cbe95d5ecd33771846042e20aabf210775f4e6a78ced16f0764898d40abeba5
+oid sha256:cedaf8893734b19717a3bbbc716629d55965a18bdde504cf46d9182fcb60eb14
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.012225686166636102,
+  "epoch": 0.014670823399963322,
   "eval_steps": 34,
-  "global_step": 25,
+  "global_step": 30,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -190,6 +190,41 @@
       "learning_rate": 0.0001666666666666667,
       "loss": 1.471,
       "step": 25
+    },
+    {
+      "epoch": 0.012714713613301546,
+      "grad_norm": 1.876035451889038,
+      "learning_rate": 0.00017333333333333334,
+      "loss": 1.4368,
+      "step": 26
+    },
+    {
+      "epoch": 0.013203741059966991,
+      "grad_norm": 1.6051979064941406,
+      "learning_rate": 0.00018,
+      "loss": 1.3951,
+      "step": 27
+    },
+    {
+      "epoch": 0.013692768506632435,
+      "grad_norm": 1.7268619537353516,
+      "learning_rate": 0.0001866666666666667,
+      "loss": 1.1915,
+      "step": 28
+    },
+    {
+      "epoch": 0.014181795953297879,
+      "grad_norm": 1.888218879699707,
+      "learning_rate": 0.00019333333333333333,
+      "loss": 1.1358,
+      "step": 29
+    },
+    {
+      "epoch": 0.014670823399963322,
+      "grad_norm": 1.7404896020889282,
+      "learning_rate": 0.0002,
+      "loss": 1.2051,
+      "step": 30
     }
   ],
   "logging_steps": 1,
@@ -209,7 +244,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.109738548887552e+17,
+  "total_flos": 1.3316862586650624e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null