masatochi committed
Commit b5ca8f7 · verified · 1 Parent(s): bf5c98e

Training in progress, step 170, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1313627e57f50b000f77a18dc14176b975cb6869558c43f866b9b1c2e8eb7115
+oid sha256:8d96cf5df8387a45ab1b175187a9f26bc043e803c8bb1d8c17007c49a7bcd82d
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:172f779c9438b79e3c86227407df67a2b35a9df322198efd751e21975aae1ded
+oid sha256:4e847f0a9aae8832c8e1bd08ae0e13ce8582dd32aeeb5c1e5da6d5913c813d12
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6728ffdf9bf59bf13703e6ecc746855ce34369dd62dc3ca69a894e5a32ceaccd
+oid sha256:82e3dacf2b05874132bf6fb4325d351a4ab2ba13cd877561594093394c8a7acc
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9d255475980828d00dffcac4a69741da74c7e5ef4f645ab21d69e57306e3317
+oid sha256:963abeacc51d2fd967b0d854376103780952d47675257465be60a3612cecb103
 size 1064
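
Each of the four files above is stored through Git LFS, so the diff only swaps the pointer's sha256 oid while the recorded byte size stays the same. Below is a minimal sketch of checking a locally pulled copy against its pointer; the path and the oid/size values are copied from this diff, and the helper name is made up for illustration.

```python
import hashlib

def matches_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the local file matches the oid/size from its LFS pointer.

    The Git LFS oid is simply the SHA-256 of the full file content, so the
    check is a streaming hash plus a size comparison.
    """
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Values taken from the updated pointer for the adapter weights in this commit
# (assumes the file has been pulled locally, e.g. via `git lfs pull`).
print(matches_pointer(
    "last-checkpoint/adapter_model.safetensors",
    expected_oid="8d96cf5df8387a45ab1b175187a9f26bc043e803c8bb1d8c17007c49a7bcd82d",
    expected_size=83945296,
))
```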
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.08068952869979827,
+  "epoch": 0.08313466593312549,
   "eval_steps": 34,
-  "global_step": 165,
+  "global_step": 170,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1202,6 +1202,49 @@
       "learning_rate": 2.0198277271976052e-05,
       "loss": 1.0459,
       "step": 165
+    },
+    {
+      "epoch": 0.08117855614646372,
+      "grad_norm": 1.1469403505325317,
+      "learning_rate": 1.9098300562505266e-05,
+      "loss": 0.9885,
+      "step": 166
+    },
+    {
+      "epoch": 0.08166758359312916,
+      "grad_norm": 1.1436965465545654,
+      "learning_rate": 1.8025951709277898e-05,
+      "loss": 0.9769,
+      "step": 167
+    },
+    {
+      "epoch": 0.0821566110397946,
+      "grad_norm": 1.0451879501342773,
+      "learning_rate": 1.6981596918444953e-05,
+      "loss": 1.0134,
+      "step": 168
+    },
+    {
+      "epoch": 0.08264563848646005,
+      "grad_norm": 1.125041127204895,
+      "learning_rate": 1.5965592836210743e-05,
+      "loss": 0.9923,
+      "step": 169
+    },
+    {
+      "epoch": 0.08313466593312549,
+      "grad_norm": 0.8700924515724182,
+      "learning_rate": 1.4978286427038601e-05,
+      "loss": 0.7571,
+      "step": 170
+    },
+    {
+      "epoch": 0.08313466593312549,
+      "eval_loss": 0.9938088655471802,
+      "eval_runtime": 1314.6112,
+      "eval_samples_per_second": 1.965,
+      "eval_steps_per_second": 0.655,
+      "step": 170
     }
   ],
   "logging_steps": 1,
@@ -1221,7 +1264,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 7.324274422657843e+17,
+  "total_flos": 7.546222132435354e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null
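
The new log entries advance the run from step 165 to step 170 and record an eval pass at step 170. A rough sanity check of the bookkeeping is sketched below, using only numbers that appear in the diff; the derived quantities are back-of-the-envelope estimates, not values read from the training config.

```python
# Consistency checks on the values logged in trainer_state.json for this commit.
epoch_at_170 = 0.08313466593312549
global_step = 170
steps_per_epoch = global_step / epoch_at_170  # ~2045 optimizer steps per epoch
print(f"approx. optimizer steps per epoch: {steps_per_epoch:.0f}")

eval_runtime = 1314.6112          # seconds
eval_samples_per_second = 1.965
eval_steps_per_second = 0.655
print(f"approx. eval examples: {eval_runtime * eval_samples_per_second:.0f}")             # ~2583
print(f"approx. eval batch size: {eval_samples_per_second / eval_steps_per_second:.1f}")  # ~3.0
```

Since optimizer.pt, scheduler.pt, rng_state.pth, and trainer_state.json are all present in the checkpoint, the run can presumably be resumed by pointing transformers' `Trainer.train(resume_from_checkpoint=...)` at the `last-checkpoint` directory, continuing from global step 170.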