masatochi committed on
Commit 7d95ae8 · verified · 1 Parent(s): 6326a2f

Training in progress, step 180, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5c6e2ac9f8a2ad3476e171fd08f07b0b8f23c6bd8293be2a79d76677a206d830
+ oid sha256:a982ffc522f303c27a661a3c6b07bc6ecf9028d56d4934c408e109cf133f6517
  size 83945296
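
The checkpoint files above are stored through Git LFS, so the diff only touches the pointer: the spec version, an oid sha256 digest of the new blob, and its size in bytes. A minimal sketch (not part of the commit) of checking the new digest locally after the LFS object has been pulled; the path and expected oid come from the pointer diff above, everything else is standard-library Python.

import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    # Git LFS derives the pointer oid as the SHA-256 of the object's contents.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected value taken from the '+ oid sha256:...' line in the diff above.
expected = "a982ffc522f303c27a661a3c6b07bc6ecf9028d56d4934c408e109cf133f6517"
assert lfs_sha256("last-checkpoint/adapter_model.safetensors") == expected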
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:73f348b4c9f60ffb3e93dfe2f4b1adc4c42952d961aeec51271f487b7722d78f
+ oid sha256:bf626a3d53d70160de91f85c3f20078e2616061d67ee7b90870c0ae486bb5883
  size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:350629c8726cd8603e94d6658b9108dc8785e8448290744abeabee7c4b3b48d6
+ oid sha256:6c9e95e8038f40b4d739b9be60c3742d2a869eaba46d8fd74d89cee2de436ed5
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:416d32fdf638b555a0fc031fb149fd18abaec9c234026f168a4a4bd45704a2a9
+ oid sha256:b7c990948ce2aaf07d64c7544604482e458e5281aa73955448bb3e7dee4b1367
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.08557980316645272,
+ "epoch": 0.08802494039977994,
  "eval_steps": 34,
- "global_step": 175,
+ "global_step": 180,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1280,6 +1280,41 @@
  "learning_rate": 1.0483670864493778e-05,
  "loss": 0.8914,
  "step": 175
+ },
+ {
+ "epoch": 0.08606883061311817,
+ "grad_norm": 1.2630629539489746,
+ "learning_rate": 9.675280065387116e-06,
+ "loss": 1.0159,
+ "step": 176
+ },
+ {
+ "epoch": 0.08655785805978361,
+ "grad_norm": 1.059738278388977,
+ "learning_rate": 8.897735075391155e-06,
+ "loss": 0.897,
+ "step": 177
+ },
+ {
+ "epoch": 0.08704688550644905,
+ "grad_norm": 1.183966875076294,
+ "learning_rate": 8.151301425407699e-06,
+ "loss": 0.9258,
+ "step": 178
+ },
+ {
+ "epoch": 0.0875359129531145,
+ "grad_norm": 1.2419488430023193,
+ "learning_rate": 7.43623402184438e-06,
+ "loss": 0.9676,
+ "step": 179
+ },
+ {
+ "epoch": 0.08802494039977994,
+ "grad_norm": 1.0326918363571167,
+ "learning_rate": 6.75277705956443e-06,
+ "loss": 0.8304,
+ "step": 180
  }
  ],
  "logging_steps": 1,
@@ -1299,7 +1334,7 @@
  "attributes": {}
  }
  },
- "total_flos": 7.768169842212864e+17,
+ "total_flos": 7.990117551990374e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null