masatochi committed 4f5d674 (verified) · parent 6dbef0a

Training in progress, step 185, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a982ffc522f303c27a661a3c6b07bc6ecf9028d56d4934c408e109cf133f6517
+ oid sha256:c9b58cf00ae33d931950ca83d390cce441e85e2e6a88d1037cba299822818883
  size 83945296
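
The file above is a Git LFS pointer, so only the sha256 oid changes when the checkpoint is overwritten; the adapter stays at 83945296 bytes. A minimal sketch of verifying a locally downloaded copy against the new pointer (the local path is an assumption, not something this commit provides):

# Sketch: verify a downloaded LFS object against the pointer shown above.
# The local path is hypothetical; the oid/size come from the new pointer.
import hashlib

EXPECTED_OID = "c9b58cf00ae33d931950ca83d390cce441e85e2e6a88d1037cba299822818883"
EXPECTED_SIZE = 83945296
path = "last-checkpoint/adapter_model.safetensors"  # assumed local copy

sha = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size} != {EXPECTED_SIZE}"
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("adapter_model.safetensors matches its LFS pointer")

The same check applies to the other pointer files updated in this commit.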
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bf626a3d53d70160de91f85c3f20078e2616061d67ee7b90870c0ae486bb5883
+ oid sha256:8a0f5c214d8eef696d9225ce31c5918e21d737ee9b33a2e3528207dadf48babe
  size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6c9e95e8038f40b4d739b9be60c3742d2a869eaba46d8fd74d89cee2de436ed5
+ oid sha256:44f07ee7727b2a57ec87aa201844eb2406bc5b699b1d99c8190698d036889705
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b7c990948ce2aaf07d64c7544604482e458e5281aa73955448bb3e7dee4b1367
+ oid sha256:31cc4c125027b06153274d1c1fcc2291ff49e04af7d8c2cae65dd480bfd90a0c
  size 1064
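
Alongside the adapter weights, the commit refreshes the PyTorch-serialized optimizer state (optimizer.pt), learning-rate scheduler state (scheduler.pt), and RNG snapshot (rng_state.pth) that the Hugging Face Trainer writes with each checkpoint. A rough sketch for inspecting them locally, assuming a recent PyTorch and that the checkpoint directory has been downloaded:

# Sketch: peek at the non-weight checkpoint files (assumed local paths).
import torch

ckpt = "last-checkpoint"

# These are pickled state dicts; newer PyTorch releases may require
# weights_only=False to load the non-tensor entries they contain.
opt_state = torch.load(f"{ckpt}/optimizer.pt", map_location="cpu", weights_only=False)
sched_state = torch.load(f"{ckpt}/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load(f"{ckpt}/rng_state.pth", map_location="cpu", weights_only=False)

print(list(opt_state.keys()))   # typically 'state' and 'param_groups'
print(sched_state)              # e.g. the scheduler's last_epoch / last recorded LR
print(list(rng_state.keys()))   # python / numpy / cpu / cuda RNG snapshots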
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.08802494039977994,
+ "epoch": 0.09047007763310716,
  "eval_steps": 34,
- "global_step": 180,
+ "global_step": 185,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1315,6 +1315,41 @@
  "learning_rate": 6.75277705956443e-06,
  "loss": 0.8304,
  "step": 180
+ },
+ {
+ "epoch": 0.08851396784644538,
+ "grad_norm": 1.0449199676513672,
+ "learning_rate": 6.1011639384943585e-06,
+ "loss": 1.0186,
+ "step": 181
+ },
+ {
+ "epoch": 0.08900299529311083,
+ "grad_norm": 1.2107876539230347,
+ "learning_rate": 5.481617183918053e-06,
+ "loss": 1.1832,
+ "step": 182
+ },
+ {
+ "epoch": 0.08949202273977627,
+ "grad_norm": 1.49854576587677,
+ "learning_rate": 4.8943483704846475e-06,
+ "loss": 1.2453,
+ "step": 183
+ },
+ {
+ "epoch": 0.08998105018644172,
+ "grad_norm": 1.2521921396255493,
+ "learning_rate": 4.339558049955927e-06,
+ "loss": 0.9645,
+ "step": 184
+ },
+ {
+ "epoch": 0.09047007763310716,
+ "grad_norm": 1.0324766635894775,
+ "learning_rate": 3.817435682718096e-06,
+ "loss": 0.9331,
+ "step": 185
  }
  ],
  "logging_steps": 1,
@@ -1334,7 +1369,7 @@
  "attributes": {}
  }
  },
- "total_flos": 7.990117551990374e+17,
+ "total_flos": 8.212065261767885e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null