rakhman-llm committed
Commit 8d0a493 · verified · 1 Parent(s): 0a7dd44

Training in progress, step 4500, checkpoint
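The file set in this commit (model.safetensors, optimizer.pt, rng_state.pth, scheduler.pt, trainer_state.json) is the layout the Hugging Face transformers Trainer writes for a resumable checkpoint. Below is a minimal sketch of the kind of configuration that would push a rolling last-checkpoint/ folder like this one every 500 steps; only the batch size, the 500-step interval, and the hub strategy are inferred from the repo contents, everything else is an illustrative assumption.

```python
# Minimal sketch (assumption): TrainingArguments that would produce periodic
# "Training in progress, step N, checkpoint" commits into last-checkpoint/.
# Only batch size, the 500-step interval, and hub_strategy="checkpoint" are
# inferred from this repo; the remaining values are illustrative.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="rakhman-llm-run",      # hypothetical local output directory
    per_device_train_batch_size=2,     # matches "train_batch_size": 2
    logging_steps=500,                 # matches "logging_steps": 500
    save_steps=500,                    # checkpoints at 4000, 4500, ...
    push_to_hub=True,                  # upload each save to the Hub
    hub_strategy="checkpoint",         # keep the latest copy under last-checkpoint/
)
```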
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7cbe649c71b516b094216fbea21197a8afdeff556e86902ecc63fac11ca9d235
+ oid sha256:1bdcea07d77b677c78e6c852f8f008c8fae5d04a2f8735d08be078cf14a800a8
  size 891558696
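Only the LFS object id changes while the size stays at 891,558,696 bytes, i.e. the same tensor shapes with updated values. Given a local clone with Git LFS, the weights can be inspected without loading the whole model; a small sketch using the safetensors library (the local path is an assumption):

```python
# Minimal sketch: list a few tensor names/shapes from the updated weights file.
# Assumes a local clone with Git LFS so last-checkpoint/model.safetensors is
# the real 891,558,696-byte file rather than the pointer shown above.
from safetensors import safe_open

with safe_open("last-checkpoint/model.safetensors", framework="pt", device="cpu") as f:
    for name in list(f.keys())[:5]:
        print(name, f.get_slice(name).get_shape())
```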
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:272c3de368eb938e5ec04ee3958d5faee41704c34b38097bf92699ca98709501
+ oid sha256:a426c1319c8371c822d9a891ae4d493e112eab044e83da934efd1a314fdfeffd
  size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e6909786b07113d046b6984022b8f3c1d4a309c9e0a18f83af53a35bec4523b1
+ oid sha256:509ee6f2dc95c367cf1eda5ebb7ac0772535dbbc4fb915944d9e317d5b2f8b3a
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:09e9e60d2c8faf134da25c0954086bcd3b7f390c1be768462509e5eb18839183
+ oid sha256:e2b5d040d5c740697a7c2dcc153f257910aea505a3c7139823305a169f6a1ef0
  size 1064
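Each of the four binaries above is stored as a Git LFS pointer: the diff only swaps the sha256 oid, and the recorded size stays fixed, which is what lets a client verify a download. A short sketch of that check (the path assumes a local clone; the expected digest is the new scheduler.pt oid from the diff above):

```python
# Minimal sketch: recompute the sha256 of a checked-out LFS file and compare it
# with the oid recorded in the pointer diff above (path assumes a local clone).
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "e2b5d040d5c740697a7c2dcc153f257910aea505a3c7139823305a169f6a1ef0"  # new scheduler.pt oid
print(sha256_of("last-checkpoint/scheduler.pt") == expected)
```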
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.8312551953449709,
+   "epoch": 0.9351620947630923,
    "eval_steps": 500,
-   "global_step": 4000,
+   "global_step": 4500,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -63,6 +63,13 @@
        "learning_rate": 1.446245497367692e-05,
        "loss": 0.4065,
        "step": 4000
+     },
+     {
+       "epoch": 0.9351620947630923,
+       "grad_norm": 1.4909119606018066,
+       "learning_rate": 1.3769742310889445e-05,
+       "loss": 0.4031,
+       "step": 4500
      }
    ],
    "logging_steps": 500,
@@ -82,7 +89,7 @@
        "attributes": {}
      }
    },
-   "total_flos": 4871663124480000.0,
+   "total_flos": 5480621015040000.0,
    "train_batch_size": 2,
    "trial_name": null,
    "trial_params": null