rakhman-llm committed (verified)
Commit aecb01b · 1 Parent(s): 379a30b

Training in progress, step 10500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3730a1d878d4053b143d57a57c1fac448e69341b9479dfd3f851a6ab33144e33
+oid sha256:43947f7dff9276008c5ac3a3bb1a1f78045f6daf3ab26be1df501497ecb5a13c
 size 891558696
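
The model.safetensors entry above is a Git LFS pointer; only the sha256 oid changed, while the size (891558696 bytes) is unchanged. As a minimal sketch, once the real binary has been fetched (e.g. via git lfs pull or huggingface_hub), the weights can be inspected with the safetensors library; the path below assumes the repository's last-checkpoint/ layout:

# Sketch: load the checkpoint weights after the LFS blob has been resolved locally.
# Assumes the safetensors package is installed and the file is the real ~891 MB
# binary, not the three-line LFS pointer shown in the diff.
from safetensors.torch import load_file

state_dict = load_file("last-checkpoint/model.safetensors")
print(sum(t.numel() for t in state_dict.values()), "parameters loaded")
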
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f89d84528008d151b3dd4170e97962e1d6e21f8f9d5216a258aa552ef4052471
+oid sha256:659ad3bb4e988117552420945713bf112a7f8dc606586d8b6b10aff068671da7
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2e2beb5f3eae46760fe93c244c16d7260ed6d437cc6536ace16b095ba9f37e6d
+oid sha256:f2f8cb1d04d85cb700bc0a165ddfffe857827b34a325813bd7675c23cb4f568a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:022b8217d4b55852b997e3b5ca16c9910cfc8e1e5dae3900bbbe1878452b5ee9
+oid sha256:6ac8c1d8a81c24b85fd218732e29aedf5ad8d51951edcda5923c223ea804a4dd
 size 1064
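
Each binary file in this checkpoint (model.safetensors, optimizer.pt, rng_state.pth, scheduler.pt) is tracked as a three-line LFS pointer: version, oid sha256, size. Below is a small sketch for verifying that a locally downloaded blob matches the oid recorded in its pointer; the path and expected hash (taken from the scheduler.pt diff above) are illustrative:

# Sketch: check a local blob against the sha256 oid from its Git LFS pointer.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "6ac8c1d8a81c24b85fd218732e29aedf5ad8d51951edcda5923c223ea804a4dd"  # new scheduler.pt oid
assert sha256_of("last-checkpoint/scheduler.pt") == expected
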
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.0781379883624274,
+  "epoch": 2.1820448877805485,
   "eval_steps": 500,
-  "global_step": 10000,
+  "global_step": 10500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -163,6 +163,13 @@
       "learning_rate": 6.15267387087836e-06,
       "loss": 0.3293,
       "step": 10000
+    },
+    {
+      "epoch": 2.1820448877805485,
+      "grad_norm": 1.2724053859710693,
+      "learning_rate": 5.459961208090885e-06,
+      "loss": 0.3369,
+      "step": 10500
     }
   ],
   "logging_steps": 500,
@@ -182,7 +189,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.217793989541888e+16,
+  "total_flos": 1.278689778597888e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null