rakhman-llm committed (verified)
Commit 761437f · 1 parent: 999d545

Training in progress, step 7500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:65f41221813633184b2538c9109cba635ca76c786eea27784f452a640f0ed68f
+oid sha256:80f13988b844ed8c69a71d954cca56c06260c9c862e37c2b37d479d3aa847423
 size 891558696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf7a4bf67da87410a6d09254af4d762ab91388990e0e2f7d30094dbe9596864a
+oid sha256:7537b80366f1058d42502431de3c635a4774b2db45ac3cd9565cc2f7c197bc84
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d201843135a1eed508b1bfdef27db8413b4debdba9826e312a0df4e52244dac2
+oid sha256:616c142757d7ec77ca22084861554955efb9fa5f5da5efb368b0b3385f62bf1c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f12d6282f561619ed37157e6869ea88b19ad7c33ffa9cf992ccd45bf1251ffb
+oid sha256:3ff581a4a3c68f40d8f4f714dabed1501f0a612483eace7a936c21011b06e648
 size 1064
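
The four entries above are Git LFS pointer files: the repository tracks only a sha256 oid and a byte size, while the actual checkpoint blobs are fetched separately (e.g. with git lfs pull). Below is a minimal verification sketch, assuming the files have already been materialized under last-checkpoint/; the expected digests are simply the new (+) oids from the diffs above.

import hashlib
from pathlib import Path

# Expected digests: the "+" oids from the LFS pointer diffs in this commit.
EXPECTED = {
    "last-checkpoint/model.safetensors": "80f13988b844ed8c69a71d954cca56c06260c9c862e37c2b37d479d3aa847423",
    "last-checkpoint/optimizer.pt": "7537b80366f1058d42502431de3c635a4774b2db45ac3cd9565cc2f7c197bc84",
    "last-checkpoint/rng_state.pth": "616c142757d7ec77ca22084861554955efb9fa5f5da5efb368b0b3385f62bf1c",
    "last-checkpoint/scheduler.pt": "3ff581a4a3c68f40d8f4f714dabed1501f0a612483eace7a936c21011b06e648",
}

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream in 1 MiB chunks so multi-GB checkpoints need not fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

for name, expected in EXPECTED.items():
    path = Path(name)
    if not path.exists():
        print(f"{name}: missing (did git lfs pull run?)")
    elif sha256_of(path) == expected:
        print(f"{name}: OK")
    else:
        print(f"{name}: sha256 mismatch")

If a file still contains the three-line pointer text instead of binary data, git lfs pull has not resolved it yet.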
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.4546965918536992,
+  "epoch": 1.5586034912718203,
   "eval_steps": 500,
-  "global_step": 7000,
+  "global_step": 7500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -113,6 +113,13 @@
       "learning_rate": 1.030756442227764e-05,
       "loss": 0.352,
       "step": 7000
+    },
+    {
+      "epoch": 1.5586034912718203,
+      "grad_norm": 2.3900763988494873,
+      "learning_rate": 9.614851759490165e-06,
+      "loss": 0.3564,
+      "step": 7500
     }
   ],
   "logging_steps": 500,
@@ -132,7 +139,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8524801509949440.0,
+  "total_flos": 9133759400509440.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null