infogep committed on
Commit c144fcb · verified · 1 Parent(s): 27d9375

Training in progress, step 150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:780ca9ea6bec6229fe7e0d5a3838448b72bdcfa4a6a92192f128e5a44d814f87
+oid sha256:4daa8372a646806b343e84b941ed388aff25fa982442144d18a8d0db02ab75aa
 size 645975704
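
The adapter weights are tracked with Git LFS, so the diff only swaps the pointer's sha256 oid; the 645975704-byte payload itself lives in LFS storage. A minimal sketch, assuming the checkpoint has been downloaded locally, for checking that the file on disk matches the updated pointer:

import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    # Git LFS records the sha256 of the file contents as the pointer's oid.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the new pointer above.
expected = "4daa8372a646806b343e84b941ed388aff25fa982442144d18a8d0db02ab75aa"
print(lfs_sha256("last-checkpoint/adapter_model.safetensors") == expected)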
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:03a4c5996e7a1ff890ee9fd657be505265cdc6e49597fb1e49968d6f0e1cded3
+oid sha256:95f1e43a0751897bc233882d0c2a84cdc1a54609a966e1c223d4671acc7cfe8b
 size 328468404
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:98f0b46a055895d37d66fcedbb237bcf5d7d58216e3c9875c0855f56c45a5101
+oid sha256:6589d018f91f97165f298b9b659197fbb9fe8847d828b74d5b8f2a889526405a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d4d0f57336dd58c2282a758b1873df2644647c71e8296b0dab58cb3a9f5f7c78
+oid sha256:7e45926e8bb4228c69b4b56f6b51f1445c0aeb3fb7bb09ed84764ec2b4c3a8ff
 size 1064
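
optimizer.pt, scheduler.pt and rng_state.pth hold the optimizer state, learning-rate schedule and RNG snapshots that the Hugging Face Trainer writes alongside the adapter so training can resume exactly from step 150. A hedged sketch of peeking inside them, assuming the checkpoint is available locally (the key names are the usual state_dict layout, not something this commit guarantees):

import torch

# weights_only=False because rng_state.pth can contain non-tensor objects (e.g. numpy RNG state).
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(list(optimizer_state.keys()))   # typically ["state", "param_groups"]
print(scheduler_state)                # e.g. last_epoch and _last_lr of the LR schedule
print(list(rng_state.keys()))         # typically python / numpy / cpu / cuda RNG states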
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.5890767574310303,
-  "best_model_checkpoint": "miner_id_24/checkpoint-100",
-  "epoch": 0.10582010582010581,
+  "best_metric": 0.47778764367103577,
+  "best_model_checkpoint": "miner_id_24/checkpoint-150",
+  "epoch": 0.15873015873015872,
   "eval_steps": 50,
-  "global_step": 100,
+  "global_step": 150,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,49 @@
       "eval_samples_per_second": 19.483,
       "eval_steps_per_second": 4.895,
       "step": 100
+    },
+    {
+      "epoch": 0.1164021164021164,
+      "grad_norm": 7.185388088226318,
+      "learning_rate": 3.1076923076923076e-06,
+      "loss": 0.3998,
+      "step": 110
+    },
+    {
+      "epoch": 0.12698412698412698,
+      "grad_norm": 5.154370307922363,
+      "learning_rate": 2.330769230769231e-06,
+      "loss": 0.3106,
+      "step": 120
+    },
+    {
+      "epoch": 0.13756613756613756,
+      "grad_norm": 11.753735542297363,
+      "learning_rate": 1.5538461538461538e-06,
+      "loss": 0.3945,
+      "step": 130
+    },
+    {
+      "epoch": 0.14814814814814814,
+      "grad_norm": 8.9371976852417,
+      "learning_rate": 7.769230769230769e-07,
+      "loss": 0.438,
+      "step": 140
+    },
+    {
+      "epoch": 0.15873015873015872,
+      "grad_norm": 16.33173942565918,
+      "learning_rate": 0.0,
+      "loss": 0.4991,
+      "step": 150
+    },
+    {
+      "epoch": 0.15873015873015872,
+      "eval_loss": 0.47778764367103577,
+      "eval_runtime": 20.4229,
+      "eval_samples_per_second": 19.488,
+      "eval_steps_per_second": 4.896,
+      "step": 150
     }
   ],
   "logging_steps": 10,
@@ -124,12 +167,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 3.5547217133568e+16,
+  "total_flos": 5.3320825700352e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null