infogep committed · Commit ba36411 · verified · 1 Parent(s): 8a5105b

Training in progress, step 150, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:361ea9c27136638fc65e2fd4a45ef475f4dd8019914b17050dae329c9c627e0a
+oid sha256:f31371b53faf842937b0eff8c4a37bb1c35e3313c27a3d7b8df656c10e5ea498
 size 341314196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2de289fc62b0955bf411ab13d10605a45a9101f5c219e2a75328a64289bad3b5
+oid sha256:8172e745ec5d2dadd5e6968a9b6792c6b4631abe0b4f80522f8d7a9d4ea592af
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d4d0f57336dd58c2282a758b1873df2644647c71e8296b0dab58cb3a9f5f7c78
+oid sha256:7e45926e8bb4228c69b4b56f6b51f1445c0aeb3fb7bb09ed84764ec2b4c3a8ff
 size 1064
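
The three .pt/.pth diffs above touch Git LFS pointer files rather than the tensors themselves: only the sha256 oid changes in each file, while the declared byte size is unchanged. A minimal sketch of reading such a pointer (illustrative only, not part of this repository; the path is one of the files above):

from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    # An LFS pointer file is just "key value" pairs: version, oid, size.
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = parse_lfs_pointer("last-checkpoint/optimizer.pt")
print(pointer["oid"])   # sha256:f31371b5... after this commit
print(pointer["size"])  # 341314196 bytes, same as before the commit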
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": NaN,
   "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.29069767441860467,
+  "epoch": 0.436046511627907,
   "eval_steps": 50,
-  "global_step": 100,
+  "global_step": 150,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,49 @@
       "eval_samples_per_second": 18.737,
       "eval_steps_per_second": 4.781,
       "step": 100
+    },
+    {
+      "epoch": 0.31976744186046513,
+      "grad_norm": NaN,
+      "learning_rate": 3.1076923076923076e-06,
+      "loss": 0.0,
+      "step": 110
+    },
+    {
+      "epoch": 0.3488372093023256,
+      "grad_norm": NaN,
+      "learning_rate": 2.330769230769231e-06,
+      "loss": 0.0,
+      "step": 120
+    },
+    {
+      "epoch": 0.37790697674418605,
+      "grad_norm": NaN,
+      "learning_rate": 1.5538461538461538e-06,
+      "loss": 0.0,
+      "step": 130
+    },
+    {
+      "epoch": 0.4069767441860465,
+      "grad_norm": NaN,
+      "learning_rate": 7.769230769230769e-07,
+      "loss": 0.0,
+      "step": 140
+    },
+    {
+      "epoch": 0.436046511627907,
+      "grad_norm": NaN,
+      "learning_rate": 0.0,
+      "loss": 0.0,
+      "step": 150
+    },
+    {
+      "epoch": 0.436046511627907,
+      "eval_loss": NaN,
+      "eval_runtime": 7.7414,
+      "eval_samples_per_second": 18.73,
+      "eval_steps_per_second": 4.779,
+      "step": 150
     }
   ],
   "logging_steps": 10,
@@ -115,7 +158,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter": 1
+        "early_stopping_patience_counter": 2
       }
     },
     "TrainerControl": {
@@ -124,12 +167,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 3.61327403949097e+16,
+  "total_flos": 5.437798554481459e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null