infogep committed (verified)
Commit d58d47a · 1 Parent(s): 690c02b

Training in progress, step 150, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:23ec1a37f0f59fc6a05e9063f15da20a868ccbdb2dafe7ad28de654b4c53e73d
+ oid sha256:df1d64c2b7e78cccf52b8fd89561794311c736dd0cf71c94f48717db83624eea
  size 198011252
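
Each checkpoint binary in this commit is stored through Git LFS, so the diff above only shows the pointer file (spec version, `oid sha256:` content hash, byte `size`), not the tensor data itself. As a rough illustration, such a pointer can be parsed and checked against a locally downloaded blob with the standard library alone; the pointer text is taken from the new `optimizer.pt` entry above, while the file path in the usage comment is a hypothetical placeholder.

```python
import hashlib


def parse_lfs_pointer(text: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def blob_matches_pointer(blob_path: str, pointer: dict) -> bool:
    """Check a downloaded blob against the pointer's sha256 oid and size."""
    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    expected_oid = pointer["oid"].removeprefix("sha256:")
    return digest.hexdigest() == expected_oid and size == int(pointer["size"])


# Pointer text from the new optimizer.pt entry in this commit:
pointer_text = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:df1d64c2b7e78cccf52b8fd89561794311c736dd0cf71c94f48717db83624eea\n"
    "size 198011252\n"
)
pointer = parse_lfs_pointer(pointer_text)
# Hypothetical local path; uncomment after downloading the blob:
# print(blob_matches_pointer("last-checkpoint/optimizer.pt", pointer))
```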
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:facf61e19608174386a390900e13d0aa47506868d94138ed8774a017c650c68e
+ oid sha256:a4eb8a76209844dfdef21bb4b3fbf751bdb9e3846d711e505846df0ae8a9458b
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d4d0f57336dd58c2282a758b1873df2644647c71e8296b0dab58cb3a9f5f7c78
+ oid sha256:7e45926e8bb4228c69b4b56f6b51f1445c0aeb3fb7bb09ed84764ec2b4c3a8ff
  size 1064
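
Taken together, `optimizer.pt`, `scheduler.pt`, and `rng_state.pth` are the pieces the Hugging Face `Trainer` reloads when a run resumes from `last-checkpoint`. Below is a minimal, unofficial sketch of that restore step in plain PyTorch; the optimizer/scheduler objects are assumed to already exist, and the key layout inside `rng_state.pth` is an assumption for illustration, not taken from this repository.

```python
import torch

# Assumed to already exist: a model, its optimizer, and its LR scheduler,
# rebuilt exactly as they were configured for the original run.
checkpoint_dir = "last-checkpoint"  # hypothetical local path

# Optimizer and scheduler state are ordinary torch pickles.
optimizer_state = torch.load(f"{checkpoint_dir}/optimizer.pt", map_location="cpu")
scheduler_state = torch.load(f"{checkpoint_dir}/scheduler.pt", map_location="cpu")
# optimizer.load_state_dict(optimizer_state)
# lr_scheduler.load_state_dict(scheduler_state)

# rng_state.pth is assumed to hold RNG snapshots (CPU/CUDA torch generators,
# plus Python/NumPy state); restore whichever pieces are present.
rng_state = torch.load(f"{checkpoint_dir}/rng_state.pth", map_location="cpu")
if "cpu" in rng_state:
    torch.set_rng_state(rng_state["cpu"])
if "cuda" in rng_state and torch.cuda.is_available():
    torch.cuda.set_rng_state_all(rng_state["cuda"])
```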
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": NaN,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.07418397626112759,
+ "epoch": 0.11127596439169139,
  "eval_steps": 50,
- "global_step": 100,
+ "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -101,6 +101,49 @@
  "eval_samples_per_second": 30.61,
  "eval_steps_per_second": 7.652,
  "step": 100
+ },
+ {
+ "epoch": 0.08160237388724036,
+ "grad_norm": NaN,
+ "learning_rate": 3.1076923076923076e-06,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.08902077151335312,
+ "grad_norm": NaN,
+ "learning_rate": 2.330769230769231e-06,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.09643916913946587,
+ "grad_norm": NaN,
+ "learning_rate": 1.5538461538461538e-06,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.10385756676557864,
+ "grad_norm": NaN,
+ "learning_rate": 7.769230769230769e-07,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.11127596439169139,
+ "grad_norm": NaN,
+ "learning_rate": 0.0,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.11127596439169139,
+ "eval_loss": NaN,
+ "eval_runtime": 18.5754,
+ "eval_samples_per_second": 30.578,
+ "eval_steps_per_second": 7.645,
+ "step": 150
  }
  ],
  "logging_steps": 10,
@@ -115,7 +158,7 @@
  "early_stopping_threshold": 0.0
  },
  "attributes": {
- "early_stopping_patience_counter": 1
+ "early_stopping_patience_counter": 2
  }
  },
  "TrainerControl": {
@@ -124,12 +167,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 1.43327370412032e+16,
+ "total_flos": 2.14991055618048e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null