sheepy928 committed (verified)
Commit 143012a · 1 Parent(s): af3c3fa

Model save

README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ base_model: Qwen/Qwen2.5-1.5B-Instruct
+ library_name: transformers
+ model_name: qwen2.5_1.5b_ins-pt-bf16
+ tags:
+ - generated_from_trainer
+ - trl
+ - sft
+ licence: license
+ ---
+
+ # Model Card for qwen2.5_1.5b_ins-pt-bf16
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="sheepy928/qwen2.5_1.5b_ins-pt-bf16", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/sheepy928/r1fp4/runs/a00adzvk)
+
+
+ This model was trained with SFT.
+
+ ### Framework versions
+
+ - TRL: 0.15.0.dev0
+ - Transformers: 4.49.0.dev0
+ - Pytorch: 2.5.1
+ - Datasets: 3.2.0
+ - Tokenizers: 0.21.0
+
+ ## Citations
+
+
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+ title = {{TRL: Transformer Reinforcement Learning}},
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+ year = 2020,
+ journal = {GitHub repository},
+ publisher = {GitHub},
+ howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
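The card above records that the model was fine-tuned with SFT via TRL; the run settings captured in `trainer_state.json` later in this commit are 10 epochs, per-device batch size 4, a linear learning-rate schedule, logging/eval every 200 steps, and checkpoints every 500 steps. A minimal sketch of how such a run might be launched with TRL's `SFTTrainer` follows; the dataset name, the 2e-5 peak learning rate, the bf16 flag, and the gradient-accumulation value are inferences or placeholders, not values stated anywhere in this commit:

```python
# Hedged sketch of an SFT run matching the settings recorded in trainer_state.json.
# The dataset id is a placeholder; the training data is not part of this commit.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("your-org/your-sft-dataset")  # placeholder dataset with train/test splits

config = SFTConfig(
    output_dir="qwen2.5_1.5b_ins-pt-bf16",
    num_train_epochs=10,                  # trainer_state.json: num_train_epochs
    per_device_train_batch_size=4,        # trainer_state.json: train_batch_size
    gradient_accumulation_steps=8,        # assumption: effective batch of ~32 could also come from multiple GPUs
    learning_rate=2e-5,                   # inferred from the linear LR schedule in the logs
    lr_scheduler_type="linear",
    bf16=True,                            # implied by the "bf16" suffix in the model name
    logging_steps=200,                    # trainer_state.json: logging_steps
    eval_strategy="steps",
    eval_steps=200,                       # trainer_state.json: eval_steps
    save_steps=500,                       # trainer_state.json: save_steps
    report_to="wandb",                    # the model card links a W&B run
)

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-1.5B-Instruct",   # base model from the card metadata
    args=config,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
)
trainer.train()
```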
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 768626978193408.0,
+ "train_loss": 2.0993992060908564,
+ "train_runtime": 74825.3444,
+ "train_samples": 16610,
+ "train_samples_per_second": 2.888,
+ "train_steps_per_second": 0.09
+ }
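The aggregate throughput figures above are internally consistent; a quick arithmetic check (the effective batch size is inferred from the ratio of the two per-second figures and is not stated anywhere in this commit):

```python
# Sanity-check the reported throughput in all_results.json.
train_runtime = 74825.3444            # seconds
train_samples_per_second = 2.888
train_steps_per_second = 0.09
max_steps = 6750                      # from trainer_state.json

# Inferred effective batch size (per-device batch x devices x gradient accumulation).
effective_batch = train_samples_per_second / train_steps_per_second
print(round(effective_batch))         # ~32 (inference, not stated in the commit)

# Examples processed across all 6750 optimizer steps, and the implied wall-clock time.
examples_processed = max_steps * round(effective_batch)      # ~216,000
print(examples_processed / train_runtime)                    # ~2.89 samples/s, matches 2.888
print(train_runtime / 3600)                                  # ~20.8 hours of training
```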
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 151643,
+ "eos_token_id": 151645,
+ "transformers_version": "4.49.0.dev0",
+ "use_cache": false
+ }
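For reference, these IDs are the standard Qwen2.5 special tokens (151643 is `<|endoftext|>`, 151645 is `<|im_end|>`), and `use_cache: false` is typically a training-time setting that can be overridden at inference. A small sketch for inspecting the shipped defaults; it assumes the tokenizer in this repo is the unchanged Qwen2.5 tokenizer:

```python
# Inspect the generation defaults committed in generation_config.json.
from transformers import AutoTokenizer, GenerationConfig

repo = "sheepy928/qwen2.5_1.5b_ins-pt-bf16"

gen_config = GenerationConfig.from_pretrained(repo)
print(gen_config.bos_token_id, gen_config.eos_token_id)   # 151643 151645

tokenizer = AutoTokenizer.from_pretrained(repo)
print(tokenizer.decode([gen_config.eos_token_id]))        # "<|im_end|>" for the Qwen2.5 vocab
```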
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a1d5b8b898dcb89f79b89cb27821239cbe27cc37f1b6379c7d4e1e825244017f
+ oid sha256:7032e60ca0e278caef3fba96e60e82f8ec48372333b05f7bc68292efb1b68c55
  size 3086634632
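The weights file is stored in Git LFS, so the commit only updates the pointer: a new SHA-256 object ID with an unchanged size of 3086634632 bytes (about 3.09 GB, consistent with a 1.5B-parameter model in bf16). A small sketch for verifying a downloaded copy against the pointer; the local file path is an assumption:

```python
# Verify a locally downloaded model.safetensors against the LFS pointer above.
import hashlib
from pathlib import Path

expected_oid = "7032e60ca0e278caef3fba96e60e82f8ec48372333b05f7bc68292efb1b68c55"
expected_size = 3086634632

path = Path("model.safetensors")      # assumed local path after `git lfs pull` or a hub download
assert path.stat().st_size == expected_size, "size mismatch"

sha = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):   # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")
```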
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 768626978193408.0,
+ "train_loss": 2.0993992060908564,
+ "train_runtime": 74825.3444,
+ "train_samples": 16610,
+ "train_samples_per_second": 2.888,
+ "train_steps_per_second": 0.09
+ }
trainer_state.json ADDED
@@ -0,0 +1,604 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 9.985936343449296,
+ "eval_steps": 200,
+ "global_step": 6750,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29607698001480387,
+ "grad_norm": 2.7709997274459304,
+ "learning_rate": 1.9407407407407407e-05,
+ "loss": 6.1641,
+ "mean_token_accuracy": 0.1623367673992674,
+ "step": 200
+ },
+ {
+ "epoch": 0.29607698001480387,
+ "eval_loss": 4.407998561859131,
+ "eval_mean_token_accuracy": 0.25814827533577533,
+ "eval_runtime": 17.8969,
+ "eval_samples_per_second": 7.152,
+ "eval_steps_per_second": 0.894,
+ "step": 200
+ },
+ {
+ "epoch": 0.5921539600296077,
+ "grad_norm": 3.4130161394563543,
+ "learning_rate": 1.8814814814814816e-05,
+ "loss": 3.9329,
+ "mean_token_accuracy": 0.30167891483516485,
+ "step": 400
+ },
+ {
+ "epoch": 0.5921539600296077,
+ "eval_loss": 3.607056140899658,
+ "eval_mean_token_accuracy": 0.337335927960928,
+ "eval_runtime": 17.8919,
+ "eval_samples_per_second": 7.154,
+ "eval_steps_per_second": 0.894,
+ "step": 400
+ },
+ {
+ "epoch": 0.8882309400444115,
+ "grad_norm": 1.9205457771259444,
+ "learning_rate": 1.8222222222222224e-05,
+ "loss": 3.3827,
+ "mean_token_accuracy": 0.35828514194139194,
+ "step": 600
+ },
+ {
+ "epoch": 0.8882309400444115,
+ "eval_loss": 3.2180376052856445,
+ "eval_mean_token_accuracy": 0.3790006868131868,
+ "eval_runtime": 17.8941,
+ "eval_samples_per_second": 7.153,
+ "eval_steps_per_second": 0.894,
+ "step": 600
+ },
+ {
+ "epoch": 1.1835677276091783,
+ "grad_norm": 2.219040531148233,
+ "learning_rate": 1.7629629629629633e-05,
+ "loss": 3.0463,
+ "mean_token_accuracy": 0.3991521079866944,
+ "step": 800
+ },
+ {
+ "epoch": 1.1835677276091783,
+ "eval_loss": 2.958108901977539,
+ "eval_mean_token_accuracy": 0.41293498168498166,
+ "eval_runtime": 17.8905,
+ "eval_samples_per_second": 7.155,
+ "eval_steps_per_second": 0.894,
+ "step": 800
+ },
+ {
+ "epoch": 1.4796447076239823,
+ "grad_norm": 1.9578494903008412,
+ "learning_rate": 1.7037037037037038e-05,
+ "loss": 2.8108,
+ "mean_token_accuracy": 0.4309763431013431,
+ "step": 1000
+ },
+ {
+ "epoch": 1.4796447076239823,
+ "eval_loss": 2.753509044647217,
+ "eval_mean_token_accuracy": 0.4397664835164835,
+ "eval_runtime": 17.886,
+ "eval_samples_per_second": 7.156,
+ "eval_steps_per_second": 0.895,
+ "step": 1000
+ },
+ {
+ "epoch": 1.7757216876387862,
+ "grad_norm": 1.6226934039550227,
+ "learning_rate": 1.6444444444444444e-05,
+ "loss": 2.6416,
+ "mean_token_accuracy": 0.4566705586080586,
+ "step": 1200
+ },
+ {
+ "epoch": 1.7757216876387862,
+ "eval_loss": 2.5862350463867188,
+ "eval_mean_token_accuracy": 0.4640758547008547,
+ "eval_runtime": 17.9046,
+ "eval_samples_per_second": 7.149,
+ "eval_steps_per_second": 0.894,
+ "step": 1200
+ },
+ {
+ "epoch": 2.071058475203553,
+ "grad_norm": 1.5520186396248283,
+ "learning_rate": 1.5851851851851852e-05,
+ "loss": 2.4744,
+ "mean_token_accuracy": 0.48079080485095527,
+ "step": 1400
+ },
+ {
+ "epoch": 2.071058475203553,
+ "eval_loss": 2.44563627243042,
+ "eval_mean_token_accuracy": 0.4833009004884005,
+ "eval_runtime": 17.8972,
+ "eval_samples_per_second": 7.152,
+ "eval_steps_per_second": 0.894,
+ "step": 1400
+ },
+ {
+ "epoch": 2.3671354552183566,
+ "grad_norm": 1.5647563757923375,
+ "learning_rate": 1.525925925925926e-05,
+ "loss": 2.3221,
+ "mean_token_accuracy": 0.5035665064102564,
+ "step": 1600
+ },
+ {
+ "epoch": 2.3671354552183566,
+ "eval_loss": 2.3370866775512695,
+ "eval_mean_token_accuracy": 0.49927884615384616,
+ "eval_runtime": 17.8991,
+ "eval_samples_per_second": 7.151,
+ "eval_steps_per_second": 0.894,
+ "step": 1600
+ },
+ {
+ "epoch": 2.6632124352331608,
+ "grad_norm": 1.5085290967116813,
+ "learning_rate": 1.4666666666666666e-05,
+ "loss": 2.2258,
+ "mean_token_accuracy": 0.5176262591575091,
+ "step": 1800
+ },
+ {
+ "epoch": 2.6632124352331608,
+ "eval_loss": 2.240328550338745,
+ "eval_mean_token_accuracy": 0.5144898504273504,
+ "eval_runtime": 17.8923,
+ "eval_samples_per_second": 7.154,
+ "eval_steps_per_second": 0.894,
+ "step": 1800
+ },
+ {
+ "epoch": 2.9592894152479645,
+ "grad_norm": 1.5061171138250924,
+ "learning_rate": 1.4074074074074075e-05,
+ "loss": 2.1464,
+ "mean_token_accuracy": 0.5297419108669108,
+ "step": 2000
+ },
+ {
+ "epoch": 2.9592894152479645,
+ "eval_loss": 2.162349224090576,
+ "eval_mean_token_accuracy": 0.5255132020757021,
+ "eval_runtime": 17.9046,
+ "eval_samples_per_second": 7.149,
+ "eval_steps_per_second": 0.894,
+ "step": 2000
+ },
+ {
+ "epoch": 3.254626202812731,
+ "grad_norm": 1.5604440734793847,
+ "learning_rate": 1.3481481481481482e-05,
+ "loss": 2.0498,
+ "mean_token_accuracy": 0.5452948610843348,
+ "step": 2200
+ },
+ {
+ "epoch": 3.254626202812731,
+ "eval_loss": 2.100726366043091,
+ "eval_mean_token_accuracy": 0.534930173992674,
+ "eval_runtime": 17.8785,
+ "eval_samples_per_second": 7.159,
+ "eval_steps_per_second": 0.895,
+ "step": 2200
+ },
+ {
+ "epoch": 3.5507031828275353,
+ "grad_norm": 1.5621392816113453,
+ "learning_rate": 1.288888888888889e-05,
+ "loss": 1.9897,
+ "mean_token_accuracy": 0.554459249084249,
+ "step": 2400
+ },
+ {
+ "epoch": 3.5507031828275353,
+ "eval_loss": 2.05090069770813,
+ "eval_mean_token_accuracy": 0.5429983211233211,
+ "eval_runtime": 17.896,
+ "eval_samples_per_second": 7.152,
+ "eval_steps_per_second": 0.894,
+ "step": 2400
+ },
+ {
+ "epoch": 3.846780162842339,
+ "grad_norm": 1.2766839744837124,
+ "learning_rate": 1.2296296296296298e-05,
+ "loss": 1.9316,
+ "mean_token_accuracy": 0.5623727106227107,
+ "step": 2600
+ },
+ {
+ "epoch": 3.846780162842339,
+ "eval_loss": 1.9980659484863281,
+ "eval_mean_token_accuracy": 0.5504884004884005,
+ "eval_runtime": 17.8898,
+ "eval_samples_per_second": 7.155,
+ "eval_steps_per_second": 0.894,
+ "step": 2600
+ },
+ {
+ "epoch": 4.142116950407106,
+ "grad_norm": 1.3499621558383104,
+ "learning_rate": 1.1703703703703703e-05,
+ "loss": 1.8843,
+ "mean_token_accuracy": 0.5703756491350477,
+ "step": 2800
+ },
+ {
+ "epoch": 4.142116950407106,
+ "eval_loss": 1.9619176387786865,
+ "eval_mean_token_accuracy": 0.5568185286935287,
+ "eval_runtime": 17.8884,
+ "eval_samples_per_second": 7.155,
+ "eval_steps_per_second": 0.894,
+ "step": 2800
+ },
+ {
+ "epoch": 4.438193930421909,
+ "grad_norm": 1.2966133165796498,
+ "learning_rate": 1.1111111111111113e-05,
+ "loss": 1.8236,
+ "mean_token_accuracy": 0.58019971001221,
+ "step": 3000
+ },
+ {
+ "epoch": 4.438193930421909,
+ "eval_loss": 1.928423523902893,
+ "eval_mean_token_accuracy": 0.5617177960927962,
+ "eval_runtime": 17.9022,
+ "eval_samples_per_second": 7.15,
+ "eval_steps_per_second": 0.894,
+ "step": 3000
+ },
+ {
+ "epoch": 4.734270910436713,
+ "grad_norm": 1.3799177772818907,
+ "learning_rate": 1.0518518518518519e-05,
+ "loss": 1.8055,
+ "mean_token_accuracy": 0.5831678113553114,
+ "step": 3200
+ },
+ {
+ "epoch": 4.734270910436713,
+ "eval_loss": 1.8989052772521973,
+ "eval_mean_token_accuracy": 0.5659836691086692,
+ "eval_runtime": 17.9121,
+ "eval_samples_per_second": 7.146,
+ "eval_steps_per_second": 0.893,
+ "step": 3200
+ },
+ {
+ "epoch": 5.02960769800148,
+ "grad_norm": 1.189701686001914,
+ "learning_rate": 9.925925925925927e-06,
+ "loss": 1.7795,
+ "mean_token_accuracy": 0.5869535331001496,
+ "step": 3400
+ },
+ {
+ "epoch": 5.02960769800148,
+ "eval_loss": 1.8730087280273438,
+ "eval_mean_token_accuracy": 0.5700644841269842,
+ "eval_runtime": 17.8877,
+ "eval_samples_per_second": 7.156,
+ "eval_steps_per_second": 0.894,
+ "step": 3400
+ },
+ {
+ "epoch": 5.325684678016284,
+ "grad_norm": 1.2336131696453405,
+ "learning_rate": 9.333333333333334e-06,
+ "loss": 1.7181,
+ "mean_token_accuracy": 0.5980093101343101,
+ "step": 3600
+ },
+ {
+ "epoch": 5.325684678016284,
+ "eval_loss": 1.8516058921813965,
+ "eval_mean_token_accuracy": 0.5734088827838828,
+ "eval_runtime": 17.8893,
+ "eval_samples_per_second": 7.155,
+ "eval_steps_per_second": 0.894,
+ "step": 3600
+ },
+ {
+ "epoch": 5.6217616580310885,
+ "grad_norm": 1.3072064080159254,
+ "learning_rate": 8.740740740740741e-06,
+ "loss": 1.7039,
+ "mean_token_accuracy": 0.5995736797924298,
+ "step": 3800
+ },
+ {
+ "epoch": 5.6217616580310885,
+ "eval_loss": 1.8317267894744873,
+ "eval_mean_token_accuracy": 0.5765376984126984,
+ "eval_runtime": 17.8909,
+ "eval_samples_per_second": 7.154,
+ "eval_steps_per_second": 0.894,
+ "step": 3800
+ },
+ {
+ "epoch": 5.917838638045892,
+ "grad_norm": 1.1207509804360356,
+ "learning_rate": 8.148148148148148e-06,
+ "loss": 1.6899,
+ "mean_token_accuracy": 0.6014800442612942,
+ "step": 4000
+ },
+ {
+ "epoch": 5.917838638045892,
+ "eval_loss": 1.8143231868743896,
+ "eval_mean_token_accuracy": 0.5789148351648352,
+ "eval_runtime": 17.9255,
+ "eval_samples_per_second": 7.141,
+ "eval_steps_per_second": 0.893,
+ "step": 4000
+ },
+ {
+ "epoch": 6.213175425610658,
+ "grad_norm": 1.1903087300751234,
+ "learning_rate": 7.555555555555556e-06,
+ "loss": 1.6474,
+ "mean_token_accuracy": 0.6092672156581931,
+ "step": 4200
+ },
+ {
+ "epoch": 6.213175425610658,
+ "eval_loss": 1.8032296895980835,
+ "eval_mean_token_accuracy": 0.580849358974359,
+ "eval_runtime": 17.9223,
+ "eval_samples_per_second": 7.142,
+ "eval_steps_per_second": 0.893,
+ "step": 4200
+ },
+ {
+ "epoch": 6.509252405625462,
+ "grad_norm": 1.1713052935721413,
+ "learning_rate": 6.962962962962964e-06,
+ "loss": 1.6235,
+ "mean_token_accuracy": 0.6131747557997558,
+ "step": 4400
+ },
+ {
+ "epoch": 6.509252405625462,
+ "eval_loss": 1.7903690338134766,
+ "eval_mean_token_accuracy": 0.5832226800976801,
+ "eval_runtime": 17.9071,
+ "eval_samples_per_second": 7.148,
+ "eval_steps_per_second": 0.893,
+ "step": 4400
+ },
+ {
+ "epoch": 6.805329385640267,
+ "grad_norm": 1.219773082768343,
+ "learning_rate": 6.370370370370371e-06,
+ "loss": 1.6199,
+ "mean_token_accuracy": 0.6140330815018316,
+ "step": 4600
+ },
+ {
+ "epoch": 6.805329385640267,
+ "eval_loss": 1.775641679763794,
+ "eval_mean_token_accuracy": 0.5847260378510378,
+ "eval_runtime": 17.9026,
+ "eval_samples_per_second": 7.15,
+ "eval_steps_per_second": 0.894,
+ "step": 4600
+ },
+ {
+ "epoch": 7.100666173205033,
+ "grad_norm": 1.227329336420539,
+ "learning_rate": 5.777777777777778e-06,
+ "loss": 1.5968,
+ "mean_token_accuracy": 0.6178009507896726,
+ "step": 4800
+ },
+ {
+ "epoch": 7.100666173205033,
+ "eval_loss": 1.767627239227295,
+ "eval_mean_token_accuracy": 0.5867120726495727,
+ "eval_runtime": 17.9009,
+ "eval_samples_per_second": 7.15,
+ "eval_steps_per_second": 0.894,
+ "step": 4800
+ },
+ {
+ "epoch": 7.3967431532198376,
+ "grad_norm": 1.190619502177119,
+ "learning_rate": 5.185185185185185e-06,
+ "loss": 1.564,
+ "mean_token_accuracy": 0.6240972603785103,
+ "step": 5000
+ },
+ {
+ "epoch": 7.3967431532198376,
+ "eval_loss": 1.7600514888763428,
+ "eval_mean_token_accuracy": 0.5872462606837607,
+ "eval_runtime": 17.8856,
+ "eval_samples_per_second": 7.157,
+ "eval_steps_per_second": 0.895,
+ "step": 5000
+ },
+ {
+ "epoch": 7.692820133234641,
+ "grad_norm": 1.0951341674513762,
+ "learning_rate": 4.592592592592593e-06,
+ "loss": 1.5614,
+ "mean_token_accuracy": 0.624267322954823,
+ "step": 5200
+ },
+ {
+ "epoch": 7.692820133234641,
+ "eval_loss": 1.7513068914413452,
+ "eval_mean_token_accuracy": 0.5891063797313797,
+ "eval_runtime": 17.8894,
+ "eval_samples_per_second": 7.155,
+ "eval_steps_per_second": 0.894,
+ "step": 5200
+ },
+ {
+ "epoch": 7.988897113249445,
+ "grad_norm": 1.0806878005212168,
+ "learning_rate": 4.000000000000001e-06,
+ "loss": 1.5575,
+ "mean_token_accuracy": 0.6250475427350427,
+ "step": 5400
+ },
+ {
+ "epoch": 7.988897113249445,
+ "eval_loss": 1.7434966564178467,
+ "eval_mean_token_accuracy": 0.5898237179487179,
+ "eval_runtime": 17.917,
+ "eval_samples_per_second": 7.144,
+ "eval_steps_per_second": 0.893,
+ "step": 5400
+ },
+ {
+ "epoch": 8.284233900814211,
+ "grad_norm": 1.1517570180426586,
+ "learning_rate": 3.4074074074074077e-06,
+ "loss": 1.5248,
+ "mean_token_accuracy": 0.6320280631370857,
+ "step": 5600
+ },
+ {
+ "epoch": 8.284233900814211,
+ "eval_loss": 1.741744041442871,
+ "eval_mean_token_accuracy": 0.5906135531135531,
+ "eval_runtime": 17.8965,
+ "eval_samples_per_second": 7.152,
+ "eval_steps_per_second": 0.894,
+ "step": 5600
+ },
+ {
+ "epoch": 8.580310880829016,
+ "grad_norm": 1.0340477842720268,
+ "learning_rate": 2.814814814814815e-06,
+ "loss": 1.5212,
+ "mean_token_accuracy": 0.6322646138583639,
+ "step": 5800
+ },
+ {
+ "epoch": 8.580310880829016,
+ "eval_loss": 1.735644817352295,
+ "eval_mean_token_accuracy": 0.5920615842490843,
+ "eval_runtime": 17.8899,
+ "eval_samples_per_second": 7.155,
+ "eval_steps_per_second": 0.894,
+ "step": 5800
+ },
+ {
+ "epoch": 8.876387860843819,
+ "grad_norm": 1.030414959224373,
+ "learning_rate": 2.222222222222222e-06,
+ "loss": 1.5159,
+ "mean_token_accuracy": 0.6326660561660562,
+ "step": 6000
+ },
+ {
+ "epoch": 8.876387860843819,
+ "eval_loss": 1.7318824529647827,
+ "eval_mean_token_accuracy": 0.5921760531135531,
+ "eval_runtime": 17.9183,
+ "eval_samples_per_second": 7.144,
+ "eval_steps_per_second": 0.893,
+ "step": 6000
+ },
+ {
+ "epoch": 9.171724648408587,
+ "grad_norm": 0.9859235701888986,
+ "learning_rate": 1.62962962962963e-06,
+ "loss": 1.5007,
+ "mean_token_accuracy": 0.6362307248585444,
+ "step": 6200
+ },
+ {
+ "epoch": 9.171724648408587,
+ "eval_loss": 1.7302496433258057,
+ "eval_mean_token_accuracy": 0.5925709706959706,
+ "eval_runtime": 17.9121,
+ "eval_samples_per_second": 7.146,
+ "eval_steps_per_second": 0.893,
+ "step": 6200
+ },
+ {
+ "epoch": 9.46780162842339,
+ "grad_norm": 0.9796094549613502,
+ "learning_rate": 1.0370370370370371e-06,
+ "loss": 1.488,
+ "mean_token_accuracy": 0.638957036019536,
+ "step": 6400
+ },
+ {
+ "epoch": 9.46780162842339,
+ "eval_loss": 1.7277941703796387,
+ "eval_mean_token_accuracy": 0.5929544413919414,
+ "eval_runtime": 17.9246,
+ "eval_samples_per_second": 7.141,
+ "eval_steps_per_second": 0.893,
+ "step": 6400
+ },
+ {
+ "epoch": 9.763878608438194,
+ "grad_norm": 0.9868368122782655,
+ "learning_rate": 4.444444444444445e-07,
+ "loss": 1.4899,
+ "mean_token_accuracy": 0.6385683379120879,
+ "step": 6600
+ },
+ {
+ "epoch": 9.763878608438194,
+ "eval_loss": 1.7258272171020508,
+ "eval_mean_token_accuracy": 0.5932234432234432,
+ "eval_runtime": 17.8902,
+ "eval_samples_per_second": 7.155,
+ "eval_steps_per_second": 0.894,
+ "step": 6600
+ },
+ {
+ "epoch": 9.985936343449296,
+ "mean_token_accuracy": 0.638465811965812,
+ "step": 6750,
+ "total_flos": 768626978193408.0,
+ "train_loss": 2.0993992060908564,
+ "train_runtime": 74825.3444,
+ "train_samples_per_second": 2.888,
+ "train_steps_per_second": 0.09
+ }
+ ],
+ "logging_steps": 200,
+ "max_steps": 6750,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 768626978193408.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
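The `log_history` above interleaves training records (with `loss`, `grad_norm`, `learning_rate`) and evaluation records (with `eval_loss`), logged every 200 steps. A small sketch for separating the two curves from a local copy of the file; the file path is an assumption:

```python
# Extract the train/eval loss curves from a local trainer_state.json (path is an assumption).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]
train_log = [e for e in history if "loss" in e]        # the final summary entry uses "train_loss", so it is excluded
eval_log = [e for e in history if "eval_loss" in e]

for entry in eval_log:
    print(f"step {entry['step']:>5}: eval_loss={entry['eval_loss']:.4f} "
          f"eval_acc={entry['eval_mean_token_accuracy']:.4f}")

print("last logged train loss:", train_log[-1]["loss"])                   # 1.4899 at step 6600
print("lowest eval loss:", min(e["eval_loss"] for e in eval_log))         # ~1.7258, also at step 6600
```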