saemin21 committed
Commit c1e5f99 · verified · 1 Parent(s): e9f02da

Model save

README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ base_model: Qwen/Qwen2.5-1.5B-Instruct
+ library_name: transformers
+ model_name: Qwen-2.5-1.5B-Simple-RL
+ tags:
+ - generated_from_trainer
+ - trl
+ - grpo
+ licence: license
+ ---
+
+ # Model Card for Qwen-2.5-1.5B-Simple-RL
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="saemin21/Qwen-2.5-1.5B-Simple-RL", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
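If you prefer to control sampling directly rather than rely on the pipeline defaults, the sketch below (not part of the original card) loads the checkpoint with `AutoModelForCausalLM` and reuses the sampling values recorded in this commit's `generation_config.json` (temperature 0.7, top_p 0.8, top_k 20, repetition penalty 1.1); the math prompt is only an illustration.

```python
# Sketch only: direct generation with the sampling defaults from generation_config.json.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "saemin21/Qwen-2.5-1.5B-Simple-RL"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "What is 7 * 8 + 12?"}]  # illustrative prompt
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output_ids = model.generate(
    input_ids,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    repetition_penalty=1.1,
)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```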
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/saemin21-postech/huggingface/runs/kc3dy4cu)
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
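The training script itself is not part of this commit, so the following is only a minimal sketch of what a GRPO run with TRL's `GRPOTrainer` could look like. The two reward functions are named after the `rewards/accuracy_reward` and `rewards/format_reward` columns in `trainer_state.json`, but their implementations, the dataset identifier, and `max_completion_length` are assumptions; the learning rate (2e-5), per-device batch size (16), single epoch, and per-step logging are taken from `trainer_state.json` in this commit.

```python
# Illustrative sketch, not the actual training script from this repository.
import re

from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer


def format_reward(completions, **kwargs):
    # Assumed format check: reward completions shaped as <think>...</think><answer>...</answer>.
    pattern = r"^<think>.*?</think>\s*<answer>.*?</answer>$"
    return [1.0 if re.match(pattern, c, re.DOTALL) else 0.0 for c in completions]


def accuracy_reward(completions, solution, **kwargs):
    # Assumed accuracy check: reward completions that contain the reference solution.
    # Extra dataset columns (here "solution") are forwarded to reward functions by GRPOTrainer.
    return [1.0 if sol.strip() in c else 0.0 for c, sol in zip(completions, solution)]


# Placeholder dataset with "prompt" and "solution" columns; the real data is not in this commit.
dataset = load_dataset("username/math-prompts", split="train")

training_args = GRPOConfig(
    output_dir="Qwen-2.5-1.5B-Simple-RL",
    learning_rate=2e-5,              # peak learning rate logged in trainer_state.json
    num_train_epochs=1,
    per_device_train_batch_size=16,  # train_batch_size recorded in trainer_state.json
    logging_steps=1,
    max_completion_length=1024,      # assumption; logged completions reach several hundred tokens
)

trainer = GRPOTrainer(
    model="Qwen/Qwen2.5-1.5B-Instruct",
    reward_funcs=[accuracy_reward, format_reward],
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```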
+ ### Framework versions
+
+ - TRL: 0.15.0.dev0
+ - Transformers: 4.49.0.dev0
+ - Pytorch: 2.5.1
+ - Datasets: 3.2.0
+ - Tokenizers: 0.21.0
+
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{zhihong2024deepseekmath,
+     title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+     author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+     year = 2024,
+     eprint = {arXiv:2402.03300},
+ }
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title = {{TRL: Transformer Reinforcement Learning}},
+     author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+     year = 2020,
+     journal = {GitHub repository},
+     publisher = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "total_flos": 0.0,
+   "train_loss": 0.20938398721150356,
+   "train_runtime": 4954.7093,
+   "train_samples": 7500,
+   "train_samples_per_second": 1.514,
+   "train_steps_per_second": 0.012
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.1,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.49.0.dev0"
+ }
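These sampling defaults ship with the checkpoint, so `generate()` picks them up automatically; a small sketch (not from the repository) that simply inspects them:

```python
from transformers import GenerationConfig

# Loads the generation_config.json shown above directly from the Hub checkpoint.
gen_config = GenerationConfig.from_pretrained("saemin21/Qwen-2.5-1.5B-Simple-RL")
print(gen_config.temperature, gen_config.top_p, gen_config.top_k, gen_config.repetition_penalty)
# Expected: 0.7 0.8 20 1.1
```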
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "total_flos": 0.0,
+   "train_loss": 0.20938398721150356,
+   "train_runtime": 4954.7093,
+   "train_samples": 7500,
+   "train_samples_per_second": 1.514,
+   "train_steps_per_second": 0.012
+ }
trainer_state.json ADDED
@@ -0,0 +1,796 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9893390191897654,
5
+ "eval_steps": 100,
6
+ "global_step": 58,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "completion_length": 365.3112087249756,
13
+ "epoch": 0.017057569296375266,
14
+ "grad_norm": 1.04221985968125,
15
+ "kl": 0.0,
16
+ "learning_rate": 3.3333333333333333e-06,
17
+ "loss": 0.0,
18
+ "reward": 0.6432291865348816,
19
+ "reward_std": 0.4505743272602558,
20
+ "rewards/accuracy_reward": 0.11718750232830644,
21
+ "rewards/format_reward": 0.5260416772216558,
22
+ "step": 1
23
+ },
24
+ {
25
+ "completion_length": 367.3958435058594,
26
+ "epoch": 0.03411513859275053,
27
+ "grad_norm": 1.1434252212815785,
28
+ "kl": 0.0,
29
+ "learning_rate": 6.666666666666667e-06,
30
+ "loss": 0.0,
31
+ "reward": 0.6783854328095913,
32
+ "reward_std": 0.46897680312395096,
33
+ "rewards/accuracy_reward": 0.12630208570044488,
34
+ "rewards/format_reward": 0.5520833525806665,
35
+ "step": 2
36
+ },
37
+ {
38
+ "completion_length": 339.8724060058594,
39
+ "epoch": 0.0511727078891258,
40
+ "grad_norm": 0.8635963812006352,
41
+ "kl": 0.0010623931884765625,
42
+ "learning_rate": 1e-05,
43
+ "loss": 0.0,
44
+ "reward": 0.6835937723517418,
45
+ "reward_std": 0.43934670090675354,
46
+ "rewards/accuracy_reward": 0.09895833639893681,
47
+ "rewards/format_reward": 0.5846354383975267,
48
+ "step": 3
49
+ },
50
+ {
51
+ "completion_length": 149.22526454925537,
52
+ "epoch": 0.06823027718550106,
53
+ "grad_norm": 307.2682075780713,
54
+ "kl": 2.980987548828125,
55
+ "learning_rate": 1.3333333333333333e-05,
56
+ "loss": 0.1193,
57
+ "reward": 1.010416690260172,
58
+ "reward_std": 0.2095302422530949,
59
+ "rewards/accuracy_reward": 0.06640625139698386,
60
+ "rewards/format_reward": 0.9440104439854622,
61
+ "step": 4
62
+ },
63
+ {
64
+ "completion_length": 112.50521183013916,
65
+ "epoch": 0.08528784648187633,
66
+ "grad_norm": 507.14990688734866,
67
+ "kl": 9.401611328125,
68
+ "learning_rate": 1.6666666666666667e-05,
69
+ "loss": 0.3775,
70
+ "reward": 1.007812526077032,
71
+ "reward_std": 0.15946260537020862,
72
+ "rewards/accuracy_reward": 0.04427083441987634,
73
+ "rewards/format_reward": 0.9635416828095913,
74
+ "step": 5
75
+ },
76
+ {
77
+ "completion_length": 100.34375333786011,
78
+ "epoch": 0.1023454157782516,
79
+ "grad_norm": 5.424616910263027,
80
+ "kl": 0.33245849609375,
81
+ "learning_rate": 2e-05,
82
+ "loss": 0.0133,
83
+ "reward": 1.0052083618938923,
84
+ "reward_std": 0.15602844418026507,
85
+ "rewards/accuracy_reward": 0.05338541930541396,
86
+ "rewards/format_reward": 0.9518229402601719,
87
+ "step": 6
88
+ },
89
+ {
90
+ "completion_length": 133.2343783378601,
91
+ "epoch": 0.11940298507462686,
92
+ "grad_norm": 1.7843437966022808,
93
+ "kl": 0.23712158203125,
94
+ "learning_rate": 1.9981755542233175e-05,
95
+ "loss": 0.0095,
96
+ "reward": 0.9570312686264515,
97
+ "reward_std": 0.2223757691681385,
98
+ "rewards/accuracy_reward": 0.04166666802484542,
99
+ "rewards/format_reward": 0.9153646044433117,
100
+ "step": 7
101
+ },
102
+ {
103
+ "completion_length": 114.69271230697632,
104
+ "epoch": 0.13646055437100213,
105
+ "grad_norm": 0.623961957547949,
106
+ "kl": 0.1710205078125,
107
+ "learning_rate": 1.992708874098054e-05,
108
+ "loss": 0.0068,
109
+ "reward": 0.9895833544433117,
110
+ "reward_std": 0.22396012931130826,
111
+ "rewards/accuracy_reward": 0.054687500931322575,
112
+ "rewards/format_reward": 0.934895858168602,
113
+ "step": 8
114
+ },
115
+ {
116
+ "completion_length": 104.41406488418579,
117
+ "epoch": 0.1535181236673774,
118
+ "grad_norm": 1.5259640643959018,
119
+ "kl": 0.224365234375,
120
+ "learning_rate": 1.983619906947144e-05,
121
+ "loss": 0.009,
122
+ "reward": 0.9765625260770321,
123
+ "reward_std": 0.2754378484096378,
124
+ "rewards/accuracy_reward": 0.0664062516298145,
125
+ "rewards/format_reward": 0.9101562611758709,
126
+ "step": 9
127
+ },
128
+ {
129
+ "completion_length": 231.64844512939453,
130
+ "epoch": 0.17057569296375266,
131
+ "grad_norm": 40.31352492325995,
132
+ "kl": 0.45977783203125,
133
+ "learning_rate": 1.9709418174260523e-05,
134
+ "loss": 0.0184,
135
+ "reward": 0.9257812723517418,
136
+ "reward_std": 0.39183398708701134,
137
+ "rewards/accuracy_reward": 0.0976562516298145,
138
+ "rewards/format_reward": 0.8281250186264515,
139
+ "step": 10
140
+ },
141
+ {
142
+ "completion_length": 613.7070560455322,
143
+ "epoch": 0.18763326226012794,
144
+ "grad_norm": 840.2414358802456,
145
+ "kl": 3.21197509765625,
146
+ "learning_rate": 1.954720866508546e-05,
147
+ "loss": 0.1284,
148
+ "reward": 0.5429687686264515,
149
+ "reward_std": 0.4636515509337187,
150
+ "rewards/accuracy_reward": 0.076822918956168,
151
+ "rewards/format_reward": 0.46614584140479565,
152
+ "step": 11
153
+ },
154
+ {
155
+ "completion_length": 762.7265853881836,
156
+ "epoch": 0.2046908315565032,
157
+ "grad_norm": 10.528180991568039,
158
+ "kl": 0.7625732421875,
159
+ "learning_rate": 1.9350162426854152e-05,
160
+ "loss": 0.0305,
161
+ "reward": 0.38411459513008595,
162
+ "reward_std": 0.3724043210968375,
163
+ "rewards/accuracy_reward": 0.09505208616610616,
164
+ "rewards/format_reward": 0.2890625102445483,
165
+ "step": 12
166
+ },
167
+ {
168
+ "completion_length": 382.18230056762695,
169
+ "epoch": 0.22174840085287847,
170
+ "grad_norm": 2.761605849015544,
171
+ "kl": 0.384521484375,
172
+ "learning_rate": 1.91189984599209e-05,
173
+ "loss": 0.0154,
174
+ "reward": 0.6484375223517418,
175
+ "reward_std": 0.5331083796918392,
176
+ "rewards/accuracy_reward": 0.06901041930541396,
177
+ "rewards/format_reward": 0.5794271007180214,
178
+ "step": 13
179
+ },
180
+ {
181
+ "completion_length": 198.7330780029297,
182
+ "epoch": 0.23880597014925373,
183
+ "grad_norm": 0.6078465083236722,
184
+ "kl": 0.23638916015625,
185
+ "learning_rate": 1.8854560256532098e-05,
186
+ "loss": 0.0095,
187
+ "reward": 0.912760429084301,
188
+ "reward_std": 0.4155522510409355,
189
+ "rewards/accuracy_reward": 0.10937500302679837,
190
+ "rewards/format_reward": 0.8033854328095913,
191
+ "step": 14
192
+ },
193
+ {
194
+ "completion_length": 136.53516006469727,
195
+ "epoch": 0.255863539445629,
196
+ "grad_norm": 0.8617440561534008,
197
+ "kl": 0.21514892578125,
198
+ "learning_rate": 1.8557812723014476e-05,
199
+ "loss": 0.0086,
200
+ "reward": 0.9492187723517418,
201
+ "reward_std": 0.38575689122080803,
202
+ "rewards/accuracy_reward": 0.09895833593327552,
203
+ "rewards/format_reward": 0.8502604328095913,
204
+ "step": 15
205
+ },
206
+ {
207
+ "completion_length": 98.51042032241821,
208
+ "epoch": 0.27292110874200426,
209
+ "grad_norm": 0.6595763666425144,
210
+ "kl": 0.2208251953125,
211
+ "learning_rate": 1.8229838658936566e-05,
212
+ "loss": 0.0088,
213
+ "reward": 0.9726562686264515,
214
+ "reward_std": 0.3207697505131364,
215
+ "rewards/accuracy_reward": 0.08854167000390589,
216
+ "rewards/format_reward": 0.8841146044433117,
217
+ "step": 16
218
+ },
219
+ {
220
+ "completion_length": 76.04166841506958,
221
+ "epoch": 0.2899786780383795,
222
+ "grad_norm": 3.048254714245794,
223
+ "kl": 0.44549560546875,
224
+ "learning_rate": 1.7871834806090502e-05,
225
+ "loss": 0.0178,
226
+ "reward": 1.0468750298023224,
227
+ "reward_std": 0.15973232360556722,
228
+ "rewards/accuracy_reward": 0.07552083500195295,
229
+ "rewards/format_reward": 0.9713541865348816,
230
+ "step": 17
231
+ },
232
+ {
233
+ "completion_length": 81.40234661102295,
234
+ "epoch": 0.3070362473347548,
235
+ "grad_norm": 0.4832905802799025,
236
+ "kl": 0.24029541015625,
237
+ "learning_rate": 1.7485107481711014e-05,
238
+ "loss": 0.0096,
239
+ "reward": 1.0742187798023224,
240
+ "reward_std": 0.20187148824334145,
241
+ "rewards/accuracy_reward": 0.10026042012032121,
242
+ "rewards/format_reward": 0.9739583507180214,
243
+ "step": 18
244
+ },
245
+ {
246
+ "completion_length": 126.81640911102295,
247
+ "epoch": 0.32409381663113007,
248
+ "grad_norm": 0.43834266665765215,
249
+ "kl": 0.18353271484375,
250
+ "learning_rate": 1.7071067811865477e-05,
251
+ "loss": 0.0073,
252
+ "reward": 1.0924479514360428,
253
+ "reward_std": 0.2407330577261746,
254
+ "rewards/accuracy_reward": 0.13151042140088975,
255
+ "rewards/format_reward": 0.9609375186264515,
256
+ "step": 19
257
+ },
258
+ {
259
+ "completion_length": 174.13021278381348,
260
+ "epoch": 0.3411513859275053,
261
+ "grad_norm": 0.3691172026913083,
262
+ "kl": 0.170654296875,
263
+ "learning_rate": 1.6631226582407954e-05,
264
+ "loss": 0.0068,
265
+ "reward": 1.0807292088866234,
266
+ "reward_std": 0.17400484485551715,
267
+ "rewards/accuracy_reward": 0.10546875465661287,
268
+ "rewards/format_reward": 0.9752604365348816,
269
+ "step": 20
270
+ },
271
+ {
272
+ "completion_length": 226.26042366027832,
273
+ "epoch": 0.3582089552238806,
274
+ "grad_norm": 0.35348063732954105,
275
+ "kl": 0.162994384765625,
276
+ "learning_rate": 1.6167188726285433e-05,
277
+ "loss": 0.0065,
278
+ "reward": 1.0768229514360428,
279
+ "reward_std": 0.26323840813711286,
280
+ "rewards/accuracy_reward": 0.12239583872724324,
281
+ "rewards/format_reward": 0.9544271044433117,
282
+ "step": 21
283
+ },
284
+ {
285
+ "completion_length": 226.471360206604,
286
+ "epoch": 0.3752665245202559,
287
+ "grad_norm": 0.2548959416361629,
288
+ "kl": 0.137939453125,
289
+ "learning_rate": 1.568064746731156e-05,
290
+ "loss": 0.0055,
291
+ "reward": 1.0937500447034836,
292
+ "reward_std": 0.19759728573262691,
293
+ "rewards/accuracy_reward": 0.11588542012032121,
294
+ "rewards/format_reward": 0.9778646007180214,
295
+ "step": 22
296
+ },
297
+ {
298
+ "completion_length": 222.7395896911621,
299
+ "epoch": 0.39232409381663114,
300
+ "grad_norm": 0.29372855664077896,
301
+ "kl": 0.154876708984375,
302
+ "learning_rate": 1.5173378141776569e-05,
303
+ "loss": 0.0062,
304
+ "reward": 1.1015625409781933,
305
+ "reward_std": 0.21707096393220127,
306
+ "rewards/accuracy_reward": 0.12890625349245965,
307
+ "rewards/format_reward": 0.9726562686264515,
308
+ "step": 23
309
+ },
310
+ {
311
+ "completion_length": 202.705735206604,
312
+ "epoch": 0.4093816631130064,
313
+ "grad_norm": 0.2969744538892161,
314
+ "kl": 0.155120849609375,
315
+ "learning_rate": 1.4647231720437687e-05,
316
+ "loss": 0.0062,
317
+ "reward": 1.1184896230697632,
318
+ "reward_std": 0.20930432621389627,
319
+ "rewards/accuracy_reward": 0.1328125053551048,
320
+ "rewards/format_reward": 0.9856770932674408,
321
+ "step": 24
322
+ },
323
+ {
324
+ "completion_length": 208.4622449874878,
325
+ "epoch": 0.42643923240938164,
326
+ "grad_norm": 0.2551264579415379,
327
+ "kl": 0.145782470703125,
328
+ "learning_rate": 1.410412805452757e-05,
329
+ "loss": 0.0058,
330
+ "reward": 1.1341146230697632,
331
+ "reward_std": 0.20376197341829538,
332
+ "rewards/accuracy_reward": 0.1458333374466747,
333
+ "rewards/format_reward": 0.9882812611758709,
334
+ "step": 25
335
+ },
336
+ {
337
+ "completion_length": 194.07292079925537,
338
+ "epoch": 0.44349680170575695,
339
+ "grad_norm": 0.29985133201161923,
340
+ "kl": 0.147552490234375,
341
+ "learning_rate": 1.3546048870425356e-05,
342
+ "loss": 0.0059,
343
+ "reward": 1.1875000298023224,
344
+ "reward_std": 0.22688957839272916,
345
+ "rewards/accuracy_reward": 0.19531250500585884,
346
+ "rewards/format_reward": 0.9921875074505806,
347
+ "step": 26
348
+ },
349
+ {
350
+ "completion_length": 199.3333396911621,
351
+ "epoch": 0.4605543710021322,
352
+ "grad_norm": 0.2944306259052974,
353
+ "kl": 0.144775390625,
354
+ "learning_rate": 1.297503053855203e-05,
355
+ "loss": 0.0058,
356
+ "reward": 1.1458333656191826,
357
+ "reward_std": 0.19133792025968432,
358
+ "rewards/accuracy_reward": 0.15494792093522847,
359
+ "rewards/format_reward": 0.9908854253590107,
360
+ "step": 27
361
+ },
362
+ {
363
+ "completion_length": 172.79427576065063,
364
+ "epoch": 0.47761194029850745,
365
+ "grad_norm": 0.3362991815387882,
366
+ "kl": 0.148773193359375,
367
+ "learning_rate": 1.2393156642875579e-05,
368
+ "loss": 0.006,
369
+ "reward": 1.178385466337204,
370
+ "reward_std": 0.22494715498760343,
371
+ "rewards/accuracy_reward": 0.18489583837799728,
372
+ "rewards/format_reward": 0.9934895895421505,
373
+ "step": 28
374
+ },
375
+ {
376
+ "completion_length": 176.18490028381348,
377
+ "epoch": 0.4946695095948827,
378
+ "grad_norm": 0.3044368690957198,
379
+ "kl": 0.146514892578125,
380
+ "learning_rate": 1.180255037813906e-05,
381
+ "loss": 0.0059,
382
+ "reward": 1.1614583730697632,
383
+ "reward_std": 0.19968353700824082,
384
+ "rewards/accuracy_reward": 0.16796875244472176,
385
+ "rewards/format_reward": 0.9934895895421505,
386
+ "step": 29
387
+ },
388
+ {
389
+ "completion_length": 169.21615076065063,
390
+ "epoch": 0.511727078891258,
391
+ "grad_norm": 0.3050078730361449,
392
+ "kl": 0.1395263671875,
393
+ "learning_rate": 1.1205366802553231e-05,
394
+ "loss": 0.0056,
395
+ "reward": 1.1601563021540642,
396
+ "reward_std": 0.19885429926216602,
397
+ "rewards/accuracy_reward": 0.167968753259629,
398
+ "rewards/format_reward": 0.9921875074505806,
399
+ "step": 30
400
+ },
401
+ {
402
+ "completion_length": 191.99479484558105,
403
+ "epoch": 0.5287846481876333,
404
+ "grad_norm": 0.28939261315708026,
405
+ "kl": 0.145721435546875,
406
+ "learning_rate": 1.0603784974222862e-05,
407
+ "loss": 0.0058,
408
+ "reward": 1.1354167088866234,
409
+ "reward_std": 0.2171460180543363,
410
+ "rewards/accuracy_reward": 0.15104167070239782,
411
+ "rewards/format_reward": 0.9843750074505806,
412
+ "step": 31
413
+ },
414
+ {
415
+ "completion_length": 233.89974689483643,
416
+ "epoch": 0.5458422174840085,
417
+ "grad_norm": 0.28487537328396517,
418
+ "kl": 0.13800048828125,
419
+ "learning_rate": 1e-05,
420
+ "loss": 0.0055,
421
+ "reward": 1.123697966337204,
422
+ "reward_std": 0.2813015836291015,
423
+ "rewards/accuracy_reward": 0.15885417093522847,
424
+ "rewards/format_reward": 0.9648437723517418,
425
+ "step": 32
426
+ },
427
+ {
428
+ "completion_length": 253.55078887939453,
429
+ "epoch": 0.5628997867803838,
430
+ "grad_norm": 0.2737431545513293,
431
+ "kl": 0.137664794921875,
432
+ "learning_rate": 9.39621502577714e-06,
433
+ "loss": 0.0055,
434
+ "reward": 1.128906287252903,
435
+ "reward_std": 0.19965026015415788,
436
+ "rewards/accuracy_reward": 0.1497395880287513,
437
+ "rewards/format_reward": 0.9791666828095913,
438
+ "step": 33
439
+ },
440
+ {
441
+ "completion_length": 239.842453956604,
442
+ "epoch": 0.579957356076759,
443
+ "grad_norm": 0.6254452308530076,
444
+ "kl": 0.14483642578125,
445
+ "learning_rate": 8.79463319744677e-06,
446
+ "loss": 0.0058,
447
+ "reward": 1.1757812798023224,
448
+ "reward_std": 0.26264199148863554,
449
+ "rewards/accuracy_reward": 0.195312503259629,
450
+ "rewards/format_reward": 0.9804687574505806,
451
+ "step": 34
452
+ },
453
+ {
454
+ "completion_length": 241.17187976837158,
455
+ "epoch": 0.5970149253731343,
456
+ "grad_norm": 0.3224088206959575,
457
+ "kl": 0.14569091796875,
458
+ "learning_rate": 8.197449621860944e-06,
459
+ "loss": 0.0058,
460
+ "reward": 1.1757812947034836,
461
+ "reward_std": 0.27850970113649964,
462
+ "rewards/accuracy_reward": 0.199218753259629,
463
+ "rewards/format_reward": 0.9765625186264515,
464
+ "step": 35
465
+ },
466
+ {
467
+ "completion_length": 231.39062976837158,
468
+ "epoch": 0.6140724946695096,
469
+ "grad_norm": 0.2936768596993624,
470
+ "kl": 0.142852783203125,
471
+ "learning_rate": 7.606843357124426e-06,
472
+ "loss": 0.0057,
473
+ "reward": 1.1510417088866234,
474
+ "reward_std": 0.26834863936528563,
475
+ "rewards/accuracy_reward": 0.17578125349245965,
476
+ "rewards/format_reward": 0.9752604328095913,
477
+ "step": 36
478
+ },
479
+ {
480
+ "completion_length": 200.52344417572021,
481
+ "epoch": 0.6311300639658849,
482
+ "grad_norm": 0.3398881371255209,
483
+ "kl": 0.14337158203125,
484
+ "learning_rate": 7.024969461447973e-06,
485
+ "loss": 0.0057,
486
+ "reward": 1.140625037252903,
487
+ "reward_std": 0.30551271699368954,
488
+ "rewards/accuracy_reward": 0.20312500465661287,
489
+ "rewards/format_reward": 0.9375000223517418,
490
+ "step": 37
491
+ },
492
+ {
493
+ "completion_length": 199.080735206604,
494
+ "epoch": 0.6481876332622601,
495
+ "grad_norm": 0.3950051985318622,
496
+ "kl": 0.15716552734375,
497
+ "learning_rate": 6.453951129574644e-06,
498
+ "loss": 0.0063,
499
+ "reward": 1.0898437723517418,
500
+ "reward_std": 0.3697906183078885,
501
+ "rewards/accuracy_reward": 0.1953125053551048,
502
+ "rewards/format_reward": 0.8945312686264515,
503
+ "step": 38
504
+ },
505
+ {
506
+ "completion_length": 176.2083396911621,
507
+ "epoch": 0.6652452025586354,
508
+ "grad_norm": 0.37189719351321165,
509
+ "kl": 0.1624755859375,
510
+ "learning_rate": 5.895871945472434e-06,
511
+ "loss": 0.0065,
512
+ "reward": 1.1380208805203438,
513
+ "reward_std": 0.34118577465415,
514
+ "rewards/accuracy_reward": 0.20703125558793545,
515
+ "rewards/format_reward": 0.9309896044433117,
516
+ "step": 39
517
+ },
518
+ {
519
+ "completion_length": 171.10677528381348,
520
+ "epoch": 0.6823027718550106,
521
+ "grad_norm": 0.3867912490480322,
522
+ "kl": 0.15789794921875,
523
+ "learning_rate": 5.352768279562315e-06,
524
+ "loss": 0.0063,
525
+ "reward": 1.1210937947034836,
526
+ "reward_std": 0.28729582112282515,
527
+ "rewards/accuracy_reward": 0.1640625053551048,
528
+ "rewards/format_reward": 0.9570312686264515,
529
+ "step": 40
530
+ },
531
+ {
532
+ "completion_length": 159.16016101837158,
533
+ "epoch": 0.6993603411513859,
534
+ "grad_norm": 0.3463972010603223,
535
+ "kl": 0.162689208984375,
536
+ "learning_rate": 4.826621858223431e-06,
537
+ "loss": 0.0065,
538
+ "reward": 1.1770833656191826,
539
+ "reward_std": 0.2748530600219965,
540
+ "rewards/accuracy_reward": 0.22135417209938169,
541
+ "rewards/format_reward": 0.9557291865348816,
542
+ "step": 41
543
+ },
544
+ {
545
+ "completion_length": 155.342453956604,
546
+ "epoch": 0.7164179104477612,
547
+ "grad_norm": 0.31066796112570005,
548
+ "kl": 0.16387939453125,
549
+ "learning_rate": 4.319352532688444e-06,
550
+ "loss": 0.0066,
551
+ "reward": 1.1692708805203438,
552
+ "reward_std": 0.24965191585943103,
553
+ "rewards/accuracy_reward": 0.19010417256504297,
554
+ "rewards/format_reward": 0.9791666828095913,
555
+ "step": 42
556
+ },
557
+ {
558
+ "completion_length": 147.61849355697632,
559
+ "epoch": 0.7334754797441365,
560
+ "grad_norm": 0.3883785701764709,
561
+ "kl": 0.15966796875,
562
+ "learning_rate": 3.832811273714569e-06,
563
+ "loss": 0.0064,
564
+ "reward": 1.194010466337204,
565
+ "reward_std": 0.26059679966419935,
566
+ "rewards/accuracy_reward": 0.20442709000781178,
567
+ "rewards/format_reward": 0.9895833432674408,
568
+ "step": 43
569
+ },
570
+ {
571
+ "completion_length": 160.03385829925537,
572
+ "epoch": 0.7505330490405118,
573
+ "grad_norm": 5788.508271348362,
574
+ "kl": 278.14984130859375,
575
+ "learning_rate": 3.3687734175920505e-06,
576
+ "loss": 11.1208,
577
+ "reward": 1.1757812947034836,
578
+ "reward_std": 0.23907144693657756,
579
+ "rewards/accuracy_reward": 0.18880208989139646,
580
+ "rewards/format_reward": 0.986979179084301,
581
+ "step": 44
582
+ },
583
+ {
584
+ "completion_length": 149.94141149520874,
585
+ "epoch": 0.767590618336887,
586
+ "grad_norm": 0.36297405142580474,
587
+ "kl": 0.164794921875,
588
+ "learning_rate": 2.9289321881345257e-06,
589
+ "loss": 0.0066,
590
+ "reward": 1.199218787252903,
591
+ "reward_std": 0.2734901886433363,
592
+ "rewards/accuracy_reward": 0.21744792349636555,
593
+ "rewards/format_reward": 0.9817708469927311,
594
+ "step": 45
595
+ },
596
+ {
597
+ "completion_length": 157.68620204925537,
598
+ "epoch": 0.7846481876332623,
599
+ "grad_norm": 0.3108491951827798,
600
+ "kl": 0.16436767578125,
601
+ "learning_rate": 2.514892518288988e-06,
602
+ "loss": 0.0066,
603
+ "reward": 1.1744792014360428,
604
+ "reward_std": 0.21565480902791023,
605
+ "rewards/accuracy_reward": 0.19010417233221233,
606
+ "rewards/format_reward": 0.9843750149011612,
607
+ "step": 46
608
+ },
609
+ {
610
+ "completion_length": 168.7890682220459,
611
+ "epoch": 0.8017057569296375,
612
+ "grad_norm": 1.04029324716777,
613
+ "kl": 0.16632080078125,
614
+ "learning_rate": 2.1281651939094996e-06,
615
+ "loss": 0.0067,
616
+ "reward": 1.1197917014360428,
617
+ "reward_std": 0.20990665443241596,
618
+ "rewards/accuracy_reward": 0.13281250500585884,
619
+ "rewards/format_reward": 0.9869791753590107,
620
+ "step": 47
621
+ },
622
+ {
623
+ "completion_length": 167.15625381469727,
624
+ "epoch": 0.8187633262260128,
625
+ "grad_norm": 0.32966812871466905,
626
+ "kl": 0.15936279296875,
627
+ "learning_rate": 1.7701613410634367e-06,
628
+ "loss": 0.0064,
629
+ "reward": 1.2018229588866234,
630
+ "reward_std": 0.2573004774749279,
631
+ "rewards/accuracy_reward": 0.22005208767950535,
632
+ "rewards/format_reward": 0.9817708469927311,
633
+ "step": 48
634
+ },
635
+ {
636
+ "completion_length": 171.07682704925537,
637
+ "epoch": 0.835820895522388,
638
+ "grad_norm": 0.32208043175249607,
639
+ "kl": 0.151123046875,
640
+ "learning_rate": 1.4421872769855262e-06,
641
+ "loss": 0.006,
642
+ "reward": 1.203125037252903,
643
+ "reward_std": 0.26521802693605423,
644
+ "rewards/accuracy_reward": 0.22135417209938169,
645
+ "rewards/format_reward": 0.9817708469927311,
646
+ "step": 49
647
+ },
648
+ {
649
+ "completion_length": 166.2864637374878,
650
+ "epoch": 0.8528784648187633,
651
+ "grad_norm": 0.34577456114094934,
652
+ "kl": 0.16162109375,
653
+ "learning_rate": 1.1454397434679022e-06,
654
+ "loss": 0.0065,
655
+ "reward": 1.2174479588866234,
656
+ "reward_std": 0.2847161674872041,
657
+ "rewards/accuracy_reward": 0.23307292023673654,
658
+ "rewards/format_reward": 0.9843750111758709,
659
+ "step": 50
660
+ },
661
+ {
662
+ "completion_length": 176.26693153381348,
663
+ "epoch": 0.8699360341151386,
664
+ "grad_norm": 0.32710109344570104,
665
+ "kl": 0.151611328125,
666
+ "learning_rate": 8.810015400790994e-07,
667
+ "loss": 0.0061,
668
+ "reward": 1.1367187835276127,
669
+ "reward_std": 0.2029442568309605,
670
+ "rewards/accuracy_reward": 0.14713542093522847,
671
+ "rewards/format_reward": 0.9895833395421505,
672
+ "step": 51
673
+ },
674
+ {
675
+ "completion_length": 181.84505653381348,
676
+ "epoch": 0.8869936034115139,
677
+ "grad_norm": 0.3181109150342723,
678
+ "kl": 0.177215576171875,
679
+ "learning_rate": 6.498375731458529e-07,
680
+ "loss": 0.0071,
681
+ "reward": 1.1692708618938923,
682
+ "reward_std": 0.21676216088235378,
683
+ "rewards/accuracy_reward": 0.18229167233221233,
684
+ "rewards/format_reward": 0.9869791753590107,
685
+ "step": 52
686
+ },
687
+ {
688
+ "completion_length": 171.81901359558105,
689
+ "epoch": 0.9040511727078892,
690
+ "grad_norm": 0.3007348356231504,
691
+ "kl": 0.15460205078125,
692
+ "learning_rate": 4.5279133491454406e-07,
693
+ "loss": 0.0062,
694
+ "reward": 1.169270858168602,
695
+ "reward_std": 0.26831901678815484,
696
+ "rewards/accuracy_reward": 0.18880208872724324,
697
+ "rewards/format_reward": 0.9804687686264515,
698
+ "step": 53
699
+ },
700
+ {
701
+ "completion_length": 165.6862015724182,
702
+ "epoch": 0.9211087420042644,
703
+ "grad_norm": 0.3465232635146208,
704
+ "kl": 0.160491943359375,
705
+ "learning_rate": 2.905818257394799e-07,
706
+ "loss": 0.0064,
707
+ "reward": 1.2343750223517418,
708
+ "reward_std": 0.2902528368867934,
709
+ "rewards/accuracy_reward": 0.2513020889600739,
710
+ "rewards/format_reward": 0.983072929084301,
711
+ "step": 54
712
+ },
713
+ {
714
+ "completion_length": 185.47396278381348,
715
+ "epoch": 0.9381663113006397,
716
+ "grad_norm": 0.339986245997186,
717
+ "kl": 0.151763916015625,
718
+ "learning_rate": 1.6380093052856482e-07,
719
+ "loss": 0.0061,
720
+ "reward": 1.1757812909781933,
721
+ "reward_std": 0.2616687403060496,
722
+ "rewards/accuracy_reward": 0.1953125058207661,
723
+ "rewards/format_reward": 0.9804687611758709,
724
+ "step": 55
725
+ },
726
+ {
727
+ "completion_length": 168.7773494720459,
728
+ "epoch": 0.9552238805970149,
729
+ "grad_norm": 0.33393014556768225,
730
+ "kl": 0.152862548828125,
731
+ "learning_rate": 7.291125901946027e-08,
732
+ "loss": 0.0061,
733
+ "reward": 1.2343750447034836,
734
+ "reward_std": 0.2642471818253398,
735
+ "rewards/accuracy_reward": 0.24479167396202683,
736
+ "rewards/format_reward": 0.9895833432674408,
737
+ "step": 56
738
+ },
739
+ {
740
+ "completion_length": 175.51172637939453,
741
+ "epoch": 0.9722814498933902,
742
+ "grad_norm": 0.30356714080443054,
743
+ "kl": 0.1541748046875,
744
+ "learning_rate": 1.824445776682504e-08,
745
+ "loss": 0.0062,
746
+ "reward": 1.2239583656191826,
747
+ "reward_std": 0.2539575123228133,
748
+ "rewards/accuracy_reward": 0.24739584140479565,
749
+ "rewards/format_reward": 0.9765625149011612,
750
+ "step": 57
751
+ },
752
+ {
753
+ "completion_length": 187.67057609558105,
754
+ "epoch": 0.9893390191897654,
755
+ "grad_norm": 0.3138605458712969,
756
+ "kl": 0.152740478515625,
757
+ "learning_rate": 0.0,
758
+ "loss": 0.0061,
759
+ "reward": 1.208333358168602,
760
+ "reward_std": 0.24711204366758466,
761
+ "rewards/accuracy_reward": 0.22786458861082792,
762
+ "rewards/format_reward": 0.9804687649011612,
763
+ "step": 58
764
+ },
765
+ {
766
+ "epoch": 0.9893390191897654,
767
+ "step": 58,
768
+ "total_flos": 0.0,
769
+ "train_loss": 0.20938398721150356,
770
+ "train_runtime": 4954.7093,
771
+ "train_samples_per_second": 1.514,
772
+ "train_steps_per_second": 0.012
773
+ }
774
+ ],
775
+ "logging_steps": 1,
776
+ "max_steps": 58,
777
+ "num_input_tokens_seen": 0,
778
+ "num_train_epochs": 1,
779
+ "save_steps": 100,
780
+ "stateful_callbacks": {
781
+ "TrainerControl": {
782
+ "args": {
783
+ "should_epoch_stop": false,
784
+ "should_evaluate": false,
785
+ "should_log": false,
786
+ "should_save": true,
787
+ "should_training_stop": true
788
+ },
789
+ "attributes": {}
790
+ }
791
+ },
792
+ "total_flos": 0.0,
793
+ "train_batch_size": 16,
794
+ "trial_name": null,
795
+ "trial_params": null
796
+ }
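`trainer_state.json` keeps the full per-step log history shown above; a short sketch (assuming the file has been downloaded locally, e.g. via `huggingface_hub`) for plotting the reward curves:

```python
import json

import matplotlib.pyplot as plt

# Assumes trainer_state.json from this commit has been downloaded next to this script.
with open("trainer_state.json") as f:
    state = json.load(f)

# The last log_history entry is a run summary without per-step rewards, so filter it out.
logs = [entry for entry in state["log_history"] if "reward" in entry]
steps = [entry["step"] for entry in logs]

plt.plot(steps, [entry["reward"] for entry in logs], label="reward")
plt.plot(steps, [entry["rewards/format_reward"] for entry in logs], label="format_reward")
plt.plot(steps, [entry["rewards/accuracy_reward"] for entry in logs], label="accuracy_reward")
plt.xlabel("step")
plt.ylabel("reward")
plt.legend()
plt.show()
```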