yh-yao committed
Commit 451e16d · verified · Parent: 1e2fa74

Model save

README.md CHANGED
@@ -1,11 +1,9 @@
 ---
 base_model: Qwen/Qwen2.5-1.5B-Instruct
-datasets: HuggingFaceH4/Bespoke-Stratos-17k
 library_name: transformers
 model_name: Qwen2.5-1.5B-Open-R1-Distill
 tags:
 - generated_from_trainer
-- open-r1
 - trl
 - sft
 licence: license
@@ -13,7 +11,7 @@ licence: license
 
 # Model Card for Qwen2.5-1.5B-Open-R1-Distill
 
-This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) on the [HuggingFaceH4/Bespoke-Stratos-17k](https://huggingface.co/datasets/HuggingFaceH4/Bespoke-Stratos-17k) dataset.
+This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -29,14 +27,14 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/yh-yao/huggingface/runs/y2h2hjrz)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/yh-yao/huggingface/runs/rweu490i)
 
 
 This model was trained with SFT.
 
 ### Framework versions
 
-- TRL: 0.14.0
+- TRL: 0.15.0.dev0
 - Transformers: 4.49.0.dev0
 - Pytorch: 2.5.1
 - Datasets: 3.2.0
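
The hunk header above references the model card's quick-start snippet (`print(output["generated_text"])`). A minimal sketch of that usage, assuming the repo id is `yh-yao/Qwen2.5-1.5B-Open-R1-Distill` (inferred from the committer and `model_name`, not stated in the diff):

```python
# Hypothetical quick-start sketch; the repo id is an assumption.
import torch
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="yh-yao/Qwen2.5-1.5B-Open-R1-Distill",  # assumed repo id
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Qwen2.5-Instruct derivatives expect chat-formatted input.
messages = [{"role": "user", "content": "Explain model distillation in one sentence."}]
output = generator(messages, max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```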
all_results.json CHANGED
@@ -5,10 +5,10 @@
 "eval_samples": 100,
 "eval_samples_per_second": 162.832,
 "eval_steps_per_second": 5.089,
-"total_flos": 5.2247958791389184e+17,
-"train_loss": 0.8026480646528436,
-"train_runtime": 497.857,
+"total_flos": 76916824473600.0,
+"train_loss": 0.8026409276138396,
+"train_runtime": 500.1417,
 "train_samples": 16610,
-"train_samples_per_second": 43.406,
-"train_steps_per_second": 0.339
+"train_samples_per_second": 43.208,
+"train_steps_per_second": 0.338
 }
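
Since the header records both the parent revision (1e2fa74) and this commit (451e16d), the two versions of this metrics file can be diffed programmatically. A sketch assuming the same hypothetical repo id as above:

```python
# Sketch: compare all_results.json between the parent commit and this commit.
# Revision hashes come from the commit header; the repo id is an assumption.
import json
from huggingface_hub import hf_hub_download

REPO_ID = "yh-yao/Qwen2.5-1.5B-Open-R1-Distill"  # assumed

def load_results(revision: str) -> dict:
    path = hf_hub_download(REPO_ID, "all_results.json", revision=revision)
    with open(path) as f:
        return json.load(f)

old, new = load_results("1e2fa74"), load_results("451e16d")
for key in sorted(set(old) | set(new)):
    if old.get(key) != new.get(key):
        print(f"{key}: {old.get(key)} -> {new.get(key)}")
```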
config.json CHANGED
@@ -23,7 +23,7 @@
 "tie_word_embeddings": true,
 "torch_dtype": "bfloat16",
 "transformers_version": "4.49.0.dev0",
-"use_cache": true,
+"use_cache": false,
 "use_sliding_window": false,
 "vocab_size": 151936
 }
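
The only config change is `use_cache: true -> false`, a setting commonly disabled during training. For inference one would typically turn the KV cache back on after loading; a sketch under the same assumed repo id:

```python
# Sketch: re-enable the KV cache for generation, since this commit ships
# the config with "use_cache": false. Repo id is an assumption.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "yh-yao/Qwen2.5-1.5B-Open-R1-Distill"  # assumed
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
model.config.use_cache = True  # speeds up autoregressive decoding at inference time

inputs = tokenizer("Hello", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```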
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3f52fbee5cd23caea4cf13a6534a707c400d798cfb4549d037349616a7263899
+oid sha256:0909704212b2cefa2c538346dae8eef898976666d2321dc8341347dc0c3f3b8b
 size 3087467144
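
Only the git-LFS pointer changes here (new SHA-256, same byte size), so a downloaded weights file can be checked against the new pointer. A minimal sketch; the local path is hypothetical:

```python
# Sketch: verify a downloaded model.safetensors against the LFS pointer above.
import hashlib

EXPECTED = "0909704212b2cefa2c538346dae8eef898976666d2321dc8341347dc0c3f3b8b"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:            # local path is an assumption
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch"
print("OK:", h.hexdigest())
```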
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
 "epoch": 1.0,
-"total_flos": 5.2247958791389184e+17,
-"train_loss": 0.8026480646528436,
-"train_runtime": 497.857,
+"total_flos": 76916824473600.0,
+"train_loss": 0.8026409276138396,
+"train_runtime": 500.1417,
 "train_samples": 16610,
-"train_samples_per_second": 43.406,
-"train_steps_per_second": 0.339
+"train_samples_per_second": 43.208,
+"train_steps_per_second": 0.338
 }
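
The throughput numbers are consistent with the 169 optimizer steps recorded in trainer_state.json below: 169 / 500.1417 s ≈ 0.338 steps per second. A quick sanity-check sketch using only values from this diff:

```python
# Sanity check: reported train_steps_per_second ~= total steps / train_runtime.
total_steps = 169          # final step in trainer_state.json
train_runtime = 500.1417   # seconds, from train_results.json after this commit
reported = 0.338

computed = total_steps / train_runtime
print(f"computed={computed:.3f}, reported={reported}")  # ~0.338
assert abs(computed - reported) < 5e-3
```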
trainer_state.json CHANGED
@@ -10,239 +10,239 @@
 "log_history": [
 {
 "epoch": 0.029585798816568046,
-"grad_norm": 2.3450317910704577,
+"grad_norm": 2.344439472222262,
 "learning_rate": 5.882352941176471e-06,
 "loss": 1.0993,
 "step": 5
 },
 {
 "epoch": 0.05917159763313609,
-"grad_norm": 1.633061904367079,
+"grad_norm": 1.63601885296728,
 "learning_rate": 1.1764705882352942e-05,
 "loss": 1.0402,
 "step": 10
 },
 {
 "epoch": 0.08875739644970414,
-"grad_norm": 0.8877456959606497,
+"grad_norm": 0.8879200743942525,
 "learning_rate": 1.7647058823529414e-05,
 "loss": 0.9533,
 "step": 15
 },
 {
 "epoch": 0.11834319526627218,
-"grad_norm": 0.606798846508287,
+"grad_norm": 0.6062571598207454,
 "learning_rate": 1.9980782984658682e-05,
 "loss": 0.8923,
 "step": 20
 },
 {
 "epoch": 0.14792899408284024,
-"grad_norm": 0.5388811885900797,
+"grad_norm": 0.5398319056179057,
 "learning_rate": 1.9863613034027224e-05,
 "loss": 0.8552,
 "step": 25
 },
 {
 "epoch": 0.17751479289940827,
-"grad_norm": 0.4250915517708058,
+"grad_norm": 0.4257339924474833,
 "learning_rate": 1.9641197940012136e-05,
 "loss": 0.8283,
 "step": 30
 },
 {
 "epoch": 0.20710059171597633,
-"grad_norm": 0.37726357010572764,
+"grad_norm": 0.3758029788005641,
 "learning_rate": 1.9315910880512792e-05,
-"loss": 0.8229,
+"loss": 0.823,
 "step": 35
 },
 {
 "epoch": 0.23668639053254437,
-"grad_norm": 0.4017368360482797,
+"grad_norm": 0.3990910798541474,
 "learning_rate": 1.8891222681391853e-05,
 "loss": 0.8225,
 "step": 40
 },
 {
 "epoch": 0.26627218934911245,
-"grad_norm": 0.3431204044336116,
+"grad_norm": 0.34475189059262795,
 "learning_rate": 1.8371664782625287e-05,
-"loss": 0.8073,
+"loss": 0.8074,
 "step": 45
 },
 {
 "epoch": 0.2958579881656805,
-"grad_norm": 0.33341329906969774,
+"grad_norm": 0.333866593937156,
 "learning_rate": 1.7762780887657576e-05,
 "loss": 0.7978,
 "step": 50
 },
 {
 "epoch": 0.3254437869822485,
-"grad_norm": 0.36841342218843565,
+"grad_norm": 0.36564553792757043,
 "learning_rate": 1.7071067811865477e-05,
 "loss": 0.7877,
 "step": 55
 },
 {
 "epoch": 0.35502958579881655,
-"grad_norm": 0.376161654126924,
+"grad_norm": 0.3991199908593827,
 "learning_rate": 1.6303906161279554e-05,
 "loss": 0.7981,
 "step": 60
 },
 {
 "epoch": 0.38461538461538464,
-"grad_norm": 0.3429901732874326,
+"grad_norm": 0.34298175038858175,
 "learning_rate": 1.5469481581224274e-05,
 "loss": 0.7721,
 "step": 65
 },
 {
 "epoch": 0.41420118343195267,
-"grad_norm": 0.3449881548396526,
+"grad_norm": 0.3456184403820059,
 "learning_rate": 1.4576697415156818e-05,
 "loss": 0.7741,
 "step": 70
 },
 {
 "epoch": 0.4437869822485207,
-"grad_norm": 0.3373615559482272,
+"grad_norm": 0.3381633650630839,
 "learning_rate": 1.3635079705638298e-05,
 "loss": 0.7852,
 "step": 75
 },
 {
 "epoch": 0.47337278106508873,
-"grad_norm": 0.35169218399134256,
+"grad_norm": 0.35047408988206236,
 "learning_rate": 1.2654675551080724e-05,
 "loss": 0.7666,
 "step": 80
 },
 {
 "epoch": 0.5029585798816568,
-"grad_norm": 0.3611334142860622,
+"grad_norm": 0.36085121845408574,
 "learning_rate": 1.164594590280734e-05,
 "loss": 0.7706,
 "step": 85
 },
 {
 "epoch": 0.5325443786982249,
-"grad_norm": 0.36061127549275923,
+"grad_norm": 0.35902845961657487,
 "learning_rate": 1.0619653946285948e-05,
 "loss": 0.77,
 "step": 90
 },
 {
 "epoch": 0.5621301775147929,
-"grad_norm": 0.3750103303716175,
+"grad_norm": 0.36863631725357704,
 "learning_rate": 9.586750257511868e-06,
 "loss": 0.7771,
 "step": 95
 },
 {
 "epoch": 0.591715976331361,
-"grad_norm": 0.33357288560635495,
+"grad_norm": 0.3368221799612284,
 "learning_rate": 8.558255959926533e-06,
 "loss": 0.753,
 "step": 100
 },
 {
 "epoch": 0.591715976331361,
-"eval_loss": 0.7880157232284546,
-"eval_runtime": 0.7824,
-"eval_samples_per_second": 163.595,
-"eval_steps_per_second": 5.112,
+"eval_loss": 0.7879893779754639,
+"eval_runtime": 0.786,
+"eval_samples_per_second": 162.857,
+"eval_steps_per_second": 5.089,
 "step": 100
 },
 {
 "epoch": 0.621301775147929,
-"grad_norm": 0.334580352799432,
+"grad_norm": 0.3337128215279273,
 "learning_rate": 7.545145128592009e-06,
 "loss": 0.7569,
 "step": 105
 },
 {
 "epoch": 0.650887573964497,
-"grad_norm": 0.3457940719409492,
+"grad_norm": 0.34455276378156463,
 "learning_rate": 6.558227696373617e-06,
-"loss": 0.7681,
+"loss": 0.768,
 "step": 110
 },
 {
 "epoch": 0.6804733727810651,
-"grad_norm": 0.3080395598137145,
+"grad_norm": 0.30772420523279587,
 "learning_rate": 5.608034111526298e-06,
 "loss": 0.7623,
 "step": 115
 },
 {
 "epoch": 0.7100591715976331,
-"grad_norm": 0.2972896798042334,
+"grad_norm": 0.29781261348903965,
 "learning_rate": 4.704702977392914e-06,
-"loss": 0.7514,
+"loss": 0.7513,
 "step": 120
 },
 {
 "epoch": 0.7396449704142012,
-"grad_norm": 0.34366581285699394,
+"grad_norm": 0.34294568370060885,
 "learning_rate": 3.857872873103322e-06,
 "loss": 0.7538,
 "step": 125
 },
 {
 "epoch": 0.7692307692307693,
-"grad_norm": 0.3114970039276949,
+"grad_norm": 0.3123527643773362,
 "learning_rate": 3.0765795095517026e-06,
 "loss": 0.7555,
 "step": 130
 },
 {
 "epoch": 0.7988165680473372,
-"grad_norm": 0.32338821484371644,
+"grad_norm": 0.3221614581767522,
 "learning_rate": 2.369159318001937e-06,
 "loss": 0.7584,
 "step": 135
 },
 {
 "epoch": 0.8284023668639053,
-"grad_norm": 0.2993687193760852,
+"grad_norm": 0.29845427650643824,
 "learning_rate": 1.743160500034443e-06,
 "loss": 0.7498,
 "step": 140
 },
 {
 "epoch": 0.8579881656804734,
-"grad_norm": 0.31118464680620006,
+"grad_norm": 0.3105750284295203,
 "learning_rate": 1.2052624879351105e-06,
 "loss": 0.7566,
 "step": 145
 },
 {
 "epoch": 0.8875739644970414,
-"grad_norm": 0.29520967964613526,
+"grad_norm": 0.2941366532979867,
 "learning_rate": 7.612046748871327e-07,
 "loss": 0.7665,
 "step": 150
 },
 {
 "epoch": 0.9171597633136095,
-"grad_norm": 0.2965665861654801,
+"grad_norm": 0.2970058759706501,
 "learning_rate": 4.1572517541747294e-07,
 "loss": 0.7613,
 "step": 155
 },
 {
 "epoch": 0.9467455621301775,
-"grad_norm": 0.285789430294503,
+"grad_norm": 0.28596540886894695,
 "learning_rate": 1.7251026952640583e-07,
-"loss": 0.7607,
+"loss": 0.7606,
 "step": 160
 },
 {
 "epoch": 0.9763313609467456,
-"grad_norm": 0.27681573266768217,
+"grad_norm": 0.2764995113045065,
 "learning_rate": 3.4155069933301535e-08,
 "loss": 0.7445,
 "step": 165
@@ -250,11 +250,11 @@
 {
 "epoch": 1.0,
 "step": 169,
-"total_flos": 5.2247958791389184e+17,
-"train_loss": 0.8026480646528436,
-"train_runtime": 497.857,
-"train_samples_per_second": 43.406,
-"train_steps_per_second": 0.339
+"total_flos": 76916824473600.0,
+"train_loss": 0.8026409276138396,
+"train_runtime": 500.1417,
+"train_samples_per_second": 43.208,
+"train_steps_per_second": 0.338
 }
 ],
 "logging_steps": 5,
@@ -274,7 +274,7 @@
 "attributes": {}
 }
 },
-"total_flos": 5.2247958791389184e+17,
+"total_flos": 76916824473600.0,
 "train_batch_size": 4,
 "trial_name": null,
 "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb626b66b49ea77e8a67fa44a8a0a656bac2735ba7f30e25e289e491b4481d1d
+oid sha256:6c75eb157f86ab6474612e5f9b9d34ac42ec3740174caf5a923e5a7ae4f8be53
 size 7352
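
training_args.bin is a pickled training-arguments object, so the hyperparameters behind this run can be inspected locally. A sketch assuming the file has been downloaded; only unpickle files you trust:

```python
# Sketch: inspect the pickled training arguments (local path assumed).
# torch.load unpickles arbitrary objects, so only use it on trusted files;
# recent PyTorch versions require weights_only=False for non-tensor pickles.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # e.g. TrainingArguments or a TRL config subclass
print("learning_rate:", args.learning_rate)
print("per_device_train_batch_size:", args.per_device_train_batch_size)
print("num_train_epochs:", args.num_train_epochs)
```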