WangXFng committed on
Commit c9accb5 · verified · 1 Parent(s): b57816e

Model save

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ base_model: meta-llama/Llama-3.2-3B-Instruct
+ library_name: peft
+ license: llama3.2
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: Instruments-8bit-3B-4Epoch-RanOrd
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Instruments-8bit-3B-4Epoch-RanOrd
+
+ This model is a fine-tuned version of [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 2
+ - num_epochs: 4
+
+ ### Training results
+
+ ### Framework versions
+
+ - PEFT 0.13.0
+ - Transformers 4.45.2
+ - Pytorch 2.4.0
+ - Tokenizers 0.20.0
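
The hyperparameter list above maps onto a `transformers.TrainingArguments` setup; a minimal sketch under that assumption (the output directory name is illustrative, Adam betas/epsilon are the library defaults, and the effective batch size of 64 is simply 4 per-device samples × 16 accumulation steps):

```python
from transformers import TrainingArguments

# Sketch of a TrainingArguments equivalent to the hyperparameters listed above.
# output_dir is an assumed, illustrative name, not a confirmed path.
training_args = TrainingArguments(
    output_dir="Instruments-8bit-3B-4Epoch-RanOrd",
    learning_rate=1e-4,
    per_device_train_batch_size=4,    # train_batch_size: 4
    per_device_eval_batch_size=8,     # eval_batch_size: 8
    gradient_accumulation_steps=16,   # 4 * 16 = 64 total_train_batch_size
    num_train_epochs=4,
    lr_scheduler_type="linear",
    warmup_steps=2,
    seed=42,
    logging_steps=250,                # matches the logging cadence in trainer_state.json
)
```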
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-3.2-3B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "gate_proj",
+     "o_proj",
+     "down_proj",
+     "v_proj",
+     "up_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
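
The same adapter settings can be written as a `peft.LoraConfig`; a minimal sketch mirroring adapter_config.json above (PEFT 0.13 per the model card):

```python
from peft import LoraConfig

# LoRA settings mirroring adapter_config.json: rank 16, alpha 32, dropout 0.05,
# applied to every attention and MLP projection of the Llama block.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```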
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9263832cf062e46372b5dade00486f2a669da7935a629c8ed4ec4efb6f11dbb2
+ size 1684597880
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 128000,
+   "eos_token_id": [
+     128001,
+     128008,
+     128009
+   ],
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 3072,
+   "initializer_range": 0.02,
+   "intermediate_size": 8192,
+   "max_position_embeddings": 131072,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 24,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 32.0,
+     "high_freq_factor": 4.0,
+     "low_freq_factor": 1.0,
+     "original_max_position_embeddings": 8192,
+     "rope_type": "llama3"
+   },
+   "rope_theta": 500000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.45.2",
+   "use_cache": true,
+   "vocab_size": 129174
+ }
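
The "8bit" in the adapter name suggests the base model was loaded with 8-bit quantization; a minimal inference sketch under that assumption (the adapter path below is a placeholder for this repo, not a confirmed id):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

base_id = "meta-llama/Llama-3.2-3B-Instruct"
adapter_id = "Instruments-8bit-3B-4Epoch-RanOrd"  # placeholder: local path or hub repo id

tokenizer = AutoTokenizer.from_pretrained(adapter_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),  # assumed 8-bit loading
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model = PeftModel.from_pretrained(model, adapter_id)  # attach the LoRA adapter

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```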
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "bos_token": {
+     "content": "<|begin_of_text|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|eot_id|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "!"
+ }
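
special_tokens_map.json records "!" as the padding token, which implies the pad token was overridden before training (the stock Llama 3.2 tokenizer does not define one). A minimal sketch of that setup, stated as an assumption rather than the author's exact code:

```python
from transformers import AutoTokenizer

# Assumed setup: start from the base tokenizer and set the pad token used in this repo.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3B-Instruct")
tokenizer.pad_token = "!"  # matches pad_token in special_tokens_map.json
print(tokenizer.pad_token, tokenizer.pad_token_id)
```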
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6cdb5a41c4ff365aaf322d31b46b6ce2c9f8493fa398bd5ad4fbe6179ebfa53e
+ size 17378691
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
trainer_state.json ADDED
@@ -0,0 +1,959 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.9996055735491556,
5
+ "eval_steps": 500,
6
+ "global_step": 32956,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.030340496218815658,
13
+ "grad_norm": 1.0293707847595215,
14
+ "learning_rate": 9.924743581962737e-05,
15
+ "loss": 1.2653,
16
+ "step": 250
17
+ },
18
+ {
19
+ "epoch": 0.060680992437631316,
20
+ "grad_norm": 0.8869585990905762,
21
+ "learning_rate": 9.848880257328398e-05,
22
+ "loss": 0.757,
23
+ "step": 500
24
+ },
25
+ {
26
+ "epoch": 0.09102148865644698,
27
+ "grad_norm": 0.9423841834068298,
28
+ "learning_rate": 9.773016932694059e-05,
29
+ "loss": 0.6578,
30
+ "step": 750
31
+ },
32
+ {
33
+ "epoch": 0.12136198487526263,
34
+ "grad_norm": 0.7434495091438293,
35
+ "learning_rate": 9.697153608059721e-05,
36
+ "loss": 0.6204,
37
+ "step": 1000
38
+ },
39
+ {
40
+ "epoch": 0.1517024810940783,
41
+ "grad_norm": 0.6856955885887146,
42
+ "learning_rate": 9.621290283425382e-05,
43
+ "loss": 0.6006,
44
+ "step": 1250
45
+ },
46
+ {
47
+ "epoch": 0.18204297731289396,
48
+ "grad_norm": 0.6636335849761963,
49
+ "learning_rate": 9.545426958791042e-05,
50
+ "loss": 0.5822,
51
+ "step": 1500
52
+ },
53
+ {
54
+ "epoch": 0.2123834735317096,
55
+ "grad_norm": 0.6647191643714905,
56
+ "learning_rate": 9.469563634156705e-05,
57
+ "loss": 0.5681,
58
+ "step": 1750
59
+ },
60
+ {
61
+ "epoch": 0.24272396975052526,
62
+ "grad_norm": 0.825007438659668,
63
+ "learning_rate": 9.393700309522365e-05,
64
+ "loss": 0.5531,
65
+ "step": 2000
66
+ },
67
+ {
68
+ "epoch": 0.2730644659693409,
69
+ "grad_norm": 0.651090681552887,
70
+ "learning_rate": 9.317836984888026e-05,
71
+ "loss": 0.5387,
72
+ "step": 2250
73
+ },
74
+ {
75
+ "epoch": 0.3034049621881566,
76
+ "grad_norm": 0.7366721034049988,
77
+ "learning_rate": 9.241973660253687e-05,
78
+ "loss": 0.517,
79
+ "step": 2500
80
+ },
81
+ {
82
+ "epoch": 0.33374545840697223,
83
+ "grad_norm": 0.8244208693504333,
84
+ "learning_rate": 9.166110335619349e-05,
85
+ "loss": 0.4931,
86
+ "step": 2750
87
+ },
88
+ {
89
+ "epoch": 0.3640859546257879,
90
+ "grad_norm": 0.7282492518424988,
91
+ "learning_rate": 9.09024701098501e-05,
92
+ "loss": 0.4741,
93
+ "step": 3000
94
+ },
95
+ {
96
+ "epoch": 0.39442645084460354,
97
+ "grad_norm": 0.877441942691803,
98
+ "learning_rate": 9.014383686350671e-05,
99
+ "loss": 0.4543,
100
+ "step": 3250
101
+ },
102
+ {
103
+ "epoch": 0.4247669470634192,
104
+ "grad_norm": 0.8125002980232239,
105
+ "learning_rate": 8.938520361716333e-05,
106
+ "loss": 0.4301,
107
+ "step": 3500
108
+ },
109
+ {
110
+ "epoch": 0.4551074432822349,
111
+ "grad_norm": 0.7860125303268433,
112
+ "learning_rate": 8.862657037081994e-05,
113
+ "loss": 0.4165,
114
+ "step": 3750
115
+ },
116
+ {
117
+ "epoch": 0.48544793950105053,
118
+ "grad_norm": 0.8634310364723206,
119
+ "learning_rate": 8.786793712447655e-05,
120
+ "loss": 0.3964,
121
+ "step": 4000
122
+ },
123
+ {
124
+ "epoch": 0.5157884357198662,
125
+ "grad_norm": 0.8578837513923645,
126
+ "learning_rate": 8.710930387813317e-05,
127
+ "loss": 0.3847,
128
+ "step": 4250
129
+ },
130
+ {
131
+ "epoch": 0.5461289319386818,
132
+ "grad_norm": 0.6492015719413757,
133
+ "learning_rate": 8.635067063178978e-05,
134
+ "loss": 0.3763,
135
+ "step": 4500
136
+ },
137
+ {
138
+ "epoch": 0.5764694281574975,
139
+ "grad_norm": 0.7463727593421936,
140
+ "learning_rate": 8.559203738544639e-05,
141
+ "loss": 0.36,
142
+ "step": 4750
143
+ },
144
+ {
145
+ "epoch": 0.6068099243763132,
146
+ "grad_norm": 0.7797712683677673,
147
+ "learning_rate": 8.483340413910301e-05,
148
+ "loss": 0.3597,
149
+ "step": 5000
150
+ },
151
+ {
152
+ "epoch": 0.6371504205951288,
153
+ "grad_norm": 0.8424202799797058,
154
+ "learning_rate": 8.407477089275962e-05,
155
+ "loss": 0.344,
156
+ "step": 5250
157
+ },
158
+ {
159
+ "epoch": 0.6674909168139445,
160
+ "grad_norm": 0.7570486664772034,
161
+ "learning_rate": 8.331613764641622e-05,
162
+ "loss": 0.3402,
163
+ "step": 5500
164
+ },
165
+ {
166
+ "epoch": 0.6978314130327602,
167
+ "grad_norm": 0.741788923740387,
168
+ "learning_rate": 8.255750440007283e-05,
169
+ "loss": 0.3378,
170
+ "step": 5750
171
+ },
172
+ {
173
+ "epoch": 0.7281719092515758,
174
+ "grad_norm": 0.7841416597366333,
175
+ "learning_rate": 8.179887115372944e-05,
176
+ "loss": 0.3305,
177
+ "step": 6000
178
+ },
179
+ {
180
+ "epoch": 0.7585124054703914,
181
+ "grad_norm": 0.7679227590560913,
182
+ "learning_rate": 8.104023790738605e-05,
183
+ "loss": 0.3263,
184
+ "step": 6250
185
+ },
186
+ {
187
+ "epoch": 0.7888529016892071,
188
+ "grad_norm": 0.7030394673347473,
189
+ "learning_rate": 8.028160466104267e-05,
190
+ "loss": 0.3232,
191
+ "step": 6500
192
+ },
193
+ {
194
+ "epoch": 0.8191933979080228,
195
+ "grad_norm": 0.7032948136329651,
196
+ "learning_rate": 7.952297141469928e-05,
197
+ "loss": 0.3142,
198
+ "step": 6750
199
+ },
200
+ {
201
+ "epoch": 0.8495338941268384,
202
+ "grad_norm": 0.7051456570625305,
203
+ "learning_rate": 7.876433816835589e-05,
204
+ "loss": 0.3101,
205
+ "step": 7000
206
+ },
207
+ {
208
+ "epoch": 0.8798743903456541,
209
+ "grad_norm": 0.680454432964325,
210
+ "learning_rate": 7.80057049220125e-05,
211
+ "loss": 0.3128,
212
+ "step": 7250
213
+ },
214
+ {
215
+ "epoch": 0.9102148865644698,
216
+ "grad_norm": 0.6378083229064941,
217
+ "learning_rate": 7.724707167566912e-05,
218
+ "loss": 0.3058,
219
+ "step": 7500
220
+ },
221
+ {
222
+ "epoch": 0.9405553827832854,
223
+ "grad_norm": 0.6744751930236816,
224
+ "learning_rate": 7.648843842932573e-05,
225
+ "loss": 0.3021,
226
+ "step": 7750
227
+ },
228
+ {
229
+ "epoch": 0.9708958790021011,
230
+ "grad_norm": 0.6540088057518005,
231
+ "learning_rate": 7.572980518298233e-05,
232
+ "loss": 0.3028,
233
+ "step": 8000
234
+ },
235
+ {
236
+ "epoch": 1.0012363752209168,
237
+ "grad_norm": 0.6479789614677429,
238
+ "learning_rate": 7.497117193663896e-05,
239
+ "loss": 0.2993,
240
+ "step": 8250
241
+ },
242
+ {
243
+ "epoch": 1.0315768714397324,
244
+ "grad_norm": 0.696811854839325,
245
+ "learning_rate": 7.421253869029556e-05,
246
+ "loss": 0.29,
247
+ "step": 8500
248
+ },
249
+ {
250
+ "epoch": 1.061917367658548,
251
+ "grad_norm": 0.6420058608055115,
252
+ "learning_rate": 7.345390544395217e-05,
253
+ "loss": 0.2862,
254
+ "step": 8750
255
+ },
256
+ {
257
+ "epoch": 1.0922578638773637,
258
+ "grad_norm": 0.7226221561431885,
259
+ "learning_rate": 7.26952721976088e-05,
260
+ "loss": 0.2876,
261
+ "step": 9000
262
+ },
263
+ {
264
+ "epoch": 1.1225983600961793,
265
+ "grad_norm": 0.5515549182891846,
266
+ "learning_rate": 7.19366389512654e-05,
267
+ "loss": 0.284,
268
+ "step": 9250
269
+ },
270
+ {
271
+ "epoch": 1.1529388563149952,
272
+ "grad_norm": 0.5973398685455322,
273
+ "learning_rate": 7.117800570492201e-05,
274
+ "loss": 0.2828,
275
+ "step": 9500
276
+ },
277
+ {
278
+ "epoch": 1.1832793525338108,
279
+ "grad_norm": 0.6326724886894226,
280
+ "learning_rate": 7.041937245857862e-05,
281
+ "loss": 0.281,
282
+ "step": 9750
283
+ },
284
+ {
285
+ "epoch": 1.2136198487526264,
286
+ "grad_norm": 0.5345270037651062,
287
+ "learning_rate": 6.966073921223524e-05,
288
+ "loss": 0.2817,
289
+ "step": 10000
290
+ },
291
+ {
292
+ "epoch": 1.243960344971442,
293
+ "grad_norm": 0.6941035389900208,
294
+ "learning_rate": 6.890210596589185e-05,
295
+ "loss": 0.2783,
296
+ "step": 10250
297
+ },
298
+ {
299
+ "epoch": 1.2743008411902577,
300
+ "grad_norm": 0.499006062746048,
301
+ "learning_rate": 6.814347271954846e-05,
302
+ "loss": 0.2765,
303
+ "step": 10500
304
+ },
305
+ {
306
+ "epoch": 1.3046413374090733,
307
+ "grad_norm": 0.5554171204566956,
308
+ "learning_rate": 6.738483947320508e-05,
309
+ "loss": 0.2769,
310
+ "step": 10750
311
+ },
312
+ {
313
+ "epoch": 1.334981833627889,
314
+ "grad_norm": 0.6185881495475769,
315
+ "learning_rate": 6.662620622686169e-05,
316
+ "loss": 0.2737,
317
+ "step": 11000
318
+ },
319
+ {
320
+ "epoch": 1.3653223298467045,
321
+ "grad_norm": 0.5652614235877991,
322
+ "learning_rate": 6.58675729805183e-05,
323
+ "loss": 0.2736,
324
+ "step": 11250
325
+ },
326
+ {
327
+ "epoch": 1.3956628260655202,
328
+ "grad_norm": 0.602508008480072,
329
+ "learning_rate": 6.510893973417492e-05,
330
+ "loss": 0.2726,
331
+ "step": 11500
332
+ },
333
+ {
334
+ "epoch": 1.426003322284336,
335
+ "grad_norm": 0.6180042028427124,
336
+ "learning_rate": 6.435030648783153e-05,
337
+ "loss": 0.2713,
338
+ "step": 11750
339
+ },
340
+ {
341
+ "epoch": 1.4563438185031516,
342
+ "grad_norm": 0.510152280330658,
343
+ "learning_rate": 6.359167324148813e-05,
344
+ "loss": 0.2697,
345
+ "step": 12000
346
+ },
347
+ {
348
+ "epoch": 1.4866843147219673,
349
+ "grad_norm": 0.5209817886352539,
350
+ "learning_rate": 6.283303999514476e-05,
351
+ "loss": 0.2724,
352
+ "step": 12250
353
+ },
354
+ {
355
+ "epoch": 1.517024810940783,
356
+ "grad_norm": 0.5717406868934631,
357
+ "learning_rate": 6.207440674880136e-05,
358
+ "loss": 0.2675,
359
+ "step": 12500
360
+ },
361
+ {
362
+ "epoch": 1.5473653071595987,
363
+ "grad_norm": 0.5593615770339966,
364
+ "learning_rate": 6.131577350245797e-05,
365
+ "loss": 0.2683,
366
+ "step": 12750
367
+ },
368
+ {
369
+ "epoch": 1.5777058033784144,
370
+ "grad_norm": 0.6112098693847656,
371
+ "learning_rate": 6.055714025611459e-05,
372
+ "loss": 0.2637,
373
+ "step": 13000
374
+ },
375
+ {
376
+ "epoch": 1.60804629959723,
377
+ "grad_norm": 0.6081228256225586,
378
+ "learning_rate": 5.97985070097712e-05,
379
+ "loss": 0.2662,
380
+ "step": 13250
381
+ },
382
+ {
383
+ "epoch": 1.6383867958160456,
384
+ "grad_norm": 0.6320655345916748,
385
+ "learning_rate": 5.903987376342781e-05,
386
+ "loss": 0.2622,
387
+ "step": 13500
388
+ },
389
+ {
390
+ "epoch": 1.6687272920348613,
391
+ "grad_norm": 0.5471298098564148,
392
+ "learning_rate": 5.8281240517084425e-05,
393
+ "loss": 0.2596,
394
+ "step": 13750
395
+ },
396
+ {
397
+ "epoch": 1.6990677882536769,
398
+ "grad_norm": 0.5194515585899353,
399
+ "learning_rate": 5.7522607270741034e-05,
400
+ "loss": 0.2627,
401
+ "step": 14000
402
+ },
403
+ {
404
+ "epoch": 1.7294082844724925,
405
+ "grad_norm": 0.64277184009552,
406
+ "learning_rate": 5.676397402439765e-05,
407
+ "loss": 0.26,
408
+ "step": 14250
409
+ },
410
+ {
411
+ "epoch": 1.7597487806913081,
412
+ "grad_norm": 0.5324087738990784,
413
+ "learning_rate": 5.6005340778054264e-05,
414
+ "loss": 0.2608,
415
+ "step": 14500
416
+ },
417
+ {
418
+ "epoch": 1.7900892769101238,
419
+ "grad_norm": 0.5574278235435486,
420
+ "learning_rate": 5.524670753171087e-05,
421
+ "loss": 0.2617,
422
+ "step": 14750
423
+ },
424
+ {
425
+ "epoch": 1.8204297731289394,
426
+ "grad_norm": 0.5711286664009094,
427
+ "learning_rate": 5.448807428536749e-05,
428
+ "loss": 0.2571,
429
+ "step": 15000
430
+ },
431
+ {
432
+ "epoch": 1.8507702693477552,
433
+ "grad_norm": 0.5730472207069397,
434
+ "learning_rate": 5.3729441039024095e-05,
435
+ "loss": 0.2563,
436
+ "step": 15250
437
+ },
438
+ {
439
+ "epoch": 1.8811107655665709,
440
+ "grad_norm": 0.6286032199859619,
441
+ "learning_rate": 5.297080779268071e-05,
442
+ "loss": 0.2546,
443
+ "step": 15500
444
+ },
445
+ {
446
+ "epoch": 1.9114512617853865,
447
+ "grad_norm": 0.5809808373451233,
448
+ "learning_rate": 5.2212174546337325e-05,
449
+ "loss": 0.2548,
450
+ "step": 15750
451
+ },
452
+ {
453
+ "epoch": 1.9417917580042021,
454
+ "grad_norm": 0.5805879831314087,
455
+ "learning_rate": 5.1453541299993933e-05,
456
+ "loss": 0.2553,
457
+ "step": 16000
458
+ },
459
+ {
460
+ "epoch": 1.972132254223018,
461
+ "grad_norm": 0.6372638940811157,
462
+ "learning_rate": 5.069490805365055e-05,
463
+ "loss": 0.2524,
464
+ "step": 16250
465
+ },
466
+ {
467
+ "epoch": 2.0024727504418336,
468
+ "grad_norm": 0.643139660358429,
469
+ "learning_rate": 4.993627480730716e-05,
470
+ "loss": 0.251,
471
+ "step": 16500
472
+ },
473
+ {
474
+ "epoch": 2.032813246660649,
475
+ "grad_norm": 0.5501179099082947,
476
+ "learning_rate": 4.917764156096377e-05,
477
+ "loss": 0.244,
478
+ "step": 16750
479
+ },
480
+ {
481
+ "epoch": 2.063153742879465,
482
+ "grad_norm": 0.6013950109481812,
483
+ "learning_rate": 4.841900831462039e-05,
484
+ "loss": 0.2448,
485
+ "step": 17000
486
+ },
487
+ {
488
+ "epoch": 2.0934942390982805,
489
+ "grad_norm": 0.4996771216392517,
490
+ "learning_rate": 4.7660375068276995e-05,
491
+ "loss": 0.2442,
492
+ "step": 17250
493
+ },
494
+ {
495
+ "epoch": 2.123834735317096,
496
+ "grad_norm": 0.6059885025024414,
497
+ "learning_rate": 4.690174182193361e-05,
498
+ "loss": 0.242,
499
+ "step": 17500
500
+ },
501
+ {
502
+ "epoch": 2.1541752315359117,
503
+ "grad_norm": 0.48200371861457825,
504
+ "learning_rate": 4.614310857559022e-05,
505
+ "loss": 0.2418,
506
+ "step": 17750
507
+ },
508
+ {
509
+ "epoch": 2.1845157277547274,
510
+ "grad_norm": 0.6055967211723328,
511
+ "learning_rate": 4.5384475329246827e-05,
512
+ "loss": 0.2428,
513
+ "step": 18000
514
+ },
515
+ {
516
+ "epoch": 2.214856223973543,
517
+ "grad_norm": 0.5236734747886658,
518
+ "learning_rate": 4.462584208290344e-05,
519
+ "loss": 0.2409,
520
+ "step": 18250
521
+ },
522
+ {
523
+ "epoch": 2.2451967201923586,
524
+ "grad_norm": 0.6422255635261536,
525
+ "learning_rate": 4.386720883656005e-05,
526
+ "loss": 0.2415,
527
+ "step": 18500
528
+ },
529
+ {
530
+ "epoch": 2.2755372164111742,
531
+ "grad_norm": 0.545559823513031,
532
+ "learning_rate": 4.3108575590216665e-05,
533
+ "loss": 0.242,
534
+ "step": 18750
535
+ },
536
+ {
537
+ "epoch": 2.3058777126299903,
538
+ "grad_norm": 0.547564685344696,
539
+ "learning_rate": 4.234994234387328e-05,
540
+ "loss": 0.2406,
541
+ "step": 19000
542
+ },
543
+ {
544
+ "epoch": 2.3362182088488055,
545
+ "grad_norm": 0.5706421732902527,
546
+ "learning_rate": 4.159130909752989e-05,
547
+ "loss": 0.2431,
548
+ "step": 19250
549
+ },
550
+ {
551
+ "epoch": 2.3665587050676216,
552
+ "grad_norm": 0.6321772933006287,
553
+ "learning_rate": 4.08326758511865e-05,
554
+ "loss": 0.2378,
555
+ "step": 19500
556
+ },
557
+ {
558
+ "epoch": 2.396899201286437,
559
+ "grad_norm": 0.6109116077423096,
560
+ "learning_rate": 4.007404260484312e-05,
561
+ "loss": 0.2374,
562
+ "step": 19750
563
+ },
564
+ {
565
+ "epoch": 2.427239697505253,
566
+ "grad_norm": 0.4645892083644867,
567
+ "learning_rate": 3.9315409358499727e-05,
568
+ "loss": 0.2365,
569
+ "step": 20000
570
+ },
571
+ {
572
+ "epoch": 2.4575801937240684,
573
+ "grad_norm": 0.5845937728881836,
574
+ "learning_rate": 3.855677611215634e-05,
575
+ "loss": 0.2396,
576
+ "step": 20250
577
+ },
578
+ {
579
+ "epoch": 2.487920689942884,
580
+ "grad_norm": 0.6609899401664734,
581
+ "learning_rate": 3.779814286581295e-05,
582
+ "loss": 0.2358,
583
+ "step": 20500
584
+ },
585
+ {
586
+ "epoch": 2.5182611861616997,
587
+ "grad_norm": 0.6136410236358643,
588
+ "learning_rate": 3.7039509619469565e-05,
589
+ "loss": 0.2367,
590
+ "step": 20750
591
+ },
592
+ {
593
+ "epoch": 2.5486016823805153,
594
+ "grad_norm": 0.6023163795471191,
595
+ "learning_rate": 3.628087637312618e-05,
596
+ "loss": 0.2342,
597
+ "step": 21000
598
+ },
599
+ {
600
+ "epoch": 2.578942178599331,
601
+ "grad_norm": 0.5570552349090576,
602
+ "learning_rate": 3.552224312678279e-05,
603
+ "loss": 0.2368,
604
+ "step": 21250
605
+ },
606
+ {
607
+ "epoch": 2.6092826748181466,
608
+ "grad_norm": 0.5860863327980042,
609
+ "learning_rate": 3.47636098804394e-05,
610
+ "loss": 0.2345,
611
+ "step": 21500
612
+ },
613
+ {
614
+ "epoch": 2.639623171036962,
615
+ "grad_norm": 0.6390525698661804,
616
+ "learning_rate": 3.400497663409601e-05,
617
+ "loss": 0.2365,
618
+ "step": 21750
619
+ },
620
+ {
621
+ "epoch": 2.669963667255778,
622
+ "grad_norm": 0.6538860201835632,
623
+ "learning_rate": 3.3246343387752626e-05,
624
+ "loss": 0.2335,
625
+ "step": 22000
626
+ },
627
+ {
628
+ "epoch": 2.700304163474594,
629
+ "grad_norm": 0.5609804391860962,
630
+ "learning_rate": 3.248771014140924e-05,
631
+ "loss": 0.2366,
632
+ "step": 22250
633
+ },
634
+ {
635
+ "epoch": 2.730644659693409,
636
+ "grad_norm": 0.5518357753753662,
637
+ "learning_rate": 3.172907689506585e-05,
638
+ "loss": 0.2335,
639
+ "step": 22500
640
+ },
641
+ {
642
+ "epoch": 2.760985155912225,
643
+ "grad_norm": 0.6421113014221191,
644
+ "learning_rate": 3.0970443648722465e-05,
645
+ "loss": 0.2348,
646
+ "step": 22750
647
+ },
648
+ {
649
+ "epoch": 2.7913256521310403,
650
+ "grad_norm": 0.6312738656997681,
651
+ "learning_rate": 3.0211810402379076e-05,
652
+ "loss": 0.2324,
653
+ "step": 23000
654
+ },
655
+ {
656
+ "epoch": 2.8216661483498564,
657
+ "grad_norm": 0.6342710256576538,
658
+ "learning_rate": 2.9453177156035688e-05,
659
+ "loss": 0.2291,
660
+ "step": 23250
661
+ },
662
+ {
663
+ "epoch": 2.852006644568672,
664
+ "grad_norm": 0.5744002461433411,
665
+ "learning_rate": 2.86945439096923e-05,
666
+ "loss": 0.2318,
667
+ "step": 23500
668
+ },
669
+ {
670
+ "epoch": 2.8823471407874877,
671
+ "grad_norm": 0.6222126483917236,
672
+ "learning_rate": 2.7935910663348915e-05,
673
+ "loss": 0.2319,
674
+ "step": 23750
675
+ },
676
+ {
677
+ "epoch": 2.9126876370063033,
678
+ "grad_norm": 0.5072076916694641,
679
+ "learning_rate": 2.7177277417005526e-05,
680
+ "loss": 0.2288,
681
+ "step": 24000
682
+ },
683
+ {
684
+ "epoch": 2.943028133225119,
685
+ "grad_norm": 0.6769903898239136,
686
+ "learning_rate": 2.6418644170662138e-05,
687
+ "loss": 0.2296,
688
+ "step": 24250
689
+ },
690
+ {
691
+ "epoch": 2.9733686294439345,
692
+ "grad_norm": 0.6178023815155029,
693
+ "learning_rate": 2.566001092431875e-05,
694
+ "loss": 0.228,
695
+ "step": 24500
696
+ },
697
+ {
698
+ "epoch": 3.00370912566275,
699
+ "grad_norm": 0.623375654220581,
700
+ "learning_rate": 2.490137767797536e-05,
701
+ "loss": 0.2271,
702
+ "step": 24750
703
+ },
704
+ {
705
+ "epoch": 3.034049621881566,
706
+ "grad_norm": 0.5049629211425781,
707
+ "learning_rate": 2.4142744431631973e-05,
708
+ "loss": 0.2177,
709
+ "step": 25000
710
+ },
711
+ {
712
+ "epoch": 3.0643901181003814,
713
+ "grad_norm": 0.5621640682220459,
714
+ "learning_rate": 2.3384111185288585e-05,
715
+ "loss": 0.2203,
716
+ "step": 25250
717
+ },
718
+ {
719
+ "epoch": 3.094730614319197,
720
+ "grad_norm": 0.6677132844924927,
721
+ "learning_rate": 2.2625477938945196e-05,
722
+ "loss": 0.2188,
723
+ "step": 25500
724
+ },
725
+ {
726
+ "epoch": 3.1250711105380127,
727
+ "grad_norm": 0.6030067801475525,
728
+ "learning_rate": 2.1866844692601808e-05,
729
+ "loss": 0.2197,
730
+ "step": 25750
731
+ },
732
+ {
733
+ "epoch": 3.1554116067568283,
734
+ "grad_norm": 0.6289698481559753,
735
+ "learning_rate": 2.1108211446258423e-05,
736
+ "loss": 0.2206,
737
+ "step": 26000
738
+ },
739
+ {
740
+ "epoch": 3.1857521029756444,
741
+ "grad_norm": 0.650068461894989,
742
+ "learning_rate": 2.0349578199915035e-05,
743
+ "loss": 0.2193,
744
+ "step": 26250
745
+ },
746
+ {
747
+ "epoch": 3.21609259919446,
748
+ "grad_norm": 0.6510699987411499,
749
+ "learning_rate": 1.9590944953571646e-05,
750
+ "loss": 0.218,
751
+ "step": 26500
752
+ },
753
+ {
754
+ "epoch": 3.2464330954132756,
755
+ "grad_norm": 0.6897627115249634,
756
+ "learning_rate": 1.8832311707228258e-05,
757
+ "loss": 0.2172,
758
+ "step": 26750
759
+ },
760
+ {
761
+ "epoch": 3.2767735916320913,
762
+ "grad_norm": 0.6440379023551941,
763
+ "learning_rate": 1.8073678460884873e-05,
764
+ "loss": 0.2157,
765
+ "step": 27000
766
+ },
767
+ {
768
+ "epoch": 3.307114087850907,
769
+ "grad_norm": 0.6011075973510742,
770
+ "learning_rate": 1.7315045214541485e-05,
771
+ "loss": 0.2159,
772
+ "step": 27250
773
+ },
774
+ {
775
+ "epoch": 3.3374545840697225,
776
+ "grad_norm": 0.6770527362823486,
777
+ "learning_rate": 1.6556411968198096e-05,
778
+ "loss": 0.2178,
779
+ "step": 27500
780
+ },
781
+ {
782
+ "epoch": 3.367795080288538,
783
+ "grad_norm": 0.5674268007278442,
784
+ "learning_rate": 1.5797778721854708e-05,
785
+ "loss": 0.2155,
786
+ "step": 27750
787
+ },
788
+ {
789
+ "epoch": 3.3981355765073538,
790
+ "grad_norm": 0.6782290935516357,
791
+ "learning_rate": 1.5039145475511321e-05,
792
+ "loss": 0.2165,
793
+ "step": 28000
794
+ },
795
+ {
796
+ "epoch": 3.4284760727261694,
797
+ "grad_norm": 0.6378525495529175,
798
+ "learning_rate": 1.4280512229167931e-05,
799
+ "loss": 0.2165,
800
+ "step": 28250
801
+ },
802
+ {
803
+ "epoch": 3.458816568944985,
804
+ "grad_norm": 0.6417750716209412,
805
+ "learning_rate": 1.3521878982824543e-05,
806
+ "loss": 0.2154,
807
+ "step": 28500
808
+ },
809
+ {
810
+ "epoch": 3.4891570651638006,
811
+ "grad_norm": 0.733974039554596,
812
+ "learning_rate": 1.2763245736481154e-05,
813
+ "loss": 0.2145,
814
+ "step": 28750
815
+ },
816
+ {
817
+ "epoch": 3.5194975613826163,
818
+ "grad_norm": 0.6935612559318542,
819
+ "learning_rate": 1.200461249013777e-05,
820
+ "loss": 0.2126,
821
+ "step": 29000
822
+ },
823
+ {
824
+ "epoch": 3.5498380576014323,
825
+ "grad_norm": 0.6449461579322815,
826
+ "learning_rate": 1.124597924379438e-05,
827
+ "loss": 0.2133,
828
+ "step": 29250
829
+ },
830
+ {
831
+ "epoch": 3.5801785538202475,
832
+ "grad_norm": 0.630962610244751,
833
+ "learning_rate": 1.0487345997450993e-05,
834
+ "loss": 0.2137,
835
+ "step": 29500
836
+ },
837
+ {
838
+ "epoch": 3.6105190500390636,
839
+ "grad_norm": 0.6320120692253113,
840
+ "learning_rate": 9.728712751107604e-06,
841
+ "loss": 0.2151,
842
+ "step": 29750
843
+ },
844
+ {
845
+ "epoch": 3.6408595462578788,
846
+ "grad_norm": 0.6066524386405945,
847
+ "learning_rate": 8.970079504764218e-06,
848
+ "loss": 0.2132,
849
+ "step": 30000
850
+ },
851
+ {
852
+ "epoch": 3.671200042476695,
853
+ "grad_norm": 0.707771897315979,
854
+ "learning_rate": 8.21144625842083e-06,
855
+ "loss": 0.2122,
856
+ "step": 30250
857
+ },
858
+ {
859
+ "epoch": 3.7015405386955105,
860
+ "grad_norm": 0.6526840925216675,
861
+ "learning_rate": 7.452813012077442e-06,
862
+ "loss": 0.2107,
863
+ "step": 30500
864
+ },
865
+ {
866
+ "epoch": 3.731881034914326,
867
+ "grad_norm": 0.7959334254264832,
868
+ "learning_rate": 6.694179765734054e-06,
869
+ "loss": 0.2119,
870
+ "step": 30750
871
+ },
872
+ {
873
+ "epoch": 3.7622215311331417,
874
+ "grad_norm": 0.6414825320243835,
875
+ "learning_rate": 5.935546519390666e-06,
876
+ "loss": 0.2092,
877
+ "step": 31000
878
+ },
879
+ {
880
+ "epoch": 3.7925620273519574,
881
+ "grad_norm": 0.6206453442573547,
882
+ "learning_rate": 5.1769132730472785e-06,
883
+ "loss": 0.2105,
884
+ "step": 31250
885
+ },
886
+ {
887
+ "epoch": 3.822902523570773,
888
+ "grad_norm": 0.6464530825614929,
889
+ "learning_rate": 4.418280026703891e-06,
890
+ "loss": 0.2109,
891
+ "step": 31500
892
+ },
893
+ {
894
+ "epoch": 3.8532430197895886,
895
+ "grad_norm": 0.6245775818824768,
896
+ "learning_rate": 3.6596467803605027e-06,
897
+ "loss": 0.2108,
898
+ "step": 31750
899
+ },
900
+ {
901
+ "epoch": 3.8835835160084042,
902
+ "grad_norm": 0.6799646615982056,
903
+ "learning_rate": 2.9010135340171147e-06,
904
+ "loss": 0.2116,
905
+ "step": 32000
906
+ },
907
+ {
908
+ "epoch": 3.91392401222722,
909
+ "grad_norm": 0.6648467183113098,
910
+ "learning_rate": 2.1423802876737272e-06,
911
+ "loss": 0.2108,
912
+ "step": 32250
913
+ },
914
+ {
915
+ "epoch": 3.9442645084460355,
916
+ "grad_norm": 0.6555809378623962,
917
+ "learning_rate": 1.3837470413303393e-06,
918
+ "loss": 0.2117,
919
+ "step": 32500
920
+ },
921
+ {
922
+ "epoch": 3.974605004664851,
923
+ "grad_norm": 0.5603693127632141,
924
+ "learning_rate": 6.251137949869516e-07,
925
+ "loss": 0.209,
926
+ "step": 32750
927
+ },
928
+ {
929
+ "epoch": 3.9996055735491556,
930
+ "step": 32956,
931
+ "total_flos": 5.234804195348718e+18,
932
+ "train_loss": 0.2934305334803057,
933
+ "train_runtime": 64350.2603,
934
+ "train_samples_per_second": 32.78,
935
+ "train_steps_per_second": 0.512
936
+ }
937
+ ],
938
+ "logging_steps": 250,
939
+ "max_steps": 32956,
940
+ "num_input_tokens_seen": 0,
941
+ "num_train_epochs": 4,
942
+ "save_steps": 500,
943
+ "stateful_callbacks": {
944
+ "TrainerControl": {
945
+ "args": {
946
+ "should_epoch_stop": false,
947
+ "should_evaluate": false,
948
+ "should_log": false,
949
+ "should_save": false,
950
+ "should_training_stop": false
951
+ },
952
+ "attributes": {}
953
+ }
954
+ },
955
+ "total_flos": 5.234804195348718e+18,
956
+ "train_batch_size": 4,
957
+ "trial_name": null,
958
+ "trial_params": null
959
+ }
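
trainer_state.json keeps the full training log under `log_history` (loss, learning rate and gradient norm every 250 steps, plus a final summary entry). A short sketch for pulling out the loss curve, assuming the file sits in the working directory:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry "loss"; the final summary entry carries "train_loss" instead.
curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(curve[0], curve[-1])  # (250, 1.2653) ... (32750, 0.209)
print("final train_loss:", state["log_history"][-1]["train_loss"])  # ~0.2934
```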
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35744e6ddcf7ee8368192d537215378713b7338af0242efa99be366135789906
+ size 5240