Lansechen committed
Commit f66ba41 · verified · 1 parent: 6d72610

Model save
README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ base_model: deepseek-ai/DeepSeek-V2-Lite-Chat
+ library_name: transformers
+ model_name: deepseek-v2-lite-16b-chat-R1-Distill-bs17k-batch32
+ tags:
+ - generated_from_trainer
+ - trl
+ - sft
+ licence: license
+ ---
+
+ # Model Card for deepseek-v2-lite-16b-chat-R1-Distill-bs17k-batch32
+
+ This model is a fine-tuned version of [deepseek-ai/DeepSeek-V2-Lite-Chat](https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite-Chat).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="Lansechen/deepseek-v2-lite-16b-chat-R1-Distill-bs17k-batch32", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/chenran1995-the-chinese-university-of-hong-kong/huggingface/runs/kwoa4uzo)
+
+
+ This model was trained with SFT.
+
+ ### Framework versions
+
+ - TRL: 0.16.0.dev0
+ - Transformers: 4.49.0.dev0
+ - Pytorch: 2.5.1+cu121
+ - Datasets: 3.3.1
+ - Tokenizers: 0.21.0
+
+ ## Citations
+
+
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+ title = {{TRL: Transformer Reinforcement Learning}},
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+ year = 2020,
+ journal = {GitHub repository},
+ publisher = {GitHub},
+ howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
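The Quick start in the card above uses the stock `pipeline` helper. Because config.json in this commit maps the architecture to DeepSeek's custom modeling code via `auto_map`, loading may also require `trust_remote_code=True` depending on the installed transformers version. A minimal sketch of an explicit load (repository id taken from the card; everything else is an assumption, not part of the original README):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Lansechen/deepseek-v2-lite-16b-chat-R1-Distill-bs17k-batch32"

# DeepSeek-V2 ships custom config/model classes (see auto_map in config.json),
# so trust_remote_code=True is typically needed when loading this checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",   # the checkpoint is stored in bfloat16
    device_map="auto",
    trust_remote_code=True,
)

messages = [{"role": "user", "content": "Summarize what this model was fine-tuned for."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True))
```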
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 72689419026432.0,
+ "train_loss": 0.8537281411617138,
+ "train_runtime": 90647.457,
+ "train_samples": 16610,
+ "train_samples_per_second": 1.032,
+ "train_steps_per_second": 0.008
+ }
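As a rough sanity check, these throughput figures are consistent with the 731 optimizer steps recorded in trainer_state.json further down. The per-step batch implied by them is an inference on my part, not a value stated anywhere in this commit:

```python
# Values copied from all_results.json / trainer_state.json in this commit.
train_runtime = 90647.457        # seconds
global_steps = 731               # max_steps / final global_step in trainer_state.json
samples_per_second = 1.032

print(round(global_steps / train_runtime, 3))                     # ~0.008, matches train_steps_per_second
print(round(samples_per_second * train_runtime / global_steps))   # ~128 samples per optimizer step (inferred effective batch)
```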
config.json ADDED
@@ -0,0 +1,61 @@
+ {
+ "_name_or_path": "deepseek-ai/DeepSeek-V2-Lite-Chat",
+ "architectures": [
+ "DeepseekV2ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "auto_map": {
+ "AutoConfig": "deepseek-ai/DeepSeek-V2-Lite-Chat--configuration_deepseek.DeepseekV2Config",
+ "AutoModel": "deepseek-ai/DeepSeek-V2-Lite-Chat--modeling_deepseek.DeepseekV2Model",
+ "AutoModelForCausalLM": "deepseek-ai/DeepSeek-V2-Lite-Chat--modeling_deepseek.DeepseekV2ForCausalLM"
+ },
+ "aux_loss_alpha": 0.001,
+ "bos_token_id": 100000,
+ "eos_token_id": 100001,
+ "ep_size": 1,
+ "first_k_dense_replace": 1,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 10944,
+ "kv_lora_rank": 512,
+ "max_position_embeddings": 163840,
+ "model_type": "deepseek_v2",
+ "moe_intermediate_size": 1408,
+ "moe_layer_freq": 1,
+ "n_group": 1,
+ "n_routed_experts": 64,
+ "n_shared_experts": 2,
+ "norm_topk_prob": false,
+ "num_attention_heads": 16,
+ "num_experts_per_tok": 6,
+ "num_hidden_layers": 27,
+ "num_key_value_heads": 16,
+ "pretraining_tp": 1,
+ "q_lora_rank": null,
+ "qk_nope_head_dim": 128,
+ "qk_rope_head_dim": 64,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": {
+ "beta_fast": 32,
+ "beta_slow": 1,
+ "factor": 40,
+ "mscale": 0.707,
+ "mscale_all_dim": 0.707,
+ "original_max_position_embeddings": 4096,
+ "type": "yarn"
+ },
+ "rope_theta": 10000,
+ "routed_scaling_factor": 1.0,
+ "scoring_func": "softmax",
+ "seq_aux": true,
+ "tie_word_embeddings": false,
+ "topk_group": 1,
+ "topk_method": "greedy",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.49.0.dev0",
+ "use_cache": false,
+ "v_head_dim": 128,
+ "vocab_size": 102400
+ }
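For readers who want to inspect these hyperparameters programmatically, a small sketch (repository id assumed from the model card; `trust_remote_code` is needed because `auto_map` points at DeepSeek's custom config class):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(
    "Lansechen/deepseek-v2-lite-16b-chat-R1-Distill-bs17k-batch32",
    trust_remote_code=True,  # auto_map resolves to DeepseekV2Config
)

# MoE layout: 64 routed experts + 2 shared experts, 6 routed experts active
# per token; only the first layer stays dense (first_k_dense_replace = 1).
print(cfg.n_routed_experts, cfg.n_shared_experts, cfg.num_experts_per_tok)

# YaRN long-context scaling: 4096 original positions * factor 40 = 163840.
rs = cfg.rope_scaling
assert rs["original_max_position_embeddings"] * rs["factor"] == cfg.max_position_embeddings
```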
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 100000,
+ "do_sample": true,
+ "eos_token_id": 100001,
+ "temperature": 0.3,
+ "top_p": 0.95,
+ "transformers_version": "4.49.0.dev0"
+ }
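These values act as the default sampling settings picked up by `model.generate()`. A short sketch showing how to read and override them (repository id assumed from the model card):

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained(
    "Lansechen/deepseek-v2-lite-16b-chat-R1-Distill-bs17k-batch32"
)
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.top_p)  # True 0.3 0.95

# Per-call keyword arguments take precedence over these defaults, e.g.:
#   model.generate(input_ids, max_new_tokens=256, temperature=0.7)
```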
model-00001-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cc97131433f4cc5634e9f7a8ffaca4a8f0c6721f841e7a1d513a3cbe48b1f9a
+ size 4994763632
model-00002-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2a3a5bcc27dcb6019ac84db2c87c65ecc46ba87e8c9a395fdd666e532a54b4a
+ size 4995044944
model-00003-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6311af615e50fae51ec08bd8e9b61738372df446a16c948897280a7cb79bd88e
+ size 4996085000
model-00004-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70e37fc64359608a64994aab61b492d64af3cd5668bad0b272626d49dca6bfcf
+ size 4996085224
model-00005-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5446c81955d560513eff8c3f0ed6e70faba77b5c0e464acbe63d4049d6cdc0df
+ size 4996085224
model-00006-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7986d6683bba741c3fb6d93ea969911571d794b452a7a21e4b969a9f605cd5f2
+ size 4995045792
model-00007-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be1cbb5b0d11e7bdfd6a18e60876cda7762f632103faadee4881fd624ffc1e3b
+ size 1440515736
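The seven entries above are Git LFS pointer files, so the diff only records a hash and a byte count per shard. Summing the sizes gives the on-disk footprint, which lines up with a roughly 16B-parameter checkpoint stored in bfloat16 (2 bytes per weight), as config.json indicates:

```python
# Shard sizes copied from the LFS pointers above (bytes).
shard_sizes = [
    4994763632, 4995044944, 4996085000, 4996085224,
    4996085224, 4995045792, 1440515736,
]
total_bytes = sum(shard_sizes)
print(f"{total_bytes / 1e9:.1f} GB")                             # ~31.4 GB on disk
print(f"{total_bytes / 2 / 1e9:.1f}B parameters at 2 bytes each")  # ~15.7B, consistent with the "16b" in the model name
```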
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<|end▁of▁sentence|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "100000": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100001": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+ }
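The `chat_template` above is what `tokenizer.apply_chat_template` uses for this checkpoint. A small sketch of how a two-turn prompt renders under it; the commented output is my reading of the template, not something printed anywhere in this commit:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "Lansechen/deepseek-v2-lite-16b-chat-R1-Distill-bs17k-batch32",
    trust_remote_code=True,  # harmless if the stock LlamaTokenizer suffices
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# Expected shape per the template above (bos token prepended, blank lines between turns):
# <|begin▁of▁sentence|>You are a helpful assistant.
#
# User: Hello!
#
# Assistant:
```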
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 72689419026432.0,
+ "train_loss": 0.8537281411617138,
+ "train_runtime": 90647.457,
+ "train_samples": 16610,
+ "train_samples_per_second": 1.032,
+ "train_steps_per_second": 0.008
+ }
trainer_state.json ADDED
@@ -0,0 +1,1211 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.999829030603522,
5
+ "eval_steps": 100,
6
+ "global_step": 731,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.006838775859121217,
13
+ "grad_norm": 4.283134460449219,
14
+ "learning_rate": 1.3513513513513515e-06,
15
+ "loss": 1.3383,
16
+ "mean_token_accuracy": 0.6803730711340904,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.013677551718242434,
21
+ "grad_norm": 2.8879575729370117,
22
+ "learning_rate": 2.702702702702703e-06,
23
+ "loss": 1.2973,
24
+ "mean_token_accuracy": 0.6889803171157837,
25
+ "step": 10
26
+ },
27
+ {
28
+ "epoch": 0.02051632757736365,
29
+ "grad_norm": 1.4510750770568848,
30
+ "learning_rate": 4.0540540540540545e-06,
31
+ "loss": 1.2647,
32
+ "mean_token_accuracy": 0.6867526054382325,
33
+ "step": 15
34
+ },
35
+ {
36
+ "epoch": 0.027355103436484868,
37
+ "grad_norm": 2.0351901054382324,
38
+ "learning_rate": 5.405405405405406e-06,
39
+ "loss": 1.1765,
40
+ "mean_token_accuracy": 0.7007667943835258,
41
+ "step": 20
42
+ },
43
+ {
44
+ "epoch": 0.034193879295606085,
45
+ "grad_norm": 1.0107135772705078,
46
+ "learning_rate": 6.7567567567567575e-06,
47
+ "loss": 1.1098,
48
+ "mean_token_accuracy": 0.7095919325947762,
49
+ "step": 25
50
+ },
51
+ {
52
+ "epoch": 0.0410326551547273,
53
+ "grad_norm": 0.7789012789726257,
54
+ "learning_rate": 8.108108108108109e-06,
55
+ "loss": 1.0612,
56
+ "mean_token_accuracy": 0.717575378715992,
57
+ "step": 30
58
+ },
59
+ {
60
+ "epoch": 0.04787143101384852,
61
+ "grad_norm": 0.6472299098968506,
62
+ "learning_rate": 9.45945945945946e-06,
63
+ "loss": 1.0272,
64
+ "mean_token_accuracy": 0.7225469693541526,
65
+ "step": 35
66
+ },
67
+ {
68
+ "epoch": 0.054710206872969736,
69
+ "grad_norm": 0.5901947021484375,
70
+ "learning_rate": 1.0810810810810812e-05,
71
+ "loss": 0.9874,
72
+ "mean_token_accuracy": 0.7313357755541802,
73
+ "step": 40
74
+ },
75
+ {
76
+ "epoch": 0.06154898273209095,
77
+ "grad_norm": 0.6146127581596375,
78
+ "learning_rate": 1.2162162162162164e-05,
79
+ "loss": 0.9738,
80
+ "mean_token_accuracy": 0.7330265626311302,
81
+ "step": 45
82
+ },
83
+ {
84
+ "epoch": 0.06838775859121217,
85
+ "grad_norm": 0.6039953827857971,
86
+ "learning_rate": 1.3513513513513515e-05,
87
+ "loss": 0.9604,
88
+ "mean_token_accuracy": 0.7354672074317932,
89
+ "step": 50
90
+ },
91
+ {
92
+ "epoch": 0.0752265344503334,
93
+ "grad_norm": 0.5497832894325256,
94
+ "learning_rate": 1.4864864864864865e-05,
95
+ "loss": 0.9501,
96
+ "mean_token_accuracy": 0.7374408826231956,
97
+ "step": 55
98
+ },
99
+ {
100
+ "epoch": 0.0820653103094546,
101
+ "grad_norm": 0.5113657712936401,
102
+ "learning_rate": 1.6216216216216218e-05,
103
+ "loss": 0.9237,
104
+ "mean_token_accuracy": 0.7434996381402016,
105
+ "step": 60
106
+ },
107
+ {
108
+ "epoch": 0.08890408616857583,
109
+ "grad_norm": 0.5408641695976257,
110
+ "learning_rate": 1.756756756756757e-05,
111
+ "loss": 0.9378,
112
+ "mean_token_accuracy": 0.7394065439701081,
113
+ "step": 65
114
+ },
115
+ {
116
+ "epoch": 0.09574286202769704,
117
+ "grad_norm": 0.5544885396957397,
118
+ "learning_rate": 1.891891891891892e-05,
119
+ "loss": 0.9347,
120
+ "mean_token_accuracy": 0.739613801240921,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 0.10258163788681826,
125
+ "grad_norm": 0.5568746328353882,
126
+ "learning_rate": 1.9999885675796825e-05,
127
+ "loss": 0.9144,
128
+ "mean_token_accuracy": 0.7448009416460991,
129
+ "step": 75
130
+ },
131
+ {
132
+ "epoch": 0.10942041374593947,
133
+ "grad_norm": 0.5509134531021118,
134
+ "learning_rate": 1.9995884603149403e-05,
135
+ "loss": 0.9181,
136
+ "mean_token_accuracy": 0.7419906392693519,
137
+ "step": 80
138
+ },
139
+ {
140
+ "epoch": 0.1162591896050607,
141
+ "grad_norm": 0.548611044883728,
142
+ "learning_rate": 1.9986169934079135e-05,
143
+ "loss": 0.9106,
144
+ "mean_token_accuracy": 0.7441679835319519,
145
+ "step": 85
146
+ },
147
+ {
148
+ "epoch": 0.1230979654641819,
149
+ "grad_norm": 0.5514124631881714,
150
+ "learning_rate": 1.9970747221441084e-05,
151
+ "loss": 0.9151,
152
+ "mean_token_accuracy": 0.7427051544189454,
153
+ "step": 90
154
+ },
155
+ {
156
+ "epoch": 0.12993674132330313,
157
+ "grad_norm": 0.549062967300415,
158
+ "learning_rate": 1.994962528077878e-05,
159
+ "loss": 0.9029,
160
+ "mean_token_accuracy": 0.7451761096715928,
161
+ "step": 95
162
+ },
163
+ {
164
+ "epoch": 0.13677551718242434,
165
+ "grad_norm": 0.573813796043396,
166
+ "learning_rate": 1.9922816185285264e-05,
167
+ "loss": 0.8884,
168
+ "mean_token_accuracy": 0.7493397817015648,
169
+ "step": 100
170
+ },
171
+ {
172
+ "epoch": 0.14361429304154558,
173
+ "grad_norm": 0.5562133193016052,
174
+ "learning_rate": 1.9890335258902177e-05,
175
+ "loss": 0.8855,
176
+ "mean_token_accuracy": 0.7493105083703995,
177
+ "step": 105
178
+ },
179
+ {
180
+ "epoch": 0.1504530689006668,
181
+ "grad_norm": 0.5802770853042603,
182
+ "learning_rate": 1.9852201067560607e-05,
183
+ "loss": 0.9027,
184
+ "mean_token_accuracy": 0.7449040159583091,
185
+ "step": 110
186
+ },
187
+ {
188
+ "epoch": 0.157291844759788,
189
+ "grad_norm": 0.5636809468269348,
190
+ "learning_rate": 1.9808435408568938e-05,
191
+ "loss": 0.8876,
192
+ "mean_token_accuracy": 0.7495187863707542,
193
+ "step": 115
194
+ },
195
+ {
196
+ "epoch": 0.1641306206189092,
197
+ "grad_norm": 0.5830625891685486,
198
+ "learning_rate": 1.97590632981536e-05,
199
+ "loss": 0.8871,
200
+ "mean_token_accuracy": 0.7475770503282547,
201
+ "step": 120
202
+ },
203
+ {
204
+ "epoch": 0.17096939647803044,
205
+ "grad_norm": 0.5543267726898193,
206
+ "learning_rate": 1.970411295715994e-05,
207
+ "loss": 0.8918,
208
+ "mean_token_accuracy": 0.7472289979457856,
209
+ "step": 125
210
+ },
211
+ {
212
+ "epoch": 0.17780817233715165,
213
+ "grad_norm": 0.5292794704437256,
214
+ "learning_rate": 1.964361579492132e-05,
215
+ "loss": 0.8764,
216
+ "mean_token_accuracy": 0.7520371958613395,
217
+ "step": 130
218
+ },
219
+ {
220
+ "epoch": 0.18464694819627286,
221
+ "grad_norm": 0.5680862069129944,
222
+ "learning_rate": 1.9577606391305705e-05,
223
+ "loss": 0.8934,
224
+ "mean_token_accuracy": 0.7469688639044761,
225
+ "step": 135
226
+ },
227
+ {
228
+ "epoch": 0.19148572405539407,
229
+ "grad_norm": 0.5672624111175537,
230
+ "learning_rate": 1.950612247694998e-05,
231
+ "loss": 0.889,
232
+ "mean_token_accuracy": 0.7479311898350716,
233
+ "step": 140
234
+ },
235
+ {
236
+ "epoch": 0.1983244999145153,
237
+ "grad_norm": 0.5509658455848694,
238
+ "learning_rate": 1.9429204911693333e-05,
239
+ "loss": 0.8708,
240
+ "mean_token_accuracy": 0.7525908648967743,
241
+ "step": 145
242
+ },
243
+ {
244
+ "epoch": 0.20516327577363652,
245
+ "grad_norm": 0.573887825012207,
246
+ "learning_rate": 1.9346897661221957e-05,
247
+ "loss": 0.8748,
248
+ "mean_token_accuracy": 0.7516056269407272,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.21200205163275773,
253
+ "grad_norm": 0.5755806565284729,
254
+ "learning_rate": 1.92592477719385e-05,
255
+ "loss": 0.88,
256
+ "mean_token_accuracy": 0.7498792737722397,
257
+ "step": 155
258
+ },
259
+ {
260
+ "epoch": 0.21884082749187894,
261
+ "grad_norm": 0.5494788885116577,
262
+ "learning_rate": 1.916630534407058e-05,
263
+ "loss": 0.8739,
264
+ "mean_token_accuracy": 0.7512936532497406,
265
+ "step": 160
266
+ },
267
+ {
268
+ "epoch": 0.22567960335100018,
269
+ "grad_norm": 0.543487548828125,
270
+ "learning_rate": 1.9068123503033752e-05,
271
+ "loss": 0.8587,
272
+ "mean_token_accuracy": 0.7555688112974167,
273
+ "step": 165
274
+ },
275
+ {
276
+ "epoch": 0.2325183792101214,
277
+ "grad_norm": 0.5982221364974976,
278
+ "learning_rate": 1.8964758369065303e-05,
279
+ "loss": 0.8923,
280
+ "mean_token_accuracy": 0.7462443545460701,
281
+ "step": 170
282
+ },
283
+ {
284
+ "epoch": 0.2393571550692426,
285
+ "grad_norm": 0.5857303142547607,
286
+ "learning_rate": 1.8856269025146182e-05,
287
+ "loss": 0.8738,
288
+ "mean_token_accuracy": 0.750712414085865,
289
+ "step": 175
290
+ },
291
+ {
292
+ "epoch": 0.2461959309283638,
293
+ "grad_norm": 0.5462734699249268,
294
+ "learning_rate": 1.874271748322951e-05,
295
+ "loss": 0.8523,
296
+ "mean_token_accuracy": 0.7573387727141381,
297
+ "step": 180
298
+ },
299
+ {
300
+ "epoch": 0.253034706787485,
301
+ "grad_norm": 0.5329501032829285,
302
+ "learning_rate": 1.8624168648794833e-05,
303
+ "loss": 0.8671,
304
+ "mean_token_accuracy": 0.7528049916028976,
305
+ "step": 185
306
+ },
307
+ {
308
+ "epoch": 0.25987348264660626,
309
+ "grad_norm": 0.516778290271759,
310
+ "learning_rate": 1.8500690283748502e-05,
311
+ "loss": 0.8363,
312
+ "mean_token_accuracy": 0.7607189759612083,
313
+ "step": 190
314
+ },
315
+ {
316
+ "epoch": 0.2667122585057275,
317
+ "grad_norm": 0.5503281354904175,
318
+ "learning_rate": 1.837235296769131e-05,
319
+ "loss": 0.8646,
320
+ "mean_token_accuracy": 0.7525520265102387,
321
+ "step": 195
322
+ },
323
+ {
324
+ "epoch": 0.2735510343648487,
325
+ "grad_norm": 0.5709648728370667,
326
+ "learning_rate": 1.8239230057575542e-05,
327
+ "loss": 0.8547,
328
+ "mean_token_accuracy": 0.7552181035280228,
329
+ "step": 200
330
+ },
331
+ {
332
+ "epoch": 0.2803898102239699,
333
+ "grad_norm": 0.5268268585205078,
334
+ "learning_rate": 1.810139764577454e-05,
335
+ "loss": 0.8569,
336
+ "mean_token_accuracy": 0.7549711287021637,
337
+ "step": 205
338
+ },
339
+ {
340
+ "epoch": 0.28722858608309115,
341
+ "grad_norm": 0.5442251563072205,
342
+ "learning_rate": 1.7958934516588665e-05,
343
+ "loss": 0.8596,
344
+ "mean_token_accuracy": 0.7541473567485809,
345
+ "step": 210
346
+ },
347
+ {
348
+ "epoch": 0.29406736194221234,
349
+ "grad_norm": 0.5293693542480469,
350
+ "learning_rate": 1.7811922101212622e-05,
351
+ "loss": 0.8692,
352
+ "mean_token_accuracy": 0.7512467160820961,
353
+ "step": 215
354
+ },
355
+ {
356
+ "epoch": 0.3009061378013336,
357
+ "grad_norm": 0.5479506850242615,
358
+ "learning_rate": 1.766044443118978e-05,
359
+ "loss": 0.8619,
360
+ "mean_token_accuracy": 0.7537136003375053,
361
+ "step": 220
362
+ },
363
+ {
364
+ "epoch": 0.30774491366045476,
365
+ "grad_norm": 0.5542939901351929,
366
+ "learning_rate": 1.75045880903802e-05,
367
+ "loss": 0.8419,
368
+ "mean_token_accuracy": 0.7578989654779434,
369
+ "step": 225
370
+ },
371
+ {
372
+ "epoch": 0.314583689519576,
373
+ "grad_norm": 0.5070068836212158,
374
+ "learning_rate": 1.7344442165469714e-05,
375
+ "loss": 0.8448,
376
+ "mean_token_accuracy": 0.7581931978464127,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 0.32142246537869723,
381
+ "grad_norm": 0.5048102140426636,
382
+ "learning_rate": 1.7180098195048458e-05,
383
+ "loss": 0.8317,
384
+ "mean_token_accuracy": 0.7602518483996391,
385
+ "step": 235
386
+ },
387
+ {
388
+ "epoch": 0.3282612412378184,
389
+ "grad_norm": 0.5216901302337646,
390
+ "learning_rate": 1.7011650117287868e-05,
391
+ "loss": 0.8427,
392
+ "mean_token_accuracy": 0.7580358654260635,
393
+ "step": 240
394
+ },
395
+ {
396
+ "epoch": 0.33510001709693965,
397
+ "grad_norm": 0.5171140432357788,
398
+ "learning_rate": 1.683919421624611e-05,
399
+ "loss": 0.8286,
400
+ "mean_token_accuracy": 0.7620488360524178,
401
+ "step": 245
402
+ },
403
+ {
404
+ "epoch": 0.3419387929560609,
405
+ "grad_norm": 0.5429340600967407,
406
+ "learning_rate": 1.6662829066832595e-05,
407
+ "loss": 0.8452,
408
+ "mean_token_accuracy": 0.7569812595844269,
409
+ "step": 250
410
+ },
411
+ {
412
+ "epoch": 0.34877756881518207,
413
+ "grad_norm": 0.5294564962387085,
414
+ "learning_rate": 1.648265547846308e-05,
415
+ "loss": 0.8478,
416
+ "mean_token_accuracy": 0.756881557404995,
417
+ "step": 255
418
+ },
419
+ {
420
+ "epoch": 0.3556163446743033,
421
+ "grad_norm": 0.530605137348175,
422
+ "learning_rate": 1.6298776437437526e-05,
423
+ "loss": 0.8495,
424
+ "mean_token_accuracy": 0.7560626447200776,
425
+ "step": 260
426
+ },
427
+ {
428
+ "epoch": 0.3624551205334245,
429
+ "grad_norm": 0.5185920000076294,
430
+ "learning_rate": 1.611129704807362e-05,
431
+ "loss": 0.8438,
432
+ "mean_token_accuracy": 0.7576473146677017,
433
+ "step": 265
434
+ },
435
+ {
436
+ "epoch": 0.36929389639254573,
437
+ "grad_norm": 0.5438268184661865,
438
+ "learning_rate": 1.592032447262973e-05,
439
+ "loss": 0.8481,
440
+ "mean_token_accuracy": 0.7563759610056877,
441
+ "step": 270
442
+ },
443
+ {
444
+ "epoch": 0.37613267225166697,
445
+ "grad_norm": 0.5615527033805847,
446
+ "learning_rate": 1.572596787005149e-05,
447
+ "loss": 0.8418,
448
+ "mean_token_accuracy": 0.757819227874279,
449
+ "step": 275
450
+ },
451
+ {
452
+ "epoch": 0.38297144811078815,
453
+ "grad_norm": 0.48962655663490295,
454
+ "learning_rate": 1.55283383335771e-05,
455
+ "loss": 0.8406,
456
+ "mean_token_accuracy": 0.7577673494815826,
457
+ "step": 280
458
+ },
459
+ {
460
+ "epoch": 0.3898102239699094,
461
+ "grad_norm": 0.4873958230018616,
462
+ "learning_rate": 1.5327548827237008e-05,
463
+ "loss": 0.8389,
464
+ "mean_token_accuracy": 0.7587939977645874,
465
+ "step": 285
466
+ },
467
+ {
468
+ "epoch": 0.3966489998290306,
469
+ "grad_norm": 0.49630922079086304,
470
+ "learning_rate": 1.512371412128424e-05,
471
+ "loss": 0.8501,
472
+ "mean_token_accuracy": 0.7546382084488868,
473
+ "step": 290
474
+ },
475
+ {
476
+ "epoch": 0.4034877756881518,
477
+ "grad_norm": 0.4898976683616638,
478
+ "learning_rate": 1.4916950726592322e-05,
479
+ "loss": 0.8335,
480
+ "mean_token_accuracy": 0.7595216274261475,
481
+ "step": 295
482
+ },
483
+ {
484
+ "epoch": 0.41032655154727304,
485
+ "grad_norm": 0.486189603805542,
486
+ "learning_rate": 1.4707376828058264e-05,
487
+ "loss": 0.8334,
488
+ "mean_token_accuracy": 0.7587142795324325,
489
+ "step": 300
490
+ },
491
+ {
492
+ "epoch": 0.4171653274063943,
493
+ "grad_norm": 0.4933895170688629,
494
+ "learning_rate": 1.449511221704866e-05,
495
+ "loss": 0.8543,
496
+ "mean_token_accuracy": 0.7549725160002708,
497
+ "step": 305
498
+ },
499
+ {
500
+ "epoch": 0.42400410326551546,
501
+ "grad_norm": 0.4862573444843292,
502
+ "learning_rate": 1.428027822292758e-05,
503
+ "loss": 0.8328,
504
+ "mean_token_accuracy": 0.7595344498753548,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 0.4308428791246367,
509
+ "grad_norm": 0.48415231704711914,
510
+ "learning_rate": 1.4062997643705308e-05,
511
+ "loss": 0.8282,
512
+ "mean_token_accuracy": 0.7610621720552444,
513
+ "step": 315
514
+ },
515
+ {
516
+ "epoch": 0.4376816549837579,
517
+ "grad_norm": 0.5168640613555908,
518
+ "learning_rate": 1.3843394675847635e-05,
519
+ "loss": 0.8472,
520
+ "mean_token_accuracy": 0.7552296608686447,
521
+ "step": 320
522
+ },
523
+ {
524
+ "epoch": 0.4445204308428791,
525
+ "grad_norm": 0.5019128918647766,
526
+ "learning_rate": 1.3621594843285801e-05,
527
+ "loss": 0.8287,
528
+ "mean_token_accuracy": 0.7620115980505944,
529
+ "step": 325
530
+ },
531
+ {
532
+ "epoch": 0.45135920670200036,
533
+ "grad_norm": 0.5697439312934875,
534
+ "learning_rate": 1.3397724925667657e-05,
535
+ "loss": 0.8361,
536
+ "mean_token_accuracy": 0.7582670867443084,
537
+ "step": 330
538
+ },
539
+ {
540
+ "epoch": 0.45819798256112154,
541
+ "grad_norm": 0.5009385943412781,
542
+ "learning_rate": 1.3171912885891063e-05,
543
+ "loss": 0.8402,
544
+ "mean_token_accuracy": 0.756958456337452,
545
+ "step": 335
546
+ },
547
+ {
548
+ "epoch": 0.4650367584202428,
549
+ "grad_norm": 0.5170038938522339,
550
+ "learning_rate": 1.2944287796960949e-05,
551
+ "loss": 0.8277,
552
+ "mean_token_accuracy": 0.7602688521146774,
553
+ "step": 340
554
+ },
555
+ {
556
+ "epoch": 0.471875534279364,
557
+ "grad_norm": 0.500216007232666,
558
+ "learning_rate": 1.2714979768211854e-05,
559
+ "loss": 0.8267,
560
+ "mean_token_accuracy": 0.760731266438961,
561
+ "step": 345
562
+ },
563
+ {
564
+ "epoch": 0.4787143101384852,
565
+ "grad_norm": 0.5052452683448792,
566
+ "learning_rate": 1.2484119870938102e-05,
567
+ "loss": 0.8289,
568
+ "mean_token_accuracy": 0.760765828192234,
569
+ "step": 350
570
+ },
571
+ {
572
+ "epoch": 0.48555308599760644,
573
+ "grad_norm": 0.47336429357528687,
574
+ "learning_rate": 1.2251840063474108e-05,
575
+ "loss": 0.8225,
576
+ "mean_token_accuracy": 0.7622879460453987,
577
+ "step": 355
578
+ },
579
+ {
580
+ "epoch": 0.4923918618567276,
581
+ "grad_norm": 0.4850134551525116,
582
+ "learning_rate": 1.2018273115767673e-05,
583
+ "loss": 0.8289,
584
+ "mean_token_accuracy": 0.7606528863310814,
585
+ "step": 360
586
+ },
587
+ {
588
+ "epoch": 0.49923063771584886,
589
+ "grad_norm": 0.49699971079826355,
590
+ "learning_rate": 1.1783552533489372e-05,
591
+ "loss": 0.8221,
592
+ "mean_token_accuracy": 0.762169836461544,
593
+ "step": 365
594
+ },
595
+ {
596
+ "epoch": 0.50606941357497,
597
+ "grad_norm": 0.5126599073410034,
598
+ "learning_rate": 1.1547812481721387e-05,
599
+ "loss": 0.8363,
600
+ "mean_token_accuracy": 0.757779236137867,
601
+ "step": 370
602
+ },
603
+ {
604
+ "epoch": 0.5129081894340913,
605
+ "grad_norm": 0.5152341723442078,
606
+ "learning_rate": 1.1311187708269442e-05,
607
+ "loss": 0.8414,
608
+ "mean_token_accuracy": 0.756208673119545,
609
+ "step": 375
610
+ },
611
+ {
612
+ "epoch": 0.5197469652932125,
613
+ "grad_norm": 0.47980859875679016,
614
+ "learning_rate": 1.1073813466641633e-05,
615
+ "loss": 0.8262,
616
+ "mean_token_accuracy": 0.7605276107788086,
617
+ "step": 380
618
+ },
619
+ {
620
+ "epoch": 0.5265857411523337,
621
+ "grad_norm": 0.5068893432617188,
622
+ "learning_rate": 1.0835825438738232e-05,
623
+ "loss": 0.8334,
624
+ "mean_token_accuracy": 0.7591528907418251,
625
+ "step": 385
626
+ },
627
+ {
628
+ "epoch": 0.533424517011455,
629
+ "grad_norm": 0.4852905571460724,
630
+ "learning_rate": 1.0597359657296602e-05,
631
+ "loss": 0.8242,
632
+ "mean_token_accuracy": 0.7600131437182427,
633
+ "step": 390
634
+ },
635
+ {
636
+ "epoch": 0.5402632928705762,
637
+ "grad_norm": 0.4789692461490631,
638
+ "learning_rate": 1.0358552428135576e-05,
639
+ "loss": 0.8224,
640
+ "mean_token_accuracy": 0.7617440149188042,
641
+ "step": 395
642
+ },
643
+ {
644
+ "epoch": 0.5471020687296974,
645
+ "grad_norm": 0.5026122331619263,
646
+ "learning_rate": 1.0119540252243755e-05,
647
+ "loss": 0.8374,
648
+ "mean_token_accuracy": 0.7577085196971893,
649
+ "step": 400
650
+ },
651
+ {
652
+ "epoch": 0.5539408445888186,
653
+ "grad_norm": 0.5124115347862244,
654
+ "learning_rate": 9.880459747756247e-06,
655
+ "loss": 0.8158,
656
+ "mean_token_accuracy": 0.7633502900600433,
657
+ "step": 405
658
+ },
659
+ {
660
+ "epoch": 0.5607796204479398,
661
+ "grad_norm": 0.46995291113853455,
662
+ "learning_rate": 9.641447571864429e-06,
663
+ "loss": 0.8309,
664
+ "mean_token_accuracy": 0.7596037566661835,
665
+ "step": 410
666
+ },
667
+ {
668
+ "epoch": 0.567618396307061,
669
+ "grad_norm": 0.4657708704471588,
670
+ "learning_rate": 9.402640342703401e-06,
671
+ "loss": 0.8258,
672
+ "mean_token_accuracy": 0.7604287952184677,
673
+ "step": 415
674
+ },
675
+ {
676
+ "epoch": 0.5744571721661823,
677
+ "grad_norm": 0.4966000020503998,
678
+ "learning_rate": 9.164174561261771e-06,
679
+ "loss": 0.8191,
680
+ "mean_token_accuracy": 0.7622967541217804,
681
+ "step": 420
682
+ },
683
+ {
684
+ "epoch": 0.5812959480253035,
685
+ "grad_norm": 0.5118598937988281,
686
+ "learning_rate": 8.92618653335837e-06,
687
+ "loss": 0.8145,
688
+ "mean_token_accuracy": 0.7636432871222496,
689
+ "step": 425
690
+ },
691
+ {
692
+ "epoch": 0.5881347238844247,
693
+ "grad_norm": 0.47218063473701477,
694
+ "learning_rate": 8.688812291730565e-06,
695
+ "loss": 0.8175,
696
+ "mean_token_accuracy": 0.7625760570168495,
697
+ "step": 430
698
+ },
699
+ {
700
+ "epoch": 0.5949734997435459,
701
+ "grad_norm": 0.4943382441997528,
702
+ "learning_rate": 8.452187518278615e-06,
703
+ "loss": 0.8282,
704
+ "mean_token_accuracy": 0.7598957225680352,
705
+ "step": 435
706
+ },
707
+ {
708
+ "epoch": 0.6018122756026671,
709
+ "grad_norm": 0.4632488489151001,
710
+ "learning_rate": 8.216447466510633e-06,
711
+ "loss": 0.8166,
712
+ "mean_token_accuracy": 0.7627444177865982,
713
+ "step": 440
714
+ },
715
+ {
716
+ "epoch": 0.6086510514617883,
717
+ "grad_norm": 0.46436184644699097,
718
+ "learning_rate": 7.981726884232328e-06,
719
+ "loss": 0.8141,
720
+ "mean_token_accuracy": 0.7631665468215942,
721
+ "step": 445
722
+ },
723
+ {
724
+ "epoch": 0.6154898273209095,
725
+ "grad_norm": 0.4806179702281952,
726
+ "learning_rate": 7.748159936525896e-06,
727
+ "loss": 0.8158,
728
+ "mean_token_accuracy": 0.762781310081482,
729
+ "step": 450
730
+ },
731
+ {
732
+ "epoch": 0.6223286031800308,
733
+ "grad_norm": 0.4713290333747864,
734
+ "learning_rate": 7.5158801290619e-06,
735
+ "loss": 0.8197,
736
+ "mean_token_accuracy": 0.7615951552987099,
737
+ "step": 455
738
+ },
739
+ {
740
+ "epoch": 0.629167379039152,
741
+ "grad_norm": 0.46885767579078674,
742
+ "learning_rate": 7.285020231788149e-06,
743
+ "loss": 0.8073,
744
+ "mean_token_accuracy": 0.7656721040606499,
745
+ "step": 460
746
+ },
747
+ {
748
+ "epoch": 0.6360061548982732,
749
+ "grad_norm": 0.495272159576416,
750
+ "learning_rate": 7.0557122030390545e-06,
751
+ "loss": 0.8063,
752
+ "mean_token_accuracy": 0.765167286992073,
753
+ "step": 465
754
+ },
755
+ {
756
+ "epoch": 0.6428449307573945,
757
+ "grad_norm": 0.47429198026657104,
758
+ "learning_rate": 6.8280871141089415e-06,
759
+ "loss": 0.8026,
760
+ "mean_token_accuracy": 0.7673456132411957,
761
+ "step": 470
762
+ },
763
+ {
764
+ "epoch": 0.6496837066165156,
765
+ "grad_norm": 0.48846620321273804,
766
+ "learning_rate": 6.602275074332345e-06,
767
+ "loss": 0.8201,
768
+ "mean_token_accuracy": 0.7618905574083328,
769
+ "step": 475
770
+ },
771
+ {
772
+ "epoch": 0.6565224824756368,
773
+ "grad_norm": 0.5203046798706055,
774
+ "learning_rate": 6.378405156714202e-06,
775
+ "loss": 0.8054,
776
+ "mean_token_accuracy": 0.7656926274299621,
777
+ "step": 480
778
+ },
779
+ {
780
+ "epoch": 0.6633612583347581,
781
+ "grad_norm": 0.481134295463562,
782
+ "learning_rate": 6.156605324152369e-06,
783
+ "loss": 0.7985,
784
+ "mean_token_accuracy": 0.7675068721175193,
785
+ "step": 485
786
+ },
787
+ {
788
+ "epoch": 0.6702000341938793,
789
+ "grad_norm": 0.465944766998291,
790
+ "learning_rate": 5.937002356294699e-06,
791
+ "loss": 0.8082,
792
+ "mean_token_accuracy": 0.7647843450307846,
793
+ "step": 490
794
+ },
795
+ {
796
+ "epoch": 0.6770388100530005,
797
+ "grad_norm": 0.4643535614013672,
798
+ "learning_rate": 5.719721777072425e-06,
799
+ "loss": 0.8081,
800
+ "mean_token_accuracy": 0.7649053990840912,
801
+ "step": 495
802
+ },
803
+ {
804
+ "epoch": 0.6838775859121218,
805
+ "grad_norm": 0.48063191771507263,
806
+ "learning_rate": 5.504887782951343e-06,
807
+ "loss": 0.8067,
808
+ "mean_token_accuracy": 0.765461964905262,
809
+ "step": 500
810
+ },
811
+ {
812
+ "epoch": 0.690716361771243,
813
+ "grad_norm": 0.47620853781700134,
814
+ "learning_rate": 5.29262317194174e-06,
815
+ "loss": 0.8017,
816
+ "mean_token_accuracy": 0.7668887257575989,
817
+ "step": 505
818
+ },
819
+ {
820
+ "epoch": 0.6975551376303641,
821
+ "grad_norm": 0.46232712268829346,
822
+ "learning_rate": 5.083049273407681e-06,
823
+ "loss": 0.8034,
824
+ "mean_token_accuracy": 0.7657584965229034,
825
+ "step": 510
826
+ },
827
+ {
828
+ "epoch": 0.7043939134894854,
829
+ "grad_norm": 0.45751717686653137,
830
+ "learning_rate": 4.876285878715764e-06,
831
+ "loss": 0.8035,
832
+ "mean_token_accuracy": 0.766502857208252,
833
+ "step": 515
834
+ },
835
+ {
836
+ "epoch": 0.7112326893486066,
837
+ "grad_norm": 0.4558106064796448,
838
+ "learning_rate": 4.672451172762998e-06,
839
+ "loss": 0.8103,
840
+ "mean_token_accuracy": 0.7645265579223632,
841
+ "step": 520
842
+ },
843
+ {
844
+ "epoch": 0.7180714652077278,
845
+ "grad_norm": 0.4815528988838196,
846
+ "learning_rate": 4.471661666422899e-06,
847
+ "loss": 0.814,
848
+ "mean_token_accuracy": 0.7626894161105156,
849
+ "step": 525
850
+ },
851
+ {
852
+ "epoch": 0.724910241066849,
853
+ "grad_norm": 0.4826851189136505,
854
+ "learning_rate": 4.274032129948512e-06,
855
+ "loss": 0.8078,
856
+ "mean_token_accuracy": 0.7646446511149406,
857
+ "step": 530
858
+ },
859
+ {
860
+ "epoch": 0.7317490169259703,
861
+ "grad_norm": 0.4646717309951782,
862
+ "learning_rate": 4.079675527370273e-06,
863
+ "loss": 0.8139,
864
+ "mean_token_accuracy": 0.7622727945446968,
865
+ "step": 535
866
+ },
867
+ {
868
+ "epoch": 0.7385877927850915,
869
+ "grad_norm": 0.48112139105796814,
870
+ "learning_rate": 3.888702951926384e-06,
871
+ "loss": 0.7982,
872
+ "mean_token_accuracy": 0.7667050585150719,
873
+ "step": 540
874
+ },
875
+ {
876
+ "epoch": 0.7454265686442126,
877
+ "grad_norm": 0.47457870841026306,
878
+ "learning_rate": 3.701223562562478e-06,
879
+ "loss": 0.8082,
880
+ "mean_token_accuracy": 0.7652096658945083,
881
+ "step": 545
882
+ },
883
+ {
884
+ "epoch": 0.7522653445033339,
885
+ "grad_norm": 0.48485058546066284,
886
+ "learning_rate": 3.5173445215369183e-06,
887
+ "loss": 0.8027,
888
+ "mean_token_accuracy": 0.7657500252127647,
889
+ "step": 550
890
+ },
891
+ {
892
+ "epoch": 0.7591041203624551,
893
+ "grad_norm": 0.49953824281692505,
894
+ "learning_rate": 3.3371709331674075e-06,
895
+ "loss": 0.8036,
896
+ "mean_token_accuracy": 0.7657319813966751,
897
+ "step": 555
898
+ },
899
+ {
900
+ "epoch": 0.7659428962215763,
901
+ "grad_norm": 0.45230066776275635,
902
+ "learning_rate": 3.1608057837538976e-06,
903
+ "loss": 0.8029,
904
+ "mean_token_accuracy": 0.7664909601211548,
905
+ "step": 560
906
+ },
907
+ {
908
+ "epoch": 0.7727816720806976,
909
+ "grad_norm": 0.4862216114997864,
910
+ "learning_rate": 2.988349882712135e-06,
911
+ "loss": 0.82,
912
+ "mean_token_accuracy": 0.7606521427631379,
913
+ "step": 565
914
+ },
915
+ {
916
+ "epoch": 0.7796204479398188,
917
+ "grad_norm": 0.4636688232421875,
918
+ "learning_rate": 2.819901804951547e-06,
919
+ "loss": 0.7885,
920
+ "mean_token_accuracy": 0.7696284845471382,
921
+ "step": 570
922
+ },
923
+ {
924
+ "epoch": 0.78645922379894,
925
+ "grad_norm": 0.46034541726112366,
926
+ "learning_rate": 2.655557834530288e-06,
927
+ "loss": 0.8148,
928
+ "mean_token_accuracy": 0.7626620322465897,
929
+ "step": 575
930
+ },
931
+ {
932
+ "epoch": 0.7932979996580612,
933
+ "grad_norm": 0.46980300545692444,
934
+ "learning_rate": 2.495411909619804e-06,
935
+ "loss": 0.8209,
936
+ "mean_token_accuracy": 0.760961389541626,
937
+ "step": 580
938
+ },
939
+ {
940
+ "epoch": 0.8001367755171824,
941
+ "grad_norm": 0.4673324227333069,
942
+ "learning_rate": 2.339555568810221e-06,
943
+ "loss": 0.8131,
944
+ "mean_token_accuracy": 0.7633547097444534,
945
+ "step": 585
946
+ },
947
+ {
948
+ "epoch": 0.8069755513763036,
949
+ "grad_norm": 0.46234130859375,
950
+ "learning_rate": 2.1880778987873806e-06,
951
+ "loss": 0.7929,
952
+ "mean_token_accuracy": 0.7690023958683014,
953
+ "step": 590
954
+ },
955
+ {
956
+ "epoch": 0.8138143272354249,
957
+ "grad_norm": 0.4616515338420868,
958
+ "learning_rate": 2.0410654834113362e-06,
959
+ "loss": 0.7883,
960
+ "mean_token_accuracy": 0.7698821023106575,
961
+ "step": 595
962
+ },
963
+ {
964
+ "epoch": 0.8206531030945461,
965
+ "grad_norm": 0.4555530548095703,
966
+ "learning_rate": 1.8986023542254617e-06,
967
+ "loss": 0.7933,
968
+ "mean_token_accuracy": 0.7684802070260048,
969
+ "step": 600
970
+ },
971
+ {
972
+ "epoch": 0.8274918789536673,
973
+ "grad_norm": 0.45201539993286133,
974
+ "learning_rate": 1.7607699424244583e-06,
975
+ "loss": 0.8116,
976
+ "mean_token_accuracy": 0.7643455028533935,
977
+ "step": 605
978
+ },
979
+ {
980
+ "epoch": 0.8343306548127886,
981
+ "grad_norm": 0.47511157393455505,
982
+ "learning_rate": 1.6276470323086936e-06,
983
+ "loss": 0.798,
984
+ "mean_token_accuracy": 0.7679879561066627,
985
+ "step": 610
986
+ },
987
+ {
988
+ "epoch": 0.8411694306719097,
989
+ "grad_norm": 0.443029522895813,
990
+ "learning_rate": 1.499309716251498e-06,
991
+ "loss": 0.8061,
992
+ "mean_token_accuracy": 0.7647668555378914,
993
+ "step": 615
994
+ },
995
+ {
996
+ "epoch": 0.8480082065310309,
997
+ "grad_norm": 0.4504885673522949,
998
+ "learning_rate": 1.3758313512051702e-06,
999
+ "loss": 0.7991,
1000
+ "mean_token_accuracy": 0.7665026307106018,
1001
+ "step": 620
1002
+ },
1003
+ {
1004
+ "epoch": 0.8548469823901521,
1005
+ "grad_norm": 0.4775781035423279,
1006
+ "learning_rate": 1.257282516770494e-06,
1007
+ "loss": 0.8099,
1008
+ "mean_token_accuracy": 0.7643145814538002,
1009
+ "step": 625
1010
+ },
1011
+ {
1012
+ "epoch": 0.8616857582492734,
1013
+ "grad_norm": 0.4560346305370331,
1014
+ "learning_rate": 1.1437309748538205e-06,
1015
+ "loss": 0.788,
1016
+ "mean_token_accuracy": 0.7699791714549065,
1017
+ "step": 630
1018
+ },
1019
+ {
1020
+ "epoch": 0.8685245341083946,
1021
+ "grad_norm": 0.46563640236854553,
1022
+ "learning_rate": 1.0352416309347003e-06,
1023
+ "loss": 0.7882,
1024
+ "mean_token_accuracy": 0.7693348750472069,
1025
+ "step": 635
1026
+ },
1027
+ {
1028
+ "epoch": 0.8753633099675158,
1029
+ "grad_norm": 0.4712006449699402,
1030
+ "learning_rate": 9.318764969662475e-07,
1031
+ "loss": 0.7948,
1032
+ "mean_token_accuracy": 0.7693790286779404,
1033
+ "step": 640
1034
+ },
1035
+ {
1036
+ "epoch": 0.8822020858266371,
1037
+ "grad_norm": 0.4499385952949524,
1038
+ "learning_rate": 8.336946559294223e-07,
1039
+ "loss": 0.7904,
1040
+ "mean_token_accuracy": 0.7691716372966766,
1041
+ "step": 645
1042
+ },
1043
+ {
1044
+ "epoch": 0.8890408616857582,
1045
+ "grad_norm": 0.46574312448501587,
1046
+ "learning_rate": 7.40752228061502e-07,
1047
+ "loss": 0.8119,
1048
+ "mean_token_accuracy": 0.7633272022008896,
1049
+ "step": 650
1050
+ },
1051
+ {
1052
+ "epoch": 0.8958796375448794,
1053
+ "grad_norm": 0.4548204243183136,
1054
+ "learning_rate": 6.531023387780433e-07,
1055
+ "loss": 0.7967,
1056
+ "mean_token_accuracy": 0.7682895168662072,
1057
+ "step": 655
1058
+ },
1059
+ {
1060
+ "epoch": 0.9027184134040007,
1061
+ "grad_norm": 0.475398987531662,
1062
+ "learning_rate": 5.707950883066681e-07,
1063
+ "loss": 0.8046,
1064
+ "mean_token_accuracy": 0.7655658751726151,
1065
+ "step": 660
1066
+ },
1067
+ {
1068
+ "epoch": 0.9095571892631219,
1069
+ "grad_norm": 0.4481120705604553,
1070
+ "learning_rate": 4.938775230500192e-07,
1071
+ "loss": 0.7998,
1072
+ "mean_token_accuracy": 0.7667894646525383,
1073
+ "step": 665
1074
+ },
1075
+ {
1076
+ "epoch": 0.9163959651222431,
1077
+ "grad_norm": 0.46256303787231445,
1078
+ "learning_rate": 4.223936086942981e-07,
1079
+ "loss": 0.7981,
1080
+ "mean_token_accuracy": 0.7671072691679001,
1081
+ "step": 670
1082
+ },
1083
+ {
1084
+ "epoch": 0.9232347409813644,
1085
+ "grad_norm": 0.45697101950645447,
1086
+ "learning_rate": 3.5638420507868145e-07,
1087
+ "loss": 0.7947,
1088
+ "mean_token_accuracy": 0.7680518299341201,
1089
+ "step": 675
1090
+ },
1091
+ {
1092
+ "epoch": 0.9300735168404856,
1093
+ "grad_norm": 0.44780802726745605,
1094
+ "learning_rate": 2.9588704284006176e-07,
1095
+ "loss": 0.7935,
1096
+ "mean_token_accuracy": 0.7683573782444,
1097
+ "step": 680
1098
+ },
1099
+ {
1100
+ "epoch": 0.9369122926996067,
1101
+ "grad_norm": 0.4538120925426483,
1102
+ "learning_rate": 2.4093670184640263e-07,
1103
+ "loss": 0.8087,
1104
+ "mean_token_accuracy": 0.7649404585361481,
1105
+ "step": 685
1106
+ },
1107
+ {
1108
+ "epoch": 0.943751068558728,
1109
+ "grad_norm": 0.44170472025871277,
1110
+ "learning_rate": 1.9156459143106598e-07,
1111
+ "loss": 0.8082,
1112
+ "mean_token_accuracy": 0.764902551472187,
1113
+ "step": 690
1114
+ },
1115
+ {
1116
+ "epoch": 0.9505898444178492,
1117
+ "grad_norm": 0.450748473405838,
1118
+ "learning_rate": 1.4779893243939358e-07,
1119
+ "loss": 0.7977,
1120
+ "mean_token_accuracy": 0.7670714050531388,
1121
+ "step": 695
1122
+ },
1123
+ {
1124
+ "epoch": 0.9574286202769704,
1125
+ "grad_norm": 0.4557766020298004,
1126
+ "learning_rate": 1.0966474109782354e-07,
1127
+ "loss": 0.794,
1128
+ "mean_token_accuracy": 0.7679675281047821,
1129
+ "step": 700
1130
+ },
1131
+ {
1132
+ "epoch": 0.9642673961360917,
1133
+ "grad_norm": 0.4443546533584595,
1134
+ "learning_rate": 7.718381471473524e-08,
1135
+ "loss": 0.7979,
1136
+ "mean_token_accuracy": 0.7666744440793991,
1137
+ "step": 705
1138
+ },
1139
+ {
1140
+ "epoch": 0.9711061719952129,
1141
+ "grad_norm": 0.4413938522338867,
1142
+ "learning_rate": 5.037471922122561e-08,
1143
+ "loss": 0.8083,
1144
+ "mean_token_accuracy": 0.7643239527940751,
1145
+ "step": 710
1146
+ },
1147
+ {
1148
+ "epoch": 0.9779449478543341,
1149
+ "grad_norm": 0.4516150653362274,
1150
+ "learning_rate": 2.925277855891695e-08,
1151
+ "loss": 0.8064,
1152
+ "mean_token_accuracy": 0.7641888409852982,
1153
+ "step": 715
1154
+ },
1155
+ {
1156
+ "epoch": 0.9847837237134552,
1157
+ "grad_norm": 0.4401575028896332,
1158
+ "learning_rate": 1.3830065920867886e-08,
1159
+ "loss": 0.8108,
1160
+ "mean_token_accuracy": 0.7633798211812973,
1161
+ "step": 720
1162
+ },
1163
+ {
1164
+ "epoch": 0.9916224995725765,
1165
+ "grad_norm": 0.4507163465023041,
1166
+ "learning_rate": 4.11539685059914e-09,
1167
+ "loss": 0.7984,
1168
+ "mean_token_accuracy": 0.7672414898872375,
1169
+ "step": 725
1170
+ },
1171
+ {
1172
+ "epoch": 0.9984612754316977,
1173
+ "grad_norm": 0.4516432285308838,
1174
+ "learning_rate": 1.1432420317758486e-10,
1175
+ "loss": 0.7992,
1176
+ "mean_token_accuracy": 0.7661866798996926,
1177
+ "step": 730
1178
+ },
1179
+ {
1180
+ "epoch": 0.999829030603522,
1181
+ "mean_token_accuracy": 0.7705116346478462,
1182
+ "step": 731,
1183
+ "total_flos": 72689419026432.0,
1184
+ "train_loss": 0.8537281411617138,
1185
+ "train_runtime": 90647.457,
1186
+ "train_samples_per_second": 1.032,
1187
+ "train_steps_per_second": 0.008
1188
+ }
1189
+ ],
1190
+ "logging_steps": 5,
1191
+ "max_steps": 731,
1192
+ "num_input_tokens_seen": 0,
1193
+ "num_train_epochs": 1,
1194
+ "save_steps": 500,
1195
+ "stateful_callbacks": {
1196
+ "TrainerControl": {
1197
+ "args": {
1198
+ "should_epoch_stop": false,
1199
+ "should_evaluate": false,
1200
+ "should_log": false,
1201
+ "should_save": false,
1202
+ "should_training_stop": false
1203
+ },
1204
+ "attributes": {}
1205
+ }
1206
+ },
1207
+ "total_flos": 72689419026432.0,
1208
+ "train_batch_size": 4,
1209
+ "trial_name": null,
1210
+ "trial_params": null
1211
+ }
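trainer_state.json logs loss, gradient norm, learning rate, and mean token accuracy every 5 steps (`logging_steps` above), ending with a run-level summary. A small sketch for turning that log into a loss curve, assuming the file has been downloaded locally (matplotlib is not a dependency declared anywhere in this commit):

```python
import json
import matplotlib.pyplot as plt

# Load the trainer state saved in this commit (local path is assumed).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries; the final record is the run summary
# and carries "train_loss" rather than "loss".
logs = [entry for entry in state["log_history"] if "loss" in entry and "step" in entry]
steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]

plt.plot(steps, losses)
plt.xlabel("optimizer step")
plt.ylabel("training loss")
plt.title("SFT loss over 731 steps (1 epoch)")
plt.savefig("loss_curve.png")
```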
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f650f8fb455e47eb0fa1edd9fbdc0163319907102454909e90c31141e3a45929
+ size 7544