michaelfeil committed
Commit 2d29b78 · verified · 1 Parent(s): 6a154be

Add files using upload-large-folder tool

config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "architectures": [
+     "Qwen2MoeForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "decoder_sparse_step": 1,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 3584,
+   "initializer_range": 0.02,
+   "intermediate_size": 18944,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 28,
+   "model_type": "qwen2_moe",
+   "moe_intermediate_size": 2560,
+   "norm_topk_prob": false,
+   "num_attention_heads": 28,
+   "num_experts": 64,
+   "num_experts_per_tok": 8,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 4,
+   "output_router_logits": false,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "router_aux_loss_coef": 0.001,
+   "shared_expert_intermediate_size": 20480,
+   "sliding_window": 65536,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.1",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
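
The added config.json describes a Qwen2-MoE decoder: 28 hidden layers, grouped-query attention (28 query heads, 4 KV heads), 64 routed experts with top-8 routing plus a shared expert, bf16 weights, and a 32K context window. A minimal sketch of inspecting these fields with Hugging Face transformers; the repository id below is a hypothetical placeholder, not the actual repo name:

# Hedged sketch: read the uploaded config via transformers' AutoConfig.
# "your-org/qwen2-moe-fp8" is a hypothetical placeholder for this repo's id.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("your-org/qwen2-moe-fp8")
print(config.model_type)                                        # qwen2_moe
print(config.num_hidden_layers, config.hidden_size)             # 28, 3584
print(config.num_experts, config.num_experts_per_tok)           # 64 experts, top-8 routing
print(config.num_attention_heads, config.num_key_value_heads)   # 28 query heads, 4 KV heads (GQA)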
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "pad_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "repetition_penalty": 1.05,
+   "temperature": 0.7,
+   "top_p": 0.8,
+   "top_k": 20,
+   "transformers_version": "4.40.0.dev0"
+ }
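
generation_config.json sets the default sampling behaviour (sampling enabled with temperature 0.7, top_p 0.8, top_k 20, repetition penalty 1.05) and registers both <|im_end|> (151645) and <|endoftext|> (151643) as stop tokens. A hedged sketch of how these defaults surface in transformers; the repo id is again a hypothetical placeholder:

# Hedged sketch: the repo-level generation_config.json is what
# GenerationConfig.from_pretrained() returns and what model.generate()
# falls back to when no explicit sampling arguments are passed.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("your-org/qwen2-moe-fp8")  # hypothetical id
print(gen_cfg.do_sample)                                   # True
print(gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k)   # 0.7, 0.8, 20
print(gen_cfg.eos_token_id)                                # [151645, 151643]

# Passed explicitly, it also overrides a loaded model's defaults:
# model.generate(**inputs, generation_config=gen_cfg)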
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tllm-checkpoint/config.json ADDED
@@ -0,0 +1,194 @@
+ {
+   "producer": {
+     "name": "modelopt",
+     "version": "0.25.0"
+   },
+   "architecture": "Qwen2MoeForCausalLM",
+   "dtype": "bfloat16",
+   "logits_dtype": "float16",
+   "num_hidden_layers": 28,
+   "num_attention_heads": 28,
+   "num_key_value_heads": 4,
+   "hidden_size": 3584,
+   "norm_epsilon": 1e-06,
+   "vocab_size": 151936,
+   "max_position_embeddings": 32768,
+   "hidden_act": "swiglu",
+   "use_parallel_embedding": true,
+   "embedding_sharding_dim": 0,
+   "head_size": 128,
+   "intermediate_size": 18944,
+   "position_embedding_type": "rope_gpt_neox",
+   "share_embedding_table": false,
+   "residual_mlp": false,
+   "bias": false,
+   "rotary_pct": 1.0,
+   "rank": 1,
+   "decoder": "qwen",
+   "rmsnorm": true,
+   "lm_head_bias": false,
+   "mlp_bias": false,
+   "attn_bias": true,
+   "rotary_base": 1000000.0,
+   "rotary_scaling": null,
+   "disable_weight_only_quant_plugin": false,
+   "num_labels": 1,
+   "use_logn_attn": false,
+   "moe": {
+     "num_experts": 64,
+     "shared_expert_intermediate_size": 0,
+     "top_k": 8,
+     "normalization_mode": 0,
+     "sparse_mixer_epsilon": 0.01,
+     "tp_mode": 0,
+     "device_limited_n_group": 0,
+     "device_limited_topk_group": 0,
+     "device_limited_routed_scaling_factor": 1.0
+   },
+   "runtime_defaults": null,
+   "mapping": {
+     "world_size": 2,
+     "gpus_per_node": 8,
+     "cp_size": 1,
+     "tp_size": 2,
+     "pp_size": 1,
+     "moe_tp_size": 2,
+     "moe_ep_size": 1,
+     "auto_parallel": false
+   },
+   "quantization": {
+     "quant_algo": "FP8",
+     "kv_cache_quant_algo": null,
+     "group_size": 128,
+     "smoothquant_val": 0.5,
+     "clamp_val": null,
+     "use_meta_recipe": false,
+     "has_zero_point": false,
+     "pre_quant_scale": false,
+     "exclude_modules": [
+       "transformer.layers.13.mlp.router",
+       "transformer.layers.1.mlp.shared_expert_gate",
+       "transformer.layers.24.mlp.router",
+       "transformer.layers.6.mlp.router",
+       "transformer.layers.25.post_layernorm",
+       "transformer.layers.1.input_layernorm",
+       "transformer.layers.18.post_layernorm",
+       "transformer.layers.6.mlp.shared_expert_gate",
+       "transformer.layers.18.input_layernorm",
+       "transformer.layers.16.input_layernorm",
+       "transformer.layers.21.mlp.router",
+       "transformer.layers.2.mlp.router",
+       "transformer.layers.11.post_layernorm",
+       "transformer.layers.21.input_layernorm",
+       "transformer.layers.6.input_layernorm",
+       "transformer.layers.25.mlp.router",
+       "transformer.layers.26.post_layernorm",
+       "transformer.layers.23.mlp.shared_expert_gate",
+       "transformer.layers.26.mlp.shared_expert_gate",
+       "transformer.layers.23.mlp.router",
+       "transformer.layers.3.mlp.shared_expert_gate",
+       "transformer.vocab_embedding",
+       "transformer.layers.15.mlp.router",
+       "transformer.ln_f",
+       "transformer.layers.25.mlp.shared_expert_gate",
+       "transformer.layers.19.input_layernorm",
+       "transformer.layers.0.post_layernorm",
+       "transformer.layers.22.input_layernorm",
+       "transformer.layers.15.mlp.shared_expert_gate",
+       "transformer.layers.9.mlp.shared_expert_gate",
+       "transformer.layers.26.mlp.router",
+       "transformer.layers.27.mlp.router",
+       "transformer.layers.2.input_layernorm",
+       "transformer.layers.9.input_layernorm",
+       "transformer.layers.16.mlp.shared_expert_gate",
+       "transformer.layers.20.mlp.shared_expert_gate",
+       "transformer.layers.9.post_layernorm",
+       "transformer.layers.17.input_layernorm",
+       "transformer.layers.13.post_layernorm",
+       "transformer.layers.22.mlp.router",
+       "transformer.layers.2.mlp.shared_expert_gate",
+       "transformer.layers.19.mlp.router",
+       "lm_head",
+       "transformer.layers.20.post_layernorm",
+       "transformer.layers.7.post_layernorm",
+       "transformer.layers.7.mlp.router",
+       "transformer.layers.18.mlp.router",
+       "transformer.layers.17.mlp.router",
+       "transformer.layers.24.mlp.shared_expert_gate",
+       "transformer.layers.23.input_layernorm",
+       "transformer.layers.12.mlp.shared_expert_gate",
+       "transformer.layers.4.input_layernorm",
+       "transformer.layers.11.mlp.shared_expert_gate",
+       "transformer.layers.20.mlp.router",
+       "transformer.layers.23.post_layernorm",
+       "transformer.layers.10.mlp.shared_expert_gate",
+       "transformer.layers.13.input_layernorm",
+       "transformer.layers.17.mlp.shared_expert_gate",
+       "transformer.layers.14.mlp.shared_expert_gate",
+       "transformer.layers.4.mlp.shared_expert_gate",
+       "transformer.layers.14.input_layernorm",
+       "transformer.layers.5.input_layernorm",
+       "transformer.layers.6.post_layernorm",
+       "transformer.layers.12.input_layernorm",
+       "transformer.layers.15.post_layernorm",
+       "transformer.layers.17.post_layernorm",
+       "transformer.layers.4.post_layernorm",
+       "transformer.layers.1.post_layernorm",
+       "transformer.layers.18.mlp.shared_expert_gate",
+       "transformer.layers.0.input_layernorm",
+       "transformer.layers.10.mlp.router",
+       "transformer.layers.16.post_layernorm",
+       "transformer.layers.12.mlp.router",
+       "transformer.layers.5.post_layernorm",
+       "transformer.layers.8.post_layernorm",
+       "transformer.layers.16.mlp.router",
+       "transformer.layers.24.input_layernorm",
+       "transformer.layers.22.post_layernorm",
+       "transformer.layers.3.input_layernorm",
+       "transformer.layers.5.mlp.router",
+       "transformer.layers.14.post_layernorm",
+       "transformer.layers.8.mlp.shared_expert_gate",
+       "transformer.layers.19.post_layernorm",
+       "transformer.layers.12.post_layernorm",
+       "transformer.layers.3.post_layernorm",
+       "transformer.layers.24.post_layernorm",
+       "transformer.layers.4.mlp.router",
+       "transformer.layers.11.input_layernorm",
+       "transformer.layers.0.mlp.shared_expert_gate",
+       "transformer.layers.10.post_layernorm",
+       "transformer.layers.2.post_layernorm",
+       "transformer.layers.19.mlp.shared_expert_gate",
+       "transformer.layers.22.mlp.shared_expert_gate",
+       "transformer.layers.26.input_layernorm",
+       "transformer.layers.27.input_layernorm",
+       "transformer.layers.21.post_layernorm",
+       "transformer.layers.9.mlp.router",
+       "transformer.layers.5.mlp.shared_expert_gate",
+       "transformer.layers.27.mlp.shared_expert_gate",
+       "transformer.layers.7.input_layernorm",
+       "transformer.layers.3.mlp.router",
+       "transformer.layers.7.mlp.shared_expert_gate",
+       "transformer.layers.8.mlp.router",
+       "transformer.layers.8.input_layernorm",
+       "transformer.layers.21.mlp.shared_expert_gate",
+       "transformer.layers.27.post_layernorm",
+       "transformer.layers.20.input_layernorm",
+       "transformer.layers.0.mlp.router",
+       "transformer.layers.25.input_layernorm",
+       "transformer.layers.14.mlp.router",
+       "transformer.layers.1.mlp.router",
+       "transformer.layers.13.mlp.shared_expert_gate",
+       "transformer.layers.11.mlp.router",
+       "transformer.layers.10.input_layernorm",
+       "transformer.layers.15.input_layernorm"
+     ]
+   },
+   "qk_layernorm": false,
+   "rotary_embedding_dim": 128,
+   "seq_length": 8192,
+   "qwen_type": "qwen2_moe",
+   "moe_intermediate_size": 2560,
+   "moe_shared_expert_intermediate_size": 20480,
+   "tie_word_embeddings": false,
+   "model_type": "qwen"
+ }
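
tllm-checkpoint/config.json is a TensorRT-LLM checkpoint description produced by modelopt 0.25.0: FP8 weight quantization (KV cache left unquantized) with the routers, layernorms, shared-expert gates, vocab embedding, and lm_head excluded, and a tensor-parallel mapping of world_size 2 (tp_size 2, pp_size 1), which is why the checkpoint ships one safetensors shard per rank (rank0/rank1 below). A hedged, stdlib-only sketch that summarizes the mapping and quantization sections after downloading the repo; the local path is a placeholder:

# Sketch using only the Python standard library: summarize the TensorRT-LLM
# checkpoint config. The path assumes the repo has been downloaded locally.
import json
from pathlib import Path

cfg = json.loads(Path("tllm-checkpoint/config.json").read_text())

mapping = cfg["mapping"]
quant = cfg["quantization"]
print(f"world_size={mapping['world_size']} (tp={mapping['tp_size']}, pp={mapping['pp_size']})")
print(f"quant_algo={quant['quant_algo']}, kv_cache_quant={quant['kv_cache_quant_algo']}")
print(f"{len(quant['exclude_modules'])} modules kept unquantized (routers, norms, gates, lm_head, ...)")
# With world_size=2, an engine built from this checkpoint has to be launched
# across 2 GPUs, one rank shard each (e.g. via mpirun -n 2).

Building the actual engine is typically done with TensorRT-LLM's trtllm-build tool pointed at this checkpoint directory; the exact flags depend on the installed TensorRT-LLM version, so consult its documentation.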
tllm-checkpoint/rank0.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:144703e2b8bf6ce02fea53206a4e8c73e861079aa132032fdab31077024c9b83
+ size 38520404352
tllm-checkpoint/rank1.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7dd89ce139dd77badd5932c8f495e8a309f9f7f74c6081100d449c746414b8fd
+ size 38520404352
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": ["<|im_start|>", "<|im_end|>"],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 65536,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
+ 
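
tokenizer_config.json registers the ChatML control tokens (<|im_start|>, <|im_end|>), sets <|im_end|> as the chat eos token, and ships a ChatML chat_template that injects a default system prompt when none is provided. A hedged sketch of rendering a conversation with it; the repo id is a hypothetical placeholder:

# Sketch: render a ChatML prompt with the shipped chat_template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-org/qwen2-moe-fp8")  # hypothetical id
messages = [{"role": "user", "content": "Give me a haiku about GPUs."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Give me a haiku about GPUs.<|im_end|>
# <|im_start|>assistant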
vocab.json ADDED
The diff for this file is too large to render. See raw diff