MrezaPRZ committed on
Commit 83f3505 · verified · 1 Parent(s): 71e0a75

Upload DeepseekV2ForCausalLM

config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "_name_or_path": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",
+   "architectures": [
+     "DeepseekV2ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--configuration_deepseek.DeepseekV2Config",
+     "AutoModel": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--modeling_deepseek.DeepseekV2Model",
+     "AutoModelForCausalLM": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--modeling_deepseek.DeepseekV2ForCausalLM"
+   },
+   "aux_loss_alpha": 0.001,
+   "bos_token_id": 100000,
+   "eos_token_id": 100001,
+   "ep_size": 1,
+   "first_k_dense_replace": 1,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 10944,
+   "kv_lora_rank": 512,
+   "max_position_embeddings": 163840,
+   "model_type": "deepseek_v2",
+   "moe_intermediate_size": 1408,
+   "moe_layer_freq": 1,
+   "n_group": 1,
+   "n_routed_experts": 64,
+   "n_shared_experts": 2,
+   "norm_topk_prob": false,
+   "num_attention_heads": 16,
+   "num_experts_per_tok": 6,
+   "num_hidden_layers": 27,
+   "num_key_value_heads": 16,
+   "pretraining_tp": 1,
+   "q_lora_rank": null,
+   "qk_nope_head_dim": 128,
+   "qk_rope_head_dim": 64,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "beta_fast": 32,
+     "beta_slow": 1,
+     "factor": 40,
+     "mscale": 0.707,
+     "mscale_all_dim": 0.707,
+     "original_max_position_embeddings": 4096,
+     "type": "yarn"
+   },
+   "rope_theta": 10000,
+   "routed_scaling_factor": 1.0,
+   "scoring_func": "softmax",
+   "seq_aux": true,
+   "tie_word_embeddings": false,
+   "topk_group": 1,
+   "topk_method": "greedy",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.42.3",
+   "use_cache": true,
+   "v_head_dim": 128,
+   "vocab_size": 102400
+ }
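
The config above wires the checkpoint to DeepSeek's custom modeling code via auto_map, so loading it with transformers 4.42.x needs trust_remote_code=True. Note also the YaRN rope_scaling block: 4096 original positions times a factor of 40 yields the 163840 max_position_embeddings. A minimal loading sketch; the repo id is a placeholder, since the commit page does not show the repository name:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repo id: the commit page does not show the repository name.
repo_id = "MrezaPRZ/<this-repo>"

tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    trust_remote_code=True,      # auto_map resolves to DeepSeek's remote modeling files
    device_map="auto",           # needs accelerate; the seven shards below total ~31 GB
)
```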
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 100000,
+   "do_sample": true,
+   "eos_token_id": 100001,
+   "temperature": 0.3,
+   "top_p": 0.95,
+   "transformers_version": "4.42.3"
+ }
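
These defaults (do_sample=True, temperature=0.3, top_p=0.95) are picked up automatically by model.generate() whenever no explicit values are passed; any argument given per call overrides them. Continuing the loading sketch above, and assuming the chat template shipped with the base instruct model:

```python
messages = [{"role": "user", "content": "Write a function that merges two sorted lists."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

sampled = model.generate(input_ids, max_new_tokens=256)  # uses do_sample=True, temperature=0.3, top_p=0.95
greedy = model.generate(input_ids, max_new_tokens=256, do_sample=False)  # per-call override: greedy decoding
print(tokenizer.decode(sampled[0], skip_special_tokens=True))
```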
model-00001-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d84ec3ddb2b0315799aeaff7ab2cadc3d9147c1ff43e46a4d0eb7b2fb56380d
+ size 4994763632
model-00002-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d5883900318d9bd7035f5aed429b46e3cd1d722f04c4cd8c87ef2e9dfb8dc18
+ size 4995044944
model-00003-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c681dc11fe1eb58d484a3ca10cbacd5e10b0f9fd47a15f18e775b6bed3fa679d
+ size 4996085000
model-00004-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:348dbdece9c4c4c5e80a6453d1279d033b62a54307500c96cb93d330fcd02447
+ size 4996085224
model-00005-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b6705b0c9569cf437ff520f67687b31bd234561e9b756fe6656edc2e85bdbea
+ size 4996085224
model-00006-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d141c86e901fe89ace9d7b1e2dbfce2da571225c137220d4b5e75b47d8a3463
+ size 4995045792
model-00007-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:019536a592c6c7af433184dfd9e8903aa3c70c2e94f5973780f2d30b2b41f7c3
+ size 1440515736
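
Each *.safetensors entry above is a Git LFS pointer, not the weights themselves: LFS fetches the actual shard, which must hash to the listed oid. A small sketch for checking a downloaded shard against its pointer (the local path is an assumption):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so multi-GB shards never sit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digest copied from the LFS pointer for model-00001-of-00007.safetensors.
expected = "2d84ec3ddb2b0315799aeaff7ab2cadc3d9147c1ff43e46a4d0eb7b2fb56380d"
actual = sha256_of("model-00001-of-00007.safetensors")  # assumed local path
assert actual == expected, f"shard digest mismatch: {actual}"
```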
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
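
Although the diff is not rendered, the file presumably follows the standard safetensors sharding index layout: a metadata block with total_size plus a weight_map from each tensor name to the shard that stores it. A sketch for inspecting it once downloaded (the layout is an assumption based on the standard format, not on this file's contents):

```python
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])  # total bytes across the seven shards
per_shard = Counter(index["weight_map"].values())
for shard, n_tensors in sorted(per_shard.items()):
    print(f"{shard}: {n_tensors} tensors")  # distribution of tensors over shards
```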