iswaalex committed
Commit 5be2f1e · verified · 1 Parent(s): 7f00735

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,98 @@
+ {
+   "_name_or_path": "BioMistral/BioMistral-7B",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "quantization_config": {
+     "algo_config": {
+       "model_decoder_layers": "model.layers",
+       "name": "awq",
+       "num_attention_heads": -1,
+       "num_key_value_heads": -1,
+       "scaling_layers": [
+         {
+           "inp": "self_attn.q_proj",
+           "layers": [
+             "self_attn.q_proj",
+             "self_attn.k_proj",
+             "self_attn.v_proj"
+           ],
+           "module2inspect": "self_attn",
+           "prev_op": "input_layernorm"
+         },
+         {
+           "inp": "self_attn.o_proj",
+           "layers": [
+             "self_attn.o_proj"
+           ],
+           "prev_op": "self_attn.v_proj"
+         },
+         {
+           "inp": "mlp.gate_proj",
+           "layers": [
+             "mlp.gate_proj",
+             "mlp.up_proj"
+           ],
+           "module2inspect": "mlp",
+           "prev_op": "post_attention_layernorm"
+         },
+         {
+           "inp": "mlp.down_proj",
+           "layers": [
+             "mlp.down_proj"
+           ],
+           "prev_op": "mlp.up_proj"
+         }
+       ]
+     },
+     "exclude": [],
+     "export": {
+       "kv_cache_group": [],
+       "pack_method": "reorder",
+       "weight_format": "real_quantized",
+       "weight_merge_groups": null
+     },
+     "global_quant_config": {
+       "bias": null,
+       "input_tensors": null,
+       "output_tensors": null,
+       "target_device": null,
+       "weight": {
+         "ch_axis": 1,
+         "dtype": "uint4",
+         "group_size": 128,
+         "is_dynamic": false,
+         "observer_cls": "PerGroupMinMaxObserver",
+         "qscheme": "per_group",
+         "round_method": "half_even",
+         "scale_type": "float",
+         "symmetric": false
+       }
+     },
+     "layer_quant_config": {},
+     "layer_type_quant_config": {},
+     "quant_method": "quark",
+     "quant_mode": "eager_mode"
+   },
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.49.0",
+   "use_cache": false,
+   "vocab_size": 32000
+ }
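
The config above describes BioMistral-7B with weights quantized via AMD Quark using the AWQ algorithm: uint4 weights, per-group scales (group_size 128), asymmetric, stored in "real_quantized" form. A minimal sketch for inspecting those settings follows; it only assumes config.json from this commit sits in the working directory and uses the standard-library json module, so it makes no claim about how the packed weights are loaded for inference.

import json

# Minimal sketch: summarize the Quark/AWQ quantization settings recorded in config.json.
# Assumes config.json from this commit is in the current working directory.
with open("config.json") as f:
    cfg = json.load(f)

qcfg = cfg["quantization_config"]
weight = qcfg["global_quant_config"]["weight"]

print("quant method :", qcfg["quant_method"])         # quark
print("algorithm    :", qcfg["algo_config"]["name"])  # awq
print("weight dtype :", weight["dtype"])              # uint4
print("group size   :", weight["group_size"])         # 128 values share one scale/zero-point
print("symmetric    :", weight["symmetric"])          # False -> asymmetric quantization

Actually running the "real_quantized" weights presumably requires a transformers build that recognizes quant_method "quark" plus the matching Quark runtime, so treat a plain AutoModelForCausalLM.from_pretrained call as something to verify against the model card rather than a given.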
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.49.0"
+ }
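
generation_config.json carries only token-id defaults, and it maps one-to-one onto transformers' GenerationConfig. A small sketch, assuming a local checkout of this repository in the current directory:

from transformers import GenerationConfig

# Minimal sketch: the defaults above surface directly on a GenerationConfig object.
# "." stands in for a local checkout of this repository (path is an assumption).
gen_cfg = GenerationConfig.from_pretrained(".")
print(gen_cfg.bos_token_id)  # 1 -> <s>
print(gen_cfg.eos_token_id)  # 2 -> </s>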
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa2958b5af8beac4947a0be64aa8c85171eb766ca3e5a350d1588a8befaaaecf
+ size 3956836048
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
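
The chat_template above enforces strictly alternating user/assistant turns and wraps user messages in Mistral-style [INST] ... [/INST] markers. A minimal sketch of rendering a conversation through it, assuming a local checkout of this repository (the path and sample messages are placeholders):

from transformers import AutoTokenizer

# Minimal sketch: render a conversation through the [INST]-style chat template
# defined in tokenizer_config.json. "." stands for a local checkout (an assumption).
tok = AutoTokenizer.from_pretrained(".")

messages = [
    {"role": "user", "content": "Summarize the mechanism of beta-lactam antibiotics."},
    {"role": "assistant", "content": "(previous model reply goes here)"},
    {"role": "user", "content": "Keep it to one sentence."},
]

# tokenize=False returns the rendered prompt string:
# <s>[INST] ... [/INST] ... </s> [INST] ... [/INST]
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)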