Chan-Y committed
Commit f65321d · verified · 1 parent: 4e37e0d

Upload folder using huggingface_hub

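The commit message indicates the files were pushed with huggingface_hub. A minimal sketch of such an upload (the repo id and local folder below are hypothetical placeholders, not taken from this commit):

from huggingface_hub import HfApi

api = HfApi()
# Push every file in a local export directory to the Hub repo.
# "Chan-Y/<repo-name>" and "./dist" are hypothetical placeholders.
api.upload_folder(
    repo_id="Chan-Y/<repo-name>",
    folder_path="./dist",
    commit_message="Upload folder using huggingface_hub",
)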
.DS_Store ADDED
Binary file (6.15 kB).
 
mlc-chat-config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "version": "0.1.0",
+   "model_type": "qwen2",
+   "quantization": "q4f16_1",
+   "model_config": {
+     "hidden_size": 768,
+     "intermediate_size": 3072,
+     "num_attention_heads": 12,
+     "num_hidden_layers": 12,
+     "num_key_value_heads": 4,
+     "rms_norm_eps": 1e-05,
+     "rope_theta": 10000.0,
+     "vocab_size": 32768,
+     "tie_word_embeddings": true,
+     "context_window_size": 2048,
+     "sliding_window_size": 128,
+     "prefill_chunk_size": 512,
+     "head_dim": 64,
+     "dtype": "float32",
+     "hidden_act": "silu"
+   },
+   "vocab_size": 32768,
+   "conv_template": {
+     "name": "custom_chat",
+     "system_template": "<|im_start|>system\n{system_message}<|im_end|>\n",
+     "system_message": "You are a helpful AI assistant created by Cihan Yalçın.",
+     "roles": {
+       "user": "<|im_start|>user",
+       "assistant": "<|im_start|>assistant"
+     },
+     "seps": [
+       "<|im_end|>\n"
+     ],
+     "stop_str": [
+       "<|im_end|>",
+       "<|endoftext|>"
+     ],
+     "stop_token_ids": [
+       0,
+       2
+     ]
+   }
+ }
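This is the runtime config MLC-LLM reads at load time (model type, q4f16_1 quantization, and the custom_chat conversation template). A rough sketch of consuming it through the mlc_llm Python package, assuming a compiled model library is available for your platform; the local path is a hypothetical placeholder:

from mlc_llm import MLCEngine

# Placeholder: directory containing mlc-chat-config.json and the quantized params.
model = "./dist/model"
engine = MLCEngine(model)

# Stream a chat completion using the conversation template from the config above.
for response in engine.chat.completions.create(
    messages=[{"role": "user", "content": "Who created you?"}],
    model=model,
    stream=True,
):
    for choice in response.choices:
        print(choice.delta.content, end="", flush=True)
print()

engine.terminate()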
params/chat_template.jinja ADDED
@@ -0,0 +1,31 @@
+ {%- if messages[0].role != 'system' %}
+ {{- '<|im_start|>system
+ You are a helpful AI assistant powered by Spiking Neural Networks (SNNs), created by Cihan Yalçın. You are an advanced SpikingLLM designed to provide accurate, helpful, and concise responses in English. You utilize biologically-inspired neuron models for energy-efficient processing.<|im_end|>
+ ' }}
+ {%- endif %}
+
+ {%- for message in messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+
+ {%- if message.role == "system" %}
+ {{- '<|im_start|>system
+ ' + content + '<|im_end|>
+ ' }}
+ {%- elif message.role == "user" %}
+ {{- '<|im_start|>user
+ ' + content + '<|im_end|>
+ ' }}
+ {%- elif message.role == "assistant" %}
+ {{- '<|im_start|>assistant
+ ' + content + '<|im_end|>
+ ' }}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant
+ ' }}
+ {%- endif %}
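Since the template is plain Jinja, it can be exercised on its own; a small sketch using the jinja2 package, assuming a local checkout of this repo (the message list is illustrative):

from jinja2 import Template

with open("params/chat_template.jinja") as f:
    template = Template(f.read())

# Render a single-turn conversation; the default SNN system prompt is injected
# because no system message is supplied.
prompt = template.render(
    messages=[{"role": "user", "content": "Hello!"}],
    add_generation_prompt=True,
)
print(prompt)
# Expected shape: system block, <|im_start|>user ... <|im_end|>, then an
# opening <|im_start|>assistant tag for generation.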
params/regular_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3afdcbbb792bfaf0e2fdcf1f96e63e6f94c7088698ccd98d1bbba82154191f9
+ size 258171304
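The committed file is only a Git LFS pointer; the ~258 MB weights themselves resolve through the Hub. A sketch of fetching them with huggingface_hub (the repo id is a hypothetical placeholder):

from huggingface_hub import hf_hub_download

# "Chan-Y/<repo-name>" is a placeholder for this repository's id.
weights_path = hf_hub_download(
    repo_id="Chan-Y/<repo-name>",
    filename="params/regular_model.safetensors",
)
print(weights_path)  # local cache path of the resolved safetensors file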
params/special_tokens_map.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "additional_special_tokens": [
+     "<|endoftext|>",
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|pad|>"
+   ],
+   "bos_token": "<|im_start|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|pad|>",
+   "unk_token": "<|endoftext|>"
+ }
params/tokenizer.json ADDED
The diff for this file is too large to render.
 
params/tokenizer_config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<|pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|endoftext|>",
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|pad|>"
+   ],
+   "bos_token": "<|im_start|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 1024,
+   "pad_token": "<|pad|>",
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "unk_token": "<|endoftext|>"
+ }
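Together with params/tokenizer.json and params/special_tokens_map.json above, this is a standard fast-tokenizer bundle, so it should load with transformers; a sketch assuming the repo has been downloaded locally (the ./params path is a placeholder):

from transformers import AutoTokenizer

# "./params" assumes a local copy of this repo's params/ directory.
tokenizer = AutoTokenizer.from_pretrained("./params")

ids = tokenizer("<|im_start|>user\nHello!<|im_end|>")["input_ids"]
print(ids)
print(tokenizer.eos_token, tokenizer.pad_token)  # "<|endoftext|>", "<|pad|>"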
resolve/RegularLLM-iphone.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7dbafedf96592da6c6aa255f973eb63db6e616df0060fd621bd6aaa7519e8c3e
+ size 224779