Initial commit of this repo
Files changed:
- .gitattributes +3 -0
- config.json +0 -0
- deepseek-r1_tokenizer.py +133 -0
- deepseek-r1_tokenizer/tokenizer.json +0 -0
- deepseek-r1_tokenizer/tokenizer_config.json +35 -0
- main_axcl_aarch64 +3 -0
- main_axcl_x86 +3 -0
- main_prefill +3 -0
- post_config.json +14 -0
- run_deepseek-r1_1.5b_gptq_int4_ax650.sh +14 -0
- run_deepseek-r1_1.5b_gptq_int4_axcl_aarch64.sh +14 -0
- run_deepseek-r1_1.5b_gptq_int4_axcl_x86.sh +14 -0
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+main_axcl_aarch64 filter=lfs diff=lfs merge=lfs -text
+main_axcl_x86 filter=lfs diff=lfs merge=lfs -text
+main_prefill filter=lfs diff=lfs merge=lfs -text
config.json
ADDED
File without changes
deepseek-r1_tokenizer.py
ADDED
@@ -0,0 +1,133 @@
from transformers import AutoTokenizer, PreTrainedTokenizerFast
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import argparse


class Tokenizer_Http():

    def __init__(self):
        model_id = "deepseek-r1_tokenizer"
        self.tokenizer = AutoTokenizer.from_pretrained(model_id)

    def encode(self, prompt):
        messages = [
            {"role": "system", "content": "You are DeepSeek-R1, You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ]
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        print(text)
        token_ids = self.tokenizer.encode(text)
        return token_ids

    def decode(self, token_ids):
        return self.tokenizer.decode(token_ids)

    @property
    def bos_id(self):
        return self.tokenizer.bos_token_id

    @property
    def eos_id(self):
        return self.tokenizer.eos_token_id

    @property
    def bos_token(self):
        return self.tokenizer.bos_token

    @property
    def eos_token(self):
        return self.tokenizer.eos_token


tokenizer = Tokenizer_Http()

print(tokenizer.bos_id, tokenizer.bos_token, tokenizer.eos_id, tokenizer.eos_token)
print(tokenizer.encode("hello world"))


class Request(BaseHTTPRequestHandler):
    # request handler defined by subclassing BaseHTTPRequestHandler
    timeout = 5
    server_version = 'Apache'

    def do_GET(self):
        print(self.path)
        # handle GET requests from the client
        self.send_response(200)
        self.send_header("type", "get")  # set response header(s); optional, more can be added
        self.end_headers()

        if self.path == '/bos_id':
            bos_id = tokenizer.bos_id
            # print(bos_id)
            # to json
            if bos_id is None:
                msg = json.dumps({'bos_id': -1})
            else:
                msg = json.dumps({'bos_id': bos_id})
        elif self.path == '/eos_id':
            eos_id = tokenizer.eos_id
            if eos_id is None:
                msg = json.dumps({'eos_id': -1})
            else:
                msg = json.dumps({'eos_id': eos_id})
        else:
            msg = 'error'

        print(msg)
        msg = str(msg).encode()  # convert to str, then to bytes

        self.wfile.write(msg)  # send the bytes back to the client

    def do_POST(self):
        # handle POST requests from the client
        data = self.rfile.read(int(
            self.headers['content-length']))  # read the request body from the client (bytes)
        data = data.decode()  # convert bytes to str

        self.send_response(200)
        self.send_header("type", "post")  # set response header(s); optional, more can be added
        self.end_headers()

        if self.path == '/encode':
            req = json.loads(data)
            prompt = req['text']

            token_ids = tokenizer.encode(prompt)
            if token_ids is None:
                msg = json.dumps({'token_ids': -1})
            else:
                msg = json.dumps({'token_ids': token_ids})

        elif self.path == '/decode':
            req = json.loads(data)
            token_ids = req['token_ids']
            text = tokenizer.decode(token_ids)
            if text is None:
                msg = json.dumps({'text': ""})
            else:
                msg = json.dumps({'text': text})
        else:
            msg = 'error'
        print(msg)
        msg = str(msg).encode()  # convert to str, then to bytes

        self.wfile.write(msg)  # send the bytes back to the client


if __name__ == "__main__":

    args = argparse.ArgumentParser()
    args.add_argument('--host', type=str, default='localhost')
    args.add_argument('--port', type=int, default=8080)
    args = args.parse_args()

    host = (args.host, args.port)  # address and port; 'localhost' is equivalent to '127.0.0.1'
    print('http://%s:%s' % host)
    server = HTTPServer(host, Request)  # create the server from the address/port and the handler class
    server.serve_forever()  # start serving
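For reference, this script exposes the tokenizer over plain HTTP: GET /bos_id and /eos_id return the special-token ids, and POST /encode and /decode exchange JSON bodies. A minimal sketch of exercising it with curl, assuming the script's defaults of localhost:8080; the token ids in the /decode call are placeholders, in practice they would come from an /encode response:

# in one shell: start the tokenizer service with its default host/port
python deepseek-r1_tokenizer.py --host localhost --port 8080
# in another shell: query the special-token ids
curl http://localhost:8080/bos_id
curl http://localhost:8080/eos_id
# encode a prompt, then decode token ids (placeholder ids shown)
curl -X POST -d '{"text": "hello world"}' http://localhost:8080/encode
curl -X POST -d '{"token_ids": [0, 1, 2]}' http://localhost:8080/decode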
deepseek-r1_tokenizer/tokenizer.json
ADDED
The diff for this file is too large to render.
deepseek-r1_tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,35 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "<|begin▁of▁sentence|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "clean_up_tokenization_spaces": false,
  "eos_token": {
    "__type": "AddedToken",
    "content": "<|end▁of▁sentence|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "legacy": true,
  "model_max_length": 16384,
  "pad_token": {
    "__type": "AddedToken",
    "content": "<|end▁of▁sentence|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sp_model_kwargs": {},
  "unk_token": null,
  "tokenizer_class": "LlamaTokenizerFast",
  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}"
}
main_axcl_aarch64
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb111fc00c54abb6142a8f44df087bf104c8150a1cefa6be55c6b174b932c4ec
size 999008
main_axcl_x86
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6cba0be8df523e351789fcfa745772423096f52a3b0a760f8d8c9f5b8bb2ec82
size 1022384
main_prefill
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7661c63bcc2d2f6ca557ec81cad9371b1a196b0f9f171b6b91069e87a9ae9619
size 3055912
post_config.json
ADDED
@@ -0,0 +1,14 @@
{
    "enable_temperature" : true,
    "temperature" : 0.9,

    "enable_repetition_penalty" : false,
    "repetition_penalty" : 1.2,
    "penalty_window" : 20,

    "enable_top_p_sampling" : false,
    "top_p" : 0.8,

    "enable_top_k_sampling" : true,
    "top_k" : 10
}
run_deepseek-r1_1.5b_gptq_int4_ax650.sh
ADDED
@@ -0,0 +1,14 @@
./main_prefill \
--template_filename_axmodel "deepseek-r1-1.5b-gptq-int4-ax650/qwen2_p128_l%d_together.axmodel" \
--axmodel_num 28 \
--tokenizer_type 2 \
--filename_tokenizer_model "http://127.0.0.1:12345" \
--bos 0 --eos 0 \
--filename_post_axmodel "deepseek-r1-1.5b-gptq-int4-ax650/qwen2_post.axmodel" \
--filename_tokens_embed "deepseek-r1-1.5b-gptq-int4-ax650/model.embed_tokens.weight.bfloat16.bin" \
--tokens_embed_num 151936 \
--tokens_embed_size 1536 \
--use_mmap_load_embed 1 \
--live_print 1 \
--continue 1 \
--prompt "$1"
run_deepseek-r1_1.5b_gptq_int4_axcl_aarch64.sh
ADDED
@@ -0,0 +1,14 @@
./main_axcl_aarch64 \
--template_filename_axmodel "deepseek-r1-1.5b-gptq-int4-ax650/qwen2_p128_l%d_together.axmodel" \
--axmodel_num 28 \
--tokenizer_type 2 \
--filename_tokenizer_model "http://127.0.0.1:12345" \
--bos 0 --eos 0 \
--filename_post_axmodel "deepseek-r1-1.5b-gptq-int4-ax650/qwen2_post.axmodel" \
--filename_tokens_embed "deepseek-r1-1.5b-gptq-int4-ax650/model.embed_tokens.weight.bfloat16.bin" \
--tokens_embed_num 151936 \
--tokens_embed_size 1536 \
--use_mmap_load_embed 0 \
--live_print 1 \
--continue 1 \
--prompt "$1"
run_deepseek-r1_1.5b_gptq_int4_axcl_x86.sh
ADDED
@@ -0,0 +1,14 @@
./main_axcl_x86 \
--template_filename_axmodel "deepseek-r1-1.5b-gptq-int4-ax650/qwen2_p128_l%d_together.axmodel" \
--axmodel_num 28 \
--tokenizer_type 2 \
--filename_tokenizer_model "http://127.0.0.1:12345" \
--bos 0 --eos 0 \
--filename_post_axmodel "deepseek-r1-1.5b-gptq-int4-ax650/qwen2_post.axmodel" \
--filename_tokens_embed "deepseek-r1-1.5b-gptq-int4-ax650/model.embed_tokens.weight.bfloat16.bin" \
--tokens_embed_num 151936 \
--tokens_embed_size 1536 \
--use_mmap_load_embed 0 \
--live_print 1 \
--continue 1 \
--prompt "$1"
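The three run scripts differ only in the launcher binary (main_prefill for AX650, main_axcl_aarch64 / main_axcl_x86 over AXCL) and in --use_mmap_load_embed; all of them point --filename_tokenizer_model at http://127.0.0.1:12345 and pass their first argument through as --prompt. Since deepseek-r1_tokenizer.py defaults to port 8080, the tokenizer service has to be started on port 12345 first. A minimal launch sketch for the AX650 case, assuming the model directory deepseek-r1-1.5b-gptq-int4-ax650 referenced by the scripts is present alongside these files:

# shell 1: tokenizer service on the port the run scripts expect
python deepseek-r1_tokenizer.py --host 127.0.0.1 --port 12345
# shell 2: make the LFS binaries executable, then run with a prompt
chmod +x main_prefill main_axcl_aarch64 main_axcl_x86
./run_deepseek-r1_1.5b_gptq_int4_ax650.sh "hello world"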