fix: check surface form against special tokens in `_add_tokens`
tokenization_arcade100k.py  CHANGED  (+54 -49)
@@ -1,8 +1,8 @@
 # coding=utf-8
 # Copyright (c) 2023 Alibaba Cloud & Stability AI.
 #
-#
-#
+# Tongyi Qianwen LICENSE AGREEMENT:
+# https://github.com/QwenLM/Qwen/blob/5aa84bdfd3237b37f01bc88cd49b3279b9a71d0b/Tongyi%20Qianwen%20LICENSE%20AGREEMENT
 """Tokenization classes for Arcade100k."""

 import base64
@@ -29,47 +29,56 @@ def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
     }


+ENDOFTEXT = "<|endoftext|>"
+FIM = [
+    "<|fim_prefix|>",
+    "<|fim_middle|>",
+    "<|fim_suffix|>",
+    "<|fim_pad|>",
+]
+# `StarCoder` Tokens
+CODE = [
+    "<gh_stars>",
+    "<filename>",
+    "<issue_start>",
+    "<issue_comment>",
+    "<issue_closed>",
+    "<jupyter_start>",
+    "<jupyter_text>",
+    "<jupyter_code>",
+    "<jupyter_output>",
+    "<empty_output>",
+    "<commit_before>",
+    "<commit_msg>",
+    "<commit_after>",
+    "<reponame>",
+]
+CHAT = [
+    "<|im_start|>", # Chat: Input message start
+    "<|im_end|>", # Chat: Input message end
+]
+PAUSE = "<|pause|>" # Think before you speak (https://arxiv.org/abs/2310.02226)
+REGISTERS = [
+    f"<|reg{i}|>" for i in range(0, 8)
+] # Register 0 sink token (https://arxiv.org/abs/2309.17453)
+ENDOFPROMPT = "<|endofprompt|>"
+SPECIAL_TOKENS_NAMES = (
+    [ENDOFTEXT]
+    + FIM
+    + CODE
+    + [ENDOFPROMPT]
+    + CHAT
+    + [PAUSE]
+    + REGISTERS
+    + ["<|extra0|>"]
+)
+START_ID = 100257
+SPECIAL_TOKENS = {t: START_ID + i for i, t in enumerate(SPECIAL_TOKENS_NAMES)}
+
+
 def _arcade100k(vocab_file: str):
     mergeable_ranks = _load_tiktoken_bpe(vocab_file)

-    ENDOFTEXT = "<|endoftext|>"
-    FIM = [
-        "<|fim_prefix|>",
-        "<|fim_middle|>",
-        "<|fim_suffix|>",
-        "<|fim_pad|>",
-    ]
-    # `StarCoder` Tokens
-    CODE = [
-        "<gh_stars>",
-        "<filename>",
-        "<issue_start>",
-        "<issue_comment>",
-        "<issue_closed>",
-        "<jupyter_start>",
-        "<jupyter_text>",
-        "<jupyter_code>",
-        "<jupyter_output>",
-        "<empty_output>",
-        "<commit_before>",
-        "<commit_msg>",
-        "<commit_after>",
-        "<reponame>"
-    ]
-    CHAT = [
-        "<|im_start|>", # Chat: Input message start
-        "<|im_end|>", # Chat: Input message end
-    ]
-    PAUSE = "<|pause|>" # Think before you speak (https://arxiv.org/abs/2310.02226)
-    REGISTERS = [f"<|reg{i}|>" for i in range(0, 8)] # Register 0 sink token (https://arxiv.org/abs/2309.17453)
-    ENDOFPROMPT = "<|endofprompt|>"
-    SPECIAL_TOKENS_NAMES = [ENDOFTEXT] + FIM + CODE + [ENDOFPROMPT] + CHAT + [PAUSE] + REGISTERS + ["<|extra0|>"]
-    START_ID = len(mergeable_ranks) + 1
-    SPECIAL_TOKENS = {
-        t: START_ID + i
-        for i, t in enumerate(SPECIAL_TOKENS_NAMES)
-    }
-
     return {
         "name": NAME,
         "pat_str": r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""",
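Note: with the special-token table now at module level, the ID assignment can be checked in isolation. A minimal sketch, assuming the file is importable as a plain Python module (the import path is illustrative); the expected values follow directly from START_ID = 100257 and the order of SPECIAL_TOKENS_NAMES:

from tokenization_arcade100k import SPECIAL_TOKENS, SPECIAL_TOKENS_NAMES, START_ID

# 1 + 4 + 14 + 1 + 2 + 1 + 8 + 1 = 32 special tokens in total.
assert len(SPECIAL_TOKENS_NAMES) == 32
assert START_ID == 100257
assert SPECIAL_TOKENS["<|endoftext|>"] == 100257   # first entry
assert SPECIAL_TOKENS["<|fim_prefix|>"] == 100258  # FIM block follows ENDOFTEXT
assert SPECIAL_TOKENS["<|extra0|>"] == 100288      # last entry: 100257 + 31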
@@ -108,16 +117,13 @@ class Arcade100kTokenizer(PreTrainedTokenizer):
         # TODO: Remove this assertion
         assert (
             len(self.tokenizer._mergeable_ranks)
-            + len(self.tokenizer._special_tokens)
+            + len(self.tokenizer._special_tokens)
+            + 1
             == self.tokenizer.n_vocab
         ), f"{len(self.tokenizer._mergeable_ranks) + len(self.tokenizer._special_tokens)} != {self.tokenizer.n_vocab} in encoding"

-        self.decoder = {
-
-        }
-        self.decoder.update(
-            {i: n for n, i in self.tokenizer._special_tokens.items()}
-        )
+        self.decoder = {i: n for n, i in self.tokenizer._mergeable_ranks.items()}
+        self.decoder.update({i: n for n, i in self.tokenizer._special_tokens.items()})
         self.eos_token = self.decoder[self.tokenizer.eot_token]
         self.pad_token = self.decoder[self.tokenizer.eot_token]

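Note: the extra "+ 1" in the assertion presumably accounts for one unused ID, since the special IDs now start at 100257 rather than directly after the BPE ranks; the rebuilt self.decoder covers both the regular ranks and the special tokens. A rough sketch of how such a decoder resolves IDs back to text, assuming (as in tiktoken) that regular entries are bytes and special entries are str; sketch_decode is illustrative and not part of the file:

from typing import Dict, List, Union

def sketch_decode(decoder: Dict[int, Union[bytes, str]], ids: List[int]) -> str:
    """Illustrative only: join byte-level BPE pieces and special-token strings."""
    pieces = []
    for i in ids:
        piece = decoder[i]
        # Regular ranks map to raw bytes; special tokens map to their surface strings.
        pieces.append(piece if isinstance(piece, bytes) else piece.encode("utf-8"))
    return b"".join(pieces).decode("utf-8", errors="replace")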
@@ -156,8 +162,7 @@ class Arcade100kTokenizer(PreTrainedTokenizer):
             raise ValueError("Adding regular tokens is not supported")
         for token in new_tokens:
             surface_form = token.content if isinstance(token, AddedToken) else token
-
-            if surface_form not in SPECIAL_TOKENS_SET:
+            if surface_form not in SPECIAL_TOKENS:
                 raise ValueError("Adding unknown special tokens is not supported")
         return 0

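Behavioural effect of the last hunk: _add_tokens now compares the surface form (the .content of an AddedToken, or the string itself) against the module-level SPECIAL_TOKENS mapping, so re-registering a known special token is accepted while unknown tokens are still rejected. A hedged usage sketch (the vocab path is illustrative):

from transformers import AddedToken
from tokenization_arcade100k import Arcade100kTokenizer

tok = Arcade100kTokenizer(vocab_file="arcade100k.tiktoken")  # illustrative path

# Known special tokens pass the surface-form check, whether given as str or AddedToken.
tok.add_special_tokens({"pad_token": "<|endoftext|>"})
tok.add_special_tokens({"eos_token": AddedToken("<|endoftext|>")})

# Anything outside SPECIAL_TOKENS still raises:
# tok.add_special_tokens({"sep_token": "<|not_a_special|>"})  # ValueError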