Upload tokenizer
Changed files:
- README.md (+2 / -2)
- special_tokens_map.json (+16 / -16)
- tokenizer_config.json (+0 / -0)
README.md CHANGED
@@ -3,12 +3,12 @@ base_model:
 - meta-llama/Llama-3.1-8B
 - Hastagaras/snovalite-baukit-6-14.FT-L5-7.13-22.27-31
 library_name: transformers
+license: llama3.1
+pipeline_tag: text-generation
 tags:
 - mergekit
 - merge
 - not-for-all-audiences
-license: llama3.1
-pipeline_tag: text-generation
 ---
 
 ### ZABUZA
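The README change only reorders the YAML front matter: `license` and `pipeline_tag` move ahead of the `tags` list, with no metadata added or removed. As a minimal sketch of how to confirm the card still parses as intended, one can load it with `huggingface_hub.ModelCard`; the repo id below is a placeholder assumption, not a value taken from this page:

```python
# Sketch: check the model card metadata after the front-matter reorder.
# The repo id is hypothetical; substitute the actual repository.
from huggingface_hub import ModelCard

card = ModelCard.load("your-namespace/your-merged-model")  # placeholder
print(card.data.license)       # expected: "llama3.1"
print(card.data.pipeline_tag)  # expected: "text-generation"
print(card.data.tags)          # expected: ["mergekit", "merge", "not-for-all-audiences"]
```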
special_tokens_map.json CHANGED
@@ -1,16 +1,16 @@
-{
-  "bos_token": {
-    "content": "<|begin_of_text|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|eot_id|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
-}
+{
+  "bos_token": {
+    "content": "<|begin_of_text|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|eot_id|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
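Every line of special_tokens_map.json is marked changed, yet the rendered old and new sides are textually identical, which usually indicates a whitespace-only re-serialization of the file. A minimal sketch for verifying the declared special tokens after the upload, assuming a placeholder repo id (transformers' `AutoTokenizer` is the standard loader for this file):

```python
# Sketch: confirm the BOS/EOS tokens declared in special_tokens_map.json.
# The repo id is a placeholder assumption; substitute the actual repository.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-namespace/your-merged-model")
assert tok.bos_token == "<|begin_of_text|>"
assert tok.eos_token == "<|eot_id|>"
# Using <|eot_id|> as EOS makes generation stop at Llama 3 turn boundaries
# rather than at <|end_of_text|>.
print(tok.bos_token_id, tok.eos_token_id)
```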
tokenizer_config.json CHANGED
The diff for this file is too large to render. See raw diff.
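Since the tokenizer_config.json diff is too large for the web view, the file can be fetched at two revisions and compared locally. A sketch assuming `huggingface_hub` is installed; the repo id and revision values are placeholders, not values from this page:

```python
# Sketch: download tokenizer_config.json at two revisions and diff locally.
# Repo id and revisions are hypothetical placeholders.
import difflib
from huggingface_hub import hf_hub_download

old = hf_hub_download("your-namespace/your-merged-model",
                      "tokenizer_config.json",
                      revision="<previous-commit-sha>")  # placeholder
new = hf_hub_download("your-namespace/your-merged-model",
                      "tokenizer_config.json",
                      revision="main")

with open(old) as f_old, open(new) as f_new:
    diff = difflib.unified_diff(f_old.readlines(), f_new.readlines(),
                                fromfile="old", tofile="new")
print("".join(diff))
```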