Hibernates-2B-R1-V1 / tokenizer.json
{
  "version": 1,
  "truncation": {
    "max_length": 4096,
    "strategy": "longest_first",
    "direction": "right"
  },
  "padding": {
    "strategy": "max_length",
    "side": "left",
    "length": null
  },
  "added_tokens": [],
  "normalizer": {
    "type": "BertNormalizer",
    "clean_text": true,
    "handle_chinese_chars": true,
    "strip_accents": true,
    "lowercase": true
  },
  "pre_tokenizer": {
    "type": "Whitespace"
  },
  "post_processor": null,
  "decoder": {
    "type": "WordPiece",
    "cleanup": true
  },
  "model": {
    "type": "WordPiece",
    "unk_token": "[UNK]",
    "continuing_subword_prefix": "##",
    "max_input_chars_per_word": 100
  }
}
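
Below is a minimal sketch of how the pipeline this config describes could be rebuilt programmatically with the Hugging Face `tokenizers` library. It is an illustration under stated assumptions, not the model's official loading path: the `vocab` dict, pad token settings, and sample sentence are hypothetical placeholders, since the `model` block above ships no `"vocab"` field and a real vocabulary must be supplied before encoding can work.

```python
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.decoders import WordPiece as WordPieceDecoder

# Hypothetical placeholder vocab; the real model would ship its own.
vocab = {"[UNK]": 0, "hello": 1, "world": 2, "##s": 3}

# WordPiece model: unknown words map to [UNK]; words longer than 100
# characters are treated as unknown (max_input_chars_per_word above).
tokenizer = Tokenizer(
    WordPiece(vocab, unk_token="[UNK]", max_input_chars_per_word=100)
)

# Mirrors the "normalizer" block: text cleanup, CJK char handling,
# accent stripping, and lowercasing.
tokenizer.normalizer = BertNormalizer(
    clean_text=True,
    handle_chinese_chars=True,
    strip_accents=True,
    lowercase=True,
)

# Whitespace pre-tokenizer: splits on whitespace and punctuation.
tokenizer.pre_tokenizer = Whitespace()

# WordPiece decoder: rejoins "##"-prefixed subwords and cleans up spacing.
tokenizer.decoder = WordPieceDecoder(prefix="##", cleanup=True)

# Truncation/padding as declared: drop tokens from the right of the
# longest sequence down to 4096; pad on the left. With no fixed length
# given (the config leaves "length" null), padding goes to the longest
# sequence in the batch. The pad token here is a placeholder choice.
tokenizer.enable_truncation(max_length=4096, strategy="longest_first")
tokenizer.enable_padding(direction="left", pad_token="[UNK]", pad_id=0)

encoding = tokenizer.encode("Hello worlds")
print(encoding.tokens)  # ['hello', 'world', '##s']
```

With a complete file (one that includes a vocabulary), the equivalent one-liner would be `Tokenizer.from_file("tokenizer.json")`, which restores the normalizer, pre-tokenizer, model, decoder, and truncation/padding settings in one step.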