ec40-64k / special_tokens_map.json
{
"additional_special_tokens": [
"<af>",
"<am>",
"<ar>",
"<ast>",
"<be>",
"<bg>",
"<bn>",
"<bs>",
"<ca>",
"<cs>",
"<da>",
"<de>",
"<en>",
"<es>",
"<fr>",
"<gu>",
"<ha>",
"<he>",
"<hi>",
"<is>",
"<it>",
"<kab>",
"<kn>",
"<lb>",
"<model>",
"<mr>",
"<mt>",
"<ne>",
"<nl>",
"<no>",
"<oc>",
"<pl>",
"<pt>",
"<ro>",
"<ru>",
"<sd>",
"<so>",
"<sr>",
"<sv>",
"<ti>",
"<uk>",
"<ur>",
"<vocab>"
],
"bos_token": {
"content": "<s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"cls_token": "<s>",
"eos_token": {
"content": "</s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"mask_token": {
"content": "<unk>",
"lstrip": true,
"normalized": true,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<pad>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"sep_token": "</s>",
"unk_token": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}
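
A minimal sketch of how a map like this is typically consumed, assuming the Hugging Face transformers library and a Hub repo id of speedcell4/ec40-64k (inferred from the page header; both the library and the repo id are assumptions, not stated in this file):

from transformers import AutoTokenizer

# Loading the tokenizer picks up special_tokens_map.json automatically,
# alongside the tokenizer model/vocab files in the same repo.
tok = AutoTokenizer.from_pretrained("speedcell4/ec40-64k")  # assumed repo id

# The core special tokens defined above.
print(tok.bos_token, tok.eos_token, tok.pad_token)  # <s> </s> <pad>

# The first few additional special tokens (the language tags).
print(tok.additional_special_tokens[:5])  # e.g. ['<af>', '<am>', '<ar>', '<ast>', '<be>']

The "<xx>" entries look like per-language tags of the kind used by multilingual translation models to mark the source or target language; how this particular tokenizer's model uses them is not specified by this file.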