AMPLIFY_350M / tokenizer.json
{
  "version": "1.0",
  "truncation": null,
  "padding": null,
  "added_tokens": [
    {
      "id": 0,
      "content": "<pad>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 1,
      "content": "<unk>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 2,
      "content": "<mask>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 3,
      "content": "<bos>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 4,
      "content": "<eos>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    }
  ],
  "normalizer": null,
  "pre_tokenizer": {
    "type": "Split",
    "pattern": {
      "String": ""
    },
    "behavior": "Removed",
    "invert": false
  },
  "post_processor": null,
  "decoder": null,
  "model": {
    "type": "WordPiece",
    "unk_token": "<unk>",
    "continuing_subword_prefix": "##",
    "max_input_chars_per_word": 100,
    "vocab": {
      "<pad>": 0,
      "<unk>": 1,
      "<mask>": 2,
      "<bos>": 3,
      "<eos>": 4,
      "|": 5,
      "L": 6,
      "A": 7,
      "G": 8,
      "V": 9,
      "S": 10,
      "E": 11,
      "R": 12,
      "T": 13,
      "I": 14,
      "D": 15,
      "P": 16,
      "K": 17,
      "Q": 18,
      "N": 19,
      "F": 20,
      "Y": 21,
      "M": 22,
      "H": 23,
      "W": 24,
      "C": 25,
      "B": 26
    }
  }
}
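
A minimal usage sketch (not part of this repository; the local filename and the example sequence are assumptions) of how this configuration behaves when loaded with the Hugging Face `tokenizers` library: the Split pre-tokenizer with an empty-string pattern breaks the input into single characters, the WordPiece model then maps each amino-acid letter to the id listed under "vocab", and since "post_processor" is null no <bos>/<eos> tokens are appended automatically.

# Sketch only: assumes this file has been saved locally as "tokenizer.json".
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# The empty-string Split pre-tokenizer yields one token per character,
# so each residue letter gets its own id from the vocab above.
enc = tok.encode("MKTAYIAK")          # hypothetical protein fragment
print(enc.tokens)                     # ['M', 'K', 'T', 'A', 'Y', 'I', 'A', 'K']
print(enc.ids)                        # [22, 17, 13, 7, 21, 14, 7, 17]

# Special tokens declared in "added_tokens" keep their reserved ids.
print(tok.token_to_id("<bos>"), tok.token_to_id("<eos>"))   # 3 4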