calc-gpt-2 / tokenizer.json
{
  "version": "1.0",
  "truncation": null,
  "padding": null,
  "added_tokens": [
    {
      "id": 0,
      "content": "<|endoftext|>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    }
  ],
  "normalizer": null,
  "pre_tokenizer": {
    "type": "Digits",
    "individual_digits": true
  },
  "post_processor": {
    "type": "ByteLevel",
    "add_prefix_space": true,
    "trim_offsets": false,
    "use_regex": true
  },
  "decoder": {
    "type": "ByteLevel",
    "add_prefix_space": true,
    "trim_offsets": true,
    "use_regex": true
  },
  "model": {
    "type": "BPE",
    "dropout": null,
    "unk_token": null,
    "continuing_subword_prefix": null,
    "end_of_word_suffix": null,
    "fuse_unk": false,
    "byte_fallback": false,
    "vocab": {
      "<|endoftext|>": 0,
      "$": 1,
      "+": 2,
      "0": 3,
      "1": 4,
      "2": 5,
      "3": 6,
      "4": 7,
      "5": 8,
      "6": 9,
      "7": 10,
      "8": 11,
      "9": 12,
      ";": 13,
      "=": 14
    },
    "merges": []
  }
}
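
This file defines a character-level BPE tokenizer for arithmetic-style text: the Digits pre-tokenizer (individual_digits: true) splits every digit into its own piece, the vocabulary holds just 15 symbols (digits 0-9, "+", "=", ";", "$", and "<|endoftext|>"), and the merges list is empty, so each character maps to a single id. A minimal sketch of loading and using it with the Hugging Face tokenizers library follows; the local path "tokenizer.json" is an assumption about where the file has been saved.

# Minimal sketch (not part of the repo): load the config above and encode a
# short arithmetic string. Assumes the file is saved locally as tokenizer.json.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# The Digits pre-tokenizer splits each digit into its own piece, and the BPE
# model has no merges, so every character in the vocab becomes one token.
enc = tok.encode("1+2=3;")
print(enc.tokens)  # expected: ['1', '+', '2', '=', '3', ';']
print(enc.ids)     # expected from the vocab above: [4, 2, 5, 14, 6, 13]

# Decoding runs through the ByteLevel decoder configured in the file.
print(tok.decode(enc.ids))

Since unk_token is null and the vocabulary covers only the 15 symbols listed, input text should be restricted to digits and the operator characters above; anything else (including whitespace) has no id in this tokenizer.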