SimpleTestGenerator / tokenizer_config.json
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "49152": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49153": {
      "content": "<fim-prefix>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49154": {
      "content": "<fim-middle>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49155": {
      "content": "<fim-suffix>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49156": {
      "content": "<fim-pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49157": {
      "content": "<PAD>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49158": {
      "content": "<FUNC_TOKEN>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49159": {
      "content": "<INFO_TOKEN>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49160": {
      "content": "<CLS_TOKEN>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49161": {
      "content": "<AST_TOKEN>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49162": {
      "content": "<DESCRIPTION_TOKEN>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49163": {
      "content": "<COMMENTS_TOKEN>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<FUNC_TOKEN>",
    "<INFO_TOKEN>",
    "<CLS_TOKEN>",
    "<AST_TOKEN>",
    "<DESCRIPTION_TOKEN>",
    "<COMMENTS_TOKEN>"
  ],
  "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 2048,
  "pad_token": "<PAD>",
  "padding_side": "left",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}
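
For reference, a minimal sketch of loading this config with Hugging Face transformers and checking that the custom markers resolve to the ids declared in added_tokens_decoder. It assumes the file sits in a local directory (here ./SimpleTestGenerator, a placeholder path) alongside the tokenizer's vocab.json and merges.txt, which GPT2Tokenizer requires but which are not shown in this file:

```python
# Sketch: load the tokenizer described by this config and verify its
# special tokens. Assumes ./SimpleTestGenerator (placeholder path) also
# contains vocab.json and merges.txt, which GPT2Tokenizer needs.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./SimpleTestGenerator")

# The six task-specific markers registered under additional_special_tokens.
print(tokenizer.additional_special_tokens)
# ['<FUNC_TOKEN>', '<INFO_TOKEN>', '<CLS_TOKEN>', '<AST_TOKEN>',
#  '<DESCRIPTION_TOKEN>', '<COMMENTS_TOKEN>']

# Each marker should map to the single id listed in added_tokens_decoder,
# rather than being split into subword pieces.
assert tokenizer.convert_tokens_to_ids("<FUNC_TOKEN>") == 49158
assert tokenizer.convert_tokens_to_ids("<PAD>") == 49157

# bos, eos, and unk all alias <|endoftext|> (id 49152), as configured above.
assert tokenizer.bos_token == tokenizer.eos_token == tokenizer.unk_token == "<|endoftext|>"
```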
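
Because pad_token is <PAD> and padding_side is "left", batch encoding pads at the start of each shorter sequence, the usual choice for decoder-only generation so the real tokens end each row. A small illustration, under the same local-directory assumption:

```python
# Sketch: left-padding behaviour implied by pad_token + padding_side above.
batch = tokenizer(
    ["def add(a, b):", "return a + b"],
    padding=True,  # pad the shorter sequence up to the longer one
)

# With padding_side="left", pad ids (49157 for <PAD>) appear at the start
# of the shorter row, and the attention mask is 0 over those positions.
print(batch["input_ids"])
print(batch["attention_mask"])
```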