{ "added_tokens_decoder": { "0": { "content": "<pad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "1": { "content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "2": { "content": "<mask>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "3": { "content": "<bos>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "4": { "content": "<eos>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true } }, "auto_map": { "AutoTokenizer": [ "tokenizer.ProteinTokenizer", null ] }, "bos_token": "<bos>", "bos_token_id": 3, "clean_up_tokenization_spaces": false, "eos_token": "<eos>", "eos_token_id": 4, "mask_token": "<mask>", "mask_token_id": 2, "model_input_names": [ "input_ids", "attention_mask", "special_tokens_mask" ], "model_max_length": 2048, "other_special_token_ids": null, "pad_token": "<pad>", "pad_token_id": 0, "padding_side": "right", "tokenizer_class": "ProteinTokenizer", "truncation_side": "right", "unk_token": "<unk>", "unk_token_id": 1, "vocab": { "<bos>": 3, "<eos>": 4, "<mask>": 2, "<pad>": 0, "<unk>": 1, "A": 7, "B": 26, "C": 25, "D": 15, "E": 11, "F": 20, "G": 8, "H": 23, "I": 14, "K": 17, "L": 6, "M": 22, "N": 19, "P": 16, "Q": 18, "R": 12, "S": 10, "T": 13, "V": 9, "W": 24, "Y": 21, "|": 5 } }