{ "added_tokens_decoder": { "0": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "1": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "2": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "3": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "4": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true } }, "ambiguous_token_ids": [ 1, 6, 7, 8, 9, 10, 11 ], "auto_map": { "AutoTokenizer": [ "tokenizer.ProteinTokenizer", null ] }, "bos_token": "", "bos_token_id": 3, "clean_up_tokenization_spaces": false, "eos_token": "", "eos_token_id": 4, "mask_token": "", "mask_token_id": 2, "model_input_names": [ "input_ids", "attention_mask", "special_tokens_mask" ], "model_max_length": 2048, "other_special_token_ids": null, "pad_token": "", "pad_token_id": 0, "padding_side": "right", "tokenizer_class": "ProteinTokenizer", "truncation_side": "right", "unk_token": "", "unk_token_id": 1, "vocab_path": "conf/tokenizer/amplify_vocab.txt" }