# coding=utf-8
# Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt",
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt",
        "google/electra-small-discriminator": "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt",
        "google/electra-base-discriminator": "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt",
        "google/electra-large-discriminator": "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/electra-small-generator": "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json",
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json",
        "google/electra-large-generator": "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json",
        "google/electra-small-discriminator": "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json",
        "google/electra-base-discriminator": "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json",
        "google/electra-large-discriminator": "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class ElectraTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's `tokenizers` library).

    :class:`~transformers.ElectraTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs
    end-to-end tokenization: punctuation splitting and wordpiece.

    Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning
    parameters.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = ElectraTokenizer