# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..file_utils import requires_backends
class AlbertTokenizer:
    """Dummy AlbertTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class BarthezTokenizer:
    """Dummy BarthezTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class BertGenerationTokenizer:
    """Dummy BertGenerationTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class CamembertTokenizer:
    """Dummy CamembertTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class DebertaV2Tokenizer:
    """Dummy DebertaV2Tokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class M2M100Tokenizer:
    """Dummy M2M100Tokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class MarianTokenizer:
    """Dummy MarianTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class MBart50Tokenizer:
    """Dummy MBart50Tokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class MBartTokenizer:
    """Dummy MBartTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class MT5Tokenizer:
    """Dummy MT5Tokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class PegasusTokenizer:
    """Dummy PegasusTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class ReformerTokenizer:
    """Dummy ReformerTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class Speech2TextTokenizer:
    """Dummy Speech2TextTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class T5Tokenizer:
    """Dummy T5Tokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class XLMProphetNetTokenizer:
    """Dummy XLMProphetNetTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class XLMRobertaTokenizer:
    """Dummy XLMRobertaTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])
class XLNetTokenizer:
    """Dummy XLNetTokenizer used when the `sentencepiece` backend is absent.

    Every entry point simply calls ``requires_backends`` with
    ``["sentencepiece"]``, which reports the missing backend to the caller.
    """

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

    # `cls` is only bound correctly when declared as a classmethod; the
    # decorator was lost in the mangled source and is restored here.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["sentencepiece"])