File size: 4,089 Bytes
2260825
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..file_utils import requires_backends


class AlbertTokenizer:
    """Import-safe placeholder for ``AlbertTokenizer``.

    Exists so the name can be imported when the "sentencepiece" backend is not
    installed; any attempt to use it delegates to ``requires_backends``, which
    presumably raises an informative error (defined in ``..file_utils``).
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class BarthezTokenizer:
    """Import-safe placeholder for ``BarthezTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class BertGenerationTokenizer:
    """Import-safe placeholder for ``BertGenerationTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class CamembertTokenizer:
    """Import-safe placeholder for ``CamembertTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class DebertaV2Tokenizer:
    """Import-safe placeholder for ``DebertaV2Tokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class M2M100Tokenizer:
    """Import-safe placeholder for ``M2M100Tokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class MarianTokenizer:
    """Import-safe placeholder for ``MarianTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class MBart50Tokenizer:
    """Import-safe placeholder for ``MBart50Tokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class MBartTokenizer:
    """Import-safe placeholder for ``MBartTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class MT5Tokenizer:
    """Import-safe placeholder for ``MT5Tokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class PegasusTokenizer:
    """Import-safe placeholder for ``PegasusTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class ReformerTokenizer:
    """Import-safe placeholder for ``ReformerTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class Speech2TextTokenizer:
    """Import-safe placeholder for ``Speech2TextTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class T5Tokenizer:
    """Import-safe placeholder for ``T5Tokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class XLMProphetNetTokenizer:
    """Import-safe placeholder for ``XLMProphetNetTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class XLMRobertaTokenizer:
    """Import-safe placeholder for ``XLMRobertaTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])


class XLNetTokenizer:
    """Import-safe placeholder for ``XLNetTokenizer``.

    Using it delegates to ``requires_backends``, which presumably raises an
    informative error when the "sentencepiece" backend is unavailable.
    """

    def __init__(self, *args, **kwargs):
        # Backend guard: reports that "sentencepiece" is required.
        requires_backends(self, ["sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Same guard on the conventional loading entry point.
        requires_backends(cls, ["sentencepiece"])