# coding=utf-8
# Copyright 2018 Google T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest

from transformers.tokenization_t5 import T5Tokenizer
from transformers.tokenization_xlnet import SPIECE_UNDERLINE

from .test_tokenization_common import TokenizerTesterMixin
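
# The fixture below is a small SentencePiece model shipped with the test
# suite, not the real T5 vocabulary, so the token/id expectations in this
# file are specific to that sample model.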
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")


class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = T5Tokenizer

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = T5Tokenizer(SAMPLE_VOCAB)
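        # Save to a temporary directory so that get_tokenizer() can reload the
        # tokenizer via from_pretrained(); tmpdirname is created by the mixin.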
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return T5Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
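
    # Consumed by the shared tests in TokenizerTesterMixin: encoding input_text
    # and decoding the result is expected to round-trip to output_text.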
    def get_input_output_texts(self):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = T5Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
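        # SentencePiece prefixes word-initial pieces with U+2581 ("▁"), which
        # preserves word-boundary information in the token stream.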
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
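
        # Digits such as "9" and the accented "é" are absent from the sample
        # vocab, so both map to id 0, the unknown token, in the checks below.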
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
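
        # Converting ids back to tokens is lossy for out-of-vocabulary pieces:
        # the two zeros above come back as "<unk>" rather than "9" and "é".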
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
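

# Optional entry point: lets this module be run directly with the Python
# interpreter in addition to the usual pytest/unittest discovery (a small
# convenience sketch; the shared test runner does not require it).
if __name__ == "__main__":
    unittest.main()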