# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin

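# Tiny SentencePiece model shipped with the test fixtures; it stands in for the full
# XGLM vocabulary so the unit tests below run without downloading a checkpoint.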
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)

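    # The raw SentencePiece piece ids are shifted by ``fairseq_offset`` so that the
    # fairseq-style special tokens occupy the lowest ids; the expected ids below are
    # expressed relative to that offset.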
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

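    # Full pretrained tokenizer from the Hub; only the slow integration tests below
    # use it, so it is loaded lazily and cached.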
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

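    # The slow (SentencePiece-based) and fast (Rust ``tokenizers``-backed) tokenizers
    # should agree on both the token strings and the ids they produce.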
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

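    # The remaining tests compare against encodings produced by the original
    # facebook/xglm-564M tokenizer and therefore need the Hub checkpoint.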
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
            'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )