# coding=utf-8
# Copyright 2018 LXMERT Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # A minimal WordPiece vocabulary, written to a temp file so the
        # tokenizers under test can load it.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        # The expected ids are the positions of each token in the vocab
        # list written out in setUp().
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        # The slow (Python) and fast (Rust) tokenizers must agree on tokens ...
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # ... on ids without special tokens ...
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # ... and on ids with special tokens added.
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
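

# A minimal sketch of exercising the tokenizer outside the test harness,
# assuming a vocab file laid out as in setUp() above; "path/to/vocab.txt"
# is a placeholder, not a real fixture:
#
#     from transformers import LxmertTokenizer
#
#     tokenizer = LxmertTokenizer(vocab_file="path/to/vocab.txt")
#     print(tokenizer.tokenize("UNwant\u00E9d,running"))
#     # -> ['un', '##want', '##ed', ',', 'runn', '##ing']
#
# The module itself can be run with pytest (assuming the standard
# transformers test layout):
#
#     pytest tests/models/lxmert/test_tokenization_lxmert.py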