# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

from transformers.tokenization_bert import WordpieceTokenizer
from transformers.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    MecabTokenizer,
)

from .test_tokenization_common import TokenizerTesterMixin
from .utils import custom_tokenizers, slow
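
# This module tests BertJapaneseTokenizer together with its MeCab-based word
# segmentation and the wordpiece / character-level subword tokenizers.  The
# MeCab-dependent test classes are marked with `custom_tokenizers` so they only
# run when custom-tokenizer tests are enabled and the optional MeCab dependency
# is installed.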


@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = BertJapaneseTokenizer

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "γγγ«γ‘γ―",
            "γγ",
            "γ«γ‘γ―",
            "γ°γγ―",
            "##γγ",
            "##γ«γ‘γ―",
            "##γ°γγ―",
            "δΈη",
            "##δΈη",
            "γ",
            "##γ",
            "γ",
            "##γ",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self):
        input_text = "γγγ«γ‘γ―γδΈηγ \nγγγ°γγ―γδΈηγ"
        output_text = "γγγ«γ‘γ― γ δΈη γ γγγ°γγ― γ δΈη γ"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("γγγ«γ‘γ―γδΈηγ\nγγγ°γγ―γδΈηγ")
        self.assertListEqual(tokens, ["γγγ«γ‘γ―", "γ", "δΈη", "γ", "γγ", "##γ°γγ―", "γ", "δΈη", "γ"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
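
    # The MecabTokenizer tests below feed half-width katakana and a full-width
    # digit ("ο½±ο½―οΎοΎοΎ", "οΌ"); with the default settings the text is normalized
    # (NFKC) before segmentation, so the expected tokens come out as "γ’γγγ«γΉγγ’"
    # and "8", while normalize_text=False keeps the raw characters.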
    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tο½±ο½―οΎοΎοΎγΉγγ’γ§iPhoneοΌ γ \n ηΊε£²γγγγγ "),
            ["γ’γγγ«γΉγγ’", "γ§", "iPhone", "8", "γ", "ηΊε£²", "γ", "γ", "γ", "γ"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tο½±ο½―οΎοΎοΎγΉγγ’γ§iPhoneοΌ γ \n ηΊε£²γγγγγ "),
            ["γ’γγγ«γΉγγ’", "γ§", "iphone", "8", "γ", "ηΊε£²", "γ", "γ", "γ", "γ"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tο½±ο½―οΎοΎοΎγΉγγ’γ§iPhoneοΌ γ \n ηΊε£²γγγγγ "),
            ["ο½±ο½―οΎοΎοΎγΉγγ’", "γ§", "iPhone", "οΌ", "γ", "ηΊε£²", "γ", "γ", "γ", "γ", "γ"],
        )
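
    # WordpieceTokenizer performs greedy longest-match-first lookup against the
    # vocab, emitting "##"-prefixed continuation pieces and falling back to
    # unk_token when no prefix of the remaining word is in the vocab.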
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "γγγ«γ‘γ―", "γγ", "γ«γ‘γ―", "γ°γγ―", "##γγ", "##γ«γ‘γ―", "##γ°γγ―"]

        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("γγγ«γ‘γ―"), ["γγγ«γ‘γ―"])

        self.assertListEqual(tokenizer.tokenize("γγγ°γγ―"), ["γγ", "##γ°γγ―"])

        self.assertListEqual(tokenizer.tokenize("γγγ°γγ― γγγ°γγ«γ‘γ― γγγ«γ‘γ―"), ["γγ", "##γ°γγ―", "[UNK]", "γγγ«γ‘γ―"])

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-japanese")

        text = tokenizer.encode("γγγγ¨γγ", add_special_tokens=False)
        text_2 = tokenizer.encode("γ©γγγγγΎγγ¦γ", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
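

# The second test class exercises the same BertJapaneseTokenizer but with
# subword_tokenizer_type="character", where every vocab entry is a single
# character rather than a wordpiece.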
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = BertJapaneseTokenizer

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "γ", "γ", "γ«", "γ‘", "γ―", "γ°", "δΈ", "η", "γ", "γ"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self):
        input_text = "γγγ«γ‘γ―γδΈηγ \nγγγ°γγ―γδΈηγ"
        output_text = "γ γ γ« γ‘ γ― γ δΈ η γ γ γ γ° γ γ― γ δΈ η γ"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("γγγ«γ‘γ―γδΈηγ \nγγγ°γγ―γδΈηγ")
        self.assertListEqual(
            tokens, ["γ", "γ", "γ«", "γ‘", "γ―", "γ", "δΈ", "η", "γ", "γ", "γ", "γ°", "γ", "γ―", "γ", "δΈ", "η", "γ"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
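
    # CharacterTokenizer simply splits the (normalized) input into single
    # characters and maps anything outside the vocab to unk_token, as the
    # "γ»" case below demonstrates.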
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "γ", "γ", "γ«", "γ‘", "γ―", "γ°", "δΈ", "η", "γ", "γ"]

        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("γγγ«γ‘γ―"), ["γ", "γ", "γ«", "γ‘", "γ―"])

        self.assertListEqual(tokenizer.tokenize("γγγ«γ‘γ»"), ["γ", "γ", "γ«", "γ‘", "[UNK]"])

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-japanese-char")

        text = tokenizer.encode("γγγγ¨γγ", add_special_tokens=False)
        text_2 = tokenizer.encode("γ©γγγγγΎγγ¦γ", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
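

if __name__ == "__main__":
    # Optional convenience entry point: the suite is normally collected by the
    # project's test runner, but this lets the module be executed directly with
    # the standard unittest runner as well.
    unittest.main()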