code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def test_inference_fp16(self):
r"""
A small test to make sure that inference works in half precision without any problems.
"""
model = PvtForImageClassification.from_pretrained("Zetatech/pvt-tiny-224", torch_dtype=torch.float16)
model.to(torch_device)
image_processor = PvtImageProcessor(size=224)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device, dtype=torch.float16)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_ = model(pixel_values)
|
A small test to make sure that inference works in half precision without any problems.
|
test_inference_fp16
|
python
|
huggingface/transformers
|
tests/models/pvt/test_modeling_pvt.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/pvt/test_modeling_pvt.py
|
Apache-2.0
|
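The row above exercises half-precision inference. Below is a minimal, hedged sketch of the same pattern with a stand-in classifier (the real test uses `PvtForImageClassification`; the tiny `nn.Sequential` model here is purely illustrative):

```python
import torch
from torch import nn

# Stand-in for an image classifier; fp16 kernels are most reliable on GPU, so fall back to fp32 on CPU.
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 224 * 224, 10))
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
model = model.to(device=device, dtype=dtype).eval()

pixel_values = torch.rand(1, 3, 224, 224, device=device, dtype=dtype)
with torch.no_grad():  # forward pass only, mirroring the test above
    logits = model(pixel_values)
print(logits.shape)  # torch.Size([1, 10])
```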
def test_inference_fp16(self):
r"""
A small test to make sure that inference works in half precision without any problems.
"""
model = PvtV2ForImageClassification.from_pretrained("OpenGVLab/pvt_v2_b0", torch_dtype=torch.float16)
model.to(torch_device)
image_processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0")
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device, dtype=torch.float16)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_ = model(pixel_values)
|
A small test to make sure that inference works in half precision without any problems.
|
test_inference_fp16
|
python
|
huggingface/transformers
|
tests/models/pvt_v2/test_modeling_pvt_v2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/pvt_v2/test_modeling_pvt_v2.py
|
Apache-2.0
|
def test_apply_chat_template_video_special_processing(self):
"""
Tests that models can apply their own custom preprocessing to conversations.
"""
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest("Processor doesn't accept videos at input")
video_file_path = hf_hub_download(
repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
)
messages = [
[
{
"role": "user",
"content": [
{"type": "video", "path": video_file_path},
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
def _process_messages_for_chat_template(
conversation,
batch_images,
batch_videos,
batch_video_metadata,
**chat_template_kwargs,
):
# Let us just always return a dummy prompt
new_msg = [
[
{
"role": "user",
"content": [
{"type": "video"}, # no need to use path, video is loaded already by this moment
{"type": "text", "text": "Dummy prompt for preprocess testing"},
],
},
]
]
return new_msg
processor._process_messages_for_chat_template = _process_messages_for_chat_template
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
# Check with `in` because we don't know how each template formats the prompt with BOS/EOS/etc
formatted_text = processor.batch_decode(out_dict_with_video["input_ids"], skip_special_tokens=True)[0]
self.assertTrue("Dummy prompt for preprocess testing" in formatted_text)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 145912)
|
Tests that models can apply their own custom preprocessing to conversations.
|
test_apply_chat_template_video_special_processing
|
python
|
huggingface/transformers
|
tests/models/qwen2_5_omni/test_processor_qwen2_5_omni.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/qwen2_5_omni/test_processor_qwen2_5_omni.py
|
Apache-2.0
|
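The chat-template test above works by monkeypatching the processor's `_process_messages_for_chat_template` hook and checking that the dummy prompt ends up in the tokenized output. A self-contained, hedged sketch of that testing pattern with a dummy class (the names below are hypothetical, not the transformers API):

```python
class DummyProcessor:
    """Minimal stand-in: a template method that calls an overridable preprocessing hook."""

    def _process_messages(self, conversation):
        return conversation  # default: pass messages through unchanged

    def apply_chat_template(self, conversation):
        conversation = self._process_messages(conversation)
        return " ".join(turn["text"] for turn in conversation)


processor = DummyProcessor()
# Override the hook on the instance, just like the test overrides the real processor's hook.
processor._process_messages = lambda conversation: [{"text": "Dummy prompt for preprocess testing"}]
assert "Dummy prompt" in processor.apply_chat_template([{"text": "What is shown in this video?"}])
```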
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
Also tests multi-image cases where one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
_ = model(**input_dict) # successful forward with no modifications
curr_input_dict = copy.deepcopy(input_dict)
# remove one image but leave the image token in text
patch_size = config.vision_config.patch_size
one_img_length = (self.model_tester.image_size**2) // (patch_size**2)
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-one_img_length:, ...]
curr_input_dict["image_grid_thw"] = curr_input_dict["image_grid_thw"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:one_img_length]
image_grid_thw = curr_input_dict["image_grid_thw"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(
input_ids=input_ids,
pixel_values=pixel_values,
image_grid_thw=image_grid_thw,
)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0)
_ = model(
input_ids=input_ids,
pixel_values=pixel_values,
image_grid_thw=image_grid_thw,
)
|
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
Also tests multi-image cases where one prompt has multiple image tokens.
|
test_mismatching_num_image_tokens
|
python
|
huggingface/transformers
|
tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py
|
Apache-2.0
|
def test_apply_chat_template_video_special_processing(self):
"""
Tests that models can apply their own custom preprocessing to conversations.
"""
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest("Processor doesn't accept videos at input")
video_file_path = hf_hub_download(
repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
)
messages = [
[
{
"role": "user",
"content": [
{"type": "video", "path": video_file_path},
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
def _process_messages_for_chat_template(
conversation,
batch_images,
batch_videos,
batch_video_metadata,
**chat_template_kwargs,
):
# Let us just always return a dummy prompt
new_msg = [
[
{
"role": "user",
"content": [
{"type": "video"}, # no need to use path, video is loaded already by this moment
{"type": "text", "text": "Dummy prompt for preprocess testing"},
],
},
]
]
return new_msg
processor._process_messages_for_chat_template = _process_messages_for_chat_template
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
# Check with `in` because we don't know how each template formats the prompt with BOS/EOS/etc
formatted_text = processor.batch_decode(out_dict_with_video["input_ids"], skip_special_tokens=True)[0]
self.assertTrue("Dummy prompt for preprocess testing" in formatted_text)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 21960)
|
Tests that models can apply their own custom preprocessing to conversations.
|
test_apply_chat_template_video_special_processing
|
python
|
huggingface/transformers
|
tests/models/qwen2_5_vl/test_processor_qwen2_5_vl.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/qwen2_5_vl/test_processor_qwen2_5_vl.py
|
Apache-2.0
|
def test_load_balancing_loss(self):
r"""
Let's make sure we can actually compute the loss and do a backward on it.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.num_experts = 8
config.expert_interval = 2
config.output_router_logits = True
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = Qwen2MoeForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask)
self.assertEqual(result.router_logits[0].shape, (91, config.num_experts))
torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)
# First, we make sure that adding padding tokens doesn't change the loss
# loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
pad_length = 1000
# Add padding tokens (assume that pad_token_id=1) to input_ids
padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device)
padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left
padded_attention_mask = padded_input_ids.ne(1).to(torch_device)
padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)
# We make sure that the loss of including padding tokens != the loss without padding tokens
# if attention_mask=None --> we don't exclude padding tokens
include_padding_result = model(padded_input_ids, attention_mask=None)
# This is to mimic torch.testing.assert_not_close
self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item())
|
Let's make sure we can actually compute the loss and do a backward on it.
|
test_load_balancing_loss
|
python
|
huggingface/transformers
|
tests/models/qwen2_moe/test_modeling_qwen2_moe.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/qwen2_moe/test_modeling_qwen2_moe.py
|
Apache-2.0
|
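The aux-loss test above relies on a padding-invariance property: a loss averaged only over non-padded positions (via the attention mask) must not change when the inputs are left-padded. A small, hedged illustration of that property in isolation (not the MoE aux loss itself):

```python
import torch

torch.manual_seed(0)
scores = torch.rand(2, 5)            # per-token "losses" for 2 sequences of length 5
mask = torch.ones(2, 5)              # no padding
loss = (scores * mask).sum() / mask.sum()

# Left-pad with 3 positions; their mask entries are zero, so they must not contribute.
padded_scores = torch.cat([torch.rand(2, 3), scores], dim=1)
padded_mask = torch.cat([torch.zeros(2, 3), mask], dim=1)
padded_loss = (padded_scores * padded_mask).sum() / padded_mask.sum()

torch.testing.assert_close(loss, padded_loss)  # masked-out padding leaves the loss unchanged
```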
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
Also tests multi-image cases where one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
curr_input_dict = copy.deepcopy(input_dict)
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
patch_size = config.vision_config.patch_size
one_img_length = (self.model_tester.image_size**2) // (patch_size**2)
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-one_img_length:, ...]
curr_input_dict["image_grid_thw"] = curr_input_dict["image_grid_thw"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:one_img_length]
image_grid_thw = curr_input_dict["image_grid_thw"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw)
|
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
Also tests multi-image cases where one prompt has multiple image tokens.
|
test_mismatching_num_image_tokens
|
python
|
huggingface/transformers
|
tests/models/qwen2_vl/test_modeling_qwen2_vl.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/qwen2_vl/test_modeling_qwen2_vl.py
|
Apache-2.0
|
def test_forward_with_rope_deltas_cached(self):
"""
Tests that Qwen2-VL computes new rope deltas on every forward pass with a new set of inputs.
Rope deltas are cached during generation and reused for the decoding phase, but are not reset
automatically after generation ends. See https://github.com/huggingface/transformers/pull/36013 for more details.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
# Generate and make sure rope_deltas are not `None`
self.assertTrue(model.model.rope_deltas is None)
generation_output = model.generate(
**input_dict, max_new_tokens=4, return_dict_in_generate=True, output_logits=True
)
self.assertTrue(model.model.rope_deltas is not None)
# Now if we try to do forward pass, we should get new rope logits, because cache is not passed
forward_output = model(**input_dict)
torch.testing.assert_close(
generation_output.logits[0], forward_output.logits[:, -1, :], rtol=1e-4, atol=1e-4
)
|
Tests that Qwen2-VL computes new rope deltas on every forward pass with a new set of inputs.
Rope deltas are cached during generation and reused for the decoding phase, but are not reset
automatically after generation ends. See https://github.com/huggingface/transformers/pull/36013 for more details.
|
test_forward_with_rope_deltas_cached
|
python
|
huggingface/transformers
|
tests/models/qwen2_vl/test_modeling_qwen2_vl.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/qwen2_vl/test_modeling_qwen2_vl.py
|
Apache-2.0
|
def test_apply_chat_template_video_special_processing(self):
"""
Tests that models can apply their own custom preprocessing to conversations.
"""
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest("Processor doesn't accept videos at input")
video_file_path = hf_hub_download(
repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
)
messages = [
[
{
"role": "user",
"content": [
{"type": "video", "path": video_file_path},
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
def _process_messages_for_chat_template(
conversation,
batch_images,
batch_videos,
batch_video_metadata,
**chat_template_kwargs,
):
# Let us just always return a dummy prompt
new_msg = [
[
{
"role": "user",
"content": [
{"type": "video"}, # no need to use path, video is loaded already by this moment
{"type": "text", "text": "Dummy prompt for preprocess testing"},
],
},
]
]
return new_msg
processor._process_messages_for_chat_template = _process_messages_for_chat_template
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
# Check with `in` because we don't know how each template formats the prompt with BOS/EOS/etc
formatted_text = processor.batch_decode(out_dict_with_video["input_ids"], skip_special_tokens=True)[0]
self.assertTrue("Dummy prompt for preprocess testing" in formatted_text)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 21960)
|
Tests that models can apply their own custom preprocessing to conversations.
|
test_apply_chat_template_video_special_processing
|
python
|
huggingface/transformers
|
tests/models/qwen2_vl/test_processor_qwen2_vl.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/qwen2_vl/test_processor_qwen2_vl.py
|
Apache-2.0
|
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=20,
)
|
Tests that special vision tokens do not get truncated when `truncation=True` is set.
|
test_special_mm_token_truncation
|
python
|
huggingface/transformers
|
tests/models/qwen2_vl/test_processor_qwen2_vl.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/qwen2_vl/test_processor_qwen2_vl.py
|
Apache-2.0
|
def test_nested_input(self):
"""Tests that the processor can work with nested list where each video is a list of arrays"""
for video_processing_class in self.video_processor_list:
video_processing = video_processing_class(**self.video_processor_dict)
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="np"
)
# Test not batched input
video_inputs_nested = [list(video) for video in video_inputs]
encoded_videos = video_processing(video_inputs_nested[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
# Test batched
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
encoded_videos = video_processing(video_inputs_nested, return_tensors="pt")[self.input_name]
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
Tests that the processor can work with nested list where each video is a list of arrays
|
test_nested_input
|
python
|
huggingface/transformers
|
tests/models/qwen2_vl/test_video_processing_qwen2_vl.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/qwen2_vl/test_video_processing_qwen2_vl.py
|
Apache-2.0
|
def test_load_balancing_loss(self):
r"""
Let's make sure we can actually compute the loss and do a backward on it.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.num_experts = 8
config.expert_interval = 2
config.output_router_logits = True
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = Qwen3MoeForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask)
self.assertEqual(result.router_logits[0].shape, (91, config.num_experts))
torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)
# First, we make sure that adding padding tokens doesn't change the loss
# loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
pad_length = 1000
# Add padding tokens (assume that pad_token_id=1) to input_ids
padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device)
padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left
padded_attention_mask = padded_input_ids.ne(1).to(torch_device)
padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)
# We make sure that the loss of including padding tokens != the loss without padding tokens
# if attention_mask=None --> we don't exclude padding tokens
include_padding_result = model(padded_input_ids, attention_mask=None)
# This is to mimic torch.testing.assert_not_close
self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item())
|
Let's make sure we can actually compute the loss and do a backward on it.
|
test_load_balancing_loss
|
python
|
huggingface/transformers
|
tests/models/qwen3_moe/test_modeling_qwen3_moe.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/qwen3_moe/test_modeling_qwen3_moe.py
|
Apache-2.0
|
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b aren't both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
|
If the tensors are not close, or a and b aren't both tensors, raise a nice AssertionError.
|
_assert_tensors_equal
|
python
|
huggingface/transformers
|
tests/models/rag/test_modeling_rag.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/rag/test_modeling_rag.py
|
Apache-2.0
|
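The helper above hand-rolls a tensor comparison with `torch.allclose` plus a custom error message. A hedged aside: `torch.testing.assert_close` offers similar behavior out of the box (a design note, not the repo's code):

```python
import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = a + 1e-13
torch.testing.assert_close(a, b, rtol=0, atol=1e-12)  # raises a descriptive AssertionError when tensors differ
```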
def require_retrieval(test_case):
"""
Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
[`RagRetriever`].
These tests are skipped when respective libraries are not installed.
"""
if not (is_torch_available() and is_datasets_available() and is_faiss_available()):
test_case = unittest.skip(reason="test requires PyTorch, datasets and faiss")(test_case)
return test_case
|
Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
[`RagRetriever`].
These tests are skipped when respective libraries are not installed.
|
require_retrieval
|
python
|
huggingface/transformers
|
tests/models/rag/test_modeling_rag.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/rag/test_modeling_rag.py
|
Apache-2.0
|
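The decorator above gates tests on optional dependencies. A hedged, generic sketch of the same pattern (the `require_numpy` name is made up for illustration):

```python
import unittest


def require_numpy(test_case):
    """Skip the decorated test when numpy is not installed (same pattern as require_retrieval above)."""
    try:
        import numpy  # noqa: F401

        numpy_available = True
    except ImportError:
        numpy_available = False
    if not numpy_available:
        test_case = unittest.skip(reason="test requires numpy")(test_case)
    return test_case
```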
def require_retrieval(test_case):
"""
Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
[`RagRetriever`].
These tests are skipped when respective libraries are not installed.
"""
if not (is_tf_available() and is_datasets_available() and is_faiss_available()):
test_case = unittest.skip("test requires tensorflow, datasets and faiss")(test_case)
return test_case
|
Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
[`RagRetriever`].
These tests are skipped when respective libraries are not installed.
|
require_retrieval
|
python
|
huggingface/transformers
|
tests/models/rag/test_modeling_tf_rag.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/rag/test_modeling_tf_rag.py
|
Apache-2.0
|
def test_create_position_ids_respects_padding_index(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = RobertaEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
|
This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
|
test_create_position_ids_respects_padding_index
|
python
|
huggingface/transformers
|
tests/models/roberta/test_modeling_roberta.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/roberta/test_modeling_roberta.py
|
Apache-2.0
|
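The RoBERTa test above asserts a specific position-id layout. A hedged re-derivation of the behavior it checks (assumed to mirror `create_position_ids_from_input_ids`): positions count up from `padding_idx + 1`, and padding tokens keep position `padding_idx`.

```python
import torch


def position_ids_from_input_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()           # 1 for real tokens, 0 for padding
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx  # first real token lands at padding_idx + 1


padding_idx = 1
print(position_ids_from_input_ids(torch.tensor([[12, 31, 13, padding_idx]]), padding_idx))
# tensor([[2, 3, 4, 1]]) -- the same expected_positions the test asserts
```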
def test_create_position_ids_from_inputs_embeds(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = RobertaEmbeddings(config=config)
inputs_embeds = torch.empty(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
|
This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
|
test_create_position_ids_from_inputs_embeds
|
python
|
huggingface/transformers
|
tests/models/roberta/test_modeling_roberta.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/roberta/test_modeling_roberta.py
|
Apache-2.0
|
def test_create_position_ids_respects_padding_index(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaPreLayerNormEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = RobertaPreLayerNormEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
|
This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaPreLayerNormEmbeddings.padding_idx + 1
|
test_create_position_ids_respects_padding_index
|
python
|
huggingface/transformers
|
tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py
|
Apache-2.0
|
def test_create_position_ids_from_inputs_embeds(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaPreLayerNormEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = RobertaPreLayerNormEmbeddings(config=config)
inputs_embeds = torch.empty(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
|
This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaPreLayerNormEmbeddings.padding_idx + 1
|
test_create_position_ids_from_inputs_embeds
|
python
|
huggingface/transformers
|
tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py
|
Apache-2.0
|
def assertInterval(self, member, container, msg=None):
r"""
Simple utility function to check if a member is inside an interval.
"""
if isinstance(member, torch.Tensor):
max_value, min_value = member.max().item(), member.min().item()
elif isinstance(member, list) or isinstance(member, tuple):
max_value, min_value = max(member), min(member)
if not isinstance(container, list):
raise TypeError("container should be a list or tuple")
elif len(container) != 2:
raise ValueError("container should have 2 elements")
expected_min, expected_max = container
is_inside_interval = (min_value >= expected_min) and (max_value <= expected_max)
if not is_inside_interval:
standardMsg = f"{safe_repr(member)} not found in {safe_repr(container)}"
self.fail(self._formatMessage(msg, standardMsg))
|
Simple utility function to check if a member is inside an interval.
|
assertInterval
|
python
|
huggingface/transformers
|
tests/models/rwkv/test_modeling_rwkv.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/rwkv/test_modeling_rwkv.py
|
Apache-2.0
|
def test_attention_outputs(self):
r"""
Overrides the test_attention_outputs test, as the attention outputs of Rwkv differ from those of other models:
they have shape `batch_size, seq_len, hidden_size`.
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
batch_size = inputs["input_ids"].shape[0]
with torch.no_grad():
outputs = model(**inputs)
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
batch_size = inputs["input_ids"].shape[0]
with torch.no_grad():
outputs = model(**inputs)
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[batch_size, seq_len, config.hidden_size],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
batch_size = inputs["input_ids"].shape[0]
with torch.no_grad():
outputs = model(**inputs)
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[batch_size, seq_len, config.hidden_size],
)
|
Overrides the test_attention_outputs test, as the attention outputs of Rwkv differ from those of other models:
they have shape `batch_size, seq_len, hidden_size`.
|
test_attention_outputs
|
python
|
huggingface/transformers
|
tests/models/rwkv/test_modeling_rwkv.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/rwkv/test_modeling_rwkv.py
|
Apache-2.0
|
def test_sdpa_can_dispatch_composite_models(self):
"""
Tests whether composite models dispatch correctly to SDPA/eager attention when requested at load time.
This test checks only layer names, as SDPA layers are usually called "SdpaAttention".
In contrast to the above test, this one checks if "config._attn_implementation" is a dict after the model
is loaded, because we manually replicate the requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info.
The test tries to cover the most general cases of composite models, i.e. VLMs with vision and text configs. Any model
that has a different set of sub-configs has to overwrite this test.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
if not self._is_composite:
self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa")
model_sdpa = model_sdpa.eval().to(torch_device)
model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
model_eager = model_eager.eval().to(torch_device)
# Root model determines SDPA support
attn_impl = "sdpa" if model._supports_sdpa else "eager"
# Check config propagation to submodels that support it
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
self.assertTrue(model_sdpa.vision_encoder.config._attn_implementation == attn_impl)
self.assertTrue(model_sdpa.mask_decoder.config._attn_implementation == attn_impl)
self.assertTrue(model_eager.config._attn_implementation == "eager")
self.assertTrue(model_eager.vision_encoder.config._attn_implementation == "eager")
self.assertTrue(model_eager.mask_decoder.config._attn_implementation == "eager")
# Verify SDPA/eager layer presence
has_sdpa = False
for name, submodule in model_sdpa.named_modules():
class_name = submodule.__class__.__name__
if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
has_sdpa = True
break
if not has_sdpa and attn_impl == "sdpa":
raise ValueError("The SDPA model should have SDPA attention layers")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
raise ValueError("The eager model should not have SDPA attention layers")
|
Tests whether composite models dispatch correctly to SDPA/eager attention when requested at load time.
This test checks only layer names, as SDPA layers are usually called "SdpaAttention".
In contrast to the above test, this one checks if "config._attn_implementation" is a dict after the model
is loaded, because we manually replicate the requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info.
The test tries to cover the most general cases of composite models, i.e. VLMs with vision and text configs. Any model
that has a different set of sub-configs has to overwrite this test.
|
test_sdpa_can_dispatch_composite_models
|
python
|
huggingface/transformers
|
tests/models/sam/test_modeling_sam.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/sam/test_modeling_sam.py
|
Apache-2.0
|
def prepare_mask_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
mask_inputs = [np.random.randint(255, size=(30, 400), dtype=np.uint8)]
mask_inputs = [Image.fromarray(x) for x in mask_inputs]
return mask_inputs
|
This function prepares a list of random masks as PIL images.
|
prepare_mask_inputs
|
python
|
huggingface/transformers
|
tests/models/sam/test_processor_sam.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/sam/test_processor_sam.py
|
Apache-2.0
|
def test_rle_encoding(self):
"""
Test the run-length encoding function.
"""
# Test that a mask of all zeros returns a single run [height * width].
input_mask = torch.zeros((1, 2, 2), dtype=torch.long) # shape: 1 x 2 x 2
rle = _mask_to_rle_pytorch(input_mask)
self.assertEqual(len(rle), 1)
self.assertEqual(rle[0]["size"], [2, 2])
# For a 2x2 all-zero mask, we expect a single run of length 4:
self.assertEqual(rle[0]["counts"], [4])
# Test that a mask of all ones returns [0, height * width].
input_mask = torch.ones((1, 2, 2), dtype=torch.long) # shape: 1 x 2 x 2
rle = _mask_to_rle_pytorch(input_mask)
self.assertEqual(len(rle), 1)
self.assertEqual(rle[0]["size"], [2, 2])
# For a 2x2 all-one mask, we expect two runs: [0, 4].
self.assertEqual(rle[0]["counts"], [0, 4])
# Test a mask with mixed 0s and 1s to ensure the run-length encoding is correct.
# Example mask:
# Row 0: [0, 1]
# Row 1: [1, 1]
# This is shape (1, 2, 2).
# Flattened in Fortran order -> [0, 1, 1, 1].
# The RLE for [0,1,1,1] is [1, 3].
input_mask = torch.tensor([[[0, 1], [1, 1]]], dtype=torch.long)
rle = _mask_to_rle_pytorch(input_mask)
self.assertEqual(len(rle), 1)
self.assertEqual(rle[0]["size"], [2, 2])
self.assertEqual(rle[0]["counts"], [1, 3]) # 1 zero, followed by 3 ones
|
Test the run-length encoding function.
|
test_rle_encoding
|
python
|
huggingface/transformers
|
tests/models/sam/test_processor_sam.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/sam/test_processor_sam.py
|
Apache-2.0
|
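The RLE test above encodes masks in column-major (Fortran) order, with counts starting from the initial run of zeros. A hedged numpy sketch of that convention for a single 2-D mask (not the `_mask_to_rle_pytorch` implementation itself), reproducing the three expected outputs from the test:

```python
import numpy as np


def mask_to_rle(mask: np.ndarray) -> dict:
    flat = mask.flatten(order="F")                       # column-major, as in the test's comments
    change = np.flatnonzero(flat[1:] != flat[:-1]) + 1   # indices where the value flips
    counts = np.diff(np.concatenate([[0], change, [flat.size]])).tolist()
    if flat[0] == 1:                                     # counts must start with the length of the zero run
        counts = [0] + counts
    return {"size": list(mask.shape), "counts": counts}


assert mask_to_rle(np.zeros((2, 2), dtype=np.int64))["counts"] == [4]
assert mask_to_rle(np.ones((2, 2), dtype=np.int64))["counts"] == [0, 4]
assert mask_to_rle(np.array([[0, 1], [1, 1]]))["counts"] == [1, 3]
```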
def test_rle_encoding(self):
"""
Test the run-length encoding function.
"""
# Test that a mask of all zeros returns a single run [height * width].
input_mask = tf.zeros((1, 2, 2), dtype=tf.int64) # shape: 1 x 2 x 2
rle = _mask_to_rle_tf(input_mask)
self.assertEqual(len(rle), 1)
self.assertEqual(rle[0]["size"], [2, 2])
# For a 2x2 all-zero mask, we expect a single run of length 4:
self.assertEqual(rle[0]["counts"], [4])
# Test that a mask of all ones returns [0, height * width].
input_mask = tf.ones((1, 2, 2), dtype=tf.int64) # shape: 1 x 2 x 2
rle = _mask_to_rle_tf(input_mask)
self.assertEqual(len(rle), 1)
self.assertEqual(rle[0]["size"], [2, 2])
# For a 2x2 all-one mask, we expect two runs: [0, 4].
self.assertEqual(rle[0]["counts"], [0, 4])
# Test a mask with mixed 0s and 1s to ensure the run-length encoding is correct.
# Example mask:
# Row 0: [0, 1]
# Row 1: [1, 1]
# This is shape (1, 2, 2).
# Flattened in Fortran order -> [0, 1, 1, 1].
# The RLE for [0,1,1,1] is [1, 3].
input_mask = tf.constant([[[0, 1], [1, 1]]], dtype=tf.int64)
rle = _mask_to_rle_tf(input_mask)
self.assertEqual(len(rle), 1)
self.assertEqual(rle[0]["size"], [2, 2])
self.assertEqual(rle[0]["counts"], [1, 3]) # 1 zero, followed by 3 ones
|
Test the run-length encoding function.
|
test_rle_encoding
|
python
|
huggingface/transformers
|
tests/models/sam/test_processor_sam.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/sam/test_processor_sam.py
|
Apache-2.0
|
def test_sdpa_can_dispatch_composite_models(self):
"""
Tests whether composite models dispatch correctly to SDPA/eager attention when requested at load time.
This test checks only layer names, as SDPA layers are usually called "SdpaAttention".
In contrast to the above test, this one checks if "config._attn_implementation" is a dict after the model
is loaded, because we manually replicate the requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info.
The test tries to cover the most general cases of composite models, i.e. VLMs with vision and text configs. Any model
that has a different set of sub-configs has to overwrite this test.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
if not self._is_composite:
self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa")
model_sdpa = model_sdpa.eval().to(torch_device)
model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
model_eager = model_eager.eval().to(torch_device)
# Root model determines SDPA support
attn_impl = "sdpa" if model._supports_sdpa else "eager"
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
self.assertTrue(model_sdpa.vision_encoder.config._attn_implementation == attn_impl)
self.assertTrue(model_sdpa.mask_decoder.config._attn_implementation == attn_impl)
self.assertTrue(model_eager.config._attn_implementation == "eager")
self.assertTrue(model_eager.vision_encoder.config._attn_implementation == "eager")
self.assertTrue(model_eager.mask_decoder.config._attn_implementation == "eager")
# Verify SDPA/eager layer presence
has_sdpa = False
for name, submodule in model_sdpa.named_modules():
class_name = submodule.__class__.__name__
if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
has_sdpa = True
break
if not has_sdpa and attn_impl == "sdpa":
raise ValueError("The SDPA model should have SDPA attention layers")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
raise ValueError("The eager model should not have SDPA attention layers")
|
Tests whether composite models dispatch correctly to SDPA/eager attention when requested at load time.
This test checks only layer names, as SDPA layers are usually called "SdpaAttention".
In contrast to the above test, this one checks if "config._attn_implementation" is a dict after the model
is loaded, because we manually replicate the requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info.
The test tries to cover the most general cases of composite models, i.e. VLMs with vision and text configs. Any model
that has a different set of sub-configs has to overwrite this test.
|
test_sdpa_can_dispatch_composite_models
|
python
|
huggingface/transformers
|
tests/models/sam_hq/test_modeling_sam_hq.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/sam_hq/test_modeling_sam_hq.py
|
Apache-2.0
|
def prepare_mask_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
mask_inputs = [np.random.randint(255, size=(30, 400), dtype=np.uint8)]
mask_inputs = [Image.fromarray(x) for x in mask_inputs]
return mask_inputs
|
This function prepares a list of random masks as PIL images.
|
prepare_mask_inputs
|
python
|
huggingface/transformers
|
tests/models/sam_hq/test_processor_samhq.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/sam_hq/test_processor_samhq.py
|
Apache-2.0
|
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
batch_size = batch_size if batch_size is not None else self.batch_size
min_resolution = min_resolution if min_resolution is not None else self.min_resolution
max_resolution = max_resolution if max_resolution is not None else self.max_resolution
num_channels = num_channels if num_channels is not None else self.num_channels
num_images = num_images if num_images is not None else self.num_images
images_list = []
for i in range(batch_size):
images = []
for j in range(num_images):
if equal_resolution:
width = height = max_resolution
else:
# To avoid getting image width/height 0
if size_divisor is not None:
# If `size_divisor` is defined, the image needs to have width/size >= `size_divisor`
min_resolution = max(size_divisor, min_resolution)
width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
images_list.append(images)
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]
if torchify:
images_list = [[torch.from_numpy(image) for image in images] for images in images_list]
if numpify:
# Numpy images are typically in channels last format
images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list]
return images_list
|
This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
|
prepare_image_inputs
|
python
|
huggingface/transformers
|
tests/models/smolvlm/test_image_processing_smolvlm.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/smolvlm/test_image_processing_smolvlm.py
|
Apache-2.0
|
def test_text_only_inference(self):
"""Test that the processor works correctly with text-only input."""
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
text = "This is a simple text without images."
inputs = processor(text=text)
tokenized_sentence = processor.tokenizer(text, add_special_tokens=False)
expected_input_ids = [tokenized_sentence["input_ids"]]
self.assertEqual(inputs["input_ids"], expected_input_ids)
self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])])
self.assertTrue("pixel_values" not in inputs)
self.assertTrue("pixel_attention_mask" not in inputs)
# Test batch of texts without image tokens
texts = ["First text.", "Second piece of text."]
batch_inputs = processor(text=texts, padding=True)
tokenized_1 = processor.tokenizer(texts[0], add_special_tokens=False)
tokenized_2 = processor.tokenizer(texts[1], add_special_tokens=False)
expected_1 = tokenized_1["input_ids"]
expected_2 = tokenized_2["input_ids"]
# Pad the shorter sequence
pad_len = len(expected_2) - len(expected_1)
if pad_len > 0:
padded_expected_1 = [self.padding_token_id] * pad_len + expected_1
expected_attention_1 = [0] * pad_len + [1] * len(expected_1)
self.assertEqual(batch_inputs["input_ids"], [padded_expected_1, expected_2])
self.assertEqual(batch_inputs["attention_mask"], [expected_attention_1, [1] * len(expected_2)])
else:
pad_len = -pad_len
padded_expected_2 = [self.padding_token_id] * pad_len + expected_2
expected_attention_2 = [0] * pad_len + [1] * len(expected_2)
self.assertEqual(batch_inputs["input_ids"], [expected_1, padded_expected_2])
self.assertEqual(batch_inputs["attention_mask"], [[1] * len(expected_1), expected_attention_2])
|
Test that the processor works correctly with text-only input.
|
test_text_only_inference
|
python
|
huggingface/transformers
|
tests/models/smolvlm/test_processor_smolvlm.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/smolvlm/test_processor_smolvlm.py
|
Apache-2.0
|
def test_missing_images_error(self):
"""Test that appropriate error is raised when images are referenced but not provided."""
processor = self.get_processor()
# Test single text with image token but no image
text = "Let me show you this image: <image> What do you think?"
with self.assertRaises(ValueError) as context:
processor(text=text)
self.assertTrue("tokens in the text but no images/videos were passed" in str(context.exception))
# Test batch with image tokens but no images
texts = [
"First text with <image> token.",
"Second text <image> with token.",
]
with self.assertRaises(ValueError) as context:
processor(text=texts)
self.assertTrue("tokens in the text but no images/videos were passed" in str(context.exception))
# Test with None as Images
with self.assertRaises(ValueError) as context:
processor(text=text, images=None)
self.assertTrue("tokens in the text but no images/videos were passed" in str(context.exception))
with self.assertRaises(ValueError) as context:
processor(text=texts, images=None)
self.assertTrue("tokens in the text but no images/videos were passed" in str(context.exception))
|
Test that appropriate error is raised when images are referenced but not provided.
|
test_missing_images_error
|
python
|
huggingface/transformers
|
tests/models/smolvlm/test_processor_smolvlm.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/smolvlm/test_processor_smolvlm.py
|
Apache-2.0
|
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
image_input = [[image_input[0]], [image_input[1]]]
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=20,
)
|
Tests that special vision tokens do not get truncated when `truncation=True` is set.
|
test_special_mm_token_truncation
|
python
|
huggingface/transformers
|
tests/models/smolvlm/test_processor_smolvlm.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/smolvlm/test_processor_smolvlm.py
|
Apache-2.0
|
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for stride in self.conv_stride:
input_lengths = (input_lengths // stride) - 1
return input_lengths
|
Computes the output length of the convolutional layers
|
get_subsampled_output_lengths
|
python
|
huggingface/transformers
|
tests/models/speecht5/test_modeling_speecht5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/speecht5/test_modeling_speecht5.py
|
Apache-2.0
|
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
|
Computes the output length of the convolutional layers
|
get_subsampled_output_lengths
|
python
|
huggingface/transformers
|
tests/models/speech_to_text/test_modeling_speech_to_text.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/speech_to_text/test_modeling_speech_to_text.py
|
Apache-2.0
|
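The helper above reduces lengths with the recurrence `(L - 1) // 2 + 1` per layer. That is exactly the output length of a 1-D convolution with kernel size 3, stride 2 and padding 1 (an assumption about the subsampler's layer shape, used here only to sanity-check the formula):

```python
import torch
from torch import nn

conv = nn.Conv1d(in_channels=4, out_channels=4, kernel_size=3, stride=2, padding=1)
for length in (7, 20, 33, 100):
    out = conv(torch.rand(1, 4, length))
    assert out.shape[-1] == (length - 1) // 2 + 1  # matches the recurrence used in the test helpers
```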
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for _ in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
|
Computes the output length of the convolutional layers
|
get_subsampled_output_lengths
|
python
|
huggingface/transformers
|
tests/models/speech_to_text/test_modeling_tf_speech_to_text.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/speech_to_text/test_modeling_tf_speech_to_text.py
|
Apache-2.0
|
def test_batching_equivalence(self):
"""
Overwrites ModelTesterMixin.test_batching_equivalence, since SuperGlue returns `matching_scores` tensors full of
zeros, which causes the original test to fail because the cosine_similarity of two zero tensors is 0.
Discussed here: https://github.com/huggingface/transformers/pull/29886#issuecomment-2481539481
"""
def recursive_check(batched_object, single_row_object, model_name, key):
if isinstance(batched_object, (list, tuple)):
for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
recursive_check(batched_object_value, single_row_object_value, model_name, key)
elif isinstance(batched_object, dict):
for batched_object_value, single_row_object_value in zip(
batched_object.values(), single_row_object.values()
):
recursive_check(batched_object_value, single_row_object_value, model_name, key)
# do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects
elif batched_object is None or not isinstance(batched_object, torch.Tensor):
return
elif batched_object.dim() == 0:
return
else:
# indexing the first element does not always work
# e.g. models that output similarity scores of size (N, M) would need to index [0, 0]
slice_ids = [slice(0, index) for index in single_row_object.shape]
batched_row = batched_object[slice_ids]
self.assertFalse(
torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}"
)
self.assertFalse(
torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}"
)
self.assertTrue(
(equivalence(batched_row, single_row_object)) <= 1e-03,
msg=(
f"Batched and Single row outputs are not equal in {model_name} for key={key}. "
f"Difference={equivalence(batched_row, single_row_object)}."
),
)
def equivalence(tensor1, tensor2):
return torch.max(torch.abs(tensor1 - tensor2))
config, batched_input = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config.output_hidden_states = True
model_name = model_class.__name__
batched_input_prepared = self._prepare_for_class(batched_input, model_class)
model = model_class(config).to(torch_device).eval()
batch_size = self.model_tester.batch_size
single_row_input = {}
for key, value in batched_input_prepared.items():
if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0:
# e.g. musicgen has inputs of size (bs*codebooks). in most cases value.shape[0] == batch_size
single_batch_shape = value.shape[0] // batch_size
single_row_input[key] = value[:single_batch_shape]
else:
single_row_input[key] = value
with torch.no_grad():
model_batched_output = model(**batched_input_prepared)
model_row_output = model(**single_row_input)
if isinstance(model_batched_output, torch.Tensor):
model_batched_output = {"model_output": model_batched_output}
model_row_output = {"model_output": model_row_output}
for key in model_batched_output:
recursive_check(model_batched_output[key], model_row_output[key], model_name, key)
|
Overwrites ModelTesterMixin.test_batching_equivalence, since SuperGlue returns `matching_scores` tensors full of
zeros, which causes the original test to fail because the cosine_similarity of two zero tensors is 0.
Discussed here: https://github.com/huggingface/transformers/pull/29886#issuecomment-2481539481
|
test_batching_equivalence
|
python
|
huggingface/transformers
|
tests/models/superglue/test_modeling_superglue.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/superglue/test_modeling_superglue.py
|
Apache-2.0
|
def create_and_check_generate_with_past_key_values(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
r"""
This test does not pass for small models due to precision errors. It is therefore only run for slightly larger models.
"""
model = (
SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8").to(torch_device).eval()
)
torch.manual_seed(0)
output_without_past_cache = model.generate(
input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False
)
torch.manual_seed(0)
output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True)
self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))
|
This test does not pass for small models due to precision errors. It is therefore only run for slightly larger models.
|
create_and_check_generate_with_past_key_values
|
python
|
huggingface/transformers
|
tests/models/switch_transformers/test_modeling_switch_transformers.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/switch_transformers/test_modeling_switch_transformers.py
|
Apache-2.0
|
def test_equivalency_balancy_loss(self):
r"""
        This test checks if the load balancing loss is correctly implemented
        as in the original implementation of the Switch Transformer.
"""
router_probs = torch.Tensor(
[
[0.35490513, 0.60419905],
[0.4275843, 0.23061597],
[0.32985854, 0.43953657],
[0.25099766, 0.27730572],
[0.7678207, 0.71474564],
]
)
expert_indices = torch.Tensor([[0], [1], [1], [0], [0]]).to(torch.int32)
loss = load_balancing_loss_func(router_probs, expert_indices)
self.assertAlmostEqual(loss.item(), 0.8741045, places=5)
|
This test checks if the load balancing loss is correctly implemented
as in the original implementation of the Switch Transformer.
|
test_equivalency_balancy_loss
|
python
|
huggingface/transformers
|
tests/models/switch_transformers/test_modeling_switch_transformers.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/switch_transformers/test_modeling_switch_transformers.py
|
Apache-2.0
|
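For reference, the auxiliary load-balancing loss asserted above follows the Switch Transformer formula num_experts**2 * mean(fraction_of_tokens_per_expert * mean_router_prob_per_expert). A from-scratch sketch under that assumption (this is not the library's `load_balancing_loss_func`, just the formula applied to the same inputs):

import torch

router_probs = torch.tensor(
    [
        [0.35490513, 0.60419905],
        [0.4275843, 0.23061597],
        [0.32985854, 0.43953657],
        [0.25099766, 0.27730572],
        [0.7678207, 0.71474564],
    ]
)
expert_indices = torch.tensor([0, 1, 1, 0, 0])

num_experts = router_probs.shape[-1]
# fraction of tokens dispatched to each expert: [0.6, 0.4]
tokens_per_expert = torch.nn.functional.one_hot(expert_indices, num_experts).float().mean(dim=0)
# mean router probability assigned to each expert
router_prob_per_expert = router_probs.mean(dim=0)
aux_loss = num_experts**2 * torch.mean(tokens_per_expert * router_prob_per_expert)
print(aux_loss.item())  # ~0.8741044, matching the asserted value to 5 decimal places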
def test_equivalency_router_z_loss(self):
r"""
This test checks if the router z loss is correctly implemented
        as in the original implementation of the Switch Transformer.
"""
logits = torch.Tensor(
[
[
[-4.2124424, 3.891939, -3.6481273, 1.8849981],
[0.32625437, 2.918651, 0.84758997, -4.556842],
[-3.32062, 4.6977115, -0.15439987, 0.44086337],
[3.4467149, 4.3436565, -4.7224274, -4.264637],
[-2.224406, -2.5318158, -1.3832569, 1.1891162],
[-2.320062, -0.44705987, 4.289819, -0.00662684],
],
[
[0.99470854, -0.6992364, 0.25503993, 4.2952085],
[3.5937333, -3.2408535, -4.298278, 4.426601],
[0.7669008, 2.6588762, 2.4505413, 4.6051874],
[0.23330331, -3.0845237, 0.6262374, -2.9865491],
[0.7595146, -2.1099675, -4.155346, -2.8326452],
[2.3771453, 1.004138, -3.1781673, 0.7581556],
],
]
)
loss = router_z_loss_func(logits)
self.assertAlmostEqual(loss.item(), 13.786719, places=5)
|
This test checks if the router z loss is correctly implemented
as in the original implementation of the Switch Transformer.
|
test_equivalency_router_z_loss
|
python
|
huggingface/transformers
|
tests/models/switch_transformers/test_modeling_switch_transformers.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/switch_transformers/test_modeling_switch_transformers.py
|
Apache-2.0
|
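The router z-loss asserted above is the mean squared log-partition function of the router logits, i.e. the mean over tokens of logsumexp(logits, dim=-1) ** 2. A from-scratch sketch with toy shapes (plugging in the logits from the test reproduces 13.786719):

import torch

router_logits = torch.randn(2, 6, 4)  # (num_groups, tokens_per_group, num_experts)
log_z = torch.logsumexp(router_logits, dim=-1)
z_loss = torch.mean(log_z**2)
print(z_loss)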
def test_equivalency_token_chose_masked_router(self):
r"""
        This test checks the equivalency between the `SwitchTransformersTop1Router`
        and the original implementation (reference: TODO: provide link).
"""
input_tokens = torch.Tensor(
[
[
[0.6433916, 0.18188512, 0.02240455, 0.563781],
[0.5526401, 0.0958724, 0.34253013, 0.03644359],
[0.08744538, 0.7909105, 0.35205448, 0.53364205],
],
[
[0.02900076, 0.4168595, 0.5802449, 0.91486526],
[0.27414513, 0.14991808, 0.9383501, 0.5209162],
[0.51207185, 0.90618336, 0.7309413, 0.95533276],
],
]
)
model = SwitchTransformersTop1Router(self.config)
model.classifier.weight = torch.nn.Parameter(
torch.Tensor(
[
[0.02008116, 0.00620062],
[-0.00811031, -0.00031623],
[-0.03542127, 0.02703803],
[0.02335377, -0.02971946],
],
).t()
)
expert_index, _, router_logits = model(input_tokens)
router_probs = torch.softmax(router_logits, dim=-1)
router_z_loss = router_z_loss_func(router_logits)
auxiliary_loss = load_balancing_loss_func(router_probs, torch.argmax(expert_index, dim=-1))
self.assertAlmostEqual(auxiliary_loss.item(), 1.000308, places=5)
self.assertAlmostEqual(router_z_loss.item(), 0.4789799, places=5)
# self.assertTrue(torch.allclose(expert_index.bool().unsqueeze(-1), expected_dispatch_mask))
|
This test checks the equivalency between the `SwitchTransformersTop1Router`
and the original implementation (reference: TODO: provide link).
|
test_equivalency_token_chose_masked_router
|
python
|
huggingface/transformers
|
tests/models/switch_transformers/test_modeling_switch_transformers.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/switch_transformers/test_modeling_switch_transformers.py
|
Apache-2.0
|
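A toy sketch of the top-1 ("switch") routing pattern exercised above: a linear classifier scores each token per expert, softmax produces the router probabilities, and every token is dispatched to its single highest-probability expert. Shapes and modules below are illustrative, not the library's router:

import torch

batch, seq_len, hidden, num_experts = 2, 3, 4, 2
hidden_states = torch.rand(batch, seq_len, hidden)
classifier = torch.nn.Linear(hidden, num_experts, bias=False)

router_logits = classifier(hidden_states)                     # (batch, seq, experts)
router_probs = torch.softmax(router_logits, dim=-1)
expert_index = torch.argmax(router_probs, dim=-1)             # chosen expert per token
dispatch_mask = torch.nn.functional.one_hot(expert_index, num_experts)
print(dispatch_mask.shape)                                    # torch.Size([2, 3, 2])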
def test_small_logits(self):
r"""
        Logits test to check implementation consistency between the `t5x` and
        `transformers` implementations of Switch-C Transformers. We only check the logits
of the first batch.
"""
model = SwitchTransformersModel.from_pretrained("google/switch-base-8", torch_dtype=torch.bfloat16).to(
torch_device
)
input_ids = torch.ones((32, 64), dtype=torch.long).to(torch_device)
decoder_input_ids = torch.ones((32, 64), dtype=torch.long).to(torch_device)
# fmt: off
EXPECTED_MEAN_LOGITS = torch.Tensor(
[
-0.204102, -0.193359, 0.523438, -0.296875, 0.108887,
0.0211182, 0.605469, -0.100586, -0.0551758, 0.296875,
0.0090332, 0.174805, 0.139648, -0.170898, -0.0981445,
0.0245361, 0.0373535, 0.050293, -0.212891, 0.129883,
0.390625, -0.203125, -0.122559, -0.180664, 0.0437012,
-0.349609, -0.0250244, -0.104004, -0.15918, -0.133789
]
).to(torch.bfloat16)
# fmt: on
hf_logits = model(input_ids, decoder_input_ids=decoder_input_ids).last_hidden_state.cpu()
hf_logits = hf_logits[0, 0, :30]
torch.testing.assert_close(hf_logits, EXPECTED_MEAN_LOGITS, rtol=6e-3, atol=9e-3)
|
Logits test to check implementation consistency between the `t5x` and
`transformers` implementations of Switch-C Transformers. We only check the logits
of the first batch.
|
test_small_logits
|
python
|
huggingface/transformers
|
tests/models/switch_transformers/test_modeling_switch_transformers.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/switch_transformers/test_modeling_switch_transformers.py
|
Apache-2.0
|
def test_token_dropping(self):
r"""
This test checks if the token dropping actually drops tokens.
"""
config = SwitchTransformersConfig(expert_capacity=0) # we drop everything
moe = SwitchTransformersSparseMLP(config)
dropped_token_results = moe(torch.randn(2, 3, 768))[0]
assert (dropped_token_results == 0).all(), f"Some tokens not dropped: {dropped_token_results}."
|
This test checks if the token dropping actually drops tokens.
|
test_token_dropping
|
python
|
huggingface/transformers
|
tests/models/switch_transformers/test_modeling_switch_transformers.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/switch_transformers/test_modeling_switch_transformers.py
|
Apache-2.0
|
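The token-dropping test relies on the expert-capacity mechanism: each expert accepts at most `expert_capacity` tokens, and overflow tokens keep a zero expert output (with capacity 0, everything is dropped). A toy sketch of that bookkeeping, not the library's `SwitchTransformersSparseMLP`:

import torch

num_tokens, num_experts, expert_capacity = 6, 2, 2
expert_index = torch.tensor([0, 0, 0, 1, 1, 1])  # router decision per token

one_hot = torch.nn.functional.one_hot(expert_index, num_experts)   # (tokens, experts)
# 1-based position of each token in its expert's queue
position_in_expert = torch.cumsum(one_hot, dim=0) * one_hot
# tokens past the capacity are dropped
keep = (position_in_expert <= expert_capacity) & one_hot.bool()
print(keep.any(dim=-1))  # tensor([ True,  True, False,  True,  True, False])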
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small")
tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
input_ids = tokenizer("Hello there", return_tensors="np").input_ids
labels = tokenizer("Hi I am", return_tensors="np").input_ids
decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -19.0845
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_integration_test
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_flax_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_flax_t5.py
|
Apache-2.0
|
def test_small_v1_1_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = FlaxT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small")
tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small")
input_ids = tokenizer("Hello there", return_tensors="np").input_ids
labels = tokenizer("Hi I am", return_tensors="np").input_ids
decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -59.0293
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_v1_1_integration_test
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_flax_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_flax_t5.py
|
Apache-2.0
|
def test_small_byt5_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.9.1
>>> path_to_byt5_small_checkpoint = '<fill_in>'
        >>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None)
>>> vocab = t5.data.ByteVocabulary()
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = FlaxT5ForConditionalGeneration.from_pretrained("google/byt5-small")
tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
input_ids = tokenizer("Hello there", return_tensors="np").input_ids
labels = tokenizer("Hi I am", return_tensors="np").input_ids
decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -60.7397
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.9.1
>>> path_to_byt5_small_checkpoint = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None)
>>> vocab = t5.data.ByteVocabulary()
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_byt5_integration_test
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_flax_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_flax_t5.py
|
Apache-2.0
|
def test_fp16_fp32_conversion(self):
r"""
A test to check whether the argument `keep_in_fp32_modules` correctly does its job
"""
orig_import = __import__
accelerate_mock = unittest.mock.Mock()
# mock import of accelerate
def import_accelerate_mock(name, *args, **kwargs):
if name == "accelerate":
if accelerate_available:
return accelerate_mock
else:
raise ImportError
return orig_import(name, *args, **kwargs)
# Load without using `accelerate`
with unittest.mock.patch("builtins.__import__", side_effect=import_accelerate_mock):
accelerate_available = False
model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small", torch_dtype=torch.float16)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16)
            # Load in bf16, still without using `accelerate`
model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small", torch_dtype=torch.bfloat16)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16)
# Load using `accelerate` in bf16
model = T5ForConditionalGeneration.from_pretrained(
"google-t5/t5-small", torch_dtype=torch.bfloat16, device_map="auto"
)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16)
# Load using `accelerate` in bf16
model = T5ForConditionalGeneration.from_pretrained(
"google-t5/t5-small", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True
)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16)
# Load without using `accelerate`
model = T5ForConditionalGeneration.from_pretrained(
"google-t5/t5-small", torch_dtype=torch.float16, low_cpu_mem_usage=True
)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16)
# Load using `accelerate`
model = T5ForConditionalGeneration.from_pretrained(
"google-t5/t5-small", torch_dtype=torch.float16, device_map="auto"
)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16)
|
A test to check whether the argument `keep_in_fp32_modules` correctly does its job
|
test_fp16_fp32_conversion
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_t5.py
|
Apache-2.0
|
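The dtype assertions above come from T5 declaring its `wo` projection as a module to keep in float32 when loading in float16. A quick way to inspect that declaration without downloading any weights (the attribute is internal and its exact value may change between versions):

from transformers import T5ForConditionalGeneration

# class-level attribute consulted by `from_pretrained` when torch_dtype=torch.float16
print(T5ForConditionalGeneration._keep_in_fp32_modules)  # ['wo'] at the time of writing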
def test_torch_quant(self):
r"""
Test that a simple `torch.quantization.quantize_dynamic` call works on a T5 model.
"""
model_name = "google/flan-t5-small"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
input_text = "Answer the following yes/no question by reasoning step-by-step. Can you write a whole Haiku in a single tweet?"
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
_ = model.generate(input_ids)
|
Test that a simple `torch.quantization.quantize_dynamic` call works on a T5 model.
|
test_torch_quant
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_t5.py
|
Apache-2.0
|
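A minimal sketch of what `torch.quantization.quantize_dynamic` does in the test above, applied to a plain module instead of a full T5 checkpoint: matching `nn.Linear` layers are swapped for dynamically quantized ones that store int8 weights and dequantize on the fly.

import torch

model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU())
quantized = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)

print(quantized[0])                          # DynamicQuantizedLinear(in_features=16, out_features=16, ...)
print(quantized(torch.randn(2, 16)).shape)   # torch.Size([2, 16])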
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small").to(torch_device)
tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
labels = tokenizer("Hi I am", return_tensors="pt").input_ids
loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -19.0845
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_integration_test
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_t5.py
|
Apache-2.0
|
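The scoring convention shared by these integration tests: `model(...).loss` is the mean per-token cross-entropy, so negating it and multiplying by the number of target tokens recovers the sequence log-likelihood that `t5.models.MtfModel.score` reports. Worked with illustrative numbers (not the actual tokenization of "Hi I am"):

num_target_tokens = 5      # illustrative
mean_token_loss = 3.8169   # illustrative stand-in for model(...).loss
print(-(num_target_tokens * mean_token_loss))  # ≈ -19.0845, the same scale as EXPECTED_SCORE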
def test_small_v1_1_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = T5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small").to(torch_device)
tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small")
input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
labels = tokenizer("Hi I am", return_tensors="pt").input_ids
loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -59.0293
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_v1_1_integration_test
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_t5.py
|
Apache-2.0
|
def test_small_byt5_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.9.1
>>> path_to_byt5_small_checkpoint = '<fill_in>'
        >>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None)
>>> vocab = t5.data.ByteVocabulary()
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = T5ForConditionalGeneration.from_pretrained("google/byt5-small").to(torch_device)
tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
labels = tokenizer("Hi I am", return_tensors="pt").input_ids
loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -60.7397
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.9.1
>>> path_to_byt5_small_checkpoint = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None)
>>> vocab = t5.data.ByteVocabulary()
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_byt5_integration_test
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_t5.py
|
Apache-2.0
|
def test_export_encoder(self):
"""Test exporting T5EncoderModel to torch export format."""
if not is_torch_greater_or_equal_than_2_4:
self.skipTest("This test requires torch >= 2.4 to run.")
from transformers.integrations.executorch import Seq2SeqLMEncoderExportableModule
model_id = "google-t5/t5-small"
device = "cpu"
example_input_ids = torch.ones((1, 10), dtype=torch.long).to(device)
# Load model
model = T5EncoderModel.from_pretrained(model_id).to(device=device).eval()
# Get original output for comparison
with torch.no_grad():
original_output = model(input_ids=example_input_ids).last_hidden_state
encoder_model = Seq2SeqLMEncoderExportableModule(model)
# Export the encoder_model
with torch.no_grad():
seq_len_dim = torch.export.Dim("sequence_length", max=4096)
exported_program = torch.export.export(
encoder_model, (example_input_ids,), dynamic_shapes={"input_ids": {1: seq_len_dim}}, strict=True
)
# Test the exported model
with torch.no_grad():
exported_output = exported_program.module()(example_input_ids)
# Verify outputs are close enough
self.assertTrue(torch.allclose(original_output, exported_output, atol=1e-5))
|
Test exporting T5EncoderModel to torch export format.
|
test_export_encoder
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_t5.py
|
Apache-2.0
|
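A self-contained sketch of the `torch.export` pattern used by these export tests (requires torch >= 2.4; the tiny module is illustrative): export with one dynamic sequence-length dimension, then call the exported program like a regular module.

import torch

class TinyEncoder(torch.nn.Module):
    def forward(self, input_ids):
        # stand-in for a real encoder: just lift token ids to a float feature dim
        return input_ids.float().unsqueeze(-1) * 2.0

example_input_ids = torch.ones((1, 10), dtype=torch.long)
seq_len_dim = torch.export.Dim("sequence_length", max=4096)
exported = torch.export.export(
    TinyEncoder(), (example_input_ids,), dynamic_shapes={"input_ids": {1: seq_len_dim}}
)
print(exported.module()(torch.ones((1, 23), dtype=torch.long)).shape)  # torch.Size([1, 23, 1])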
def test_export_decoder(self):
"""Test exporting T5 decoder with static cache to torch export format."""
if not is_torch_greater_or_equal_than_2_4:
self.skipTest("This test requires torch >= 2.4 to run.")
from transformers import AutoModelForSeq2SeqLM, T5ForConditionalGeneration
from transformers.integrations.executorch import Seq2SeqLMDecoderExportableModuleWithStaticCache
model_id = "google-t5/t5-small"
# Configuration for static cache
batch_size = 1
max_cache_len = 123
device = "cpu"
full_model = AutoModelForSeq2SeqLM.from_pretrained(model_id).to(device)
self.assertIsInstance(full_model, T5ForConditionalGeneration)
decoder_model = (
Seq2SeqLMDecoderExportableModuleWithStaticCache(full_model, max_cache_len, batch_size).to(device).eval()
)
# Prepare test inputs
example_decoder_input_ids = torch.tensor([[0]], dtype=torch.long) # Start token
example_cache_position = torch.tensor([0], dtype=torch.long)
# For T5-small, hidden size is 512
example_encoder_hidden_states = torch.zeros((batch_size, 10, 512), dtype=torch.float32)
# Export the model
with torch.no_grad():
encoder_sequence_length_dim = torch.export.Dim("encoder_sequence_length", max=4096)
exported_program = torch.export.export(
decoder_model,
(example_decoder_input_ids, example_encoder_hidden_states, example_cache_position),
dynamic_shapes={
"decoder_input_ids": None,
"encoder_hidden_states": {1: encoder_sequence_length_dim},
"cache_position": None,
},
strict=True,
)
# We won't directly verify outputs here as it's complicated with caching,
# but we'll check the export was successful
self.assertIsNotNone(exported_program)
# Verify cache buffers existence and shapes
cache_buffers = [
(name, buffer)
for name, buffer in exported_program.named_buffers()
if name.startswith("key_cache_") or name.startswith("value_cache_")
]
# Verify cache buffers
self.assertTrue(len(cache_buffers) > 0, "No cache buffers found in exported model")
for name, buffer in cache_buffers:
            # Verify the cache sequence-length dimension matches max_cache_len
self.assertEqual(buffer.shape[2], max_cache_len)
|
Test exporting T5 decoder with static cache to torch export format.
|
test_export_decoder
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_t5.py
|
Apache-2.0
|
def test_export_t5_summarization(self):
"""Test composing exported T5 encoder and decoder for summarization."""
if not is_torch_greater_or_equal_than_2_4:
self.skipTest("This test requires torch >= 2.4 to run.")
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, T5ForConditionalGeneration
from transformers.integrations.executorch import Seq2SeqLMExportableModule
device = "cpu"
batch_size = 1
max_cache_length = 1234
max_hidden_seq_length = 5678
model_id = "google-t5/t5-small"
tokenizer = AutoTokenizer.from_pretrained(model_id)
full_model = AutoModelForSeq2SeqLM.from_pretrained(model_id).to(device).eval()
self.assertIsInstance(full_model, T5ForConditionalGeneration)
wrapped_model = Seq2SeqLMExportableModule(
full_model,
batch_size=batch_size,
max_hidden_seq_length=max_hidden_seq_length,
max_cache_length=max_cache_length,
)
exported_t5 = wrapped_model.export()
# Test Summarization with Composed Models
prompts = [
"summarize: Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial "
"reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe "
"theory of relativity is not hard to grasp."
]
input_ids = tokenizer(prompts, return_tensors="pt").input_ids
generated_ids = exported_t5.generate(prompt_token_ids=input_ids, max_new_tokens=max_cache_length)
generated_summary = tokenizer.decode(generated_ids, skip_special_tokens=True)
# Also run original model for comparison
original_model = T5ForConditionalGeneration.from_pretrained(model_id).eval()
with torch.no_grad():
original_outputs = original_model.generate(input_ids, max_length=50, num_beams=1)
original_summary = tokenizer.decode(original_outputs[0], skip_special_tokens=True)
# Basic verification that we got a reasonable summary
self.assertEqual(generated_summary, original_summary)
|
Test composing exported T5 encoder and decoder for summarization.
|
test_export_t5_summarization
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_t5.py
|
Apache-2.0
|
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-small")
tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_mean(loss).numpy()
EXPECTED_SCORE = -4.771147
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_integration_test
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_tf_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_tf_t5.py
|
Apache-2.0
|
def test_small_v1_1_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
        >>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>'
        >>> path_to_mtf_small_spm_model_path = '<fill_in>'
        >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = TFT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small")
tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_mean(loss).numpy()
EXPECTED_SCORE = -14.757326
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_v1_1_integration_test
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_tf_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_tf_t5.py
|
Apache-2.0
|
def test_small_byt5_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.9.1
>>> path_to_byt5_small_checkpoint = '<fill_in>'
        >>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None)
>>> vocab = t5.data.ByteVocabulary()
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = TFT5ForConditionalGeneration.from_pretrained("google/byt5-small")
tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_mean(loss).numpy()
EXPECTED_SCORE = -7.592465
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.9.1
>>> path_to_byt5_small_checkpoint = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None)
>>> vocab = t5.data.ByteVocabulary()
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_byt5_integration_test
|
python
|
huggingface/transformers
|
tests/models/t5/test_modeling_tf_t5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/t5/test_modeling_tf_t5.py
|
Apache-2.0
|
def _prepare_tables(self):
"""Prepares two tables, both with three distinct rows.
The first table has two columns:
1.0, 2.0 | 3.0
2.0, 0.0 | 1.0
1.0, 3.0 | 4.0
The second table has three columns:
1.0 | 2.0 | 3.0
2.0 | 0.0 | 1.0
1.0 | 3.0 | 4.0
Returns:
SegmentedTensors with the tables.
"""
values = torch.tensor(
[
[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]],
[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]],
]
)
row_index = IndexMap(
indices=torch.tensor(
[
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
]
),
num_segments=3,
batch_dims=1,
)
col_index = IndexMap(
indices=torch.tensor(
[
[[0, 0, 1], [0, 0, 1], [0, 0, 1]],
[[0, 1, 2], [0, 1, 2], [0, 1, 2]],
]
),
num_segments=3,
batch_dims=1,
)
return values, row_index, col_index
|
Prepares two tables, both with three distinct rows.
The first table has two columns:
1.0, 2.0 | 3.0
2.0, 0.0 | 1.0
1.0, 3.0 | 4.0
The second table has three columns:
1.0 | 2.0 | 3.0
2.0 | 0.0 | 1.0
1.0 | 3.0 | 4.0
Returns:
SegmentedTensors with the tables.
|
_prepare_tables
|
python
|
huggingface/transformers
|
tests/models/tapas/test_modeling_tapas.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/tapas/test_modeling_tapas.py
|
Apache-2.0
|
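The (values, row_index, col_index) triple prepared above records which table cell each value belongs to. As a sanity check independent of TAPAS's `IndexMap`, the per-column means of the first table can be reproduced with a plain scatter-add (a sketch, not the library's segmented reduction):

import torch

values = torch.tensor([[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]])
col_index = torch.tensor([[0, 0, 1], [0, 0, 1], [0, 0, 1]])  # first table: two columns

num_cols = 2
sums = torch.zeros(num_cols).scatter_add_(0, col_index.reshape(-1), values.reshape(-1))
counts = torch.zeros(num_cols).scatter_add_(0, col_index.reshape(-1), torch.ones(values.numel()))
print(sums / counts)  # tensor([1.5000, 2.6667]) -> per-column means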
def _prepare_tables(self):
"""Prepares two tables, both with three distinct rows.
The first table has two columns:
1.0, 2.0 | 3.0
2.0, 0.0 | 1.0
1.0, 3.0 | 4.0
The second table has three columns:
1.0 | 2.0 | 3.0
2.0 | 0.0 | 1.0
1.0 | 3.0 | 4.0
Returns:
SegmentedTensors with the tables.
"""
values = tf.constant(
[
[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]],
[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]],
]
)
row_index = IndexMap(
indices=[
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
],
num_segments=3,
batch_dims=1,
)
col_index = IndexMap(
indices=[
[[0, 0, 1], [0, 0, 1], [0, 0, 1]],
[[0, 1, 2], [0, 1, 2], [0, 1, 2]],
],
num_segments=3,
batch_dims=1,
)
return values, row_index, col_index
|
Prepares two tables, both with three distinct rows.
The first table has two columns:
1.0, 2.0 | 3.0
2.0, 0.0 | 1.0
1.0, 3.0 | 4.0
The second table has three columns:
1.0 | 2.0 | 3.0
2.0 | 0.0 | 1.0
1.0 | 3.0 | 4.0
Returns:
SegmentedTensors with the tables.
|
_prepare_tables
|
python
|
huggingface/transformers
|
tests/models/tapas/test_modeling_tf_tapas.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/tapas/test_modeling_tf_tapas.py
|
Apache-2.0
|
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to TvpImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
return (int(self.pad_size["height"]), int(self.pad_size["width"]))
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
|
This function computes the expected height and width when providing images to TvpImageProcessor,
assuming do_resize is set to True with a scalar size.
|
get_expected_values
|
python
|
huggingface/transformers
|
tests/models/tvp/test_image_processing_tvp.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/tvp/test_image_processing_tvp.py
|
Apache-2.0
|
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
        This needs to be padded so that it can be represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
elif is_tf_available():
returned_tensor = "tf"
else:
returned_tensor = "jax"
# Single example
words, boxes = self.get_words_and_boxes()
tokens = tokenizer.encode_plus_boxes(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
else:
self.assertEqual(len(tokens[key].shape), 3)
# Batch of examples
# For these 2 examples, 3 training examples will be created
words, boxes = self.get_words_and_boxes_batch()
tokens = tokenizer.batch_encode_plus_boxes(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
else:
self.assertEqual(len(tokens[key].shape), 3)
self.assertEqual(tokens[key].shape[-1], 4)
|
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
|
test_batch_encode_dynamic_overflowing
|
python
|
huggingface/transformers
|
tests/models/udop/test_tokenization_udop.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/udop/test_tokenization_udop.py
|
Apache-2.0
|
def test_mismatching_num_image_tokens(self):
"""
        Tests that VLMs throw an error with an explicit message saying what is wrong
        when the number of images doesn't match the number of image tokens in the text.
        Also we need to test multi-image cases when one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
curr_input_dict = copy.deepcopy(input_dict)
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values_images"] = curr_input_dict["pixel_values_images"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values_images"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values_images=pixel_values)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
_ = model(input_ids=input_ids, pixel_values_images=pixel_values)
|
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
Also we need to test multi-image cases when one prompt has multiple image tokens.
|
test_mismatching_num_image_tokens
|
python
|
huggingface/transformers
|
tests/models/video_llava/test_modeling_video_llava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/video_llava/test_modeling_video_llava.py
|
Apache-2.0
|
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision_feature_layer
num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer)
hidden_size = config.vision_config.hidden_size
expected_features = hidden_size * num_feature_layers
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
# We should have the right number of input features,
# and should be able to run a forward pass without exploding
base_model = getattr(model, "model", model)
assert base_model.multi_modal_projector.linear_1.in_features == expected_features
model(**input_dict)
|
Test that we can use either one vision feature layer, or a list of
vision feature layers.
|
test_vision_feature_layers
|
python
|
huggingface/transformers
|
tests/models/video_llava/test_modeling_video_llava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/video_llava/test_modeling_video_llava.py
|
Apache-2.0
|
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to ViltImageProcessor,
assuming do_resize is set to True with a scalar size and size_divisor.
"""
if not batched:
size = self.size["shortest_edge"]
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
scale = size / min(w, h)
if h < w:
newh, neww = size, scale * w
else:
newh, neww = scale * h, size
max_size = int((1333 / 800) * size)
if max(newh, neww) > max_size:
scale = max_size / max(newh, neww)
newh = newh * scale
neww = neww * scale
newh, neww = int(newh + 0.5), int(neww + 0.5)
expected_height, expected_width = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
|
This function computes the expected height and width when providing images to ViltImageProcessor,
assuming do_resize is set to True with a scalar size and size_divisor.
|
get_expected_values
|
python
|
huggingface/transformers
|
tests/models/vilt/test_image_processing_vilt.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/vilt/test_image_processing_vilt.py
|
Apache-2.0
|
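A worked numeric example of the shortest-edge + size_divisor resize mirrored by the helper above (size=384 and size_divisor=32 are illustrative settings, not necessarily the tester's defaults):

size, size_divisor = 384, 32   # illustrative processor settings
h, w = 480, 640                # input image height and width

scale = size / min(w, h)                     # 0.8 -> shortest edge becomes 384
newh, neww = (size, scale * w) if h < w else (scale * h, size)
max_size = int((1333 / 800) * size)          # 639, cap on the longest edge
if max(newh, neww) > max_size:               # not triggered for this example
    rescale = max_size / max(newh, neww)
    newh, neww = newh * rescale, neww * rescale
newh, neww = int(newh + 0.5), int(neww + 0.5)
print(newh // size_divisor * size_divisor, neww // size_divisor * size_divisor)  # 384 512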
def test_mismatching_num_image_tokens(self):
"""
        Tests that VLMs throw an error with an explicit message saying what is wrong
        when the number of images doesn't match the number of image tokens in the text.
        Also we need to test multi-image cases when one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
            curr_input_dict = copy.deepcopy(input_dict)  # avoid in-place modifications further down
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values)
|
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
Also we need to test multi-image cases when one prompt has multiple image tokens.
|
test_mismatching_num_image_tokens
|
python
|
huggingface/transformers
|
tests/models/vipllava/test_modeling_vipllava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/vipllava/test_modeling_vipllava.py
|
Apache-2.0
|
def test_vision_feature_layers(self, vision_feature_layers):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
# NOTE: vipllava uses vision_feature_layers instead of vision_feature_layer as the
# config key. The reason is that other llava classes supported one vision feature layer
# and added support for a list of layers with granite vision support, while vipllava
        # originally supported multiple feature layers, and added support for a single layer
        # for compatibility reasons.
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layers = vision_feature_layers
num_feature_layers = 1 if isinstance(vision_feature_layers, int) else len(vision_feature_layers)
hidden_size = config.vision_config.hidden_size
expected_features = hidden_size * num_feature_layers
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
# We should have the right number of input features,
# and should be able to run a forward pass without exploding
base_model = getattr(model, "model", model)
assert base_model.multi_modal_projector.linear_1.in_features == expected_features
model(**input_dict)
|
Test that we can use either one vision feature layer, or a list of
vision feature layers.
|
test_vision_feature_layers
|
python
|
huggingface/transformers
|
tests/models/vipllava/test_modeling_vipllava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/vipllava/test_modeling_vipllava.py
|
Apache-2.0
|
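A toy sketch of why the projector's `in_features` checked above scales with the number of selected vision feature layers: the chosen hidden states are concatenated along the channel dimension before being projected (tensors and sizes are illustrative):

import torch

hidden_size, num_patches, batch = 32, 5, 1
vision_hidden_states = [torch.rand(batch, num_patches, hidden_size) for _ in range(4)]

vision_feature_layers = [-1, -2]  # a list of layers, as opposed to a single int
selected = torch.cat([vision_hidden_states[i] for i in vision_feature_layers], dim=-1)
projector = torch.nn.Linear(hidden_size * len(vision_feature_layers), 64)
print(projector(selected).shape)  # torch.Size([1, 5, 64])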
def test_inference_fp16(self):
r"""
        A small test to make sure that inference works in half precision without any problem.
"""
model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_ = model(pixel_values)
|
A small test to make sure that inference works in half precision without any problem.
|
test_inference_fp16
|
python
|
huggingface/transformers
|
tests/models/vit/test_modeling_vit.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/vit/test_modeling_vit.py
|
Apache-2.0
|
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
|
Computes the output length of the convolutional layers
|
get_subsampled_output_lengths
|
python
|
huggingface/transformers
|
tests/models/whisper/test_modeling_flax_whisper.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/whisper/test_modeling_flax_whisper.py
|
Apache-2.0
|
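The closed-form length used by the helper above is exactly what a 1-D convolution with kernel_size=3, stride=2, padding=1 produces, since floor((L + 2*1 - 3) / 2) + 1 == (L - 1) // 2 + 1. A quick check against an actual `torch.nn.Conv1d` (channel counts are illustrative):

import torch

conv = torch.nn.Conv1d(in_channels=1, out_channels=1, kernel_size=3, stride=2, padding=1)
for input_length in (17, 1500, 3000):
    output_length = conv(torch.zeros(1, 1, input_length)).shape[-1]
    assert output_length == (input_length - 1) // 2 + 1
print("conv output lengths match (L - 1) // 2 + 1")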
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
|
Computes the output length of the convolutional layers
|
get_subsampled_output_lengths
|
python
|
huggingface/transformers
|
tests/models/whisper/test_modeling_tf_whisper.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/whisper/test_modeling_tf_whisper.py
|
Apache-2.0
|
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
|
Computes the output length of the convolutional layers
|
get_subsampled_output_lengths
|
python
|
huggingface/transformers
|
tests/models/whisper/test_modeling_whisper.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/whisper/test_modeling_whisper.py
|
Apache-2.0
|
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
|
Computes the output length of the convolutional layers
|
get_subsampled_output_lengths
|
python
|
huggingface/transformers
|
tests/models/whisper/test_modeling_whisper.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/whisper/test_modeling_whisper.py
|
Apache-2.0
|
def test_find_longest_common_subsequence_old(self):
"""Test using the old processing functions used in the ASR pipeline, but that serves as a BC reference."""
max_source_positions = 1500
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
previous_sequence = [[51492, 406, 3163, 1953, 466, 13, 51612, 51612]]
self.assertEqual(
processor.decode(previous_sequence[0], output_offsets=True),
{
"text": " not worth thinking about.",
"offsets": [{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}],
},
)
# Merge when the previous sequence is a suffix of the next sequence
# fmt: off
next_sequences_1 = [
[50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257]
]
# fmt: on
self.assertEqual(
processor.decode(next_sequences_1[0], output_offsets=True),
{
"text": (
" of spectators, retrievality is not worth thinking about. His instant panic was followed by a"
" small, sharp blow high on his chest.<|endoftext|>"
),
"offsets": [
{"text": " of spectators, retrievality is not worth thinking about.", "timestamp": (0.0, 5.0)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (5.0, 9.4),
},
],
},
)
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_1, (480_000, 120_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
# fmt: off
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 51739, 51739, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959],
)
# fmt: on
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{"text": " not worth thinking about.", "timestamp": (22.56, 27.5)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (27.5, 31.900000000000002),
},
],
},
)
# Merge when the sequence is in the middle of the 1st next sequence
# fmt: off
next_sequences_2 = [
[50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257]
]
# fmt: on
# {'text': ' of spectators, retrievality is not worth thinking about. His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)}
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_2, (480_000, 120_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
# fmt: off
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959],
)
# fmt: on
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on"
" his chest."
),
"timestamp": (22.56, 31.900000000000002),
},
],
},
)
# Merge when the previous sequence is not included in the current sequence
next_sequences_3 = [[50364, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50584, 50257]] # fmt: skip
# {'text': ' His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)}
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 120_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51832],
) # fmt: skip
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (24.96, 29.36),
},
],
},
)
# last case is when the sequence is not in the first next predicted start and end of timestamp
next_sequences_3 = [
[50364, 2812, 9836, 14783, 390, 406, 3163, 1953, 466, 13, 50634, 50634, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50934]
] # fmt: skip
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 167_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51912]
) # fmt: skip
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (24.96, 30.96),
},
],
},
)
|
Test using the old processing functions from the ASR pipeline, which serve as a BC reference.
|
test_find_longest_common_subsequence_old
|
python
|
huggingface/transformers
|
tests/models/whisper/test_processor_whisper.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/whisper/test_processor_whisper.py
|
Apache-2.0
|
def _fast_find_longest_common_sequence(sequence_left, sequence_right):
"""Old processing function used in the ASR pipeline."""
seq_len_left = len(sequence_left)
seq_len_right = len(sequence_right)
counter = [[0] * (seq_len_right + 1) for _ in range(seq_len_left + 1)]
longest = 0
for i in range(seq_len_left):
for j in range(seq_len_right):
if sequence_left[i] == sequence_right[j]:
previous_counter = counter[i][j] + 1
counter[i + 1][j + 1] = previous_counter
if previous_counter > longest:
longest = previous_counter
counter = np.array(counter)
# we return the idx of the first element of the longest common sequence in the left sequence
index_left = np.argwhere(counter == longest)[-1][0] - longest if longest != 0 else -1
index_right = np.argwhere(counter == longest)[-1][1] - longest if longest != 0 else -1
return index_left, index_right, longest
|
Old processing function used in the ASR pipeline.
|
_fast_find_longest_common_sequence
|
python
|
huggingface/transformers
|
tests/models/whisper/test_processor_whisper.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/whisper/test_processor_whisper.py
|
Apache-2.0
|
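Usage sketch for the helper above, assuming `_fast_find_longest_common_sequence` is in scope: it returns the start index of the longest shared run in each sequence plus the run length.

left = [7, 8, 9, 1, 2, 3]
right = [1, 2, 3, 4, 5]

index_left, index_right, length = _fast_find_longest_common_sequence(left, right)
print(index_left, index_right, length)  # 3 0 3 -> both share the run [1, 2, 3]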
def _find_timestamp_sequence(sequences, tokenizer, feature_extractor, max_source_positions):
"""
Old processing function used in the ASR pipeline.
Computes the final sequences by merging the end of the nth sequence with the beginning of the n+1th sequence. Since
`WhisperForConditionalGeneration` produces the timestamps pairwise, we filter the consecutive timestamps and only
iterate over them. We keep track of the `time` which indicates the actual starting time of the chunk that is
    processed. We need to make sure to offset the timestamp tokens by the `time` in order for the tokenizer to
properly compute the final `offset`.
"""
# index of the first timestamp token
timestamp_begin = tokenizer.convert_tokens_to_ids("<|notimestamps|>") + 1
items = []
# approximation of the token to time ratio : ~0.2seconds
time_precision = feature_extractor.chunk_length / max_source_positions
time = 0
for seq_idx, item in enumerate(sequences):
sequence, stride = item
if isinstance(sequence, list):
sequence = np.array(sequence)
chunk_len, stride_left, stride_right = stride
sequence = sequence.squeeze(0)
        # get rid of the `forced_decoder_idx` that are used to parametrize the generation
begin_idx = np.where(sequence == timestamp_begin)[0][0] if timestamp_begin in sequence else 0
sequence = sequence[begin_idx:]
timestamp_tokens = sequence >= timestamp_begin
if seq_idx != 0 and sum(timestamp_tokens) > 0:
consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1
last_timestamp = np.where(timestamp_tokens)[0][-1]
consecutive = np.append(consecutive, last_timestamp) if last_timestamp not in consecutive else consecutive
time -= stride_left + stride_right
offset = int((time / feature_extractor.sampling_rate) / time_precision)
overlap_time = int((stride_left / feature_extractor.sampling_rate) / time_precision)
# relevant timestamps are in the overlapping part
relevant_timestamp = np.where(sequence[consecutive] >= timestamp_begin + overlap_time)[0]
if relevant_timestamp.shape[0] > 0:
relevant_timestamp = (
consecutive[relevant_timestamp[0] - 1] if relevant_timestamp[0] > 0 else consecutive[0]
)
# if a big stride is used, we need to check some of the previous items for the best overlap
best_match = 0
sliced_sequence = []
for idx, previous_sequence in enumerate(reversed(items)):
previous_tokens = previous_sequence[1:-1]
if previous_sequence[0] < (timestamp_begin + offset - overlap_time) and idx != 0:
break # the previous sequence is too far in the past
if len(previous_tokens) > 0:
# find the longest common sequence between the overlapping parts
index_left, index_right, match_length = _fast_find_longest_common_sequence(
sequence[1:relevant_timestamp], previous_tokens
)
# don't do anything if only 1 token was matched
if match_length > 1 and match_length > best_match:
best_match = match_length
best_idx = idx
end_of_curr_sequence_idx = (
np.where(sequence[index_left + 1 :] >= timestamp_begin)[0][0] + 1
)
end_of_curr_sequence_idx = end_of_curr_sequence_idx + 1 + index_left
# if all the tokens are matched, suffix
if index_left == 0 and match_length == len(previous_tokens):
sliced_sequence = np.insert(
sequence[index_left + 1 : end_of_curr_sequence_idx], 0, previous_sequence[0]
)
sliced_sequence[-1] = previous_sequence[-1]
# if part of the previous sequence is not taken
elif index_left >= 0:
sliced_sequence = sequence[index_left + 1 : end_of_curr_sequence_idx]
# let's insert the missing part of the previous sequence
previous_slice = (
previous_sequence[: index_right + 1] if index_right > 0 else [previous_sequence[0]]
)
sliced_sequence = np.insert(sliced_sequence, 0, previous_slice)
sliced_sequence[-1] += offset
if len(sliced_sequence) > 0:
items[len(items) - best_idx - 1] = sliced_sequence
items = items[: len(items) - best_idx]
sequence = sequence[end_of_curr_sequence_idx:]
# sequence might have changed
timestamp_tokens = sequence >= timestamp_begin
consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1
if sum(timestamp_tokens) > 0:
last_timestamp = np.where(timestamp_tokens)[0][-1]
consecutive = (
np.append(consecutive, last_timestamp + 1) if last_timestamp not in consecutive else consecutive
)
if len(consecutive) > 0:
last_slice = 0
for current_slice in consecutive:
actual_offset = items[-1][-1] if seq_idx != 0 or last_slice != 0 else sequence[0]
sliced_tokens = sequence[last_slice:current_slice]
duration = sliced_tokens[-1] - sliced_tokens[0]
sliced_tokens[0] = actual_offset
sliced_tokens[-1] = actual_offset + duration
items.append(sliced_tokens)
last_slice = current_slice
time += chunk_len
result = []
for i in range(len(items)):
result += items[i].tolist()
return result
|
Old processing function used in the ASR pipeline.
Computes the final sequences by merging the end of the nth sequence with the beginning of the n+1th sequence. Since
`WhisperForConditionalGeneration` produces the timestamps pairwise, we filter the consecutive timestamps and only
iterate over them. We keep track of the `time` which indicates the actual starting time of the chunk that is
processed. We need to make sure to offset the timestamps tokens by the `time` in order for the tokenizer to
properly compute the final `offset`.
|
_find_timestamp_sequence
|
python
|
huggingface/transformers
|
tests/models/whisper/test_processor_whisper.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/whisper/test_processor_whisper.py
|
Apache-2.0
|
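The offset bookkeeping above hinges on the token-to-time ratio of the feature extractor. A minimal sketch of that arithmetic, using typical Whisper-style values that are assumptions for illustration rather than values taken from the test:
# Typical Whisper-style values (assumptions for illustration).
chunk_length = 30.0          # seconds of audio per chunk
sampling_rate = 16_000       # Hz
max_source_positions = 1500  # decoder positions

time_precision = chunk_length / max_source_positions  # 0.02 s per timestamp token
stride_left_samples = 6 * sampling_rate                # a 6 s left stride, expressed in samples

# Mirrors the `overlap_time` computation above: the stride converted into timestamp tokens.
overlap_time = int((stride_left_samples / sampling_rate) / time_precision)
print(time_precision, overlap_time)  # 0.02, 300
|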
def test_full_tokenizer(self):
"""Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
|
Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
|
test_full_tokenizer
|
python
|
huggingface/transformers
|
tests/models/xlm/test_tokenization_xlm.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/xlm/test_tokenization_xlm.py
|
Apache-2.0
|
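The expected split of "lower" into ["low", "er</w>"] comes from BPE merging. Below is a simplified pure-Python sketch of that mechanism; the merge table is an assumption chosen for illustration, not the fixture the test builds in its setUp.
# Start from characters, with the end-of-word marker attached to the last one.
symbols = ["l", "o", "w", "e", "r</w>"]
merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]  # assumed merge rules, applied in order

for a, b in merges:
    i = 0
    while i < len(symbols) - 1:
        if (symbols[i], symbols[i + 1]) == (a, b):
            symbols[i : i + 2] = [a + b]
        else:
            i += 1

print(symbols)  # ['low', 'er</w>'] -- the tokens the test asserts
|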
def test_create_position_ids_respects_padding_index(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = XLMRobertaXLEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
|
This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1
|
test_create_position_ids_respects_padding_index
|
python
|
huggingface/transformers
|
tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py
|
Apache-2.0
|
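As a concrete illustration of the behavior asserted above, here is a minimal re-implementation sketch of the position-id computation (mirroring the helper under test), with an assumed padding index of 1:
import torch

def position_ids_from_input_ids(input_ids, padding_idx):
    # Non-padding tokens get consecutive positions starting at padding_idx + 1;
    # padding tokens keep padding_idx itself.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.as_tensor([[12, 31, 13, 1]])  # last token is padding (padding_idx == 1)
print(position_ids_from_input_ids(input_ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])
|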
def test_create_position_ids_from_inputs_embeds(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = XLMRobertaXLEmbeddings(config=config)
inputs_embeds = torch.empty(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
|
This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1
|
test_create_position_ids_from_inputs_embeds
|
python
|
huggingface/transformers
|
tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py
|
Apache-2.0
|
def test_create_position_ids_respects_padding_index(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is XmodEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = XmodEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
|
This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is XmodEmbeddings.padding_idx + 1
|
test_create_position_ids_respects_padding_index
|
python
|
huggingface/transformers
|
tests/models/xmod/test_modeling_xmod.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/xmod/test_modeling_xmod.py
|
Apache-2.0
|
def test_create_position_ids_from_inputs_embeds(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is XmodEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = XmodEmbeddings(config=config)
inputs_embeds = torch.empty(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
|
This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is XmodEmbeddings.padding_idx + 1
|
test_create_position_ids_from_inputs_embeds
|
python
|
huggingface/transformers
|
tests/models/xmod/test_modeling_xmod.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/xmod/test_modeling_xmod.py
|
Apache-2.0
|
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to YolosImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
width, height = image.size
elif isinstance(image, np.ndarray):
height, width = image.shape[0], image.shape[1]
else:
height, width = image.shape[1], image.shape[2]
size = self.size["shortest_edge"]
max_size = self.size.get("longest_edge", None)
if max_size is not None:
min_original_size = float(min((height, width)))
max_original_size = float(max((height, width)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if width <= height and width != size:
height = int(size * height / width)
width = size
elif height < width and height != size:
width = int(size * width / height)
height = size
width_mod = width % 16
height_mod = height % 16
expected_width = width - width_mod
expected_height = height - height_mod
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
|
This function computes the expected height and width when providing images to YolosImageProcessor,
assuming do_resize is set to True with a scalar size.
|
get_expected_values
|
python
|
huggingface/transformers
|
tests/models/yolos/test_image_processing_yolos.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/yolos/test_image_processing_yolos.py
|
Apache-2.0
|
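The arithmetic encoded by the helper above, written out as a standalone sketch for a hypothetical 480x640 (H x W) image with size={"shortest_edge": 800, "longest_edge": 1333}:
height, width = 480, 640
size, max_size = 800, 1333

# Cap the target size if scaling the short side would push the long side past max_size.
min_side, max_side = float(min(height, width)), float(max(height, width))
if max_side / min_side * size > max_size:  # 640 / 480 * 800 ~ 1066.7 <= 1333, so no cap here
    size = int(round(max_size * min_side / max_side))

# Resize so the shorter edge matches `size`, keeping the aspect ratio.
if width <= height and width != size:
    height, width = int(size * height / width), size
elif height < width and height != size:
    height, width = size, int(size * width / height)  # -> 800, 1066

# As in the helper above, the expected values truncate both sides to a multiple of 16.
print(height - height % 16, width - width % 16)  # 800 1056
|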
def test_initialization(self):
r"""
Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
if "A_log" in name:
A = torch.arange(1, config.mamba_d_state + 1, dtype=torch.float32)[None, :]
intermediate_dim = config.mamba_expand * config.hidden_size
A = A.expand(intermediate_dim, -1).reshape(
config.n_mamba_heads, intermediate_dim // config.n_mamba_heads, -1
)
torch.testing.assert_close(param.data, torch.log(A), rtol=1e-5, atol=1e-5)
elif "D" in name:
# check if it's a ones like
torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5)
elif "x_proj" in name or "dt_proj_weight" in name:
self.assertIn(
((param.data.mean() * 1e2).round() / 1e2).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized (raw value {param.data.mean()})",
)
elif "dt_proj_bias" in name:
dt = torch.exp(
torch.tensor([0, 1]) * (math.log(config.time_step_max) - math.log(config.time_step_min))
+ math.log(config.time_step_min)
).clamp(min=config.time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))
if param.requires_grad:
self.assertTrue(param.data.max().item() <= inv_dt[1])
self.assertTrue(param.data.min().item() >= inv_dt[0])
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
|
Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
|
test_initialization
|
python
|
huggingface/transformers
|
tests/models/zamba/test_modeling_zamba.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/zamba/test_modeling_zamba.py
|
Apache-2.0
|
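The `inv_dt = dt + torch.log(-torch.expm1(-dt))` expression checked above is just the inverse of softplus. A quick verification sketch (the dt values below are arbitrary, not the test's time-step bounds):
import torch

dt = torch.tensor([0.001, 0.01, 0.1])
inv_dt = dt + torch.log(-torch.expm1(-dt))
# softplus(inv_dt) = log(1 + exp(inv_dt)) recovers dt, which is why the initialized bias is
# expected to lie between inv_dt[0] and inv_dt[1] when dt spans [time_step_min, time_step_max].
print(torch.allclose(torch.nn.functional.softplus(inv_dt), dt, atol=1e-5))  # True
|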
def test_attention_outputs(self):
r"""
Overriding the test_attention_outputs test as the Zamba model outputs attention only for its attention layers
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
expected_num_attentions = (
math.ceil(
(self.model_tester.num_hidden_layers - self.model_tester.attn_layer_offset)
/ self.model_tester.attn_layer_period
)
+ 1
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), expected_num_attentions)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
|
Overriding the test_attention_outputs test as the Zamba model outputs attention only for its attention layers
|
test_attention_outputs
|
python
|
huggingface/transformers
|
tests/models/zamba/test_modeling_zamba.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/zamba/test_modeling_zamba.py
|
Apache-2.0
|
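A worked example of the `expected_num_attentions` arithmetic used above; the tester values are assumptions chosen only for illustration:
import math

# Assumed tester configuration, for illustration only.
num_hidden_layers = 8
attn_layer_offset = 1
attn_layer_period = 3

expected_num_attentions = math.ceil((num_hidden_layers - attn_layer_offset) / attn_layer_period) + 1
print(expected_num_attentions)  # ceil(7 / 3) + 1 = 4 attention tensors expected in `outputs.attentions`
|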
def test_left_padding_compatibility(self):
r"""
    Overriding the test_left_padding_compatibility test, as the mamba layers accentuate the numerical differences
    caused by left padding (see the issue referenced in the note below). A more permissive tolerance value is used.
"""
import inspect
# NOTE: left-padding results in small numerical differences. This is expected.
# See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535
# First, filter out models that don't support left padding - generative and decoder-only.
# Zamba is a decoder-only architecture
decoder_only_classes = self.all_generative_model_classes
# Then, test left-padding
def _prepare_model_kwargs(input_ids, attention_mask, signature):
model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask}
if "position_ids" in signature:
position_ids = torch.cumsum(attention_mask, dim=-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
model_kwargs["position_ids"] = position_ids
if "cache_position" in signature:
cache_position = torch.arange(input_ids.shape[-1], device=torch_device)
model_kwargs["cache_position"] = cache_position
return model_kwargs
for model_class in decoder_only_classes:
config, input_ids, attention_mask = self._get_input_ids_and_config()
model = model_class(config).to(torch_device).eval()
signature = inspect.signature(model.forward).parameters.keys()
# Without padding
model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, signature)
next_logits_wo_padding = model(**model_kwargs).logits[:, -1, :]
# With left-padding (length 32)
pad_size = (input_ids.shape[0], 32)
padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * config.pad_token_id
padded_input_ids = torch.cat((padding, input_ids), dim=1)
padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1)
model_kwargs = _prepare_model_kwargs(padded_input_ids, padded_attention_mask, signature)
next_logits_with_padding = model(**model_kwargs).logits[:, -1, :]
# They should result in very similar logits
torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=3e-3, atol=3e-3)
|
Overriding the test_left_padding_compatibility test, as the mamba layers accentuate the numerical differences
caused by left padding (see the issue referenced in the note below). A more permissive tolerance value is used.
|
test_left_padding_compatibility
|
python
|
huggingface/transformers
|
tests/models/zamba/test_modeling_zamba.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/zamba/test_modeling_zamba.py
|
Apache-2.0
|
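Outside the test, left-padded batches are usually produced by the tokenizer rather than built by hand. A small sketch of that path; the model id is chosen only for illustration:
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # gpt2 has no pad token by default
tokenizer.padding_side = "left"

batch = tokenizer(["short prompt", "a somewhat longer prompt"], padding=True, return_tensors="pt")
print(batch["attention_mask"])  # zeros appear on the left of the shorter sequence
|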
def test_flash_attn_2_fp32_ln(self):
r"""
Overriding the test_flash_attn_2_fp32_ln test as the Zamba model, like Mixtral, doesn't support
right padding + use cache with FA2
"""
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
dummy_input = inputs_dict[model.main_input_name]
dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))
# NOTE: Zamba does not support right padding + use_cache with FA2.
dummy_attention_mask[:, -1] = 1
model = model_class.from_pretrained(
tmpdirname,
torch_dtype=torch.float16,
attn_implementation="flash_attention_2",
low_cpu_mem_usage=True,
load_in_4bit=True,
)
for _, param in model.named_parameters():
# upcast only layer norms
if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):
param.data = param.data.to(torch.float32)
_ = model(dummy_input)
# with attention mask
_ = model(dummy_input, attention_mask=dummy_attention_mask)
|
Overriding the test_flash_attn_2_fp32_ln test as the Zamba model, like Mixtral, doesn't support
right padding + use cache with FA2
|
test_flash_attn_2_fp32_ln
|
python
|
huggingface/transformers
|
tests/models/zamba/test_modeling_zamba.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/zamba/test_modeling_zamba.py
|
Apache-2.0
|
def test_past_key_values_format(self):
"""
Overwriting to pass the expected cache shapes (Zamba2 has cache shape = [batch_size, 0] for mamba layers)
"""
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
batch_size, seq_length = inputs["input_ids"].shape
per_head_embed_dim = config.attention_head_dim # note: this one is not a common attribute name
self_attention_cache_shape = (batch_size, config.num_key_value_heads, seq_length, per_head_embed_dim)
# build the full cache shapes, including mamba layers
all_cache_shapes = []
for i in range(config.num_hidden_layers):
if config.layers_block_type[i] == "mamba":
all_cache_shapes.append([torch.Size([batch_size, 0]), torch.Size([batch_size, 0])])
else:
all_cache_shapes.append([self_attention_cache_shape, self_attention_cache_shape])
super().test_past_key_values_format(custom_all_cache_shapes=all_cache_shapes)
|
Overwriting to pass the expected cache shapes (Zamba2 has cache shape = [batch_size, 0] for mamba layers)
|
test_past_key_values_format
|
python
|
huggingface/transformers
|
tests/models/zamba2/test_modeling_zamba2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/zamba2/test_modeling_zamba2.py
|
Apache-2.0
|
def test_initialization(self):
r"""
Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
if "A_log" in name:
A = torch.arange(1, config.n_mamba_heads + 1, dtype=torch.float32)[None, :]
self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5))
elif "D" in name:
# check if it's a ones like
self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5))
elif "dt_bias" in name:
dt = torch.exp(
torch.tensor([0, 1]) * (math.log(config.time_step_max) - math.log(config.time_step_min))
+ math.log(config.time_step_min)
).clamp(min=config.time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))
if param.requires_grad:
self.assertTrue(param.data.max().item() <= inv_dt[1])
self.assertTrue(param.data.min().item() >= inv_dt[0])
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
|
Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
|
test_initialization
|
python
|
huggingface/transformers
|
tests/models/zamba2/test_modeling_zamba2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/zamba2/test_modeling_zamba2.py
|
Apache-2.0
|
def test_attention_outputs(self):
r"""
Overriding the test_attention_outputs test as the Zamba2 model outputs attention only for its attention layers
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
|
Overriding the test_attention_outputs test as the Zamba2 model outputs attention only for its attention layers
|
test_attention_outputs
|
python
|
huggingface/transformers
|
tests/models/zamba2/test_modeling_zamba2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/zamba2/test_modeling_zamba2.py
|
Apache-2.0
|
def test_left_padding_compatibility(self):
r"""
    Overriding the test_left_padding_compatibility test, as the mamba layers accentuate the numerical differences
    caused by left padding (see the issue referenced in the note below). A more permissive tolerance value is used.
"""
import inspect
# NOTE: left-padding results in small numerical differences. This is expected.
# See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535
# First, filter out models that don't support left padding - generative and decoder-only.
# Zamba2 is a decoder-only architecture
decoder_only_classes = self.all_generative_model_classes
# Then, test left-padding
def _prepare_model_kwargs(input_ids, attention_mask, signature):
model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask}
if "position_ids" in signature:
position_ids = torch.cumsum(attention_mask, dim=-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
model_kwargs["position_ids"] = position_ids
if "cache_position" in signature:
cache_position = torch.arange(input_ids.shape[-1], device=torch_device)
model_kwargs["cache_position"] = cache_position
return model_kwargs
for model_class in decoder_only_classes:
config, input_ids, attention_mask = self._get_input_ids_and_config()
model = model_class(config).to(torch_device).eval()
signature = inspect.signature(model.forward).parameters.keys()
# Without padding
model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, signature)
next_logits_wo_padding = model(**model_kwargs).logits[:, -1, :]
# With left-padding (length 32)
pad_size = (input_ids.shape[0], 32)
padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * config.pad_token_id
padded_input_ids = torch.cat((padding, input_ids), dim=1)
padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1)
model_kwargs = _prepare_model_kwargs(padded_input_ids, padded_attention_mask, signature)
next_logits_with_padding = model(**model_kwargs).logits[:, -1, :]
# They should result in very similar logits
self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=3e-3))
|
Overriding the test_left_padding_compatibility test, as the mamba layers accentuate the numerical differences
caused by left padding (see the issue referenced in the note below). A more permissive tolerance value is used.
|
test_left_padding_compatibility
|
python
|
huggingface/transformers
|
tests/models/zamba2/test_modeling_zamba2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/zamba2/test_modeling_zamba2.py
|
Apache-2.0
|
def test_flash_attn_2_fp32_ln(self):
r"""
Overriding the test_flash_attn_2_fp32_ln test as the Zamba2 model, like Mixtral, doesn't support
right padding + use cache with FA2
"""
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
dummy_input = inputs_dict[model.main_input_name]
dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))
# NOTE: Zamba2 does not support right padding + use_cache with FA2.
dummy_attention_mask[:, -1] = 1
model = model_class.from_pretrained(
tmpdirname,
torch_dtype=torch.float16,
attn_implementation="flash_attention_2",
low_cpu_mem_usage=True,
load_in_4bit=True,
)
for _, param in model.named_parameters():
# upcast only layer norms
if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):
param.data = param.data.to(torch.float32)
_ = model(dummy_input)
# with attention mask
_ = model(dummy_input, attention_mask=dummy_attention_mask)
|
Overriding the test_flash_attn_2_fp32_ln test as the Zamba2 model, like Mixtral, doesn't support
right padding + use cache with FA2
|
test_flash_attn_2_fp32_ln
|
python
|
huggingface/transformers
|
tests/models/zamba2/test_modeling_zamba2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/zamba2/test_modeling_zamba2.py
|
Apache-2.0
|
def test_flex_attention_with_grads(self):
"""
Overwriting as the base hidden size is big enough for compile.
Manipulation of dims causes issues due to other constraints not being satisfied anymore.
"""
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config._attn_implementation = "flex_attention"
model = model_class(config).to(device=torch_device)
self.assertTrue(model.config._attn_implementation == "flex_attention")
# Elaborate workaround for encoder-decoder models as some do not specify their main input
dummy_inputs = {model.main_input_name: inputs_dict[model.main_input_name].to(torch_device)}
if config.is_encoder_decoder:
dummy_inputs["decoder_input_ids"] = inputs_dict["decoder_input_ids"].to(torch_device)
dummy_inputs["decoder_attention_mask"] = inputs_dict["decoder_attention_mask"].to(torch_device)
# If this does not raise an error, the test passes (see https://github.com/huggingface/transformers/pull/35605)
_ = model(**dummy_inputs)
|
Overwriting as the base hidden size is big enough for compile.
Manipulation of dims causes issues due to other constraints not being satisfied anymore.
|
test_flex_attention_with_grads
|
python
|
huggingface/transformers
|
tests/models/zamba2/test_modeling_zamba2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/zamba2/test_modeling_zamba2.py
|
Apache-2.0
|
def _check_lora_correctly_converted(self, model):
"""
Utility method to check if the model has correctly adapters injected on it.
"""
from peft.tuners.tuners_utils import BaseTunerLayer
is_peft_loaded = False
for _, m in model.named_modules():
if isinstance(m, BaseTunerLayer):
is_peft_loaded = True
break
return is_peft_loaded
|
Utility method to check if the model has correctly adapters injected on it.
|
_check_lora_correctly_converted
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
def test_peft_from_pretrained(self):
"""
Simple test that tests the basic usage of PEFT model through `from_pretrained`.
    This checks that, if we pass a remote folder containing an adapter config and adapter weights, the model
    is correctly loaded with the adapters injected into it.
"""
logger = logging.get_logger("transformers.integrations.peft")
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
with CaptureLogger(logger) as cl:
peft_model = transformers_class.from_pretrained(model_id).to(torch_device)
# ensure that under normal circumstances, there are no warnings about keys
self.assertNotIn("unexpected keys", cl.out)
self.assertNotIn("missing keys", cl.out)
self.assertTrue(self._check_lora_correctly_converted(peft_model))
self.assertTrue(peft_model._hf_peft_config_loaded)
# dummy generation
_ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device))
|
Simple test that tests the basic usage of PEFT model through `from_pretrained`.
This checks that, if we pass a remote folder containing an adapter config and adapter weights, the model
is correctly loaded with the adapters injected into it.
|
test_peft_from_pretrained
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
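A minimal sketch of the loading path exercised above. The adapter repository id below is a placeholder, not one of the test's `peft_test_model_ids`:
import torch
from transformers import AutoModelForCausalLM

# The repo id below is hypothetical: any Hub repo that contains only adapter_config.json
# and adapter weights on top of a causal-LM base would do.
model = AutoModelForCausalLM.from_pretrained("your-namespace/tiny-opt-lora-adapter")
print(model._hf_peft_config_loaded)  # True once the adapters are injected
_ = model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3]]), max_new_tokens=5)
|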
def test_peft_state_dict(self):
"""
Simple test that checks if the returned state dict of `get_adapter_state_dict()` method contains
the expected keys.
"""
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id).to(torch_device)
state_dict = peft_model.get_adapter_state_dict()
for key in state_dict.keys():
self.assertTrue("lora" in key)
|
Simple test that checks if the returned state dict of `get_adapter_state_dict()` method contains
the expected keys.
|
test_peft_state_dict
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
def test_peft_save_pretrained(self):
"""
Test that checks various combinations of `save_pretrained` with a model that has adapters loaded
on it. This checks if the saved model contains the expected files (adapter weights and adapter config).
"""
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id).to(torch_device)
with tempfile.TemporaryDirectory() as tmpdirname:
peft_model.save_pretrained(tmpdirname)
self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname))
self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
self.assertTrue("config.json" not in os.listdir(tmpdirname))
self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))
peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device)
self.assertTrue(self._check_lora_correctly_converted(peft_model))
peft_model.save_pretrained(tmpdirname, safe_serialization=False)
self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname))
self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device)
self.assertTrue(self._check_lora_correctly_converted(peft_model))
|
Test that checks various combinations of `save_pretrained` with a model that has adapters loaded
on it. This checks if the saved model contains the expected files (adapter weights and adapter config).
|
test_peft_save_pretrained
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
def test_peft_enable_disable_adapters(self):
"""
A test that checks if `enable_adapters` and `disable_adapters` methods work as expected.
"""
from peft import LoraConfig
dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False)
peft_model.add_adapter(peft_config)
peft_logits = peft_model(dummy_input).logits
peft_model.disable_adapters()
peft_logits_disabled = peft_model(dummy_input).logits
peft_model.enable_adapters()
peft_logits_enabled = peft_model(dummy_input).logits
torch.testing.assert_close(peft_logits, peft_logits_enabled, rtol=1e-12, atol=1e-12)
self.assertFalse(torch.allclose(peft_logits_enabled, peft_logits_disabled, atol=1e-12, rtol=1e-12))
|
A test that checks if `enable_adapters` and `disable_adapters` methods work as expected.
|
test_peft_enable_disable_adapters
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
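The enable/disable round trip above, outside the test harness. This is a sketch: the base model id is an assumption, and any small causal LM with a known LoRA target-module mapping works:
import torch
from peft import LoraConfig
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
model.add_adapter(LoraConfig(init_lora_weights=False))  # non-identity LoRA weights, so outputs change

dummy_input = torch.LongTensor([[0, 1, 2, 3]])
with_adapter = model(dummy_input).logits

model.disable_adapters()
without_adapter = model(dummy_input).logits  # matches the base model again

model.enable_adapters()
assert torch.allclose(model(dummy_input).logits, with_adapter)
assert not torch.allclose(with_adapter, without_adapter)
|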
def test_peft_add_adapter(self):
"""
Simple test that tests if `add_adapter` works as expected
"""
from peft import LoraConfig
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False)
model.add_adapter(peft_config)
self.assertTrue(self._check_lora_correctly_converted(model))
# dummy generation
_ = model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device))
|
Simple test that tests if `add_adapter` works as expected
|
test_peft_add_adapter
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
def test_peft_add_adapter_from_pretrained(self):
"""
Simple test that tests if `add_adapter` works as expected
"""
from peft import LoraConfig
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False)
model.add_adapter(peft_config)
self.assertTrue(self._check_lora_correctly_converted(model))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_from_pretrained = transformers_class.from_pretrained(tmpdirname).to(torch_device)
self.assertTrue(self._check_lora_correctly_converted(model_from_pretrained))
|
Simple test that tests if `add_adapter` works as expected
|
test_peft_add_adapter_from_pretrained
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
def test_peft_add_adapter_modules_to_save(self):
"""
Simple test that tests if `add_adapter` works as expected when training with
modules to save.
"""
from peft import LoraConfig
from peft.utils import ModulesToSaveWrapper
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)
model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False, modules_to_save=["lm_head"])
model.add_adapter(peft_config)
self._check_lora_correctly_converted(model)
_has_modules_to_save_wrapper = False
for name, module in model.named_modules():
if isinstance(module, ModulesToSaveWrapper):
_has_modules_to_save_wrapper = True
self.assertTrue(module.modules_to_save.default.weight.requires_grad)
self.assertTrue("lm_head" in name)
break
self.assertTrue(_has_modules_to_save_wrapper)
state_dict = model.get_adapter_state_dict()
self.assertTrue("lm_head.weight" in state_dict.keys())
logits = model(dummy_input).logits
loss = logits.mean()
loss.backward()
for _, param in model.named_parameters():
if param.requires_grad:
self.assertTrue(param.grad is not None)
|
Simple test that tests if `add_adapter` works as expected when training with
modules to save.
|
test_peft_add_adapter_modules_to_save
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
def test_peft_add_adapter_training_gradient_checkpointing(self):
"""
Simple test that tests if `add_adapter` works as expected when training with
gradient checkpointing.
"""
from peft import LoraConfig
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False)
model.add_adapter(peft_config)
self.assertTrue(self._check_lora_correctly_converted(model))
# When attaching adapters the input embeddings will stay frozen, this will
# lead to the output embedding having requires_grad=False.
dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)
frozen_output = model.get_input_embeddings()(dummy_input)
self.assertTrue(frozen_output.requires_grad is False)
model.gradient_checkpointing_enable()
# Since here we attached the hook, the input should have requires_grad to set
# properly
non_frozen_output = model.get_input_embeddings()(dummy_input)
self.assertTrue(non_frozen_output.requires_grad is True)
# To repro the Trainer issue
dummy_input.requires_grad = False
for name, param in model.named_parameters():
if "lora" in name.lower():
self.assertTrue(param.requires_grad)
logits = model(dummy_input).logits
loss = logits.mean()
loss.backward()
for name, param in model.named_parameters():
if param.requires_grad:
self.assertTrue("lora" in name.lower())
self.assertTrue(param.grad is not None)
|
Simple test that tests if `add_adapter` works as expected when training with
gradient checkpointing.
|
test_peft_add_adapter_training_gradient_checkpointing
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
def test_peft_add_multi_adapter(self):
"""
    Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test checks that
    `add_adapter` works as expected in a multi-adapter setting.
"""
from peft import LoraConfig
from peft.tuners.tuners_utils import BaseTunerLayer
dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
is_peft_loaded = False
model = transformers_class.from_pretrained(model_id).to(torch_device)
logits_original_model = model(dummy_input).logits
peft_config = LoraConfig(init_lora_weights=False)
model.add_adapter(peft_config)
logits_adapter_1 = model(dummy_input)
model.add_adapter(peft_config, adapter_name="adapter-2")
logits_adapter_2 = model(dummy_input)
for _, m in model.named_modules():
if isinstance(m, BaseTunerLayer):
is_peft_loaded = True
break
self.assertTrue(is_peft_loaded)
# dummy generation
_ = model.generate(input_ids=dummy_input)
model.set_adapter("default")
self.assertTrue(model.active_adapters() == ["default"])
self.assertTrue(model.active_adapter() == "default")
model.set_adapter("adapter-2")
self.assertTrue(model.active_adapters() == ["adapter-2"])
self.assertTrue(model.active_adapter() == "adapter-2")
# Logits comparison
self.assertFalse(
torch.allclose(logits_adapter_1.logits, logits_adapter_2.logits, atol=1e-6, rtol=1e-6)
)
self.assertFalse(torch.allclose(logits_original_model, logits_adapter_2.logits, atol=1e-6, rtol=1e-6))
model.set_adapter(["adapter-2", "default"])
self.assertTrue(model.active_adapters() == ["adapter-2", "default"])
self.assertTrue(model.active_adapter() == "adapter-2")
logits_adapter_mixed = model(dummy_input)
self.assertFalse(
torch.allclose(logits_adapter_1.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6)
)
self.assertFalse(
torch.allclose(logits_adapter_2.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6)
)
# multi active adapter saving not supported
with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
|
Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test checks that
`add_adapter` works as expected in a multi-adapter setting.
|
test_peft_add_multi_adapter
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
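Switching between several adapters as the test above does, sketched as standalone usage; the base model id is an assumption, and both adapters are freshly initialized:
from peft import LoraConfig
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
model.add_adapter(LoraConfig(init_lora_weights=False))                       # registered as "default"
model.add_adapter(LoraConfig(init_lora_weights=False), adapter_name="adapter-2")

model.set_adapter("adapter-2")
print(model.active_adapters())   # ["adapter-2"]

model.set_adapter(["adapter-2", "default"])
print(model.active_adapters())   # ["adapter-2", "default"] -- both adapters active at once
|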
def test_delete_adapter(self):
"""
Enhanced test for `delete_adapter` to handle multiple adapters,
edge cases, and proper error handling.
"""
from peft import LoraConfig
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
model = transformers_class.from_pretrained(model_id).to(torch_device)
# Add multiple adapters
peft_config_1 = LoraConfig(init_lora_weights=False)
peft_config_2 = LoraConfig(init_lora_weights=False)
model.add_adapter(peft_config_1, adapter_name="adapter_1")
model.add_adapter(peft_config_2, adapter_name="adapter_2")
# Ensure adapters were added
self.assertIn("adapter_1", model.peft_config)
self.assertIn("adapter_2", model.peft_config)
# Delete a single adapter
model.delete_adapter("adapter_1")
self.assertNotIn("adapter_1", model.peft_config)
self.assertIn("adapter_2", model.peft_config)
# Delete remaining adapter
model.delete_adapter("adapter_2")
self.assertFalse(hasattr(model, "peft_config"))
self.assertFalse(model._hf_peft_config_loaded)
# Re-add adapters for edge case tests
model.add_adapter(peft_config_1, adapter_name="adapter_1")
model.add_adapter(peft_config_2, adapter_name="adapter_2")
# Attempt to delete multiple adapters at once
model.delete_adapter(["adapter_1", "adapter_2"])
self.assertFalse(hasattr(model, "peft_config"))
self.assertFalse(model._hf_peft_config_loaded)
# Test edge cases
msg = re.escape("No adapter loaded. Please load an adapter first.")
with self.assertRaisesRegex(ValueError, msg):
model.delete_adapter("nonexistent_adapter")
model.add_adapter(peft_config_1, adapter_name="adapter_1")
with self.assertRaisesRegex(ValueError, "The following adapter\\(s\\) are not present"):
model.delete_adapter("nonexistent_adapter")
with self.assertRaisesRegex(ValueError, "The following adapter\\(s\\) are not present"):
model.delete_adapter(["adapter_1", "nonexistent_adapter"])
# Deleting with an empty list or None should not raise errors
model.add_adapter(peft_config_2, adapter_name="adapter_2")
model.delete_adapter([]) # No-op
self.assertIn("adapter_1", model.peft_config)
self.assertIn("adapter_2", model.peft_config)
# Deleting duplicate adapter names in the list
model.delete_adapter(["adapter_1", "adapter_1"])
self.assertNotIn("adapter_1", model.peft_config)
self.assertIn("adapter_2", model.peft_config)
|
Enhanced test for `delete_adapter` to handle multiple adapters,
edge cases, and proper error handling.
|
test_delete_adapter
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|
def test_peft_from_pretrained_kwargs(self):
"""
Simple test that tests the basic usage of PEFT model through `from_pretrained` + additional kwargs
    and checks that the integration behaves as expected.
"""
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto")
module = peft_model.model.decoder.layers[0].self_attn.v_proj
self.assertTrue(module.__class__.__name__ == "Linear8bitLt")
self.assertTrue(peft_model.hf_device_map is not None)
# dummy generation
_ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device))
|
Simple test that tests the basic usage of PEFT model through `from_pretrained` + additional kwargs
and checks that the integration behaves as expected.
|
test_peft_from_pretrained_kwargs
|
python
|
huggingface/transformers
|
tests/peft_integration/test_peft_integration.py
|
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
|
Apache-2.0
|