code (string, lengths 66-870k) | docstring (string, lengths 19-26.7k) | func_name (string, lengths 1-138) | language (1 class) | repo (string, lengths 7-68) | path (string, lengths 5-324) | url (string, lengths 46-389) | license (7 classes)
---|---|---|---|---|---|---|---|
def test_generation_beyond_sliding_window(self, attn_implementation: str):
"""Test that we can correctly generate beyond the sliding window. This is non trivial as
we need to correctly slice the attention mask in all cases (because we use a HybridCache).
Outputs for every attention functions should be coherent and identical.
"""
model_id = "google/gemma-3-1b-it"
if attn_implementation == "flash_attention_2" and not is_flash_attn_2_available():
self.skipTest("FlashAttention2 is required for this test.")
input_text = [
"This is a nice place. " * 800 + "I really enjoy the scenery,", # This is larger than 4096 tokens
"A list of colors: red, blue", # This will almost all be padding tokens
]
tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left")
inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device)
model = AutoModelForCausalLM.from_pretrained(
model_id, attn_implementation=attn_implementation, torch_dtype=torch.float16
).to(torch_device)
# Make sure prefill is larger than sliding window
input_size = inputs.input_ids.shape[-1]
self.assertTrue(input_size > model.config.sliding_window)
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)[:, input_size:]
output_text = tokenizer.batch_decode(out)
EXPECTED_COMPLETIONS = [" and I'm going to take a walk.\n\nI really enjoy the scenery, and I'", ", green, yellow, orange, purple, brown, black, white, gray.\n\nI'"] # fmt: skip
self.assertEqual(output_text, EXPECTED_COMPLETIONS)
|
Test that we can correctly generate beyond the sliding window. This is non-trivial, as
we need to correctly slice the attention mask in all cases (because we use a HybridCache).
Outputs for all attention implementations should be coherent and identical.
|
test_generation_beyond_sliding_window
|
python
|
huggingface/transformers
|
tests/models/gemma3/test_modeling_gemma3.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/gemma3/test_modeling_gemma3.py
|
Apache-2.0
|
def prepare_image_inputs(self, batch_size: Optional[int] = None):
"""This function prepares a list of PIL images for testing"""
images = super().prepare_image_inputs(batch_size)
if isinstance(images, (list, tuple)):
images = [[image] for image in images]
return images
|
This function prepares a list of PIL images for testing
|
prepare_image_inputs
|
python
|
huggingface/transformers
|
tests/models/gemma3/test_processing_gemma3.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/gemma3/test_processing_gemma3.py
|
Apache-2.0
|
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=5,
)
|
Tests that special vision tokens do not get truncated when `truncation=True` is set.
|
test_special_mm_token_truncation
|
python
|
huggingface/transformers
|
tests/models/gemma3/test_processing_gemma3.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/gemma3/test_processing_gemma3.py
|
Apache-2.0
|
def test_flash_attn_2_generate_padding_left(self):
"""
Overwriting the common test as the test is flaky on tiny models
"""
model = GPT2LMHeadModel.from_pretrained("gpt2", torch_dtype=torch.float16).to(0)
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
texts = ["hi", "Hello this is a very long sentence"]
tokenizer.padding_side = "left"
tokenizer.pad_token = tokenizer.eos_token
inputs = tokenizer(texts, return_tensors="pt", padding=True).to(0)
output_native = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_native = tokenizer.batch_decode(output_native)
model = GPT2LMHeadModel.from_pretrained(
"gpt2", device_map={"": 0}, attn_implementation="flash_attention_2", torch_dtype=torch.float16
)
output_fa_2 = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_fa_2 = tokenizer.batch_decode(output_fa_2)
expected_output = [
"<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>hi, who was born in the city of Kolkata, was a member of the Kolkata",
"Hello this is a very long sentence. I'm sorry. I'm sorry. I'm sorry. I'm sorry. I'm sorry",
]
self.assertListEqual(output_native, output_fa_2)
self.assertListEqual(output_native, expected_output)
|
Overwriting the common test as the test is flaky on tiny models
|
test_flash_attn_2_generate_padding_left
|
python
|
huggingface/transformers
|
tests/models/gpt2/test_modeling_gpt2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/gpt2/test_modeling_gpt2.py
|
Apache-2.0
|
def test_lm_generate_distilgpt2_left_padding(self):
"""Tests that the generated text is the same, regardless of left padding"""
model = TFGPT2LMHeadModel.from_pretrained("distilbert/distilgpt2")
tokenizer = GPT2Tokenizer.from_pretrained("distilbert/distilgpt2")
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "left"
generation_kwargs = {
"bad_words_ids": [tokenizer("is").input_ids, tokenizer("angry about").input_ids],
"no_repeat_ngram_size": 2,
"do_sample": False,
"repetition_penalty": 1.3,
}
expected_output_string = (
"Today is a beautiful day and I am so happy to be able take part in this amazing event."
)
sentences = ["Today is a beautiful day and"]
input_ids = tokenizer(sentences, return_tensors="tf", padding=True)
# using default length
output_ids = model.generate(**input_ids, **generation_kwargs)
output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
self.assertEqual(output_strings[0], expected_output_string)
sentences = ["Today is a beautiful day and", "This is a very long input that we absolutely don't care about"]
input_ids = tokenizer(sentences, return_tensors="tf", padding=True)
# longer max length to capture the full length (remember: it is left padded)
output_ids = model.generate(**input_ids, **generation_kwargs, max_length=27)
output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
self.assertEqual(output_strings[0], expected_output_string)
|
Tests that the generated text is the same, regardless of left padding
|
test_lm_generate_distilgpt2_left_padding
|
python
|
huggingface/transformers
|
tests/models/gpt2/test_modeling_tf_gpt2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/gpt2/test_modeling_tf_gpt2.py
|
Apache-2.0
|
def copy_cache(cache: DynamicCache):
    """Deep copy a DynamicCache to reuse the same one multiple times."""
    new_cache = DynamicCache()
    for i in range(len(cache)):
        new_cache.key_cache.append(cache.key_cache[i].clone())
        new_cache.value_cache.append(cache.value_cache[i].clone())
    return new_cache
|
Deep copy a DynamicCache to reuse the same one multiple times.
|
copy_cache
|
python
|
huggingface/transformers
|
tests/models/gpt_neox/test_modeling_gpt_neox.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/gpt_neox/test_modeling_gpt_neox.py
|
Apache-2.0
|
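For context, a minimal usage sketch (not from the test file; it assumes the fixed `copy_cache` helper above and the public `DynamicCache` API) showing that the copy is independent of the original cache:

```python
import torch
from transformers.cache_utils import DynamicCache

# Build a tiny cache with one layer of fake key/value states (shape: batch, heads, seq, head_dim).
cache = DynamicCache()
cache.update(torch.randn(1, 2, 4, 8), torch.randn(1, 2, 4, 8), layer_idx=0)

copied = copy_cache(cache)   # helper defined above
copied.key_cache[0].zero_()  # mutate only the copy
assert not torch.equal(copied.key_cache[0], cache.key_cache[0])
```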
def test_save_load_pretrained_default(self):
"""Ensure we can save / reload a processor correctly."""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_processor,
)
processor.save_pretrained(self.tmpdirname)
processor = GraniteSpeechProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, GPT2TokenizerFast)
self.assertEqual(processor.audio_processor.to_json_string(), audio_processor.to_json_string())
self.assertIsInstance(processor.audio_processor, GraniteSpeechFeatureExtractor)
|
Ensure we can save / reload a processor correctly.
|
test_save_load_pretrained_default
|
python
|
huggingface/transformers
|
tests/models/granite_speech/test_processor_granite_speech.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py
|
Apache-2.0
|
def test_bad_text_fails(self):
"""Ensure we gracefully fail if text is the wrong type."""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(tokenizer=tokenizer, audio_processor=audio_processor)
with pytest.raises(TypeError):
processor(text=424, audio=None)
|
Ensure we gracefully fail if text is the wrong type.
|
test_bad_text_fails
|
python
|
huggingface/transformers
|
tests/models/granite_speech/test_processor_granite_speech.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py
|
Apache-2.0
|
def test_bad_nested_text_fails(self):
"""Ensure we gracefully fail if text is the wrong nested type."""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_processor,
)
with pytest.raises(TypeError):
processor(text=[424], audio=None)
|
Ensure we gracefully fail if text is the wrong nested type.
|
test_bad_nested_text_fails
|
python
|
huggingface/transformers
|
tests/models/granite_speech/test_processor_granite_speech.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py
|
Apache-2.0
|
def test_bad_audio_fails(self):
"""Ensure we gracefully fail if audio is the wrong type."""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_processor,
)
with pytest.raises(TypeError):
processor(text=None, audio="foo")
|
Ensure we gracefully fail if audio is the wrong type.
|
test_bad_audio_fails
|
python
|
huggingface/transformers
|
tests/models/granite_speech/test_processor_granite_speech.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py
|
Apache-2.0
|
def test_nested_bad_audio_fails(self):
"""Ensure we gracefully fail if audio is the wrong nested type."""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_processor,
)
with pytest.raises(TypeError):
processor(text=None, audio=["foo"])
|
Ensure we gracefully fail if audio is the wrong nested type.
|
test_nested_bad_audio_fails
|
python
|
huggingface/transformers
|
tests/models/granite_speech/test_processor_granite_speech.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py
|
Apache-2.0
|
def test_audio_token_filling_same_len_feature_tensors(self, vec_dims, num_expected_features, random_func):
"""Ensure audio token filling is handled correctly when we have
one or more audio inputs whose features are all the same length
stacked into a tensor / numpy array.
NOTE: Currently we enforce that each sample can only have one audio.
"""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_processor,
)
audio = random_func(*vec_dims) - 0.5
audio_tokens = processor.audio_token * vec_dims[0]
inputs = processor(text=f"{audio_tokens} Can you compare this audio?", audio=audio, return_tensors="pt")
# Check the number of audio tokens
audio_token_id = tokenizer.get_vocab()[processor.audio_token]
# Make sure the number of audio tokens matches the number of features
num_computed_features = processor.audio_processor._get_num_audio_features(
[vec_dims[1] for _ in range(vec_dims[0])],
)
num_audio_tokens = int(torch.sum(inputs["input_ids"] == audio_token_id))
assert list(inputs["input_features"].shape) == [vec_dims[0], 844, 160]
assert sum(num_computed_features) == num_audio_tokens
|
Ensure audio token filling is handled correctly when we have
one or more audio inputs whose features are all the same length
stacked into a tensor / numpy array.
NOTE: Currently we enforce that each sample can only have one audio.
|
test_audio_token_filling_same_len_feature_tensors
|
python
|
huggingface/transformers
|
tests/models/granite_speech/test_processor_granite_speech.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py
|
Apache-2.0
|
def test_audio_token_filling_varying_len_feature_list(self):
"""Ensure audio token filling is handled correctly when we have
multiple audio sequences of varying length passed as a list.
"""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_processor,
)
vec_dims = [[1, 142100], [1, 269920]]
num_expected_features = [90, 171]
audio = [torch.rand(dims) - 0.5 for dims in vec_dims]
inputs = processor(
text=[
f"{processor.audio_token} Can you describe this audio?",
f"{processor.audio_token} How does it compare with this audio?",
],
audio=audio,
return_tensors="pt",
)
# Check the number of audio tokens
audio_token_id = tokenizer.get_vocab()[processor.audio_token]
# Make sure the number of audio tokens matches the number of features
num_calculated_features = processor.audio_processor._get_num_audio_features(
[dims[1] for dims in vec_dims],
)
num_audio_tokens = int(torch.sum(inputs["input_ids"] == audio_token_id))
assert num_calculated_features == [90, 171]
assert sum(num_expected_features) == num_audio_tokens
|
Ensure audio token filling is handled correctly when we have
multiple audio sequences of varying length passed as a list.
|
test_audio_token_filling_varying_len_feature_list
|
python
|
huggingface/transformers
|
tests/models/granite_speech/test_processor_granite_speech.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py
|
Apache-2.0
|
def test_device_override(self):
"""Ensure that we regardless of the processing device, the tensors
produced are on the CPU.
"""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_processor,
)
vec_dims = [1, 269920]
wav = torch.rand(vec_dims) - 0.5
inputs = processor(
text=f"{processor.audio_token} Can you transcribe this audio?",
audio=wav,
return_tensors="pt",
device=torch_device,
)
assert inputs["input_features"].device.type == "cpu"
|
Ensure that, regardless of the processing device, the tensors
produced are on the CPU.
|
test_device_override
|
python
|
huggingface/transformers
|
tests/models/granite_speech/test_processor_granite_speech.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py
|
Apache-2.0
|
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to GroundingDinoImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w)
expected_width = self.size["shortest_edge"]
elif w > h:
expected_height = self.size["shortest_edge"]
expected_width = int(self.size["shortest_edge"] * w / h)
else:
expected_height = self.size["shortest_edge"]
expected_width = self.size["shortest_edge"]
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
|
This function computes the expected height and width when providing images to GroundingDinoImageProcessor,
assuming do_resize is set to True with a scalar size.
|
get_expected_values
|
python
|
huggingface/transformers
|
tests/models/grounding_dino/test_image_processing_grounding_dino.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/grounding_dino/test_image_processing_grounding_dino.py
|
Apache-2.0
|
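As a worked example of the shortest-edge rule in the helper above (a sketch that mirrors only this helper, not the full image processor, which may also cap the longest edge):

```python
# Mirror of the helper's shortest-edge rule for a single image of width w and height h.
def expected_hw(w: int, h: int, shortest_edge: int = 800) -> tuple[int, int]:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (expected_height, expected_width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

print(expected_hw(480, 640))  # (1066, 800): the shorter side (width) becomes 800
print(expected_hw(640, 480))  # (800, 1066): the shorter side (height) becomes 800
```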
def generate_fake_bounding_boxes(n_boxes):
"""Generate bounding boxes in the format (center_x, center_y, width, height)"""
# Validate the input
if not isinstance(n_boxes, int):
raise ValueError("n_boxes must be an integer")
if n_boxes <= 0:
raise ValueError("n_boxes must be a positive integer")
# Generate random bounding boxes in the format (center_x, center_y, width, height)
bounding_boxes = torch.rand((n_boxes, 4))
# Extract the components
center_x = bounding_boxes[:, 0]
center_y = bounding_boxes[:, 1]
width = bounding_boxes[:, 2]
height = bounding_boxes[:, 3]
# Ensure width and height do not exceed bounds
width = torch.min(width, torch.tensor(1.0))
height = torch.min(height, torch.tensor(1.0))
# Ensure the bounding box stays within the normalized space
center_x = torch.where(center_x - width / 2 < 0, width / 2, center_x)
center_x = torch.where(center_x + width / 2 > 1, 1 - width / 2, center_x)
center_y = torch.where(center_y - height / 2 < 0, height / 2, center_y)
center_y = torch.where(center_y + height / 2 > 1, 1 - height / 2, center_y)
# Combine back into bounding boxes
bounding_boxes = torch.stack([center_x, center_y, width, height], dim=1)
return bounding_boxes
|
Generate bounding boxes in the format (center_x, center_y, width, height)
|
generate_fake_bounding_boxes
|
python
|
huggingface/transformers
|
tests/models/grounding_dino/test_modeling_grounding_dino.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/grounding_dino/test_modeling_grounding_dino.py
|
Apache-2.0
|
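For reference, a small sketch (a hypothetical helper, not part of the test file) converting the normalized (center_x, center_y, width, height) boxes produced above into absolute corner coordinates:

```python
import torch

def center_to_corners(boxes_cxcywh: torch.Tensor, image_width: int, image_height: int) -> torch.Tensor:
    """Convert normalized (cx, cy, w, h) boxes to absolute (x0, y0, x1, y1) corners."""
    cx, cy, w, h = boxes_cxcywh.unbind(-1)
    corners = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
    scale = torch.tensor([image_width, image_height, image_width, image_height], dtype=corners.dtype)
    return corners * scale

boxes = generate_fake_bounding_boxes(3)          # helper defined above
print(center_to_corners(boxes, 640, 480).shape)  # torch.Size([3, 4])
```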
def test_create_position_ids_respects_padding_index(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is IBertEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = IBertEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
|
This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is IBertEmbeddings.padding_idx + 1
|
test_create_position_ids_respects_padding_index
|
python
|
huggingface/transformers
|
tests/models/ibert/test_modeling_ibert.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/ibert/test_modeling_ibert.py
|
Apache-2.0
|
def test_create_position_ids_from_inputs_embeds(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is IBertEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = IBertEmbeddings(config=config)
inputs_embeds = torch.empty(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
|
This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is IBertEmbeddings.padding_idx + 1
|
test_create_position_ids_from_inputs_embeds
|
python
|
huggingface/transformers
|
tests/models/ibert/test_modeling_ibert.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/ibert/test_modeling_ibert.py
|
Apache-2.0
|
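A minimal sketch of the padding-aware position-id logic that both tests above exercise (this mirrors the behaviour being checked; it is not the library implementation itself):

```python
import torch

def make_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    """Non-padding tokens get consecutive positions starting at padding_idx + 1; padding keeps padding_idx."""
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.tensor([[12, 31, 13, 1]])          # assume padding_idx == 1 for illustration
print(make_position_ids(input_ids, padding_idx=1))   # tensor([[2, 3, 4, 1]])
```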
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to IdeficsImageProcessor,
assuming do_resize is set to True with a scalar size and size_divisor.
"""
if not batched:
size = self.image_size
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
scale = size / min(w, h)
if h < w:
newh, neww = size, scale * w
else:
newh, neww = scale * h, size
max_size = int((1333 / 800) * size)
if max(newh, neww) > max_size:
scale = max_size / max(newh, neww)
newh = newh * scale
neww = neww * scale
newh, neww = int(newh + 0.5), int(neww + 0.5)
expected_height, expected_width = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
|
This function computes the expected height and width when providing images to IdeficsImageProcessor,
assuming do_resize is set to True with a scalar size and size_divisor.
|
get_expected_values
|
python
|
huggingface/transformers
|
tests/models/idefics/test_image_processing_idefics.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_image_processing_idefics.py
|
Apache-2.0
|
def test_left_padding_compatibility(self):
"""Overwrite because IDEFICS needs image attention mask to be also padded"""
# NOTE: left-padding results in small numerical differences. This is expected.
# See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535
def _prepare_model_kwargs(input_ids, attention_mask, image_attention_mask, signature):
model_kwargs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"image_attention_mask": image_attention_mask,
}
if "position_ids" in signature:
position_ids = torch.cumsum(attention_mask, dim=-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
model_kwargs["position_ids"] = position_ids
if "cache_position" in signature:
cache_position = torch.arange(input_ids.shape[-1], device=torch_device)
model_kwargs["cache_position"] = cache_position
return model_kwargs
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
input_ids = inputs_dict.pop("input_ids")
attention_mask = inputs_dict.pop("attention_mask")
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
image_attention_mask = inputs_dict.pop("image_attention_mask", None)
model = model_class(config).to(torch_device).eval()
signature = inspect.signature(model.forward).parameters.keys()
# no cache as some models require special cache classes to be init outside forward
model.generation_config.use_cache = False
# Without padding
model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, image_attention_mask, signature)
next_logits_wo_padding = model(**model_kwargs, **inputs_dict).logits[:, -1, :]
# With left-padding (length 32)
# can hardcode pad_token to be 0 as we'll do attn masking anyway
pad_token_id = (
config.get_text_config().pad_token_id if config.get_text_config().pad_token_id is not None else 0
)
pad_size = (input_ids.shape[0], 32)
padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * pad_token_id
padded_input_ids = torch.cat((padding, input_ids), dim=1)
padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1)
pad_size_img = (input_ids.shape[0], 32, image_attention_mask.shape[-1])
extra_img_mask = torch.zeros(pad_size_img, dtype=image_attention_mask.dtype, device=torch_device)
padded_image_attention_mask = torch.cat([extra_img_mask, image_attention_mask], dim=1)
model_kwargs = _prepare_model_kwargs(
padded_input_ids, padded_attention_mask, padded_image_attention_mask, signature
)
next_logits_with_padding = model(**model_kwargs, **inputs_dict).logits[:, -1, :]
# They should result in very similar logits
torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)
|
Overwrite because IDEFICS needs the image attention mask to be padded as well
|
test_left_padding_compatibility
|
python
|
huggingface/transformers
|
tests/models/idefics/test_modeling_idefics.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_modeling_idefics.py
|
Apache-2.0
|
def test_generate_continue_from_past_key_values(self):
"""Overwrite because IDEFICS needs image attention mask to be also processed"""
# Tests that we can continue generating from past key values, returned from a previous `generate` call
for model_class in self.all_generative_model_classes:
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
# Let's make it always:
# 1. use cache (for obvious reasons)
# 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which
# would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the
# continuation would force it to generate beyond an EOS token)
# 3. ignore `token_type_ids` for simplicity
# 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is
# active by default on some models
# 5. ignore `encoder_no_repeat_ngram_size`, which is set by default in some encoder-decoder models. When
# we use their decoder as a stand-alone model, `encoder_no_repeat_ngram_size` actually prevents
# repetition exclusively from the prompt. This test relies on comparing one call vs 2 calls
# with cache, what is considered a prompt is different in the two cases.
model = model_class(config).to(torch_device)
model.eval()
model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1
model.generation_config.forced_eos_token_id = None
model.generation_config.encoder_no_repeat_ngram_size = 0
model.generation_config.use_cache = True
# Traditional way of generating text, with `return_dict_in_generate` to return the past key values
outputs = model.generate(**inputs, do_sample=False, max_new_tokens=4, return_dict_in_generate=True)
# Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the
# inputs may need to be tweaked across `generate` calls (like the attention mask).
outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=3, return_dict_in_generate=True)
# Continue from the tokens generated above, preparing the inputs accordingly
inputs["past_key_values"] = outputs_cached.past_key_values
new_attention_len = outputs_cached.sequences.shape[-1]
inputs["input_ids"] = outputs_cached.sequences
if "attention_mask" in inputs:
inputs["attention_mask"] = torch.nn.functional.pad(
inputs["attention_mask"],
(0, new_attention_len - inputs["attention_mask"].shape[1]),
mode="constant",
value=1,
)
if "image_attention_mask" in inputs:
inputs["image_attention_mask"] = inputs["image_attention_mask"][:, -1:, :]
outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=1, return_dict_in_generate=True)
# The two sets of generated text and past kv should be equal to each other
self.assertListEqual(outputs.sequences.tolist(), outputs_cached.sequences.tolist())
for layer_idx in range(len(outputs_cached.past_key_values)):
for kv_idx in range(len(outputs_cached.past_key_values[layer_idx])):
self.assertTrue(
torch.allclose(
outputs.past_key_values[layer_idx][kv_idx],
outputs_cached.past_key_values[layer_idx][kv_idx],
)
)
|
Overwrite because IDEFICS needs the image attention mask to be processed as well
|
test_generate_continue_from_past_key_values
|
python
|
huggingface/transformers
|
tests/models/idefics/test_modeling_idefics.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_modeling_idefics.py
|
Apache-2.0
|
def test_generate_without_input_ids(self):
"""Overwrite because IDEFICS needs image attention mask to be also processed and requires image at input always."""
config, input_dict = self.prepare_config_and_inputs_for_generate()
pixel_values = input_dict["pixel_values"]
image_attention_mask = input_dict["image_attention_mask"][:, -1:, :]
# hack in case they are equal, otherwise the attn mask will be [0]
if config.bos_token_id == config.pad_token_id:
config.pad_token_id = None
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
model.eval()
output_ids_generate = model.generate(
pixel_values=pixel_values,
image_attention_mask=image_attention_mask,
do_sample=False,
max_new_tokens=self.max_new_tokens,
remove_invalid_values=True,
)
self.assertIsNotNone(output_ids_generate)
|
Overwrite because IDEFICS needs the image attention mask to be processed as well and always requires an image as input.
|
test_generate_without_input_ids
|
python
|
huggingface/transformers
|
tests/models/idefics/test_modeling_idefics.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_modeling_idefics.py
|
Apache-2.0
|
def test_generate_continue_from_inputs_embeds(self):
"""Overwrite for IDEFICS: Ensure image attention mask is processed while continuing from `inputs_embeds`."""
for model_class in self.all_generative_model_classes:
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config).to(torch_device).eval()
model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1
model.generation_config.forced_eos_token_id = None
model.generation_config.use_cache = True
input_ids = inputs.pop("input_ids")
input_embeds = model.get_input_embeddings()(input_ids)
generation_kwargs = {
"return_dict_in_generate": True,
"do_sample": False,
}
inputs["inputs_embeds"] = input_embeds
# Traditional way of generating text, with `return_dict_in_generate` to return the past key values
outputs = model.generate(**inputs, max_new_tokens=4, **generation_kwargs)
# Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the
# inputs may need to be tweaked across `generate` calls (like the attention mask).
initial_output = model.generate(**inputs, max_new_tokens=3, **generation_kwargs)
inputs["past_key_values"] = initial_output.past_key_values
new_attention_len = input_ids.shape[1] + initial_output.sequences.shape[-1]
continued_embeds = torch.cat([input_embeds, model.get_input_embeddings()(initial_output.sequences)], dim=1)
inputs["inputs_embeds"] = continued_embeds
if "attention_mask" in inputs:
inputs["attention_mask"] = torch.nn.functional.pad(
inputs["attention_mask"],
(0, new_attention_len - inputs["attention_mask"].shape[1]),
mode="constant",
value=1,
)
if "image_attention_mask" in inputs:
inputs["image_attention_mask"] = inputs["image_attention_mask"][..., -1:, :]
cached_output = model.generate(**inputs, max_new_tokens=1, **generation_kwargs)
# Verify that the combined outputs match the full generation.
combined_output_sequences = torch.concat([initial_output.sequences, cached_output.sequences], axis=1)
self.assertListEqual(outputs.sequences.tolist(), combined_output_sequences.tolist())
for layer_idx in range(len(cached_output.past_key_values)):
for kv_idx in range(len(cached_output.past_key_values[layer_idx])):
self.assertTrue(
torch.allclose(
outputs.past_key_values[layer_idx][kv_idx],
cached_output.past_key_values[layer_idx][kv_idx],
)
)
|
Overwrite for IDEFICS: Ensure image attention mask is processed while continuing from `inputs_embeds`.
|
test_generate_continue_from_inputs_embeds
|
python
|
huggingface/transformers
|
tests/models/idefics/test_modeling_idefics.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_modeling_idefics.py
|
Apache-2.0
|
def _check_attentions_for_generate(
self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
):
"""
Overwrite from the generation tests because Idefics has only SDPA layers.
Do not skip, because we still want the generation tests to run; instead, the shape checks are removed.
"""
pass
|
Overwrite from the generation tests because Idefics has only SDPA layers.
Do not skip, because we still want the generation tests to run; instead, the shape checks are removed.
|
_check_attentions_for_generate
|
python
|
huggingface/transformers
|
tests/models/idefics/test_modeling_idefics.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_modeling_idefics.py
|
Apache-2.0
|
def prepare_prompts(self):
"""This function prepares a list of PIL images"""
num_images = 2
images = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8) for x in range(num_images)]
images = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in images]
prompts = [
# text and 1 image
[
"User:",
images[0],
"Describe this image.\nAssistant:",
],
# text and images
[
"User:",
images[0],
"Describe this image.\nAssistant: An image of two dogs.\n",
"User:",
images[1],
"Describe this image.\nAssistant:",
],
# only text
[
"User:",
"Describe this image.\nAssistant: An image of two kittens.\n",
"User:",
"Describe this image.\nAssistant:",
],
# only images
[
images[0],
images[1],
],
]
return prompts
|
This function prepares a list of PIL images
|
prepare_prompts
|
python
|
huggingface/transformers
|
tests/models/idefics/test_processor_idefics.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_processor_idefics.py
|
Apache-2.0
|
def test_tokenizer_left_padding(self):
"""Identical to test_tokenizer_padding, but with padding_side not explicitly set."""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor)
predicted_tokens = [
"<unk><unk><unk><unk><unk><unk><unk><unk><unk><s> Describe this image.\nAssistant:",
"<unk><unk><unk><unk><unk><unk><unk><unk><unk><unk><s> Describe this image.\nAssistant:",
]
predicted_attention_masks = [
([0] * 9) + ([1] * 10),
([0] * 10) + ([1] * 10),
]
prompts = [[prompt] for prompt in self.prepare_prompts()[2]]
max_length = processor(text=prompts, padding="max_length", truncation=True, max_length=20)
longest = processor(text=prompts, padding="longest", truncation=True, max_length=30)
decoded_max_length = processor.tokenizer.decode(max_length["input_ids"][-1])
decoded_longest = processor.tokenizer.decode(longest["input_ids"][-1])
self.assertEqual(decoded_max_length, predicted_tokens[1])
self.assertEqual(decoded_longest, predicted_tokens[0])
self.assertListEqual(max_length["attention_mask"][-1].tolist(), predicted_attention_masks[1])
self.assertListEqual(longest["attention_mask"][-1].tolist(), predicted_attention_masks[0])
|
Identical to test_tokenizer_padding, but with padding_side not explicitly set.
|
test_tokenizer_left_padding
|
python
|
huggingface/transformers
|
tests/models/idefics/test_processor_idefics.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_processor_idefics.py
|
Apache-2.0
|
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to the Idefics2 image processor,
assuming do_resize is set to True with shortest_edge and longest_edge sizes.
"""
if not batched:
shortest_edge = self.size["shortest_edge"]
longest_edge = self.size["longest_edge"]
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
aspect_ratio = w / h
if w > h and w >= longest_edge:
w = longest_edge
h = int(w / aspect_ratio)
elif h > w and h >= longest_edge:
h = longest_edge
w = int(h * aspect_ratio)
w = max(w, shortest_edge)
h = max(h, shortest_edge)
expected_height = h
expected_width = w
else:
expected_values = []
for images in image_inputs:
for image in images:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
|
This function computes the expected height and width when providing images to the Idefics2 image processor,
assuming do_resize is set to True with shortest_edge and longest_edge sizes.
|
get_expected_values
|
python
|
huggingface/transformers
|
tests/models/idefics2/test_image_processing_idefics2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics2/test_image_processing_idefics2.py
|
Apache-2.0
|
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
batch_size = batch_size if batch_size is not None else self.batch_size
min_resolution = min_resolution if min_resolution is not None else self.min_resolution
max_resolution = max_resolution if max_resolution is not None else self.max_resolution
num_channels = num_channels if num_channels is not None else self.num_channels
num_images = num_images if num_images is not None else self.num_images
images_list = []
for i in range(batch_size):
images = []
for j in range(num_images):
if equal_resolution:
width = height = max_resolution
else:
# To avoid getting image width/height 0
if size_divisor is not None:
# If `size_divisor` is defined, the image needs to have width/height >= `size_divisor`
min_resolution = max(size_divisor, min_resolution)
width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
images_list.append(images)
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]
if torchify:
images_list = [[torch.from_numpy(image) for image in images] for images in images_list]
if numpify:
# Numpy images are typically in channels last format
images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list]
return images_list
|
This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
|
prepare_image_inputs
|
python
|
huggingface/transformers
|
tests/models/idefics2/test_image_processing_idefics2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics2/test_image_processing_idefics2.py
|
Apache-2.0
|
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
batch_size = batch_size if batch_size is not None else self.batch_size
min_resolution = min_resolution if min_resolution is not None else self.min_resolution
max_resolution = max_resolution if max_resolution is not None else self.max_resolution
num_channels = num_channels if num_channels is not None else self.num_channels
num_images = num_images if num_images is not None else self.num_images
images_list = []
for i in range(batch_size):
images = []
for j in range(num_images):
if equal_resolution:
width = height = max_resolution
else:
# To avoid getting image width/height 0
if size_divisor is not None:
# If `size_divisor` is defined, the image needs to have width/height >= `size_divisor`
min_resolution = max(size_divisor, min_resolution)
width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
images_list.append(images)
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]
if torchify:
images_list = [[torch.from_numpy(image) for image in images] for images in images_list]
if numpify:
# Numpy images are typically in channels last format
images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list]
return images_list
|
This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
|
prepare_image_inputs
|
python
|
huggingface/transformers
|
tests/models/idefics3/test_image_processing_idefics3.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics3/test_image_processing_idefics3.py
|
Apache-2.0
|
def test_text_only_inference(self):
"""Test that the processor works correctly with text-only input."""
processor = self.get_processor()
text = "This is a simple text without images."
inputs = processor(text=text)
tokenized_sentence = processor.tokenizer(text, add_special_tokens=False)
expected_input_ids = [[self.bos_token_id] + tokenized_sentence["input_ids"]]
self.assertEqual(inputs["input_ids"], expected_input_ids)
self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])])
self.assertTrue("pixel_values" not in inputs)
self.assertTrue("pixel_attention_mask" not in inputs)
# Test batch of texts without image tokens
texts = ["First text.", "Second piece of text."]
batch_inputs = processor(text=texts, padding=True)
tokenized_1 = processor.tokenizer(texts[0], add_special_tokens=False)
tokenized_2 = processor.tokenizer(texts[1], add_special_tokens=False)
expected_1 = [self.bos_token_id] + tokenized_1["input_ids"]
expected_2 = [self.bos_token_id] + tokenized_2["input_ids"]
# Pad the shorter sequence
pad_len = len(expected_2) - len(expected_1)
if pad_len > 0:
padded_expected_1 = [self.padding_token_id] * pad_len + expected_1
expected_attention_1 = [0] * pad_len + [1] * len(expected_1)
self.assertEqual(batch_inputs["input_ids"], [padded_expected_1, expected_2])
self.assertEqual(batch_inputs["attention_mask"], [expected_attention_1, [1] * len(expected_2)])
else:
pad_len = -pad_len
padded_expected_2 = [self.padding_token_id] * pad_len + expected_2
expected_attention_2 = [0] * pad_len + [1] * len(expected_2)
self.assertEqual(batch_inputs["input_ids"], [expected_1, padded_expected_2])
self.assertEqual(batch_inputs["attention_mask"], [[1] * len(expected_1), expected_attention_2])
|
Test that the processor works correctly with text-only input.
|
test_text_only_inference
|
python
|
huggingface/transformers
|
tests/models/idefics3/test_processor_idefics3.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics3/test_processor_idefics3.py
|
Apache-2.0
|
def test_missing_images_error(self):
"""Test that appropriate error is raised when images are referenced but not provided."""
processor = self.get_processor()
# Test single text with image token but no image
text = "Let me show you this image: <image> What do you think?"
with self.assertRaises(ValueError) as context:
processor(text=text)
self.assertTrue("tokens in the text but no images were passed" in str(context.exception))
# Test batch with image tokens but no images
texts = [
"First text with <image> token.",
"Second text <image> with token.",
]
with self.assertRaises(ValueError) as context:
processor(text=texts)
self.assertTrue("tokens in the text but no images were passed" in str(context.exception))
# Test with None as Images
with self.assertRaises(ValueError) as context:
processor(text=text, images=None)
self.assertTrue("tokens in the text but no images were passed" in str(context.exception))
with self.assertRaises(ValueError) as context:
processor(text=texts, images=None)
self.assertTrue("tokens in the text but no images were passed" in str(context.exception))
|
Test that appropriate error is raised when images are referenced but not provided.
|
test_missing_images_error
|
python
|
huggingface/transformers
|
tests/models/idefics3/test_processor_idefics3.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/idefics3/test_processor_idefics3.py
|
Apache-2.0
|
def test_inference_fp16(self):
r"""
A small test to make sure that inference works in half precision without any problems.
"""
model = IJepaModel.from_pretrained(
"facebook/ijepa_vith14_1k",
torch_dtype=torch.float16,
device_map="auto",
)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_ = model(pixel_values)
|
A small test to make sure that inference works in half precision without any problems.
|
test_inference_fp16
|
python
|
huggingface/transformers
|
tests/models/ijepa/test_modeling_ijepa.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/ijepa/test_modeling_ijepa.py
|
Apache-2.0
|
def test_sdpa_can_dispatch_composite_models(self):
"""
Tests whether composite models dispatch correctly to SDPA/eager when requested at load time.
This test only looks at layer names, as SDPA layers are usually called "SDPAAttention".
In contrast to the above test, this one checks whether "config._attn_implementation" is a dict after the model
is loaded, because we manually replicate the requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info.
The test tries to cover the most general cases of composite models, i.e. VLMs with vision and text configs. Any
model with a different set of sub-configs has to overwrite this test.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
if not self._is_composite:
self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = model_class.from_pretrained(tmpdirname)
model_sdpa = model_sdpa.eval().to(torch_device)
# `None` as it is the requested one which will be assigned to each sub-config
# Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
self.assertTrue(model.language_model.config._attn_implementation == "sdpa")
self.assertTrue(model.vision_model.config._attn_implementation == "sdpa")
self.assertTrue(model.qformer.config._attn_implementation == "eager")
model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
model_eager = model_eager.eval().to(torch_device)
self.assertTrue(model_eager.config._attn_implementation == "eager")
self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager")
self.assertTrue(model_eager.qformer.config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if (
class_name.endswith("Attention")
and getattr(submodule, "config", None)
and submodule.config._attn_implementation == "sdpa"
):
raise ValueError("The eager model should not have SDPA attention layers")
|
Tests whether composite models dispatch correctly to SDPA/eager when requested at load time.
This test only looks at layer names, as SDPA layers are usually called "SDPAAttention".
In contrast to the above test, this one checks whether "config._attn_implementation" is a dict after the model
is loaded, because we manually replicate the requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info.
The test tries to cover the most general cases of composite models, i.e. VLMs with vision and text configs. Any
model with a different set of sub-configs has to overwrite this test.
|
test_sdpa_can_dispatch_composite_models
|
python
|
huggingface/transformers
|
tests/models/instructblip/test_modeling_instructblip.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/instructblip/test_modeling_instructblip.py
|
Apache-2.0
|
def test_sdpa_can_dispatch_composite_models(self):
"""
Tests whether composite models dispatch correctly to SDPA/eager when requested at load time.
This test only looks at layer names, as SDPA layers are usually called "SDPAAttention".
In contrast to the above test, this one checks whether "config._attn_implementation" is a dict after the model
is loaded, because we manually replicate the requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info.
The test tries to cover the most general cases of composite models, i.e. VLMs with vision and text configs. Any
model with a different set of sub-configs has to overwrite this test.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
if not self._is_composite:
self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = model_class.from_pretrained(tmpdirname)
model_sdpa = model_sdpa.eval().to(torch_device)
# `None` as it is the requested one which will be assigned to each sub-config
# Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
self.assertTrue(model.language_model.config._attn_implementation == "sdpa")
self.assertTrue(model.vision_model.config._attn_implementation == "sdpa")
self.assertTrue(model.qformer.config._attn_implementation == "eager")
model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
model_eager = model_eager.eval().to(torch_device)
self.assertTrue(model_eager.config._attn_implementation == "eager")
self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager")
self.assertTrue(model_eager.qformer.config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if (
class_name.endswith("Attention")
and getattr(submodule, "config", None)
and submodule.config._attn_implementation == "sdpa"
):
raise ValueError("The eager model should not have SDPA attention layers")
|
Tests whether composite models dispatch correctly to SDPA/eager when requested at load time.
This test only looks at layer names, as SDPA layers are usually called "SDPAAttention".
In contrast to the above test, this one checks whether "config._attn_implementation" is a dict after the model
is loaded, because we manually replicate the requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info.
The test tries to cover the most general cases of composite models, i.e. VLMs with vision and text configs. Any
model with a different set of sub-configs has to overwrite this test.
|
test_sdpa_can_dispatch_composite_models
|
python
|
huggingface/transformers
|
tests/models/instructblipvideo/test_modeling_instructblipvideo.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/instructblipvideo/test_modeling_instructblipvideo.py
|
Apache-2.0
|
def test_apply_chat_template_video_special_processing(self):
"""
Tests that models can apply their own custom preprocessing to conversations.
"""
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest("Processor doesn't accept videos at input")
video_file_path = hf_hub_download(
repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
)
messages = [
[
{
"role": "user",
"content": [
{"type": "video", "path": video_file_path},
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
def _process_messages_for_chat_template(
conversation,
batch_images,
batch_videos,
batch_video_metadata,
**chat_template_kwargs,
):
# Let us just always return a dummy prompt
new_msg = [
[
{
"role": "user",
"content": [
{"type": "video"}, # no need to use path, video is loaded already by this moment
{"type": "text", "text": "Dummy prompt for preprocess testing"},
],
},
]
]
return new_msg
processor._process_messages_for_chat_template = _process_messages_for_chat_template
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
num_frames=8,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
# Check with `in` because we don't know how each template formats the prompt with BOS/EOS/etc
formatted_text = processor.batch_decode(out_dict_with_video["input_ids"], skip_special_tokens=True)[0]
self.assertTrue("Dummy prompt for preprocess testing" in formatted_text)
# Unlike the common tests, InternVLProcessor returns flattened video features and uses 8 frames by default
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 8)
|
Tests that models can apply their own custom preprocessing to conversations.
|
test_apply_chat_template_video_special_processing
|
python
|
huggingface/transformers
|
tests/models/internvl/test_processor_internvl.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/internvl/test_processor_internvl.py
|
Apache-2.0
|
def test_load_balancing_loss(self):
r"""
Let's make sure we can actually compute the loss and do a backward on it.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.num_experts = 16
config.output_router_logits = True
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(config.pad_token_id).to(torch_device)
model = JambaForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask)
bs, seqlen = input_ids.shape
self.assertEqual(result.router_logits[0].shape, (bs * seqlen, config.num_experts))
torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)
# First, we make sure that adding padding tokens doesn't change the loss
# loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
pad_length = 1000
# Add padding tokens to input_ids
padding_block = config.pad_token_id * torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(
torch_device
)
padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left
padded_attention_mask = padded_input_ids.ne(config.pad_token_id).to(torch_device)
padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)
# We make sure that the loss of including padding tokens != the loss without padding tokens
# if attention_mask=None --> we don't exclude padding tokens
include_padding_result = model(padded_input_ids, attention_mask=None)
# This is to mimic torch.testing.assert_not_close
self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item())
|
Let's make sure we can actually compute the loss and do a backward on it.
|
test_load_balancing_loss
|
python
|
huggingface/transformers
|
tests/models/jamba/test_modeling_jamba.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/jamba/test_modeling_jamba.py
|
Apache-2.0
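For reference, the auxiliary load-balancing loss checked above can be sketched as follows. This is a minimal Switch-Transformers-style formulation with made-up shapes, not necessarily the exact Jamba implementation:

import torch
import torch.nn.functional as F

def load_balancing_loss(router_logits: torch.Tensor, num_experts: int, top_k: int = 2) -> torch.Tensor:
    # router_logits: (batch_size * seq_len, num_experts), the per-layer shape asserted in the test
    routing_weights = F.softmax(router_logits, dim=-1)
    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
    expert_mask = F.one_hot(selected_experts, num_experts).float()  # (tokens, top_k, num_experts)
    tokens_per_expert = expert_mask.mean(dim=(0, 1))       # fraction of routing assignments per expert
    router_prob_per_expert = routing_weights.mean(dim=0)   # average router probability per expert
    return num_experts * torch.sum(tokens_per_expert * router_prob_per_expert)

print(load_balancing_loss(torch.randn(8 * 16, 16), num_experts=16))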
|
def test_initialization(self):
r"""
Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
if "A_log" in name:
A = torch.arange(1, config.mamba_d_state + 1, dtype=torch.float32)[None, :]
A = A.expand(config.mamba_expand * config.hidden_size, -1).contiguous()
torch.testing.assert_close(param.data, torch.log(A), rtol=1e-5, atol=1e-5)
elif "D" in name:
# check if it's a ones like
torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
|
Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
|
test_initialization
|
python
|
huggingface/transformers
|
tests/models/jamba/test_modeling_jamba.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/jamba/test_modeling_jamba.py
|
Apache-2.0
|
def test_attention_outputs(self):
r"""
Overriding the test_attention_outputs test as the Jamba model outputs attention only for its attention layers
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
expected_num_attentions = math.ceil(
(self.model_tester.num_hidden_layers - self.model_tester.attn_layer_offset)
/ self.model_tester.attn_layer_period
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), expected_num_attentions)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
|
Overriding the test_attention_outputs test as the Jamba model outputs attention only for its attention layers
|
test_attention_outputs
|
python
|
huggingface/transformers
|
tests/models/jamba/test_modeling_jamba.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/jamba/test_modeling_jamba.py
|
Apache-2.0
|
def test_flash_attn_2_fp32_ln(self):
r"""
Overriding the test_flash_attn_2_fp32_ln test as the Jamba model, like Mixtral, doesn't support
right padding + use cache with FA2
"""
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
dummy_input = inputs_dict[model.main_input_name]
dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))
# NOTE: Jamba does not support right padding + use_cache with FA2.
dummy_attention_mask[:, -1] = 1
model = model_class.from_pretrained(
tmpdirname,
torch_dtype=torch.float16,
attn_implementation="flash_attention_2",
low_cpu_mem_usage=True,
load_in_4bit=True,
)
for _, param in model.named_parameters():
# upcast only layer norms
if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):
param.data = param.data.to(torch.float32)
_ = model(dummy_input)
# with attention mask
_ = model(dummy_input, attention_mask=dummy_attention_mask)
|
Overriding the test_flash_attn_2_fp32_ln test as the Jamba model, like Mixtral, doesn't support
right padding + use cache with FA2
|
test_flash_attn_2_fp32_ln
|
python
|
huggingface/transformers
|
tests/models/jamba/test_modeling_jamba.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/jamba/test_modeling_jamba.py
|
Apache-2.0
|
def test_chat_template_accepts_processing_kwargs(self):
"""Tests that the chat template correctly handles additional processing arguments."""
# Get processor and skip if it doesn't have a chat template
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
# Create a simple text message for testing
messages = [
[
{
"role": "user",
"content": [
{"type": "text", "text": "What is shown in this image?"},
],
},
]
]
# Test 1: Padding to max_length
# PS: we have to override the parent max_length of 50 to 80 because the output is already 51 tokens
formatted_prompt_tokenized = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
padding="max_length",
max_length=80,
)
self.assertEqual(len(formatted_prompt_tokenized[0]), 80)
# Test 2: Truncation
# Verify that the output is truncated to exactly 5 tokens
formatted_prompt_tokenized = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
truncation=True,
max_length=5,
)
self.assertEqual(len(formatted_prompt_tokenized[0]), 5)
# Test 3: Image processing kwargs
# Add an image and test image processing parameters
messages[0][0]["content"].append(
{"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}
)
# Process with image rescaling and verify the pixel values are negative
out_dict = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_rescale=True,
rescale_factor=-1,
return_tensors="np",
)
self.assertLessEqual(out_dict[self.images_input_name][0][0].mean(), 0)
|
Tests that the chat template correctly handles additional processing arguments.
|
test_chat_template_accepts_processing_kwargs
|
python
|
huggingface/transformers
|
tests/models/janus/test_processor_janus.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/janus/test_processor_janus.py
|
Apache-2.0
|
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
elif is_tf_available():
returned_tensor = "tf"
else:
returned_tensor = "jax"
# Single example
words, boxes = self.get_words_and_boxes()
tokens = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
else:
self.assertEqual(len(tokens[key].shape), 3)
# Batch of examples
# For these 2 examples, 3 training examples will be created
words, boxes = self.get_words_and_boxes_batch()
tokens = tokenizer.batch_encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
else:
self.assertEqual(len(tokens[key].shape), 3)
self.assertEqual(tokens[key].shape[-1], 4)
|
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
|
test_batch_encode_dynamic_overflowing
|
python
|
huggingface/transformers
|
tests/models/layoutlmv2/test_tokenization_layoutlmv2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py
|
Apache-2.0
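The padding requirement described in the docstring above can be illustrated with plain PyTorch; the window contents and counts below are made up and independent of any tokenizer:

import torch
from torch.nn.utils.rnn import pad_sequence

# Pretend three sequences produced 2, 1 and 3 overflow windows of varying length
windows = [
    torch.tensor([101, 7, 8, 102]),
    torch.tensor([101, 9, 102]),
    torch.tensor([101, 10, 11, 12, 102]),
    torch.tensor([101, 13, 102]),
    torch.tensor([101, 14, 15, 102]),
    torch.tensor([101, 16, 102]),
]
overflow_to_sample_mapping = torch.tensor([0, 0, 1, 2, 2, 2])  # which sequence each window came from
batch = pad_sequence(windows, batch_first=True, padding_value=0)
print(batch.shape)  # torch.Size([6, 5]): every window padded to the longest one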
|
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
elif is_tf_available():
returned_tensor = "tf"
else:
returned_tensor = "jax"
# Single example
words = ["HuggingFace", "is", "solving", "NLP", "one", "commit", "at", "a", "time"]
boxes = [[i, i, i, i] for i in range(len(words))]
tokens = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
else:
self.assertEqual(len(tokens[key].shape), 3)
# Batch of examples
# For these 2 examples, 3 training examples will be created
words_batched = [
["HuggingFace", "is", "solving", "NLP", "one", "commit", "at", "a", "time"],
["Very", "tiny", "input"],
]
boxes_batched = [[[i, i, i, i] for i in range(len(words_item))] for words_item in words_batched]
tokens = tokenizer.batch_encode_plus(
words_batched,
boxes=boxes_batched,
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
else:
self.assertEqual(len(tokens[key].shape), 3)
self.assertEqual(tokens[key].shape[-1], 4)
|
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
|
test_batch_encode_dynamic_overflowing
|
python
|
huggingface/transformers
|
tests/models/layoutlmv3/test_tokenization_layoutlmv3.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py
|
Apache-2.0
|
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
elif is_tf_available():
returned_tensor = "tf"
else:
returned_tensor = "jax"
# Single example
words, boxes = self.get_words_and_boxes()
tokens = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
else:
self.assertEqual(len(tokens[key].shape), 3)
# Batch of examples
# For these 2 examples, 3 training examples will be created
words, boxes = self.get_words_and_boxes_batch()
tokens = tokenizer.batch_encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
else:
self.assertEqual(len(tokens[key].shape), 3)
self.assertEqual(tokens[key].shape[-1], 4)
|
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
|
test_batch_encode_dynamic_overflowing
|
python
|
huggingface/transformers
|
tests/models/layoutxlm/test_tokenization_layoutxlm.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/layoutxlm/test_tokenization_layoutxlm.py
|
Apache-2.0
|
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
|
If the tensors have different shapes or different values, or if a and b are not both tensors, raise a helpful AssertionError.
|
assert_tensors_close
|
python
|
huggingface/transformers
|
tests/models/led/test_modeling_led.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/led/test_modeling_led.py
|
Apache-2.0
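A standalone sketch (not taken from the test file) of the mismatch percentage this helper reports for large tensors:

import torch

a = torch.zeros(200)
b = a.clone()
b[::7] += 1.0  # perturb every 7th entry
atol = 1e-12
pct_different = torch.gt((a - b).abs(), atol).float().mean().item()
print(f"tensor values are {pct_different:.1%} percent different.")  # 14.5%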
|
def test_llama_3_1_hard(self):
"""
An integration test for Llama 3.1. It tests against a long output to ensure that the subtle numerical differences
from Llama 3.1's RoPE can be detected
"""
# diff on `EXPECTED_TEXT`:
# 2024-08-26: updating from torch 2.3.1 to 2.4.0 slightly changes the results.
EXPECTED_TEXT = (
"Tell me about the french revolution. The french revolution was a period of radical political and social "
"upheaval in France that lasted from 1789 until 1799. It was a time of great change and upheaval, marked "
"by the overthrow of the monarchy, the rise of the middle class, and the eventual establishment of the "
"First French Republic.\nThe revolution began in 1789 with the Estates-General, a representative "
"assembly that had not met since 1614. The Third Estate, which represented the common people, "
"demanded greater representation and eventually broke away to form the National Assembly. This marked "
"the beginning of the end of the absolute monarchy and the rise of the middle class.\n"
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
model = LlamaForCausalLM.from_pretrained(
"meta-llama/Meta-Llama-3.1-8B-Instruct", device_map="auto", torch_dtype=torch.bfloat16
)
input_text = ["Tell me about the french revolution."]
model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
generated_ids = model.generate(**model_inputs, max_new_tokens=128, do_sample=False)
generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(generated_text, EXPECTED_TEXT)
|
An integration test for Llama 3.1. It tests against a long output to ensure that the subtle numerical differences
from Llama 3.1's RoPE can be detected
|
test_llama_3_1_hard
|
python
|
huggingface/transformers
|
tests/models/llama/test_modeling_llama.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llama/test_modeling_llama.py
|
Apache-2.0
|
def test_stacked_causal_mask_static_cache(self):
"""same as above but with StaticCache"""
(
input_ids,
position_ids,
input_ids_shared_prefix,
mask_shared_prefix,
position_ids_shared_prefix,
) = self.get_test_data()
# regular batch
logits = self.model.forward(input_ids, position_ids=position_ids).logits
logits_last = logits[:, -1, :] # last tokens in each batch line
decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]
# upgrade the model with StaticCache
max_cache_len = 16 # note that max_cache_len is greater than the attention_mask.shape[-1]
past_key_values = StaticCache(
config=self.model.config,
max_batch_size=1,
max_cache_len=max_cache_len,
device=torch_device,
dtype=self.model.dtype,
)
padded_attention_mask = torch.nn.functional.pad(
input=mask_shared_prefix,
pad=(0, max_cache_len - mask_shared_prefix.shape[-1]),
mode="constant",
value=torch.finfo(self.model_dtype).min,
)
# single forward run with 4D custom mask
logits_shared_prefix = self.model.forward(
input_ids_shared_prefix,
attention_mask=padded_attention_mask,
position_ids=position_ids_shared_prefix,
cache_position=torch.arange(input_ids_shared_prefix.shape[-1], device=torch_device),
past_key_values=past_key_values,
).logits
logits_shared_prefix_last = logits_shared_prefix[
0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1], :
] # last three tokens
decoded_shared_prefix = [self.tokenizer.decode(t) for t in logits_shared_prefix_last.argmax(dim=-1)]
self.assertEqual(decoded, decoded_shared_prefix)
|
same as above but with StaticCache
|
test_stacked_causal_mask_static_cache
|
python
|
huggingface/transformers
|
tests/models/llama/test_modeling_llama.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llama/test_modeling_llama.py
|
Apache-2.0
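The mask padding used above can be shown in isolation. This is a minimal sketch with toy shapes; in the real test the 4D additive mask is padded along the key dimension up to max_cache_len, with padded positions set to the dtype minimum:

import torch

dtype = torch.float16
seq_len, max_cache_len = 6, 16
mask = torch.zeros(1, 1, seq_len, seq_len, dtype=dtype)  # (batch, heads, query_len, key_len)
padded = torch.nn.functional.pad(
    mask,
    pad=(0, max_cache_len - mask.shape[-1]),  # extend only the last (key/value) dimension
    mode="constant",
    value=torch.finfo(dtype).min,
)
print(padded.shape)  # torch.Size([1, 1, 6, 16])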
|
def test_llava_reload(self):
"""
Simple test for reloading default llava configs
"""
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig()
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
assert config.to_dict() == reloaded.to_dict()
|
Simple test for reloading default llava configs
|
test_llava_reload
|
python
|
huggingface/transformers
|
tests/models/llava/test_configuration_llava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_configuration_llava.py
|
Apache-2.0
|
def test_pixtral_reload(self):
"""
Simple test for reloading pixtral configs
"""
vision_config = {
"model_type": "pixtral",
"head_dim": 64,
"hidden_act": "silu",
"image_size": 1024,
"is_composition": True,
"patch_size": 16,
"rope_theta": 10000.0,
"tie_word_embeddings": False,
}
text_config = {
"model_type": "mistral",
"hidden_size": 5120,
"head_dim": 128,
"num_attention_heads": 32,
"intermediate_size": 14336,
"is_composition": True,
"max_position_embeddings": 1024000,
"num_hidden_layers": 40,
"num_key_value_heads": 8,
"rms_norm_eps": 1e-05,
"rope_theta": 1000000000.0,
"sliding_window": None,
"vocab_size": 131072,
}
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig(vision_config=vision_config, text_config=text_config)
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
assert config.to_dict() == reloaded.to_dict()
|
Simple test for reloading pixtral configs
|
test_pixtral_reload
|
python
|
huggingface/transformers
|
tests/models/llava/test_configuration_llava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_configuration_llava.py
|
Apache-2.0
|
def test_arbitrary_reload(self):
"""
Simple test for reloading arbitrarily composed subconfigs
"""
default_values = LlavaConfig().to_diff_dict()
default_values["vision_config"]["model_type"] = "pixtral"
default_values["text_config"]["model_type"] = "opt"
self.maxDiff = None
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig(**default_values)
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
self.assertDictEqual(config.to_dict(), reloaded.to_dict())
|
Simple test for reloading arbitrarily composed subconfigs
|
test_arbitrary_reload
|
python
|
huggingface/transformers
|
tests/models/llava/test_configuration_llava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_configuration_llava.py
|
Apache-2.0
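The same save/reload round trip outside of a test case; a short sketch that relies only on the LlavaConfig API exercised above:

import tempfile
from transformers import LlavaConfig

config = LlavaConfig()  # default vision and text sub-configs
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    reloaded = LlavaConfig.from_pretrained(tmp_dir)
assert config.to_dict() == reloaded.to_dict()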
|
def test_padding(self):
"""
LLaVA needs to pad images to a square size before processing, as per the original implementation.
Checks that image processor pads images correctly given different background colors.
"""
# taken from original implementation: https://github.com/haotian-liu/LLaVA/blob/c121f0432da27facab705978f83c4ada465e46fd/llava/mm_utils.py#L152
def pad_to_square_original(
image: Image.Image, background_color: Union[int, tuple[int, int, int]] = 0
) -> Image.Image:
width, height = image.size
if width == height:
return image
elif width > height:
result = Image.new(image.mode, (width, width), background_color)
result.paste(image, (0, (width - height) // 2))
return result
else:
result = Image.new(image.mode, (height, height), background_color)
result.paste(image, ((height - width) // 2, 0))
return result
for i, image_processing_class in enumerate(self.image_processor_list):
image_processor = image_processing_class.from_dict(self.image_processor_dict)
numpify = i == 0
torchify = i == 1
image_inputs = self.image_processor_tester.prepare_image_inputs(
equal_resolution=False, numpify=numpify, torchify=torchify
)
# test with images in channel-last and channel-first format (only channel-first for torch)
for image in image_inputs:
padded_image = image_processor.pad_to_square(image)
if i == 0:
padded_image_original = pad_to_square_original(Image.fromarray(image))
padded_image_original = np.array(padded_image_original)
np.testing.assert_allclose(padded_image, padded_image_original)
padded_image = image_processor.pad_to_square(
image.transpose(2, 0, 1), input_data_format="channels_first"
)
padded_image = padded_image.transpose(1, 2, 0)
np.testing.assert_allclose(padded_image, padded_image_original)
else:
padded_image_original = pad_to_square_original(F.to_pil_image(image))
padded_image = padded_image.permute(1, 2, 0)
np.testing.assert_allclose(padded_image, padded_image_original)
# test background color
background_color = (122, 116, 104)
for image in image_inputs:
padded_image = image_processor.pad_to_square(image, background_color=background_color)
if i == 0:
padded_image_original = pad_to_square_original(
Image.fromarray(image), background_color=background_color
)
else:
padded_image_original = pad_to_square_original(
F.to_pil_image(image), background_color=background_color
)
padded_image = padded_image.permute(1, 2, 0)
padded_image_original = np.array(padded_image_original)
np.testing.assert_allclose(padded_image, padded_image_original)
background_color = 122
for image in image_inputs:
padded_image = image_processor.pad_to_square(image, background_color=background_color)
if i == 0:
padded_image_original = pad_to_square_original(
Image.fromarray(image), background_color=background_color
)
else:
padded_image_original = pad_to_square_original(
F.to_pil_image(image), background_color=background_color
)
padded_image = padded_image.permute(1, 2, 0)
padded_image_original = np.array(padded_image_original)
np.testing.assert_allclose(padded_image, padded_image_original)
# background color length should match channel length
with self.assertRaises(ValueError):
padded_image = image_processor.pad_to_square(image_inputs[0], background_color=(122, 104))
with self.assertRaises(ValueError):
padded_image = image_processor.pad_to_square(image_inputs[0], background_color=(122, 104, 0, 0))
|
LLaVA needs to pad images to a square size before processing, as per the original implementation.
Checks that image processor pads images correctly given different background colors.
|
test_padding
|
python
|
huggingface/transformers
|
tests/models/llava/test_image_processing_llava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_image_processing_llava.py
|
Apache-2.0
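A numpy-only sketch of the square padding being verified here, for a channels-last image with a per-channel background color; this is illustrative, not the processor's actual implementation:

import numpy as np

def pad_to_square_np(image: np.ndarray, background_color=(0, 0, 0)) -> np.ndarray:
    h, w, c = image.shape  # channels-last
    side = max(h, w)
    result = np.empty((side, side, c), dtype=image.dtype)
    result[...] = np.asarray(background_color, dtype=image.dtype)
    top, left = (side - h) // 2, (side - w) // 2
    result[top : top + h, left : left + w] = image
    return result

img = np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8)
print(pad_to_square_np(img, (122, 116, 104)).shape)  # (40, 40, 3)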
|
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
We also need to test multi-image cases where one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
curr_input_dict = copy.deepcopy(input_dict)  # deepcopy to guard against in-place modifications below
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values)
|
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
We also need to test multi-image cases where one prompt has multiple image tokens.
|
test_mismatching_num_image_tokens
|
python
|
huggingface/transformers
|
tests/models/llava/test_modeling_llava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_modeling_llava.py
|
Apache-2.0
|
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision_feature_layer
num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer)
hidden_size = config.vision_config.hidden_size
expected_features = hidden_size * num_feature_layers
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
# We should have the right number of input features,
# and should be able to run a forward pass without exploding
base_model = getattr(model, "model", model)
assert base_model.multi_modal_projector.linear_1.in_features == expected_features
model(**input_dict)
|
Test that we can use either one vision feature layer, or a list of
vision feature layers.
|
test_vision_feature_layers
|
python
|
huggingface/transformers
|
tests/models/llava/test_modeling_llava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_modeling_llava.py
|
Apache-2.0
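What the in_features check above corresponds to, sketched with toy tensors (assumed shapes): selecting several vision feature layers concatenates their hidden states, so the projector input grows to hidden_size * num_layers:

import torch

hidden_size, num_patches, batch = 32, 9, 2
all_hidden_states = [torch.randn(batch, num_patches, hidden_size) for _ in range(6)]  # pretend vision tower outputs

vision_feature_layer = [-2, -5]  # a list selects several layers
selected = [all_hidden_states[idx] for idx in vision_feature_layer]
features = torch.cat(selected, dim=-1)
print(features.shape[-1])  # 64 == hidden_size * len(vision_feature_layer)

projector = torch.nn.Linear(hidden_size * len(vision_feature_layer), 128)
_ = projector(features)  # shapes line up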
|
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = LlavaProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=5,
)
|
Tests that special vision tokens do not get truncated when `truncation=True` is set.
|
test_special_mm_token_truncation
|
python
|
huggingface/transformers
|
tests/models/llava/test_processor_llava.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_processor_llava.py
|
Apache-2.0
|
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
We also need to test multi-image cases where one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
curr_input_dict = copy.deepcopy(input_dict)  # deepcopy to guard against in-place modifications below
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
curr_input_dict["image_sizes"] = curr_input_dict["image_sizes"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:1]
image_sizes = curr_input_dict["image_sizes"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
image_sizes = torch.cat([image_sizes, image_sizes], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)
|
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
We also need to test multi-image cases where one prompt has multiple image tokens.
|
test_mismatching_num_image_tokens
|
python
|
huggingface/transformers
|
tests/models/llava_next/test_modeling_llava_next.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava_next/test_modeling_llava_next.py
|
Apache-2.0
|
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision_feature_layer
num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer)
hidden_size = config.vision_config.hidden_size
expected_features = hidden_size * num_feature_layers
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
# We should have the right number of input features,
# and should be able to run a forward pass without exploding
base_model = getattr(model, "model", model)
assert base_model.multi_modal_projector.linear_1.in_features == expected_features
model(**input_dict)
|
Test that we can use either one vision feature layer, or a list of
vision feature layers.
|
test_vision_feature_layers
|
python
|
huggingface/transformers
|
tests/models/llava_next/test_modeling_llava_next.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava_next/test_modeling_llava_next.py
|
Apache-2.0
|
def test_granite_vision(self):
"""
Check the expected output of a granite vision model, which leverages
multiple vision feature layers and a visual encoder with no CLS (siglip).
"""
granite_model_path = "ibm-granite/granite-vision-3.1-2b-preview"
model = LlavaNextForConditionalGeneration.from_pretrained(granite_model_path)
self.processor = AutoProcessor.from_pretrained(granite_model_path)
prompt = "<|user|>\n<image>\nWhat is shown in this image?\n<|assistant|>\n"
inputs = self.processor(prompt, self.image, return_tensors="pt").to(model.device)
# verify generation
output = model.generate(**inputs, max_new_tokens=30)
EXPECTED_DECODED_TEXT = "<|user|>\n\nWhat is shown in this image?\n<|assistant|>\nThe image displays a radar chart comparing the performance of various machine learning models." # fmt: skip
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
|
Check the expected output of a granite vision model, which leverages
multiple vision feature layers and a visual encoder with no CLS (siglip).
|
test_granite_vision
|
python
|
huggingface/transformers
|
tests/models/llava_next/test_modeling_llava_next.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava_next/test_modeling_llava_next.py
|
Apache-2.0
|
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
We also need to test multi-image cases where one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
curr_input_dict = copy.deepcopy(input_dict)  # deepcopy to guard against in-place modifications below
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
curr_input_dict["image_sizes"] = curr_input_dict["image_sizes"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:1]
image_sizes = curr_input_dict["image_sizes"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
image_sizes = torch.cat([image_sizes, image_sizes], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)
|
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
We also need to test multi-image cases where one prompt has multiple image tokens.
|
test_mismatching_num_image_tokens
|
python
|
huggingface/transformers
|
tests/models/llava_next_video/test_modeling_llava_next_video.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava_next_video/test_modeling_llava_next_video.py
|
Apache-2.0
|
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision_feature_layer
num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer)
hidden_size = config.vision_config.hidden_size
expected_features = hidden_size * num_feature_layers
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
# We should have the right number of input features,
# and should be able to run a forward pass without exploding
base_model = getattr(model, "model", model)
assert base_model.multi_modal_projector.linear_1.in_features == expected_features
model(**input_dict)
|
Test that we can use either one vision feature layer, or a list of
vision feature layers.
|
test_vision_feature_layers
|
python
|
huggingface/transformers
|
tests/models/llava_next_video/test_modeling_llava_next_video.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava_next_video/test_modeling_llava_next_video.py
|
Apache-2.0
|
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision_feature_layer
num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer)
hidden_size = config.vision_config.hidden_size
expected_features = hidden_size * num_feature_layers
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
# We should have the right number of input features,
# and should be able to run a forward pass without exploding
base_model = getattr(model, "model", model)
assert base_model.multi_modal_projector.linear_1.in_features == expected_features
model(**input_dict)
|
Test that we can use either one vision feature layer, or a list of
vision feature layers.
|
test_vision_feature_layers
|
python
|
huggingface/transformers
|
tests/models/llava_onevision/test_modeling_llava_onevision.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/llava_onevision/test_modeling_llava_onevision.py
|
Apache-2.0
|
def assertInterval(self, member, container, msg=None):
r"""
Simple utility function to check if a member is inside an interval.
"""
if isinstance(member, torch.Tensor):
max_value, min_value = member.max().item(), member.min().item()
elif isinstance(member, list) or isinstance(member, tuple):
max_value, min_value = max(member), min(member)
if not isinstance(container, list):
raise TypeError("container should be a list or tuple")
elif len(container) != 2:
raise ValueError("container should have 2 elements")
expected_min, expected_max = container
is_inside_interval = (min_value >= expected_min) and (max_value <= expected_max)
if not is_inside_interval:
standardMsg = f"{safe_repr(member)} not found in {safe_repr(container)}"
self.fail(self._formatMessage(msg, standardMsg))
|
Simple utility function to check if a member is inside an interval.
|
assertInterval
|
python
|
huggingface/transformers
|
tests/models/mamba/test_modeling_mamba.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mamba/test_modeling_mamba.py
|
Apache-2.0
|
def test_simple_generate(self):
"""
Simple generate test to avoid regressions.
Note: state-spaces (cuda) implementation and pure torch implementation
have irreconcilable differences as of now, which will cause this test to fail
in an environment with state-spaces installed.
"""
tokenizer = self.tokenizer
tokenizer.pad_token_id = tokenizer.eos_token_id
model = Mamba2ForCausalLM.from_pretrained(self.model_id, torch_dtype=torch.bfloat16)
model.to(torch_device)
input_ids = tokenizer("[INST]Write a hello world program in C++.[/INST]", return_tensors="pt")["input_ids"].to(
torch_device
)
out = model.generate(input_ids, do_sample=False, use_cache=True, max_new_tokens=30)
output_sentence = tokenizer.decode(out[0])
ground_truth_sentences = Expectations(
{
("xpu", 3): """<s>[INST]Write a hello world program in C++.[/INST] Sure, here is a simple "Hello, World!" program written in C++:\n\n```cpp\n#include <iostream>\n""",
("cuda", 7): """<s>[INST]Write a hello world program in C++.[/INST] Sure, here is a simple "Hello, World!" program in C++:\n\n```cpp\n#include <iostream>\n\n""",
}
) # fmt: skip
ground_truth_sentence = ground_truth_sentences.get_expectation()
self.assertEqual(output_sentence, ground_truth_sentence)
|
Simple generate test to avoid regressions.
Note: state-spaces (cuda) implementation and pure torch implementation
have irreconcilable differences as of now, which will cause this test to fail
in an environment with state-spaces installed.
|
test_simple_generate
|
python
|
huggingface/transformers
|
tests/models/mamba2/test_modeling_mamba2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mamba2/test_modeling_mamba2.py
|
Apache-2.0
|
def test_batched_equivalence_with_cache(self):
"""
Verifies that batched generation matches individual generation.
Important because of the specific caching mechanism and statefulness of the Mamba model.
Depending on precision and devices, differences can be observed from generation to generation.
"""
tokenizer = self.tokenizer
prompt = [
"[INST]Write C#.[/INST]",
"[INST]Write a hello world in C++.[/INST]",
"[INST] Write a simple Fibonacci number computation function in Rust that does memoization, with comments, in safe Rust.[/INST]",
]
model = Mamba2ForCausalLM.from_pretrained(self.model_id, torch_dtype=torch.bfloat16).to(torch_device)
tokenizer.pad_token_id = tokenizer.eos_token_id
# batched generation
tokenized_prompts = tokenizer(prompt, return_tensors="pt", padding="longest").to(torch_device)
batched_gen = model.generate(**tokenized_prompts, max_new_tokens=30, use_cache=True)
batched_output = tokenizer.batch_decode(batched_gen, skip_special_tokens=True)
# individual generation
for index_gen, individual_prompt in enumerate(prompt):
inputs = tokenizer(individual_prompt, return_tensors="pt", padding="longest").to(torch_device)
individual_gen = model.generate(**inputs, max_new_tokens=30, use_cache=True)
individual_output = tokenizer.batch_decode(individual_gen, skip_special_tokens=True)[0]
self.assertEqual(individual_output[:100], batched_output[index_gen][:100])
|
Verifies that batched generation matches individual generation.
Important because of the specific caching mechanism and statefulness of the Mamba model.
Depending on precision and devices, differences can be observed from generation to generation.
|
test_batched_equivalence_with_cache
|
python
|
huggingface/transformers
|
tests/models/mamba2/test_modeling_mamba2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mamba2/test_modeling_mamba2.py
|
Apache-2.0
|
def test_batched_equivalence_without_cache(self):
"""
Verifies that batched generation matches individual generation without cache.
Important because of the specific caching mechanism and statefulness of the Mamba model.
Depending on precision and devices, differences can be observed from generation to generation.
"""
tokenizer = self.tokenizer
prompt = [
"[INST]Write C#.[/INST]",
"[INST]Write a hello world in C++.[/INST]",
"[INST] Write a simple Fibonacci number computation function in Rust that does memoization, with comments, in safe Rust.[/INST]",
]
model = Mamba2ForCausalLM.from_pretrained(self.model_id, torch_dtype=torch.bfloat16).to(torch_device)
tokenizer.pad_token_id = tokenizer.eos_token_id
# batched generation
tokenized_prompts = tokenizer(prompt, return_tensors="pt", padding="longest").to(torch_device)
batched_gen = model.generate(**tokenized_prompts, max_new_tokens=30, use_cache=True)
batched_output = tokenizer.batch_decode(batched_gen, skip_special_tokens=True)
# individual generation
for index_gen, individual_prompt in enumerate(prompt):
inputs = tokenizer(individual_prompt, return_tensors="pt", padding="longest").to(torch_device)
individual_gen = model.generate(**inputs, max_new_tokens=30, use_cache=True)
individual_output = tokenizer.batch_decode(individual_gen, skip_special_tokens=True)[0]
self.assertEqual(individual_output[:100], batched_output[index_gen][:100])
|
Verifies that batched generation matches individual generation without cache.
Important because of the specific caching mechanism and statefulness of the Mamba model.
Depending on precision and devices, differences can be observed from generation to generation.
|
test_batched_equivalence_without_cache
|
python
|
huggingface/transformers
|
tests/models/mamba2/test_modeling_mamba2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mamba2/test_modeling_mamba2.py
|
Apache-2.0
|
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
|
If the tensors have different shapes or different values, or if a and b are not both tensors, raise a helpful AssertionError.
|
assert_tensors_close
|
python
|
huggingface/transformers
|
tests/models/marian/test_modeling_marian.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/marian/test_modeling_marian.py
|
Apache-2.0
|
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
elif is_tf_available():
returned_tensor = "tf"
else:
returned_tensor = "jax"
# Single example
nodes, xpaths = self.get_nodes_and_xpaths()
tokens = tokenizer.encode_plus(
nodes,
xpaths=xpaths,
max_length=1,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if "xpath" not in key:
self.assertEqual(len(tokens[key].shape), 2)
else:
self.assertEqual(len(tokens[key].shape), 3)
# Batch of examples
# For these 2 examples, 3 training examples will be created
nodes, xpaths = self.get_nodes_and_xpaths_batch()
tokens = tokenizer.batch_encode_plus(
nodes,
xpaths=xpaths,
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if "xpath" not in key:
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
else:
self.assertEqual(len(tokens[key].shape), 3)
self.assertEqual(tokens[key].shape[-2], 6)
|
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
|
test_batch_encode_dynamic_overflowing
|
python
|
huggingface/transformers
|
tests/models/markuplm/test_tokenization_markuplm.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/markuplm/test_tokenization_markuplm.py
|
Apache-2.0
|
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to Mask2FormerImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w)
expected_width = self.size["shortest_edge"]
elif w > h:
expected_height = self.size["shortest_edge"]
expected_width = int(self.size["shortest_edge"] * w / h)
else:
expected_height = self.size["shortest_edge"]
expected_width = self.size["shortest_edge"]
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
|
This function computes the expected height and width when providing images to Mask2FormerImageProcessor,
assuming do_resize is set to True with a scalar size.
|
get_expected_values
|
python
|
huggingface/transformers
|
tests/models/mask2former/test_image_processing_mask2former.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mask2former/test_image_processing_mask2former.py
|
Apache-2.0
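The shortest-edge arithmetic above, as a standalone helper with a worked example; a sketch whose rounding may differ slightly from the actual image processor:

def shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

# A 480 x 640 (h x w) image resized so its shortest edge is 32 -> (32, 42)
print(shortest_edge_size(480, 640, 32))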
|
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to MaskFormerImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w)
expected_width = self.size["shortest_edge"]
elif w > h:
expected_height = self.size["shortest_edge"]
expected_width = int(self.size["shortest_edge"] * w / h)
else:
expected_height = self.size["shortest_edge"]
expected_width = self.size["shortest_edge"]
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
|
This function computes the expected height and width when providing images to MaskFormerImageProcessor,
assuming do_resize is set to True with a scalar size.
|
get_expected_values
|
python
|
huggingface/transformers
|
tests/models/maskformer/test_image_processing_maskformer.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/maskformer/test_image_processing_maskformer.py
|
Apache-2.0
|
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
|
If the tensors have different shapes or different values, or if a and b are not both tensors, raise a helpful AssertionError.
|
assert_tensors_close
|
python
|
huggingface/transformers
|
tests/models/mbart/test_modeling_mbart.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mbart/test_modeling_mbart.py
|
Apache-2.0
|
def model(self):
"""Only load the model if needed."""
model = MBartForConditionalGeneration.from_pretrained(self.checkpoint_name).to(torch_device)
if "cuda" in torch_device:
model = model.half()
return model
|
Only load the model if needed.
|
model
|
python
|
huggingface/transformers
|
tests/models/mbart/test_modeling_mbart.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mbart/test_modeling_mbart.py
|
Apache-2.0
|
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
|
This function prepares a list of PIL images for testing.
|
prepare_image_inputs
|
python
|
huggingface/transformers
|
tests/models/mgp_str/test_processor_mgp_str.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mgp_str/test_processor_mgp_str.py
|
Apache-2.0
|
def test_load_balancing_loss(self):
r"""
Let's make sure we can actually compute the loss and do a backward on it.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.num_local_experts = 8
config.output_router_logits = True
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = MiniMaxForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask)
self.assertEqual(result.router_logits[0].shape, (91, config.num_local_experts))
torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)
# First, we make sure that adding padding tokens doesn't change the loss
# loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
pad_length = 1000
# Add padding tokens (assume that pad_token_id=1) to input_ids
padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device)
padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left
padded_attention_mask = padded_input_ids.ne(1).to(torch_device)
padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)
# We make sure that the loss of including padding tokens != the loss without padding tokens
# if attention_mask=None --> we don't exclude padding tokens
include_padding_result = model(padded_input_ids, attention_mask=None)
# This is to mimic torch.testing.assert_not_close
self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item())
|
Let's make sure we can actually compute the loss and do a backward on it.
|
test_load_balancing_loss
|
python
|
huggingface/transformers
|
tests/models/minimax/test_modeling_minimax.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/minimax/test_modeling_minimax.py
|
Apache-2.0
|
def test_past_key_values_format(self, custom_all_cache_shapes=None):
"""
Test that the KV cache is formatted correctly.
"""
for model_class in self.all_generative_model_classes:
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config).to(torch_device)
model = model.eval()
if "use_cache" not in inputs:
inputs["use_cache"] = True
outputs = model(**inputs)
past_kv = outputs["past_key_values"]
batch_size, seq_length = inputs["input_ids"].shape
self._check_past_key_values_for_generate(batch_size, past_kv, seq_length, config)
|
Test that the KV cache is formatted correctly.
|
test_past_key_values_format
|
python
|
huggingface/transformers
|
tests/models/minimax/test_modeling_minimax.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/minimax/test_modeling_minimax.py
|
Apache-2.0
|
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=3,
)
|
Tests that special vision tokens do not get truncated when `truncation=True` is set.
|
test_special_mm_token_truncation
|
python
|
huggingface/transformers
|
tests/models/mistral3/test_processor_mistral3.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mistral3/test_processor_mistral3.py
|
Apache-2.0
|
def test_load_balancing_loss(self):
r"""
Let's make sure we can actually compute the loss and do a backward on it.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.num_local_experts = 8
config.output_router_logits = True
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = MixtralForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask)
self.assertEqual(result.router_logits[0].shape, (91, config.num_local_experts))
torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)
# First, we make sure that adding padding tokens doesn't change the loss
# loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
pad_length = 1000
# Add padding tokens (assume that pad_token_id=1) to input_ids
padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device)
padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left
padded_attention_mask = padded_input_ids.ne(1).to(torch_device)
padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)
# We make sure that the loss of including padding tokens != the loss without padding tokens
# if attention_mask=None --> we don't exclude padding tokens
include_padding_result = model(padded_input_ids, attention_mask=None)
# This is to mimic torch.testing.assert_not_close
self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item())
|
Let's make sure we can actually compute the loss and do a backward on it.
|
test_load_balancing_loss
|
python
|
huggingface/transformers
|
tests/models/mixtral/test_modeling_mixtral.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mixtral/test_modeling_mixtral.py
|
Apache-2.0
|
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
batch_size = batch_size if batch_size is not None else self.batch_size
min_resolution = min_resolution if min_resolution is not None else self.min_resolution
max_resolution = max_resolution if max_resolution is not None else self.max_resolution
num_channels = num_channels if num_channels is not None else self.num_channels
num_images = num_images if num_images is not None else self.num_images
images_list = []
for i in range(batch_size):
images = []
for j in range(num_images):
if equal_resolution:
width = height = max_resolution
else:
# To avoid getting image width/height 0
if size_divisor is not None:
# If `size_divisor` is defined, the image needs to have width/size >= `size_divisor`
min_resolution = max(size_divisor, min_resolution)
width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
images_list.append(images)
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]
if torchify:
images_list = [[torch.from_numpy(image) for image in images] for images in images_list]
return images_list
|
This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
|
prepare_image_inputs
|
python
|
huggingface/transformers
|
tests/models/mllama/test_image_processing_mllama.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mllama/test_image_processing_mllama.py
|
Apache-2.0
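A standalone sketch of the structure the helper above produces for a single batch entry (random resolutions, channel-first arrays converted to PIL); the concrete numbers below are made up.
import numpy as np
from PIL import Image
num_images, min_resolution, max_resolution, num_channels = 2, 32, 64, 3
images = []
for _ in range(num_images):
    width, height = np.random.randint(min_resolution, max_resolution, size=2)
    array = np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8)
    images.append(Image.fromarray(np.moveaxis(array, 0, -1)))  # PIL expects channels last
print([image.size for image in images])  # e.g. [(45, 38), (60, 33)]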
|
def test_generate_text_only_with_cache(self):
"""
Tests that our cached generation with text-only inputs works. When mllama was introduced, this feature
required cache modifications (because layers are skipped in practice). This test should prevent regressions.
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_generative_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
model.generate(input_ids, use_cache=True)
|
Tests that our cached generation with text-only inputs works. When mllama was introduced, this feature
required cache modifications (because layers are skipped in practice). This test should prevent regressions.
|
test_generate_text_only_with_cache
|
python
|
huggingface/transformers
|
tests/models/mllama/test_modeling_mllama.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mllama/test_modeling_mllama.py
|
Apache-2.0
|
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
image_input = [[image_input[0]], [image_input[1]]]
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=3,
)
|
Tests that special vision tokens do not get truncated when `truncation=True` is set.
|
test_special_mm_token_truncation
|
python
|
huggingface/transformers
|
tests/models/mllama/test_processor_mllama.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mllama/test_processor_mllama.py
|
Apache-2.0
|
def test_eager_matches_sdpa_generate(self):
"""Overwritten -- mochi has custom inputs and custom output checks"""
max_new_tokens = 5
for model_class in self.all_generative_model_classes:
if not model_class._supports_sdpa:
self.skipTest(f"{model_class.__name__} does not support SDPA")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
dummy_input = inputs_dict[model_class.main_input_name]
if dummy_input.dtype in [torch.float32, torch.bfloat16]:
dummy_input = dummy_input.to(torch.float16)
inputs_dict[model_class.main_input_name] = dummy_input
# make sure that all models have enough positions for generation
if hasattr(config, "max_position_embeddings"):
config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = model_class.from_pretrained(
tmpdirname,
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
).to(torch_device)
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
model_eager = model_class.from_pretrained(
tmpdirname,
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
attn_implementation="eager",
).to(torch_device)
self.assertTrue(model_eager.config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
raise ValueError("The eager model should not have SDPA attention layers")
has_sdpa = False
for name, submodule in model_sdpa.named_modules():
class_name = submodule.__class__.__name__
if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
has_sdpa = True
break
if not has_sdpa:
raise ValueError("The SDPA model should have SDPA attention layers")
# Just test that a large cache works as expected
res_eager = model_eager.generate(
**inputs_dict,
max_new_tokens=max_new_tokens,
do_sample=False,
depth_decoder_do_sample=False,
)
res_sdpa = model_sdpa.generate(
**inputs_dict,
max_new_tokens=max_new_tokens,
do_sample=False,
depth_decoder_do_sample=False,
)
torch.testing.assert_close(res_eager.sequences, res_sdpa.sequences)
torch.testing.assert_close(res_eager.audio_sequences, res_sdpa.audio_sequences)
|
Overwritten -- moshi has custom inputs and custom output checks
|
test_eager_matches_sdpa_generate
|
python
|
huggingface/transformers
|
tests/models/moshi/test_modeling_moshi.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/moshi/test_modeling_moshi.py
|
Apache-2.0
|
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="np").input_ids
labels = tokenizer("Hi I am", return_tensors="np").input_ids
decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_integration_test
|
python
|
huggingface/transformers
|
tests/models/mt5/test_modeling_flax_mt5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mt5/test_modeling_flax_mt5.py
|
Apache-2.0
|
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
labels = tokenizer("Hi I am", return_tensors="pt").input_ids
loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.9127
self.assertLess(abs(mtf_score - EXPECTED_SCORE), 2e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_integration_test
|
python
|
huggingface/transformers
|
tests/models/mt5/test_modeling_mt5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mt5/test_modeling_mt5.py
|
Apache-2.0
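The score conversion in these tests is plain arithmetic: assuming the returned loss is the mean cross-entropy per target token (as the test implies), multiplying by the number of target tokens and negating gives the summed log-likelihood that the original T5 score API reports. A tiny sketch with made-up numbers:
num_target_tokens = 5      # labels.shape[-1] in the test above
mean_ce_loss = 2.0         # model(..., labels=labels).loss, averaged over target tokens
mtf_score = -(num_target_tokens * mean_ce_loss)
print(mtf_score)           # -10.0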
|
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_mean(loss).numpy()
EXPECTED_SCORE = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
|
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
|
test_small_integration_test
|
python
|
huggingface/transformers
|
tests/models/mt5/test_modeling_tf_mt5.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mt5/test_modeling_tf_mt5.py
|
Apache-2.0
|
def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000):
"""Produces a series of 'bip bip' sounds at a given frequency."""
timesteps = np.arange(int(duration * sample_rate)) / sample_rate
wav = np.cos(2 * math.pi * 440 * timesteps)
time_period = (timesteps % (2 * bip_duration)) / (2 * bip_duration)
envelope = time_period >= 0.5
return wav * envelope
|
Produces a series of 'bip bip' sounds at a given frequency.
|
get_bip_bip
|
python
|
huggingface/transformers
|
tests/models/musicgen/test_modeling_musicgen.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/musicgen/test_modeling_musicgen.py
|
Apache-2.0
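A short usage sketch for the helper above (pure NumPy, no transformers dependency): half a second of audio at 32 kHz is 16000 samples, gated on and off every bip_duration seconds.
import numpy as np
# assumes get_bip_bip from the snippet above is in scope
audio = get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000)
print(audio.shape)                 # (16000,)
print(float(np.abs(audio).max()))  # <= 1.0, a gated 440 Hz cosine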
|
def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000):
"""Produces a series of 'bip bip' sounds at a given frequency."""
timesteps = np.arange(int(duration * sample_rate)) / sample_rate
wav = np.cos(2 * math.pi * 440 * timesteps)
time_period = (timesteps % (2 * bip_duration)) / (2 * bip_duration)
envelope = time_period >= 0.5
return wav * envelope
|
Produces a series of 'bip bip' sounds at a given frequency.
|
get_bip_bip
|
python
|
huggingface/transformers
|
tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py
|
Apache-2.0
|
def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000):
"""Produces a series of 'bip bip' sounds at a given frequency."""
timesteps = np.arange(int(duration * sample_rate)) / sample_rate
wav = np.cos(2 * math.pi * 440 * timesteps)
time_period = (timesteps % (2 * bip_duration)) / (2 * bip_duration)
envelope = time_period >= 0.5
return wav * envelope
|
Produces a series of 'bip bip' sounds at a given frequency.
|
get_bip_bip
|
python
|
huggingface/transformers
|
tests/models/musicgen_melody/test_modeling_musicgen_melody.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/musicgen_melody/test_modeling_musicgen_melody.py
|
Apache-2.0
|
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
|
If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.
|
assert_tensors_close
|
python
|
huggingface/transformers
|
tests/models/mvp/test_modeling_mvp.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/mvp/test_modeling_mvp.py
|
Apache-2.0
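A usage sketch for the helper above, showing both the silent pass and the percentage-style failure message for large tensors (names below are illustrative).
import torch
# assumes assert_tensors_close from the snippet above is in scope
a = torch.zeros(200)
b = a.clone()
b[:20] += 1.0                       # perturb 10% of the values
assert_tensors_close(a, a.clone())  # returns True silently
try:
    assert_tensors_close(a, b, atol=1e-6, prefix="logits")
except AssertionError as error:
    print(error)                    # logits: tensor values are 10.0% percent different.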
|
def test_inference_logits(self):
r"""
Logits testing to check implementation consistency between `fairseq` implementation
and `transformers` implementation of NLLB-MoE transformers. We only check the logits
of the second sample of the batch, as it is padded.
"""
model = NllbMoeForConditionalGeneration.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts").eval()
with torch.no_grad():
output = model(**self.model_inputs)
EXPECTED_LOGITS = torch.Tensor([-0.3059, 0.0000, 9.3029, 0.6456, -0.9148, 1.7836, 0.6478, 0.9438, -0.5272, -0.6617, -1.2717, 0.4564, 0.1345, -0.2301, -1.0140, 1.1427, -1.5535, 0.1337, 0.2082, -0.8112, -0.3842, -0.3377, 0.1256, 0.6450, -0.0452, 0.0219, 1.4274, -0.4991, -0.2063, -0.4409,]) # fmt: skip
torch.testing.assert_close(output.logits[1, 0, :30], EXPECTED_LOGITS, rtol=6e-3, atol=9e-3)
|
Logits testing to check implementation consistency between `fairseq` implementation
and `transformers` implementation of NLLB-MoE transformers. We only check the logits
of the second sample of the batch, as it is padded.
|
test_inference_logits
|
python
|
huggingface/transformers
|
tests/models/nllb_moe/test_modeling_nllb_moe.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/nllb_moe/test_modeling_nllb_moe.py
|
Apache-2.0
|
def test_batching_equivalence(self):
"""
Tests that the model supports batching and that the output is nearly the same for the same input in
different batch sizes.
(Why "nearly the same" not "exactly the same"? Batching uses different matmul shapes, which often leads to
different results: https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535)
"""
def get_tensor_equivalence_function(batched_input):
# models operating on continuous spaces have higher abs difference than LMs
# instead, we can rely on cos distance for image/speech models, similar to `diffusers`
if "input_ids" not in batched_input:
return lambda tensor1, tensor2: (
1.0 - F.cosine_similarity(tensor1.float().flatten(), tensor2.float().flatten(), dim=0, eps=1e-38)
)
return lambda tensor1, tensor2: torch.max(torch.abs(tensor1 - tensor2))
def recursive_check(batched_object, single_row_object, model_name, key):
if isinstance(batched_object, (list, tuple)):
for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
recursive_check(batched_object_value, single_row_object_value, model_name, key)
elif isinstance(batched_object, dict):
for batched_object_value, single_row_object_value in zip(
batched_object.values(), single_row_object.values()
):
recursive_check(batched_object_value, single_row_object_value, model_name, key)
# do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects
elif batched_object is None or not isinstance(batched_object, torch.Tensor):
return
elif batched_object.dim() == 0:
return
elif key != "init_reference_points":
# (`init_reference_points` is excluded from this comparison)
# indexing the first element does not always work
# e.g. models that output similarity scores of size (N, M) would need to index [0, 0]
slice_ids = [slice(0, index) for index in single_row_object.shape]
batched_row = batched_object[slice_ids]
self.assertFalse(
torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}"
)
self.assertFalse(
torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(single_row_object).any(),
f"Single row output has `inf` in {model_name} for key={key}",
)
self.assertTrue(
(equivalence(batched_row, single_row_object)) <= 1e-03,
msg=(
f"Batched and Single row outputs are not equal in {model_name} for key={key}. "
f"Difference={equivalence(batched_row, single_row_object)}."
),
)
config, batched_input = self.model_tester.prepare_config_and_inputs_for_common()
equivalence = get_tensor_equivalence_function(batched_input)
for model_class in self.all_model_classes:
config.output_hidden_states = True
model_name = model_class.__name__
if hasattr(self.model_tester, "prepare_config_and_inputs_for_model_class"):
config, batched_input = self.model_tester.prepare_config_and_inputs_for_model_class(model_class)
batched_input_prepared = self._prepare_for_class(batched_input, model_class)
model = model_class(config).to(torch_device).eval()
batch_size = self.model_tester.batch_size
single_row_input = {}
for key, value in batched_input_prepared.items():
single_batch_shape = value.shape[0] // batch_size
single_row_input[key] = value[:single_batch_shape]
with torch.no_grad():
model_batched_output = model(**batched_input_prepared)
model_row_output = model(**single_row_input)
if isinstance(model_batched_output, torch.Tensor):
model_batched_output = {"model_output": model_batched_output}
model_row_output = {"model_output": model_row_output}
for key in model_batched_output:
# DETR starts from zero-init queries to decoder, leading to cos_similarity = `nan`
if hasattr(self, "zero_init_hidden_state") and "decoder_hidden_states" in key:
model_batched_output[key] = model_batched_output[key][1:]
model_row_output[key] = model_row_output[key][1:]
if key in ("decoder_class_logits", "decoder_classes", "encoder_class_logits"):
# check if all elements are close to 0; if so, skip the test as it struggles with comparing
# tensors with all elements close to 0
if torch.allclose(
model_batched_output[key], torch.zeros_like(model_batched_output[key]), atol=1e-6
) and torch.allclose(model_row_output[key], torch.zeros_like(model_row_output[key]), atol=1e-6):
continue
recursive_check(model_batched_output[key], model_row_output[key], model_name, key)
|
Tests that the model supports batching and that the output is nearly the same for the same input in
different batch sizes.
(Why "nearly the same" not "exactly the same"? Batching uses different matmul shapes, which often leads to
different results: https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535)
|
test_batching_equivalence
|
python
|
huggingface/transformers
|
tests/models/omdet_turbo/test_modeling_omdet_turbo.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/omdet_turbo/test_modeling_omdet_turbo.py
|
Apache-2.0
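A small sketch of the two equivalence metrics the test chooses between: maximum absolute difference for token-based models, and 1 - cosine similarity of the flattened outputs for models operating on continuous spaces. The tensors below are illustrative.
import torch
import torch.nn.functional as F
batched = torch.randn(4, 10, 8)
single_row = batched[:1] + 1e-5 * torch.randn(1, 10, 8)  # stands in for a single-row forward pass
max_abs_diff = torch.max(torch.abs(batched[:1] - single_row))
cosine_distance = 1.0 - F.cosine_similarity(
    batched[:1].float().flatten(), single_row.float().flatten(), dim=0, eps=1e-38
)
print(max_abs_diff.item(), cosine_distance.item())  # both well below the 1e-3 threshold used above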
|
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to OneFormerImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w)
expected_width = self.size["shortest_edge"]
elif w > h:
expected_height = self.size["shortest_edge"]
expected_width = int(self.size["shortest_edge"] * w / h)
else:
expected_height = self.size["shortest_edge"]
expected_width = self.size["shortest_edge"]
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
|
This function computes the expected height and width when providing images to OneFormerImageProcessor,
assuming do_resize is set to True with a scalar size.
|
get_expected_values
|
python
|
huggingface/transformers
|
tests/models/oneformer/test_image_processing_oneformer.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/oneformer/test_image_processing_oneformer.py
|
Apache-2.0
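A worked example of the shortest-edge logic above with made-up numbers: a 30x400 (height x width) image and shortest_edge=32 keeps the aspect ratio while the shorter side becomes 32.
shortest_edge = 32
h, w = 30, 400  # hypothetical input image
if w < h:
    expected_height, expected_width = int(shortest_edge * h / w), shortest_edge
elif w > h:
    expected_height, expected_width = shortest_edge, int(shortest_edge * w / h)
else:
    expected_height = expected_width = shortest_edge
print(expected_height, expected_width)  # 32 426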
|
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to OneFormerProcessor,
assuming do_resize is set to True with a scalar size. It also provides the expected sequence length
for the task_inputs and text_list_input.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w)
expected_width = self.size["shortest_edge"]
elif w > h:
expected_height = self.size["shortest_edge"]
expected_width = int(self.size["shortest_edge"] * w / h)
else:
expected_height = self.size["shortest_edge"]
expected_width = self.size["shortest_edge"]
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width, expected_sequence_length = self.get_expected_values([image])
expected_values.append((expected_height, expected_width, expected_sequence_length))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
expected_sequence_length = self.max_seq_length
return expected_height, expected_width, expected_sequence_length
|
This function computes the expected height and width when providing images to OneFormerProcessor,
assuming do_resize is set to True with a scalar size. It also provides the expected sequence length
for the task_inputs and text_list_input.
|
get_expected_values
|
python
|
huggingface/transformers
|
tests/models/oneformer/test_processor_oneformer.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/oneformer/test_processor_oneformer.py
|
Apache-2.0
|
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
|
If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.
|
assert_tensors_close
|
python
|
huggingface/transformers
|
tests/models/opt/test_modeling_opt.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/opt/test_modeling_opt.py
|
Apache-2.0
|
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
We also need to test multi-image cases where one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
curr_input_dict = copy.deepcopy(input_dict)  # in-place modifications follow
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values)
|
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
We also need to test multi-image cases where one prompt has multiple image tokens.
|
test_mismatching_num_image_tokens
|
python
|
huggingface/transformers
|
tests/models/paligemma/test_modeling_paligemma.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/paligemma/test_modeling_paligemma.py
|
Apache-2.0
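A generic sketch of the validation these tests exercise. This is not PaliGemma's actual forward code, just the kind of consistency check a VLM performs before merging image features into the text embeddings; the token id below is hypothetical.
import torch
IMAGE_TOKEN_ID = 32000  # hypothetical id of the special image placeholder token
def check_image_tokens(input_ids: torch.Tensor, pixel_values: torch.Tensor) -> None:
    n_image_tokens = int((input_ids == IMAGE_TOKEN_ID).sum())
    n_images = pixel_values.shape[0]
    if n_image_tokens != n_images:
        raise ValueError(
            f"Got {n_images} image(s) but {n_image_tokens} image token(s) in the text."
        )
input_ids = torch.tensor([[IMAGE_TOKEN_ID, 5, 6], [IMAGE_TOKEN_ID, 7, 8]])
check_image_tokens(input_ids, torch.zeros(2, 3, 14, 14))    # two images, two tokens: passes
# check_image_tokens(input_ids, torch.zeros(1, 3, 14, 14))  # one image, two tokens: raises ValueError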
|
def test_attention_mask_with_token_types(self):
"""Test that attention masking works correctly both with and without token type IDs."""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
# Case 1: With token_type_ids
outputs_with_types = model(
**inputs_dict,
output_attentions=True,
)
# Case 2: Without token_type_ids
inputs_no_types = {k: v for k, v in inputs_dict.items() if k != "token_type_ids"}
outputs_no_types = model(
**inputs_no_types,
output_attentions=True,
)
attention_outputs_with_types = outputs_with_types.attentions
attention_outputs_no_types = outputs_no_types.attentions
# Verify pad tokens remain masked in both cases
attention_mask = inputs_dict["attention_mask"]
pad_positions = attention_mask == 0
for layer_attentions in [attention_outputs_with_types, attention_outputs_no_types]:
for layer_attn in layer_attentions:
# Check if pad tokens are properly masked
for batch_idx in range(layer_attn.shape[0]):
for seq_idx in range(layer_attn.shape[-1]):
if pad_positions[batch_idx, seq_idx]:
# Verify attention weights for pad tokens are zero
self.assertTrue(
torch.all(layer_attn[batch_idx, :, :, seq_idx] == 0),
f"Found non-zero attention weights for padding token at batch {batch_idx}, sequence position {seq_idx}",
)
|
Test that attention masking works correctly both with and without token type IDs.
|
test_attention_mask_with_token_types
|
python
|
huggingface/transformers
|
tests/models/paligemma/test_modeling_paligemma.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/paligemma/test_modeling_paligemma.py
|
Apache-2.0
|
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
We also need to test multi-image cases where one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
curr_input_dict = copy.deepcopy(input_dict)  # in-place modifications follow
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values)
|
Tests that VLMs throw an error with an explicit message saying what is wrong
when the number of images doesn't match the number of image tokens in the text.
We also need to test multi-image cases where one prompt has multiple image tokens.
|
test_mismatching_num_image_tokens
|
python
|
huggingface/transformers
|
tests/models/paligemma2/test_modeling_paligemma2.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/paligemma2/test_modeling_paligemma2.py
|
Apache-2.0
|
def setUpClass(cls):
"""Setup method: Called once before test-cases execution"""
cls.params = {}
cls.params.update(
context_length=32,
patch_length=8,
num_input_channels=3,
patch_stride=8,
d_model=4,
expansion_factor=2,
num_layers=3,
dropout=0.2,
mode="common_channel", # common_channel, mix_channel
gated_attn=True,
norm_mlp="LayerNorm",
mask_type="random",
random_mask_ratio=0.5,
mask_patches=[2, 3],
forecast_mask_ratios=[1, 1],
mask_value=0,
masked_loss=True,
channel_consistent_masking=True,
head_dropout=0.2,
prediction_length=64,
out_channels=None,
# num_labels=3,
num_targets=3,
output_range=None,
head_aggregation=None,
scaling="std",
use_positional_encoding=False,
positional_encoding="sincos",
self_attn=False,
self_attn_heads=1,
num_parallel_samples=4,
)
cls.num_patches = (
max(cls.params["context_length"], cls.params["patch_length"]) - cls.params["patch_length"]
) // cls.params["patch_stride"] + 1
# batch_size = 32
batch_size = 2
int(cls.params["prediction_length"] / cls.params["patch_length"])
cls.data = torch.rand(
batch_size,
cls.params["context_length"],
cls.params["num_input_channels"],
)
cls.enc_data = torch.rand(
batch_size,
cls.params["num_input_channels"],
cls.num_patches,
cls.params["patch_length"],
)
cls.enc_output = torch.rand(
batch_size,
cls.params["num_input_channels"],
cls.num_patches,
cls.params["d_model"],
)
cls.flat_enc_output = torch.rand(
batch_size,
cls.num_patches,
cls.params["d_model"],
)
cls.correct_pred_output = torch.rand(
batch_size,
cls.params["prediction_length"],
cls.params["num_input_channels"],
)
cls.correct_regression_output = torch.rand(batch_size, cls.params["num_targets"])
cls.correct_pretrain_output = torch.rand(
batch_size,
cls.params["num_input_channels"],
cls.num_patches,
cls.params["patch_length"],
)
cls.correct_forecast_output = torch.rand(
batch_size,
cls.params["prediction_length"],
cls.params["num_input_channels"],
)
cls.correct_sel_forecast_output = torch.rand(batch_size, cls.params["prediction_length"], 2)
cls.correct_classification_output = torch.rand(
batch_size,
cls.params["num_targets"],
)
cls.correct_classification_classes = torch.randint(0, cls.params["num_targets"], (batch_size,))
|
Setup method: Called once before test-cases execution
|
setUpClass
|
python
|
huggingface/transformers
|
tests/models/patchtsmixer/test_modeling_patchtsmixer.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/patchtsmixer/test_modeling_patchtsmixer.py
|
Apache-2.0
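The num_patches value computed above is easy to verify by hand with the parameters from setUpClass:
context_length, patch_length, patch_stride = 32, 8, 8
num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1
print(num_patches)  # 4 non-overlapping patches of length 8 over a context of 32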
|
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
|
If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.
|
assert_tensors_close
|
python
|
huggingface/transformers
|
tests/models/pegasus/test_modeling_pegasus.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/pegasus/test_modeling_pegasus.py
|
Apache-2.0
|
def test_equivalence_to_orig_tokenizer(self):
"""
To run with original TF tokenizer:
!wget https://github.com/google-research/bigbird/raw/master/bigbird/vocab/pegasus.model
!pip install tensorflow-text
import tensorflow.compat.v2 as tf
import tensorflow_text as tft
VOCAB_FILE = "./pegasus.model"
tf.enable_v2_behavior()
test_str = "This is an example string that is used to test the original TF implementation against the HF implementation"
tokenizer = tft.SentencepieceTokenizer(model=tf.io.gfile.GFile(VOCAB_FILE, "rb").read())
tokenizer.tokenize(test_str)
"""
test_str = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
token_ids = self._large_tokenizer(test_str).input_ids
self.assertListEqual(
token_ids,
[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
)
|
To run with original TF tokenizer:
!wget https://github.com/google-research/bigbird/raw/master/bigbird/vocab/pegasus.model
!pip install tensorflow-text
import tensorflow.compat.v2 as tf
import tensorflow_text as tft
VOCAB_FILE = "./pegasus.model"
tf.enable_v2_behavior()
test_str = "This is an example string that is used to test the original TF implementation against the HF implementation"
tokenizer = tft.SentencepieceTokenizer(model=tf.io.gfile.GFile(VOCAB_FILE, "rb").read())
tokenizer.tokenize(test_str)
|
test_equivalence_to_orig_tokenizer
|
python
|
huggingface/transformers
|
tests/models/pegasus/test_tokenization_pegasus.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/pegasus/test_tokenization_pegasus.py
|
Apache-2.0
|
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
|
If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.
|
assert_tensors_close
|
python
|
huggingface/transformers
|
tests/models/pegasus_x/test_modeling_pegasus_x.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/pegasus_x/test_modeling_pegasus_x.py
|
Apache-2.0
|
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
|
If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.
|
assert_tensors_close
|
python
|
huggingface/transformers
|
tests/models/plbart/test_modeling_plbart.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/plbart/test_modeling_plbart.py
|
Apache-2.0
|
def model(self):
"""Only load the model if needed."""
model = PLBartForConditionalGeneration.from_pretrained(self.checkpoint_name).to(torch_device)
if "cuda" in torch_device:
model = model.half()
return model
|
Only load the model if needed.
|
model
|
python
|
huggingface/transformers
|
tests/models/plbart/test_modeling_plbart.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/plbart/test_modeling_plbart.py
|
Apache-2.0
|
def get_inputs(self):
"""get inputs for both feature extractor and tokenizer"""
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
speech_samples = ds.sort("id").select([0])["audio"]
input_speech = [x["array"] for x in speech_samples][0]
sampling_rate = [x["sampling_rate"] for x in speech_samples][0]
feature_extractor_outputs = self.get_feature_extractor()(
audio=input_speech, sampling_rate=sampling_rate, return_tensors="pt"
)
model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano")
token_ids = model.generate(input_features=feature_extractor_outputs["input_features"], composer="composer1")
dummy_notes = [
[
pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77),
pretty_midi.Note(start=0.673379, end=0.905578, pitch=73, velocity=77),
pretty_midi.Note(start=0.905578, end=2.159456, pitch=73, velocity=77),
pretty_midi.Note(start=1.114558, end=2.159456, pitch=78, velocity=77),
pretty_midi.Note(start=1.323537, end=1.532517, pitch=80, velocity=77),
],
[
pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77),
],
]
return input_speech, sampling_rate, token_ids, dummy_notes
|
get inputs for both feature extractor and tokenizer
|
get_inputs
|
python
|
huggingface/transformers
|
tests/models/pop2piano/test_processor_pop2piano.py
|
https://github.com/huggingface/transformers/blob/master/tests/models/pop2piano/test_processor_pop2piano.py
|
Apache-2.0
|