Dataset columns (name: type, observed range):
code: stringlengths, 66 to 870k
docstring: stringlengths, 19 to 26.7k
func_name: stringlengths, 1 to 138
language: stringclasses, 1 value
repo: stringlengths, 7 to 68
path: stringlengths, 5 to 324
url: stringlengths, 46 to 389
license: stringclasses, 7 values
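For readers who want to inspect the rows programmatically, here is a minimal sketch using the `datasets` library. The dataset identifier below is a placeholder (hypothetical), not the real Hub repository name.

from datasets import load_dataset

# Hypothetical dataset ID: substitute the actual Hub repository name for this dataset.
ds = load_dataset("your-namespace/code-docstring-pairs", split="train")
print(ds.column_names)  # expected: code, docstring, func_name, language, repo, path, url, license
row = ds[0]
print(row["func_name"], "from", row["repo"], "at", row["path"])
print(row["docstring"])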
def get_current_gpu_memory_use():
    """returns a list of VRAM allocations per GPU in MBs"""

    per_device_memory = []
    for id in range(backend_device_count(torch_device)):
        with backend_torch_accelerator_module(torch_device).device(id):
            per_device_memory.append(backend_memory_allocated(torch_device) >> 20)

    return per_device_memory
returns a list of VRAM allocations per GPU in MBs
get_current_gpu_memory_use
python
huggingface/transformers
tests/test_modeling_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
Apache-2.0
def test_attn_implementation_composite_models(self):
    """
    Tests if composite models can receive a dict object as attn_implementation, where each key should be
    one of the sub-configs from the model's config.
    """
    if not self.has_attentions:
        self.skipTest(reason="Model architecture does not support attentions")

    for model_class in self.all_model_classes:
        if not self._is_composite:
            self.skipTest("Model is not a composite model.")

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # set eager as it will be the one supported in all models
        # we just need to test if passing 'attn_implementation' as a dict fails or not
        attn_implementation_per_subconfig = {}
        for key in config.sub_configs.keys():
            attn_implementation_per_subconfig[key] = "eager"

        config._attn_implementation = attn_implementation_per_subconfig
        model = model_class(config)
        for key in config.sub_configs.keys():
            sub_config = getattr(model.config, key)
            self.assertTrue(sub_config._attn_implementation == "eager")

        for name, submodule in model.named_modules():
            class_name = submodule.__class__.__name__
            if (
                class_name.endswith("Attention")
                and getattr(submodule, "config", None)
                and submodule.config._attn_implementation != "eager"
            ):
                raise ValueError(
                    f"The eager model should not have SDPA/FA2 attention layers but got `{class_name}.config._attn_implementation={submodule.config._attn_implementation}`"
                )

        # Set the attention to default `None` but the text config to `eager`
        # The model should load encoders in SDPA but not the text attention
        config._attn_implementation = None
        config.get_text_config(decoder=True)._attn_implementation = "eager"
        model = model_class(config)
        self.assertTrue(model.config.get_text_config(decoder=True)._attn_implementation == "eager")
Tests if composite models can receive a dict object as attn_implementation, where each key should be one of the sub-configs from the model's config.
test_attn_implementation_composite_models
python
huggingface/transformers
tests/test_modeling_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
Apache-2.0
def test_sdpa_can_dispatch_non_composite_models(self):
    """
    Tests if non-composite models dispatch correctly on SDPA/eager when requested to do so when loading the model.
    This tests only by looking at layer names, as SDPA layers are usually called "SDPAAttention".
    """
    if not self.has_attentions:
        self.skipTest(reason="Model architecture does not support attentions")

    if not self.all_model_classes[0]._supports_sdpa or self._is_composite:
        self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")

    for model_class in self.all_model_classes:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = model_class(config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_sdpa = model_class.from_pretrained(tmpdirname)
            model_sdpa = model_sdpa.eval().to(torch_device)
            self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")

            model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
            model_eager = model_eager.eval().to(torch_device)
            self.assertTrue(model_eager.config._attn_implementation == "eager")

            for name, submodule in model_eager.named_modules():
                class_name = submodule.__class__.__name__
                if (
                    class_name.endswith("Attention")
                    and getattr(submodule, "config", None)
                    and submodule.config._attn_implementation == "sdpa"
                ):
                    raise ValueError(
                        f"The eager model should not have SDPA attention layers but got `{class_name}.config._attn_implementation={submodule.config._attn_implementation}`"
                    )
Tests if non-composite models dispatch correctly on SDPA/eager when requested to do so when loading the model. This tests only by looking at layer names, as SDPA layers are usually called "SDPAAttention".
test_sdpa_can_dispatch_non_composite_models
python
huggingface/transformers
tests/test_modeling_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
Apache-2.0
def test_sdpa_can_dispatch_composite_models(self):
    """
    Tests if composite models dispatch correctly on SDPA/eager when requested to do so when loading the model.
    This tests only by looking at layer names, as SDPA layers are usually called "SDPAAttention".
    In contrast to the above test, this one checks if "config._attn_implementation" is a dict after the model
    is loaded, because we manually replicate the requested attn implementation on each sub-config when loading.
    See https://github.com/huggingface/transformers/pull/32238 for more info.
    The test tries to cover most general cases of composite models, VLMs with vision and text configs.
    Any model that has a different set of sub-configs has to overwrite this test.
    """
    if not self.has_attentions:
        self.skipTest(reason="Model architecture does not support attentions")

    if not self._is_composite:
        self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")

    for model_class in self.all_model_classes:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = model_class(config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_sdpa = model_class.from_pretrained(tmpdirname)
            model_sdpa = model_sdpa.eval().to(torch_device)

            vision_model_names = {"visual", "image_tower", "vision_tower", "vision_model"}
            language_model_names = {"language_model", "model", "text_model"}
            vision_model_name = [name for name in vision_model_names if hasattr(model_sdpa, name)][0]
            language_model_name = [name for name in language_model_names if hasattr(model_sdpa, name)][0]

            vision_model_sdpa = getattr(model_sdpa, vision_model_name)
            language_model_sdpa = getattr(model_sdpa, language_model_name)
            text_attn = "sdpa" if language_model_sdpa._supports_sdpa else "eager"
            vision_attn = "sdpa" if vision_model_sdpa._supports_sdpa else "eager"

            # `None` as it is the requested one which will be assigned to each sub-config
            # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
            self.assertTrue(language_model_sdpa.config._attn_implementation == text_attn)
            self.assertTrue(vision_model_sdpa.config._attn_implementation == vision_attn)

            model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
            model_eager = model_eager.eval().to(torch_device)
            self.assertTrue(getattr(model_eager, language_model_name).config._attn_implementation == "eager")
            self.assertTrue(getattr(model_eager, vision_model_name).config._attn_implementation == "eager")

            for name, submodule in model_eager.named_modules():
                class_name = submodule.__class__.__name__
                if (
                    class_name.endswith("Attention")
                    and getattr(submodule, "config", None)
                    and submodule.config._attn_implementation == "sdpa"
                ):
                    raise ValueError("The eager model should not have SDPA attention layers")
Tests if composite models dispatch correctly on SDPA/eager when requested to do so when loading the model. This tests only by looking at layer names, as SDPA layers are usually called "SDPAAttention". In contrast to the above test, this one checks if "config._attn_implementation" is a dict after the model is loaded, because we manually replicate the requested attn implementation on each sub-config when loading. See https://github.com/huggingface/transformers/pull/32238 for more info. The test tries to cover most general cases of composite models, VLMs with vision and text configs. Any model that has a different set of sub-configs has to overwrite this test.
test_sdpa_can_dispatch_composite_models
python
huggingface/transformers
tests/test_modeling_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
Apache-2.0
def test_flash_attn_2_can_dispatch_composite_models(self):
    """
    Tests if composite models can dispatch on FA2 if the sub-models support FA2.
    The test is needed because composite models are handled differently and cannot be checked with the above tests.
    If any of the sub-models does not support FA2, we'll raise an error when dispatching that particular sub-model.
    Otherwise we dispatch safely in all sub-models, where "sub-models" are specific backbone models (LM/vision/audio/etc).
    """
    if not self.has_attentions:
        self.skipTest(reason="Model architecture does not support attentions")

    if not is_torch_fp16_available_on_device(torch_device):
        self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)")

    torch_dtype = torch.float16
    for model_class in self.all_model_classes:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = model_class(config)

        if not self._is_composite:
            self.skipTest("This model is not a composite model!")

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = model_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype)

            sub_models_supporting_fa2 = [
                module._supports_flash_attn_2
                for name, module in model.named_modules()
                if isinstance(module, PreTrainedModel) and name != ""
            ]
            supports_fa2_all_modules = (
                all(sub_models_supporting_fa2)
                if len(sub_models_supporting_fa2) > 0
                else model._supports_flash_attn_2
            )
            if not supports_fa2_all_modules:
                with self.assertRaises(ValueError):
                    model_fa2 = model_class.from_pretrained(
                        tmpdirname,
                        torch_dtype=torch_dtype,
                        attn_implementation="flash_attention_2",
                    )
            else:
                model_fa2 = model_class.from_pretrained(
                    tmpdirname, torch_dtype=torch_dtype, attn_implementation="flash_attention_2"
                )
                for key in model_fa2.config:
                    if isinstance(getattr(model_fa2.config, key), PretrainedConfig):
                        sub_config = getattr(model_fa2.config, key)
                        self.assertTrue(sub_config._attn_implementation == "flash_attention_2")

                has_fa2 = False
                for name, submodule in model_fa2.named_modules():
                    class_name = submodule.__class__.__name__
                    if (
                        "Attention" in class_name
                        and getattr(submodule, "config", None)
                        and submodule.config._attn_implementation == "flash_attention_2"
                    ):
                        has_fa2 = True
                        break
                if not has_fa2:
                    raise ValueError("The FA2 model should have FA2 layers")
Tests if composite models can dispatch on FA2 if the sub-models support FA2. The test is needed because composite models are handled differently and cannot be checked with the above tests. If any of the sub-models does not support FA2, we'll raise an error when dispatching that particular sub-model. Otherwise we dispatch safely in all sub-models, where "sub-models" are specific backbone models (LM/vision/audio/etc).
test_flash_attn_2_can_dispatch_composite_models
python
huggingface/transformers
tests/test_modeling_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
Apache-2.0
def test_sliding_window_mask(self):
    """Tests that we can control the sliding window attention behavior of a model."""
    config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
    if not self.has_attentions:
        self.skipTest(reason="Model does not support output_attentions")
    if not (hasattr(config, "sliding_window") and hasattr(config, "use_sliding_window")):
        self.skipTest(reason="Model does not support sliding window mask")

    seq_len = self.model_tester.seq_length
    batch_size = self.model_tester.batch_size
    sliding_window = 3  # set to arbitrary small number

    sliding_mask = torch.zeros((seq_len, seq_len), dtype=torch.bool)
    for i in range(seq_len):
        start = max(0, i - sliding_window + 1)
        sliding_mask[i, start : i + 1] = True
    sliding_mask = sliding_mask.to(torch_device)

    config.sliding_window = sliding_window
    inputs["attention_mask"] = torch.ones(batch_size, seq_len).to(torch.int64).to(torch_device)
    for model_class in self.all_model_classes:
        # Set sliding window to `True` and check that all tokens beyond window size are masked
        config.use_sliding_window = True
        config_dict = config.to_diff_dict()
        if hasattr(config, "layer_types"):
            del config_dict["layer_types"]
        new_config = config.__class__(**config_dict)

        # We need to set eager as otherwise `output_attentions` is not supported
        model = model_class._from_config(new_config, attn_implementation="eager").to(torch_device)
        model.eval()
        layer_types = getattr(model.config, "layer_types", ["sliding_attention"] * config.num_hidden_layers)
        attentions = model(**inputs, output_attentions=True).attentions

        for layer_attention, layer_type in zip(attentions, layer_types):
            if layer_type == "sliding_attention":
                self.assertTrue((layer_attention[:, :, ~sliding_mask] == 0).all().item())
            else:
                self.assertFalse((layer_attention[:, :, ~sliding_mask] == 0).all().item())

        # Set sliding window to `False` while keeping `sliding_window=3`
        # Check that all tokens beyond window size are not masked
        config.use_sliding_window = False
        config_dict = config.to_diff_dict()
        if hasattr(config, "layer_types"):
            del config_dict["layer_types"]
        new_config = config.__class__(**config_dict)

        # We need to set eager as otherwise `output_attentions` is not supported
        model = model_class._from_config(new_config, attn_implementation="eager").to(torch_device)
        model.eval()
        attentions_not_sliding = model(**inputs, output_attentions=True).attentions
        for layer_attention in attentions_not_sliding:
            self.assertFalse((layer_attention[:, :, ~sliding_mask] == 0).all().item())
Tests that we can control the sliding window attention behavior of a model.
test_sliding_window_mask
python
huggingface/transformers
tests/test_modeling_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
Apache-2.0
def test_torch_export(self, config=None, inputs_dict=None, tolerance=1e-4):
    """
    Test if model can be exported with torch.export.export()

    Args:
        config (PretrainedConfig): Config to use for the model, if None, use default config from model_tester
        inputs_dict (dict): Inputs to use for the model, if None, use default inputs from model_tester
        tolerance (float): `atol` for torch.allclose(), defined in signature for test overriding
    """
    if not self.test_torch_exportable:
        self.skipTest(reason="test_torch_exportable=False for this model.")

    def recursively_check(eager_outputs, exported_outputs):
        is_tested = False
        if isinstance(eager_outputs, torch.Tensor):
            torch.testing.assert_close(eager_outputs, exported_outputs, atol=tolerance, rtol=tolerance)
            return True
        elif isinstance(eager_outputs, (tuple, list)):
            for eager_output, exported_output in zip(eager_outputs, exported_outputs):
                is_tested = is_tested or recursively_check(eager_output, exported_output)
            return is_tested
        elif isinstance(eager_outputs, dict):
            for key in eager_outputs:
                is_tested = is_tested or recursively_check(eager_outputs[key], exported_outputs[key])
            return is_tested
        return is_tested

    default_config, default_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config = config or default_config
    inputs_dict = inputs_dict or default_inputs_dict

    for model_class in self.all_model_classes:
        if model_class.__name__.endswith("ForPreTraining"):
            continue

        with self.subTest(model_class.__name__):
            model = model_class(config).eval().to(torch_device)

            # Export model
            exported_model = torch.export.export(
                model,
                args=(),
                kwargs=inputs_dict,
                strict=True,
            )

            # Run exported model and eager model
            with torch.no_grad():
                # set seed in case anything is not deterministic in model (e.g. vit_mae noise)
                torch.manual_seed(1234)
                eager_outputs = model(**inputs_dict)
                torch.manual_seed(1234)
                exported_outputs = exported_model.module().forward(**inputs_dict)

            # Check if outputs are close:
            # is_tested is a boolean flag indicating if we compare any outputs,
            # e.g. there might be a situation when outputs are empty list, then is_tested will be False.
            # In case of outputs are different the error will be raised in `recursively_check` function.
            is_tested = recursively_check(eager_outputs, exported_outputs)
            self.assertTrue(is_tested, msg=f"No outputs were compared for {model_class.__name__}")
Test if model can be exported with torch.export.export() Args: config (PretrainedConfig): Config to use for the model, if None, use default config from model_tester inputs_dict (dict): Inputs to use for the model, if None, use default inputs from model_tester tolerance (float): `atol` for torch.allclose(), defined in signature for test overriding
test_torch_export
python
huggingface/transformers
tests/test_modeling_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
Apache-2.0
def test_generation_tester_mixin_inheritance(self):
    """
    Ensures that we have the generation tester mixin if the model can generate. The test will fail otherwise,
    forcing the mixin to be added -- and ensuring proper test coverage
    """
    if len(self.all_generative_model_classes) > 0:
        self.assertTrue(
            issubclass(self.__class__, GenerationTesterMixin),
            msg=(
                "This model can call `generate` from `GenerationMixin`, so one of two things must happen: 1) the "
                "tester must inherit from `GenerationTesterMixin` to run `generate` tests, or 2) if the model "
                "doesn't fully support the original `generate` or has a custom `generate` with partial feature "
                "support, the tester must overwrite `all_generative_model_classes` to skip the failing classes "
                "(make sure to comment why). If `all_generative_model_classes` is overwritten as `()`, then we "
                "need to remove the `GenerationTesterMixin` inheritance -- no `generate` tests are being run."
            ),
        )
    else:
        self.assertFalse(
            issubclass(self.__class__, GenerationTesterMixin),
            msg=(
                "This model can't call `generate`, so its tester can't inherit `GenerationTesterMixin`. (If you "
                "think the model should be able to `generate`, the model may be missing the `GenerationMixin` "
                "inheritance)"
            ),
        )
Ensures that we have the generation tester mixin if the model can generate. The test will fail otherwise, forcing the mixin to be added -- and ensuring proper test coverage
test_generation_tester_mixin_inheritance
python
huggingface/transformers
tests/test_modeling_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_common.py
Apache-2.0
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
Creates a random int32 tensor of the shape within the vocab size.
ids_tensor
python
huggingface/transformers
tests/test_modeling_flax_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_flax_common.py
Apache-2.0
def get_params(params, from_head_prefix=None):
    """Extracts the relevant parameters from the model params into a flattened dict and appends batch
    normalization statistics if present."""

    # If both parameters and batch normalization statistics are present
    if "batch_stats" in params:
        # Extract only parameters for the specified head prefix (if specified) and add batch statistics
        if from_head_prefix is not None:
            extracted_params = flatten_dict(unfreeze(params["params"][from_head_prefix]))
            extracted_params.update(flatten_dict(params["batch_stats"][from_head_prefix]))
        else:
            extracted_params = flatten_dict(unfreeze(params["params"]))
            extracted_params.update(flatten_dict(params["batch_stats"]))

    # Only parameters are present
    else:
        if from_head_prefix is not None:
            extracted_params = flatten_dict(unfreeze(params[from_head_prefix]))
        else:
            extracted_params = flatten_dict(unfreeze(params))

    return extracted_params
Extracts the relevant parameters from the model params into a flattened dict and appends batch normalization statistics if present.
get_params
python
huggingface/transformers
tests/test_modeling_flax_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_flax_common.py
Apache-2.0
def _make_attention_mask_non_null(self, inputs_dict):
    """Make sure no sequence has all zeros as attention mask"""

    for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]:
        if k in inputs_dict:
            attention_mask = inputs_dict[k]

            # Make sure no all 0s attention masks - to avoid failure at this moment.
            # Put `1` at the beginning of sequences to make it still work when combining causal attention masks.
            # TODO: remove this line once a fix regarding large negative values for attention mask is done.
            attention_mask = tf.concat(
                [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1
            )

            # Here we make the first sequence with all 0s as attention mask.
            # Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative
            # values, like `1e-4`, `1e-9`, `1e-30` and `-inf` for attention mask across models/frameworks.
            # TODO: enable this block once the large negative values thing is cleaned up.
            # (see https://github.com/huggingface/transformers/issues/14859)
            # attention_mask = tf.concat(
            #     [
            #         tf.zeros_like(attention_mask[:1], dtype=tf.int32),
            #         tf.cast(attention_mask[1:], dtype=tf.int32)
            #     ],
            #     axis=0
            # )

            inputs_dict[k] = attention_mask
Make sure no sequence has all zeros as attention mask
_make_attention_mask_non_null
python
huggingface/transformers
tests/test_modeling_tf_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_tf_common.py
Apache-2.0
def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class):
    """For temporarily ignoring some failed test cases (issues to be fixed)"""

    tf_keys = {k for k, v in tf_outputs.items() if v is not None}
    pt_keys = {k for k, v in pt_outputs.items() if v is not None}

    key_differences = tf_keys.symmetric_difference(pt_keys)

    if model_class.__name__ in [
        "TFFlaubertWithLMHeadModel",
        "TFFunnelForPreTraining",
        "TFElectraForPreTraining",
        "TFXLMWithLMHeadModel",
    ]:
        for k in key_differences:
            if k in ["loss", "losses"]:
                tf_keys.discard(k)
                pt_keys.discard(k)
    elif model_class.__name__.startswith("TFGPT2"):
        # `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple.
        tf_keys.discard("past_key_values")
        pt_keys.discard("past_key_values")

    # create new outputs from the remaining fields
    new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys})
    new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys})

    return new_tf_outputs, new_pt_outputs
For temporarily ignoring some failed test cases (issues to be fixed)
_postprocessing_to_ignore_test_cases
python
huggingface/transformers
tests/test_modeling_tf_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_tf_common.py
Apache-2.0
def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)

    return output
Creates a random int32 tensor of the shape within the vocab size.
ids_tensor
python
huggingface/transformers
tests/test_modeling_tf_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_modeling_tf_common.py
Apache-2.0
def run_task_tests(self, task, torch_dtype="float32"):
    """Run pipeline tests for a specific `task`

    Args:
        task (`str`):
            A task name. This should be a key in the mapping `pipeline_test_mapping`.
        torch_dtype (`str`, `optional`, defaults to `'float32'`):
            The torch dtype to use for the model. Can be used for FP16/other precision inference.
    """
    if task not in self.pipeline_model_mapping:
        self.skipTest(
            f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: `{task}` is not in "
            f"`self.pipeline_model_mapping` for `{self.__class__.__name__}`."
        )

    model_architectures = self.pipeline_model_mapping[task]
    if not isinstance(model_architectures, tuple):
        model_architectures = (model_architectures,)

    # We are going to run tests for multiple model architectures, some of them might be skipped;
    # with this flag we track whether at least one model was tested or all were skipped
    at_least_one_model_is_tested = False

    for model_architecture in model_architectures:
        model_arch_name = model_architecture.__name__
        model_type = model_architecture.config_class.model_type

        # Get the canonical name
        for _prefix in ["Flax", "TF"]:
            if model_arch_name.startswith(_prefix):
                model_arch_name = model_arch_name[len(_prefix) :]
                break

        if model_arch_name not in tiny_model_summary:
            continue

        tokenizer_names = tiny_model_summary[model_arch_name]["tokenizer_classes"]

        # Sort image processors and feature extractors from tiny-models json file
        image_processor_names = []
        feature_extractor_names = []

        processor_classes = tiny_model_summary[model_arch_name]["processor_classes"]
        for cls_name in processor_classes:
            if "ImageProcessor" in cls_name:
                image_processor_names.append(cls_name)
            elif "FeatureExtractor" in cls_name:
                feature_extractor_names.append(cls_name)

        # Processor classes are not in tiny models JSON file, so extract them from the mapping
        # processors are mapped to instance, e.g. "XxxProcessor"
        processor_names = PROCESSOR_MAPPING_NAMES.get(model_type, None)
        if not isinstance(processor_names, (list, tuple)):
            processor_names = [processor_names]

        commit = None
        if model_arch_name in tiny_model_summary and "sha" in tiny_model_summary[model_arch_name]:
            commit = tiny_model_summary[model_arch_name]["sha"]

        repo_name = f"tiny-random-{model_arch_name}"
        if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing":
            repo_name = model_arch_name

        self.run_model_pipeline_tests(
            task,
            repo_name,
            model_architecture,
            tokenizer_names=tokenizer_names,
            image_processor_names=image_processor_names,
            feature_extractor_names=feature_extractor_names,
            processor_names=processor_names,
            commit=commit,
            torch_dtype=torch_dtype,
        )
        at_least_one_model_is_tested = True

    if task in task_to_pipeline_and_spec_mapping:
        pipeline, hub_spec = task_to_pipeline_and_spec_mapping[task]
        compare_pipeline_args_to_hub_spec(pipeline, hub_spec)

    if not at_least_one_model_is_tested:
        self.skipTest(
            f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: Could not find any "
            f"model architecture in the tiny models JSON file for `{task}`."
        )
Run pipeline tests for a specific `task` Args: task (`str`): A task name. This should be a key in the mapping `pipeline_test_mapping`. torch_dtype (`str`, `optional`, defaults to `'float32'`): The torch dtype to use for the model. Can be used for FP16/other precision inference.
run_task_tests
python
huggingface/transformers
tests/test_pipeline_mixin.py
https://github.com/huggingface/transformers/blob/master/tests/test_pipeline_mixin.py
Apache-2.0
def is_pipeline_test_to_skip(
    self,
    pipeline_test_case_name,
    config_class,
    model_architecture,
    tokenizer_name,
    image_processor_name,
    feature_extractor_name,
    processor_name,
):
    """Skip some tests based on the classes or their names without the instantiated objects.

    This is to avoid calling `from_pretrained` (so reducing the runtime) if we already know the tests will fail.
    """
    # No fix is required for this case.
    if (
        pipeline_test_case_name == "DocumentQuestionAnsweringPipelineTests"
        and tokenizer_name is not None
        and not tokenizer_name.endswith("Fast")
    ):
        # `DocumentQuestionAnsweringPipelineTests` requires a fast tokenizer.
        return True

    return False
Skip some tests based on the classes or their names without the instantiated objects. This is to avoid calling `from_pretrained` (so reducing the runtime) if we already know the tests will fail.
is_pipeline_test_to_skip
python
huggingface/transformers
tests/test_pipeline_mixin.py
https://github.com/huggingface/transformers/blob/master/tests/test_pipeline_mixin.py
Apache-2.0
def is_pipeline_test_to_skip_more(
    self,
    pipeline_test_case_name,
    config,
    model,
    tokenizer,
    image_processor=None,
    feature_extractor=None,
    processor=None,
):  # noqa
    """Skip some more tests based on the information from the instantiated objects."""
    # No fix is required for this case.
    if (
        pipeline_test_case_name == "QAPipelineTests"
        and tokenizer is not None
        and getattr(tokenizer, "pad_token", None) is None
        and not tokenizer.__class__.__name__.endswith("Fast")
    ):
        # `QAPipelineTests` doesn't work with a slow tokenizer that has no pad token.
        return True

    return False
Skip some more tests based on the information from the instantiated objects.
is_pipeline_test_to_skip_more
python
huggingface/transformers
tests/test_pipeline_mixin.py
https://github.com/huggingface/transformers/blob/master/tests/test_pipeline_mixin.py
Apache-2.0
def compare_pipeline_args_to_hub_spec(pipeline_class, hub_spec):
    """
    Compares the docstring of a pipeline class to the fields of the matching Hub input signature class to ensure
    that they match. This guarantees that Transformers pipelines can be used in inference without needing to
    manually refactor or rename inputs.
    """
    ALLOWED_TRANSFORMERS_ONLY_ARGS = ["timeout"]

    docstring = inspect.getdoc(pipeline_class.__call__).strip()
    docstring_args = set(parse_args_from_docstring_by_indentation(docstring))
    hub_args = set(get_arg_names_from_hub_spec(hub_spec))

    # Special casing: We allow the name of this arg to differ
    hub_generate_args = [
        hub_arg for hub_arg in hub_args if hub_arg.startswith("generate") or hub_arg.startswith("generation")
    ]
    docstring_generate_args = [
        docstring_arg
        for docstring_arg in docstring_args
        if docstring_arg.startswith("generate") or docstring_arg.startswith("generation")
    ]
    if (
        len(hub_generate_args) == 1
        and len(docstring_generate_args) == 1
        and hub_generate_args != docstring_generate_args
    ):
        hub_args.remove(hub_generate_args[0])
        docstring_args.remove(docstring_generate_args[0])

    # Special casing 2: We permit some transformers-only arguments that don't affect pipeline output
    for arg in ALLOWED_TRANSFORMERS_ONLY_ARGS:
        if arg in docstring_args and arg not in hub_args:
            docstring_args.remove(arg)

    if hub_args != docstring_args:
        error = [f"{pipeline_class.__name__} differs from JS spec {hub_spec.__name__}"]
        matching_args = hub_args & docstring_args
        huggingface_hub_only = hub_args - docstring_args
        transformers_only = docstring_args - hub_args
        if matching_args:
            error.append(f"Matching args: {matching_args}")
        if huggingface_hub_only:
            error.append(f"Huggingface Hub only: {huggingface_hub_only}")
        if transformers_only:
            error.append(f"Transformers only: {transformers_only}")
        raise ValueError("\n".join(error))
Compares the docstring of a pipeline class to the fields of the matching Hub input signature class to ensure that they match. This guarantees that Transformers pipelines can be used in inference without needing to manually refactor or rename inputs.
compare_pipeline_args_to_hub_spec
python
huggingface/transformers
tests/test_pipeline_mixin.py
https://github.com/huggingface/transformers/blob/master/tests/test_pipeline_mixin.py
Apache-2.0
def prepare_image_inputs():
    """This function prepares a list of PIL images"""
    image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
    image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
    return image_inputs
This function prepares a list of PIL images
prepare_image_inputs
python
huggingface/transformers
tests/test_processing_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_processing_common.py
Apache-2.0
def prepare_image_inputs(self, batch_size: Optional[int] = None):
    """This function prepares a list of PIL images for testing"""
    if batch_size is None:
        return prepare_image_inputs()[0]
    if batch_size < 1:
        raise ValueError("batch_size must be greater than 0")
    return prepare_image_inputs() * batch_size
This function prepares a list of PIL images for testing
prepare_image_inputs
python
huggingface/transformers
tests/test_processing_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_processing_common.py
Apache-2.0
def prepare_video_inputs(self, batch_size: Optional[int] = None):
    """This function prepares a list of numpy videos."""
    video_input = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] * 8
    if batch_size is None:
        return video_input
    return [video_input] * batch_size
This function prepares a list of numpy videos.
prepare_video_inputs
python
huggingface/transformers
tests/test_processing_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_processing_common.py
Apache-2.0
def test_image_processor_defaults_preserved_by_image_kwargs(self):
    """
    We use do_rescale=True, rescale_factor=-1 to ensure that image_processor kwargs are preserved in the processor.
    We then check that the mean of the pixel_values is less than or equal to 0 after processing.
    Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied.
    """
    if "image_processor" not in self.processor_class.attributes:
        self.skipTest(f"image_processor attribute not present in {self.processor_class}")
    processor_components = self.prepare_components()
    processor_components["image_processor"] = self.get_component(
        "image_processor", do_rescale=True, rescale_factor=-1
    )
    processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
    processor_kwargs = self.prepare_processor_dict()

    processor = self.processor_class(**processor_components, **processor_kwargs)
    self.skip_processor_without_typed_kwargs(processor)

    input_str = self.prepare_text_inputs(modality="image")
    image_input = self.prepare_image_inputs()

    inputs = processor(text=input_str, images=image_input, return_tensors="pt")
    self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
We use do_rescale=True, rescale_factor=-1 to ensure that image_processor kwargs are preserved in the processor. We then check that the mean of the pixel_values is less than or equal to 0 after processing. Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied.
test_image_processor_defaults_preserved_by_image_kwargs
python
huggingface/transformers
tests/test_processing_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_processing_common.py
Apache-2.0
def test_video_processor_defaults_preserved_by_video_kwargs(self):
    """
    We use do_rescale=True, rescale_factor=-1 to ensure that image_processor kwargs are preserved in the processor.
    We then check that the mean of the pixel_values is less than or equal to 0 after processing.
    Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied.
    """
    if "video_processor" not in self.processor_class.attributes:
        self.skipTest(f"video_processor attribute not present in {self.processor_class}")
    processor_components = self.prepare_components()
    processor_components["video_processor"] = self.get_component(
        "video_processor", do_rescale=True, rescale_factor=-1
    )
    processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
    processor_kwargs = self.prepare_processor_dict()

    processor = self.processor_class(**processor_components, **processor_kwargs)
    self.skip_processor_without_typed_kwargs(processor)

    input_str = self.prepare_text_inputs(modality="video")
    video_input = self.prepare_video_inputs()

    inputs = processor(text=input_str, videos=video_input, return_tensors="pt")
    self.assertLessEqual(inputs[self.videos_input_name][0].mean(), 0)
We use do_rescale=True, rescale_factor=-1 to ensure that image_processor kwargs are preserved in the processor. We then check that the mean of the pixel_values is less than or equal to 0 after processing. Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied.
test_video_processor_defaults_preserved_by_video_kwargs
python
huggingface/transformers
tests/test_processing_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_processing_common.py
Apache-2.0
def test_overlapping_text_audio_kwargs_handling(self):
    """
    Checks that `padding`, or any other arg that overlaps between the audio feature extractor and the tokenizer,
    is passed only to the text component and ignored for audio, for BC purposes.
    """
    if "feature_extractor" not in self.processor_class.attributes:
        self.skipTest(f"feature_extractor attribute not present in {self.processor_class}")

    processor_components = self.prepare_components()
    processor_kwargs = self.prepare_processor_dict()
    processor = self.processor_class(**processor_components, **processor_kwargs)
    self.skip_processor_without_typed_kwargs(processor)

    input_str = self.prepare_text_inputs(batch_size=3, modality="audio")
    audio_lengths = [4000, 8000, 16000, 32000]
    raw_speech = [np.asarray(audio)[:length] for audio, length in zip(floats_list((3, 32_000)), audio_lengths)]

    # padding = True should not raise an error and will if the audio processor popped its value to None
    _ = processor(text=input_str, audio=raw_speech, padding=True, return_tensors="pt")
Checks that `padding`, or any other arg that overlaps between the audio feature extractor and the tokenizer, is passed only to the text component and ignored for audio, for BC purposes.
test_overlapping_text_audio_kwargs_handling
python
huggingface/transformers
tests/test_processing_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_processing_common.py
Apache-2.0
def test_apply_chat_template_video_special_processing(self):
    """
    Tests that models can use their own preprocessing to preprocess conversations.
    """
    processor = self.get_processor()
    if processor.chat_template is None:
        self.skipTest("Processor has no chat template")

    signature = inspect.signature(processor.__call__)
    if "videos" not in {*signature.parameters.keys()} or (
        signature.parameters.get("videos") is not None
        and signature.parameters["videos"].annotation == inspect._empty
    ):
        self.skipTest("Processor doesn't accept videos at input")

    video_file_path = hf_hub_download(
        repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
    )
    messages = [
        [
            {
                "role": "user",
                "content": [
                    {"type": "video", "path": video_file_path},
                    {"type": "text", "text": "What is shown in this video?"},
                ],
            },
        ]
    ]

    def _process_messages_for_chat_template(
        conversation,
        batch_images,
        batch_videos,
        batch_video_metadata,
        **chat_template_kwargs,
    ):
        # Let us just always return a dummy prompt
        new_msg = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "video"},  # no need to use path, video is loaded already by this moment
                        {"type": "text", "text": "Dummy prompt for preprocess testing"},
                    ],
                },
            ]
        ]
        return new_msg

    processor._process_messages_for_chat_template = _process_messages_for_chat_template
    out_dict_with_video = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    )
    self.assertTrue(self.videos_input_name in out_dict_with_video)

    # Check with `in` because we don't know how each template formats the prompt with BOS/EOS/etc
    formatted_text = processor.batch_decode(out_dict_with_video["input_ids"], skip_special_tokens=True)[0]
    self.assertTrue("Dummy prompt for preprocess testing" in formatted_text)
    self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1)
    self.assertEqual(len(out_dict_with_video[self.videos_input_name][0]), 243)
Tests that models can use their own preprocessing to preprocess conversations.
test_apply_chat_template_video_special_processing
python
huggingface/transformers
tests/test_processing_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_processing_common.py
Apache-2.0
def check_subword_sampling(
    tokenizer: PreTrainedTokenizer,
    text: Optional[str] = None,
    test_sentencepiece_ignore_case: bool = True,
) -> None:
    """
    Check if the tokenizer generates different results when subword regularization is enabled.

    Subword regularization augments training data with subword sampling.
    This has a random component.

    Args:
        tokenizer: The tokenizer to check.
        text: The text to use for the checks.
        test_sentencepiece_ignore_case: See `TokenizerTesterMixin.test_sentencepiece_ignore_case`.
    """
    text = "This is a test for subword regularization." if text is None else text
    if test_sentencepiece_ignore_case:
        text = text.lower()

    tokens_list = []
    for _ in range(5):
        tokens_list.append(tokenizer.tokenize(text))

    # the list of different pairs of tokens_list
    combinations = itertools.combinations(tokens_list, 2)

    # check if sampling is done
    subword_sampling_found = False
    for combination in combinations:
        if combination[0] != combination[1]:
            subword_sampling_found = True
    unittest.TestCase().assertTrue(subword_sampling_found)

    # check if converting back to original text works
    for tokens in tokens_list:
        if test_sentencepiece_ignore_case:
            unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens).lower())
        else:
            unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens))
Check if the tokenizer generates different results when subword regularization is enabled. Subword regularization augments training data with subword sampling. This has a random component. Args: tokenizer: The tokenizer to check. text: The text to use for the checks. test_sentencepiece_ignore_case: See `TokenizerTesterMixin.test_sentencepiece_ignore_case`.
check_subword_sampling
python
huggingface/transformers
tests/test_tokenization_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_tokenization_common.py
Apache-2.0
def tokenizer_integration_test_util(
    self,
    expected_encoding: dict,
    model_name: str,
    revision: Optional[str] = None,
    sequences: Optional[list[str]] = None,
    decode_kwargs: Optional[dict[str, Any]] = None,
    padding: bool = True,
):
    """
    Util for integration test.

    Text is tokenized and then reverted back to text. Both results are then checked.

    Args:
        expected_encoding: The expected result of the tokenizer output.
        model_name: The model name of the tokenizer to load and use.
        revision: The full git revision number of the model. This is to pin the tokenizer config and to avoid
            that tests start to fail if the config gets changed upstream.
        sequences: Can overwrite the texts that are used to check the tokenizer.
            This is useful if the tokenizer supports non-English languages like French.
        decode_kwargs: Additional args for the ``decode`` function which reverts the tokenized text back to a string.
        padding: Activates and controls padding of the tokenizer.
    """
    decode_kwargs = {} if decode_kwargs is None else decode_kwargs

    if sequences is None:
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained "
            "models in 100+ languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

    if self.test_sentencepiece_ignore_case:
        sequences = [sequence.lower() for sequence in sequences]

    tokenizer_classes = [self.tokenizer_class]
    if self.test_rust_tokenizer:
        tokenizer_classes.append(self.rust_tokenizer_class)

    for tokenizer_class in tokenizer_classes:
        tokenizer = tokenizer_class.from_pretrained(
            model_name,
            revision=revision,  # to pin the tokenizer version
        )

        encoding = tokenizer(sequences, padding=padding)
        decoded_sequences = [
            tokenizer.decode(seq, skip_special_tokens=True, **decode_kwargs) for seq in encoding["input_ids"]
        ]

        encoding_data = encoding.data
        self.assertDictEqual(encoding_data, expected_encoding)

        for expected, decoded in zip(sequences, decoded_sequences):
            if self.test_sentencepiece_ignore_case:
                expected = expected.lower()
            self.assertEqual(expected, decoded)
Util for integration test. Text is tokenized and then reverted back to text. Both results are then checked. Args: expected_encoding: The expected result of the tokenizer output. model_name: The model name of the tokenizer to load and use. revision: The full git revision number of the model. This is to pin the tokenizer config and to avoid that tests start to fail if the config gets changed upstream. sequences: Can overwrite the texts that are used to check the tokenizer. This is useful if the tokenizer supports non-English languages like French. decode_kwargs: Additional args for the ``decode`` function which reverts the tokenized text back to a string. padding: Activates and controls padding of the tokenizer.
tokenizer_integration_test_util
python
huggingface/transformers
tests/test_tokenization_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_tokenization_common.py
Apache-2.0
def test_pickle_tokenizer(self):
    """Google pickle __getstate__ __setstate__ if you are struggling with this."""
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            self.assertIsNotNone(tokenizer)

            text = "Munich and Berlin are nice cities"
            subwords = tokenizer.tokenize(text)

            filename = os.path.join(self.tmpdirname, "tokenizer.bin")
            with open(filename, "wb") as handle:
                pickle.dump(tokenizer, handle)

            with open(filename, "rb") as handle:
                tokenizer_new = pickle.load(handle)

            subwords_loaded = tokenizer_new.tokenize(text)

            self.assertListEqual(subwords, subwords_loaded)
Google pickle __getstate__ __setstate__ if you are struggling with this.
test_pickle_tokenizer
python
huggingface/transformers
tests/test_tokenization_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_tokenization_common.py
Apache-2.0
def test_continue_final_message_with_trim(self):
    """Regression test for chat templates with trimming: https://github.com/huggingface/transformers/pull/34214"""
    dummy_template = """
    {%- for message in messages %}
        {{- "<|im_start|>" + message['role'] + "\n" + message['content'] | trim + "<|im_end|>" + "\n"}}
    {%- endfor %}"""
    dummy_conversation = [
        {"role": "system", "content": "system message"},
        {"role": "user", "content": "user message"},
        {"role": "assistant", "content": "assistant message "},  # Note the trailing whitespace
    ]
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            output = tokenizer.apply_chat_template(
                dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=False
            )
            self.assertEqual(
                output,
                "<|im_start|>system\nsystem message<|im_end|>\n<|im_start|>user\nuser message<|im_end|>\n<|im_start|>assistant\nassistant message<|im_end|>\n",
            )
            prefill_output = tokenizer.apply_chat_template(
                dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=True
            )
            # Assert that the final message is unterminated
            self.assertEqual(
                prefill_output,
                "<|im_start|>system\nsystem message<|im_end|>\n<|im_start|>user\nuser message<|im_end|>\n<|im_start|>assistant\nassistant message",
            )
Regression test for chat templates with trimming: https://github.com/huggingface/transformers/pull/34214
test_continue_final_message_with_trim
python
huggingface/transformers
tests/test_tokenization_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_tokenization_common.py
Apache-2.0
def test_continue_final_message_with_decoy_earlier_message(self):
    """Regression test for chat templates where an earlier message has similar content to the final message
    https://github.com/huggingface/transformers/issues/35433"""
    dummy_template = """
    {%- for message in messages %}
        {{- "<|im_start|>" + message['role'] + "\n" + message['content'] | trim + "<|im_end|>" + "\n"}}
    {%- endfor %}"""
    dummy_conversation = [
        {"role": "user", "content": "hi 0"},
        {"role": "assistant", "content": "bye: 0"},
        {"role": "user", "content": "hi 1"},
        {"role": "assistant", "content": "bye: "},
    ]
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            prefill_output = tokenizer.apply_chat_template(
                dummy_conversation, chat_template=dummy_template, tokenize=False, continue_final_message=True
            )
            # Assert that the final message is unterminated
            self.assertEqual(
                prefill_output,
                "<|im_start|>user\nhi 0<|im_end|>\n<|im_start|>assistant\nbye: 0<|im_end|>\n<|im_start|>user\nhi 1<|im_end|>\n<|im_start|>assistant\nbye:",
            )
Regression test for chat templates where an earlier message has similar content to the final message https://github.com/huggingface/transformers/issues/35433
test_continue_final_message_with_decoy_earlier_message
python
huggingface/transformers
tests/test_tokenization_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_tokenization_common.py
Apache-2.0
def test_encode_plus_with_padding(self, use_padding_as_call_kwarg: bool):
    """
    This test checks that padding works as expected when tokenizing a sequence.
    Padding is expected to have no effect when the input is a single sequence and
    the padding-strategy is not `max_length`. Otherwise it pads to the specified max-length
    using the tokenizer class's `padding_side` attribute. Also, we check that passing `padding_side`
    as a call-time kwarg works the same way as setting the `tokenizer.padding_side` attribute.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            sequence = "Sequence"

            # check correct behaviour if no pad_token_id exists and add it eventually
            self._check_no_pad_token_padding(tokenizer, sequence)

            padding_size = 10
            padding_idx = tokenizer.pad_token_id
            token_type_padding_idx = tokenizer.pad_token_type_id

            encoded_sequence = tokenizer.encode_plus(sequence, return_special_tokens_mask=True)
            input_ids = encoded_sequence["input_ids"]
            special_tokens_mask = encoded_sequence["special_tokens_mask"]
            sequence_length = len(input_ids)

            # Test 'longest' and 'no_padding' don't do anything
            not_padded_sequence = tokenizer.encode_plus(
                sequence,
                padding=True,
                return_special_tokens_mask=True,
            )
            not_padded_input_ids = not_padded_sequence["input_ids"]
            not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
            not_padded_sequence_length = len(not_padded_input_ids)

            self.assertEqual(sequence_length, not_padded_sequence_length)
            self.assertEqual(input_ids, not_padded_input_ids)
            self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask)

            not_padded_sequence = tokenizer.encode_plus(
                sequence,
                padding=False,
                return_special_tokens_mask=True,
            )
            not_padded_input_ids = not_padded_sequence["input_ids"]
            not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
            not_padded_sequence_length = len(not_padded_input_ids)

            self.assertEqual(sequence_length, not_padded_sequence_length)
            self.assertEqual(input_ids, not_padded_input_ids)
            self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask)

            # Test right padding
            tokenizer_kwargs_right = {
                "max_length": sequence_length + padding_size,
                "padding": "max_length",
                "return_special_tokens_mask": True,
            }

            if not use_padding_as_call_kwarg:
                tokenizer.padding_side = "right"
            else:
                tokenizer_kwargs_right["padding_side"] = "right"

            right_padded_sequence = tokenizer.encode_plus(sequence, **tokenizer_kwargs_right)
            right_padded_input_ids = right_padded_sequence["input_ids"]
            right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
            right_padded_sequence_length = len(right_padded_input_ids)

            self.assertEqual(sequence_length + padding_size, right_padded_sequence_length)
            self.assertEqual(input_ids + [padding_idx] * padding_size, right_padded_input_ids)
            self.assertEqual(special_tokens_mask + [1] * padding_size, right_padded_special_tokens_mask)

            # Test left padding
            tokenizer_kwargs_left = {
                "max_length": sequence_length + padding_size,
                "padding": "max_length",
                "return_special_tokens_mask": True,
            }

            if not use_padding_as_call_kwarg:
                tokenizer.padding_side = "left"
            else:
                tokenizer_kwargs_left["padding_side"] = "left"

            left_padded_sequence = tokenizer.encode_plus(sequence, **tokenizer_kwargs_left)
            left_padded_input_ids = left_padded_sequence["input_ids"]
            left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
            left_padded_sequence_length = len(left_padded_input_ids)

            self.assertEqual(sequence_length + padding_size, left_padded_sequence_length)
            self.assertEqual([padding_idx] * padding_size + input_ids, left_padded_input_ids)
            self.assertEqual([1] * padding_size + special_tokens_mask, left_padded_special_tokens_mask)

            if "token_type_ids" in tokenizer.model_input_names:
                token_type_ids = encoded_sequence["token_type_ids"]
                left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
                right_padded_token_type_ids = right_padded_sequence["token_type_ids"]

                self.assertEqual(
                    token_type_ids + [token_type_padding_idx] * padding_size, right_padded_token_type_ids
                )
                self.assertEqual(
                    [token_type_padding_idx] * padding_size + token_type_ids, left_padded_token_type_ids
                )

            if "attention_mask" in tokenizer.model_input_names:
                attention_mask = encoded_sequence["attention_mask"]
                right_padded_attention_mask = right_padded_sequence["attention_mask"]
                left_padded_attention_mask = left_padded_sequence["attention_mask"]

                self.assertEqual(attention_mask + [0] * padding_size, right_padded_attention_mask)
                self.assertEqual([0] * padding_size + attention_mask, left_padded_attention_mask)
This test checks that padding works as expected when tokenizing a sequence. Padding is expected to have no effect when the input is a single sequence and the padding-strategy is not `max_length`. Otherwise it pads to the specified max-length using the tokenizer class's `padding_side` attribute. Also, we check that passing `padding_side` as a call-time kwarg works the same way as setting the `tokenizer.padding_side` attribute.
test_encode_plus_with_padding
python
huggingface/transformers
tests/test_tokenization_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_tokenization_common.py
Apache-2.0
def test_batch_encode_dynamic_overflowing(self):
    """
    When calling batch_encode with multiple sequences, it can return a different number of
    overflowing encodings for each sequence:
    [
      Sequence 1: [Encoding 1, Encoding 2],
      Sequence 2: [Encoding 1],
      Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
    ]
    This needs to be padded so that it can be represented as a tensor
    """
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        tokenizer = self.get_rust_tokenizer(pretrained_name, **kwargs)

        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
            if is_torch_available():
                returned_tensor = "pt"
            elif is_tf_available():
                returned_tensor = "tf"
            elif is_flax_available():
                returned_tensor = "jax"
            else:
                self.skipTest(reason="No expected framework from PT, TF or JAX found")

            if not tokenizer.pad_token or tokenizer.pad_token_id < 0:
                self.skipTest(reason="This tokenizer has no padding token set, or pad_token_id < 0")

            tokens = tokenizer.encode_plus(
                "HuggingFace is solving NLP one commit at a time",
                max_length=6,
                padding=True,
                truncation=True,
                return_tensors=returned_tensor,
                return_overflowing_tokens=True,
            )

            for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
                self.assertEqual(len(tokens[key].shape), 2)

            # Mono sample
            tokens = tokenizer.batch_encode_plus(
                ["HuggingFace is solving NLP one commit at a time"],
                max_length=6,
                padding=True,
                truncation="only_first",
                return_tensors=returned_tensor,
                return_overflowing_tokens=True,
            )

            for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
                self.assertEqual(len(tokens[key].shape), 2)
                self.assertEqual(tokens[key].shape[-1], 6)

            # Multi sample
            tokens = tokenizer.batch_encode_plus(
                ["HuggingFace is solving NLP one commit at a time", "Very tiny input"],
                max_length=6,
                padding=True,
                truncation="only_first",
                return_tensors=returned_tensor,
                return_overflowing_tokens=True,
            )

            for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
                self.assertEqual(len(tokens[key].shape), 2)
                self.assertEqual(tokens[key].shape[-1], 6)
When calling batch_encode with multiple sequences, it can return a different number of overflowing encodings for each sequence: [ Sequence 1: [Encoding 1, Encoding 2], Sequence 2: [Encoding 1], Sequence 3: [Encoding 1, Encoding 2, ... Encoding N] ] This needs to be padded so that it can be represented as a tensor
test_batch_encode_dynamic_overflowing
python
huggingface/transformers
tests/test_tokenization_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_tokenization_common.py
Apache-2.0
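As a rough usage sketch of the behaviour exercised above (assuming a fast tokenizer such as `bert-base-uncased`), overflowing windows become extra rows of the returned tensor and `overflow_to_sample_mapping` records which input each row came from:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
encoded = tokenizer(
    ["HuggingFace is solving NLP one commit at a time", "Very tiny input"],
    max_length=6,
    padding=True,
    truncation=True,
    return_tensors="pt",
    return_overflowing_tokens=True,
)
# All rows are padded/truncated to max_length so they stack into one 2D tensor.
print(encoded["input_ids"].shape)
print(encoded["overflow_to_sample_mapping"])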
def test_custom_output_dir(self):
    """Test that output_dir is respected when specified."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        args = TrainingArguments(output_dir=tmp_dir)
        self.assertEqual(args.output_dir, tmp_dir)
Test that output_dir is respected when specified.
test_custom_output_dir
python
huggingface/transformers
tests/test_training_args.py
https://github.com/huggingface/transformers/blob/master/tests/test_training_args.py
Apache-2.0
def test_output_dir_creation(self):
    """Test that output_dir is created only when needed."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        output_dir = os.path.join(tmp_dir, "test_output")

        # Directory should not exist before creating args
        self.assertFalse(os.path.exists(output_dir))

        # Create args with save_strategy="no" - should not create directory
        args = TrainingArguments(
            output_dir=output_dir,
            do_train=True,
            save_strategy="no",
            report_to=None,
        )
        self.assertFalse(os.path.exists(output_dir))

        # Now set save_strategy="steps" - should create directory when needed
        args.save_strategy = "steps"
        args.save_steps = 1
        self.assertFalse(os.path.exists(output_dir))  # Still shouldn't exist

        # Directory should be created when actually needed (e.g. in Trainer)
Test that output_dir is created only when needed.
test_output_dir_creation
python
huggingface/transformers
tests/test_training_args.py
https://github.com/huggingface/transformers/blob/master/tests/test_training_args.py
Apache-2.0
def test_torch_empty_cache_steps_requirements(self):
    """Test that torch_empty_cache_steps is a positive integer or None."""
    # None is acceptable (feature is disabled):
    args = TrainingArguments(torch_empty_cache_steps=None)
    self.assertIsNone(args.torch_empty_cache_steps)

    # non-int is unacceptable:
    with self.assertRaises(ValueError):
        TrainingArguments(torch_empty_cache_steps=1.0)
    with self.assertRaises(ValueError):
        TrainingArguments(torch_empty_cache_steps="none")

    # negative int is unacceptable:
    with self.assertRaises(ValueError):
        TrainingArguments(torch_empty_cache_steps=-1)

    # zero is unacceptable:
    with self.assertRaises(ValueError):
        TrainingArguments(torch_empty_cache_steps=0)

    # positive int is acceptable:
    args = TrainingArguments(torch_empty_cache_steps=1)
    self.assertEqual(args.torch_empty_cache_steps, 1)
Test that torch_empty_cache_steps is a positive integer or None.
test_torch_empty_cache_steps_requirements
python
huggingface/transformers
tests/test_training_args.py
https://github.com/huggingface/transformers/blob/master/tests/test_training_args.py
Apache-2.0
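A small sketch of the accepted values, mirroring the assertions above (it assumes the same TrainingArguments defaults the test relies on, i.e. output_dir may be omitted):

from transformers import TrainingArguments

args = TrainingArguments(torch_empty_cache_steps=4)         # positive int: cache is emptied every 4 steps
args_off = TrainingArguments(torch_empty_cache_steps=None)  # None: the feature is disabled

try:
    TrainingArguments(torch_empty_cache_steps=0)  # zero (like negatives and non-ints) is rejected
except ValueError as err:
    print(err)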
def prepare_video(num_frames, num_channels, width=10, height=10, return_tensors="pil"):
    """This function prepares a video as a list of PIL images/NumPy arrays/PyTorch tensors."""
    video = []
    for i in range(num_frames):
        video.append(np.random.randint(255, size=(width, height, num_channels), dtype=np.uint8))

    if return_tensors == "pil":
        # PIL expects the channel dimension as last dimension
        video = [Image.fromarray(frame) for frame in video]
    elif return_tensors == "torch":
        # Torch images are typically in channels first format
        video = torch.tensor(video).permute(0, 3, 1, 2)
    elif return_tensors == "np":
        # Numpy images are typically in channels last format
        video = np.array(video)

    return video
This function prepares a video as a list of PIL images/NumPy arrays/PyTorch tensors.
prepare_video
python
huggingface/transformers
tests/test_video_processing_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_video_processing_common.py
Apache-2.0
def prepare_video_inputs(
    batch_size,
    num_frames,
    num_channels,
    min_resolution,
    max_resolution,
    equal_resolution=False,
    return_tensors="pil",
):
    """This function prepares a batch of videos: a list of lists of PIL images, or a list of lists of numpy arrays
    if one specifies return_tensors="np", or a list of lists of PyTorch tensors if one specifies
    return_tensors="torch".

    One can specify whether the videos are of the same resolution or not.
    """
    video_inputs = []
    for i in range(batch_size):
        if equal_resolution:
            width = height = max_resolution
        else:
            width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
        video = prepare_video(
            num_frames=num_frames,
            num_channels=num_channels,
            width=width,
            height=height,
            return_tensors=return_tensors,
        )
        video_inputs.append(video)

    return video_inputs
This function prepares a batch of videos: a list of lists of PIL images, or a list of lists of numpy arrays if one specifies return_tensors="np", or a list of lists of PyTorch tensors if one specifies return_tensors="torch". One can specify whether the videos are of the same resolution or not.
prepare_video_inputs
python
huggingface/transformers
tests/test_video_processing_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_video_processing_common.py
Apache-2.0
def test_nested_input(self):
    """Tests that the processor can work with nested lists, where each video is a list of arrays"""
    for video_processing_class in self.video_processor_list:
        video_processing = video_processing_class(**self.video_processor_dict)
        video_inputs = self.video_processor_tester.prepare_video_inputs(
            equal_resolution=False, return_tensors="np"
        )

        # Test not batched input
        video_inputs = [list(video) for video in video_inputs]
        encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
        expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
        self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))

        # Test batched
        expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
        encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
        self.assertEqual(
            tuple(encoded_videos.shape),
            (self.video_processor_tester.batch_size, *expected_output_video_shape),
        )
Tests that the processor can work with nested lists, where each video is a list of arrays.
test_nested_input
python
huggingface/transformers
tests/test_video_processing_common.py
https://github.com/huggingface/transformers/blob/master/tests/test_video_processing_common.py
Apache-2.0
def test_transform_and_reverse(self):
    r"""
    Classic tests to simply check if the conversion has been successful.
    """
    model_id = "hf-internal-testing/tiny-random-t5"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

    inp = tokenizer("This is me", return_tensors="pt")
    model = model.to_bettertransformer()
    self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

    output = model.generate(**inp)

    model = model.reverse_bettertransformer()
    self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

    with tempfile.TemporaryDirectory() as tmpdirname:
        model.save_pretrained(tmpdirname)
        model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
        self.assertFalse(
            any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
        )

        output_from_pretrained = model_reloaded.generate(**inp)
        torch.testing.assert_close(output, output_from_pretrained)
Classic tests to simply check if the conversion has been successful.
test_transform_and_reverse
python
huggingface/transformers
tests/bettertransformer/test_integration.py
https://github.com/huggingface/transformers/blob/master/tests/bettertransformer/test_integration.py
Apache-2.0
def test_error_save_pretrained(self):
    r"""
    The save_pretrained method should raise a ValueError if the model is in BetterTransformer mode.
    All should be good if the model is reversed.
    """
    model_id = "hf-internal-testing/tiny-random-t5"
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
    model = model.to_bettertransformer()

    with tempfile.TemporaryDirectory() as tmpdirname:
        with self.assertRaises(ValueError):
            model.save_pretrained(tmpdirname)

        model = model.reverse_bettertransformer()
        model.save_pretrained(tmpdirname)
The save_pretrained method should raise a ValueError if the model is in BetterTransformer mode. All should be good if the model is reversed.
test_error_save_pretrained
python
huggingface/transformers
tests/bettertransformer/test_integration.py
https://github.com/huggingface/transformers/blob/master/tests/bettertransformer/test_integration.py
Apache-2.0
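A hedged sketch of the conversion/saving interplay covered by the two tests above, using the same `hf-internal-testing/tiny-random-t5` toy checkpoint (this assumes the optional `optimum` dependency is installed):

import tempfile

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()  # converted models refuse to be saved ...

with tempfile.TemporaryDirectory() as tmp_dir:
    try:
        model.save_pretrained(tmp_dir)
    except ValueError:
        model = model.reverse_bettertransformer()  # ... until they are converted back
        model.save_pretrained(tmp_dir)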
def get_master_port(real_launcher=False):
    """
    When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed)
    the issue is that once the port is tied it can't be used anywhere else outside of this process,
    since torch.dist doesn't free the port until the process exits. Therefore for the sake of being
    able to run both emulated launcher and normal launcher tests we need 2 distinct ports.

    This function will give the right port in the right context. For real launcher it'll give the
    base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned.

    Args:
        `real_launcher`: whether a real launcher is going to be used, or the emulated one
    """
    master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT)
    if not real_launcher:
        master_port_base = str(int(master_port_base) + 1)
    return master_port_base
When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed) the issue is that once the port is tied it can't be used anywhere else outside of this process, since torch.dist doesn't free the port until the process exits. Therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports. This function will give the right port in the right context. For real launcher it'll give the base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned. Args: `real_launcher`: whether a real launcher is going to be used, or the emulated one
get_master_port
python
huggingface/transformers
tests/deepspeed/test_deepspeed.py
https://github.com/huggingface/transformers/blob/master/tests/deepspeed/test_deepspeed.py
Apache-2.0
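A tiny numeric illustration of the port logic described above; the "10999" base value is an assumption standing in for `DEFAULT_MASTER_PORT`:

import os

DEFAULT_MASTER_PORT = "10999"  # assumed base value, for illustration only

base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT)
print(base)                # real launcher      -> "10999"
print(str(int(base) + 1))  # emulated launcher  -> "11000"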
def require_deepspeed_aio(test_case):
    """
    Decorator marking a test that requires deepspeed aio (nvme)
    """
    if not is_deepspeed_available():
        return unittest.skip(reason="test requires deepspeed")(test_case)

    import deepspeed
    from deepspeed.ops.aio import AsyncIOBuilder

    if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]:
        return unittest.skip(reason="test requires deepspeed async-io")(test_case)
    else:
        return test_case
Decorator marking a test that requires deepspeed aio (nvme)
require_deepspeed_aio
python
huggingface/transformers
tests/deepspeed/test_deepspeed.py
https://github.com/huggingface/transformers/blob/master/tests/deepspeed/test_deepspeed.py
Apache-2.0
def get_master_port(real_launcher=False):
    """
    When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed)
    the issue is that once the port is tied it can't be used anywhere else outside of this process,
    since torch.dist doesn't free the port until the process exits. Therefore for the sake of being
    able to run both emulated launcher and normal launcher tests we need 2 distinct ports.

    This function will give the right port in the right context. For real launcher it'll give the
    base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned.

    Args:
        `real_launcher`: whether a real launcher is going to be used, or the emulated one
    """
    master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT)
    if not real_launcher:
        master_port_base = str(int(master_port_base) + 1)
    return master_port_base
When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed) the issue is that once the port is tied it can't be used anywhere else outside of this process, since torch.dist doesn't free the port until the process exits. Therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports. This function will give the right port in the right context. For real launcher it'll give the base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned. Args: `real_launcher`: whether a real launcher is going to be used, or the emulated one
get_master_port
python
huggingface/transformers
tests/fsdp/test_fsdp.py
https://github.com/huggingface/transformers/blob/master/tests/fsdp/test_fsdp.py
Apache-2.0
def test_get_assistant_to_target_input_ids(self):
    """Test the mapping from assistant tokens to target tokens."""
    expected_mapping = [0, 1, 2, self.translator.SUPPRESS_TOKEN_ID, self.translator.SUPPRESS_TOKEN_ID]
    actual_mapping = self.translator._assistant_to_target_input_ids.tolist()
    self.assertEqual(actual_mapping, expected_mapping)
Test the mapping from assistant tokens to target tokens.
test_get_assistant_to_target_input_ids
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
def test_get_suppress_input_ids(self):
    """Test the suppression of assistant input IDs not present in the target vocabulary."""
    expected_suppress_ids = [3, 4]
    actual_suppress_ids = self.translator._get_suppress_input_ids().tolist()
    self.assertEqual(actual_suppress_ids, expected_suppress_ids)
Test the suppression of assistant input IDs not present in the target vocabulary.
test_get_suppress_input_ids
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
def test_get_target_ids(self):
    """Test the translation of assistant candidate IDs to target candidate IDs."""
    assistant_input_ids = torch.LongTensor([[0, 1, 2]]).to(
        self.assistant_model.device
    )  # 'hello world foo' in assistant tokenizer
    target_input_ids = torch.LongTensor([[0, 1, 2]]).to(
        self.assistant_model.device
    )  # 'hello world foo' in target tokenizer
    assistant_candidate_ids = torch.LongTensor([[0, 1, 2, 4]]).to(
        self.assistant_model.device
    )  # 'hello world foo baz' in assistant tokenizer

    expected_target_ids = torch.LongTensor(
        [[0, 1, 2, self.translator.SUPPRESS_TOKEN_ID]]
    ).to(
        self.assistant_model.device
    )  # 'hello world foo baz' in target tokenizer (baz is mapped to self.translator.suppress_tokens_id since it does not exist in target vocab)

    actual_target_ids = self.translator.get_target_ids(
        assistant_input_ids, target_input_ids, assistant_candidate_ids
    )
    self.assertTrue(torch.equal(actual_target_ids, expected_target_ids))
Test the translation of assistant candidate IDs to target candidate IDs.
test_get_target_ids
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
def test_get_target_logits(self):
    """Test the conversion of assistant logits to target logits."""
    # Assistant logits for IDs 0, 1, 2
    assistant_logits = torch.FloatTensor([[[0.1, 0.2, 0.3, 0.4, self.translator.FILTER_VALUE]]]).to(
        self.assistant_model.device
    )  # Shape (1, 1, 5)

    # Expected target logits (target_vocab_size = 4)
    expected_target_logits = torch.full((1, 1, self.target_vocab_size), self.translator.FILTER_VALUE).to(
        self.assistant_model.device
    )
    expected_target_logits[0, 0, 0] = 0.1  # 'hello'
    expected_target_logits[0, 0, 1] = 0.2  # 'world'
    expected_target_logits[0, 0, 2] = 0.3  # 'foo'
    # The 'bar' token in target vocab remains at -inf

    actual_target_logits = self.translator.get_target_logits(assistant_logits)
    self.assertTrue(torch.equal(actual_target_logits, expected_target_logits))
Test the conversion of assistant logits to target logits.
test_get_target_logits
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
def test_same_instance_for_same_tokenizers(self):
    """Test that the same translator is returned for the same tokenizers."""
    translator1 = AssistantVocabTranslatorCache.get_translator(
        self.target_tokenizer,
        self.assistant_tokenizer,
        target_vocab_size=self.target_vocab_size,
        assistant_model=self.assistant_model,
        assistant_prune_lm_head=False,
    )
    translator2 = AssistantVocabTranslatorCache.get_translator(
        self.target_tokenizer,
        self.assistant_tokenizer,
        target_vocab_size=self.target_vocab_size,
        assistant_model=self.assistant_model,
        assistant_prune_lm_head=False,
    )
    self.assertIs(translator1, translator2, "Translators should be cached and identical")
Test that the same translator is returned for the same tokenizers.
test_same_instance_for_same_tokenizers
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
def test_different_instances_for_different_tokenizers(self):
    """Test that different tokenizers produce different translators."""
    translator1 = AssistantVocabTranslatorCache.get_translator(
        self.target_tokenizer,
        self.assistant_tokenizer,
        target_vocab_size=self.target_vocab_size,
        assistant_model=self.assistant_model,
        assistant_prune_lm_head=False,
    )
    translator2 = AssistantVocabTranslatorCache.get_translator(
        self.other_target_tokenizer,
        self.other_assistant_tokenizer,
        target_vocab_size=self.target_vocab_size,
        assistant_model=self.assistant_model,
        assistant_prune_lm_head=False,
    )
    self.assertIsNot(translator1, translator2, "Translators should differ for different tokenizers")
Test that different tokenizers produce different translators.
test_different_instances_for_different_tokenizers
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
def test_cache_with_weakref_key(self):
    """Ensure that the cache uses weak references as keys."""
    initial_cache_size = len(AssistantVocabTranslatorCache._cache)
    target_tokenizer = MockTokenizer({"hello": 0})
    assistant_tokenizer = MockTokenizer({"hello": 0})

    # Store translator in a local variable to avoid it being kept alive
    translator = AssistantVocabTranslatorCache.get_translator(
        target_tokenizer,
        assistant_tokenizer,
        target_vocab_size=self.target_vocab_size,
        assistant_model=self.assistant_model,
        assistant_prune_lm_head=False,
    )
    self.assertEqual(len(AssistantVocabTranslatorCache._cache), initial_cache_size + 1)

    # Delete all strong references
    del target_tokenizer
    del assistant_tokenizer
    del translator

    # Force garbage collection
    gc.collect()

    # Call cleanup to remove dead entries
    AssistantVocabTranslatorCache.cleanup()

    # The cache size remains increased due to strong references
    self.assertEqual(len(AssistantVocabTranslatorCache._cache), initial_cache_size + 1)
Ensure that the cache uses weak references as keys.
test_cache_with_weakref_key
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
def test_weakref_cache_cleanup(self):
    """Test that the cache cleans up translators when tokenizers are garbage collected."""

    def create_translator():
        target_tokenizer = MockTokenizer({"hello": 0})
        assistant_tokenizer = MockTokenizer({"hello": 0})
        translator = AssistantVocabTranslatorCache.get_translator(
            target_tokenizer,
            assistant_tokenizer,
            target_vocab_size=self.target_vocab_size,
            assistant_model=self.assistant_model,
            assistant_prune_lm_head=False,
        )
        # Create weak references before returning
        refs = (weakref.ref(translator), weakref.ref(target_tokenizer), weakref.ref(assistant_tokenizer))
        # Remove strong references inside the function
        del target_tokenizer
        del assistant_tokenizer
        del translator
        return refs

    translator_ref, target_ref, assistant_ref = create_translator()

    # Force garbage collection
    gc.collect()

    # Call cleanup to remove dead entries
    AssistantVocabTranslatorCache.cleanup()

    # The tokenizers and translator are not garbage collected due to strong references
    self.assertIsNotNone(target_ref(), "Target tokenizer should still be alive due to strong references")
    self.assertIsNotNone(assistant_ref(), "Assistant tokenizer should still be alive due to strong references")
    self.assertIsNotNone(translator_ref(), "Translator should still be alive due to strong references")
Test that the cache cleans up translators when tokenizers are garbage collected.
test_weakref_cache_cleanup
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
def test_mismatched_vocabularies(self):
    """Test handling of mismatched vocabularies between models"""
    # Create input with tokens present in main but not assistant vocab
    # Find a token that is not in the assistant tokenizer but in
    # the main tokenizer.
    missing_token = next(
        token
        for token in self.target_tokenizer.get_vocab()
        if token not in self.assistant_tokenizer.get_vocab()
        and token not in self.target_tokenizer.all_special_tokens
        and "reserved_" not in token
    )
    input_ids = torch.tensor([[self.target_tokenizer.convert_tokens_to_ids(missing_token)]])
    self.generator.input_ids = input_ids
    candidates, _ = self.generator.get_candidates(input_ids)
    self.assertIsNotNone(candidates)
Test handling of mismatched vocabularies between models
test_mismatched_vocabularies
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
def test_device_consistency(self):
    """Test handling of inputs on different devices"""
    input_ids = torch.tensor([[1, 2, 3]]).to(torch_device)
    self.generator.input_ids = input_ids
    candidates, _ = self.generator.get_candidates(input_ids)
    self.assertEqual(candidates.device, input_ids.device)
Test handling of inputs on different devices
test_device_consistency
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
def test_usd_vs_vanilla_sampling(cls):
    """Test that USD matches vanilla sampling with temperature set to nearly 0"""
    prompt = "Test text"

    pipe_vanilla = pipeline(
        "text-generation",
        model=cls.target_name,
    )
    pipe_vanilla_output = pipe_vanilla(prompt, max_new_tokens=5, do_sample=False)
    vanilla_text = pipe_vanilla_output[0]["generated_text"]

    pipe_usd = pipeline(
        "text-generation",
        model=cls.target_name,
        assistant_model=cls.assistant_name,
    )
    pipe_usd_output = pipe_usd(prompt, max_new_tokens=5, do_sample=True, temperature=1e-9)  # Nearly 0 temperature
    usd_text = pipe_usd_output[0]["generated_text"]

    # Assert that the outputs match
    cls.assertEqual(usd_text, vanilla_text)
Test that USD matches vanilla sampling with temperature set to nearly 0
test_usd_vs_vanilla_sampling
python
huggingface/transformers
tests/generation/test_candidate_generator.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_candidate_generator.py
Apache-2.0
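A hedged sketch of the comparison performed above: with a near-zero temperature, sampling through an assistant model should reproduce greedy decoding of the target model alone. The model names below are placeholders standing in for the class attributes used in the test.

from transformers import pipeline

target_name = "hf-internal-testing/tiny-random-gpt2"     # assumption: any small causal LM
assistant_name = "hf-internal-testing/tiny-random-gpt2"  # assumption: a compatible assistant

vanilla = pipeline("text-generation", model=target_name)
assisted = pipeline("text-generation", model=target_name, assistant_model=assistant_name)

greedy_text = vanilla("Test text", max_new_tokens=5, do_sample=False)[0]["generated_text"]
assisted_text = assisted("Test text", max_new_tokens=5, do_sample=True, temperature=1e-9)[0]["generated_text"]
assert assisted_text == greedy_text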
def test_kwarg_init(self):
    """Tests that we can overwrite attributes at `from_pretrained` time."""
    default_config = GenerationConfig()
    self.assertEqual(default_config.temperature, 1.0)
    self.assertEqual(default_config.do_sample, False)
    self.assertEqual(default_config.num_beams, 1)

    config = GenerationConfig(
        do_sample=True,
        temperature=0.7,
        length_penalty=1.0,
        bad_words_ids=[[1, 2, 3], [4, 5]],
    )
    self.assertEqual(config.temperature, 0.7)
    self.assertEqual(config.do_sample, True)
    self.assertEqual(config.num_beams, 1)

    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

    self.assertEqual(loaded_config.temperature, 1.0)
    self.assertEqual(loaded_config.do_sample, True)
    self.assertEqual(loaded_config.num_beams, 1)  # default value
Tests that we can overwrite attributes at `from_pretrained` time.
test_kwarg_init
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
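The override-at-load-time behaviour can be summarised with a short sketch using the same API calls as the test:

import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    # temperature is overridden at load time; do_sample keeps its saved value
    reloaded = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

assert reloaded.temperature == 1.0
assert reloaded.do_sample is True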
def test_validate(self):
    """
    Tests that the `validate` method is working as expected. Note that `validate` is called at initialization time
    """
    logger = transformers_logging.get_logger("transformers.generation.configuration_utils")

    # A correct configuration will not throw any warning
    with CaptureLogger(logger) as captured_logs:
        GenerationConfig()
    self.assertEqual(len(captured_logs.out), 0)

    # Inconsequent but technically wrong configuration will throw a warning (e.g. setting sampling
    # parameters with `do_sample=False`). May be escalated to an error in the future.
    with CaptureLogger(logger) as captured_logs:
        GenerationConfig(return_dict_in_generate=False, output_scores=True)
    self.assertNotEqual(len(captured_logs.out), 0)
    with CaptureLogger(logger) as captured_logs:
        generation_config_bad_temperature = GenerationConfig(do_sample=False, temperature=0.5)  # store for later
    self.assertNotEqual(len(captured_logs.out), 0)

    # Expanding on the case above, we can update a bad configuration to get rid of the warning. Ideally,
    # that is done by unsetting the parameter (i.e. setting it to None)
    with CaptureLogger(logger) as captured_logs:
        # BAD - 0.9 means it is still set, we should warn
        generation_config_bad_temperature.update(temperature=0.9)
    self.assertNotEqual(len(captured_logs.out), 0)
    with CaptureLogger(logger) as captured_logs:
        # CORNER CASE - 1.0 is the default, we can't detect whether it is set by the user or not, we shouldn't warn
        generation_config_bad_temperature.update(temperature=1.0)
    self.assertEqual(len(captured_logs.out), 0)
    with CaptureLogger(logger) as captured_logs:
        # OK - None means it is unset, nothing to warn about
        generation_config_bad_temperature.update(temperature=None)
    self.assertEqual(len(captured_logs.out), 0)

    # Impossible sets of constraints/parameters will raise an exception
    with self.assertRaises(ValueError):
        GenerationConfig(do_sample=False, num_beams=1, num_return_sequences=2)
    with self.assertRaises(ValueError):
        # dummy constraint
        GenerationConfig(do_sample=True, num_beams=2, constraints=["dummy"])
    with self.assertRaises(ValueError):
        GenerationConfig(do_sample=True, num_beams=2, force_words_ids=[[[1, 2, 3]]])

    # Passing `generate()`-only flags to `validate` will raise an exception
    with self.assertRaises(ValueError):
        GenerationConfig(logits_processor="foo")

    # Model-specific parameters will NOT raise an exception or a warning
    with CaptureLogger(logger) as captured_logs:
        GenerationConfig(foo="bar")
    self.assertEqual(len(captured_logs.out), 0)

    # By default we throw a short warning. However, we log with INFO level the details.
    # Default: we don't log the incorrect input values, only a short summary. We explain how to get more details.
    with LoggingLevel(logging.WARNING):
        with CaptureLogger(logger) as captured_logs:
            GenerationConfig(do_sample=False, temperature=0.5)
    self.assertNotIn("0.5", captured_logs.out)
    self.assertTrue(len(captured_logs.out) < 150)  # short log
    self.assertIn("Set `TRANSFORMERS_VERBOSITY=info` for more details", captured_logs.out)

    # INFO level: we share the full deets
    with LoggingLevel(logging.INFO):
        with CaptureLogger(logger) as captured_logs:
            GenerationConfig(do_sample=False, temperature=0.5)
    self.assertIn("0.5", captured_logs.out)
    self.assertTrue(len(captured_logs.out) > 400)  # long log
    self.assertNotIn("Set `TRANSFORMERS_VERBOSITY=info` for more details", captured_logs.out)

    # Finally, we can set `strict=True` to raise an exception on what would otherwise be a warning.
    generation_config = GenerationConfig()
    generation_config.temperature = 0.5
    generation_config.do_sample = False
    with self.assertRaises(ValueError):
        generation_config.validate(strict=True)
Tests that the `validate` method is working as expected. Note that `validate` is called at initialization time
test_validate
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
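A condensed sketch of the warning-versus-error behaviour checked above: an inconsistent flag combination only warns by default, while `validate(strict=True)` escalates it to an exception.

from transformers import GenerationConfig

config = GenerationConfig()
config.temperature = 0.5  # only meaningful when do_sample=True
config.do_sample = False

config.validate()  # logs a warning but does not raise
try:
    config.validate(strict=True)
except ValueError as err:
    print(err)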
def test_refuse_to_save(self):
    """Tests that we refuse to save a generation config that fails validation."""

    # setting the temperature alone is invalid, as we also need to set do_sample to True -> throws a warning that
    # is caught, doesn't save, and raises an exception
    config = GenerationConfig()
    config.temperature = 0.5
    with tempfile.TemporaryDirectory() as tmp_dir:
        with self.assertRaises(ValueError) as exc:
            config.save_pretrained(tmp_dir)
        self.assertTrue("Fix these issues to save the configuration." in str(exc.exception))
        self.assertTrue("`temperature` is set to `0.5`" in str(exc.exception))
        self.assertTrue(len(os.listdir(tmp_dir)) == 0)

    # greedy decoding throws an exception if we try to return multiple sequences -> throws an exception that is
    # caught, doesn't save, and raises a warning
    config = GenerationConfig()
    config.num_return_sequences = 2
    with tempfile.TemporaryDirectory() as tmp_dir:
        with self.assertRaises(ValueError) as exc:
            config.save_pretrained(tmp_dir)
        self.assertTrue("Fix these issues to save the configuration." in str(exc.exception))
        self.assertTrue(
            "Greedy methods without beam search do not support `num_return_sequences` different than 1"
            in str(exc.exception)
        )
        self.assertTrue(len(os.listdir(tmp_dir)) == 0)

    # Final check: no logs at warning level/warnings/exceptions thrown if it is correct, and file is saved.
    config = GenerationConfig()
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Catch warnings
        with warnings.catch_warnings(record=True) as captured_warnings:
            # Catch logs (up to WARNING level, the default level)
            with LoggingLevel(logging.WARNING):
                logger = transformers_logging.get_logger("transformers.generation.configuration_utils")
                with CaptureLogger(logger) as captured_logs:
                    config.save_pretrained(tmp_dir)
        self.assertEqual(len(captured_warnings), 0)
        self.assertEqual(len(captured_logs.out), 0)
        self.assertEqual(len(os.listdir(tmp_dir)), 1)
Tests that we refuse to save a generation config that fails validation.
test_refuse_to_save
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_generation_mode(self):
    """Tests that the `get_generation_mode` method is working as expected."""
    config = GenerationConfig()
    self.assertEqual(config.get_generation_mode(), GenerationMode.GREEDY_SEARCH)

    config = GenerationConfig(do_sample=True)
    self.assertEqual(config.get_generation_mode(), GenerationMode.SAMPLE)

    config = GenerationConfig(num_beams=2)
    self.assertEqual(config.get_generation_mode(), GenerationMode.BEAM_SEARCH)

    config = GenerationConfig(top_k=10, do_sample=False, penalty_alpha=0.6)
    self.assertEqual(config.get_generation_mode(), GenerationMode.CONTRASTIVE_SEARCH)

    config = GenerationConfig()
    self.assertEqual(config.get_generation_mode(assistant_model="foo"), GenerationMode.ASSISTED_GENERATION)
Tests that the `get_generation_mode` method is working as expected.
test_generation_mode
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
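A short sketch mapping a few flag combinations to generation modes, mirroring the assertions above (GenerationMode is assumed to be importable from `transformers.generation`, as in this test module):

from transformers import GenerationConfig
from transformers.generation import GenerationMode

assert GenerationConfig().get_generation_mode() == GenerationMode.GREEDY_SEARCH
assert GenerationConfig(do_sample=True).get_generation_mode() == GenerationMode.SAMPLE
assert GenerationConfig(num_beams=2).get_generation_mode() == GenerationMode.BEAM_SEARCH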
def test_static_cache_without_cache_config(self):
    """Regression test for #35026 -- static cache should work without a cache config."""
    config = GenerationConfig(cache_implementation="static")
    self.assertEqual(config.cache_implementation, "static")
    self.assertEqual(config.cache_config, None)
Regression test for #35026 -- static cache should work without a cache config.
test_static_cache_without_cache_config
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_sequence_bias(self):
    """Tests that GenerationConfig is serialized and SequenceBiasLogitsProcessor is initialized with sequence_bias
    parameter"""
    generation_config = GenerationConfig()
    sequence_bias = [[[45, 67], -0.6], [[89], 1.2]]
    generation_config.sequence_bias = sequence_bias
    with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
        generation_config.save_pretrained(tmp_dir)
        new_config = GenerationConfig.from_pretrained(tmp_dir)
    self.assertSequenceEqual(new_config.sequence_bias, sequence_bias)

    expected_sequence_bias = {(45, 67): -0.6, (89,): 1.2}
    bias_logits_processor = SequenceBiasLogitsProcessor(new_config.sequence_bias)
    self.assertDictEqual(bias_logits_processor.sequence_bias, expected_sequence_bias)
Tests that GenerationConfig is serialized and SequenceBiasLogitsProcessor is initialized with sequence_bias parameter
test_serialize_generation_sequence_bias
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
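The format round-trip at the heart of this test can be sketched briefly: the config stores `sequence_bias` as a JSON-friendly list of `[token_ids, bias]` pairs, while the processor keys it by token-id tuples (the import path is assumed to match this test module):

from transformers import GenerationConfig
from transformers.generation import SequenceBiasLogitsProcessor

config = GenerationConfig(sequence_bias=[[[45, 67], -0.6], [[89], 1.2]])
processor = SequenceBiasLogitsProcessor(config.sequence_bias)
print(processor.sequence_bias)  # expected: {(45, 67): -0.6, (89,): 1.2}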
def test_serialize_generation_min_length_eos_token(self): """Tests that GenerationConfig is serialized and MinLengthLogitsProcessor is initialized with min_length and eos_token_id""" eos_token_id = 0 min_length = 10 generation_config = GenerationConfig(min_length=min_length, eos_token_id=eos_token_id) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.min_length, min_length) self.assertEqual(new_config.eos_token_id, eos_token_id) min_dist_processor = MinLengthLogitsProcessor( min_length=new_config.min_length, eos_token_id=new_config.eos_token_id ) self.assertEqual(min_dist_processor.min_length, min_length) self.assertEqual(min_dist_processor.eos_token_id, eos_token_id)
Tests that GenerationConfig is serialized and MinLengthLogitsProcessor is initialized with min_length and eos_token_id
test_serialize_generation_min_length_eos_token
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_min_new_tokens(self): """Tests that GenerationConfig is serialized and MinNewTokensLengthLogitsProcessor is initialized with min_new_tokens""" eos_token_id = 0 min_new_tokens = 5 prompt_length_to_skip = 2 generation_config = GenerationConfig(min_new_tokens=min_new_tokens) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.min_new_tokens, min_new_tokens) min_new_tokens_processor = MinNewTokensLengthLogitsProcessor( prompt_length_to_skip=prompt_length_to_skip, min_new_tokens=new_config.min_new_tokens, eos_token_id=eos_token_id, ) self.assertEqual(min_new_tokens_processor.min_new_tokens, min_new_tokens)
Tests that GenerationConfig is serialized and MinNewTokensLengthLogitsProcessor is initialized with min_new_tokens
test_serialize_generation_min_new_tokens
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_temperature(self): """Tests that GenerationConfig is serialized and TemperatureLogitsWarper is initialized with temperature""" temperature = 2.0 generation_config = GenerationConfig(temperature=temperature, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.temperature, temperature) temperature_logits_warper = TemperatureLogitsWarper(temperature=new_config.temperature) self.assertEqual(temperature_logits_warper.temperature, temperature)
Tests that GenerationConfig is serialized and TemperatureLogitsWarper is initialized with temperature
test_serialize_generation_temperature
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_repetition_penalty(self): """Tests that GenerationConfig is serialized and RepetitionPenaltyLogitsProcessor is initialized with repetition_penalty""" penalty = 2.0 generation_config = GenerationConfig(repetition_penalty=penalty) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.repetition_penalty, penalty) rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=new_config.repetition_penalty) self.assertEqual(rep_penalty_proc.penalty, penalty)
Tests that GenerationConfig is serialized and RepetitionPenaltyLogitsProcessor is initialized with repetition_penalty
test_serialize_generation_repetition_penalty
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_encoder_repetition_penalty(self): """Tests that GenerationConfig is serialized and EncoderRepetitionPenaltyLogitsProcessor is initialized with penalty and input_ids""" penalty = 2.0 input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) generation_config = GenerationConfig(encoder_repetition_penalty=penalty) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.encoder_repetition_penalty, penalty) rep_penalty_proc = EncoderRepetitionPenaltyLogitsProcessor( penalty=new_config.encoder_repetition_penalty, encoder_input_ids=input_ids ) self.assertEqual(rep_penalty_proc.penalty, 1 / penalty) torch.testing.assert_close(rep_penalty_proc.encoder_input_ids, input_ids)
Tests that GenerationConfig is serialized and EncoderRepetitionPenaltyLogitsProcessor is initialized with penalty and input_ids
test_serialize_generation_encoder_repetition_penalty
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_top_p(self): """Tests that GenerationConfig is serialized and TopPLogitsWarper is initialized with top_p""" top_p = 0.8 generation_config = GenerationConfig(top_p=top_p, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.top_p, top_p) rep_penalty_proc = TopPLogitsWarper(top_p=new_config.top_p) self.assertEqual(rep_penalty_proc.top_p, top_p)
Tests that GenerationConfig is serialized and TopPLogitsWarper is initialized with top_p
test_serialize_generation_top_p
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_top_k(self): """Tests that GenerationConfig is serialized and TopKLogitsWarper is initialized with top_k""" top_k = 2 generation_config = GenerationConfig(top_k=top_k, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.top_k, top_k) top_k_logits_wrap = TopKLogitsWarper(top_k=new_config.top_k) self.assertEqual(top_k_logits_wrap.top_k, top_k)
Tests that GenerationConfig is serialized and TopKLogitsWarper is initialized with top_k
test_serialize_generation_top_k
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_min_p(self): """Tests that GenerationConfig is serialized and MinPLogitsWarper is initialized with min_p""" min_p = 0.8 generation_config = GenerationConfig(min_p=min_p, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.min_p, min_p) min_k_logits_wrap = MinPLogitsWarper(min_p=new_config.min_p) self.assertEqual(min_k_logits_wrap.min_p, min_p)
Tests that GenerationConfig is serialized and MinPLogitsWarper is initialized with min_p
test_serialize_generation_min_p
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_typical_p(self): """Tests that GenerationConfig is serialized and TypicalLogitsWarper is initialized with mass""" mass = 0.8 generation_config = GenerationConfig(typical_p=mass, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.typical_p, mass) typical_p_logits_wrap = TypicalLogitsWarper(mass=new_config.typical_p) self.assertEqual(typical_p_logits_wrap.mass, mass)
Tests that GenerationConfig is serialized and TypicalLogitsWarper is initialized with mass
test_serialize_generation_typical_p
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_epsilon_cutoff(self): """Tests that GenerationConfig is serialized and EpsilonLogitsWarper is initialized with epsilon""" epsilon = 0.8 generation_config = GenerationConfig(epsilon_cutoff=epsilon, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.epsilon_cutoff, epsilon) epsilon_logits_wrap = EpsilonLogitsWarper(epsilon=new_config.epsilon_cutoff) self.assertEqual(epsilon_logits_wrap.epsilon, epsilon)
Tests that GenerationConfig is serialized and EpsilonLogitsWarper is initialized with epsilon
test_serialize_generation_epsilon_cutoff
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_eta_cutoff(self): """Tests that GenerationConfig is serialized and EtaLogitsWarper is initialized with epsilon""" epsilon = 0.8 generation_config = GenerationConfig(eta_cutoff=epsilon, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.eta_cutoff, epsilon) eta_logits_wrap = EtaLogitsWarper(epsilon=new_config.eta_cutoff) self.assertEqual(eta_logits_wrap.epsilon, epsilon)
Tests that GenerationConfig is serialized and EtaLogitsWarper is initialized with epsilon
test_serialize_generation_eta_cutoff
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_ngram_size(self): """Tests that GenerationConfig is serialized and NoRepeatNGramLogitsProcessor is initialized with ngram_size""" ngram_size = 2 generation_config = GenerationConfig(no_repeat_ngram_size=ngram_size, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.no_repeat_ngram_size, ngram_size) no_repeat_ngram_proc = NoRepeatNGramLogitsProcessor(ngram_size=new_config.no_repeat_ngram_size) self.assertEqual(no_repeat_ngram_proc.ngram_size, ngram_size)
Tests that GenerationConfig is serialized and NoRepeatNGramLogitsProcessor is initialized with ngram_size
test_serialize_generation_ngram_size
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_encoder_ngram_size(self): """Tests that GenerationConfig is serialized and EncoderNoRepeatNGramLogitsProcessor is initialized with ngram_size""" ngram_size = 2 input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) generation_config = GenerationConfig(encoder_no_repeat_ngram_size=ngram_size, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.encoder_no_repeat_ngram_size, ngram_size) encoder_no_repeat_ngram_proc = EncoderNoRepeatNGramLogitsProcessor( encoder_ngram_size=new_config.encoder_no_repeat_ngram_size, encoder_input_ids=input_ids ) self.assertEqual(encoder_no_repeat_ngram_proc.ngram_size, ngram_size)
Tests that GenerationConfig is serialized and EncoderNoRepeatNGramLogitsProcessor is initialized with ngram_size
test_serialize_generation_encoder_ngram_size
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_bad_words_ids(self): """Tests that GenerationConfig is serialized and NoBadWordsLogitsProcessor is initialized with bad_words_ids""" bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] generation_config = GenerationConfig(bad_words_ids=bad_word_tokens) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertSequenceEqual(new_config.bad_words_ids, bad_word_tokens) no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=new_config.bad_words_ids) self.assertSequenceEqual(no_bad_words_dist_proc.bad_word_ids, bad_word_tokens)
Tests that GenerationConfig is serialized and NoBadWordsLogitsProcessor is initialized with bad_words_ids
test_serialize_generation_bad_words_ids
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_num_beams(self): """Tests that GenerationConfig is serialized and PrefixConstrainedLogitsProcessor is initialized with num_beams""" num_beams = 1 def prefix_allowed_tokens_fn(batch_id, inputs_ids): return [[0, 1], [2, 3]][batch_id] generation_config = GenerationConfig(num_beams=num_beams) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.num_beams, num_beams) prefix_constrained_logits_proc = PrefixConstrainedLogitsProcessor( prefix_allowed_tokens_fn, num_beams=new_config.num_beams ) self.assertEqual(prefix_constrained_logits_proc._num_beams, num_beams)
Tests that GenerationConfig is serialized and PrefixConstrainedLogitsProcessor is initialized with num_beams
test_serialize_generation_num_beams
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_diversity_penalty_and_num_bean_groups(self): """Tests that GenerationConfig is serialized and HammingDiversityLogitsProcessor is initialized with diversity_penalty_and_num_bean_groups""" num_beams = 2 num_beam_groups = 2 diversity_penalty = 1.0 generation_config = GenerationConfig( num_beams=num_beams, diversity_penalty=diversity_penalty, num_beam_groups=num_beam_groups ) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.num_beams, num_beams) self.assertEqual(new_config.diversity_penalty, diversity_penalty) self.assertEqual(new_config.num_beam_groups, num_beam_groups) diversity_logits_processor = HammingDiversityLogitsProcessor( diversity_penalty=new_config.diversity_penalty, num_beams=new_config.num_beams, num_beam_groups=new_config.num_beam_groups, ) self.assertEqual(diversity_logits_processor._num_beams, num_beams) self.assertEqual(diversity_logits_processor._diversity_penalty, diversity_penalty) self.assertEqual(diversity_logits_processor._num_sub_beams, num_beams // num_beam_groups)
Tests that GenerationConfig is serialized and HammingDiversityLogitsProcessor is initialized with diversity_penalty, num_beams, and num_beam_groups
test_serialize_generation_diversity_penalty_and_num_bean_groups
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_bos_token_id(self): """Tests that GenerationConfig is serialized and ForcedBOSTokenLogitsProcessor is initialized with bos_token_id""" bos_token_id = 0 generation_config = GenerationConfig(bos_token_id=bos_token_id) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.bos_token_id, bos_token_id) logits_processor = ForcedBOSTokenLogitsProcessor(bos_token_id=new_config.bos_token_id) self.assertEqual(logits_processor.bos_token_id, bos_token_id)
Tests that GenerationConfig is serialized and ForcedBOSTokenLogitsProcessor is initialized with bos_token_id
test_serialize_generation_bos_token_id
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_eos_token_id(self): """Tests that GenerationConfig is serialized and ForcedEOSTokenLogitsProcessor is initialized with eos_token_id""" eos_token_id = 0 max_length = 5 generation_config = GenerationConfig(eos_token_id=eos_token_id) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.eos_token_id, eos_token_id) logits_processor = ForcedEOSTokenLogitsProcessor( max_length=max_length, eos_token_id=new_config.eos_token_id, device=torch_device ) self.assertEqual(logits_processor.eos_token_id, eos_token_id)
Tests that GenerationConfig is serialized and ForcedEOSTokenLogitsProcessor is initialized with eos_token_id
test_serialize_generation_eos_token_id
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_exponential_decay_length_penalty(self): """Tests that GenerationConfig is serialized and ExponentialDecayLengthPenalty is initialized with regulation_start and regulation_factor""" eos_token_id = 0 penalty_start = 5 penalty_factor = 1.1 input_ids_seq_length = 10 exponential_decay_length_penalty = (penalty_start, penalty_factor) generation_config = GenerationConfig(exponential_decay_length_penalty=exponential_decay_length_penalty) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.exponential_decay_length_penalty, [penalty_start, penalty_factor]) exponential_decay_processor = ExponentialDecayLengthPenalty( exponential_decay_length_penalty=new_config.exponential_decay_length_penalty, eos_token_id=eos_token_id, input_ids_seq_length=input_ids_seq_length, ) self.assertEqual( exponential_decay_processor.regulation_start, exponential_decay_length_penalty[0] + input_ids_seq_length ) self.assertEqual(exponential_decay_processor.regulation_factor, exponential_decay_length_penalty[1])
Tests that GenerationConfig is serialized and ExponentialDecayLengthPenalty is initialized with regulation_start and regulation_factor
test_serialize_generation_exponential_decay_length_penalty
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_begin_suppress_tokens(self): """Tests that GenerationConfig is serialized and SuppressTokensAtBeginLogitsProcessor is initialized with begin_suppress_token and begin_index""" begin_suppress_tokens = [220, 50256] begin_index = 0 generation_config = GenerationConfig(begin_suppress_tokens=begin_suppress_tokens) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertSequenceEqual(new_config.begin_suppress_tokens, begin_suppress_tokens) suppress_processor = SuppressTokensAtBeginLogitsProcessor( begin_suppress_tokens=new_config.begin_suppress_tokens, begin_index=begin_index ) self.assertSequenceEqual(suppress_processor.begin_suppress_tokens, begin_suppress_tokens) self.assertEqual(suppress_processor.begin_index, begin_index)
Tests that GenerationConfig is serialized and SuppressTokensAtBeginLogitsProcessor is initialized with begin_suppress_token and begin_index
test_serialize_generation_begin_suppress_tokens
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_suppress_tokens(self): """Tests that GenerationConfig is serialized and SuppressTokensLogitsProcessor is initialized with suppress_token""" suppress_tokens = [220, 50256] generation_config = GenerationConfig(suppress_tokens=suppress_tokens) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertSequenceEqual(new_config.suppress_tokens, suppress_tokens) suppress_processor = SuppressTokensLogitsProcessor(suppress_tokens=new_config.suppress_tokens) self.assertSequenceEqual(suppress_processor.suppress_tokens, suppress_tokens)
Tests that GenerationConfig is serialized and SuppressTokensLogitsProcessor is initialized with suppress_token
test_serialize_generation_suppress_tokens
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_guidance_scale(self): """Tests that GenerationConfig is serialized and ClassifierFreeGuidanceLogitsProcessor is initialized with guidance_scale""" guidance_scale = 2.0 generation_config = GenerationConfig(guidance_scale=guidance_scale) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.guidance_scale, guidance_scale) classifier_processor = ClassifierFreeGuidanceLogitsProcessor(guidance_scale=new_config.guidance_scale) self.assertEqual(classifier_processor.guidance_scale, guidance_scale)
Tests that GenerationConfig is serialized and ClassifierFreeGuidanceLogitsProcessor is initialized with guidance_scale
test_serialize_generation_guidance_scale
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_guidance_scale_unbatched(self): """Tests that GenerationConfig is serialized and UnbatchedClassifierFreeGuidanceLogitsProcessor is initialized with guidance_scale""" guidance_scale = 2.0 input_ids = torch.LongTensor([[0]]) generation_config = GenerationConfig(guidance_scale=guidance_scale) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.guidance_scale, guidance_scale) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(new_config.guidance_scale, {}, input_ids) self.assertEqual(cfg.guidance_scale, guidance_scale)
Tests that GenerationConfig is serialized and UnbatchedClassifierFreeGuidanceLogitsProcessor is initialized with guidance_scale
test_serialize_generation_guidance_scale_unbatched
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def test_serialize_generation_watermarking_config(self): """Tests that GenerationConfig is serialized and WatermarkLogitsProcessor is initialized with WatermarkingConfig parameters""" vocab_size = 20 bias = 2.0 greenlist_ratio = 0.5 hashing_key = 10 seeding_scheme = "lefthash" context_width = 10 watermarking_config = WatermarkingConfig( bias=bias, greenlist_ratio=greenlist_ratio, hashing_key=hashing_key, seeding_scheme=seeding_scheme, context_width=context_width, ) generation_config = GenerationConfig(watermarking_config=watermarking_config) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.watermarking_config.bias, bias) self.assertEqual(new_config.watermarking_config.greenlist_ratio, greenlist_ratio) self.assertEqual(new_config.watermarking_config.hashing_key, hashing_key) self.assertEqual(new_config.watermarking_config.seeding_scheme, seeding_scheme) self.assertEqual(new_config.watermarking_config.context_width, context_width) watermark = WatermarkLogitsProcessor( vocab_size=vocab_size, device=torch_device, greenlist_ratio=new_config.watermarking_config.greenlist_ratio, bias=new_config.watermarking_config.bias, hashing_key=new_config.watermarking_config.hashing_key, seeding_scheme=new_config.watermarking_config.seeding_scheme, context_width=new_config.watermarking_config.context_width, ) self.assertEqual(watermark.bias, bias) self.assertEqual(watermark.greenlist_size, int(vocab_size * greenlist_ratio)) self.assertEqual(watermark.hash_key, hashing_key) self.assertEqual(watermark.seeding_scheme, seeding_scheme) self.assertEqual(watermark.context_width, context_width)
Tests that GenerationConfig is serialized and WatermarkLogitsProcessor is initialized with WatermarkingConfig parameters
test_serialize_generation_watermarking_config
python
huggingface/transformers
tests/generation/test_configuration_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_configuration_utils.py
Apache-2.0
def manage_process_group(func: Callable[..., Any]) -> Callable[..., Any]: """Manage the creation and destruction of the distributed process group for the wrapped function.""" def wrapped(*args: Any, **kwargs: Any) -> Any: device_count = backend_device_count(torch_device) torch.distributed.init_process_group(world_size=device_count) try: return func(*args, **kwargs) finally: torch.distributed.destroy_process_group() return wrapped
Manage the creation and destruction of the distributed process group for the wrapped function.
manage_process_group
python
huggingface/transformers
tests/generation/test_fsdp.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_fsdp.py
Apache-2.0
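A hedged usage sketch for the decorator above; the wrapped function body is hypothetical, and `init_process_group` as called in the decorator relies on the usual rendezvous environment variables (MASTER_ADDR, MASTER_PORT, RANK), e.g. as set by `torchrun`:

import torch

@manage_process_group
def run_distributed_generation():
    # Illustrative body: the process group is already initialized here and will
    # be destroyed when the function returns, even if it raises.
    rank = torch.distributed.get_rank()
    print(f"rank {rank} of {torch.distributed.get_world_size()} is ready")

if __name__ == "__main__":
    run_distributed_generation()  # e.g. launched via `torchrun --nproc-per-node=2 script.py`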
def test_synthidtext_watermark_processor_distributional_convergence(self, vocab_size, logits_type): """Check if watermarked distribution converges to unwatermarked logits distribution.""" batch_size = 1500 num_keys = 1000 updated_softmaxes = 0 np.random.seed(0) torch.manual_seed(0) if logits_type == "uniform": fixed_logits = torch.ones((batch_size, vocab_size), device=torch_device) elif logits_type == "random": fixed_logits = torch.rand( ( 1, vocab_size, ), device=torch_device, ) fixed_logits = fixed_logits.repeat(batch_size, 1) else: raise ValueError(f"Unrecognized logits_type {logits_type}") for _ in range(num_keys): watermarking_config = { "ngram_len": 5, "keys": np.random.randint(0, 10**9, size=(1,), dtype=np.int64), "sampling_table_size": 2**16, "sampling_table_seed": 0, "context_history_size": 1024, "device": torch_device, } logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config) ngrams = torch.randint( low=0, high=vocab_size, size=(batch_size, watermarking_config["ngram_len"]), device=torch_device, ) # Insert ngram-1 into logit_processor state. for idx in range(watermarking_config["ngram_len"] - 1): _ = logits_processor(ngrams[:, :idx], fixed_logits) updated_scores = logits_processor(ngrams, fixed_logits) updated_softmaxes += torch.nn.functional.softmax(updated_scores, dim=1).cpu().numpy() updated_softmaxes = np.mean(updated_softmaxes, axis=0) / num_keys is_close = torch.all( torch.isclose( torch.tensor(updated_softmaxes, device=torch_device), torch.nn.Softmax()(fixed_logits[0]), # Take any batch entry, all are same. atol=1e-3, rtol=0, ) ) self.assertTrue(is_close)
Check if watermarked distribution converges to unwatermarked logits distribution.
test_synthidtext_watermark_processor_distributional_convergence
python
huggingface/transformers
tests/generation/test_logits_process.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_logits_process.py
Apache-2.0
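For reference, a stripped-down construction of the same processor with arbitrary keys and a CPU device; this assumes `SynthIDTextWatermarkLogitsProcessor` is importable from the top-level `transformers` namespace and is only a sketch of the call pattern used in the test:

import torch
from transformers import SynthIDTextWatermarkLogitsProcessor

processor = SynthIDTextWatermarkLogitsProcessor(
    ngram_len=5,
    keys=[654, 400, 836, 123, 340],  # arbitrary watermarking keys
    sampling_table_size=2**16,
    sampling_table_seed=0,
    context_history_size=1024,
    device="cpu",
)

# The processor is stateful: it reweights next-token scores based on the trailing
# (ngram_len - 1) tokens it has seen for each sequence in the batch.
input_ids = torch.randint(0, 1000, (2, 10))
scores = torch.ones(2, 1000)
watermarked_scores = processor(input_ids, scores)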
def test_synthidtext_watermark_processor_bias_test(self, vocab_size, ngram_len, num_layers, atol): """Test SynthID watermarking bias matches theoretical value.""" batch_size = 20000 generator = torch.Generator(device=torch_device).manual_seed(0) np.random.seed(0) keys = [np.random.randint(0, 10**9) for _ in range(num_layers)] # Use 10**9 rather than vocab_size to ensure variety in (n-1)-grams. context = torch.randint( low=0, high=10**9, size=(batch_size, ngram_len - 1), dtype=torch.int64, generator=generator, device=torch_device, ) context_history_size = 1024 logits_processor = SynthIDTextWatermarkLogitsProcessor( ngram_len=ngram_len, keys=keys, sampling_table_size=2**16, sampling_table_seed=0, context_history_size=context_history_size, device=torch_device, ) scores = torch.ones( (batch_size, vocab_size), dtype=torch.float64, device=torch_device, ) # Init state of the logits processor. logits_processor(context, scores) # insert context into the state. for idx in range(1, ngram_len - 1): _ = logits_processor(context[:, :idx], scores) updated_scores = logits_processor(context, scores) probs = torch.nn.functional.softmax(updated_scores, dim=1) generator = torch.Generator(device=torch_device).manual_seed(0) next_tokens = torch.multinomial( probs, num_samples=1, generator=generator, ) ngrams = torch.concat((context, next_tokens), dim=1) g_values = logits_processor.compute_g_values(ngrams) mean_g_values = g_values.mean(dtype=torch.float64, dim=(0, 1)) expected_mean_g_value = logits_processor.expected_mean_g_value( vocab_size=vocab_size, ) is_close = torch.all( torch.isclose( mean_g_values, torch.tensor(expected_mean_g_value, dtype=torch.float64, device=torch_device), atol=atol, rtol=0, ) ) self.assertTrue(is_close)
Test SynthID watermarking bias matches theoretical value.
test_synthidtext_watermark_processor_bias_test
python
huggingface/transformers
tests/generation/test_logits_process.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_logits_process.py
Apache-2.0
def test_stop_string_criteria_vocab_size_mismatch(self): """Test that StopStringCriteria handles tokens above len(tokenizer) correctly.""" tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") # Create input_ids with tokens above len(tokenizer) input_ids = torch.tensor([[len(tokenizer) + 1024, 1, 2]], device=torch_device) scores = None criteria = StopStringCriteria(tokenizer=tokenizer, stop_strings=["test"]) # This should not raise an error and should return False since no stop string is matched self.assertFalse(criteria(input_ids, scores))
Test that StopStringCriteria handles tokens above len(tokenizer) correctly.
test_stop_string_criteria_vocab_size_mismatch
python
huggingface/transformers
tests/generation/test_stopping_criteria.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_stopping_criteria.py
Apache-2.0
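The criteria above is usually not built by hand; here is a hedged sketch of the more common entry point, where `generate` constructs a `StopStringCriteria` internally from `stop_strings` (the prompt and stop string are arbitrary):

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("The secret word is", return_tensors="pt")

# Passing `stop_strings` requires passing the tokenizer as well, so the stop
# strings can be matched against the generated token ids.
outputs = model.generate(
    **inputs,
    stop_strings=["."],
    tokenizer=tokenizer,
    max_new_tokens=20,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))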
def _check_similar_generate_outputs(self, output_1, output_2, atol=1e-5, rtol=1e-5): """ Checks whether a pair of generate outputs are similar. Two `generate` call outputs are considered similar in the following situations: 1. The sequences are the same 2. The sequences are different, but the scores up to (and including) the first mismatch are nearly identical """ # scores doesn't include data regarding decoder input tokens decoder_input_length = output_1.sequences.shape[1] - len(output_1.scores) output_matches = output_1.sequences == output_2.sequences has_matching_outputs = output_matches.all() has_matching_scores = None if not has_matching_outputs: for batch_idx in range(output_1.sequences.shape[0]): batch_matches = output_matches[batch_idx] if batch_matches.all(): continue first_mismatch_idx = batch_matches.int().argmin() # gets the index of the first False first_mismatch_idx -= decoder_input_length output_1_first_mismatch_scores = output_1.scores[first_mismatch_idx][batch_idx] output_2_first_mismatch_scores = output_2.scores[first_mismatch_idx][batch_idx] has_matching_scores = torch.allclose( output_1_first_mismatch_scores, output_2_first_mismatch_scores, rtol=atol, atol=rtol ) if not has_matching_scores: break self.assertTrue(has_matching_outputs or has_matching_scores)
Checks whether a pair of generate outputs are similar. Two `generate` call outputs are considered similar in the following situations: 1. The sequences are the same 2. The sequences are different, but the scores up to (and including) the first mismatch are nearly identical
_check_similar_generate_outputs
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
def test_past_key_values_format(self, custom_all_cache_shapes=None): """ Test that the KV cache is formatted correctly. Exceptions need to explicitly overwrite this test, or pass the expected cache shapes. Having a standard KV cache format is important for a consistent API (and for advanced generation methods). """ for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # 1. If it doesn't support cache, skip the test if not hasattr(config.get_text_config(), "use_cache"): self.skipTest(reason=f"{model_class.__name__} doesn't support caching") model = model_class(config).to(torch_device) model = model.eval() if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) if "past_key_values" not in outputs: self.skipTest(reason="This model doesn't return `past_key_values`") # 2. retrieve the KV cache and compute its default expected shapes (if no custom shapes are provided) past_kv = outputs["past_key_values"] is_legacy_cache = not isinstance(past_kv, Cache) text_config = config.get_text_config() num_decoder_layers = ( getattr(text_config, "decoder_layers", None) or getattr(text_config, "num_decoder_layers", None) or text_config.num_hidden_layers ) if custom_all_cache_shapes is None: num_query_attention_heads = getattr( text_config, "decoder_attention_heads", text_config.num_attention_heads ) embed_dim = getattr(text_config, "d_model", text_config.hidden_size) per_head_embed_dim = embed_dim // num_query_attention_heads num_key_value_heads = ( text_config.num_key_value_heads if getattr(text_config, "num_key_value_heads", None) is not None else num_query_attention_heads ) if config.is_encoder_decoder: encoder_num_attention_heads = ( text_config.encoder_attention_heads if hasattr(text_config, "encoder_attention_heads") else text_config.num_attention_heads ) encoder_per_head_embed_dim = embed_dim // encoder_num_attention_heads batch_size, seq_length = inputs["decoder_input_ids"].shape[:2] # The sequence length for the encoder K V depends on the model. Since it is not manipulated in # autoregressive generation, we're keeping the test general and not checking the 3rd dim default_cross_attention_shape = ( batch_size, encoder_num_attention_heads, encoder_per_head_embed_dim, ) default_self_attention_shape = (batch_size, num_key_value_heads, seq_length, per_head_embed_dim) all_cache_shapes = [ [ default_self_attention_shape, default_self_attention_shape, default_cross_attention_shape, default_cross_attention_shape, ] for _ in range(num_decoder_layers) ] else: batch_size, seq_length = inputs["input_ids"].shape[:2] default_self_attention_shape = (batch_size, num_key_value_heads, seq_length, per_head_embed_dim) all_cache_shapes = [ [default_self_attention_shape, default_self_attention_shape] for _ in range(num_decoder_layers) ] else: all_cache_shapes = custom_all_cache_shapes # 3. Check cache shapes # 3.1. 
Encoder-Decoder checks if config.is_encoder_decoder: num_cache_decoder_layers = ( len(past_kv) if is_legacy_cache else len(past_kv.self_attention_cache.key_cache) ) self.assertEqual(num_cache_decoder_layers, num_decoder_layers) for i in range(num_decoder_layers): if is_legacy_cache: self.assertEqual(len(past_kv[0]), 4) # legacy check: confirm number of elements in tuple # Self attention self_attention_layer_key_cache = ( past_kv[i][0] if is_legacy_cache else past_kv.self_attention_cache.key_cache[i] ) self_attention_layer_value_cache = ( past_kv[i][1] if is_legacy_cache else past_kv.self_attention_cache.value_cache[i] ) self.assertEqual(self_attention_layer_key_cache.shape, all_cache_shapes[i][0]) self.assertEqual(self_attention_layer_value_cache.shape, all_cache_shapes[i][1]) # Cross attention (ignore 3rd dim, see default shape preparation) cross_attention_layer_key_cache = ( past_kv[i][2] if is_legacy_cache else past_kv.cross_attention_cache.key_cache[i] ) cross_attention_layer_value_cache = ( past_kv[i][3] if is_legacy_cache else past_kv.cross_attention_cache.value_cache[i] ) cross_attention_layer_key_cache = cross_attention_layer_key_cache[:, :, 0, :] cross_attention_layer_value_cache = cross_attention_layer_value_cache[:, :, 0, :] self.assertEqual(cross_attention_layer_key_cache.shape, all_cache_shapes[i][2]) self.assertEqual(cross_attention_layer_value_cache.shape, all_cache_shapes[i][3]) # 3.2. Decoder-only checks else: num_cache_decoder_layers = len(past_kv) if is_legacy_cache else len(past_kv.key_cache) self.assertEqual(num_cache_decoder_layers, num_decoder_layers) for i in range(num_decoder_layers): if is_legacy_cache: self.assertEqual(len(past_kv[0]), 2) # legacy check: confirm number of elements in tuple # Self attention self_attention_layer_key_cache = past_kv[i][0] if is_legacy_cache else past_kv.key_cache[i] self_attention_layer_value_cache = past_kv[i][1] if is_legacy_cache else past_kv.value_cache[i] self.assertEqual(self_attention_layer_key_cache.shape, all_cache_shapes[i][0]) self.assertEqual(self_attention_layer_value_cache.shape, all_cache_shapes[i][1])
Test that the KV cache is formatted correctly. Exceptions need to explicitly overwrite this test, or pass the expected cache shapes. Having a standard KV cache format is important for a consistent API (and for advanced generation methods).
test_past_key_values_format
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
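The shape convention the test above asserts can be checked directly on a small decoder-only model; a minimal sketch, where each per-layer tensor follows (batch_size, num_key_value_heads, seq_len, head_dim):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")

inputs = tokenizer("Hello world", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, use_cache=True)

# One key tensor and one value tensor per decoder layer, each of shape
# (batch_size, num_key_value_heads, seq_len, head_dim).
past_kv = outputs.past_key_values
print(len(past_kv.key_cache), past_kv.key_cache[0].shape)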
def test_generate_from_inputs_embeds(self, _, num_beams): """Tests that we can generate from `inputs_embeds` instead of `input_ids` in LLMs, VLMs, etc""" # When supported, tests that the decoder model can generate from `inputs_embeds` instead of `input_ids` # if fails, you should probably update the `prepare_inputs_for_generation` function for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() # This test is for decoder-only models (encoder-decoder models have native input embeddings support in the # decoder) if config.get_text_config(decoder=True).is_encoder_decoder: continue config.is_decoder = True # Skip models without explicit support model = model_class(config).to(torch_device).eval() if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters.keys(): continue # There are a few exception patterns in this test: # 1 - Some models can't generate without `input_ids`, when `inputs_embeds` are passed requires_inputs_ids = any(model_name in model_class.__name__.lower() for model_name in ["idefics"]) # 2 - Complex `inputs_embeds` computation, i.e. the correct computation of inputs embeds is more complex # than calling the embedding layer with `input_ids`. Subcases of this exception: # 2.A - Ignore `scale_embedding`, if the model supports it (it is controlled by a model-dependent flag) if hasattr(config, "scale_embedding"): config.scale_embedding = False # 2.B - Some VLMs assume `inputs_embeds` and `pixel_values` are mutually exclusive AND fall in the # exception above (complex `inputs_embeds` computation). Popping `pixel_values` allow us to run the # checks without adding test complexity. Ditto for `pixel_values_videos` and `pixel_values_images` pixel_values_is_mutually_exclusive = any( model_name in model_class.__name__.lower() for model_name in VLM_CLASS_NAMES ) if pixel_values_is_mutually_exclusive: inputs_dict.pop("pixel_values", None) inputs_dict.pop("pixel_values_videos", None) inputs_dict.pop("pixel_values_images", None) # HACK - in the case of granite speech, input_features and inputs_embeds are mutually exclusive; # this is similar to VLMs and should likely be standardized for similar audio models in the future, # then made generic here. if "granitespeech" in model_class.__name__.lower(): inputs_dict.pop("input_features", None) # 2.C - No easy fix, let's skip the check that compares the outputs from `input_ids` and `inputs_embeds` has_complex_embeds_computation = any( model_name in model_class.__name__.lower() for model_name in ["moshi"] ) # 3 - `inputs_dict` doesn't contain `attention_mask`. When `attention_mask` is not passed to generate, # we infer it from `input_ids`. The last test case will fail if there is a pad token in the original input. missing_attention_mask = "attention_mask" not in inputs_dict # Traditional way of generating text input_ids = inputs_dict.pop("input_ids") generation_kwargs = { "return_dict_in_generate": True, "output_scores": True, "num_beams": num_beams, "do_sample": False, "max_new_tokens": 5, "min_new_tokens": 5, # generate exactly 5 tokens } outputs_from_ids = model.generate(input_ids, **generation_kwargs, **inputs_dict) self.assertEqual(outputs_from_ids.sequences.shape[:2], (input_ids.shape[0], input_ids.shape[1] + 5)) # Same thing, but from input embeddings (`input_ids` is passed so the prompt is present in the output). # The output of the two calls should be the same. 
inputs_embeds = model.get_input_embeddings()(input_ids) outputs_from_embeds = model.generate( input_ids, inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict ) if not has_complex_embeds_computation: self._check_similar_generate_outputs(outputs_from_ids, outputs_from_embeds) # If we pass different inputs_embeds, we should get different outputs (the output text may be the # same, but the logits will almost surely be different) random_embeds = torch.rand_like(inputs_embeds) outputs_from_rand_embeds = model.generate( input_ids, inputs_embeds=random_embeds, **generation_kwargs, **inputs_dict ) for i in range(len(outputs_from_rand_embeds.scores)): self.assertFalse(torch.allclose(outputs_from_embeds.scores[i], outputs_from_rand_embeds.scores[i])) # input_ids is not a required input on most models -- if we don't pass it, the newly generated tokens will # be the same if not (requires_inputs_ids or missing_attention_mask): outputs_from_embeds_wo_ids = model.generate( inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict ) outputs_from_embeds.sequences = outputs_from_embeds.sequences[:, inputs_embeds.shape[1] :] self._check_similar_generate_outputs(outputs_from_embeds_wo_ids, outputs_from_embeds)
Tests that we can generate from `inputs_embeds` instead of `input_ids` in LLMs, VLMs, etc
test_generate_from_inputs_embeds
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
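Reduced to its essence, the pattern exercised above looks roughly like this; note that when only `inputs_embeds` is passed (no `input_ids`), decoder-only models return just the newly generated tokens:

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")

input_ids = tokenizer("Hello world", return_tensors="pt").input_ids

# Embed the prompt manually and generate from the embeddings.
inputs_embeds = model.get_input_embeddings()(input_ids)
new_tokens = model.generate(inputs_embeds=inputs_embeds, max_new_tokens=5, do_sample=False)
print(tokenizer.decode(new_tokens[0], skip_special_tokens=True))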
def test_generate_from_inputs_embeds_with_static_cache(self): """ Test that StaticCache can generate from inputs_embeds and calculates max_cache_length correctly in `generate()`. We force the model to not stop generation until max-length is reached to verify that the cache length is indeed set correctly and we don't run out of index when slicing the cache. """ for model_class in self.all_generative_model_classes: if not model_class._supports_static_cache: self.skipTest(reason="This model does not support the static cache format") config, inputs_dict = self.prepare_config_and_inputs_for_generate() if config.get_text_config(decoder=True).is_encoder_decoder: self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache") model = model_class(config).to(torch_device).eval() if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters.keys(): self.skipTest(reason="This model does not support `inputs_embeds` in generation") # Some VLMs assume `inputs_embeds` and `pixel_values` are mutually exclusive AND fall in the # exception above (complex `inputs_embeds` computation). Popping `pixel_values` allow us to run the # checks without adding test complexity. Ditto for `pixel_values_videos` and `pixel_values_images` pixel_values_is_mutually_exclusive = any( model_name in model_class.__name__.lower() for model_name in VLM_CLASS_NAMES ) if pixel_values_is_mutually_exclusive: inputs_dict.pop("pixel_values", None) inputs_dict.pop("pixel_values_videos", None) inputs_dict.pop("pixel_values_images", None) input_ids = inputs_dict.pop("input_ids") model.config.use_cache = True model.config.is_decoder = True batch_size = input_ids.shape[0] max_new_tokens = 10 # here we force to not stop at eos and go until max-length model.generation_config.eos_token_id = model.config.get_text_config().eos_token_id = -1 generation_kwargs = { "max_new_tokens": max_new_tokens, "cache_implementation": "static", "return_dict_in_generate": True, # Required to return `past_key_values` } text_config = model.config.get_text_config() head_dim = ( getattr(text_config, "head_dim", None) or text_config.hidden_size // text_config.num_attention_heads ) num_key_value_heads = ( text_config.num_attention_heads if getattr(text_config, "num_key_value_heads", None) is None else text_config.num_key_value_heads ) num_hidden_layers = text_config.num_hidden_layers inputs_embeds = model.get_input_embeddings()(input_ids) outputs = model.generate(inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict) # we should get `max_length - 1` in shape, not `max_length - embeds_length`. # -1 because the last generated token isn't yet in the cache. max_length = max_new_tokens + inputs_embeds.shape[1] - 1 cache_shape = [batch_size, num_key_value_heads, max_length, head_dim] self.assertIsInstance(outputs.past_key_values, StaticCache) self.assertEqual(len(outputs.past_key_values.key_cache), num_hidden_layers) self.assertListEqual(list(outputs.past_key_values.key_cache[0].shape), cache_shape)
Test that StaticCache can generate from inputs_embeds and calculates max_cache_length correctly in `generate()`. We force the model to not stop generation until max-length is reached to verify that the cache length is indeed set correctly and we don't run out of index when slicing the cache.
test_generate_from_inputs_embeds_with_static_cache
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
def test_generate_continue_from_inputs_embeds(self): """Tests that we can continue generation from `inputs_embeds` and past key values returned from a previous `generate` call.""" for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt"]): self.skipTest(reason="Won't fix: old model with unique inputs/caches/other") if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]): self.skipTest(reason="TODO: needs modeling or test input preparation fixes for compatibility") config, inputs_dict = self.prepare_config_and_inputs_for_generate() if "token_type_ids" in inputs_dict: del inputs_dict["token_type_ids"] if config.get_text_config(decoder=True).is_encoder_decoder: self.skipTest(reason="This model is encoder-decoder") # TODO (joao, raushan): the correct line below is `if not hasattr(config.get_text_config(), "use_cache")`, # but it breaks a few models. Fix and then apply `_check_similar_generate_outputs` pattern if not hasattr(config, "use_cache"): self.skipTest(reason=f"{model_class.__name__} doesn't support caching") model = model_class(config).to(torch_device).eval() if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters.keys(): self.skipTest(reason="This model does not support `inputs_embeds` in generation") # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format) outputs = model(**inputs_dict) if "past_key_values" not in outputs: self.skipTest(reason="This model doesn't return `past_key_values`") pixel_values_is_mutually_exclusive = any( model_name in model_class.__name__.lower() for model_name in VLM_CLASS_NAMES ) if pixel_values_is_mutually_exclusive: inputs_dict.pop("pixel_values", None) inputs_dict.pop("pixel_values_videos", None) inputs_dict.pop("pixel_values_images", None) input_ids = inputs_dict.pop("input_ids") model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1 model.generation_config.forced_eos_token_id = None model.config.is_decoder = True model.generation_config.use_cache = True generation_kwargs = { "return_dict_in_generate": True, "do_sample": False, } # Traditional way of generating text, with `return_dict_in_generate` to return the past key values. input_embeds = model.get_input_embeddings()(input_ids) outputs = model.generate(inputs_embeds=input_embeds, max_new_tokens=4, **generation_kwargs) # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens) initial_output = model.generate(inputs_embeds=input_embeds, max_new_tokens=3, **generation_kwargs) continued_embeds = torch.cat([input_embeds, model.get_input_embeddings()(initial_output.sequences)], dim=1) cached_output = model.generate( inputs_embeds=continued_embeds, max_new_tokens=1, past_key_values=initial_output.past_key_values, **generation_kwargs, ) # Combine the (3 + 1) generated tokens and verify it matches with full generation. combined_output_sequences = torch.concat([initial_output.sequences, cached_output.sequences], axis=1) self.assertListEqual(outputs.sequences.tolist(), combined_output_sequences.tolist()) # The two sets of past kv should be equal to each other for layer_idx in range(len(cached_output.past_key_values)): for kv_idx in range(len(cached_output.past_key_values[layer_idx])): self.assertTrue( torch.allclose( outputs.past_key_values[layer_idx][kv_idx], cached_output.past_key_values[layer_idx][kv_idx], ) )
Tests that we can continue generation from `inputs_embeds` and past key values returned from a previous `generate` call.
test_generate_continue_from_inputs_embeds
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
def test_generate_with_static_cache(self): """ Tests that generating with static cache give almost same results as with dynamic cache, and the output cache has the expected shapes """ set_model_tester_for_less_flaky_test(self) for model_class in self.all_generative_model_classes: if not model_class._supports_static_cache: self.skipTest(reason="This model does not support the static cache format") config, inputs_dict = self.prepare_config_and_inputs_for_generate() set_config_for_less_flaky_test(config) main_input = inputs_dict[model_class.main_input_name] if config.get_text_config(decoder=True).is_encoder_decoder: self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache") config.is_decoder = True batch_size = main_input.shape[0] seq_length = self.model_tester.seq_length max_new_tokens = 20 for dtype in (torch.float32, torch.float16): model = model_class(config).to(torch_device).to(dtype).eval() inputs_dict = { k: v.to(dtype) if isinstance(v, torch.Tensor) and torch.is_floating_point(v) else v for k, v in inputs_dict.items() } set_model_for_less_flaky_test(model) generation_kwargs = { "max_new_tokens": max_new_tokens, "return_dict_in_generate": True, # Required to return `past_key_values` "output_scores": True, "use_cache": True, } static_cache_generation = model.generate( **generation_kwargs, **inputs_dict, cache_implementation="static" ) # Check 1: The cache shapes must match the expected shapes max_cache_len = seq_length + max_new_tokens - 1 # cache len = gen len - 1, the last token has no cache text_config = config.text_config if hasattr(config, "text_config") else config head_dim = ( getattr(text_config, "head_dim", None) or text_config.hidden_size // text_config.num_attention_heads ) num_key_value_heads = ( text_config.num_attention_heads if getattr(text_config, "num_key_value_heads", None) is None else text_config.num_key_value_heads ) num_hidden_layers = text_config.num_hidden_layers cache_shape = (batch_size, num_key_value_heads, max_cache_len, head_dim) self.assertTrue(isinstance(static_cache_generation.past_key_values, StaticCache)) self.assertTrue(len(static_cache_generation.past_key_values.key_cache) == num_hidden_layers) self.assertTrue(static_cache_generation.past_key_values.key_cache[0].shape == cache_shape) # Check 2: The outputs must be similar to the case with dynamic cache dynamic_cache_generation = model.generate(**generation_kwargs, **inputs_dict) self._check_similar_generate_outputs(dynamic_cache_generation, static_cache_generation)
Tests that generating with a static cache gives almost the same results as with a dynamic cache, and that the output cache has the expected shapes
test_generate_with_static_cache
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
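A hedged sketch of the static-cache path the test compares against the dynamic default; `return_dict_in_generate=True` is needed to get the cache back for inspection:

from transformers import AutoModelForCausalLM, AutoTokenizer, StaticCache

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")

inputs = tokenizer("Hello world", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=20,
    cache_implementation="static",
    return_dict_in_generate=True,
)

# The pre-allocated cache length covers prompt + new tokens minus the last
# generated token, which never needs to be written back.
assert isinstance(outputs.past_key_values, StaticCache)
print(outputs.past_key_values.key_cache[0].shape)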
def test_generate_compilation_all_outputs(self): """ Tests that all optional outputs are behaving as expected when compilation is triggered. In essence, it's the same as `test_greedy_generate_dict_outputs`, but with automatic compilation triggered. """ for model_class in self.all_generative_model_classes: if not model_class._supports_static_cache: self.skipTest("This model doesn't support static cache (= no expectations of compilation support)") config, inputs_dict = self.prepare_config_and_inputs_for_generate() if self.has_attentions: config._attn_implementation = "eager" # can't output attentions otherwise model = model_class(config).to(torch_device).eval() # compilation-specific setup torch.compiler.reset() # prevent cached compilation from being used in the test has_defined_cache_implementation = model.generation_config.cache_implementation is not None # BLIP is the only exception with custom generate which call `self.lm.generate()` # We should avoid such calls in all subsequent multimodal models and try to make `generate()` # compatible with multimodality compile_config = CompileConfig() compile_config._compile_all_devices = True if "blip" in model.__class__.__name__.lower(): model.language_model.generation_config.compile_config = compile_config if not has_defined_cache_implementation: model.language_model.generation_config.cache_implementation = "static" else: # force compilation (e.g. fast CI, CPU) model.generation_config.compile_config = compile_config if not has_defined_cache_implementation: model.generation_config.cache_implementation = "static" logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=False, config=model.config) output_generate = model.generate( do_sample=False, num_beams=1, max_new_tokens=self.max_new_tokens, min_new_tokens=self.max_new_tokens, output_attentions=True, output_hidden_states=True, output_scores=True, output_logits=True, return_dict_in_generate=True, use_cache=True, **logits_processor_kwargs, **inputs_dict, ) if "blip" in model.__class__.__name__.lower(): self.assertTrue(hasattr(model.language_model, "_compiled_call")) else: self.assertTrue(hasattr(model, "_compiled_call")) # our auto compile should have been called if model.config.get_text_config(decoder=True).is_encoder_decoder: self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1) self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput) else: self.assertTrue( output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1] ) self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) self._check_generate_outputs(output_generate, model.config, use_cache=True)
Tests that all optional outputs are behaving as expected when compilation is triggered. In essence, it's the same as `test_greedy_generate_dict_outputs`, but with automatic compilation triggered.
test_generate_compilation_all_outputs
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
def test_inherits_generation_mixin(self): """ Tests that the model class directly inherits `GenerationMixin`, as opposed to relying on `PreTrainedModel` to inherit it. """ for model_class in self.all_generative_model_classes: self.assertTrue("GenerationMixin" in str(model_class.__bases__))
Tests that the model class directly inherits `GenerationMixin`, as opposed to relying on `PreTrainedModel` to inherit it.
test_inherits_generation_mixin
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
def _test_attention_implementation(self, attn_implementation): """ Compares the output of generate with the eager attention implementation against other implementations. NOTE: despite the test logic being the same, different implementations actually need different decorators, hence this separate function. """ max_new_tokens = 30 support_flag = { "sdpa": "_supports_sdpa", "flash_attention_2": "_supports_flash_attn_2", } for model_class in self.all_generative_model_classes: if not getattr(model_class, support_flag[attn_implementation]): self.skipTest(f"{model_class.__name__} does not support `attn_implementation={attn_implementation}`") config, original_inputs_dict = self.prepare_config_and_inputs_for_generate() inputs_dict = {} for input_name, input_data in original_inputs_dict.items(): if isinstance(input_data, torch.Tensor) and input_data.dtype in [torch.float32, torch.bfloat16]: inputs_dict[input_name] = input_data.to(torch.float16) else: inputs_dict[input_name] = input_data main_input = inputs_dict[model_class.main_input_name] # FA2 doesn't accept masking in the middle of the sequence for now. We usually generate right-padded # attention masks at test time and, with generate, the mask will be appended with 1s on the right, # resulting in a mask with holes (not supported properly by FA2). if attn_implementation == "flash_attention_2": for input_name in ("attention_mask", "decoder_attention_mask", "encoder_attention_mask"): if input_name in inputs_dict: inputs_dict[input_name] = torch.ones_like(inputs_dict[input_name]) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + main_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) del model gc.collect() generate_kwargs = { "max_new_tokens": max_new_tokens, "do_sample": False, "return_dict_in_generate": True, "output_scores": True, "use_cache": True, } model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True, attn_implementation="eager", ).to(torch_device) res_eager = model_eager.generate(**inputs_dict, **generate_kwargs) del model_eager gc.collect() model_attn = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True, attn_implementation=attn_implementation, ).to(torch_device) res_attn = model_attn.generate(**inputs_dict, **generate_kwargs) del model_attn gc.collect() self._check_similar_generate_outputs(res_eager, res_attn, atol=1e-3, rtol=1e-3)
Compares the output of generate with the eager attention implementation against other implementations. NOTE: despite the test logic being the same, different implementations actually need different decorators, hence this separate function.
_test_attention_implementation
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
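Outside the test harness, the backend comparison boils down to loading the same checkpoint with different `attn_implementation` values; a sketch (FlashAttention-2 additionally requires a supported GPU and half-precision weights):

import torch
from transformers import AutoModelForCausalLM

checkpoint = "hf-internal-testing/tiny-random-MistralForCausalLM"

# Same weights, different attention backends; greedy generation from both should
# produce near-identical scores, which is what the test asserts.
model_eager = AutoModelForCausalLM.from_pretrained(
    checkpoint, torch_dtype=torch.float16, attn_implementation="eager"
)
model_sdpa = AutoModelForCausalLM.from_pretrained(
    checkpoint, torch_dtype=torch.float16, attn_implementation="sdpa"
)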
def test_speculative_sampling_target_distribution(self): """ Asserts that the target distribution is preserved. Should help with catching issues like #32867. """ # assume vocab size 10, input length 5 + 3 generated candidates candidate_input_ids = torch.tensor([[8, 0, 3, 9, 8, 1, 4, 5]]) # input tokens candidate_logits = torch.tensor( [ [ [-10.0, 10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0], # generated 1 [-10.0, -10.0, -10.0, -10.0, 10.0, -10.0, -10.0, -10.0, -10.0, -10.0], # generated 4 [-10.0, -10.0, -10.0, -10.0, -10.0, 10.0, -10.0, -10.0, -10.0, -10.0], # generated 5 ] ] ) candidate_length = 3 inf = float("inf") new_logits = torch.tensor( [ [ # accepts 1: [-inf, 10.0, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf], # accepts 4: [-inf, -inf, -inf, -inf, 10.0, -inf, -inf, -inf, -inf, -inf], # most likely to be 1 or 8, less likely to be 3, then 7, and should never be any other value: [-inf, 2.0, -inf, 1.0, -inf, -inf, -inf, -0.01, 2.0, -inf], # N/A: [-inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf], ] ] ) last_assistant_token_is_eos = False last_validated_token = [] for _ in range(10_000): validated_tokens, n_matches = _speculative_sampling( candidate_input_ids, candidate_logits, candidate_length, new_logits, last_assistant_token_is_eos, ) self.assertTrue(n_matches.item() == 2) self.assertTrue(validated_tokens.tolist()[0][0] == 1) self.assertTrue(validated_tokens.tolist()[0][1] == 4) self.assertTrue(validated_tokens.tolist()[0][2] in [1, 3, 7, 8]) last_validated_token.append(validated_tokens.tolist()[0][2]) # check that the most likely tokens are selected more often than the less likely ones last_token_counts = collections.Counter(last_validated_token) self.assertTrue(last_token_counts[1] > last_token_counts[3] > last_token_counts[7] > 0) self.assertTrue(last_token_counts[8] > last_token_counts[3])
Asserts that the target distribution is preserved. Should help with catching issues like #32867.
test_speculative_sampling_target_distribution
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
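For context, the rule that makes speculative sampling distribution-preserving (and hence what the frequency counts above probe) is the standard accept/resample step below; the helper and its variable names are illustrative, not the library's internal API:

import torch

def accept_or_resample(p_target, p_draft, candidate_token, generator=None):
    # Accept the drafted token with probability min(1, p_target / p_draft);
    # otherwise resample from the renormalized residual max(0, p_target - p_draft).
    # This is what keeps the output distributed exactly like the target model.
    ratio = p_target[candidate_token] / p_draft[candidate_token]
    if torch.rand((), generator=generator) < ratio:
        return candidate_token
    residual = torch.clamp(p_target - p_draft, min=0.0)
    residual = residual / residual.sum()
    return torch.multinomial(residual, num_samples=1, generator=generator).item()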
def test_generate_with_static_cache_multi_accelerator(self): """ Tests if the static cache has been set correctly and if generate works correctly when we are using multi-acceleratorss. """ # need to split manually as auto doesn't work well with unbalanced model device_map = {"model.embed_tokens": 0, "model.layers.0": 0, "model.layers.1": 1, "model.norm": 1, "lm_head": 0} model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-MistralForCausalLM", device_map=device_map ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) generation_kwargs = { "max_new_tokens": 20, "cache_implementation": "static", "return_dict_in_generate": True, # Required to return `past_key_values` } results = model.generate(input_ids, **generation_kwargs) self.assertTrue(isinstance(results.past_key_values, StaticCache)) # check device of each layer key_cache_0 = results.past_key_values.key_cache[0] value_cache_0 = results.past_key_values.value_cache[0] self.assertTrue(key_cache_0.device == value_cache_0.device == torch.device(0)) key_cache_1 = results.past_key_values.key_cache[1] value_cache_1 = results.past_key_values.value_cache[1] self.assertTrue(key_cache_1.device == value_cache_1.device == torch.device(1))
Tests if the static cache has been set correctly and if generate works correctly when we are using multiple accelerators.
test_generate_with_static_cache_multi_accelerator
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
def test_generate_multi_accelerator_causal_mask(self): """ Tests that cache position device doesn't clash with causal mask device when we are using multi-accelerators. In real life happens only when multimodal encoder size is big, so `embed_tokens` gets allocated to the next device. The error will be triggered whenever a bacthed input is used, so that `causal_mask` is actually prepared instead of being `None`. """ # need to split manually as auto doesn't work well with unbalanced model device_map = { "visual": 0, "model.embed_tokens": 1, "model.layers.0": 1, "model.layers.1": 1, "model.rotary_emb": 1, "model.norm.weight": 1, "lm_head": 1, } model = AutoModelForImageTextToText.from_pretrained( "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration", device_map=device_map ) processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") text = ["Hello world", "Today I went to the supermarket to buy"] inputs = processor(text=text, padding=True, return_tensors="pt").to(torch_device) _ = model.generate(**inputs, max_new_tokens=20)
Tests that the cache position device doesn't clash with the causal mask device when we are using multiple accelerators. In real life this happens only when the multimodal encoder is large, so `embed_tokens` gets allocated to the next device. The error is triggered whenever a batched input is used, so that `causal_mask` is actually prepared instead of being `None`.
test_generate_multi_accelerator_causal_mask
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
def test_init_static_cache_multi_accelerator(self): """ Tests if the static cache has been set correctly when we initialize it manually in a multi-accelerator setup. """ # need to split manually as auto doesn't work well with unbalanced model device_map = {"model.embed_tokens": 0, "model.layers.0": 0, "model.layers.1": 1, "model.norm": 1, "lm_head": 0} model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-MistralForCausalLM", device_map=device_map ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) generation_kwargs = { "max_new_tokens": 20, "return_dict_in_generate": True, # Required to return `past_key_values` } # TODO: We need to raise a warning in case the cache is not set correctly # with self.assertRaisesRegex(ValueError, "If you are manually initializing the cache"): # past_key_values = StaticCache( # config=model.config, max_batch_size=1, max_cache_len=30, device=torch_device, dtype=model.dtype # ) # results = model.generate(input_ids, past_key_values=past_key_values, **generation_kwargs) # deduced from the device_map : layer 0 on device 0 and layer 1 on device 1 layer_device_map = {0: 0, 1: 1} past_key_values = StaticCache( config=model.config, max_batch_size=1, max_cache_len=30, device=torch_device, dtype=model.dtype, layer_device_map=layer_device_map, ) results = model.generate(input_ids, past_key_values=past_key_values, **generation_kwargs) # check device of each layer key_cache_0 = results.past_key_values.key_cache[0] value_cache_0 = results.past_key_values.value_cache[0] self.assertTrue(key_cache_0.device == value_cache_0.device == torch.device(0)) key_cache_1 = results.past_key_values.key_cache[1] value_cache_1 = results.past_key_values.value_cache[1] self.assertTrue(key_cache_1.device == value_cache_1.device == torch.device(1))
Tests if the static cache has been set correctly when we initialize it manually in a multi-accelerator setup.
test_init_static_cache_multi_accelerator
python
huggingface/transformers
tests/generation/test_utils.py
https://github.com/huggingface/transformers/blob/master/tests/generation/test_utils.py
Apache-2.0
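The single-device version of the manual cache setup above is simpler and may be easier to adapt; a sketch, where `max_cache_len` must cover the prompt plus all new tokens:

from transformers import AutoModelForCausalLM, AutoTokenizer, StaticCache

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
inputs = tokenizer("Hello world", return_tensors="pt")

# Pre-allocate the cache manually and hand it to generate(); without a
# layer_device_map every layer's cache lives on the single given device.
past_key_values = StaticCache(
    config=model.config,
    max_batch_size=1,
    max_cache_len=30,
    device="cpu",
    dtype=model.dtype,
)
outputs = model.generate(
    **inputs,
    past_key_values=past_key_values,
    max_new_tokens=20,
    return_dict_in_generate=True,
)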