Columns:
code: string, 66 to 870k characters
docstring: string, 19 to 26.7k characters
func_name: string, 1 to 138 characters
language: 1 distinct value
repo: string, 7 to 68 characters
path: string, 5 to 324 characters
url: string, 46 to 389 characters
license: 7 distinct values
def test_peft_save_quantized(self): """ Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models """ # 4bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear4bit") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) # 8-bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))
Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models
test_peft_save_quantized
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
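The row above exercises the Transformers-side PEFT integration: with `peft` installed, `from_pretrained` can point directly at an adapter repository, and `save_pretrained` then serializes only the adapter weights. A minimal sketch of that flow outside the test harness, assuming `peft`, `bitsandbytes`, and `accelerate` are available and reusing the tiny adapter repo referenced elsewhere in this file, could look like this:

import os
import tempfile

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

adapter_id = "peft-internal-testing/tiny-OPTForCausalLM-lora"  # tiny adapter repo used in these tests
model = AutoModelForCausalLM.from_pretrained(
    adapter_id,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)

with tempfile.TemporaryDirectory() as tmpdirname:
    model.save_pretrained(tmpdirname)  # only the adapter is serialized, not the quantized base weights
    print(os.listdir(tmpdirname))      # expected to include adapter_model.safetensors and adapter_config.json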
def test_peft_save_quantized_regression(self): """ Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models Regression test to make sure everything works as expected before the safetensors integration. """ # 4bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear4bit") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) # 8-bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))
Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models Regression test to make sure everything works as expected before the safetensors integration.
test_peft_save_quantized_regression
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
def test_peft_pipeline(self): """ Simple test that tests the basic usage of PEFT model + pipeline """ from transformers import pipeline for adapter_id, base_model_id in zip(self.peft_test_model_ids, self.transformers_test_model_ids): peft_pipe = pipeline("text-generation", adapter_id) base_pipe = pipeline("text-generation", base_model_id) peft_params = list(peft_pipe.model.parameters()) base_params = list(base_pipe.model.parameters()) self.assertNotEqual(len(peft_params), len(base_params)) # Assert we actually loaded the adapter too _ = peft_pipe("Hello")
Simple test that tests the basic usage of PEFT model + pipeline
test_peft_pipeline
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
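A minimal sketch of the behavior the pipeline test above relies on: with `peft` installed, `pipeline` accepts an adapter repository id directly and resolves the base model from the adapter config. The repo id is the tiny testing adapter referenced elsewhere in this file.

from transformers import pipeline

peft_pipe = pipeline("text-generation", "peft-internal-testing/tiny-OPTForCausalLM-lora")
print(peft_pipe("Hello", max_new_tokens=5)[0]["generated_text"])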
def test_peft_add_adapter_with_state_dict(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if add_adapter works as expected with a state_dict being passed. """ from peft import LoraConfig dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids): for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) with self.assertRaises(ValueError): model.load_adapter(peft_model_id=None) state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin") check_torch_load_is_safe() dummy_state_dict = torch.load(state_dict_path, weights_only=True) model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=peft_config) with self.assertRaises(ValueError): model.load_adapter(model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=None)) self.assertTrue(self._check_lora_correctly_converted(model)) # dummy generation _ = model.generate(input_ids=dummy_input)
Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if add_adapter works as expected with a state_dict being passed.
test_peft_add_adapter_with_state_dict
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
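A minimal sketch of loading an adapter from an explicit state dict, mirroring the test above; it assumes `peft` and `huggingface_hub` are installed and reuses the tiny testing checkpoints referenced elsewhere in this file.

import torch
from huggingface_hub import hf_hub_download
from peft import LoraConfig
from transformers import AutoModelForCausalLM

base_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
adapter_id = "peft-internal-testing/tiny-OPTForCausalLM-lora"

model = AutoModelForCausalLM.from_pretrained(base_id)
state_dict_path = hf_hub_download(adapter_id, "adapter_model.bin")
adapter_state_dict = torch.load(state_dict_path, weights_only=True)

# `load_adapter` is provided by the PeftAdapterMixin on PreTrainedModel.
model.load_adapter(adapter_state_dict=adapter_state_dict, peft_config=LoraConfig())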
def test_peft_add_adapter_with_state_dict_low_cpu_mem_usage(self): """ Check the usage of low_cpu_mem_usage, which is supported in PEFT >= 0.13.0 """ from peft import LoraConfig min_version_lcmu = "0.13.0" is_lcmu_supported = version.parse(importlib.metadata.version("peft")) >= version.parse(min_version_lcmu) for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids): for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig() state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin") check_torch_load_is_safe() dummy_state_dict = torch.load(state_dict_path, weights_only=True) # this should always work model.load_adapter( adapter_state_dict=dummy_state_dict, peft_config=peft_config, low_cpu_mem_usage=False ) if is_lcmu_supported: # if supported, this should not raise an error model.load_adapter( adapter_state_dict=dummy_state_dict, adapter_name="other", peft_config=peft_config, low_cpu_mem_usage=True, ) # after loading, no meta device should be remaining self.assertFalse(any((p.device.type == "meta") for p in model.parameters())) else: err_msg = r"The version of PEFT you are using does not support `low_cpu_mem_usage` yet" with self.assertRaisesRegex(ValueError, err_msg): model.load_adapter( adapter_state_dict=dummy_state_dict, adapter_name="other", peft_config=peft_config, low_cpu_mem_usage=True, )
Check the usage of low_cpu_mem_usage, which is supported in PEFT >= 0.13.0
test_peft_add_adapter_with_state_dict_low_cpu_mem_usage
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
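A minimal sketch of the version gate used in the test above: `low_cpu_mem_usage` is only safe to forward to PEFT when the installed version is at least 0.13.0.

import importlib.metadata

from packaging import version

MIN_PEFT_VERSION_FOR_LCMU = "0.13.0"  # minimum PEFT version supporting low_cpu_mem_usage
peft_version = version.parse(importlib.metadata.version("peft"))
supports_low_cpu_mem_usage = peft_version >= version.parse(MIN_PEFT_VERSION_FOR_LCMU)
print(f"peft {peft_version} supports low_cpu_mem_usage: {supports_low_cpu_mem_usage}")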
def test_peft_from_pretrained_hub_kwargs(self): """ Tests different combinations of PEFT model + from_pretrained + hub kwargs """ peft_model_id = "peft-internal-testing/tiny-opt-lora-revision" # This should not work with self.assertRaises(OSError): _ = AutoModelForCausalLM.from_pretrained(peft_model_id) adapter_kwargs = {"revision": "test"} # This should work model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) adapter_kwargs = {"revision": "main", "subfolder": "test_subfolder"} model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model))
Tests different combinations of PEFT model + from_pretrained + hub kwargs
test_peft_from_pretrained_hub_kwargs
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
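A minimal sketch of forwarding hub kwargs to the adapter download, as exercised above; the revision value is specific to the testing repository and is shown only to illustrate the shape of `adapter_kwargs`.

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "peft-internal-testing/tiny-opt-lora-revision",
    adapter_kwargs={"revision": "test"},  # revision specific to this testing repo
)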
def test_peft_from_pretrained_unexpected_keys_warning(self): """ Test for warning when loading a PEFT checkpoint with unexpected keys. """ from peft import LoraConfig logger = logging.get_logger("transformers.integrations.peft") for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids): for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig() state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin") check_torch_load_is_safe() dummy_state_dict = torch.load(state_dict_path, weights_only=True) # add unexpected key dummy_state_dict["foobar"] = next(iter(dummy_state_dict.values())) with CaptureLogger(logger) as cl: model.load_adapter( adapter_state_dict=dummy_state_dict, peft_config=peft_config, low_cpu_mem_usage=False ) msg = "Loading adapter weights from state_dict led to unexpected keys not found in the model: foobar" self.assertIn(msg, cl.out)
Test for warning when loading a PEFT checkpoint with unexpected keys.
test_peft_from_pretrained_unexpected_keys_warning
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
def test_peft_from_pretrained_missing_keys_warning(self): """ Test for warning when loading a PEFT checkpoint with missing keys. """ from peft import LoraConfig logger = logging.get_logger("transformers.integrations.peft") for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids): for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig() state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin") check_torch_load_is_safe() dummy_state_dict = torch.load(state_dict_path, weights_only=True) # remove a key so that we have missing keys key = next(iter(dummy_state_dict.keys())) del dummy_state_dict[key] with CaptureLogger(logger) as cl: model.load_adapter( adapter_state_dict=dummy_state_dict, peft_config=peft_config, low_cpu_mem_usage=False, adapter_name="other", ) # Here we need to adjust the key name a bit to account for PEFT-specific naming. # 1. Remove PEFT-specific prefix # If merged after dropping Python 3.8, we can use: key = key.removeprefix(peft_prefix) peft_prefix = "base_model.model." key = key[len(peft_prefix) :] # 2. Insert adapter name prefix, _, suffix = key.rpartition(".") key = f"{prefix}.other.{suffix}" msg = f"Loading adapter weights from state_dict led to missing keys in the model: {key}" self.assertIn(msg, cl.out)
Test for warning when loading a PEFT checkpoint with missing keys.
test_peft_from_pretrained_missing_keys_warning
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
def test_peft_load_adapter_training_inference_mode_true(self): """ By default, when loading an adapter, the whole model should be in eval mode and no parameter should have requires_grad=True. """ for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) model = transformers_class.from_pretrained(peft_model.config._name_or_path) model.load_adapter(tmpdirname) assert not any(p.requires_grad for p in model.parameters()) assert not any(m.training for m in model.modules()) del model
By default, when loading an adapter, the whole model should be in eval mode and no parameter should have requires_grad=True.
test_peft_load_adapter_training_inference_mode_true
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
def test_peft_load_adapter_training_inference_mode_false(self): """ When passing is_trainable=True, the LoRA modules should be in training mode and their parameters should have requires_grad=True. """ for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) model = transformers_class.from_pretrained(peft_model.config._name_or_path) model.load_adapter(tmpdirname, is_trainable=True) for name, module in model.named_modules(): if len(list(module.children())): # only check leaf modules continue if "lora_" in name: assert module.training assert all(p.requires_grad for p in module.parameters()) else: assert not module.training assert all(not p.requires_grad for p in module.parameters())
When passing is_trainable=True, the LoRA modules should be in training mode and their parameters should have requires_grad=True.
test_peft_load_adapter_training_inference_mode_false
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
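A minimal sketch of what the two inference-mode tests above assert: loading an adapter with `is_trainable=True` leaves only the LoRA parameters trainable. It assumes `peft` is installed and reuses the tiny testing checkpoints from this file.

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
model.load_adapter("peft-internal-testing/tiny-OPTForCausalLM-lora", is_trainable=True)

trainable = [name for name, param in model.named_parameters() if param.requires_grad]
assert trainable and all("lora_" in name for name in trainable)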
def test_peft_pipeline_no_warning(self): """ Test to verify that the warning message "The model 'PeftModel' is not supported for text-generation" does not appear when using PeftModel with text-generation pipeline. """ from peft import PeftModel from transformers import pipeline ADAPTER_PATH = "peft-internal-testing/tiny-OPTForCausalLM-lora" BASE_PATH = "hf-internal-testing/tiny-random-OPTForCausalLM" # Input text for testing text = "Who is a Elon Musk?" model = AutoModelForCausalLM.from_pretrained( BASE_PATH, device_map="auto", ) tokenizer = AutoTokenizer.from_pretrained(BASE_PATH) lora_model = PeftModel.from_pretrained( model, ADAPTER_PATH, device_map="auto", ) # Create pipeline with PEFT model while capturing log output # Check that the warning message is not present in the logs pipeline_logger = logging.get_logger("transformers.pipelines.base") with self.assertNoLogs(pipeline_logger, logging.ERROR): lora_generator = pipeline( task="text-generation", model=lora_model, tokenizer=tokenizer, max_length=10, ) # Generate text to verify pipeline works _ = lora_generator(text)
Test to verify that the warning message "The model 'PeftModel' is not supported for text-generation" does not appear when using PeftModel with text-generation pipeline.
test_peft_pipeline_no_warning
python
huggingface/transformers
tests/peft_integration/test_peft_integration.py
https://github.com/huggingface/transformers/blob/master/tests/peft_integration/test_peft_integration.py
Apache-2.0
def test_input_parameter_passthrough(self): """Test that chunked vs non-chunked versions of ASR pipelines return the same structure for the same inputs.""" speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] inputs = {"raw": audio, "sampling_rate": 16_000, "id": 1} chunked_output = speech_recognizer(inputs.copy(), chunk_length_s=30) non_chunked_output = speech_recognizer(inputs.copy()) assert chunked_output.keys() == non_chunked_output.keys(), ( "The output structure should be the same for chunked vs non-chunked versions of asr pipelines." )
Test that chunked vs non-chunked versions of ASR pipelines return the same structure for the same inputs.
test_input_parameter_passthrough
python
huggingface/transformers
tests/pipelines/test_pipelines_automatic_speech_recognition.py
https://github.com/huggingface/transformers/blob/master/tests/pipelines/test_pipelines_automatic_speech_recognition.py
Apache-2.0
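A minimal sketch of the dict input format the ASR test above passes to the pipeline (raw waveform plus sampling rate), called once with and once without chunking; the silent waveform is a stand-in for real audio.

import numpy as np

from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence as a stand-in waveform
inputs = {"raw": audio, "sampling_rate": 16_000}
print(asr(dict(inputs)))                     # non-chunked
print(asr(dict(inputs), chunk_length_s=30))  # chunked; same output structure expected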
def test_pipeline_assisted_generation(self): """Tests that we can run assisted generation in the pipeline""" model = "openai/whisper-tiny" pipe = pipeline("automatic-speech-recognition", model=model, assistant_model=model) # We can run the pipeline prompt = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:1]")["audio"] _ = pipe(prompt) # It is running assisted generation under the hood (e.g. flags incompatible with assisted gen will crash) with self.assertRaises(ValueError): _ = pipe(prompt, generate_kwargs={"num_beams": 2})
Tests that we can run assisted generation in the pipeline
test_pipeline_assisted_generation
python
huggingface/transformers
tests/pipelines/test_pipelines_automatic_speech_recognition.py
https://github.com/huggingface/transformers/blob/master/tests/pipelines/test_pipelines_automatic_speech_recognition.py
Apache-2.0
def require_ffmpeg(test_case): """ Decorator marking a test that requires FFmpeg. These tests are skipped when FFmpeg isn't installed. """ import subprocess try: subprocess.check_output(["ffmpeg", "-h"], stderr=subprocess.DEVNULL) return test_case except Exception: return unittest.skip(reason="test requires ffmpeg")(test_case)
Decorator marking a test that requires FFmpeg. These tests are skipped when FFmpeg isn't installed.
require_ffmpeg
python
huggingface/transformers
tests/pipelines/test_pipelines_automatic_speech_recognition.py
https://github.com/huggingface/transformers/blob/master/tests/pipelines/test_pipelines_automatic_speech_recognition.py
Apache-2.0
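A minimal sketch of how a decorator like `require_ffmpeg` above is applied; the test class and method are hypothetical and assume the decorator is in scope.

import unittest


class AudioToolTests(unittest.TestCase):  # hypothetical test class
    @require_ffmpeg  # assumes the decorator defined above is in scope
    def test_needs_ffmpeg(self):
        # Runs only on machines where `ffmpeg -h` succeeds; otherwise the test is skipped.
        self.assertTrue(True)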
def test_pipeline_with_task_parameters_no_side_effects(self): """ Regression test: certain pipeline flags, like `task`, modified the model configuration, causing unexpected side-effects """ # This checkpoint has task-specific parameters that will modify the behavior of the pipeline model = T5ForConditionalGeneration.from_pretrained("t5-small") self.assertTrue(model.config.num_beams == 1) # The task-specific parameters used to cause side-effects on `model.config` -- not anymore pipe = pipeline(model=model, tokenizer=AutoTokenizer.from_pretrained("t5-small"), task="translation_en_to_de") self.assertTrue(model.config.num_beams == 1) self.assertTrue(model.generation_config.num_beams == 1) # Under the hood: we now store a generation config in the pipeline. This generation config stores the # task-specific parameters. self.assertTrue(pipe.generation_config.num_beams == 4) # We can confirm that the task-specific parameters have an effect. (In this case, the default is `num_beams=1`, # which would crash when `num_return_sequences=4` is passed.) pipe("Hugging Face doesn't sell hugs.", num_return_sequences=4) with self.assertRaises(ValueError): pipe("Hugging Face doesn't sell hugs.", num_return_sequences=4, num_beams=1)
Regression test: certain pipeline flags, like `task`, modified the model configuration, causing unexpected side-effects
test_pipeline_with_task_parameters_no_side_effects
python
huggingface/transformers
tests/pipelines/test_pipelines_common.py
https://github.com/huggingface/transformers/blob/master/tests/pipelines/test_pipelines_common.py
Apache-2.0
def test_pipeline_assisted_generation(self): """Tests that we can run assisted generation in the pipeline""" model = "hf-internal-testing/tiny-random-MistralForCausalLM" pipe = pipeline("text-generation", model=model, assistant_model=model, max_new_tokens=2) # We can run the pipeline prompt = "Hello world" _ = pipe(prompt) # It is running assisted generation under the hood (e.g. flags incompatible with assisted gen will crash) with self.assertRaises(ValueError): _ = pipe(prompt, generate_kwargs={"num_beams": 2})
Tests that we can run assisted generation in the pipeline
test_pipeline_assisted_generation
python
huggingface/transformers
tests/pipelines/test_pipelines_text_generation.py
https://github.com/huggingface/transformers/blob/master/tests/pipelines/test_pipelines_text_generation.py
Apache-2.0
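A minimal sketch of assisted generation through the pipeline API, as exercised in the test above: passing `assistant_model` makes the pipeline use a draft model during decoding.

from transformers import pipeline

model_id = "hf-internal-testing/tiny-random-MistralForCausalLM"
pipe = pipeline("text-generation", model=model_id, assistant_model=model_id, max_new_tokens=2)
print(pipe("Hello world")[0]["generated_text"])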
def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = AqlmConfig() config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object
test_to_dict
python
huggingface/transformers
tests/quantization/aqlm_integration/test_aqlm.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/aqlm_integration/test_aqlm.py
Apache-2.0
def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = { "in_group_size": 32, "num_codebooks": 8, "nbits_per_codebook": 8, "linear_weights_not_to_quantize": ["lm_head.weight"], } quantization_config = AqlmConfig.from_dict(dict) self.assertEqual(dict["in_group_size"], quantization_config.in_group_size) self.assertEqual(dict["num_codebooks"], quantization_config.num_codebooks) self.assertEqual(dict["nbits_per_codebook"], quantization_config.nbits_per_codebook) self.assertEqual(dict["linear_weights_not_to_quantize"], quantization_config.linear_weights_not_to_quantize)
Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict
test_from_dict
python
huggingface/transformers
tests/quantization/aqlm_integration/test_aqlm.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/aqlm_integration/test_aqlm.py
Apache-2.0
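A minimal sketch of the round trip checked by the two config tests above: a quantization config can be serialized to a plain dict and rebuilt from it without losing its values.

from transformers import AqlmConfig

config = AqlmConfig(in_group_size=32, num_codebooks=8, nbits_per_codebook=8)
as_dict = config.to_dict()
rebuilt = AqlmConfig.from_dict(as_dict)
assert rebuilt.in_group_size == config.in_group_size
assert rebuilt.num_codebooks == config.num_codebooks
assert rebuilt.nbits_per_codebook == config.nbits_per_codebook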
def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from aqlm import QuantizedLinear from transformers.integrations import replace_with_aqlm_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = AqlmConfig() with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_aqlm_linear(model, quantization_config=quantization_config) nb_aqlm_linear = 0 for module in model.modules(): if isinstance(module, QuantizedLinear): nb_aqlm_linear += 1 self.assertEqual(nb_linears, nb_aqlm_linear) # Try with `linear_weights_not_to_quantize` with init_empty_weights(): model = OPTForCausalLM(config) model, _ = replace_with_aqlm_linear( model, quantization_config=quantization_config, linear_weights_not_to_quantize=["lm_head.weight"] ) nb_aqlm_linear = 0 for module in model.modules(): if isinstance(module, QuantizedLinear): nb_aqlm_linear += 1 self.assertEqual(nb_linears - 1, nb_aqlm_linear)
Simple test that checks if the quantized model has been converted properly
test_quantized_model_conversion
python
huggingface/transformers
tests/quantization/aqlm_integration/test_aqlm.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/aqlm_integration/test_aqlm.py
Apache-2.0
def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
Simple test that checks if the quantized model is working properly
test_quantized_model
python
huggingface/transformers
tests/quantization/aqlm_integration/test_aqlm.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/aqlm_integration/test_aqlm.py
Apache-2.0
def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
Simple test that checks if the quantized model is working properly after being saved and loaded
test_save_pretrained
python
huggingface/transformers
tests/quantization/aqlm_integration/test_aqlm.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/aqlm_integration/test_aqlm.py
Apache-2.0
def test_quantized_model_multi_gpu(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto") self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1}) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
Simple test that checks if the quantized model is working properly with multiple GPUs
test_quantized_model_multi_gpu
python
huggingface/transformers
tests/quantization/aqlm_integration/test_aqlm.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/aqlm_integration/test_aqlm.py
Apache-2.0
def test_quantized_model_compile(self): """ Simple test that checks if the quantized model is working properly """ # Sample tokens greedily def decode_one_tokens(model, cur_token, input_pos, cache_position, past_key_values): logits = model( cur_token, position_ids=input_pos, cache_position=cache_position, past_key_values=past_key_values, return_dict=False, use_cache=True, )[0] new_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int) return new_token # Tokenize the test input input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)["input_ids"] seq_length = input_ids.shape[1] # Setup static KV cache for generation past_key_values = StaticCache( config=self.quantized_model.config, max_batch_size=1, max_cache_len=seq_length + self.max_new_tokens + 1, device=torch_device, dtype=self.quantized_model.config._pre_quantization_dtype, ) # Allocate token ids to be generated and copy prefix ids cache_position = torch.arange(seq_length, device=torch_device) generated_ids = torch.zeros(1, seq_length + self.max_new_tokens, dtype=torch.int, device=torch_device) generated_ids[:, cache_position] = input_ids.to(torch_device).to(torch.int) # Do a forward pass to fill the prefix cache and compile the kernels if necessary logits = self.quantized_model( input_ids, cache_position=cache_position, past_key_values=past_key_values, return_dict=False, use_cache=True, )[0] next_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int) generated_ids[:, [seq_length]] = next_token with torch.no_grad(): # Compile the CUDA graph decode_one_tokens = torch.compile(decode_one_tokens, mode="reduce-overhead", fullgraph=True) # Generate tokens one by one cache_position = torch.tensor([seq_length + 1], device=torch_device) for _ in range(1, self.max_new_tokens): with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True): next_token = decode_one_tokens( self.quantized_model, next_token.clone(), None, cache_position, past_key_values ) generated_ids.index_copy_(1, cache_position, next_token) cache_position += 1 # Check generated text self.assertEqual(self.tokenizer.decode(generated_ids[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
Simple test that checks if the quantized model is working properly
test_quantized_model_compile
python
huggingface/transformers
tests/quantization/aqlm_integration/test_aqlm.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/aqlm_integration/test_aqlm.py
Apache-2.0
def test_wrong_backend(self): """ Simple test that checks if a user passes a wrong backend an error is raised """ # This should work fine _ = AwqConfig(bits=4) with self.assertRaises(ValueError): AwqConfig(bits=4, backend="") # These should work fine _ = AwqConfig(bits=4, version="GEMM") _ = AwqConfig(bits=4, version="gemm") with self.assertRaises(ValueError): AwqConfig(bits=4, backend="unexisting-backend") # Only cuda and xpu devices can run this function support_llm_awq = False device_type, major = get_device_properties() if device_type == "cuda" and major >= 8: support_llm_awq = True elif device_type == "xpu": support_llm_awq = True if support_llm_awq: # LLMAWQ should work on an A100 AwqConfig(bits=4, backend="llm-awq") else: # LLMAWQ does not work on a T4 with self.assertRaises(ValueError): AwqConfig(bits=4, backend="llm-awq")
Simple test that checks if a user passes a wrong backend an error is raised
test_wrong_backend
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = AwqConfig(bits=4) config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object
test_to_dict
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = {"bits": 2, "zero_point": False, "backend": "autoawq"} quantization_config = AwqConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) self.assertEqual(dict["zero_point"], quantization_config.zero_point) self.assertEqual(dict["backend"], quantization_config.backend)
Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict
test_from_dict
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV from transformers.integrations.awq import replace_with_awq_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = AwqConfig(bits=4) with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_awq_linear(model, quantization_config=quantization_config) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears, nb_awq_linear) # Try with `modules_not_to_convert` with init_empty_weights(): model = OPTForCausalLM(config) model, _ = replace_with_awq_linear( model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"] ) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears - 1, nb_awq_linear)
Simple test that checks if the quantized model has been converted properly
test_quantized_model_conversion
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
Simple test that checks if the quantized model is working properly
test_quantized_model
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_quantized_model_bf16(self): """ Simple test that checks if the quantized model is working properly with bf16 """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.bfloat16).to( torch_device ) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_BF16)
Simple test that checks if the quantized model is working properly with bf16
test_quantized_model_bf16
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_quantized_model_exllama(self): """ Simple test that checks if the quantized model is working properly with exllama backend """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantization_config = AwqConfig(version="exllama") quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, device_map=torch_device ) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_EXLLAMA)
Simple test that checks if the quantized model is working properly with exllama backend
test_quantized_model_exllama
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_quantized_model_no_device_map(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name).to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
Simple test that checks if the quantized model is working properly
test_quantized_model_no_device_map
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
Simple test that checks if the quantized model is working properly after being saved and loaded
test_save_pretrained
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_quantized_model_multi_accelerator(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto") self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1}) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
Simple test that checks if the quantized model is working properly with multiple GPUs
test_quantized_model_multi_accelerator
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_quantized_model_no_k_proj_quantized(self): """ Simple test that checks if a quantized model with an unquantized k_proj layer is working properly """ dummy_input = torch.LongTensor([[0, 1, 0]]).to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_with_no_k_proj_quantized).to(torch_device) self.assertTrue(isinstance(quantized_model.model.decoder.layers[0].self_attn.k_proj, torch.nn.Linear)) self.assertFalse(isinstance(quantized_model.model.decoder.layers[0].self_attn.v_proj, torch.nn.Linear)) EXPECTED_OUTPUT = torch.LongTensor([[0, 1, 0, 50118, 50118, 133, 248, 12, 134, 16, 10, 372, 2031]]).to( torch_device ) output = quantized_model.generate(dummy_input, max_new_tokens=10) self.assertTrue((EXPECTED_OUTPUT == output).all())
Simple test that checks if a quantized model with an unquantized k_proj layer is working properly
test_quantized_model_no_k_proj_quantized
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_raise_save_pretrained(self): """ Test that `save_pretrained` is effectively blocked for fused models """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, low_cpu_mem_usage=True, revision=self.model_revision, ).to(torch_device) self._check_fused_modules(model) with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname)
Test that `save_pretrained` is effectively blocked for fused models
test_raise_save_pretrained
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_fused_modules_to_not_convert(self): """ Test if fused modules + modules_to_not_convert work as expected """ model_id = "hf-internal-testing/Mixtral-tiny-AWQ" quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( model_id, quantization_config=quantization_config, low_cpu_mem_usage=True, ).to(torch_device) # Check if model has been correctly fused self._check_fused_modules(model) # Checks if the modules_to_not_convert (here gate layer) is a Linear self.assertTrue(isinstance(model.model.layers[0].block_sparse_moe.gate, torch.nn.Linear))
Test if fused modules + modules_to_not_convert work as expected
test_fused_modules_to_not_convert
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_generation_fused(self): """ Test generation quality for fused models - single batch case """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, low_cpu_mem_usage=True, revision=self.model_revision, ).to(torch_device) self._check_fused_modules(model) tokenizer = AutoTokenizer.from_pretrained(self.model_name, revision=self.model_revision) inputs = tokenizer(self.prompt, return_tensors="pt").to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION)
Test generation quality for fused models - single batch case
test_generation_fused
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_generation_fused_batched(self): """ Test generation quality for fused models - multi batch case """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, low_cpu_mem_usage=True, revision=self.model_revision, ).to(torch_device) self._check_fused_modules(model) tokenizer = AutoTokenizer.from_pretrained(self.model_name, revision=self.model_revision) tokenizer.pad_token_id = tokenizer.eos_token_id inputs = tokenizer([self.prompt, self.prompt], return_tensors="pt", padding=True).to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION)
Test generation quality for fused models - multi batch case
test_generation_fused_batched
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_generation_custom_model(self): """ Test generation quality for fused models using custom fused map. """ quantization_config = AwqConfig( bits=4, fuse_max_seq_len=512, modules_to_fuse={ "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], "mlp": ["gate_proj", "up_proj", "down_proj"], "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], "use_alibi": False, "hidden_size": 4096, "num_attention_heads": 32, "num_key_value_heads": 8, }, ) model = AutoModelForCausalLM.from_pretrained( self.custom_mapping_model_id, quantization_config=quantization_config, device_map="balanced", revision=self.custom_model_revision, ) self._check_fused_modules(model) tokenizer = AutoTokenizer.from_pretrained(self.custom_mapping_model_id, revision=self.custom_model_revision) prompt = "Hello" inputs = tokenizer(prompt, return_tensors="pt").to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION_CUSTOM_MODEL)
Test generation quality for fused models using custom fused map.
test_generation_custom_model
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_generation_mixtral_fused(self): """ Text generation test for Mixtral + AWQ + fused """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=1024, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.mixtral_model_name, quantization_config=quantization_config, device_map="auto", revision=self.mixtral_model_revision, ) tokenizer = AutoTokenizer.from_pretrained(self.mixtral_model_name) tokenizer.pad_token = tokenizer.eos_token inputs = tokenizer([self.prompt, self.prompt], return_tensors="pt", padding=True).to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION_MIXTRAL)
Text generation test for Mixtral + AWQ + fused
test_generation_mixtral_fused
python
huggingface/transformers
tests/quantization/autoawq/test_awq.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoawq/test_awq.py
Apache-2.0
def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Simple test that checks if the quantized model is working properly
test_quantized_model
python
huggingface/transformers
tests/quantization/autoround/test_auto_round.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoround/test_auto_round.py
Apache-2.0
def test_quantized_model_bf16(self): """ Simple test that checks if the quantized model is working properly with bf16 """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantization_config = AutoRoundConfig(backend="triton") quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.bfloat16, device_map=self.device_map, quantization_config=quantization_config, ) output = quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Simple test that checks if the quantized model is working properly with bf16
test_quantized_model_bf16
python
huggingface/transformers
tests/quantization/autoround/test_auto_round.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoround/test_auto_round.py
Apache-2.0
def test_quantized_model_on_cpu(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt") quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype="auto") output = quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Simple test that checks if the quantized model is working properly
test_quantized_model_on_cpu
python
huggingface/transformers
tests/quantization/autoround/test_auto_round.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoround/test_auto_round.py
Apache-2.0
def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ ## some backends like marlin/ipex will repack the weight that caused the weight shape changed with tempfile.TemporaryDirectory() as tmpdirname: quantization_config = AutoRoundConfig(backend="triton") quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.device_map, torch_dtype=torch.float16, quantization_config=quantization_config, ) quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=40, do_sample=False) output_tokens = self.tokenizer.decode(output[0], skip_special_tokens=True) self.assertIn(output_tokens, self.EXPECTED_OUTPUTS)
Simple test that checks if the quantized model is working properly after being saved and loaded
test_save_pretrained
python
huggingface/transformers
tests/quantization/autoround/test_auto_round.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoround/test_auto_round.py
Apache-2.0
def test_quantized_model_multi_accelerator(self): """ Simple test that checks if the quantized model is working properly with multiple accelerators """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantization_config = AutoRoundConfig(backend="triton") quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map="auto", quantization_config=quantization_config, torch_dtype="auto" ) output = quantized_model.generate(**input_ids, max_new_tokens=40, do_sample=False) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Simple test that checks if the quantized model is working properly with multiple accelerators
test_quantized_model_multi_accelerator
python
huggingface/transformers
tests/quantization/autoround/test_auto_round.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoround/test_auto_round.py
Apache-2.0
def test_convert_from_gptq(self): """ Simple test that checks if auto-round works properly with the gptq format """ model_name = "ybelkada/opt-125m-gptq-4bit" quantization_config = AutoRoundConfig() model = AutoModelForCausalLM.from_pretrained( model_name, device_map=torch_device, quantization_config=quantization_config, torch_dtype="auto" ) tokenizer = AutoTokenizer.from_pretrained(model_name) text = "There is a girl who likes adventure," inputs = tokenizer(text, return_tensors="pt").to(model.device) tokenizer.decode(model.generate(**inputs, max_new_tokens=5)[0])
Simple test that checks if auto-round works properly with the gptq format
test_convert_from_gptq
python
huggingface/transformers
tests/quantization/autoround/test_auto_round.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoround/test_auto_round.py
Apache-2.0
def test_convert_from_awq_cpu(self): """ Simple test that checks if auto-round works properly with the awq format """ model_name = "casperhansen/opt-125m-awq" quantization_config = AutoRoundConfig() model = AutoModelForCausalLM.from_pretrained( model_name, device_map="cpu", quantization_config=quantization_config, torch_dtype="auto" ) tokenizer = AutoTokenizer.from_pretrained(model_name) text = "There is a girl who likes adventure," inputs = tokenizer(text, return_tensors="pt").to(model.device) tokenizer.decode(model.generate(**inputs, max_new_tokens=5)[0])
Simple test that checks if auto-round works properly with the awq format
test_convert_from_awq_cpu
python
huggingface/transformers
tests/quantization/autoround/test_auto_round.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoround/test_auto_round.py
Apache-2.0
def test_mixed_bits(self): """ Simple test that checks if auto-round works properly with mixed bits """ model_name = "facebook/opt-125m" model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto") tokenizer = AutoTokenizer.from_pretrained(model_name) layer_config = { "model.decoder.layers.0.self_attn.k_proj": {"bits": 8}, "model.decoder.layers.6.self_attn.out_proj": {"bits": 2, "group_size": 32}, } bits, group_size, sym = 4, 128, True from auto_round import AutoRound autoround = AutoRound(model, tokenizer, bits=bits, group_size=group_size, sym=sym, layer_config=layer_config) with tempfile.TemporaryDirectory() as tmpdirname: autoround.quantize_and_save(output_dir=tmpdirname) model = AutoModelForCausalLM.from_pretrained( tmpdirname, torch_dtype=torch.float16, device_map=torch_device ) text = "There is a girl who likes adventure," inputs = tokenizer(text, return_tensors="pt").to(model.device) tokenizer.decode(model.generate(**inputs, max_new_tokens=5)[0])
Simple test that checks if auto-round works properly with mixed bits
test_mixed_bits
python
huggingface/transformers
tests/quantization/autoround/test_auto_round.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/autoround/test_auto_round.py
Apache-2.0
def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = BitNetQuantConfig() config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object
test_to_dict
python
huggingface/transformers
tests/quantization/bitnet_integration/test_bitnet.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bitnet_integration/test_bitnet.py
Apache-2.0
def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_text = "What are we having for dinner?" expected_output = "What are we having for dinner? What are we going to do for fun this weekend?" input_ids = self.tokenizer(input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=11, do_sample=False) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), expected_output)
Simple test that checks if the quantized model is working properly
test_quantized_model
python
huggingface/transformers
tests/quantization/bitnet_integration/test_bitnet.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bitnet_integration/test_bitnet.py
Apache-2.0
def test_packing_unpacking(self): """ Simple test of the packing and unpacking logic """ from transformers.integrations import pack_weights, unpack_weights u = torch.randint(0, 255, (256, 256), dtype=torch.uint8) unpacked_u = unpack_weights(u, dtype=torch.bfloat16) repacked_u = pack_weights(unpacked_u) for i in range(u.shape[0]): for j in range(u.shape[1]): self.assertEqual(repacked_u[i][j], u[i][j])
Simple test of the packing and unpacking logic
test_packing_unpacking
python
huggingface/transformers
tests/quantization/bitnet_integration/test_bitnet.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bitnet_integration/test_bitnet.py
Apache-2.0
def test_weights_dtype(self): """ test the weights dtype after loading """ self_attn_q = self.quantized_model.model.layers[0].self_attn.q_proj.weight self_attn_k = self.quantized_model.model.layers[0].self_attn.k_proj.weight self_attn_v = self.quantized_model.model.layers[0].self_attn.v_proj.weight self_attn_o = self.quantized_model.model.layers[0].self_attn.o_proj.weight mlp_gate = self.quantized_model.model.layers[0].mlp.gate_proj.weight mlp_up = self.quantized_model.model.layers[0].mlp.up_proj.weight mlp_down = self.quantized_model.model.layers[0].mlp.down_proj.weight self.assertEqual(self_attn_q.dtype, torch.uint8) self.assertEqual(self_attn_k.dtype, torch.uint8) self.assertEqual(self_attn_v.dtype, torch.uint8) self.assertEqual(self_attn_o.dtype, torch.uint8) self.assertEqual(mlp_up.dtype, torch.uint8) self.assertEqual(mlp_gate.dtype, torch.uint8) self.assertEqual(mlp_down.dtype, torch.uint8)
test the weights dtype after loading
test_weights_dtype
python
huggingface/transformers
tests/quantization/bitnet_integration/test_bitnet.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bitnet_integration/test_bitnet.py
Apache-2.0
def test_replace_with_bitlinear_shape(self): """ test that the BitNet layer weight shapes are correct, and the weight_scale is correctly initialized to 1 """ from transformers.integrations import replace_with_bitnet_linear out_features = 1024 in_features = 512 class SimpleLinearModule(torch.nn.Module): """ Simple class to test BitLinear """ def __init__( self, in_features: int = in_features, out_features: int = out_features, bias: bool = False, ): super().__init__() self.linear = torch.nn.Linear(in_features=in_features, out_features=out_features, bias=bias) def forward(self, x): return self.linear(x) model = SimpleLinearModule() replace_with_bitnet_linear(model) self.assertEqual(list(model.linear.weight.shape), [out_features // 4, in_features]) self.assertEqual(model.linear.weight_scale, 1)
Test that the BitNet layer weight shapes are correct, and the weight_scale is correctly initialized to 1
test_replace_with_bitlinear_shape
python
huggingface/transformers
tests/quantization/bitnet_integration/test_bitnet.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bitnet_integration/test_bitnet.py
Apache-2.0
def tearDown(self):
        r"""
        TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
        avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
        """
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        backend_empty_cache(torch_device)
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
tearDown
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_quantization_num_parameters(self):
        r"""
        Test if the number of returned parameters is correct

        See: https://github.com/huggingface/transformers/issues/25978
        """
        num_params_4bit = self.model_4bit.num_parameters()
        num_params_fp16 = self.model_fp16.num_parameters()

        self.assertEqual(num_params_4bit, num_params_fp16)
Test if the number of returned parameters is correct See: https://github.com/huggingface/transformers/issues/25978
test_quantization_num_parameters
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_quantization_config_json_serialization(self):
        r"""
        A simple test to check if the quantization config is correctly serialized and deserialized
        """
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()
A simple test to check if the quantization config is correctly serialized and deserialized
test_quantization_config_json_serialization
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_memory_footprint(self):
        r"""
        A simple test to check if the model conversion has been done correctly by checking on the
        memory footprint of the converted model and the class type of the linear layers of the converted models
        """
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE, delta=1e-5)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)
A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models
test_memory_footprint
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_original_dtype(self):
        r"""
        A simple test to check if the model successfully stores the original dtype
        """
        self.assertTrue(hasattr(self.model_4bit.config, "_pre_quantization_dtype"))
        self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
        self.assertTrue(self.model_4bit.config._pre_quantization_dtype == torch.float16)
A simple test to check if the model successfully stores the original dtype
test_original_dtype
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_linear_are_4bit(self):
        r"""
        A simple test to check if the model conversion has been done correctly by checking on the
        memory footprint of the converted model and the class type of the linear layers of the converted models
        """
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)
A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models
test_linear_are_4bit
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_rwkv_4bit(self):
        r"""
        A simple test to check if 4-bit RWKV inference works as expected.
        """
        model_id = "RWKV/rwkv-4-169m-pile"

        quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True)
        model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
        tok = AutoTokenizer.from_pretrained(model_id)

        text = "Hello my name is"
        input_ids = tok.encode(text, return_tensors="pt").to(torch_device)

        _ = model.generate(input_ids, max_new_tokens=30)
A simple test to check if 4-bit RWKV inference works as expected.
test_rwkv_4bit
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
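The record above only turns on double quantization; in practice the 4-bit knobs are usually set together. A minimal sketch of a more fully specified config, reusing the same checkpoint and only the kwargs that also appear in the serialization test further down:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model_id = "RWKV/rwkv-4-169m-pile"  # same checkpoint as the test above; any causal LM repo id would do

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # NF4 data type for the 4-bit weights
    bnb_4bit_use_double_quant=True,         # also quantize the quantization constants
    bnb_4bit_compute_dtype=torch.bfloat16,  # dtype used for the matmul compute
)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config, device_map="auto")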
def test_generate_quality(self):
        r"""
        Test the generation quality of the quantized model and see that we are matching the expected output.
        Given that we are operating on small numbers + the testing model is relatively small, we might not get
        the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
        """
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(
            input_ids=encoded_input["input_ids"].to(self.model_4bit.device), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
test_generate_quality
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_generate_quality_config(self):
        r"""
        Test that loading the model with the config is equivalent
        """
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(model_4bit_from_config.device), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Test that loading the model with the config is equivalent
test_generate_quality_config
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_generate_quality_dequantize(self):
        r"""
        Test that loading the model and dequantizing it produces correct results
        """
        bnb_config = BitsAndBytesConfig(load_in_4bit=True)

        model_4bit = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        model_4bit.dequantize()

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit.generate(
            input_ids=encoded_input["input_ids"].to(model_4bit.device), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Test that loading the model and dequantizing it produces correct results
test_generate_quality_dequantize
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_device_and_dtype_assignment(self):
        r"""
        Test whether attempting to change the device or cast the dtype of a model
        after converting it to 4-bit precision will raise an appropriate error.
        The test ensures that such operations are prohibited on 4-bit models
        to prevent invalid conversions.
        """
        # Moving with `to` or `cuda` is not supported with versions < 0.43.2.
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.2"):
            with self.assertRaises(ValueError):
                # Tries with `str`
                self.model_4bit.to("cpu")

            with self.assertRaises(ValueError):
                # Tries with a `device`
                self.model_4bit.to(torch.device("cuda:0"))

            with self.assertRaises(ValueError):
                # Tries with `cuda`
                self.model_4bit.cuda()

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries to cast the 4-bit model to float32 using `float()`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries to cast the 4-bit model to float16 using `half()`
            self.model_4bit.half()

        # Test if we did not break anything
        self.model_4bit.to(torch.device(torch_device))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(
            input_ids=encoded_input["input_ids"].to(self.model_fp16.device), max_new_tokens=10
        )

        if torch_device in ["cuda", "xpu"]:
            # Check that this does not throw an error
            _ = self.model_fp16.to(torch_device)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()
Test whether attempting to change the device or cast the dtype of a model after converting it to 4-bit precision will raise an appropriate error. The test ensures that such operations are prohibited on 4-bit models to prevent invalid conversions.
test_device_and_dtype_assignment
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_fp32_4bit_conversion(self):
        r"""
        Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
        """
        model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
test_fp32_4bit_conversion
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_bnb_4bit_wrong_config(self):
        r"""
        Test whether creating a bnb config with unsupported values leads to errors.
        """
        with self.assertRaises(ValueError):
            _ = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_storage="add")
Test whether creating a bnb config with unsupported values leads to errors.
test_bnb_4bit_wrong_config
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
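For contrast with the rejected value above, a short sketch of a config that should pass validation, assuming the default storage dtype name "uint8" is among the accepted values for `bnb_4bit_quant_storage`:

from transformers import BitsAndBytesConfig

# "add" is not a dtype, hence the ValueError above; a real storage dtype name is expected to validate.
config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_storage="uint8")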
def test_inference_without_keep_in_fp32(self):
        r"""
        Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
        `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test
        both cases.
        """
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `google-t5/t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules
Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases.
test_inference_without_keep_in_fp32
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_inference_with_keep_in_fp32(self):
        r"""
        Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
        `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test
        both cases.
        """
        from transformers import T5ForConditionalGeneration

        # test with `google-t5/t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
        _ = model.generate(**encoded_input)
Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases.
test_inference_with_keep_in_fp32
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def tearDown(self):
        r"""
        TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
        avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
        """
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        backend_empty_cache(torch_device)
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
tearDown
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_correct_head_class(self):
        r"""
        A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification)
        are kept in their native class.
        """
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class.
test_correct_head_class
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def tearDown(self):
        r"""
        TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
        avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
        """
        if hasattr(self, "pipe"):
            del self.pipe

        gc.collect()
        backend_empty_cache(torch_device)
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
tearDown
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_pipeline(self):
        r"""
        The aim of this test is to verify that the mixed 4bit is compatible with `pipeline` from transformers. Since
        we used pipeline for inference speed benchmarking we want to make sure that this feature does not break anything
        on pipeline.
        """
        # self._clear_cuda_cache()
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={
                "device_map": "auto",
                "load_in_4bit": True,
                # float16 isn't supported on CPU, use bfloat16 instead
                "torch_dtype": torch.bfloat16 if torch_device == "cpu" else torch.float16,
            },
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
The aim of this test is to verify that the mixed 4bit is compatible with `pipeline` from transformers. Since we used pipeline for inference speed benchmarking we want to make sure that this feature does not break anything on pipeline.
test_pipeline
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_multi_gpu_loading(self):
        r"""
        This tests that the model has been loaded and can be used correctly on a multi-GPU setup.
        Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB
        in total, so 3GB should suffice.
        """
        device_map = {
            "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0,
            "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0,
            "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0,
            "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 1, "transformer.h.11": 1,
            "transformer.h.12": 1, "transformer.h.13": 1, "transformer.h.14": 1, "transformer.h.15": 1,
            "transformer.h.16": 1, "transformer.h.17": 0, "transformer.h.18": 0, "transformer.h.19": 0,
            "transformer.h.20": 0, "transformer.h.21": 0, "transformer.h.22": 0, "transformer.h.23": 1,
            "transformer.ln_f": 0,
        }

        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map=device_map
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(
            input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB in total, so 3GB should suffice
test_multi_gpu_loading
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def test_serialization(self, quant_type="nf4", double_quant=True, safe_serialization=True):
        r"""
        Test whether it is possible to serialize a model in 4-bit. Uses most typical params as default.
        See ExtendedSerializationTest class for more params combinations.
        """
        tokenizer = AutoTokenizer.from_pretrained(self.model_name)

        self.quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type=quant_type,
            bnb_4bit_use_double_quant=double_quant,
            bnb_4bit_compute_dtype=torch.bfloat16,
        )
        model_0 = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            quantization_config=self.quantization_config,
            device_map=torch_device,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            model_0.save_pretrained(tmpdirname, safe_serialization=safe_serialization)

            config = AutoConfig.from_pretrained(tmpdirname)
            self.assertTrue(hasattr(config, "quantization_config"))

            model_1 = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device)

        # checking quantized linear module weight
        linear = get_some_linear_layer(model_1)
        self.assertTrue(linear.weight.__class__ == bnb.nn.Params4bit)
        self.assertTrue(hasattr(linear.weight, "quant_state"))
        self.assertTrue(linear.weight.quant_state.__class__ == bnb.functional.QuantState)

        # checking memory footprint
        self.assertAlmostEqual(model_0.get_memory_footprint() / model_1.get_memory_footprint(), 1, places=2)

        # Matching all parameters and their quant_state items:
        d0 = dict(model_0.named_parameters())
        d1 = dict(model_1.named_parameters())
        self.assertTrue(d0.keys() == d1.keys())

        for k in d0.keys():
            self.assertTrue(d0[k].shape == d1[k].shape)
            self.assertTrue(d0[k].device.type == d1[k].device.type)
            self.assertTrue(d0[k].device == d1[k].device)
            self.assertTrue(d0[k].dtype == d1[k].dtype)
            self.assertTrue(torch.equal(d0[k], d1[k].to(d0[k].device)))

            if isinstance(d0[k], bnb.nn.modules.Params4bit):
                for v0, v1 in zip(
                    d0[k].quant_state.as_dict().values(),
                    d1[k].quant_state.as_dict().values(),
                ):
                    if isinstance(v0, torch.Tensor):
                        # The absmax will not be saved in the quant_state when using NF4 in CPU
                        if v0.numel() != 0:
                            self.assertTrue(torch.equal(v0, v1.to(v0.device)))
                    else:
                        self.assertTrue(v0 == v1)

        # comparing forward() outputs
        encoded_input = tokenizer(self.input_text, return_tensors="pt").to(torch_device)
        out_0 = model_0(**encoded_input)
        out_1 = model_1(**encoded_input)
        torch.testing.assert_close(out_0["logits"], out_1["logits"], rtol=0.05, atol=0.05)

        # comparing generate() outputs
        encoded_input = tokenizer(self.input_text, return_tensors="pt").to(torch_device)
        output_sequences_0 = model_0.generate(**encoded_input, max_new_tokens=10)
        output_sequences_1 = model_1.generate(**encoded_input, max_new_tokens=10)

        def _decode(token):
            return tokenizer.decode(token, skip_special_tokens=True)

        self.assertEqual(
            [_decode(x) for x in output_sequences_0],
            [_decode(x) for x in output_sequences_1],
        )
Test whether it is possible to serialize a model in 4-bit. Uses most typical params as default. See ExtendedSerializationTest class for more params combinations.
test_serialization
python
huggingface/transformers
tests/quantization/bnb/test_4bit.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_4bit.py
Apache-2.0
def tearDown(self):
        r"""
        TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
        avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
        """
        del self.model_fp16
        del self.model_8bit

        gc.collect()
        backend_empty_cache(torch_device)
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
tearDown
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_quantization_config_json_serialization(self):
        r"""
        A simple test to check if the quantization config is correctly serialized and deserialized
        """
        config = self.model_8bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()
A simple test to check if the quantization config is correctly serialized and deserialized
test_quantization_config_json_serialization
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_original_dtype(self):
        r"""
        A simple test to check if the model successfully stores the original dtype
        """
        self.assertTrue(hasattr(self.model_8bit.config, "_pre_quantization_dtype"))
        self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
        self.assertTrue(self.model_8bit.config._pre_quantization_dtype == torch.float16)
A simple test to check if the model successfully stores the original dtype
test_original_dtype
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_memory_footprint(self):
        r"""
        A simple test to check if the model conversion has been done correctly by checking on the
        memory footprint of the converted model and the class type of the linear layers of the converted models
        """
        from bitsandbytes.nn import Int8Params

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_8bit = self.model_8bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE, delta=1e-5)

        self.assertTrue(get_some_linear_layer(self.model_8bit).weight.__class__ == Int8Params)
A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models
test_memory_footprint
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_linear_are_8bit(self):
        r"""
        A simple test to check if the model conversion has been done correctly by checking on the
        memory footprint of the converted model and the class type of the linear layers of the converted models
        """
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_8bit.get_memory_footprint()

        for name, module in self.model_8bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    self.assertTrue(module.weight.dtype == torch.int8)
A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models
test_linear_are_8bit
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_llm_skip(self):
        r"""
        A simple test to check if `llm_int8_skip_modules` works as expected
        """
        quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=["classifier"])
        seq_classification_model = AutoModelForSequenceClassification.from_pretrained(
            "FacebookAI/roberta-large-mnli", quantization_config=quantization_config
        )
        self.assertTrue(seq_classification_model.roberta.encoder.layer[0].output.dense.weight.dtype == torch.int8)
        self.assertTrue(
            isinstance(seq_classification_model.roberta.encoder.layer[0].output.dense, bnb.nn.Linear8bitLt)
        )

        self.assertTrue(isinstance(seq_classification_model.classifier.dense, nn.Linear))
        self.assertTrue(seq_classification_model.classifier.dense.weight.dtype != torch.int8)
        self.assertTrue(isinstance(seq_classification_model.classifier.out_proj, nn.Linear))
        self.assertTrue(seq_classification_model.classifier.out_proj != torch.int8)
A simple test to check if `llm_int8_skip_modules` works as expected
test_llm_skip
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
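The same mechanism applies to other head types. A minimal sketch for a causal LM that keeps the output head unquantized; "bigscience/bloom-560m" is just an illustrative checkpoint, not the one used in these tests:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Skip the LM head so it stays in a floating-point dtype while the rest is int8.
quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=["lm_head"])
model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", quantization_config=quantization_config)
assert model.lm_head.weight.dtype != torch.int8  # the skipped module is not quantized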
def test_generate_quality(self):
        r"""
        Test the generation quality of the quantized model and see that we are matching the expected output.
        Given that we are operating on small numbers + the testing model is relatively small, we might not get
        the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
        """
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_8bit.generate(
            input_ids=encoded_input["input_ids"].to(self.model_8bit.device), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
test_generate_quality
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_generate_quality_config(self):
        r"""
        Test that loading the model with the config is equivalent
        """
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_8bit = True

        model_8bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_8bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(model_8bit_from_config.device), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Test that loading the model with the config is equivalent
test_generate_quality_config
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_generate_quality_dequantize(self):
        r"""
        Test that loading the model and dequantizing it produces correct results
        """
        bnb_config = BitsAndBytesConfig(load_in_8bit=True)

        model_8bit = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        model_8bit.dequantize()

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_8bit.generate(
            input_ids=encoded_input["input_ids"].to(model_8bit.device), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Test that loading the model and dequantizing it produces correct results
test_generate_quality_dequantize
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_raise_if_config_and_load_in_8bit(self):
        r"""
        Test that loading the model with the config and `load_in_8bit` raises an error
        """
        bnb_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_8bit=True,
                device_map="auto",
                llm_int8_enable_fp32_cpu_offload=True,
            )
Test that loading the model with the config and `load_in_8bit` raises an error
test_raise_if_config_and_load_in_8bit
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_device_and_dtype_assignment(self):
        r"""
        Test whether attempting to change the device or cast the dtype of a model
        after converting it to 8-bit precision will raise an appropriate error.
        The test ensures that such operations are prohibited on 8-bit models
        to prevent invalid conversions.
        """
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_8bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_8bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_8bit.to(torch.device(torch_device))

        with self.assertRaises(ValueError):
            # Tries to cast the 8-bit model to float32 using `float()`
            self.model_8bit.float()

        with self.assertRaises(ValueError):
            # Tries to cast the 8-bit model to float16 using `half()`
            self.model_8bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(
            input_ids=encoded_input["input_ids"].to(self.model_fp16.device), max_new_tokens=10
        )

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()
Test whether attempting to change the device or cast the dtype of a model after converting it to 8-bit precision will raise an appropriate error. The test ensures that such operations are prohibited on 8-bit models to prevent invalid conversions.
test_device_and_dtype_assignment
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_fp32_int8_conversion(self):
        r"""
        Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly.
        """
        model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small", load_in_8bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly.
test_fp32_int8_conversion
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_int8_serialization(self):
        r"""
        Test whether it is possible to serialize a model in 8-bit.
        """
        from bitsandbytes.nn import Int8Params

        with tempfile.TemporaryDirectory() as tmpdirname:
            self.model_8bit.save_pretrained(tmpdirname)

            # check that the file `quantization_config` is present
            config = AutoConfig.from_pretrained(tmpdirname)
            self.assertTrue(hasattr(config, "quantization_config"))

            model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto")

            linear = get_some_linear_layer(model_from_saved)
            self.assertTrue(linear.weight.__class__ == Int8Params)
            self.assertTrue(hasattr(linear.weight, "SCB"))

            # generate
            encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
            output_sequences = model_from_saved.generate(
                input_ids=encoded_input["input_ids"].to(model_from_saved.device), max_new_tokens=10
            )

            self.assertIn(
                self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS
            )
Test whether it is possible to serialize a model in 8-bit.
test_int8_serialization
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_int8_serialization_regression(self):
        r"""
        Test whether it is possible to serialize a model in 8-bit without using safetensors
        """
        from bitsandbytes.nn import Int8Params

        with tempfile.TemporaryDirectory() as tmpdirname:
            self.model_8bit.save_pretrained(tmpdirname, safe_serialization=False)

            # check that the file `quantization_config` is present
            config = AutoConfig.from_pretrained(tmpdirname)
            self.assertTrue(hasattr(config, "quantization_config"))

            model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto")

            linear = get_some_linear_layer(model_from_saved)
            self.assertTrue(linear.weight.__class__ == Int8Params)
            self.assertTrue(hasattr(linear.weight, "SCB"))

            # generate
            encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
            output_sequences = model_from_saved.generate(
                input_ids=encoded_input["input_ids"].to(model_from_saved.device), max_new_tokens=10
            )

            self.assertIn(
                self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS
            )
Test whether it is possible to serialize a model in 8-bit without using safetensors
test_int8_serialization_regression
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_int8_serialization_sharded(self):
        r"""
        Test whether it is possible to serialize a model in 8-bit - sharded version.
        """
        from bitsandbytes.nn import Int8Params

        with tempfile.TemporaryDirectory() as tmpdirname:
            self.model_8bit.save_pretrained(tmpdirname, max_shard_size="200MB")

            # check that the file `quantization_config` is present
            config = AutoConfig.from_pretrained(tmpdirname)
            self.assertTrue(hasattr(config, "quantization_config"))

            model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname)

            linear = get_some_linear_layer(model_from_saved)
            self.assertTrue(linear.weight.__class__ == Int8Params)
            self.assertTrue(hasattr(linear.weight, "SCB"))

            # generate
            encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
            output_sequences = model_from_saved.generate(
                input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10
            )

            self.assertIn(
                self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS
            )
Test whether it is possible to serialize a model in 8-bit - sharded version.
test_int8_serialization_sharded
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
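A short usage sketch of the sharding knob exercised above, assuming `model_8bit` is an already-quantized model as in the surrounding records; the "200MB" threshold simply mirrors the test and is a tuning choice, not a requirement:

import tempfile
from transformers import AutoModelForCausalLM

with tempfile.TemporaryDirectory() as tmpdirname:
    # Smaller shards mean more checkpoint files but a lower peak memory cost when reloading.
    model_8bit.save_pretrained(tmpdirname, max_shard_size="200MB")
    reloaded = AutoModelForCausalLM.from_pretrained(tmpdirname)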
def test_int8_from_pretrained(self):
        r"""
        Test whether loading an 8-bit model from the Hub works as expected
        """
        from bitsandbytes.nn import Int8Params

        model_id = "ybelkada/bloom-1b7-8bit"

        model = AutoModelForCausalLM.from_pretrained(model_id)

        linear = get_some_linear_layer(model)
        self.assertTrue(linear.weight.__class__ == Int8Params)
        self.assertTrue(hasattr(linear.weight, "SCB"))

        # generate
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
Test whether loading an 8-bit model from the Hub works as expected
test_int8_from_pretrained
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_inference_without_keep_in_fp32(self):
        r"""
        Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly.
        `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test
        both cases.
        """
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `google-t5/t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_8bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules
Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases.
test_inference_without_keep_in_fp32
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_inference_with_keep_in_fp32(self):
        r"""
        Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly.
        `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test
        both cases.
        """
        from transformers import T5ForConditionalGeneration

        # test with `google-t5/t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_8bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
        _ = model.generate(**encoded_input)
Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases.
test_inference_with_keep_in_fp32
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_inference_with_keep_in_fp32_serialized(self):
        r"""
        Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly
        on a serialized model.
        `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test
        both cases.
        """
        from transformers import T5ForConditionalGeneration

        # test with `google-t5/t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto")

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)

            model = T5ForConditionalGeneration.from_pretrained(tmp_dir)

            # there was a bug with decoders - this test checks that it is fixed
            self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt))

            encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
            _ = model.generate(**encoded_input)

            # test with `flan-t5-small`
            model = T5ForConditionalGeneration.from_pretrained(
                self.dense_act_model_name, load_in_8bit=True, device_map="auto"
            )
            encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
            _ = model.generate(**encoded_input)
Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly on a serialized model. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases.
test_inference_with_keep_in_fp32_serialized
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def tearDown(self):
        r"""
        TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
        avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
        """
        del self.base_model
        del self.sequence_model
        del self.model_8bit
        del self.seq_to_seq_model

        gc.collect()
        backend_empty_cache(torch_device)
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
tearDown
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_correct_head_class(self):
        r"""
        A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification)
        are kept in their native class.
        """
        from bitsandbytes.nn import Int8Params

        # last param of a base model should be a linear8bit module
        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class.
test_correct_head_class
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def tearDown(self):
        r"""
        TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
        avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
        """
        if hasattr(self, "pipe"):
            del self.pipe

        gc.collect()
        backend_empty_cache(torch_device)
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
tearDown
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_pipeline(self):
        r"""
        The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since
        we used pipeline for inference speed benchmarking we want to make sure that this feature does not break anything
        on pipeline.
        """
        # self._clear_cuda_cache()
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_8bit": True},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since we used pipeline for inference speed benchmarking we want to make sure that this feature does not break anything on pipeline.
test_pipeline
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_multi_gpu_loading(self):
        r"""
        This tests that the model has been loaded and can be used correctly on a multi-GPU setup.
        Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB
        in total, so 3GB should suffice.
        """
        device_map = {
            "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0,
            "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0,
            "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0,
            "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 1, "transformer.h.11": 1,
            "transformer.h.12": 1, "transformer.h.13": 1, "transformer.h.14": 1, "transformer.h.15": 1,
            "transformer.h.16": 1, "transformer.h.17": 0, "transformer.h.18": 0, "transformer.h.19": 0,
            "transformer.h.20": 0, "transformer.h.21": 0, "transformer.h.22": 0, "transformer.h.23": 1,
            "transformer.ln_f": 0,
        }

        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_8bit=True, device_map=device_map
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(
            input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB in total, so 3GB should suffice
test_multi_gpu_loading
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_cpu_accelerator_loading_random_device_map(self):
        r"""
        A test to check that dispatching a model on CPU & GPU works correctly using a random `device_map`.
        """
        device_map = {
            "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0,
            "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": 0, "transformer.h.3": 0,
            "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0,
            "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1,
            "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0,
            "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0,
            "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0,
            "transformer.ln_f": 1,
        }

        bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True)

        model_8bit = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            device_map=device_map,
            quantization_config=bnb_config,
        )

        # Check that the model has been correctly set on device 0, 1, and `cpu`.
        self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"})

        self.check_inference_correctness(model_8bit)
A test to check that dispatching a model on CPU & GPU works correctly using a random `device_map`.
test_cpu_accelerator_loading_random_device_map
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
def test_cpu_accelerator_loading_custom_device_map(self):
        r"""
        A test to check that dispatching a model on CPU & GPU works correctly using a custom `device_map`.
        This time the device map is more organized than the test above and uses the abstraction
        `transformer.h` to encapsulate all the decoder layers.
        """
        device_map = {
            "transformer.word_embeddings": "cpu",
            "transformer.word_embeddings_layernorm": "cpu",
            "lm_head": "cpu",
            "transformer.h": 0,
            "transformer.ln_f": 1,
        }
        bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True)

        # Load model
        model_8bit = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            device_map=device_map,
            quantization_config=bnb_config,
        )

        # Check that the model has been correctly set on device 0, 1, and `cpu`.
        self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"})

        self.check_inference_correctness(model_8bit)
A test to check that dispatching a model on CPU & GPU works correctly using a custom `device_map`. This time the device map is more organized than the test above and uses the abstraction `transformer.h` to encapsulate all the decoder layers.
test_cpu_accelerator_loading_custom_device_map
python
huggingface/transformers
tests/quantization/bnb/test_mixed_int8.py
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
Apache-2.0
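A minimal sketch of the same offload pattern outside the test harness, using only kwargs that appear in the two records above; "bigscience/bloom-1b7" is an illustrative checkpoint, not necessarily the one these tests load:

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Modules mapped to "cpu" stay in fp32 (hence the fp32 CPU offload flag), while the
# accelerator-resident modules are quantized to int8.
bnb_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
device_map = {
    "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": "cpu",
    "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 0,
}
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", quantization_config=bnb_config, device_map=device_map
)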