code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def test_cpu_accelerator_disk_loading_custom_device_map(self):
r"""
A test to check if dispatching a model on cpu & gpu works correctly using a custom `device_map`.
This time we also add `disk` to the device_map.
"""
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": "cpu",
"lm_head": 0,
"transformer.h": 1,
"transformer.ln_f": "disk",
}
bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True)
with tempfile.TemporaryDirectory() as tmpdirname:
# Load model
model_8bit = AutoModelForCausalLM.from_pretrained(
self.model_name,
device_map=device_map,
quantization_config=bnb_config,
offload_folder=tmpdirname,
)
# Check that the model has been correctly dispatched on device 0, 1, `cpu` and `disk`.
self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"})
self.check_inference_correctness(model_8bit)
|
A test to check if dispatching a model on cpu & gpu works correctly using a custom `device_map`.
This time we also add `disk` to the device_map.
|
test_cpu_accelerator_disk_loading_custom_device_map
|
python
|
huggingface/transformers
|
tests/quantization/bnb/test_mixed_int8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
|
Apache-2.0
|
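For context, a minimal non-test sketch of the API exercised above: a custom `device_map` mixing GPU, CPU and disk entries combined with fp32 CPU offload in `BitsAndBytesConfig`. The checkpoint name and offload folder are placeholders, not values taken from the test suite.

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Placeholder checkpoint; any BLOOM-style causal LM exposes these module names.
model_id = "bigscience/bloom-1b7"
device_map = {
    "transformer.word_embeddings": 0,
    "transformer.word_embeddings_layernorm": "cpu",
    "lm_head": 0,
    "transformer.h": 1,
    "transformer.ln_f": "disk",
}
# fp32 CPU offload must be enabled explicitly when parts of an 8-bit model live on "cpu"/"disk".
bnb_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map=device_map,
    quantization_config=bnb_config,
    offload_folder="./offload",  # receives the weights mapped to "disk"
)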
def test_cpu_accelerator_disk_loading_custom_device_map_kwargs(self):
r"""
A test to check if dispatching a model on cpu & gpu works correctly using a custom `device_map`.
This time we also add `disk` to the device_map - using the kwargs directly instead of the quantization config
"""
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": "cpu",
"lm_head": 0,
"transformer.h": 1,
"transformer.ln_f": "disk",
}
with tempfile.TemporaryDirectory() as tmpdirname:
# Load model
model_8bit = AutoModelForCausalLM.from_pretrained(
self.model_name,
device_map=device_map,
load_in_8bit=True,
llm_int8_enable_fp32_cpu_offload=True,
offload_folder=tmpdirname,
)
# Check that the model has been correctly dispatched on device 0, 1, `cpu` and `disk`.
self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"})
self.check_inference_correctness(model_8bit)
|
A test to check if dispatching a model on cpu & gpu works correctly using a custom `device_map`.
This time we also add `disk` to the device_map - using the kwargs directly instead of the quantization config
|
test_cpu_accelerator_disk_loading_custom_device_map_kwargs
|
python
|
huggingface/transformers
|
tests/quantization/bnb/test_mixed_int8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
|
Apache-2.0
|
def test_int8_from_pretrained(self):
r"""
Test whether loading an 8-bit model from the Hub works as expected
"""
from bitsandbytes.nn import Int8Params
model_id = "ybelkada/gpt2-xl-8bit"
model = AutoModelForCausalLM.from_pretrained(model_id)
linear = get_some_linear_layer(model)
self.assertTrue(linear.weight.__class__ == Int8Params)
self.assertTrue(hasattr(linear.weight, "SCB"))
# generate
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
|
Test whether loading an 8-bit model from the Hub works as expected
|
test_int8_from_pretrained
|
python
|
huggingface/transformers
|
tests/quantization/bnb/test_mixed_int8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
|
Apache-2.0
|
def test_int8_from_pretrained(self):
r"""
Test whether loading an 8-bit model from the Hub works as expected
"""
from bitsandbytes.nn import Int8Params
model_id = "Jiqing/TinyLlama-1.1B-Chat-v1.0-bnb-8bit"
model = AutoModelForCausalLM.from_pretrained(model_id)
linear = get_some_linear_layer(model)
self.assertTrue(linear.weight.__class__ == Int8Params)
self.assertTrue(hasattr(linear.weight, "SCB"))
# generate
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
|
Test whether loading an 8-bit model from the Hub works as expected
|
test_int8_from_pretrained
|
python
|
huggingface/transformers
|
tests/quantization/bnb/test_mixed_int8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/bnb/test_mixed_int8.py
|
Apache-2.0
|
def test_compressed_uncompressed_model_shapes(self):
"""
Verify that the weights of an uncompressed model and its decompressed compressed counterpart match.
Note: Weights for sparsely compressed models may differ due to packing.
"""
def _has_nested_attr(obj, attr_path):
attrs = attr_path.split(".")
for attr in attrs:
if not hasattr(obj, attr):
return None
obj = getattr(obj, attr)
return obj
from compressed_tensors.quantization.utils import iter_named_leaf_modules
for compressed_model, uncompressed_model in self.compressed_uncompressed_model_stubs:
with self.subTest(compressed_model=compressed_model, uncompressed_model=uncompressed_model):
uncompressed = AutoModelForCausalLM.from_pretrained(
uncompressed_model,
device_map="auto",
torch_dtype="auto",
quantization_config=CompressedTensorsConfig(run_compressed=False),
)
compressed_decompressed = AutoModelForCausalLM.from_pretrained(
compressed_model,
device_map="auto",
torch_dtype="auto",
quantization_config=CompressedTensorsConfig(run_compressed=False),
)
for name, submodule in iter_named_leaf_modules(uncompressed):
comp_decomp_obj = _has_nested_attr(compressed_decompressed, name)
if comp_decomp_obj is not None and hasattr(submodule, "weight"):
if "sparse-only" in uncompressed_model:
self.assertTrue(
torch.equal(submodule.weight, comp_decomp_obj.weight),
f"Weight mismatch for module '{name}' in sparse-only model.",
)
else:
self.assertTrue(
torch.allclose(submodule.weight, comp_decomp_obj.weight, atol=0.2),
f"Weight mismatch for module '{name}' in quantized-only or stacked model.",
)
|
Verify that the weights of an uncompressed model and its decompressed compressed counterpart match.
Note: Weights for sparsely compressed models may differ due to packing.
|
test_compressed_uncompressed_model_shapes
|
python
|
huggingface/transformers
|
tests/quantization/compressed_tensors_integration/test_compressed_models.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/compressed_tensors_integration/test_compressed_models.py
|
Apache-2.0
|
def test_outputs_match(self):
"""
Ensure that the generated outputs match between the uncompressed model
and its decompressed compressed counterpart.
"""
tokenizer = AutoTokenizer.from_pretrained(self.sparse_uncompressed_model)
input_ids = tokenizer(self.prompt, return_tensors="pt").input_ids
uncompressed = AutoModelForCausalLM.from_pretrained(
self.sparse_uncompressed_model,
device_map="auto",
torch_dtype="auto",
quantization_config=CompressedTensorsConfig(run_compressed=False),
)
output_uncompressed = uncompressed.generate(input_ids.to(uncompressed.device), max_new_tokens=100)
decompressed = AutoModelForCausalLM.from_pretrained(
self.sparse_compressed_model,
device_map="auto",
torch_dtype="auto",
quantization_config=CompressedTensorsConfig(run_compressed=False),
)
output_decompressed = decompressed.generate(input_ids.to(decompressed.device), max_new_tokens=100)
self.assertEqual(
tokenizer.decode(output_uncompressed[0]),
tokenizer.decode(output_decompressed[0]),
"Generated outputs do not match between compressed and uncompressed models.",
)
|
Ensure that the generated outputs match between the uncompressed model
and its decompressed compressed counterpart.
|
test_outputs_match
|
python
|
huggingface/transformers
|
tests/quantization/compressed_tensors_integration/test_compressed_models.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/compressed_tensors_integration/test_compressed_models.py
|
Apache-2.0
|
def test_no_warnings_for_all_models(self):
"""
Confirm that loading any model using compressed tensors does not trigger
warnings about missing or unexpected keys.
"""
for model_stub in self.model_stubs:
with self.subTest(model_stub=model_stub):
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
AutoModelForCausalLM.from_pretrained(
model_stub,
device_map="auto",
torch_dtype="auto",
quantization_config=CompressedTensorsConfig(run_compressed=False),
)
for warning in caught_warnings:
self.assertNotIn(
"missing keys",
str(warning.message).lower(),
f"'missing keys' found in warnings for model {model_stub}",
)
self.assertNotIn(
"unexpected keys",
str(warning.message).lower(),
f"'unexpected keys' found in warnings for model {model_stub}",
)
|
Confirm that loading any model using compressed tensors does not trigger
warnings about missing or unexpected keys.
|
test_no_warnings_for_all_models
|
python
|
huggingface/transformers
|
tests/quantization/compressed_tensors_integration/test_compressed_models.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/compressed_tensors_integration/test_compressed_models.py
|
Apache-2.0
|
def test_run_compressed_outputs_match(self):
"""Check that run_compressed=True/False output are the same"""
from transformers import AutoTokenizer
from transformers.utils.quantization_config import CompressedTensorsConfig
quantization_config = CompressedTensorsConfig(run_compressed=False)
for stub in self.stubs:
tokenizer = AutoTokenizer.from_pretrained(stub)
input_ids = tokenizer(self.prompt, return_tensors="pt").input_ids
model_run_compressed__True = AutoModelForCausalLM.from_pretrained(
stub,
)
output_rc_true = model_run_compressed__True.generate(input_ids, max_new_tokens=100)
model_run_compressed__False = AutoModelForCausalLM.from_pretrained(
stub,
quantization_config=quantization_config,
)
output_rc_false = model_run_compressed__False.generate(input_ids, max_new_tokens=100)
assert tokenizer.decode(output_rc_true[0]) == tokenizer.decode(output_rc_false[0])
|
Check that run_compressed=True/False outputs are the same
|
test_run_compressed_outputs_match
|
python
|
huggingface/transformers
|
tests/quantization/compressed_tensors_integration/test_compressed_models.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/compressed_tensors_integration/test_compressed_models.py
|
Apache-2.0
|
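As a reference outside the test harness, a minimal sketch of loading a compressed-tensors checkpoint in decompressed form, as these tests do; the model stub is a placeholder.

from transformers import AutoModelForCausalLM
from transformers.utils.quantization_config import CompressedTensorsConfig

stub = "org/compressed-tensors-checkpoint"  # placeholder stub
# run_compressed=False decompresses the weights on load instead of running the compressed kernels.
model = AutoModelForCausalLM.from_pretrained(
    stub,
    device_map="auto",
    torch_dtype="auto",
    quantization_config=CompressedTensorsConfig(run_compressed=False),
)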
def test_to_dict(self):
"""
Simple test that checks that converting a config to a dict yields a dict matching the config object
"""
quantization_config = EetqConfig()
config_to_dict = quantization_config.to_dict()
for key in config_to_dict:
self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
|
Simple test that checks that converting a config to a dict yields a dict matching the config object
|
test_to_dict
|
python
|
huggingface/transformers
|
tests/quantization/eetq_integration/test_eetq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/eetq_integration/test_eetq.py
|
Apache-2.0
|
def test_from_dict(self):
"""
Simple test that checks that creating a config object from a dict yields a config matching the dict
"""
dict = {"modules_to_not_convert": ["lm_head.weight"], "quant_method": "eetq", "weights": "int8"}
quantization_config = EetqConfig.from_dict(dict)
self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
self.assertEqual(dict["quant_method"], quantization_config.quant_method)
self.assertEqual(dict["weights"], quantization_config.weights)
|
Simple test that checks that creating a config object from a dict yields a config matching the dict
|
test_from_dict
|
python
|
huggingface/transformers
|
tests/quantization/eetq_integration/test_eetq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/eetq_integration/test_eetq.py
|
Apache-2.0
|
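A minimal sketch of the on-the-fly quantization path these EETQ tests rely on, assuming the eetq kernels are installed; the checkpoint is a placeholder.

from transformers import AutoModelForCausalLM, EetqConfig

model_id = "facebook/opt-350m"  # placeholder checkpoint
quantization_config = EetqConfig(weights="int8")
# Linear weights are converted to int8 EETQ layers while the model is loaded.
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=quantization_config
)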
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from eetq import EetqLinear
from transformers.integrations import replace_with_eetq_linear
model_id = "facebook/opt-350m"
config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
quantization_config = EetqConfig(weights="int8")
with init_empty_weights():
model = OPTForCausalLM(config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model = replace_with_eetq_linear(model, quantization_config=quantization_config)
nb_eetq_linear = 0
for module in model.modules():
if isinstance(module, EetqLinear):
nb_eetq_linear += 1
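# Only the lm_head is expected to stay as a regular nn.Linear by default.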
self.assertEqual(nb_linears - 1, nb_eetq_linear)
# Try with `modules_to_not_convert`
with init_empty_weights():
model = OPTForCausalLM(config)
quantization_config = EetqConfig(modules_to_not_convert=["fc1"])
model = replace_with_eetq_linear(model, quantization_config=quantization_config)
nb_eetq_linear = 0
for module in model.modules():
if isinstance(module, EetqLinear):
nb_eetq_linear += 1
# 25 corresponds to the lm_head along with 24 fc1 layers.
self.assertEqual(nb_linears - 25, nb_eetq_linear)
|
Simple test that checks if the quantized model has been converted properly
|
test_quantized_model_conversion
|
python
|
huggingface/transformers
|
tests/quantization/eetq_integration/test_eetq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/eetq_integration/test_eetq.py
|
Apache-2.0
|
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly
|
test_quantized_model
|
python
|
huggingface/transformers
|
tests/quantization/eetq_integration/test_eetq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/eetq_integration/test_eetq.py
|
Apache-2.0
|
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly after being saved and loaded
|
test_save_pretrained
|
python
|
huggingface/transformers
|
tests/quantization/eetq_integration/test_eetq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/eetq_integration/test_eetq.py
|
Apache-2.0
|
def test_quantized_model_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantization_config = EetqConfig()
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name, device_map="auto", quantization_config=quantization_config
)
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly with multiple GPUs
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs
|
test_quantized_model_multi_gpu
|
python
|
huggingface/transformers
|
tests/quantization/eetq_integration/test_eetq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/eetq_integration/test_eetq.py
|
Apache-2.0
|
def test_to_dict(self):
"""
Simple test that checks that converting a config to a dict yields a dict matching the config object
"""
quantization_config = FbgemmFp8Config()
config_to_dict = quantization_config.to_dict()
for key in config_to_dict:
self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
|
Simple test that checks that converting a config to a dict yields a dict matching the config object
|
test_to_dict
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
def test_from_dict(self):
"""
Simple test that checks that creating a config object from a dict yields a config matching the dict
"""
dict = {"modules_to_not_convert": ["lm_head.weight"], "quant_method": "fbgemm_fp8"}
quantization_config = FbgemmFp8Config.from_dict(dict)
self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
self.assertEqual(dict["quant_method"], quantization_config.quant_method)
|
Simple test that checks that creating a config object from a dict yields a config matching the dict
|
test_from_dict
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
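For reference, a minimal sketch of the FBGEMM FP8 loading path exercised below, assuming fbgemm-gpu and a GPU with FP8 support are available; the checkpoint is a placeholder.

from transformers import AutoModelForCausalLM, FbgemmFp8Config

model_id = "meta-llama/Meta-Llama-3-8B"  # placeholder checkpoint
quantization_config = FbgemmFp8Config()  # activation_scale_ub can be overridden, see test_change_loading_attributes
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=quantization_config
)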
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from transformers.integrations import FbgemmFp8Linear, replace_with_fbgemm_fp8_linear
model_id = "facebook/opt-350m"
config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
quantization_config = FbgemmFp8Config()
with init_empty_weights():
model = OPTForCausalLM(config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model = replace_with_fbgemm_fp8_linear(model, quantization_config=quantization_config)
nb_fbgemm_linear = 0
for module in model.modules():
if isinstance(module, FbgemmFp8Linear):
nb_fbgemm_linear += 1
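# Only the lm_head is expected to stay as a regular nn.Linear by default.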
self.assertEqual(nb_linears - 1, nb_fbgemm_linear)
with init_empty_weights():
model = OPTForCausalLM(config)
quantization_config = FbgemmFp8Config(modules_to_not_convert=["fc1"])
model = replace_with_fbgemm_fp8_linear(model, quantization_config=quantization_config)
nb_fbgemm_linear = 0
for module in model.modules():
if isinstance(module, FbgemmFp8Linear):
nb_fbgemm_linear += 1
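# 25 corresponds to the lm_head along with 24 fc1 layers.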
self.assertEqual(nb_linears - 25, nb_fbgemm_linear)
|
Simple test that checks if the quantized model has been converted properly
|
test_quantized_model_conversion
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly
|
test_quantized_model
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly after being saved and loaded
|
test_save_pretrained
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
def test_change_loading_attributes(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
quantization_config = FbgemmFp8Config(activation_scale_ub=1000.0)
model = AutoModelForCausalLM.from_pretrained(
tmpdirname, device_map=self.device_map, quantization_config=quantization_config
)
self.assertEqual(model.model.layers[1].mlp.down_proj.input_scale_ub.item(), 1000.0)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly after being saved and loaded
|
test_change_loading_attributes
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
def test_quantized_model_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantization_config = FbgemmFp8Config()
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name, device_map="auto", quantization_config=quantization_config
)
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly with multiple GPUs
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs
|
test_quantized_model_multi_gpu
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
def test_quantized_model_offload(self):
"""
Simple test that checks that the quantized model raises an error when loaded with cpu/disk offload
"""
quantization_config = FbgemmFp8Config()
with self.assertRaisesRegex(
ValueError, "You are attempting to load an FP8 model with a device_map that contains a CPU or disk device."
):
AutoModelForCausalLM.from_pretrained(
self.model_name, device_map=self.offload_device_map, quantization_config=quantization_config
)
|
Simple test that checks that the quantized model raises an error when loaded with cpu/disk offload
|
test_quantized_model_offload
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
def test_save_pretrained_offload(self):
"""
Simple test that checks if the saved quantized model is working properly with cpu/disk offload
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.offload_device_map)
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the saved quantized model is working properly with cpu/disk offload
|
test_save_pretrained_offload
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
def test_save_pretrained_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto")
self.assertTrue(set(model.hf_device_map.values()) == {0, 1})
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly after being saved and loaded
|
test_save_pretrained_multi_gpu
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
def test_linear_with_diff_feature_size_preserves_shape(self):
"""
Test that FbgemmFp8Linear generates the correct shape when in_features != out_features.
"""
from transformers.integrations import FbgemmFp8Linear
with init_empty_weights(include_buffers=True):
linear = FbgemmFp8Linear(1024, 2048, True)
x = torch.rand((17, 23, 1024))
x_ = linear(x)
self.assertEqual(x_.shape, (17, 23, 2048))
|
Test that FbgemmFp8Linear generates the correct shape when in_features != out_features.
|
test_linear_with_diff_feature_size_preserves_shape
|
python
|
huggingface/transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
Apache-2.0
|
def test_to_dict(self):
"""
Simple test that checks that converting a config to a dict yields a dict matching the config object
"""
quantization_config = FineGrainedFP8Config()
config_to_dict = quantization_config.to_dict()
for key in config_to_dict:
self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
|
Simple test that checks that converting a config to a dict yields a dict matching the config object
|
test_to_dict
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_from_dict(self):
"""
Simple test that checks that creating a config object from a dict yields a config matching the dict
"""
dict = {"modules_to_not_convert": ["lm_head.weight"], "quant_method": "fp8"}
quantization_config = FineGrainedFP8Config.from_dict(dict)
self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
self.assertEqual(dict["quant_method"], quantization_config.quant_method)
|
Simple test that checks that creating a config object from a dict yields a config matching the dict
|
test_from_dict
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from transformers.integrations import FP8Linear, replace_with_fp8_linear
model_id = "facebook/opt-350m"
config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
quantization_config = FineGrainedFP8Config()
with init_empty_weights():
model = OPTForCausalLM(config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model = replace_with_fp8_linear(model, quantization_config=quantization_config)
nb_fp8_linear = 0
for module in model.modules():
if isinstance(module, FP8Linear):
nb_fp8_linear += 1
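# Only the lm_head is expected to stay as a regular nn.Linear by default.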
self.assertEqual(nb_linears - 1, nb_fp8_linear)
with init_empty_weights():
model = OPTForCausalLM(config)
quantization_config = FineGrainedFP8Config(modules_to_not_convert=["fc1"])
model = replace_with_fp8_linear(model, quantization_config=quantization_config)
nb_fp8_linear = 0
for module in model.modules():
if isinstance(module, FP8Linear):
nb_fp8_linear += 1
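# 25 corresponds to the lm_head along with 24 fc1 layers.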
self.assertEqual(nb_linears - 25, nb_fp8_linear)
|
Simple test that checks if the quantized model has been converted properly
|
test_quantized_model_conversion
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(self.device_map)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
output_tokens = self.tokenizer.decode(output[0], skip_special_tokens=True)
self.assertEqual(output_tokens, self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly
|
test_quantized_model
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(self.device_map)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly after being saved and loaded
|
test_save_pretrained
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_weight_and_weight_scale_inv(self):
"""
Simple test that checks that the weight and weight_scale_inv tensors have the expected dtypes and shapes
"""
weight = self.quantized_model.model.layers[0].self_attn.q_proj.weight
weight_scale_inv = self.quantized_model.model.layers[0].self_attn.q_proj.weight_scale_inv
self.assertEqual(weight.dtype, torch.float8_e4m3fn)
self.assertEqual(weight_scale_inv.dtype, torch.float32)
self.assertEqual(weight.shape, (weight_scale_inv.shape[0] * 128, weight_scale_inv.shape[1] * 128))
|
Simple test that checks that the weight and weight_scale_inv tensors have the expected dtypes and shapes
|
test_weight_and_weight_scale_inv
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_block_size(self):
"""
Simple test that checks if the weight block size is set properly
"""
self.assertEqual(self.quantized_model.config.quantization_config.weight_block_size, (128, 128))
quantization_config = FineGrainedFP8Config(weight_block_size=(32, 32))
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name, device_map=self.device_map, quantization_config=quantization_config
)
self.assertEqual(quantized_model.config.quantization_config.weight_block_size, (32, 32))
|
Simple test that checks if the weight block size is set properly
|
test_block_size
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
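A minimal sketch of overriding the weight block size at load time, as the test above does; the checkpoint name is a placeholder.

from transformers import AutoModelForCausalLM, FineGrainedFP8Config

model_id = "meta-llama/Llama-3.2-1B"  # placeholder checkpoint
quantization_config = FineGrainedFP8Config(weight_block_size=(32, 32))  # the tests above expect (128, 128) as the default
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="cuda", quantization_config=quantization_config
)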
def test_quantized_model_multi_accelerator(self):
"""
Simple test that checks if the quantized model is working properly with multiple accelerators
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs; or set ZE_AFFINITY_MASK=0,1 if you
have more than 2 XPUs.
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(self.device_map)
quantization_config = FineGrainedFP8Config()
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name, device_map="auto", quantization_config=quantization_config
)
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly with multiple accelerators
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs; or set ZE_AFFINITY_MASK=0,1 if you
have more than 2 XPUs.
|
test_quantized_model_multi_accelerator
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_save_pretrained_multi_accelerators(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto")
self.assertTrue(set(model.hf_device_map.values()) == {0, 1})
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(self.device_map)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly after being saved and loaded
|
test_save_pretrained_multi_accelerators
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_quantized_model_offload(self):
"""
Simple test that checks if the quantized model returns an error when loading with cpu/disk offloaded
"""
with self.assertRaisesRegex(
ValueError, "You are attempting to load an FP8 model with a device_map that contains a cpu/disk device."
):
AutoModelForCausalLM.from_pretrained(
self.model_name, device_map=self.offload_device_map, quantization_config=self.quantization_config
)
|
Simple test that checks if the quantized model returns an error when loading with cpu/disk offloaded
|
test_quantized_model_offload
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_save_pretrained_offload(self):
"""
Simple test that checks if the saved quantized model is working properly cpu/disk offload
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(self.device_map)
quantized_model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.offload_device_map)
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the saved quantized model is working properly cpu/disk offload
|
test_save_pretrained_offload
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_linear_with_diff_feature_size_preserves_shape(self):
"""
Test that FP8Linear generates the correct shape when in_features != out_features.
"""
from transformers.integrations import FP8Linear
linear = FP8Linear(128, 256, block_size=(128, 128), device=self.device)
x = torch.rand((1, 5, 128)).to(self.device)
x_ = linear(x)
self.assertEqual(x_.shape, (1, 5, 256))
|
Test that FP8Linear generates the correct shape when in_features != out_features.
|
test_linear_with_diff_feature_size_preserves_shape
|
python
|
huggingface/transformers
|
tests/quantization/finegrained_fp8/test_fp8.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/finegrained_fp8/test_fp8.py
|
Apache-2.0
|
def test_memory_footprint(self):
r"""
A simple test to check if the model conversion has been done correctly by checking the
memory footprint of the converted model
"""
mem_quantized = self.quantized_model.get_memory_footprint()
self.assertAlmostEqual(self.mem_fp16 / mem_quantized, self.EXPECTED_RELATIVE_DIFFERENCE, places=4)
|
A simple test to check if the model conversion has been done correctly by checking the
memory footprint of the converted model
|
test_memory_footprint
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
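For context, a minimal sketch of producing such a GPTQ-quantized model, assuming optimum plus a GPTQ backend (gptqmodel or auto-gptq) are installed; the checkpoint and calibration dataset are placeholders.

from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "bigscience/bloom-560m"  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
# bits/group_size mirror the attributes these tests check on the quantized model.
quantization_config = GPTQConfig(bits=4, group_size=128, dataset="c4", tokenizer=tokenizer)
quantized_model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=quantization_config
)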
def test_device_and_dtype_assignment(self):
r"""
Test whether trying to cast (or assign a device to) a model after quantization will throw an error.
Also checks that other models are cast correctly.
"""
# This should work
if self.device_map in (None, "cpu"):
_ = self.quantized_model.to(0)
with self.assertRaises(ValueError):
# Tries with a `dtype`
self.quantized_model.to(torch.float16)
|
Test whether trying to cast (or assign a device to) a model after quantization will throw an error.
Also checks that other models are cast correctly.
|
test_device_and_dtype_assignment
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
def test_original_dtype(self):
r"""
A simple test to check if the model successfully stores the original dtype
"""
self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype"))
self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16)
|
A simple test to check if the model successfully stores the original dtype
|
test_original_dtype
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
def test_quantized_layers_class(self):
"""
Simple test to check if the model conversion has been done correctly by checking
the class type of the linear layers of the converted models
"""
if is_gptqmodel_available():
from gptqmodel.utils.importer import hf_select_quant_linear
if hasattr(self.config, "quantization_config"):
checkpoint_format = self.config.quantization_config.get("checkpoint_format")
meta = self.config.quantization_config.get("meta")
else:
checkpoint_format = "gptq"
meta = None
QuantLinear = hf_select_quant_linear(
bits=self.bits,
group_size=self.group_size,
desc_act=self.desc_act,
sym=self.sym,
device_map=self.device_map,
checkpoint_format=checkpoint_format,
meta=meta,
backend=self.quantization_config.backend,
)
elif is_auto_gptq_available():
from auto_gptq.utils.import_utils import dynamically_import_QuantLinear as hf_select_quant_linear
QuantLinear = hf_select_quant_linear(
use_triton=False,
desc_act=self.desc_act,
group_size=self.group_size,
bits=self.bits,
disable_exllama=not self.use_exllama,
disable_exllamav2=True,
)
self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear)
|
Simple test to check if the model conversion has been done correctly by checking
the class type of the linear layers of the converted models
|
test_quantized_layers_class
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
def check_inference_correctness(self, model):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Check the exactness of the results
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(model.device), max_new_tokens=10)
# Get the generation
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
|
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
|
check_inference_correctness
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
def test_generate_quality(self):
"""
Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
"""
if self.device_map is None:
self.check_inference_correctness(self.quantized_model.to(0))
else:
if self.device_map == "cpu" and self.quantized_model.device.type != "cpu":
self.quantized_model.to("cpu")
self.check_inference_correctness(self.quantized_model)
|
Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
|
test_generate_quality
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
def test_serialization(self):
"""
Test that the serialization of the model and the loading of the quantized weights work
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
if is_auto_gptq_available() and not is_gptqmodel_available():
quant_type = "cuda-old" if not self.use_exllama else "exllama"
if not self.use_exllama:
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
tmpdirname, quantization_config=GPTQConfig(use_exllama=False, bits=4)
)
if self.device_map != "cpu":
quantized_model_from_saved = quantized_model_from_saved.to(0)
else:
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
tmpdirname, device_map=self.device_map
)
else:
if self.device_map == "cpu":
quant_type = "ipex" if is_ipex_available() else "torch"
else:
# We expect tritonv2 to be used here, because exllama backend doesn't support packing https://github.com/ModelCloud/GPTQModel/issues/1354
# TODO: Remove this once GPTQModel exllama kernels supports packing
quant_type = "tritonv2"
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
tmpdirname, device_map=self.device_map
)
self.check_quantized_layers_type(quantized_model_from_saved, quant_type)
self.check_inference_correctness(quantized_model_from_saved)
|
Test that the serialization of the model and the loading of the quantized weights work
|
test_serialization
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
def test_serialization_big_model_inference(self):
"""
Test the serialization of the model and the loading of the quantized weights with big model inference
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
device_map = self.device_map or "auto"
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=device_map)
self.check_inference_correctness(quantized_model_from_saved)
|
Test the serialization of the model and the loading of the quantized weights with big model inference
|
test_serialization_big_model_inference
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
def test_change_loading_attributes(self):
"""
Test that the serialization of the model and the loading of the quantized weights work with another config file
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
if is_auto_gptq_available() and not is_gptqmodel_available() and not self.use_exllama:
self.check_quantized_layers_type(self.quantized_model, "cuda-old")
# we need to put it directly on the gpu. Otherwise, we won't be able to initialize the exllama kernel
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map=self.device_map
)
self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits)
self.check_quantized_layers_type(quantized_model_from_saved, "exllama")
self.check_inference_correctness(quantized_model_from_saved)
|
Test that the serialization of the model and the loading of the quantized weights work with another config file
|
test_change_loading_attributes
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
def check_inference_correctness(self, model):
"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Check the exactness of the results
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
# Get the generation
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
|
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
|
check_inference_correctness
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
def test_max_input_length(self):
"""
Test that max_input_length works. It modifies the maximum input length of the model that runs with the exllama backend.
"""
prompt = "I am in Paris and" * 1000
inp = self.tokenizer(prompt, return_tensors="pt").to(0)
self.assertTrue(inp["input_ids"].shape[1] > 4028)
with self.assertRaises(RuntimeError) as cm:
self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3)
self.assertTrue("temp_state buffer is too small" in str(cm.exception))
prompt = "I am in Paris and"
inp = self.tokenizer(prompt, return_tensors="pt").to(0)
self.assertTrue(inp["input_ids"].shape[1] < 4028)
self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3)
|
Test that max_input_length works. It modifies the maximum input length of the model that runs with the exllama backend.
|
test_max_input_length
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
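A minimal sketch of how such a model is presumably loaded with a constrained input length for the exllama backend; the limit of 4028 is inferred from the assertions above, and the checkpoint is a placeholder.

from transformers import AutoModelForCausalLM, GPTQConfig

quantized_model_id = "org/gptq-4bit-checkpoint"  # placeholder, already GPTQ-quantized
quantization_config = GPTQConfig(bits=4, max_input_length=4028)  # only honored by the exllama backend
quantized_model = AutoModelForCausalLM.from_pretrained(
    quantized_model_id, device_map="auto", quantization_config=quantization_config
)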
def check_inference_correctness(self, model):
"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Check the exactness of the results
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
# Get the generation
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
|
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
|
check_inference_correctness
|
python
|
huggingface/transformers
|
tests/quantization/gptq/test_gptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/gptq/test_gptq.py
|
Apache-2.0
|
def test_to_dict(self):
"""
Simple test that checks that converting a config to a dict yields a dict matching the config object
"""
quantization_config = HiggsConfig()
config_to_dict = quantization_config.to_dict()
for key in config_to_dict:
self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
|
Simple test that checks that converting a config to a dict yields a dict matching the config object
|
test_to_dict
|
python
|
huggingface/transformers
|
tests/quantization/higgs/test_higgs.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/higgs/test_higgs.py
|
Apache-2.0
|
def test_from_dict(self):
"""
Simple test that checks that creating a config object from a dict yields a config matching the dict
"""
dict = {"modules_to_not_convert": ["embed_tokens", "lm_head"], "quant_method": "higgs"}
quantization_config = HiggsConfig.from_dict(dict)
self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
self.assertEqual(dict["quant_method"], quantization_config.quant_method)
|
Simple test that checks that creating a config object from a dict yields a config matching the dict
|
test_from_dict
|
python
|
huggingface/transformers
|
tests/quantization/higgs/test_higgs.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/higgs/test_higgs.py
|
Apache-2.0
|
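For reference, a minimal sketch of quantizing a model with HIGGS on the fly, assuming the required HIGGS/FLUTE kernels are installed; the checkpoint is a placeholder.

from transformers import AutoModelForCausalLM, HiggsConfig

model_id = "meta-llama/Llama-3.2-1B"  # placeholder checkpoint
quantization_config = HiggsConfig()  # by default the lm_head stays unconverted (see the conversion test below)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=quantization_config
)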
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from transformers.integrations import HiggsLinear, replace_with_higgs_linear
model_id = "facebook/opt-350m"
config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
quantization_config = HiggsConfig()
with init_empty_weights():
model = OPTForCausalLM(config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model, _ = replace_with_higgs_linear(model, quantization_config=quantization_config)
nb_higgs_linear = 0
for module in model.modules():
if isinstance(module, HiggsLinear):
nb_higgs_linear += 1
self.assertEqual(nb_linears - 1, nb_higgs_linear)
with init_empty_weights():
model = OPTForCausalLM(config)
quantization_config = HiggsConfig(modules_to_not_convert=["fc1"])
model, _ = replace_with_higgs_linear(model, quantization_config=quantization_config)
nb_higgs_linear = 0
for module in model.modules():
if isinstance(module, HiggsLinear):
nb_higgs_linear += 1
self.assertEqual(nb_linears - 24, nb_higgs_linear)
|
Simple test that checks if the quantized model has been converted properly
|
test_quantized_model_conversion
|
python
|
huggingface/transformers
|
tests/quantization/higgs/test_higgs.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/higgs/test_higgs.py
|
Apache-2.0
|
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly
|
test_quantized_model
|
python
|
huggingface/transformers
|
tests/quantization/higgs/test_higgs.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/higgs/test_higgs.py
|
Apache-2.0
|
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly after being saved and loaded
|
test_save_pretrained
|
python
|
huggingface/transformers
|
tests/quantization/higgs/test_higgs.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/higgs/test_higgs.py
|
Apache-2.0
|
def test_quantized_model_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantization_config = HiggsConfig()
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name, device_map="auto", quantization_config=quantization_config
)
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly with multiple GPUs
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs
|
test_quantized_model_multi_gpu
|
python
|
huggingface/transformers
|
tests/quantization/higgs/test_higgs.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/higgs/test_higgs.py
|
Apache-2.0
|
def test_save_pretrained_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto")
self.assertTrue(set(model.hf_device_map.values()) == {0, 1})
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly after being saved and loaded
|
test_save_pretrained_multi_gpu
|
python
|
huggingface/transformers
|
tests/quantization/higgs/test_higgs.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/higgs/test_higgs.py
|
Apache-2.0
|
def test_dequantize(self):
"""
Test the ability to dequantize a model
"""
self.quantized_model.dequantize()
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Test the ability to dequantize a model
|
test_dequantize
|
python
|
huggingface/transformers
|
tests/quantization/higgs/test_higgs.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/higgs/test_higgs.py
|
Apache-2.0
|
def test_to_dict(self):
"""
Makes sure the config format is properly set
"""
quantization_config = HqqConfig()
hqq_orig_config = quantization_config.to_dict()
self.assertEqual(quantization_config.quant_config, hqq_orig_config["quant_config"])
|
Makes sure the config format is properly set
|
test_to_dict
|
python
|
huggingface/transformers
|
tests/quantization/hqq/test_hqq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/hqq/test_hqq.py
|
Apache-2.0
|
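For context on the HqqConfig API exercised in the HQQ rows that follow, here is a minimal, hedged usage sketch. It assumes a transformers build that exposes HqqConfig, the hqq package installed, and a CUDA device; the checkpoint id mirrors the small model used in the tests and is illustrative only.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, HqqConfig

# Quantize on the fly with the same settings the tests use (8-bit, group size 64).
quant_config = HqqConfig(nbits=8, group_size=64)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",  # illustrative model id
    torch_dtype=torch.float16,
    device_map="cuda",
    quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0], skip_special_tokens=True))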
def test_fp16_quantized_model_multipgpu(self):
"""
Simple LLM model testing fp16 with multi-gpu
"""
quant_config = HqqConfig(nbits=8, group_size=64)
hqq_runner = HQQLLMRunner(
model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device="auto"
)
check_hqqlayer(self, hqq_runner.model.model.layers[0].self_attn.v_proj)
check_forward(self, hqq_runner.model)
|
Simple LLM model testing fp16 with multi-gpu
|
test_fp16_quantized_model_multipgpu
|
python
|
huggingface/transformers
|
tests/quantization/hqq/test_hqq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/hqq/test_hqq.py
|
Apache-2.0
|
def test_fp16_quantized_model(self):
"""
Simple LLM model testing fp16 with bias
"""
quant_config = HqqConfig(nbits=8, group_size=64)
hqq_runner = HQQLLMRunner(
model_id="facebook/opt-125m", quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
check_hqqlayer(self, hqq_runner.model.model.decoder.layers[0].self_attn.v_proj)
check_forward(self, hqq_runner.model)
|
Simple LLM model testing fp16 with bias
|
test_fp16_quantized_model
|
python
|
huggingface/transformers
|
tests/quantization/hqq/test_hqq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/hqq/test_hqq.py
|
Apache-2.0
|
def test_save_and_load_quantized_model(self):
"""
Test saving and loading a quantized model with bias
"""
import tempfile
quant_config = HqqConfig(nbits=8, group_size=64)
hqq_runner = HQQLLMRunner(
model_id="facebook/opt-125m", quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
input_tensor = torch.zeros((1, 8), dtype=torch.int32, device=torch_device)
# Get reference logits
with torch.no_grad():
logits_ref = hqq_runner.model.forward(input_tensor).logits
with tempfile.TemporaryDirectory() as tmpdirname:
hqq_runner.model.save_pretrained(tmpdirname)
del hqq_runner.model
backend_empty_cache(torch_device)
model_loaded = AutoModelForCausalLM.from_pretrained(
tmpdirname, torch_dtype=torch.float16, device_map=torch_device
)
with torch.no_grad():
logits_loaded = model_loaded.forward(input_tensor).logits
self.assertEqual((logits_loaded - logits_ref).abs().mean().item(), 0)
|
Test saving and loading a quantized model with bias
|
test_save_and_load_quantized_model
|
python
|
huggingface/transformers
|
tests/quantization/hqq/test_hqq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/hqq/test_hqq.py
|
Apache-2.0
|
def test_model_serialization(self):
"""
Simple HQQ LLM save/load test
"""
quant_config = HqqConfig(nbits=4, group_size=64)
hqq_runner = HQQLLMRunner(
model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
input_tensor = torch.zeros((1, 8), dtype=torch.int32, device=torch_device)
with torch.no_grad():
logits_ref = hqq_runner.model.forward(input_tensor).logits
# Save
saved_model_id = "quant_model"
hqq_runner.model.save_pretrained(saved_model_id)
# Remove old model
del hqq_runner.model
backend_empty_cache(torch_device)
# Load and check if the logits match
model_loaded = AutoModelForCausalLM.from_pretrained(
"quant_model", torch_dtype=torch.float16, device_map=torch_device, low_cpu_mem_usage=True
)
with torch.no_grad():
logits_loaded = model_loaded.forward(input_tensor).logits
self.assertEqual((logits_loaded - logits_ref).abs().mean().item(), 0)
|
Simple HQQ LLM save/load test
|
test_model_serialization
|
python
|
huggingface/transformers
|
tests/quantization/hqq/test_hqq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/hqq/test_hqq.py
|
Apache-2.0
|
def test_model_serialization_dynamic_quant_with_skip(self):
"""
Simple HQQ LLM save/load test with dynamic quant
"""
q4_config = {"nbits": 4, "group_size": 64}
q3_config = {"nbits": 3, "group_size": 64}
quant_config = HqqConfig(
dynamic_config={
"self_attn.q_proj": q4_config,
"self_attn.k_proj": q4_config,
"self_attn.v_proj": q4_config,
"self_attn.o_proj": q4_config,
"mlp.gate_proj": q3_config,
"mlp.up_proj": q3_config,
},
skip_modules=["lm_head", "down_proj"],
)
hqq_runner = HQQLLMRunner(
model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
model = hqq_runner.model
input_tensor = torch.zeros((1, 8), dtype=torch.int32, device=torch_device)
with torch.no_grad():
model.forward(input_tensor).logits
self.assertEqual(isinstance(model.model.layers[1].mlp.down_proj, torch.nn.Linear), True)
self.assertEqual(model.model.layers[1].self_attn.v_proj.quant_config["weight_quant_params"]["nbits"], 4)
self.assertEqual(model.model.layers[1].mlp.gate_proj.quant_config["weight_quant_params"]["nbits"], 3)
|
Simple HQQ LLM save/load test with dynamic quant
|
test_model_serialization_dynamic_quant_with_skip
|
python
|
huggingface/transformers
|
tests/quantization/hqq/test_hqq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/hqq/test_hqq.py
|
Apache-2.0
|
def test_weight_only_quantization_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly when using weight only quantization
"""
# Try with weight only quantization
quantization_config = QuantoConfig(weights="int8", activations=None)
self.model, _ = replace_with_quanto_layers(self.model, quantization_config=quantization_config)
nb_qlinear = 0
for module in self.model.modules():
if isinstance(module, QLinear):
nb_qlinear += 1
self.assertEqual(self.nb_linear, nb_qlinear)
|
Simple test that checks if the quantized model has been converted properly when using weight only quantization
|
test_weight_only_quantization_conversion
|
python
|
huggingface/transformers
|
tests/quantization/quanto_integration/test_quanto.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quanto_integration/test_quanto.py
|
Apache-2.0
|
def test_weight_and_activation_quantization_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly when using weight + activation quantization
"""
# Try with weight + activation quantization
quantization_config = QuantoConfig(weights="int8", activations="int8")
self.model, _ = replace_with_quanto_layers(self.model, quantization_config=quantization_config)
nb_qlinear = 0
nb_qlayernorm = 0
for module in self.model.modules():
if isinstance(module, QLinear):
nb_qlinear += 1
if isinstance(module, QLayerNorm):
nb_qlayernorm += 1
self.assertEqual(self.nb_linear, nb_qlinear)
self.assertEqual(self.nb_layernorm, nb_qlayernorm)
|
Simple test that checks if the quantized model has been converted properly when using weight + activation quantization
|
test_weight_and_activation_quantization_conversion
|
python
|
huggingface/transformers
|
tests/quantization/quanto_integration/test_quanto.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quanto_integration/test_quanto.py
|
Apache-2.0
|
def test_conversion_with_modules_to_not_convert(self):
"""
Simple test that checks if the quantized model has been converted properly when specifying modules_to_not_convert argument
"""
# Try with weight + activation quantization
quantization_config = QuantoConfig(weights="int8", activations="int8")
self.model, _ = replace_with_quanto_layers(
self.model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"]
)
nb_qlinear = 0
nb_qlayernorm = 0
for module in self.model.modules():
if isinstance(module, QLinear):
nb_qlinear += 1
if isinstance(module, QLayerNorm):
nb_qlayernorm += 1
self.assertEqual(self.nb_linear - 1, nb_qlinear)
|
Simple test that checks if the quantized model has been converted properly when specifying modules_to_not_convert argument
|
test_conversion_with_modules_to_not_convert
|
python
|
huggingface/transformers
|
tests/quantization/quanto_integration/test_quanto.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quanto_integration/test_quanto.py
|
Apache-2.0
|
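The Quanto rows above exercise the internal replace_with_quanto_layers helper; as a hedged sketch of the public path (assuming optimum-quanto is installed and using an illustrative checkpoint id), quantization is normally requested through QuantoConfig at load time:

from transformers import AutoModelForCausalLM, QuantoConfig

# Weight-only int8 quantization, mirroring the first conversion test above.
quant_config = QuantoConfig(weights="int8")
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",  # illustrative model id
    device_map="cpu",
    quantization_config=quant_config,
)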
def check_inference_correctness(self, model, device):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
if not self.have_accelerate_hooks:
model.to(device)
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(device), max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
|
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
|
check_inference_correctness
|
python
|
huggingface/transformers
|
tests/quantization/quanto_integration/test_quanto.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quanto_integration/test_quanto.py
|
Apache-2.0
|
def test_serialization_bin(self):
"""
Test the serialization, the loading and the inference of the quantized weights
"""
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(ValueError) as e:
self.quantized_model.save_pretrained(tmpdirname, safe_serialization=False)
self.assertIn("The model is quantized with quanto and is not serializable", str(e.exception))
# TODO: replace by the following when it works
# quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
# tmpdirname, torch_dtype=torch.float32, device_map="cpu"
# )
# self.check_inference_correctness(quantized_model_from_saved, device="cuda")
|
Test the serialization, the loading and the inference of the quantized weights
|
test_serialization_bin
|
python
|
huggingface/transformers
|
tests/quantization/quanto_integration/test_quanto.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quanto_integration/test_quanto.py
|
Apache-2.0
|
def test_serialization_safetensors(self):
"""
Test the serialization, the loading and the inference of the quantized weights
"""
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(ValueError) as e:
self.quantized_model.save_pretrained(tmpdirname)
self.assertIn("The model is quantized with quanto and is not serializable", str(e.exception))
# quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
# tmpdirname, torch_dtype=torch.float32, device_map="cpu"
# )
# self.check_inference_correctness(quantized_model_from_saved, device="cuda")
|
Test the serialization, the loading and the inference of the quantized weights
|
test_serialization_safetensors
|
python
|
huggingface/transformers
|
tests/quantization/quanto_integration/test_quanto.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quanto_integration/test_quanto.py
|
Apache-2.0
|
def test_check_offload_quantized(self):
"""
We check that we have unquantized values on the CPU and on disk
"""
from optimum.quanto import QBitsTensor, QTensor
cpu_weights = self.quantized_model.transformer.h[22].self_attention.query_key_value._hf_hook.weights_map[
"weight"
]
disk_weights = self.quantized_model.transformer.h[23].self_attention.query_key_value._hf_hook.weights_map[
"weight"
]
self.assertTrue(isinstance(cpu_weights, torch.Tensor) and not isinstance(cpu_weights, QTensor))
self.assertTrue(isinstance(disk_weights, torch.Tensor) and not isinstance(disk_weights, QTensor))
if self.weights == "int4":
self.assertTrue(isinstance(cpu_weights, torch.Tensor) and not isinstance(cpu_weights, QBitsTensor))
self.assertTrue(isinstance(disk_weights, torch.Tensor) and not isinstance(disk_weights, QBitsTensor))
|
We check that we have unquantized values on the CPU and on disk
|
test_check_offload_quantized
|
python
|
huggingface/transformers
|
tests/quantization/quanto_integration/test_quanto.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quanto_integration/test_quanto.py
|
Apache-2.0
|
def test_device_and_dtype_assignment(self):
r"""
Test whether trying to cast (or assigning a device to) a model after quantization will throw an error.
Also checks that other models are cast correctly.
"""
# This should work
if self.device_map is None:
_ = self.quantized_model.to(0)
with self.assertRaises(ValueError):
# Tries with a `dtype`
self.quantized_model.to(torch.float16)
|
Test whether trying to cast (or assigning a device to) a model after quantization will throw an error.
Also checks that other models are cast correctly.
|
test_device_and_dtype_assignment
|
python
|
huggingface/transformers
|
tests/quantization/quark_integration/test_quark.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quark_integration/test_quark.py
|
Apache-2.0
|
def test_original_dtype(self):
r"""
A simple test to check if the model successfully stores the original dtype
"""
self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype"))
self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16)
self.assertTrue(isinstance(self.quantized_model.model.layers[0].mlp.gate_proj, QParamsLinear))
|
A simple test to check if the model successfully stores the original dtype
|
test_original_dtype
|
python
|
huggingface/transformers
|
tests/quantization/quark_integration/test_quark.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quark_integration/test_quark.py
|
Apache-2.0
|
def check_inference_correctness(self, model):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
gen_config = GenerationConfig(
max_new_tokens=15,
min_new_tokens=15,
use_cache=True,
num_beams=1,
do_sample=False,
)
# Check the exactness of the results
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), generation_config=gen_config)
# Get the generation
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
|
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
|
check_inference_correctness
|
python
|
huggingface/transformers
|
tests/quantization/quark_integration/test_quark.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quark_integration/test_quark.py
|
Apache-2.0
|
def test_generate_quality(self):
"""
Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
"""
if self.device_map is None:
self.check_inference_correctness(self.quantized_model.to(0))
else:
self.check_inference_correctness(self.quantized_model)
|
Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
|
test_generate_quality
|
python
|
huggingface/transformers
|
tests/quantization/quark_integration/test_quark.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/quark_integration/test_quark.py
|
Apache-2.0
|
def test_to_dict(self):
"""
Simple test that checks that converting a config object to a dict produces a dict with the same values as the config object
"""
quantization_config = SpQRConfig()
config_to_dict = quantization_config.to_dict()
for key in config_to_dict:
self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
|
Simple test that checks that converting a config object to a dict produces a dict with the same values as the config object
|
test_to_dict
|
python
|
huggingface/transformers
|
tests/quantization/spqr_integration/test_spqr.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/spqr_integration/test_spqr.py
|
Apache-2.0
|
def test_from_dict(self):
"""
Simple test that checks that building a config object from a dict produces a config object with the same values as the dict
"""
dict = {
"beta1": 16,
"beta2": 16,
"bits": 3,
"modules_to_not_convert": ["lm_head.weight"],
"shapes": {"model.layers.0.self_attn.q_proj.dense_weights.shape": 16},
}
quantization_config = SpQRConfig.from_dict(dict)
self.assertEqual(dict["beta1"], quantization_config.beta1)
self.assertEqual(dict["beta2"], quantization_config.beta2)
self.assertEqual(dict["bits"], quantization_config.bits)
self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
self.assertEqual(dict["shapes"], quantization_config.shapes)
|
Simple test that checks that building a config object from a dict produces a config object with the same values as the dict
|
test_from_dict
|
python
|
huggingface/transformers
|
tests/quantization/spqr_integration/test_spqr.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/spqr_integration/test_spqr.py
|
Apache-2.0
|
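As a hedged illustration of the SpQRConfig round trip checked in the two rows above (pure config handling, no SpQR kernels needed; the keys mirror those used in the test):

from transformers import SpQRConfig

spqr_dict = {
    "beta1": 16,
    "beta2": 16,
    "bits": 3,
    "modules_to_not_convert": ["lm_head.weight"],
    "shapes": {"model.layers.0.self_attn.q_proj.dense_weights.shape": 16},
}
cfg = SpQRConfig.from_dict(spqr_dict)
assert cfg.bits == spqr_dict["bits"]
# to_dict() serializes the attributes back out, as checked in the test_to_dict row above
print(cfg.to_dict()["beta1"])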
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from spqr_quant import QuantizedLinear
from transformers.integrations import replace_with_spqr_linear
model_id = "meta-llama/Llama-2-7b-hf"
config = AutoConfig.from_pretrained(model_id)
quantization_config = AutoConfig.from_pretrained(self.model_name, return_dict=False).quantization_config
quantization_config = SpQRConfig.from_dict(quantization_config)
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id, config=config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model, _ = replace_with_spqr_linear(
model,
quantization_config=quantization_config,
modules_to_not_convert=quantization_config.modules_to_not_convert,
)
nb_spqr_linear = 0
for module in model.modules():
if isinstance(module, QuantizedLinear):
nb_spqr_linear += 1
self.assertEqual(nb_linears - 1, nb_spqr_linear)
|
Simple test that checks if the quantized model has been converted properly
|
test_quantized_model_conversion
|
python
|
huggingface/transformers
|
tests/quantization/spqr_integration/test_spqr.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/spqr_integration/test_spqr.py
|
Apache-2.0
|
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly
|
test_quantized_model
|
python
|
huggingface/transformers
|
tests/quantization/spqr_integration/test_spqr.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/spqr_integration/test_spqr.py
|
Apache-2.0
|
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly after being saved and loaded
|
test_save_pretrained
|
python
|
huggingface/transformers
|
tests/quantization/spqr_integration/test_spqr.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/spqr_integration/test_spqr.py
|
Apache-2.0
|
def test_quantized_model_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto")
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly with multiple GPUs
|
test_quantized_model_multi_gpu
|
python
|
huggingface/transformers
|
tests/quantization/spqr_integration/test_spqr.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/spqr_integration/test_spqr.py
|
Apache-2.0
|
def test_quantized_model_compile(self):
"""
Simple test that checks if the quantized model is working properly
"""
# Sample tokens greedily
def decode_one_tokens(model, cur_token, input_pos, cache_position, past_key_values):
logits = model(
cur_token,
position_ids=input_pos,
cache_position=cache_position,
past_key_values=past_key_values,
return_dict=False,
use_cache=True,
)[0]
new_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int)
return new_token
# Tokenize the test input
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)["input_ids"]
seq_length = input_ids.shape[1]
# Setup static KV cache for generation
past_key_values = StaticCache(
config=self.quantized_model.config,
max_batch_size=1,
max_cache_len=seq_length + self.max_new_tokens + 1,
device=torch_device,
dtype=self.quantized_model.config._pre_quantization_dtype,
)
# Allocate token ids to be generated and copy prefix ids
cache_position = torch.arange(seq_length, device=torch_device)
generated_ids = torch.zeros(1, seq_length + self.max_new_tokens, dtype=torch.int, device=torch_device)
generated_ids[:, cache_position] = input_ids.to(torch_device).to(torch.int)
# Do a forward pass to fill the prefix cache and compile the kernels if necessary
logits = self.quantized_model(
input_ids,
cache_position=cache_position,
past_key_values=past_key_values,
return_dict=False,
use_cache=True,
)[0]
next_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int)
generated_ids[:, [seq_length]] = next_token
with torch.no_grad():
# Compile the CUDA graph
decode_one_tokens = torch.compile(decode_one_tokens, mode="default", backend="inductor", fullgraph=True)
# Generate tokens one by one
cache_position = torch.tensor([seq_length + 1], device=torch_device)
for _ in range(1, self.max_new_tokens):
with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True):
next_token = decode_one_tokens(
self.quantized_model, next_token.clone(), None, cache_position, past_key_values
)
generated_ids.index_copy_(1, cache_position, next_token)
cache_position += 1
# Check generated text
self.assertEqual(
self.tokenizer.decode(generated_ids[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_COMPILE
)
|
Simple test that checks if the quantized model is working properly
|
test_quantized_model_compile
|
python
|
huggingface/transformers
|
tests/quantization/spqr_integration/test_spqr.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/spqr_integration/test_spqr.py
|
Apache-2.0
|
def test_to_dict(self):
"""
Makes sure the config format is properly set
"""
quantization_config = TorchAoConfig("int4_weight_only")
torchao_orig_config = quantization_config.to_dict()
for key in torchao_orig_config:
self.assertEqual(getattr(quantization_config, key), torchao_orig_config[key])
|
Makes sure the config format is properly set
|
test_to_dict
|
python
|
huggingface/transformers
|
tests/quantization/torchao_integration/test_torchao.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/torchao_integration/test_torchao.py
|
Apache-2.0
|
def test_json_serializable(self):
"""
Check that the config dict can be JSON serialized.
"""
quantization_config = TorchAoConfig("int4_weight_only", group_size=32, layout=TensorCoreTiledLayout())
d = quantization_config.to_dict()
self.assertIsInstance(d["quant_type_kwargs"]["layout"], list)
self.assertTrue("inner_k_tiles" in d["quant_type_kwargs"]["layout"][1])
quantization_config.to_json_string(use_diff=False)
|
Check that the config dict can be JSON serialized.
|
test_json_serializable
|
python
|
huggingface/transformers
|
tests/quantization/torchao_integration/test_torchao.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/torchao_integration/test_torchao.py
|
Apache-2.0
|
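As a hedged end-to-end sketch of the TorchAoConfig path that the rows above and the following int4 tests exercise (assumptions: torchao is installed, a CUDA device is available, and the checkpoint id is illustrative only):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TorchAoConfig

quant_config = TorchAoConfig("int4_weight_only", group_size=128)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.2-1B",  # illustrative model id
    torch_dtype=torch.bfloat16,  # int4 weight-only expects bfloat16 weights
    device_map="cuda",
    quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
inputs = tokenizer("What are we having for dinner?", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=16)[0], skip_special_tokens=True))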
def test_int4wo_quant(self):
"""
Simple LLM model testing int4 weight only quantization
"""
quant_config = TorchAoConfig("int4_weight_only", **self.quant_scheme_kwargs)
# Note: we quantize the bfloat16 model on the fly to int4
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name,
torch_dtype=torch.bfloat16,
device_map=self.device,
quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained(self.model_name)
check_torchao_int4_wo_quantized(self, quantized_model.model.layers[0].self_attn.v_proj)
input_ids = tokenizer(self.input_text, return_tensors="pt").to(self.device)
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple LLM model testing int4 weight only quantization
|
test_int4wo_quant
|
python
|
huggingface/transformers
|
tests/quantization/torchao_integration/test_torchao.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/torchao_integration/test_torchao.py
|
Apache-2.0
|
def test_int4wo_quant_bfloat16_conversion(self):
"""
Test that the model dtype will be modified to bfloat16 for int4 weight-only quantization
"""
quant_config = TorchAoConfig("int4_weight_only", **self.quant_scheme_kwargs)
# Note: we quantize the bfloat16 model on the fly to int4
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name,
torch_dtype=torch.bfloat16,
device_map=self.device,
quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained(self.model_name)
check_torchao_int4_wo_quantized(self, quantized_model.model.layers[0].self_attn.v_proj)
input_ids = tokenizer(self.input_text, return_tensors="pt").to(self.device)
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Test that the model dtype will be modified to bfloat16 for int4 weight-only quantization
|
test_int4wo_quant_bfloat16_conversion
|
python
|
huggingface/transformers
|
tests/quantization/torchao_integration/test_torchao.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/torchao_integration/test_torchao.py
|
Apache-2.0
|
def test_int4wo_offload(self):
"""
Simple test that checks if the int4 weight-only quantized model is working properly with cpu/disk offload
"""
device_map_offload = {
"model.embed_tokens": 0,
"model.layers.0": 0,
"model.layers.1": 0,
"model.layers.2": 0,
"model.layers.3": 0,
"model.layers.4": 0,
"model.layers.5": 0,
"model.layers.6": 0,
"model.layers.7": 0,
"model.layers.8": 0,
"model.layers.9": 0,
"model.layers.10": 0,
"model.layers.11": 0,
"model.layers.12": 0,
"model.layers.13": 0,
"model.layers.14": 0,
"model.layers.15": 0,
"model.layers.16": 0,
"model.layers.17": 0,
"model.layers.18": 0,
"model.layers.19": "cpu",
"model.layers.20": "cpu",
"model.layers.21": "disk",
"model.norm": 0,
"model.rotary_emb": 0,
"lm_head": 0,
}
quant_config = TorchAoConfig("int4_weight_only", group_size=32)
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name,
torch_dtype=torch.bfloat16,
device_map=device_map_offload,
quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained(self.model_name)
input_ids = tokenizer(self.input_text, return_tensors="pt").to(self.device)
# fmt: off
EXPECTED_OUTPUTS = Expectations(
{
("xpu", 3): "What are we having for dinner?\n\nJessica: (smiling)",
("cuda", 7): "What are we having for dinner?\n- 2. What is the temperature outside",
}
)
# fmt: on
EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
self.assertEqual(generated_text, EXPECTED_OUTPUT)
|
Simple test that checks if the int4 weight-only quantized model is working properly with cpu/disk offload
|
test_int4wo_offload
|
python
|
huggingface/transformers
|
tests/quantization/torchao_integration/test_torchao.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/torchao_integration/test_torchao.py
|
Apache-2.0
|
def test_int4wo_quant_multi_accelerator(self):
"""
Simple test that checks if the int4 weight-only quantized model is working properly with multiple accelerators
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 CUDA GPUs
set ZE_AFFINITY_MASK=0,1 if you have more than 2 Intel XPUs
"""
quant_config = TorchAoConfig("int4_weight_only", **self.quant_scheme_kwargs)
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name,
torch_dtype=torch.bfloat16,
device_map="auto",
quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained(self.model_name)
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
input_ids = tokenizer(self.input_text, return_tensors="pt").to(self.device)
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the int4 weight-only quantized model is working properly with multiple accelerators
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 CUDA GPUs
set ZE_AFFINITY_MASK=0,1 if you have more than 2 Intel XPUs
|
test_int4wo_quant_multi_accelerator
|
python
|
huggingface/transformers
|
tests/quantization/torchao_integration/test_torchao.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/torchao_integration/test_torchao.py
|
Apache-2.0
|
def check_serialization_expected_output(self, device, expected_output):
"""
Test if we can serialize and load/infer the model again on the same device
"""
torch_dtype = torch.bfloat16 if self.quant_scheme == "int4_weight_only" else "auto"
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname, safe_serialization=False)
loaded_quantized_model = AutoModelForCausalLM.from_pretrained(
tmpdirname, torch_dtype=torch_dtype, device_map=device
)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(device)
output = loaded_quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), expected_output)
|
Test if we can serialize and load/infer the model again on the same device
|
check_serialization_expected_output
|
python
|
huggingface/transformers
|
tests/quantization/torchao_integration/test_torchao.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/torchao_integration/test_torchao.py
|
Apache-2.0
|
def test_to_dict(self):
"""
Makes sure the config format is properly set
"""
quantization_config = VptqConfig()
vptq_orig_config = quantization_config.to_dict()
self.assertEqual(vptq_orig_config["quant_method"], quantization_config.quant_method)
|
Makes sure the config format is properly set
|
test_to_dict
|
python
|
huggingface/transformers
|
tests/quantization/vptq_integration/test_vptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/vptq_integration/test_vptq.py
|
Apache-2.0
|
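For the VPTQ rows that follow: loading a pre-quantized VPTQ checkpoint needs no explicit config because the VptqConfig is stored in the checkpoint's config.json. A hedged sketch (assumptions: the vptq package is installed; the repo id is a hypothetical placeholder, not a real checkpoint):

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "some-org/llama-vptq-quantized"  # hypothetical pre-quantized VPTQ repo id
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0], skip_special_tokens=True))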
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly
|
test_quantized_model
|
python
|
huggingface/transformers
|
tests/quantization/vptq_integration/test_vptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/vptq_integration/test_vptq.py
|
Apache-2.0
|
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly after being saved and loaded
|
test_save_pretrained
|
python
|
huggingface/transformers
|
tests/quantization/vptq_integration/test_vptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/vptq_integration/test_vptq.py
|
Apache-2.0
|
def test_quantized_model_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto")
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
|
Simple test that checks if the quantized model is working properly with multiple GPUs
|
test_quantized_model_multi_gpu
|
python
|
huggingface/transformers
|
tests/quantization/vptq_integration/test_vptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/vptq_integration/test_vptq.py
|
Apache-2.0
|
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from vptq import VQuantLinear
from transformers.integrations import replace_with_vptq_linear
model_id = "facebook/opt-350m"
config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
modules_to_not_convert = ["lm_head"]
names = [
"q_proj",
"k_proj",
"v_proj",
"out_proj",
"fc1",
"fc2",
]
value = {
"enable_norm": True,
"enable_perm": True,
"group_num": 1,
"group_size": 128,
"indices_as_float": False,
"num_centroids": [-1, 128],
"num_res_centroids": [-1, 128],
"outlier_size": 0,
"vector_lens": [-1, 12],
}
shared_layer_config = {}
for name in names:
shared_layer_config[name] = value
for i in range(24):
modules_to_not_convert.append(f"model.decoder.layers.{i}.fc1")
layer_configs = {}
layer_configs["model.decoder.project_out"] = value
layer_configs["model.decoder.project_in"] = value
quantization_config = VptqConfig(config_for_layers=layer_configs, shared_layer_config=shared_layer_config)
with init_empty_weights():
model = AutoModelForCausalLM.from_config(config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model, _ = replace_with_vptq_linear(model, quantization_config=quantization_config)
nb_vptq_linear = 0
for module in model.modules():
if isinstance(module, VQuantLinear):
nb_vptq_linear += 1
self.assertEqual(nb_linears - 1, nb_vptq_linear)
# Try with `linear_weights_not_to_quantize`
with init_empty_weights():
model = AutoModelForCausalLM.from_config(config)
quantization_config = VptqConfig(config_for_layers=layer_configs, shared_layer_config=shared_layer_config)
model, _ = replace_with_vptq_linear(
model, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert
)
nb_vptq_linear = 0
for module in model.modules():
if isinstance(module, VQuantLinear):
nb_vptq_linear += 1
# 25 comes from 24 decoder.layers.{layer_idx}.fc1
# and the last lm_head
self.assertEqual(nb_linears - 25, nb_vptq_linear)
|
Simple test that checks if the quantized model has been converted properly
|
test_quantized_model_conversion
|
python
|
huggingface/transformers
|
tests/quantization/vptq_integration/test_vptq.py
|
https://github.com/huggingface/transformers/blob/master/tests/quantization/vptq_integration/test_vptq.py
|
Apache-2.0
|
def create_tmp_repo(tmp_dir):
"""
Creates a mock repository in a temporary folder for testing.
"""
tmp_dir = Path(tmp_dir)
if tmp_dir.exists():
shutil.rmtree(tmp_dir)
tmp_dir.mkdir(exist_ok=True)
model_dir = tmp_dir / "src" / "transformers" / "models"
model_dir.mkdir(parents=True, exist_ok=True)
models = {
"bert": MOCK_BERT_CODE,
"bertcopy": MOCK_BERT_COPY_CODE,
"dummy_bert_match": MOCK_DUMMY_BERT_CODE_MATCH,
"dummy_roberta_match": MOCK_DUMMY_ROBERTA_CODE_MATCH,
"dummy_bert_no_match": MOCK_DUMMY_BERT_CODE_NO_MATCH,
"dummy_roberta_no_match": MOCK_DUMMY_ROBERTA_CODE_NO_MATCH,
}
for model, code in models.items():
model_subdir = model_dir / model
model_subdir.mkdir(exist_ok=True)
with open(model_subdir / f"modeling_{model}.py", "w", encoding="utf-8", newline="\n") as f:
f.write(code)
|
Creates a mock repository in a temporary folder for testing.
|
create_tmp_repo
|
python
|
huggingface/transformers
|
tests/repo_utils/test_check_copies.py
|
https://github.com/huggingface/transformers/blob/master/tests/repo_utils/test_check_copies.py
|
Apache-2.0
|
def patch_transformer_repo_path(new_folder):
"""
Temporarily patches the variables defined in `check_copies` to use a different location for the repo.
"""
old_repo_path = check_copies.REPO_PATH
old_doc_path = check_copies.PATH_TO_DOCS
old_transformer_path = check_copies.TRANSFORMERS_PATH
repo_path = Path(new_folder).resolve()
check_copies.REPO_PATH = str(repo_path)
check_copies.PATH_TO_DOCS = str(repo_path / "docs" / "source" / "en")
check_copies.TRANSFORMERS_PATH = str(repo_path / "src" / "transformers")
try:
yield
finally:
check_copies.REPO_PATH = old_repo_path
check_copies.PATH_TO_DOCS = old_doc_path
check_copies.TRANSFORMERS_PATH = old_transformer_path
|
Temporarily patches the variables defined in `check_copies` to use a different location for the repo.
|
patch_transformer_repo_path
|
python
|
huggingface/transformers
|
tests/repo_utils/test_check_copies.py
|
https://github.com/huggingface/transformers/blob/master/tests/repo_utils/test_check_copies.py
|
Apache-2.0
|
def create_tmp_repo(tmp_dir, models=None):
"""
Creates a repository in a temporary directory mimicking the structure of Transformers. Uses the list of models
provided (which defaults to just `["bert"]`).
"""
tmp_dir = Path(tmp_dir)
if tmp_dir.exists():
shutil.rmtree(tmp_dir)
tmp_dir.mkdir(exist_ok=True)
repo = Repo.init(tmp_dir)
if models is None:
models = ["bert"]
class_names = [model[0].upper() + model[1:] for model in models]
transformers_dir = tmp_dir / "src" / "transformers"
transformers_dir.mkdir(parents=True, exist_ok=True)
with open(transformers_dir / "__init__.py", "w") as f:
init_lines = ["from .utils import cached_file, is_torch_available"]
init_lines.extend(
[f"from .models.{model} import {cls}Config, {cls}Model" for model, cls in zip(models, class_names)]
)
f.write("\n".join(init_lines) + "\n")
with open(transformers_dir / "configuration_utils.py", "w") as f:
f.write("from .utils import cached_file\n\ncode")
with open(transformers_dir / "modeling_utils.py", "w") as f:
f.write("from .utils import cached_file\n\ncode")
utils_dir = tmp_dir / "src" / "transformers" / "utils"
utils_dir.mkdir(exist_ok=True)
with open(utils_dir / "__init__.py", "w") as f:
f.write("from .hub import cached_file\nfrom .imports import is_torch_available\n")
with open(utils_dir / "hub.py", "w") as f:
f.write("import huggingface_hub\n\ncode")
with open(utils_dir / "imports.py", "w") as f:
f.write("code")
model_dir = tmp_dir / "src" / "transformers" / "models"
model_dir.mkdir(parents=True, exist_ok=True)
with open(model_dir / "__init__.py", "w") as f:
f.write("\n".join([f"import {model}" for model in models]))
for model, cls in zip(models, class_names):
model_dir = tmp_dir / "src" / "transformers" / "models" / model
model_dir.mkdir(parents=True, exist_ok=True)
with open(model_dir / "__init__.py", "w") as f:
f.write(f"from .configuration_{model} import {cls}Config\nfrom .modeling_{model} import {cls}Model\n")
with open(model_dir / f"configuration_{model}.py", "w") as f:
f.write("from ...configuration_utils import PretrainedConfig\ncode")
with open(model_dir / f"modeling_{model}.py", "w") as f:
modeling_code = BERT_MODEL_FILE.replace("bert", model).replace("Bert", cls)
f.write(modeling_code)
test_dir = tmp_dir / "tests"
test_dir.mkdir(exist_ok=True)
with open(test_dir / "test_modeling_common.py", "w") as f:
f.write("from transformers.modeling_utils import PreTrainedModel\ncode")
for model, cls in zip(models, class_names):
test_model_dir = test_dir / "models" / model
test_model_dir.mkdir(parents=True, exist_ok=True)
(test_model_dir / "__init__.py").touch()
with open(test_model_dir / f"test_modeling_{model}.py", "w") as f:
f.write(
f"from transformers import {cls}Config, {cls}Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode"
)
example_dir = tmp_dir / "examples"
example_dir.mkdir(exist_ok=True)
for framework in ["flax", "pytorch", "tensorflow"]:
framework_dir = example_dir / framework
framework_dir.mkdir(exist_ok=True)
with open(framework_dir / f"test_{framework}_examples.py", "w") as f:
f.write("""test_args = "run_glue.py"\n""")
glue_dir = framework_dir / "text-classification"
glue_dir.mkdir(exist_ok=True)
with open(glue_dir / "run_glue.py", "w") as f:
f.write("from transformers import BertModel\n\ncode")
repo.index.add(["examples", "src", "tests"])
repo.index.commit("Initial commit")
repo.create_head("main")
repo.head.reference = repo.refs.main
repo.delete_head("master")
return repo
|
Creates a repository in a temporary directory mimicking the structure of Transformers. Uses the list of models
provided (which defaults to just `["bert"]`).
|
create_tmp_repo
|
python
|
huggingface/transformers
|
tests/repo_utils/test_tests_fetcher.py
|
https://github.com/huggingface/transformers/blob/master/tests/repo_utils/test_tests_fetcher.py
|
Apache-2.0
|
def patch_transformer_repo_path(new_folder):
"""
Temporarily patches the variables defined in `tests_fetcher` to use a different location for the repo.
"""
old_repo_path = tests_fetcher.PATH_TO_REPO
tests_fetcher.PATH_TO_REPO = Path(new_folder).resolve()
tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples"
tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers"
tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests"
try:
yield
finally:
tests_fetcher.PATH_TO_REPO = old_repo_path
tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples"
tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers"
tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests"
|
Temporarily patches the variables defined in `tests_fetcher` to use a different location for the repo.
|
patch_transformer_repo_path
|
python
|
huggingface/transformers
|
tests/repo_utils/test_tests_fetcher.py
|
https://github.com/huggingface/transformers/blob/master/tests/repo_utils/test_tests_fetcher.py
|
Apache-2.0
|
def torchrun(self, script: str, is_torchrun: bool = True):
"""Run the `script` using `torchrun` command for multi-processing in a subprocess. Captures errors as necessary."""
with tempfile.NamedTemporaryFile(mode="w+", suffix=".py") as tmp:
tmp.write(script)
tmp.flush()
tmp.seek(0)
if is_torchrun:
cmd = (
f"torchrun --nproc_per_node {self.nproc_per_node} --master_port {get_torch_dist_unique_port()} {tmp.name}"
).split()
else:
cmd = ["python3", tmp.name]
# Note that the subprocess will be waited for here, and raise an error if not successful
try:
_ = subprocess.run(cmd, capture_output=True, env=self.get_env(), text=True, check=True)
except subprocess.CalledProcessError as e:
raise Exception(f"The following error was captured: {e.stderr}")
|
Run the `script` using `torchrun` command for multi-processing in a subprocess. Captures errors as necessary.
|
torchrun
|
python
|
huggingface/transformers
|
tests/tensor_parallel/test_tensor_parallel.py
|
https://github.com/huggingface/transformers/blob/master/tests/tensor_parallel/test_tensor_parallel.py
|
Apache-2.0
|
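A hedged usage sketch for the torchrun() helper above, as it might be called from a test method (assumptions: the enclosing test class sets nproc_per_node and provides get_env(), e.g. via TestCasePlus; the distributed script body is illustrative):

script = '''
import torch.distributed as dist

dist.init_process_group(backend="gloo")
print(f"rank {dist.get_rank()} of {dist.get_world_size()}")
dist.destroy_process_group()
'''
# Inside a test method of the class defining torchrun():
# self.torchrun(script)                     # launch nproc_per_node ranks via torchrun
# self.torchrun(script, is_torchrun=False)  # or run the same script as a plain python3 subprocess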
def test_probability_sum_error(self):
"""Test that the sum of mask_replace_prob and random_replace_prob exceeding 1 raises an error."""
tokenizer = BertTokenizer(self.vocab_file)
with self.assertRaises(ValueError):
DataCollatorForLanguageModeling(tokenizer=tokenizer, mask_replace_prob=0.9, random_replace_prob=0.2)
|
Test that the sum of mask_replace_prob and random_replace_prob exceeding 1 raises an error.
|
test_probability_sum_error
|
python
|
huggingface/transformers
|
tests/trainer/test_data_collator.py
|
https://github.com/huggingface/transformers/blob/master/tests/trainer/test_data_collator.py
|
Apache-2.0
|
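As a hedged counterpart to the error case tested above, a valid configuration keeps mask_replace_prob + random_replace_prob <= 1 (assumes a transformers version that exposes these two arguments; the public BERT tokenizer is used only for illustration):

from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative tokenizer
collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm_probability=0.15,
    mask_replace_prob=0.8,    # 80% of selected tokens become [MASK]
    random_replace_prob=0.1,  # 10% become a random token; the remaining 10% are left unchanged
)
batch = collator([tokenizer("hello world"), tokenizer("masked language modeling")])
print(batch["input_ids"].shape, batch["labels"].shape)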
def test_load_backbone_from_config(self):
"""
Test that load_backbone correctly loads a backbone from a backbone config.
"""
config = MaskFormerConfig(backbone_config=ResNetConfig(out_indices=(0, 2)))
backbone = load_backbone(config)
self.assertEqual(backbone.out_features, ["stem", "stage2"])
self.assertEqual(backbone.out_indices, (0, 2))
self.assertIsInstance(backbone, ResNetBackbone)
|
Test that load_backbone correctly loads a backbone from a backbone config.
|
test_load_backbone_from_config
|
python
|
huggingface/transformers
|
tests/utils/test_backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/tests/utils/test_backbone_utils.py
|
Apache-2.0
|