python_code | repo_name | file_path
---|---|---|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of Clip module."""
import pytest
import numpy as np
import torch
from pytorch_quantization.nn.modules import clip
# make everything run on the GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
np.random.seed(1234)
torch.manual_seed(1234)
# pylint:disable=missing-docstring, no-self-use
class TestClip():
def test_simple_run(self):
x_np = np.random.rand(1023).astype(np.float32)
x_torch = torch.Tensor(x_np)
clip_op = clip.Clip(torch.tensor(0.3), torch.tensor(0.7))
clip_x_np = np.clip(x_np, 0.3, 0.7)
clip_x_torch = clip_op(x_torch)
np.testing.assert_array_equal(clip_x_torch.cpu().numpy(), clip_x_np)
def test_raise(self):
with pytest.raises(ValueError, match="must be scalar"):
clip_op = clip.Clip(torch.tensor(0.3), torch.tensor(0.7), learn_min=True)
def test_backward(self):
x = torch.randn(3, 7, requires_grad=True)
x.retain_grad()
min_value = 0.3
max_value = 0.7
clip_op = clip.Clip(min_value, max_value, learn_min=True, learn_max=True)
clip_x = clip_op(x)
clip_x.retain_grad()
labels = torch.randint(6, (3,)).type(torch.LongTensor).cuda()
criterion = torch.nn.CrossEntropyLoss()
loss = criterion(clip_x, labels)
loss.backward()
assert x.grad.cpu()[x < min_value].sum() == 0
assert x.grad.cpu()[x > max_value].sum() == 0
assert torch.equal(clip_x.grad[(x > min_value) & (x < max_value)], x.grad[(x > min_value) & (x < max_value)])
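# --- Added usage sketch (not part of the original test suite) ---
# A minimal illustration of the Clip module exercised above, assuming the same clip.Clip API:
# values are clamped to [clip_value_min, clip_value_max]; with learn_min/learn_max the bounds
# become trainable parameters and receive gradients (which test_backward checks).
def _clip_usage_sketch():
    x = torch.randn(4, 4)
    clip_op = clip.Clip(torch.tensor(-1.0), torch.tensor(1.0))
    y = clip_op(x)
    # every output value is clamped into [-1, 1]
    assert y.min() >= -1.0 and y.max() <= 1.0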
| TensorRT-master | tools/pytorch-quantization/tests/clip_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| TensorRT-master | tools/pytorch-quantization/tests/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of tensor quantization function and module"""
import pytest
import numpy as np
import torch
from torch.nn.parameter import Parameter
from pytorch_quantization import calib
from pytorch_quantization import cuda_ext
from pytorch_quantization import tensor_quant
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
import tests.utils as test_utils
from tests.fixtures import verbose
np.random.seed(123456) # seed 1234 causes 1 number mismatch at 6th decimal in one of the tests
# pylint:disable=missing-docstring, no-self-use
class TestTensorQuant():
def test_simple_run(self):
""" quantizer passes gradcheck
"""
x = Parameter(torch.randn(2, 3, dtype=torch.float64).cuda()) * 100
tensor_quant.tensor_quant(x, torch.max(torch.abs(x)), 7)
def test_per_tensor_scale(self):
""" tensor_quant matches numpy quantization
"""
torch.set_default_tensor_type('torch.cuda.FloatTensor') # Test on GPU
x_np = np.random.rand(1023)
x_torch = torch.Tensor(x_np)
quant_x_np = test_utils.quant_np(x_np, np.max(np.abs(x_np)))
quant_x_torch, _ = tensor_quant.tensor_quant(x_torch, torch.max(torch.abs(x_torch)))
np.testing.assert_array_equal(quant_x_torch.cpu().numpy(), quant_x_np)
torch.set_default_tensor_type('torch.FloatTensor')
def test_per_channel_scale(self):
""" fake_tensor_quant performs per channel quantization
"""
x_np = np.random.rand(15, 15, 64, 128).astype('float32')
x_torch = torch.Tensor(x_np).cuda()
# Pytorch filter layout seems to be KCRS, reduce max to shape [K, 1, 1, 1] to test per channel scale
# Shrink max a little, so that clip behavior is tested
amax_x_np = 0.7 * np.max(np.abs(x_np), axis=(1, 2, 3), keepdims=True)
# PyTorch's max function doesn't support reducing over multiple axes and returns a (max, argmax) tuple,
# so the reduction has to be done with repeated torch.max calls
amax_x_torch = 0.7 * torch.max(
torch.max(torch.max(x_torch, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, keepdim=True)[0]
quant_x_np = test_utils.quant_np(x_np, amax_x_np)
quant_x_torch, _ = tensor_quant.tensor_quant(x_torch, amax_x_torch)
# np.testing.assert_array_equal(quant_x_torch.cpu().numpy(), quant_x_np)
# PyTorch numerics differ slightly from numpy; results can be off by 1
np.testing.assert_array_less(np.abs(quant_x_torch.cpu().numpy() - quant_x_np), 2)
if verbose:
mismatches = np.where(np.abs(quant_x_torch.cpu().numpy() - quant_x_np) >= 1)
print("Mismatches:")
print(" Original: ", x_np[mismatches])
print(" numpy: ", quant_x_np[mismatches])
print(" Pytorch: ", quant_x_torch.cpu().numpy()[mismatches])
def test_backward(self):
""" tensor_quant implements straight through estimator on the backward pass
Note: this does not work for integer output_dtype
"""
x = torch.randn(3, 7, requires_grad=True).cuda()
labels = torch.randint(6, (3,)).type(torch.LongTensor).cuda()
quant_x, _ = tensor_quant.tensor_quant(x, x.abs().max(), 7)
float_quant_x = quant_x.type(torch.FloatTensor).cuda()
x.retain_grad()
float_quant_x.retain_grad()
criterion = torch.nn.CrossEntropyLoss().cuda()
loss = criterion(float_quant_x, labels)
loss.backward()
np.testing.assert_array_equal(float_quant_x.grad.cpu().numpy(), x.grad.cpu().numpy())
def test_unsigned(self):
x_np = np.random.rand(1023).astype('float32')
x_torch = torch.Tensor(x_np)
quant_x_np = test_utils.quant_np(x_np, np.max(np.abs(x_np)), num_bits=9, fake=False)
quant_x_torch, _ = tensor_quant.tensor_quant(x_torch, torch.max(torch.abs(x_torch)), 8, True)
np.testing.assert_array_almost_equal(quant_x_torch.cpu().numpy(), quant_x_np)
x_torch = torch.randn(3, 7)
with pytest.raises(TypeError, match="Negative values encountered"):
tensor_quant.tensor_quant(x_torch, torch.max(torch.abs(x_torch)), 8, True)
def test_overflow_fp16(self):
x_torch = torch.randn(1023).cuda().half()
with pytest.raises(ValueError, match="scale is too large for FP16"):
quant_x_torch, scale = tensor_quant.tensor_quant(x_torch, torch.tensor(1e-4).cuda().half(), 8, False)
def test_clip_gradient(self):
x = torch.randn(3, 7, requires_grad=True).cuda()
x.retain_grad()
amax = x.abs().max() / 2
x_in_range = (-amax <= x) * (x <= amax)
quant_x, _ = tensor_quant.tensor_quant(x, amax, 8)
loss = torch.sum((quant_x - 0.5)**2)
loss.backward()
np.testing.assert_array_equal(x.grad.cpu().numpy() != 0, x_in_range.cpu().numpy())
def test_full_range(self):
""" fake_tensor_quant uses the full integer range when narrow=False
"""
x_np = np.random.rand(1023).astype('float32')
x_torch = torch.Tensor(x_np).cuda()
amax = np.max(np.abs(x_np))
quant_x_np = test_utils.quant_np(x_np, amax, num_bits=9, fake=False, narrow_range=False)
quant_x_torch, _ = tensor_quant.tensor_quant(x_torch, torch.max(torch.abs(x_torch)), 8, True, False)
np.testing.assert_array_almost_equal(quant_x_torch.cpu().numpy(), quant_x_np)
class TestFakeTensorQuant():
def test_simple_run(self):
x = Parameter(torch.randn(3, 7).cuda())
tensor_quant.fake_tensor_quant(x, torch.max(torch.abs(x)))
def test_per_tensor_scale(self):
""" fake_tensor_quant matches numpy quantization
"""
x_np = np.random.rand(13).astype('float32')
print(x_np)
x_torch = torch.Tensor(x_np).cuda()
quant_x_np = test_utils.quant_np(x_np, np.max(np.abs(x_np)), fake=True)
quant_x_torch = tensor_quant.fake_tensor_quant(x_torch, torch.max(torch.abs(x_torch)))
np.testing.assert_array_almost_equal(quant_x_torch.cpu().numpy(), quant_x_np)
def test_per_channel_scale(self):
""" fake_tensor_quant performs per channel quantization
"""
x_np = np.random.rand(15, 15, 64, 128).astype('float32')
x_torch = torch.Tensor(x_np).cuda()
# Pytorch filter layout seems to be KCRS, reduce max to shape [K, 1, 1, 1] to test per channel scale
# Shrink max a little, so that clip behavior is tested
amax_x_np = 0.9 * np.max(np.abs(x_np), axis=(1, 2, 3), keepdims=True)
# PyTorch's max function doesn't support reducing over multiple axes and returns a (max, argmax) tuple,
# so the reduction has to be done with repeated torch.max calls
amax_x_torch = 0.9 * torch.max(
torch.max(torch.max(x_torch, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, keepdim=True)[0]
quant_x_np = test_utils.quant_np(x_np, amax_x_np, fake=True)
quant_x_torch = tensor_quant.fake_tensor_quant(x_torch, amax_x_torch)
# PyTorch numerics differ slightly from numpy; results will be off a little
# np.testing.assert_array_equal(quant_x_torch.cpu().numpy(), quant_x_np)
np.testing.assert_array_almost_equal(quant_x_torch.cpu().numpy(), quant_x_np, decimal=2)
if verbose:
mismatches = np.where(np.abs(quant_x_torch.cpu().numpy() - quant_x_np) >= 1e-5)
print("Mismatches:")
print(" Original: ", x_np[mismatches])
print(" numpy: ", quant_x_np[mismatches])
print(" Pytorch: ", quant_x_torch.cpu().numpy()[mismatches])
def test_backward(self):
""" fake_tensor_quant implements straight through estimator on the backward pass
"""
x = torch.randn(3, 7, requires_grad=True).cuda()
labels = torch.randint(6, (3,)).type(torch.LongTensor).cuda()
quant_x = tensor_quant.fake_tensor_quant(x, torch.max(torch.abs(x)), 7)
x.retain_grad()
quant_x.retain_grad()
criterion = torch.nn.CrossEntropyLoss().cuda()
loss = criterion(quant_x, labels)
loss.backward()
np.testing.assert_array_equal(quant_x.grad.cpu().numpy(), x.grad.cpu().numpy())
def test_unsigned(self):
x_np = np.random.rand(1023).astype('float32')
x_torch = torch.Tensor(x_np).cuda()
quant_x_np = test_utils.quant_np(x_np, np.max(np.abs(x_np)), num_bits=9, fake=True)
quant_x_torch = tensor_quant.fake_tensor_quant(x_torch, torch.max(torch.abs(x_torch)), 8, True)
np.testing.assert_array_almost_equal(quant_x_torch.cpu().numpy(), quant_x_np)
def test_cuda_ext(self):
x_np = np.random.rand(1023).astype('float32')
x_torch = torch.Tensor(x_np).cuda()
for num_bits in [3, 4, 5, 7, 8, 11]:
for unsigned in [True, False]:
test_utils.compare(
cuda_ext.fake_tensor_quant(x_torch, torch.max(torch.abs(x_torch)), num_bits, unsigned),
tensor_quant.fake_tensor_quant(x_torch, torch.max(torch.abs(x_torch)), num_bits, unsigned),
rtol=0, atol=0)
# Test fp16
x_np_fp16 = np.random.rand(1023).astype('float16')
x_torch_fp16 = torch.Tensor(x_np_fp16).cuda().half()
test_utils.compare(
cuda_ext.fake_tensor_quant(x_torch_fp16, torch.max(torch.abs(x_torch_fp16))),
tensor_quant.fake_tensor_quant(x_torch_fp16, torch.max(torch.abs(x_torch_fp16))),
rtol=0, atol=0)
def test_cuda_ext_with_axis(self):
x_np = np.random.rand(3, 4, 5, 6).astype('float32')
x_torch = torch.Tensor(x_np).cuda()
# amax along axis 1
amax_torch = torch.tensor([0.8, 0.9, 0.7, 0.6], device="cuda")
for num_bits in [3, 4, 5, 7, 8, 11]:
for unsigned in [True, False]:
cuda_ext_out = cuda_ext.fake_tensor_quant_with_axis(x_torch, amax_torch, 1, num_bits, unsigned)
pytorch_out = tensor_quant.fake_tensor_quant(x_torch, amax_torch.view(1, -1, 1, 1), num_bits, unsigned)
test_utils.compare(cuda_ext_out, pytorch_out, rtol=0, atol=0)
def test_cuda_ext_inplace(self):
x_np = np.random.rand(1023).astype('float32')
x_torch = torch.Tensor(x_np).cuda()
quant_x_np = test_utils.quant_np(x_np, np.max(np.abs(x_np)), fake=True)
cuda_ext.fake_tensor_quant_(x_torch, torch.max(torch.abs(x_torch)))
np.testing.assert_array_equal(x_torch.cpu().numpy(), quant_x_np)
# Test fp16
x_np_fp16 = np.random.rand(1023).astype('float16')
x_torch_fp16 = torch.Tensor(x_np_fp16).cuda().half()
quant_x_np_fp16 = test_utils.quant_np(x_np_fp16, np.max(np.abs(x_np_fp16)), fake=True)
cuda_ext.fake_tensor_quant_(x_torch_fp16, torch.max(torch.abs(x_torch_fp16)))
np.testing.assert_array_almost_equal(x_torch_fp16.cpu().numpy(), quant_x_np_fp16, decimal=2)
def test_overflow_fp16(self):
x_torch = torch.randn(1023).cuda().half()
quant_x_torch = tensor_quant.fake_tensor_quant(x_torch, torch.tensor(1e-4).cuda().half(), 8, False)
assert not (torch.isinf(quant_x_torch).any() or torch.isnan(quant_x_torch).any())
def test_clip_gradient(self):
x = torch.randn(3, 7, requires_grad=True).cuda()
x.retain_grad()
amax = x.abs().max() / 2
x_in_range = (-amax <= x) * (x <= amax)
quant_x = tensor_quant.fake_tensor_quant(x, amax, 8)
loss = torch.sum((quant_x - 0.5)**2)
loss.backward()
np.testing.assert_array_equal(x.grad.cpu().numpy() != 0, x_in_range.cpu().numpy())
def test_full_range(self):
""" fake_tensor_quant uses the full integer range when narrow=False
"""
x_np = np.random.rand(1023).astype('float32')
x_torch = torch.Tensor(x_np).cuda()
amax = np.max(np.abs(x_np))
quant_x_np = test_utils.quant_np(x_np, amax, num_bits=9, fake=True, narrow_range=False)
quant_x_torch = tensor_quant.fake_tensor_quant(x_torch, torch.max(torch.abs(x_torch)), 8, True, False)
np.testing.assert_array_almost_equal(quant_x_torch.cpu().numpy(), quant_x_np)
class TestQuantDescriptor():
def test_scaled_mode(self):
num_bits = np.random.randint(0, 16)
test_quant_desc = tensor_quant.QuantDescriptor(num_bits=num_bits)
assert test_quant_desc.num_bits == num_bits
assert test_quant_desc.axis is None
assert test_quant_desc.amax is None
assert not test_quant_desc.learn_amax
axis = (0, 1, 3)
test_quant_desc = tensor_quant.QuantDescriptor(axis=axis)
assert test_quant_desc.num_bits == 8 # default value
assert test_quant_desc.axis == axis
assert test_quant_desc.amax is None
amax = 0.7
test_quant_desc = tensor_quant.QuantDescriptor(amax=amax, unsigned=True)
assert test_quant_desc.axis is None
assert test_quant_desc.amax == np.float32(amax)
assert test_quant_desc.unsigned
amax = 0.7
test_quant_desc = tensor_quant.QuantDescriptor(amax=amax, learn_amax=True)
assert test_quant_desc.amax == np.float32(amax)
assert test_quant_desc.learn_amax
# Test the print string once if verbose is set.
if verbose:
print(test_quant_desc)
with pytest.raises(TypeError, match="must be float, list or ndarray"):
tensor_quant.QuantDescriptor(amax='oops')
with pytest.raises(TypeError, match="amax must be float, list or ndarray"):
tensor_quant.QuantDescriptor(amax='oops', learn_amax=True)
with pytest.raises(TypeError, match="axis is ignored and must be None"):
tensor_quant.QuantDescriptor(axis=(1, 2), amax=0.7, learn_amax=True)
def test_amax(self):
test_quant_desc = tensor_quant.QuantDescriptor()
assert test_quant_desc.amax is None
test_quant_desc = tensor_quant.QuantDescriptor(amax=1.2)
assert isinstance(test_quant_desc.amax, np.ndarray)
np.testing.assert_array_equal(test_quant_desc.amax, np.float32(1.2))
test_quant_desc = tensor_quant.QuantDescriptor(amax=[1.3, 1.4])
assert isinstance(test_quant_desc.amax, np.ndarray)
np.testing.assert_array_equal(test_quant_desc.amax, np.float32([1.3, 1.4]))
with pytest.raises(TypeError, match="must be float, list or ndarray"):
tensor_quant.QuantDescriptor(amax='oops')
def test_from_to_dict(self):
quant_desc_1 = tensor_quant.QuantDescriptor(
num_bits=2, name='a', fake_quant=True, axis=(1, 2),
amax=3.1415926536)
quant_desc_2 = tensor_quant.QuantDescriptor(**quant_desc_1.dict())
if verbose:
print(quant_desc_1.dict())
assert quant_desc_1 == quant_desc_2
quant_desc_1 = tensor_quant.QuantDescriptor(num_bits=2, amax=0.1, unsigned=True)
quant_desc_2 = tensor_quant.QuantDescriptor(**quant_desc_1.dict())
assert quant_desc_1 == quant_desc_2
def test_from_to_yaml(self):
quant_desc_1 = tensor_quant.QuantDescriptor(
num_bits=2, name='a', fake_quant=True, axis=(1, 2),
amax=3.1415926536)
quant_desc_2 = tensor_quant.QuantDescriptor.from_yaml(quant_desc_1.to_yaml())
if verbose:
print(quant_desc_1.to_yaml())
assert quant_desc_1 == quant_desc_2
quant_desc_1 = tensor_quant.QuantDescriptor(num_bits=2, amax=0.1)
quant_desc_2 = tensor_quant.QuantDescriptor.from_yaml(quant_desc_1.to_yaml())
assert quant_desc_1 == quant_desc_2
class TestFakeAffineTensorQuant():
def test_simple_run(self, verbose):
x = np.array([-1., -13., -101., -128., 0., 2., 5., 13., 93., 111., 127.], dtype=np.float32)
torch_x = torch.tensor(x).cuda()
quant_x = tensor_quant.fake_affine_tensor_quant(torch_x, torch.min(torch_x), torch.max(torch_x))
if verbose:
print(quant_x)
np.testing.assert_array_almost_equal(quant_x.cpu().numpy(), x)
def test_clip_gradient(self):
x = torch.randn(3, 7, requires_grad=True).cuda()
x.retain_grad()
xmin = x.min() / 2
xmax = x.max() / 2
x_in_range = (xmin <= x) * (x <= xmax)
quant_x = tensor_quant.fake_affine_tensor_quant(x, xmin, xmax, 8)
loss = torch.sum((quant_x - 0.5)**2)
loss.backward()
np.testing.assert_array_equal(x.grad.cpu().numpy() != 0, x_in_range.cpu().numpy())
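# --- Added illustration (a sketch, not an exercised test) ---
# Summary of the behavior checked above for 8-bit symmetric quantization with the default
# narrow range: integer codes lie in [-127, 127] and the fake-quantized values stay within +/- amax.
def _symmetric_quant_sketch():
    x = torch.randn(4, 4).cuda()
    amax = x.abs().max()
    quant_x, scale = tensor_quant.tensor_quant(x, amax, 8)  # integer-valued codes plus the scale
    fake_x = tensor_quant.fake_tensor_quant(x, amax, 8)     # quantize-dequantize in one step
    assert quant_x.abs().max() <= 127                       # narrow-range int8 codes
    assert fake_x.abs().max() <= amax + 1e-6                # dequantized values stay within amax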
| TensorRT-master | tools/pytorch-quantization/tests/tensor_quant_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of tensor quantizer"""
import yaml
import pytest
import numpy as np
import torch
from pytorch_quantization import tensor_quant
from pytorch_quantization import calib
from pytorch_quantization.nn.modules import tensor_quantizer
from pytorch_quantization import utils as quant_utils
import tests.utils as test_utils
from tests.fixtures import verbose
np.random.seed(12345)
# pylint:disable=missing-docstring, no-self-use
class TestTensorQuantizer():
def test_simple_run(self):
"""Quantizer calls fake_tensor_quant by default"""
x = torch.randn(3, 7).cuda()
amax_x = torch.max(torch.abs(x))
fn_quant_x = tensor_quant.fake_tensor_quant(x, amax_x)
quantizer = tensor_quantizer.TensorQuantizer()
module_quant_x = quantizer(x)
np.testing.assert_array_equal(fn_quant_x.cpu().numpy(), module_quant_x.cpu().numpy())
def test_simple_run_no_fake(self):
"""Quantizer fake_quant=False calls tensor_quant and sets the scale property"""
x = torch.randn(3, 7).cuda()
amax_x = torch.max(torch.abs(x))
fn_quant_x, fn_scale = tensor_quant.tensor_quant(x, amax_x)
quantizer = tensor_quantizer.TensorQuantizer(tensor_quant.QuantDescriptor(num_bits=8, fake_quant=False))
module_quant_x = quantizer(x)
module_scale = quantizer.scale
np.testing.assert_array_equal(fn_quant_x.cpu().numpy(), module_quant_x.cpu().numpy())
np.testing.assert_array_equal(fn_scale.cpu().numpy(), module_scale.cpu().numpy())
def test_per_tensor_scale(self):
"""Quantizer performs expected quantization"""
x_np = np.random.rand(1023)
x_torch = torch.Tensor(x_np)
quant_x_np = test_utils.quant_np(x_np, np.max(np.abs(x_np)))
quantizer = tensor_quantizer.TensorQuantizer(tensor_quant.QuantDescriptor(num_bits=8, fake_quant=False))
module_quant_x = quantizer(x_torch)
np.testing.assert_array_equal(module_quant_x.cpu().numpy(), quant_x_np)
def test_per_channel_scale(self, verbose):
"""Quantizer performs per channel scaling"""
x_np = np.random.rand(15, 15, 64, 128).astype('float32')
x_torch = torch.Tensor(x_np).cuda()
# Pytorch filter layout seems to be KCRS, reduce max to shape [K, 1, 1, 1] to test per channel scale
# Shrink max a little, so that clip behavior is tested
amax_x_np = 0.7 * np.max(np.abs(x_np), axis=(1, 2, 3), keepdims=True)
quant_x_np = test_utils.quant_np(x_np, amax_x_np)
quantizer = tensor_quantizer.TensorQuantizer(
tensor_quant.QuantDescriptor(num_bits=8, axis=(0), fake_quant=False, scale_amax=0.7))
quantizer.cuda()
module_quant_x = quantizer(x_torch)
# np.testing.assert_array_equal(quant_x_torch.cpu().numpy(), quant_x_np)
# PyTorch numerics differ slightly from numpy; results can be off by 1
error = np.abs(module_quant_x.cpu().numpy() - quant_x_np)
np.testing.assert_array_less(error, 2)
if verbose:
mismatches = np.where(error >= 1)
print("Mismatches:")
print(" Original: ", x_np[mismatches])
print(" numpy: ", quant_x_np[mismatches])
print(" TensorQuantizer: ", module_quant_x.cpu().numpy()[mismatches])
def test_learn_amax(self):
"""Test the clip implied by learn_amax"""
x_np = np.random.rand(1023).astype(np.float32)
x_torch = torch.Tensor(x_np)
amax = 0.5
quant_x_np = test_utils.quant_np(x_np, 0.5, fake=True)
quantizer = tensor_quantizer.TensorQuantizer(
tensor_quant.QuantDescriptor(num_bits=8, amax=amax, learn_amax=True))
assert hasattr(quantizer, 'clip')
module_quant_x = quantizer(x_torch)
np.testing.assert_array_equal(module_quant_x.cpu().detach().numpy(), quant_x_np)
def test_clip_mode(self):
"""Test the clip stage only"""
x_np = np.random.rand(1023).astype(np.float32)
x_torch = torch.Tensor(x_np)
amax = 0.5
clip_x_np = np.clip(x_np, -amax, amax)
quantizer = tensor_quantizer.TensorQuantizer(
tensor_quant.QuantDescriptor(amax=amax, learn_amax=True), if_quant=False, if_clip=True)
assert hasattr(quantizer, 'clip')
module_clip_x = quantizer(x_torch)
np.testing.assert_array_equal(module_clip_x.cpu().detach().numpy(), clip_x_np)
def test_scale_amax(self):
x_np = np.random.rand(1023).astype(np.float32)
x_torch = torch.Tensor(x_np)
amax = 0.5
scale_amax = 0.9
quant_x_np = test_utils.quant_np(x_np, amax * scale_amax, fake=True)
quantizer = tensor_quantizer.TensorQuantizer(
tensor_quant.QuantDescriptor(num_bits=8, amax=amax, scale_amax=scale_amax))
module_quant_x = quantizer(x_torch)
np.testing.assert_array_equal(module_quant_x.cpu().detach().numpy(), quant_x_np)
# Test twice. There was a bug in the scale_amax logic that modified amax on every call
module_quant_x = quantizer(x_torch)
np.testing.assert_array_equal(module_quant_x.cpu().detach().numpy(), quant_x_np)
def test_disable(self):
x = torch.randn(3, 7).cuda()
amax_x = torch.max(torch.abs(x))
quantizer = tensor_quantizer.TensorQuantizer(disabled=True)
module_quant_x = quantizer(x)
np.testing.assert_array_equal(x.cpu().numpy(), module_quant_x.cpu().numpy())
def test_state_loading(self):
"""Test quant_desc loading via state_dict"""
amax = [3.142, 2.718]
quant_desc1 = tensor_quant.QuantDescriptor(amax=amax)
quantizer1 = tensor_quantizer.TensorQuantizer(quant_desc1)
# copy state
quantizer1.load_state_dict(quantizer1.state_dict())
np.testing.assert_array_equal(quantizer1.amax.detach().cpu().numpy(), quant_desc1.amax)
def test_properties(self):
quant_desc1 = tensor_quant.QuantDescriptor(amax=3.14)
quantizer1 = tensor_quantizer.TensorQuantizer(quant_desc1)
quantizer1.amax = 0.577
assert quantizer1.amax.detach().cpu().numpy() == np.float32(0.577)
np.testing.assert_array_equal(quantizer1.amax.detach().cpu().numpy(), quantizer1.amax)
assert quantizer1.step_size == 0.577 / 127.
quant_desc2 = tensor_quant.QuantDescriptor()
quantizer2 = tensor_quantizer.TensorQuantizer(quant_desc2)
amax_np = np.array([3.142, 2.718], dtype=np.float32)
quantizer2.amax = amax_np
np.testing.assert_array_equal(quantizer2.amax.detach().cpu().numpy(), amax_np)
quant_desc3 = tensor_quant.QuantDescriptor()
quantizer3 = tensor_quantizer.TensorQuantizer(quant_desc3)
assert quantizer3.amax is None
def test_init_calib(self):
quant_desc2 = tensor_quant.QuantDescriptor(axis=(0, 1))
quantizer2 = tensor_quantizer.TensorQuantizer(quant_desc2, if_calib=True).cuda()
x_2 = torch.rand(127, 63, 7, 7).cuda()
quantizer2(x_2)
quantizer2.load_calib_amax()
assert quantizer2.amax.numel() == 127 * 63
def test_max_calib(self):
axis = 0
reduce_axis = (1, 2, 3)
quant_desc1 = tensor_quant.QuantDescriptor(axis=axis)
quantizer1 = tensor_quantizer.TensorQuantizer(quant_desc1).cuda()
quantizer1.enable_calib()
with pytest.raises(RuntimeError, match="Calibrator returned None"):
quantizer1.load_calib_amax()
x_1 = torch.rand(127, 63, 7, 7).cuda()
x_2 = torch.rand(127, 63, 7, 7).cuda()
quantizer1(x_1)
quantizer1(x_2)
quantizer1.disable_calib()
global_amax = torch.max(
quant_utils.reduce_amax(x_1, axis=reduce_axis, keepdims=True),
quant_utils.reduce_amax(x_2, axis=reduce_axis, keepdims=True))
test_utils.compare(quantizer1._calibrator.compute_amax(), global_amax, atol=0, rtol=0, ctol=0)
quantizer1.load_calib_amax()
test_utils.compare(quantizer1.amax, global_amax, atol=0, rtol=0, ctol=0)
quant_desc2 = tensor_quant.QuantDescriptor(learn_amax=True)
quantizer2 = tensor_quantizer.TensorQuantizer(quant_desc2).cuda()
quantizer2.enable_calib()
quantizer2(x_1)
quantizer2(x_2)
quantizer2.load_calib_amax()
quantizer2.init_learn_amax()
test_utils.compare(quantizer2.clip.clip_value_min, -torch.max(global_amax), atol=0, rtol=0, ctol=0)
test_utils.compare(quantizer2.clip.clip_value_max, torch.max(global_amax), atol=0, rtol=0, ctol=0)
def test_entropy_and_percentile_calib(self):
"""Don't really have a good way to test it."""
quant_desc1 = tensor_quant.QuantDescriptor(calib_method='histogram')
quantizer1 = tensor_quantizer.TensorQuantizer(quant_desc1, if_calib=True, if_quant=False).cuda()
x_1 = torch.rand(3, 63, 7, 7).cuda()
x_2 = torch.rand(3, 63, 7, 7).cuda()
quantizer1(x_1)
quantizer1(x_2)
quantizer1.load_calib_amax("entropy")
test_utils.compare(quantizer1._calibrator.compute_amax("entropy"), quantizer1.amax, atol=0, rtol=0, ctol=0)
quantizer1._calibrator.reset()
quantizer1(x_1)
quantizer1(x_2)
quantizer1.load_calib_amax("percentile", percentile=99.99)
test_utils.compare(quantizer1._calibrator.compute_amax(
"percentile", percentile=99.99), quantizer1.amax, atol=0, rtol=0, ctol=0)
def test_setters(self):
quantizer = tensor_quantizer.TensorQuantizer()
quantizer.num_bits = 7
quantizer.unsigned = True
assert quantizer.num_bits == 7
assert quantizer.unsigned
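# --- Added workflow sketch (not part of the original test suite) ---
# The typical calibration flow that the tests above exercise piecemeal, assuming the
# TensorQuantizer methods used there (load_calib_amax, disable_calib, enable_quant):
# run forward passes with quantization off to collect statistics, then load amax and quantize.
def _calibration_workflow_sketch():
    quantizer = tensor_quantizer.TensorQuantizer(
        tensor_quant.QuantDescriptor(), if_calib=True, if_quant=False).cuda()
    for _ in range(4):
        quantizer(torch.rand(16, 8).cuda())  # forward passes only feed the calibrator
    quantizer.load_calib_amax()              # turn collected statistics into amax
    quantizer.disable_calib()
    quantizer.enable_quant()
    y = quantizer(torch.rand(16, 8).cuda())  # now fake-quantizes with the calibrated amax
    assert y.abs().max() <= quantizer.amax + 1e-6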
| TensorRT-master | tools/pytorch-quantization/tests/tensor_quantizer_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utils for testing quantization."""
import numpy as np
from scipy.spatial import distance
import torch
from pytorch_quantization import tensor_quant
def quantize_by_range(x, num_bits):
"""Quantize torch tensor by range to num_bits with symmetric zero-mean quantizer."""
amax = x.abs().max()
x_q = tensor_quant.fake_tensor_quant(x, amax, num_bits)
return x_q
def quantize_by_range_fused(x_tuple, num_bits):
"""Quantize multiple torch tensors by combined range to num_bits with symmetric zero-mean quantizer."""
# compute aggregate amax across all tensors
amax = max([x.abs().max() for x in x_tuple])
# quantize each tensor with the aggregate amax
x_q_tuple = tuple(tensor_quant.fake_tensor_quant(x, amax, num_bits) for x in x_tuple)
return x_q_tuple
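# --- Added usage sketch (hypothetical helper call, not used by the tests) ---
# Fusing the range means every tensor in the tuple shares one amax/scale, so values that are
# small relative to the combined amax lose more precision than with per-tensor ranges.
def _fused_quant_example():
    a = torch.randn(4, 4)
    b = 10.0 * torch.randn(4, 4)
    a_q, b_q = quantize_by_range_fused((a, b), num_bits=8)
    assert a_q.shape == a.shape and b_q.shape == b.shape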
def copy_state_and_quantize(dst, src, num_bits):
"""Copy src to dst, quantize all 'weight' entries to num_bits."""
src_state_dict = src.state_dict()
dst_state_dict = dict()
for key in src_state_dict:
if 'weight' in key:
dst_state_dict[key] = quantize_by_range(src_state_dict[key], num_bits)
else:
dst_state_dict[key] = src_state_dict[key].clone()
dst.load_state_dict(dst_state_dict)
def copy_state_and_quantize_fused(dst, src, num_bits):
"""Copy src to dst, quantize all 'weight' entries to num_bits using the aggregate amax."""
src_state_dict = src.state_dict()
dst_state_dict = dict()
# compute aggregate amax across all weight tensors
amax = 0
for key in src_state_dict:
if 'weight' in key:
amax = max(amax, src_state_dict[key].abs().max())
# quantize each weight tensor with the aggregate amax
for key in src_state_dict:
if 'weight' in key:
dst_state_dict[key] = tensor_quant.fake_tensor_quant(src_state_dict[key], amax, num_bits)
else:
dst_state_dict[key] = src_state_dict[key].clone()
dst.load_state_dict(dst_state_dict)
def compare(a, b, rtol=1e-7, atol=1e-6, ctol=1e-6):
"""Compare two tensors and raise AssertionError if their difference is outside of tolerance."""
if torch.isinf(a).any():
raise ValueError("a contains infs")
if torch.isinf(b).any():
raise ValueError("b contains infs")
a = a.detach().cpu().numpy().flatten()
b = b.detach().cpu().numpy().flatten()
# compare elements of a and b relative to the overall magnitude (norm) of b, since
# large fp32 values may cause quantization errors that propagate to small values
rel_diff = np.abs(a-b)/np.linalg.norm(b)
abs_diff = np.abs(a-b)
cos_diff = distance.cosine(a, b)
try:
if rel_diff.max() > rtol:
raise AssertionError("Tensor relative error > %.2e (%.2e)" % (rtol, rel_diff.max()))
if abs_diff.max() > atol:
raise AssertionError("Tensor absolute error > %.2e (%.2e)" % (atol, abs_diff.max()))
if cos_diff > ctol:
raise AssertionError("Tensor cosine distance > %.2e (%.2e)" % (ctol, cos_diff))
# np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
# np.testing.assert_array_almost_equal_nulp(a, b)
except AssertionError as e:
print('norm(a) =', np.linalg.norm(a))
print('norm(b) =', np.linalg.norm(b))
print('Largest relative difference = %.2e' % rel_diff.max())
idx = np.argmax(rel_diff)
print('a[%d] = %.10f' % (idx, a[idx]))
print('b[%d] = %.10f' % (idx, b[idx]))
print('Largest absolute difference = %.2e' % abs_diff.max())
idx = np.argmax(abs_diff)
print('a[%d] = %.10f' % (idx, a[idx]))
print('b[%d] = %.10f' % (idx, b[idx]))
print('Cosine distance = %.2e' % cos_diff)
raise e
def assert_min_mse(a, b, tol=1e-20):
"""Assert that the mean squared error between a and b is at least tol."""
a = a.detach().cpu().numpy()
b = b.detach().cpu().numpy()
mse = ((a-b)**2).mean()
if mse < tol:
raise AssertionError("MSE = %.2e < %.2e" % (mse, tol))
def quant_np(x, amax, num_bits=8, fake=False, narrow_range=True):
"""Quantize x using numpy."""
intmax = 2.0**(num_bits - 1) - 1
intmin = -intmax if narrow_range else -intmax - 1
scale = intmax / amax
x_q = np.round(np.clip(x * scale, intmin, intmax))
if fake:
x_q /= scale
return x_q
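# --- Added worked example for quant_np (a sketch, not used by the tests) ---
# With num_bits=8, narrow_range=True and amax=1.0 the scale is intmax/amax = 127,
# so 0.4 -> round(50.8) = 51, -1.0 -> -127, and 2.0 clips to 127.
def _quant_np_example():
    x = np.array([0.4, -1.0, 2.0], dtype=np.float32)
    np.testing.assert_array_equal(quant_np(x, 1.0), np.float32([51., -127., 127.]))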
| TensorRT-master | tools/pytorch-quantization/tests/utils.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests of calibrators"""
import pytest
import numpy as np
import torch
from pytorch_quantization import utils as quant_utils
from pytorch_quantization import calib
from pytorch_quantization import nn as quant_nn
import tests.utils as test_utils
from tests.fixtures import verbose
from tests.fixtures.models import QuantLeNet
np.random.seed(12345)
torch.manual_seed(12345)
# pylint:disable=missing-docstring, no-self-use
class TestMaxCalibrator():
def test_simple_run(self):
max_calibrator = calib.MaxCalibrator(8, None, False)
x_1 = torch.rand(129).cuda()
x_2 = torch.rand(127).cuda()
max_calibrator.collect(x_1)
max_calibrator.collect(x_2)
test_utils.compare(max_calibrator.compute_amax(), torch.max(x_1.max(), x_2.max()), atol=0, rtol=0, ctol=0)
# Nothing to test other than creation
max_calibrator = calib.MaxCalibrator(8, None, True)
def test_fine_grain(self):
axis = 0
reduce_axis = (1, 2, 3)
max_calibrator = calib.MaxCalibrator(8, axis, False)
x_1 = torch.rand(31, 63, 7, 7).cuda()
x_2 = torch.rand(31, 63, 7, 7).cuda()
max_calibrator.collect(x_1)
max_calibrator.collect(x_2)
assert max_calibrator.compute_amax().shape[0] == 31
test_utils.compare(max_calibrator.compute_amax(),
quant_utils.reduce_amax(torch.max(x_1, x_2), axis=reduce_axis),
atol=0, rtol=0, ctol=0)
max_calibrator.reset()
assert max_calibrator.compute_amax() is None
def test_raises(self):
axis = 0
max_calibrator = calib.MaxCalibrator(8, axis, False)
x_2 = torch.rand(32, 63, 7, 7).cuda()
x_3 = torch.rand(33, 63, 7, 7).cuda()
max_calibrator.collect(x_2)
with pytest.raises(RuntimeError, match="shape changed"):
max_calibrator.collect(x_3)
def test_track_amax(self):
max_calibrator = calib.MaxCalibrator(8, None, False, track_amax=True)
x_1 = torch.rand(129).cuda()
x_2 = torch.rand(127).cuda()
max_calibrator.collect(x_1)
max_calibrator.collect(x_2)
test_utils.compare(max_calibrator.compute_amax(), torch.max(x_1.max(), x_2.max()), atol=0, rtol=0, ctol=0)
np.testing.assert_array_equal(max_calibrator.amaxs[0], x_1.max().cpu().numpy())
np.testing.assert_array_equal(max_calibrator.amaxs[1], x_2.max().cpu().numpy())
def test_repr(self):
max_calibrator = calib.MaxCalibrator(8, None, False, track_amax=True)
repr(max_calibrator)
class TestHistogramCalibrator():
def test_grow(self, verbose):
x_1 = torch.tensor([0, 255, 255, 255, 255, 255]).cuda()
x_2 = torch.tensor([0, 255, 255, 255, 255, 256]).cuda()
hist_calibrator = calib.HistogramCalibrator(8, None, False, grow_method='stretch')
hist_calibrator.collect(x_1)
hist_calibrator.collect(x_2)
amax = hist_calibrator.compute_amax(method='entropy')
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
# amax should be closer to 256 because the last bin gets stretched to (~255, 257)
assert (amax - 255.).abs() < (amax - 256.).abs()
hist_calibrator = calib.HistogramCalibrator(8, None, False, grow_method='append')
hist_calibrator.collect(x_1)
hist_calibrator.collect(x_2)
amax = hist_calibrator.compute_amax(method='mse')
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
# amax should be closer to 255
assert (amax - 255.).abs() < 0.5
def test_skip_zeros(self, verbose):
x_1 = torch.tensor([0, 0, 0, 0, 0, 1, 2, 3, 4, 5])
x_2 = torch.tensor([0, 0, 0, 0, 0, 6, 7, 8, 9, 10])
calibrator = calib.HistogramCalibrator(8, None, False, skip_zeros=True)
calibrator.collect(x_1)
calibrator.collect(x_2)
amax = calibrator.compute_amax("percentile", percentile=50)
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
# amax should be close to 5
assert (amax - 5.).abs() < 10/2048
def test_torch_hist(self):
x_1 = torch.rand(1023, device="cuda")
x_1[0] = 0
x_2 = torch.rand(1023, device="cuda") + 1 # Make sure histogram bins need to be grown
x_2[1] = 0
calibrator_np = calib.HistogramCalibrator(8, None, False, num_bins=19, torch_hist=False)
calibrator_torch = calib.HistogramCalibrator(8, None, False, num_bins=19, torch_hist=True)
calibrator_np.collect(x_1)
calibrator_torch.collect(x_1)
assert calibrator_torch._calib_hist.numel() == calibrator_torch._calib_bin_edges.numel() - 1
np.testing.assert_array_equal(calibrator_np._calib_hist, calibrator_torch._calib_hist.cpu().numpy())
np.testing.assert_array_almost_equal(
calibrator_np._calib_bin_edges, calibrator_torch._calib_bin_edges.cpu().numpy())
# Test multiple collections, some of which need to expand the histogram range
for _ in range(3):
calibrator_np.collect(x_2)
calibrator_torch.collect(x_2)
calibrator_np.collect(x_1)
calibrator_torch.collect(x_1)
# Test compute_amax function doesn't convert _calib_hist and _calib_bin_edges unnecessarily
calibrator_np.compute_amax("percentile", percentile=99.99)
calibrator_torch.compute_amax("percentile", percentile=99.99)
np.testing.assert_array_equal(calibrator_np._calib_hist, calibrator_torch._calib_hist.cpu().numpy())
np.testing.assert_array_almost_equal(
calibrator_np._calib_bin_edges, calibrator_torch._calib_bin_edges.cpu().numpy())
assert calibrator_torch._calib_hist.numel() == calibrator_torch._calib_bin_edges.numel() - 1
class TestEntropyCalibrator():
def test_one_tensor(self, verbose):
hist_calibrator = calib.HistogramCalibrator(8, None, False, grow_method='stretch')
x_2 = torch.rand(11, 7, 3, 3).cuda() # uniform in (0,1)
x_2[1, 1, 1, 1] = 10. # create outlier
hist_calibrator.collect(x_2)
# We don't have a better test metric; the single outlier value 10 should be discarded by the KL-divergence method
amax = hist_calibrator.compute_amax("entropy")
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
assert amax < 1.1
def test_unsigned(self, verbose):
hist_calibrator = calib.HistogramCalibrator(8, None, True, grow_method='stretch')
x_2 = torch.rand(11, 7, 3, 3).cuda() # uniform in (0,1)
x_2[1, 1, 1, 1] = 10. # create outlier
hist_calibrator.collect(x_2)
amax = hist_calibrator.compute_amax("entropy")
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
assert amax < 1.1
@pytest.mark.parametrize("torch_hist", [False, True])
def test_two_tensor(self, torch_hist, verbose):
hist_calibrator = calib.HistogramCalibrator(8, None, False, torch_hist=torch_hist)
x_2 = torch.rand(11, 7, 3, 3).cuda() # uniform in (0,1)
x_2[1, 1, 1, 1] = 10. # create outlier
hist_calibrator.collect(x_2)
x_3 = torch.rand(11, 7, 3, 3).cuda()
hist_calibrator.collect(x_3)
# We don't have a better test metric; the single outlier value 10 should be discarded by the KL-divergence method
amax = hist_calibrator.compute_amax("entropy")
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
assert amax < 1.1
def test_repr(self):
hist_calibrator = calib.HistogramCalibrator(8, None, True)
repr(hist_calibrator)
class TestMSECalibrator():
def test_one_tensor(self, verbose):
calibrator = calib.HistogramCalibrator(8, None, False)
x_1 = torch.ones(11, 7, 3, 3).cuda() * 255.
x_1[1, 1, 1, 1] = 256. # create an outlier
calibrator.collect(x_1)
amax = calibrator.compute_amax("mse")
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
# amax should be closer to 255
assert (amax - 255.).abs() < (amax - 256.).abs()
def test_unsigned_one_tensor(self, verbose):
calibrator = calib.HistogramCalibrator(8, None, True)
x_1 = torch.ones(11, 7, 3, 3).cuda() * 512.
x_1[1, 1, 1, 1] = 513. # create an outlier
calibrator.collect(x_1)
amax = calibrator.compute_amax("mse")
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
# amax should be closer to 512
assert (amax - 512.).abs() < (amax - 513.).abs()
@pytest.mark.parametrize("torch_hist", [False, True])
def test_two_tensor(self, torch_hist, verbose):
calibrator = calib.HistogramCalibrator(8, None, False, torch_hist=torch_hist)
x_1 = torch.ones(11, 7, 3, 3).cuda() * 255.
x_1[1, 1, 1, 1] = 256. # create an outlier
calibrator.collect(x_1)
x_2 = torch.ones(11, 7, 3, 3).cuda() * 255.
calibrator.collect(x_2)
amax = calibrator.compute_amax("mse")
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
# amax should be closer to 255
assert (amax - 255.).abs() < (amax - 256.).abs()
def test_repr(self):
calibrator = calib.HistogramCalibrator(8, None, False)
repr(calibrator)
class TestPercentileCalibrator():
def test_one_tensor(self, verbose):
calibrator = calib.HistogramCalibrator(8, None, False)
x_1 = torch.arange(100)
calibrator.collect(x_1)
amax = calibrator.compute_amax("percentile", percentile=90)
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
# amax should be approximately 89
assert (amax - 89.).abs() < 100/1024
def test_unsigned_one_tensor(self, verbose):
calibrator = calib.HistogramCalibrator(8, None, True)
x_1 = torch.arange(100)
calibrator.collect(x_1)
amax = calibrator.compute_amax("percentile", percentile=80)
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
# amax should be approximately 79
assert (amax - 79.).abs() < 100/2048
@pytest.mark.parametrize("torch_hist", [False, True])
def test_two_tensor(self, torch_hist, verbose):
calibrator = calib.HistogramCalibrator(8, None, False, torch_hist=torch_hist)
x_1 = torch.arange(100)
calibrator.collect(x_1)
x_2 = torch.arange(0, 50, 0.5)
calibrator.collect(x_2)
amax = calibrator.compute_amax("percentile", percentile=99)
if verbose:
print('amax={:.4f}'.format(amax.item()), end=' ')
# amax should be approximately 97
assert (amax - 97.).abs() < 100/1024
def test_repr(self):
calibrator = calib.HistogramCalibrator(8, None, False)
repr(calibrator)
def test_range(self):
calibrator = calib.HistogramCalibrator(8, None, False)
x_1 = torch.arange(100)
calibrator.collect(x_1)
with pytest.raises(ValueError, match="range"):
calibrator.compute_amax("percentile", percentile=-10)
with pytest.raises(ValueError, match="range"):
calibrator.compute_amax("percentile", percentile=200)
class TestCalibrateWeights():
def test_max(self):
torch.manual_seed(12345)
ref_lenet = QuantLeNet()
torch.manual_seed(12345)
test_lenet = QuantLeNet()
for module in ref_lenet.modules():
if isinstance(module, (quant_nn.QuantConv2d, quant_nn.QuantLinear)):
module.weight_quantizer.enable_calib()
module.weight_quantizer.disable_quant()
module.weight_quantizer(module.weight)
module.weight_quantizer.load_calib_amax()
calib.calibrate_weights(test_lenet, method="max")
for ref_module, test_module in zip(ref_lenet.modules(), test_lenet.modules()):
if isinstance(ref_module, (quant_nn.QuantConv2d, quant_nn.QuantLinear)):
test_utils.compare(
ref_module.weight_quantizer.amax, test_module.weight_quantizer.amax, rtol=0, atol=0, ctol=0)
assert ref_module.weight_quantizer.amax.shape == test_module.weight_quantizer.amax.shape
def test_shape_with_axis(self):
"""Check calibrate_weight function returns same shape as TensorQuantizer"""
torch.manual_seed(12345)
ref_lenet = QuantLeNet()
torch.manual_seed(12345)
test_lenet = QuantLeNet()
for module in ref_lenet.modules():
if isinstance(module, (quant_nn.QuantConv2d, quant_nn.QuantLinear)):
module.weight_quantizer.enable_calib()
module.weight_quantizer.disable_quant()
module.weight_quantizer(module.weight)
module.weight_quantizer.load_calib_amax()
calib.calibrate_weights(test_lenet, method="percentile")
for ref_module, test_module in zip(ref_lenet.modules(), test_lenet.modules()):
if isinstance(ref_module, (quant_nn.QuantConv2d, quant_nn.QuantLinear)):
assert ref_module.weight_quantizer.amax.shape == test_module.weight_quantizer.amax.shape
def test_percentile(self):
torch.manual_seed(12345)
test_lenet = QuantLeNet()
test_percentile = 99.99
ref_calibrator = calib.HistogramCalibrator(8, None, False)
calib.calibrate_weights(test_lenet, method="percentile", perchannel=False, percentile=test_percentile)
ref_calibrator.collect(test_lenet.conv1.weight)
ref_amax = ref_calibrator.compute_amax("percentile", percentile=test_percentile)
test_utils.compare(ref_amax, test_lenet.conv1.weight_quantizer.amax, rtol=0, atol=0, ctol=0)
def test_percentile_with_axis(self):
torch.manual_seed(12345)
test_lenet = QuantLeNet()
test_percentile = 99.99
ref_calibrator = calib.HistogramCalibrator(8, None, False)
calib.calibrate_weights(test_lenet, method="percentile", perchannel=True, percentile=test_percentile)
ref_calibrator.collect(test_lenet.conv2.weight[1])
ref_amax = ref_calibrator.compute_amax("percentile", percentile=test_percentile)
test_utils.compare(ref_amax, test_lenet.conv2.weight_quantizer.amax[1], rtol=0, atol=0, ctol=0)
def test_mse(self):
torch.manual_seed(12345)
test_lenet = QuantLeNet()
ref_calibrator = calib.HistogramCalibrator(8, None, False)
calib.calibrate_weights(test_lenet, method="mse", perchannel=False)
ref_calibrator.collect(test_lenet.conv1.weight)
ref_amax = ref_calibrator.compute_amax("mse")
test_utils.compare(ref_amax, test_lenet.conv1.weight_quantizer.amax, rtol=0, atol=0, ctol=0)
def test_mse_with_axis(self):
torch.manual_seed(12345)
test_lenet = QuantLeNet()
ref_calibrator = calib.HistogramCalibrator(8, None, False)
calib.calibrate_weights(test_lenet, method="mse", perchannel=True)
ref_calibrator.collect(test_lenet.conv2.weight[1])
ref_amax = ref_calibrator.compute_amax("mse")
test_utils.compare(ref_amax, test_lenet.conv2.weight_quantizer.amax[1], rtol=0, atol=0, ctol=0)
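# --- Added direct-usage sketch (not part of the original test suite) ---
# The calibrators can also be driven directly, using the same constructor arguments as above
# (num_bits, axis, unsigned): collect activation batches, then compute amax with a chosen method.
def _histogram_calibrator_sketch():
    calibrator = calib.HistogramCalibrator(8, None, False)
    for _ in range(4):
        calibrator.collect(torch.rand(32, 16).cuda())
    amax_entropy = calibrator.compute_amax("entropy")
    amax_p99 = calibrator.compute_amax("percentile", percentile=99.9)
    assert amax_entropy > 0 and amax_p99 > 0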
| TensorRT-master | tools/pytorch-quantization/tests/calibrator_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of QuantInstanceNorm module.
Mose tests check the functionality of all the combinations in Quant instancenorm against the corresponding functionalities in
tensor_quant. There are tests for all the three QuantInstaceNorm1D, QuantInstanceNorm2D, and QuantInstanceNorm3D
"""
import pytest
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from pytorch_quantization import tensor_quant
from pytorch_quantization.tensor_quant import QuantDescriptor
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
from pytorch_quantization import utils as quant_utils
from pytorch_quantization.nn.modules import quant_instancenorm
#import tests.utils as test_utils
# make everything run on the GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.backends.cudnn.deterministic = True
np.random.seed(1234)
# pylint:disable=missing-docstring, no-self-use
NUM_CHANNELS = 15
class TestQuantInstanceNorm1D():
def test_no_quant(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm1d(NUM_CHANNELS, affine=True)
quant_instancenorm_object.input_quantizer.disable()
test_input = torch.randn(8, NUM_CHANNELS, 128)
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(test_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm1d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor())
test_input = torch.randn(8, NUM_CHANNELS, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm1d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor(axis=(1)))
test_input = torch.randn(8, NUM_CHANNELS, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input,
torch.abs(test_input).max(0, keepdim=True)[0].max(2, keepdim=True)[0])
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantInstanceNorm2D():
def test_no_quant(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm2d(NUM_CHANNELS, affine=True)
quant_instancenorm_object.input_quantizer.disable()
test_input = torch.randn(8, NUM_CHANNELS, 128, 128)
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(test_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm2d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor())
test_input = torch.randn(8, NUM_CHANNELS, 128, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm2d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor(axis=(1)))
test_input = torch.randn(8, NUM_CHANNELS, 128, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input,
torch.abs(test_input).max(0, keepdim=True)[0].max(2, keepdim=True)[0].max(3, keepdim=True)[0])
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantInstanceNorm3D():
def test_no_quant(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm3d(NUM_CHANNELS, affine=True)
quant_instancenorm_object.input_quantizer.disable()
test_input = torch.randn(8, NUM_CHANNELS, 128, 128, 128)
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(test_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm3d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor())
test_input = torch.randn(8, NUM_CHANNELS, 128, 128, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
quant_instancenorm_object = quant_instancenorm.QuantInstanceNorm3d(NUM_CHANNELS, affine=True,
quant_desc_input=QuantDescriptor(axis=(1)))
test_input = torch.randn(8, NUM_CHANNELS, 128, 128, 128)
quant_input = tensor_quant.fake_tensor_quant(test_input,
torch.abs(test_input).max(0, keepdim=True)[0].max(2, keepdim=True)[0]
.max(3, keepdim=True)[0].max(4, keepdim=True)[0])
out1 = quant_instancenorm_object(test_input)
out2 = F.instance_norm(quant_input,
quant_instancenorm_object.running_mean,
quant_instancenorm_object.running_var,
quant_instancenorm_object.weight,
quant_instancenorm_object.bias)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
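# --- Added reduction sketch (not part of the original test suite) ---
# The chained .max(dim, keepdim=True)[0] calls above build a per-channel amax of shape
# [1, C, 1, 1]. The same reduction can be written with quant_utils.reduce_amax (imported above),
# assuming its axis argument lists the dimensions to reduce over.
def _per_channel_amax_sketch():
    x = torch.randn(8, NUM_CHANNELS, 32, 32)
    amax = quant_utils.reduce_amax(x, axis=(0, 2, 3), keepdims=True)
    ref = torch.abs(x).max(0, keepdim=True)[0].max(2, keepdim=True)[0].max(3, keepdim=True)[0]
    assert torch.allclose(amax, ref)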
| TensorRT-master | tools/pytorch-quantization/tests/quant_instancenorm_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test pytorch_quantization.utils"""
import pytest
import numpy as np
import torch
from pytorch_quantization import utils as quant_utils
from tests.fixtures import verbose
np.random.seed(12345)
# pylint:disable=missing-docstring, no-self-use
class TestQuantUtils():
def test_reduce_amax(self):
x_np = (np.random.rand(3, 7, 11, 13, 17) - 0.1).astype(np.float32)
x_torch = torch.tensor(x_np)
# Test reduce to one value
amax_np = np.max(np.abs(x_np))
amax_torch = quant_utils.reduce_amax(x_torch)
np.testing.assert_array_equal(amax_np, amax_torch.cpu().numpy())
# Test different axis
axes = [(1, 2, 3), (0, 2, 3), (0, 3), (0, 1, 3, 4)]
for axis in axes:
keepdims = np.random.rand() > 0.5
amax_np = np.max(np.abs(x_np), axis=axis, keepdims=keepdims)
amax_torch = quant_utils.reduce_amax(x_torch, axis=axis, keepdims=keepdims)
np.testing.assert_array_almost_equal(amax_np, amax_torch.cpu().numpy())
with pytest.raises(ValueError) as excinfo:
quant_utils.reduce_amax(x_torch, axis=(0, 1, 2, 3, 4, 5))
assert "Cannot reduce more axes" in str(excinfo.value)
| TensorRT-master | tools/pytorch-quantization/tests/quant_utils_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of QuantLinear module.
Most tests check the functionality of all the combinations in QuantLinear against the corresponding functionality
in tensor_quant.
"""
import pytest
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from pytorch_quantization import tensor_quant
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
from pytorch_quantization import utils as quant_utils
from pytorch_quantization.nn.modules import quant_linear
import tests.utils as test_utils
# make everything run on the GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
np.random.seed(1234)
torch.manual_seed(1234)
# pylint:disable=missing-docstring, no-self-use
class TestQuantLinear():
def test_raise(self):
with pytest.raises(ValueError) as excinfo:
quant_linear_object = quant_linear.QuantLinear(
7, 9, bias=False, quant_desc_weight=tensor_quant.QuantDescriptor(fake_quant=False))
assert "Only fake quantization is supported" in str(excinfo.value)
#Quantizing weight
def test_weight_fake_per_tensor(self):
with torch.cuda.device(0):
size = 256
quant_linear_object = quant_linear.QuantLinear(
size,
size,
bias=False,
quant_desc_weight=tensor_quant.QuantDescriptor(axis=None))
quant_linear_object.input_quantizer.disable()
test_input = torch.randn(size, size)
weight_copy = quant_linear_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.linear(test_input, quant_weight)
out2 = quant_linear_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_weight_fake_per_channel(self):
size_in = 255
size_out = 257
quant_linear_object = quant_linear.QuantLinear(
size_in, size_out, bias=False,
quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_LINEAR_WEIGHT_PER_ROW)
quant_linear_object.input_quantizer.disable()
test_input = torch.randn(32, size_in)
weight_copy = quant_linear_object.weight.clone()
amax = quant_utils.reduce_amax(weight_copy, axis=1, keepdims=True)
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, amax)
out1 = F.linear(test_input, quant_weight)
out2 = quant_linear_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
# Quantizing activations
def test_test_input_fake_per_tensor(self):
size_in = 255
size_out = 257
quant_linear_object = quant_linear.QuantLinear(
size_in, size_out, bias=False)
quant_linear_object.weight_quantizer.disable()
test_input = torch.randn(32, size_in)
weight_copy = quant_linear_object.weight.clone()
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.linear(quant_input, weight_copy)
out2 = quant_linear_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
"""quantize everything, activations will scaled per tensor in ALL cases"""
size_in = 255
size_out = 257
quant_linear_object = quant_linear.QuantLinear(
size_in, size_out, bias=False, quant_desc_weight=tensor_quant.QuantDescriptor())
test_input = torch.randn(32, size_in)
weight_copy = quant_linear_object.weight.clone()
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.linear(quant_input, quant_weight)
out2 = quant_linear_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor_with_bias(self):
"""quantize everything, activations will scaled per tensor in ALL cases"""
size_in = 255
size_out = 257
quant_linear_object = quant_linear.QuantLinear(
            size_in, size_out, bias=True, quant_desc_weight=tensor_quant.QuantDescriptor())
test_input = torch.randn(32, 17, 93, size_in) # Test input other than 2 dimensional
weight_copy = quant_linear_object.weight.clone()
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.linear(quant_input, quant_weight, bias=quant_linear_object.bias)
out2 = quant_linear_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
"""quantize everything, activations will scaled per tensor in ALL cases"""
size_in = 255
size_out = 257
quant_linear_object = quant_linear.QuantLinear(size_in, size_out, bias=False,
quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_LINEAR_WEIGHT_PER_ROW)
test_input = torch.randn(32, size_in)
weight_copy = quant_linear_object.weight.clone()
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy,
torch.max(torch.abs(weight_copy), dim=1, keepdim=True)[0])
out1 = F.linear(quant_input, quant_weight)
out2 = quant_linear_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_other_precs(self):
"""Test some precisions other than 8bit."""
size_in = 255
size_out = 257
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=4)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=3)
quant_linear_object = quant_linear.QuantLinear(
size_in,
size_out,
bias=False,
quant_desc_input=quant_desc_input,
quant_desc_weight=quant_desc_weight)
weight_quantizer = TensorQuantizer(quant_desc_weight)
test_input_quantizer = TensorQuantizer(quant_desc_input)
test_input = torch.randn(32, size_in)
weight_copy = quant_linear_object.weight.clone()
quant_input = test_input_quantizer(test_input)
quant_weight = weight_quantizer(weight_copy)
out1 = F.linear(quant_input, quant_weight)
out2 = quant_linear_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_against_unquantized(self):
"""
        Quantized Linear should introduce only a bounded error compared to an unquantized Linear
"""
size_in = 255
size_out = 257
test_input = torch.randn(32, size_in).cuda()
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
quant_linear_layer = quant_linear.QuantLinear(
size_in,
size_out,
bias=True,
quant_desc_input=tensor_quant.QuantDescriptor(num_bits=16),
quant_desc_weight=tensor_quant.QuantDescriptor(num_bits=16, axis=0))
# Reset seed. Make sure weight and bias are the same
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
linear_layer = nn.Linear(size_in, size_out, bias=True)
quant_out_features = quant_linear_layer(test_input)
out_features = linear_layer(test_input)
# The difference between Linear and QuantLinear should be bounded in a range
# Small values which become 0 after quantization lead to large relative errors. rtol and atol could be
# much smaller without those values
np.testing.assert_allclose(
quant_out_features.detach().cpu().numpy(), out_features.detach().cpu().numpy(), rtol=0.01, atol=1e-4)
def test_set_default_quant_desc(self):
quant_linear_layer = quant_linear.QuantLinear(32, 257)
assert quant_linear_layer.input_quantizer.axis == None
assert quant_linear_layer.weight_quantizer.axis == (0)
# set default to a different one
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=11)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=13, axis=1)
quant_linear.Linear.set_default_quant_desc_input(quant_desc_input)
quant_linear.Linear.set_default_quant_desc_weight(quant_desc_weight)
# Create one with default descriptor
quant_linear_layer = quant_linear.QuantLinear(32, 257)
# Check quant_desc in quantizer created with default descriptor
assert quant_linear_layer.input_quantizer.num_bits == quant_desc_input.num_bits
assert quant_linear_layer.weight_quantizer.axis == quant_desc_weight.axis
def test_unused_kwargs(self):
with pytest.raises(TypeError, match="Unused keys"):
quant_linear_layer = quant_linear.QuantLinear(32, 257, descriptor='oops')
| TensorRT-master | tools/pytorch-quantization/tests/quant_linear_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests of Quant Module Replacement"""
import pytest
import numpy as np
import torch
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import quant_modules
from pytorch_quantization.quant_modules import QuantModuleReplacementHelper
import tests.utils as test_utils
from tests.fixtures import verbose
# pylint:disable=missing-docstring, no-self-use
class TestQuantModuleReplace():
def test_simple_default_args(self):
replacement_helper = QuantModuleReplacementHelper()
replacement_helper.prepare_state()
replacement_helper.apply_quant_modules()
        # With default arguments, every supported module (including Linear and Conv2d) is replaced with its quantized version
assert(type(quant_nn.QuantLinear(16, 256, 3)) == type(torch.nn.Linear(16, 256, 3)))
assert(type(quant_nn.QuantConv2d(16, 256, 3)) == type(torch.nn.Conv2d(16, 256, 3)))
replacement_helper.restore_float_modules()
def test_with_no_replace_list(self):
no_replace_list = ["Linear"]
custom_quant_modules = None
replacement_helper = QuantModuleReplacementHelper()
replacement_helper.prepare_state(no_replace_list, custom_quant_modules)
replacement_helper.apply_quant_modules()
        # Linear is excluded from replacement by no_replace_list, while Conv2d is still replaced
assert(type(quant_nn.QuantLinear(16, 256, 3)) != type(torch.nn.Linear(16, 256, 3)))
assert(type(quant_nn.QuantConv2d(16, 256, 3)) == type(torch.nn.Conv2d(16, 256, 3)))
replacement_helper.restore_float_modules()
def test_with_custom_quant_modules(self):
no_replace_list = ["Linear"]
custom_quant_modules = [(torch.nn, "Linear", quant_nn.QuantLinear)]
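        # Each custom entry is an (original package, class name, quantized replacement) triple used for monkey patching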
replacement_helper = QuantModuleReplacementHelper()
replacement_helper.prepare_state(no_replace_list, custom_quant_modules)
replacement_helper.apply_quant_modules()
        # Although the no_replace_list indicates that the Linear module should not be replaced with
        # its quantized version, the custom_quant_modules mapping still contains the Linear module,
        # so it will be replaced.
assert(type(quant_nn.QuantLinear(16, 256, 3)) == type(torch.nn.Linear(16, 256, 3)))
assert(type(quant_nn.QuantConv2d(16, 256, 3)) == type(torch.nn.Conv2d(16, 256, 3)))
replacement_helper.restore_float_modules()
def test_initialize_deactivate(self):
no_replace_list = ["Linear"]
custom_quant_modules = [(torch.nn, "Linear", quant_nn.QuantLinear)]
quant_modules.initialize(no_replace_list, custom_quant_modules)
assert(type(quant_nn.QuantLinear(16, 256, 3)) == type(torch.nn.Linear(16, 256, 3)))
assert(type(quant_nn.QuantConv2d(16, 256, 3)) == type(torch.nn.Conv2d(16, 256, 3)))
quant_modules.deactivate()
| TensorRT-master | tools/pytorch-quantization/tests/quant_modules_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of QuantConv module.
Test for QuantConvTransposed
"""
import pytest
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from pytorch_quantization import tensor_quant
from pytorch_quantization.tensor_quant import QuantDescriptor
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
from pytorch_quantization import utils as quant_utils
from pytorch_quantization.nn.modules import quant_conv
import tests.utils as test_utils
# make everything run on the GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.backends.cudnn.deterministic = True
np.random.seed(1234)
# pylint:disable=missing-docstring, no-self-use
_NUM_IN_CHANNELS = 13
_NUM_OUT_CHANNELS = 17
class TestQuantConvTranspose2D():
def test_no_quant(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False)
quant_conv_object.input_quantizer.disable()
quant_conv_object.weight_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 32, 32)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_copy
out1 = F.conv_transpose2d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_weight_fake_quant_per_tensor(self):
kernel_size = 8
quant_conv_object = quant_conv.QuantConvTranspose2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=QuantDescriptor())
quant_conv_object.input_quantizer.disable()
test_input = torch.randn(256, _NUM_IN_CHANNELS, 32, 32)
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.conv_transpose2d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_weight_fake_quant_per_channel(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE2D_WEIGHT_PER_CHANNEL)
quant_conv_object.input_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 256, 256)
weight_copy = quant_conv_object.weight.clone()
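        # ConvTranspose2d weight is (in_channels, out_channels, kH, kW), so per-channel amax keeps dim 1 and reduces over (0, 2, 3)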
amax = quant_utils.reduce_amax(weight_copy, axis=(0, 2, 3))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, amax)
out1 = F.conv_transpose2d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_input(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False)
quant_conv_object.weight_quantizer.disable()
test_input = torch.randn(20, _NUM_IN_CHANNELS, 50, 50)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.conv_transpose2d(quant_input, quant_conv_object.weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose2d(
_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=False, quant_desc_weight=QuantDescriptor())
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16, 16)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.conv_transpose2d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=QuantDescriptor(axis=(1)))
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16, 16)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
amax = quant_utils.reduce_amax(weight_copy, axis=(0, 2, 3))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, amax)
out1 = F.conv_transpose2d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_other_prec(self):
kernel_size = 3
quant_desc_input = QuantDescriptor(num_bits=4)
quant_desc_weight = QuantDescriptor(num_bits=3, axis=(1))
quant_conv_object = quant_conv.QuantConvTranspose2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_input=quant_desc_input,
quant_desc_weight=quant_desc_weight)
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16, 16)
test_input_quantizer = TensorQuantizer(quant_desc_input)
weight_quantizer = TensorQuantizer(quant_desc_weight)
quant_input = test_input_quantizer(test_input)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_quantizer(weight_copy)
out1 = F.conv_transpose2d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_bias(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_weight=QuantDescriptor(axis=(1)))
test_input = torch.randn(2, _NUM_IN_CHANNELS, 2, 2)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
amax = quant_utils.reduce_amax(weight_copy, axis=(0, 2, 3))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, amax)
out1 = F.conv_transpose2d(quant_input, quant_weight, bias=quant_conv_object.bias)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_against_unquantized(self):
kernel_size = 3
test_input = torch.randn(16, _NUM_IN_CHANNELS, 32, 32).cuda()
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
fake_quant_conv2d = quant_conv.QuantConvTranspose2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_input=QuantDescriptor(num_bits=16),
quant_desc_weight=QuantDescriptor(num_bits=16, axis=(1)))
# Reset seed. Make sure weight and bias are the same
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
conv2d = nn.ConvTranspose2d(_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=True)
fake_quant_output = fake_quant_conv2d(test_input)
output = conv2d(test_input)
test_utils.compare(fake_quant_output, output, rtol=1e-5, atol=2e-4)
class TestQuantConvTranspose3D():
def test_no_quant(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose3d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False)
quant_conv_object.input_quantizer.disable()
quant_conv_object.weight_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 32, 32, 32)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_copy
out1 = F.conv_transpose3d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_other_prec(self):
kernel_size = 3
quant_desc_input = QuantDescriptor(num_bits=4)
quant_desc_weight = QuantDescriptor(num_bits=3, axis=(1))
quant_conv_object = quant_conv.QuantConvTranspose3d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_input=quant_desc_input,
quant_desc_weight=quant_desc_weight)
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16, 16, 16)
test_input_quantizer = TensorQuantizer(quant_desc_input)
weight_quantizer = TensorQuantizer(quant_desc_weight)
quant_input = test_input_quantizer(test_input)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_quantizer(weight_copy)
out1 = F.conv_transpose3d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_bias(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose3d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE3D_WEIGHT_PER_CHANNEL)
test_input = torch.randn(2, _NUM_IN_CHANNELS, 2, 2, 2)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
amax = quant_utils.reduce_amax(weight_copy, axis=(0, 2, 3, 4))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, amax)
out1 = F.conv_transpose3d(quant_input, quant_weight, bias=quant_conv_object.bias)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_against_unquantized(self):
kernel_size = 3
test_input = torch.randn(16, _NUM_IN_CHANNELS, 32, 32, 32).cuda()
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
fake_quant_conv3d = quant_conv.QuantConvTranspose3d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_input=QuantDescriptor(num_bits=16),
quant_desc_weight=QuantDescriptor(num_bits=16, axis=(1)))
# Reset seed. Make sure weight and bias are the same
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
conv3d = nn.ConvTranspose3d(_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=True)
fake_quant_output = fake_quant_conv3d(test_input)
output = conv3d(test_input)
test_utils.compare(fake_quant_output, output, rtol=1e-5, atol=2e-4)
class TestQuantConvTranspose1D():
def test_no_quant(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False)
quant_conv_object.input_quantizer.disable()
quant_conv_object.weight_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 32)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_copy
out1 = F.conv_transpose1d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_weight_fake_quant_per_tensor(self):
kernel_size = 8
quant_conv_object = quant_conv.QuantConvTranspose1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=QuantDescriptor())
quant_conv_object.input_quantizer.disable()
test_input = torch.randn(256, _NUM_IN_CHANNELS, 32)
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.conv_transpose1d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_weight_fake_quant_per_channel(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE1D_WEIGHT_PER_CHANNEL)
quant_conv_object.input_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 256)
weight_copy = quant_conv_object.weight.clone()
amax = quant_utils.reduce_amax(weight_copy, axis=(0, 2))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, amax)
out1 = F.conv_transpose1d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_input(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False)
quant_conv_object.weight_quantizer.disable()
test_input = torch.randn(20, _NUM_IN_CHANNELS, 50)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.conv_transpose1d(quant_input, quant_conv_object.weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose1d(
_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=False, quant_desc_weight=QuantDescriptor())
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.conv_transpose1d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=QuantDescriptor(axis=(1)))
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
amax = quant_utils.reduce_amax(weight_copy, axis=(0, 2))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, amax)
out1 = F.conv_transpose1d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_other_prec(self):
kernel_size = 3
quant_desc_input = QuantDescriptor(num_bits=4)
quant_desc_weight = QuantDescriptor(num_bits=3, axis=(1))
quant_conv_object = quant_conv.QuantConvTranspose1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_input=quant_desc_input,
quant_desc_weight=quant_desc_weight)
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16)
test_input_quantizer = TensorQuantizer(quant_desc_input)
weight_quantizer = TensorQuantizer(quant_desc_weight)
quant_input = test_input_quantizer(test_input)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_quantizer(weight_copy)
out1 = F.conv_transpose1d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_bias(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConvTranspose1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_weight=QuantDescriptor(axis=(1)))
test_input = torch.randn(2, _NUM_IN_CHANNELS, 2)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
amax = quant_utils.reduce_amax(weight_copy, axis=(0, 2))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, amax)
out1 = F.conv_transpose1d(quant_input, quant_weight, bias=quant_conv_object.bias)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_against_unquantized(self):
kernel_size = 3
test_input = torch.randn(16, _NUM_IN_CHANNELS, 24).cuda()
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
fake_quant_conv1d = quant_conv.QuantConvTranspose1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_input=QuantDescriptor(num_bits=16),
quant_desc_weight=QuantDescriptor(num_bits=16, axis=(1)))
# Reset seed. Make sure weight and bias are the same
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
conv1d = nn.ConvTranspose1d(_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=True)
fake_quant_output = fake_quant_conv1d(test_input)
output = conv1d(test_input)
test_utils.compare(fake_quant_output, output, rtol=1e-5, atol=1e-4)
| TensorRT-master | tools/pytorch-quantization/tests/quant_conv_transposed_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of integrating Quant layers into a network"""
import pytest
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from apex.amp import _amp_state
from pytorch_quantization import tensor_quant
from pytorch_quantization import quant_modules
from pytorch_quantization import nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor
from tests.fixtures.models import LeNet, QuantLeNet
from tests.fixtures import verbose
np.random.seed(12345) # seed 1234 causes 1 number mismatch at 6th decimal in one of the tests
# make everything run on the GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# pylint:disable=missing-docstring, no-self-use
class TestNetwork():
"""test basic operations of quantized network"""
def test_simple_build(self):
"""test instantiation"""
quant_model = QuantLeNet(quant_desc_input=QuantDescriptor(), quant_desc_weight=QuantDescriptor())
for name, module in quant_model.named_modules():
if "quantizer" in name:
module.disable()
input_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
weight_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
quant_model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
input_desc = QuantDescriptor(amax=6.)
weight_desc = QuantDescriptor(amax=1.)
quant_model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
def test_forward(self):
"""test forward pass with random data"""
input_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
weight_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
quant_model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
output = quant_model(torch.empty(16, 1, 28, 28))
def test_backward(self):
"""test one iteration with random data and labels"""
input_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
weight_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
quant_model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
optimizer = optim.SGD(quant_model.parameters(), lr=0.01)
optimizer.zero_grad()
output = quant_model(torch.empty(16, 1, 28, 28))
loss = F.nll_loss(output, torch.randint(10, (16,), dtype=torch.int64))
loss.backward()
optimizer.step()
def test_apex_amp_fp16(self):
"""test one iteration with random data and labels"""
try:
from apex import amp
except ImportError:
pytest.skip("AMP is not available.")
input_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
weight_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
optimizer = optim.SGD(model.parameters(), lr=0.01)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
optimizer.zero_grad()
output = model(torch.empty(16, 1, 28, 28))
loss = F.nll_loss(output, torch.randint(10, (16,), dtype=torch.int64))
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
assert loss.dtype == torch.float32
_amp_state.handle._deactivate()
def test_native_amp_fp16(self):
"""test one iteration with random data and labels"""
input_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
weight_desc = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
model = QuantLeNet(quant_desc_input=input_desc, quant_desc_weight=weight_desc)
optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer.zero_grad()
with torch.cuda.amp.autocast():
output = model(torch.empty(16, 1, 28, 28))
loss = F.nll_loss(output, torch.randint(10, (16,), dtype=torch.int64))
loss.backward()
optimizer.step()
assert loss.dtype == torch.float32
def test_asp(self):
"""test Sparsity (ASP) and QAT toolkits together"""
try:
from apex.contrib.sparsity import ASP
except ImportError:
pytest.skip("ASP is not available.")
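        # Build LeNet while monkey patching is active so its layers are the quantized versions, then revert the patching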
quant_modules.initialize()
model = LeNet()
quant_modules.deactivate()
optimizer = optim.SGD(model.parameters(), lr=0.01)
ASP.init_model_for_pruning(
model,
mask_calculator="m4n2_1d",
verbosity=2,
whitelist=[torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, quant_nn.modules.quant_linear.QuantLinear],
allow_recompute_mask=False,
custom_layer_dict={
quant_nn.QuantConv1d: ['weight'],
quant_nn.QuantConv2d: ['weight'],
quant_nn.QuantConv3d: ['weight'],
quant_nn.QuantConvTranspose1d: ['weight'],
quant_nn.QuantConvTranspose2d: ['weight'],
quant_nn.QuantConvTranspose3d: ['weight'],
quant_nn.QuantLinear: ['weight']
})
ASP.init_optimizer_for_pruning(optimizer)
ASP.compute_sparse_masks()
model = model.to('cuda')
output = model(torch.empty(16, 1, 28, 28).to('cuda'))
optimizer.zero_grad()
loss = F.nll_loss(output, torch.randint(10, (16,), dtype=torch.int64))
loss.backward()
optimizer.step()
def test_quant_module_replacement(self):
"""test monkey patching of modules with their quantized versions"""
lenet = LeNet()
qlenet = QuantLeNet()
mod_list = [type(mod) for name, mod in lenet.named_modules()]
mod_list = mod_list[1:]
qmod_list = [type(mod) for name, mod in qlenet.named_modules()]
qmod_list = qmod_list[1:]
# Before any monkey patching, the networks should be different
assert(mod_list != qmod_list)
# Monkey patch the modules
no_replace_list = ["Linear"]
custom_quant_modules = [(torch.nn, "Linear", quant_nn.QuantLinear)]
quant_modules.initialize(no_replace_list, custom_quant_modules)
lenet = LeNet()
qlenet = QuantLeNet()
mod_list = [type(mod) for name, mod in lenet.named_modules()]
mod_list = mod_list[1:]
qmod_list = [type(mod) for name, mod in qlenet.named_modules()]
qmod_list = qmod_list[1:]
# After monkey patching, the networks should be same
assert(mod_list == qmod_list)
# Reverse monkey patching
quant_modules.deactivate()
lenet = LeNet()
qlenet = QuantLeNet()
mod_list = [type(mod) for name, mod in lenet.named_modules()]
mod_list = mod_list[1:]
qmod_list = [type(mod) for name, mod in qlenet.named_modules()]
qmod_list = qmod_list[1:]
# After reversing monkey patching, the networks should again be different
assert(mod_list != qmod_list)
def test_calibration(self):
quant_model = QuantLeNet(quant_desc_input=QuantDescriptor(), quant_desc_weight=QuantDescriptor()).cuda()
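        # Calibration pass: turn off quantization, turn on the calibrators, and feed data to collect amax statistics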
for name, module in quant_model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
print(F"{name:40}: {module}")
quant_model(torch.rand(16, 1, 224, 224, device="cuda"))
# Load calib result and disable calibration
for name, module in quant_model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
module.load_calib_amax()
module.enable_quant()
module.disable_calib()
else:
module.enable()
quant_model.cuda()
| TensorRT-master | tools/pytorch-quantization/tests/integration_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""test for str and repr
Make sure things can print, and in a nice form. All the print tests are put together so that running this test file
alone shows all the print messages in the project.
"""
import torch
from torch import nn
from pytorch_quantization import calib
from pytorch_quantization import tensor_quant
from pytorch_quantization import nn as quant_nn
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
# pylint:disable=missing-docstring, no-self-use
class TestPrint():
def test_print_descriptor(self):
test_desc = tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL
print(test_desc)
def test_print_tensor_quantizer(self):
test_quantizer = TensorQuantizer()
print(test_quantizer)
def test_print_module(self):
class _TestModule(nn.Module):
def __init__(self):
super(_TestModule, self).__init__()
self.conv = nn.Conv2d(33, 65, 3)
self.quant_conv = quant_nn.Conv2d(33, 65, 3)
self.linear = nn.Linear(33, 65)
self.quant_linear = quant_nn.Linear(33, 65)
test_module = _TestModule()
print(test_module)
def test_print_calibrator(self):
print(calib.MaxCalibrator(7, 1, False))
hist_calibrator = calib.HistogramCalibrator(8, None, True)
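        # Collect some data so the histogram calibrator has statistics to print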
hist_calibrator.collect(torch.rand(10))
print(hist_calibrator)
| TensorRT-master | tools/pytorch-quantization/tests/print_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of QuantConv module.
Most tests check the functionality of all the combinations in Quant conv against the corresponding functionalities in
tensor_quant. There are tests for all three of QuantConv1d, QuantConv2d, and QuantConv3d.
"""
import pytest
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from pytorch_quantization import tensor_quant
from pytorch_quantization.tensor_quant import QuantDescriptor
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
from pytorch_quantization import utils as quant_utils
from pytorch_quantization.nn.modules import quant_conv
import tests.utils as test_utils
# make everything run on the GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.backends.cudnn.deterministic = True
np.random.seed(1234)
# pylint:disable=missing-docstring, no-self-use
_NUM_IN_CHANNELS = 13
_NUM_OUT_CHANNELS = 17
class TestQuantConv2D():
    # Quantizing weight
def test_no_quant(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False)
quant_conv_object.input_quantizer.disable()
quant_conv_object.weight_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 256, 256)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_copy
out1 = F.conv2d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_weight_fake_quant_per_tensor(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=QuantDescriptor())
quant_conv_object.input_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 256, 256)
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.conv2d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_weight_fake_quant_per_channel(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL)
quant_conv_object.input_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 256, 256)
weight_copy = quant_conv_object.weight.clone()
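        # Per-channel reference: one amax per output channel (max |w| over in_channels and the kernel dims), reshaped for broadcasting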
quant_weight = tensor_quant.fake_tensor_quant(
weight_copy,
torch.max(torch.abs(weight_copy).view(_NUM_OUT_CHANNELS, -1), dim=1, keepdim=True)[0].view(
_NUM_OUT_CHANNELS, 1, 1, 1))
out1 = F.conv2d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_in_feature_fake_quant(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False)
quant_conv_object.weight_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 256, 256)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.conv2d(quant_input, quant_conv_object.weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv2d(
_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=False, quant_desc_weight=QuantDescriptor())
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16, 16)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.conv2d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv2d(_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=False,
quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL)
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16, 16)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(
weight_copy,
torch.max(torch.abs(weight_copy).view(_NUM_OUT_CHANNELS, -1), dim=1, keepdim=True)[0].view(
_NUM_OUT_CHANNELS, 1, 1, 1))
out1 = F.conv2d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_other_prec(self):
kernel_size = 3
quant_desc_input = QuantDescriptor(num_bits=4)
quant_desc_weight = QuantDescriptor(num_bits=3)
quant_conv_object = quant_conv.QuantConv2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_input=quant_desc_input,
quant_desc_weight=quant_desc_weight)
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16, 16)
test_input_quantizer = TensorQuantizer(quant_desc_input)
weight_quantizer = TensorQuantizer(quant_desc_weight)
quant_input = test_input_quantizer(test_input)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_quantizer(weight_copy)
out1 = F.conv2d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_bias(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv2d(_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=True,
quant_desc_weight=tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL)
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16, 16)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(
weight_copy,
torch.max(torch.abs(weight_copy).view(_NUM_OUT_CHANNELS, -1), dim=1, keepdim=True)[0].view(
_NUM_OUT_CHANNELS, 1, 1, 1))
out1 = F.conv2d(quant_input, quant_weight, bias=quant_conv_object.bias)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_against_unquantized(self):
kernel_size = 3
test_input = torch.randn(16, _NUM_IN_CHANNELS, 24, 24).cuda()
torch.manual_seed(12345)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(12345)
fake_quant_conv2d = quant_conv.QuantConv2d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_input=QuantDescriptor(num_bits=16),
quant_desc_weight=QuantDescriptor(num_bits=16, axis=(0)))
# Reset seed. Make sure weight and bias are the same
torch.manual_seed(12345)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(12345)
conv2d = nn.Conv2d(_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=True)
fake_quant_output = fake_quant_conv2d(test_input)
output = conv2d(test_input)
test_utils.compare(fake_quant_output, output, rtol=1e-6, atol=1.5e-4)
def test_set_default_quant_desc(self):
quant_conv_layer = quant_conv.Conv2d(32, 257, 3)
assert quant_conv_layer.input_quantizer._axis == None
assert quant_conv_layer.weight_quantizer._axis == (0)
# set default to a different one
quant_desc_input = QuantDescriptor(num_bits=11)
quant_desc_weight = QuantDescriptor(num_bits=13, axis=(1))
quant_conv.QuantConv2d.set_default_quant_desc_input(quant_desc_input)
quant_conv.QuantConv2d.set_default_quant_desc_weight(quant_desc_weight)
# Create one with default descriptor
quant_conv_layer = quant_conv.Conv2d(32, 257, 3)
# Check quant_desc in quantizer created with default descriptor
assert quant_conv_layer.input_quantizer._num_bits == quant_desc_input.num_bits
assert quant_conv_layer.weight_quantizer._axis == quant_desc_weight.axis
# Test default is per class
quant_conv_layer = quant_conv.Conv3d(31, 255, 5)
assert quant_conv_layer.input_quantizer._num_bits != quant_desc_input.num_bits
assert quant_conv_layer.weight_quantizer._axis != quant_desc_weight.axis
# Reset default
quant_conv.QuantConv2d.set_default_quant_desc_input(QuantDescriptor())
quant_conv.QuantConv2d.set_default_quant_desc_weight(QuantDescriptor(axis=(0)))
def test_unused_kwargs(self):
with pytest.raises(TypeError, match="Unused keys"):
quant_conv.Conv2d(32, 257, 3, descriptor='oops')
class TestQuantConv1D():
def test_no_quant(self):
kernel_size = 8
quant_conv_object = quant_conv.QuantConv1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False)
quant_conv_object.input_quantizer.disable()
quant_conv_object.weight_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 256)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_copy
out1 = F.conv1d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_weight_fake_quant_per_tensor(self):
kernel_size = 8
quant_conv_object = quant_conv.QuantConv1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=QuantDescriptor())
quant_conv_object.input_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 256)
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.conv1d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_weight_fake_quant_per_channel(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=QuantDescriptor(axis=(0)))
quant_conv_object.input_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 256)
weight_copy = quant_conv_object.weight.clone()
amax = quant_utils.reduce_amax(weight_copy, axis=(1, 2))
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, amax)
out1 = F.conv1d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_input(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False)
quant_conv_object.weight_quantizer.disable()
test_input = torch.randn(20, _NUM_IN_CHANNELS, 50)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.conv1d(quant_input, quant_conv_object.weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_tensor(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv1d(
_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=False, quant_desc_weight=QuantDescriptor())
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(weight_copy, torch.max(torch.abs(weight_copy)))
out1 = F.conv1d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_weight=QuantDescriptor(axis=(0)))
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(
weight_copy,
torch.max(torch.abs(weight_copy).view(_NUM_OUT_CHANNELS, -1), dim=1, keepdim=True)[0].view(
_NUM_OUT_CHANNELS, 1, 1))
out1 = F.conv1d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_other_prec(self):
kernel_size = 3
quant_desc_input = QuantDescriptor(num_bits=4)
quant_desc_weight = QuantDescriptor(num_bits=3, axis=(0))
quant_conv_object = quant_conv.QuantConv1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_input=quant_desc_input,
quant_desc_weight=quant_desc_weight)
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16)
test_input_quantizer = TensorQuantizer(quant_desc_input)
weight_quantizer = TensorQuantizer(quant_desc_weight)
quant_input = test_input_quantizer(test_input)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_quantizer(weight_copy)
out1 = F.conv1d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_fake_quant_per_channel_bias(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_weight=QuantDescriptor(axis=(0)))
test_input = torch.randn(16, _NUM_IN_CHANNELS, 16)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(
weight_copy,
torch.max(torch.abs(weight_copy).view(_NUM_OUT_CHANNELS, -1), dim=1, keepdim=True)[0].view(
_NUM_OUT_CHANNELS, 1, 1))
out1 = F.conv1d(quant_input, quant_weight, bias=quant_conv_object.bias)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_against_unquantized(self):
kernel_size = 3
test_input = torch.randn(16, _NUM_IN_CHANNELS, 24).cuda()
torch.manual_seed(12345)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(12345)
fake_quant_conv1d = quant_conv.QuantConv1d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_input=QuantDescriptor(num_bits=16),
quant_desc_weight=QuantDescriptor(num_bits=16, axis=(0)))
# Reset seed. Make sure weight and bias are the same
torch.manual_seed(12345)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(12345)
conv1d = nn.Conv1d(_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=True)
fake_quant_output = fake_quant_conv1d(test_input)
output = conv1d(test_input)
test_utils.compare(fake_quant_output, output, rtol=1e-5, atol=1e-4)
class TestQuantConv3D():
    # Quantizing weight
def test_no_quant(self):
kernel_size = 8
quant_conv_object = quant_conv.QuantConv3d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False)
quant_conv_object.input_quantizer.disable()
quant_conv_object.weight_quantizer.disable()
test_input = torch.randn(16, _NUM_IN_CHANNELS, 8, 8, 8)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_copy
out1 = F.conv3d(test_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
    def test_fake_quant_per_channel_other_prec(self):
kernel_size = 3
quant_desc_input = QuantDescriptor(num_bits=4)
quant_desc_weight = QuantDescriptor(num_bits=3, axis=(0))
quant_conv_object = quant_conv.QuantConv3d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=False,
quant_desc_input=quant_desc_input,
quant_desc_weight=quant_desc_weight)
test_input = torch.randn(16, _NUM_IN_CHANNELS, 8, 8, 8)
test_input_quantizer = TensorQuantizer(quant_desc_input)
weight_quantizer = TensorQuantizer(quant_desc_weight)
quant_input = test_input_quantizer(test_input)
weight_copy = quant_conv_object.weight.clone()
quant_weight = weight_quantizer(weight_copy)
out1 = F.conv3d(quant_input, quant_weight)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
    def test_fake_quant_per_channel_bias(self):
kernel_size = 3
quant_conv_object = quant_conv.QuantConv3d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_weight=QuantDescriptor(axis=(0)))
test_input = torch.randn(8, _NUM_IN_CHANNELS, 8, 8, 8)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
weight_copy = quant_conv_object.weight.clone()
quant_weight = tensor_quant.fake_tensor_quant(
weight_copy,
torch.max(torch.abs(weight_copy).view(_NUM_OUT_CHANNELS, -1), dim=1, keepdim=True)[0].view(
_NUM_OUT_CHANNELS, 1, 1, 1, 1))
out1 = F.conv3d(quant_input, quant_weight, bias=quant_conv_object.bias)
out2 = quant_conv_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_against_unquantized(self):
kernel_size = 3
test_input = torch.randn(16, _NUM_IN_CHANNELS, 24, 24, 24).cuda()
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
fake_quant_conv3d = quant_conv.QuantConv3d(
_NUM_IN_CHANNELS,
_NUM_OUT_CHANNELS,
kernel_size,
bias=True,
quant_desc_input=QuantDescriptor(num_bits=16),
quant_desc_weight=QuantDescriptor(num_bits=16, axis=(0)))
# Reset seed. Make sure weight and bias are the same
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
conv3d = nn.Conv3d(_NUM_IN_CHANNELS, _NUM_OUT_CHANNELS, kernel_size, bias=True)
fake_quant_output = fake_quant_conv3d(test_input)
output = conv3d(test_input)
test_utils.compare(fake_quant_output, output, rtol=1e-6, atol=2e-4)
| TensorRT-master | tools/pytorch-quantization/tests/quant_conv_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests of helper functions for quant optimizer"""
import numpy as np
import pytest
import torch.optim as optim
from pytorch_quantization.optim import helper
from pytorch_quantization.tensor_quant import QuantDescriptor
from .fixtures.models import QuantLeNet
from .fixtures.models import resnet18
# pylint:disable=missing-docstring, no-self-use
class TestMatchParameters():
def test_single_key(self, resnet18):
param = helper.match_parameters(resnet18, ['downsample.0.weight'])
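        # torchvision's ResNet-18 has a downsample conv in layers 2-4, so three weights match this pattern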
assert len(list(param)) == 3
def test_multi_keys(self, resnet18):
param = list(helper.match_parameters(resnet18, ['conv1', 'downsample']))
assert len(param) == 18
def test_regex(self, resnet18):
param = helper.match_parameters(resnet18, ['downsample.*.weight$'])
assert len(list(param)) == 6
param = helper.match_parameters(resnet18, ['downsample.*.wei$'])
assert not list(param)
class TestGroupParameters():
def test_single_key(self, resnet18):
param_groups = helper.group_parameters(resnet18, [['downsample.1.weight']])
assert len(list(param_groups[0]['params'])) == 3
def test_lr_momentum_decay(self, resnet18):
lrs = [0.01, 0.001]
momentums = [0.02, 0.002]
weight_decays = [0.03, 0.003]
param_groups = helper.group_parameters(
resnet18, [['conv1.*weight'], ['downsample.*.weight']], lrs, momentums, weight_decays)
assert param_groups[0]['lr'] == lrs[0]
assert param_groups[1]['lr'] == lrs[1]
assert param_groups[0]['momentum'] == momentums[0]
assert param_groups[1]['momentum'] == momentums[1]
assert param_groups[0]['weight_decay'] == weight_decays[0]
assert param_groups[1]['weight_decay'] == weight_decays[1]
def test_optimizer_feed(self, resnet18):
"""Feed grouped parameters to optimizer, see what happens"""
lrs = [0.01, 0.001]
momentums = [0.02, 0.002]
weight_decays = [0.03, 0.003]
param_groups = helper.group_parameters(
resnet18, [['conv1.*weight'], ['downsample.*.weight']], lrs, momentums, weight_decays)
optimizer = optim.SGD(param_groups)
optimizer.step()
def test_raises(self):
with pytest.raises(TypeError, match="must be list of list of patterns"):
helper.group_parameters(None, [['downsample.1.weight'], 'conv1'])
with pytest.raises(TypeError, match="must match"):
helper.group_parameters(None, [['downsample.1.weight'], ['conv1']], lrs=[0.1])
with pytest.raises(TypeError, match="must match"):
helper.group_parameters(None, [['downsample.1.weight'], ['conv1']], momentums=[0.1])
with pytest.raises(TypeError, match="must match"):
helper.group_parameters(None, [['downsample.1.weight'], ['conv1']], weight_decays=[0.1])
class TestFreezeParameters():
def test_simple(self, resnet18):
helper.freeze_parameters(resnet18, ['downsample.0.weight'])
for name, param in resnet18.named_parameters():
if 'downsample.0.weight' in name:
assert not param.requires_grad
class TestQuantWeightInPlace():
def test_simple(self):
quant_lenet = QuantLeNet(
quant_desc_input=QuantDescriptor(),
quant_desc_weight=QuantDescriptor())
quant_lenet.eval()
helper.quant_weight_inplace(quant_lenet)
| TensorRT-master | tools/pytorch-quantization/tests/optim_helper_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Model used for tests"""
import pytest
import torch.nn as nn
import torch.nn.functional as F
from pytorch_quantization.nn import QuantConv2d, QuantLinear
from pytorch_quantization.tensor_quant import QuantDescriptor
class LeNet(nn.Module):
def __init__(self, **kwargs):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5, **kwargs)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5, **kwargs)
self.fc1 = nn.Linear(320, 50, **kwargs)
self.fc2 = nn.Linear(50, 10, **kwargs)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
class QuantLeNet(nn.Module):
def __init__(self, **kwargs):
super(QuantLeNet, self).__init__()
self.conv1 = QuantConv2d(1, 10, kernel_size=5, **kwargs)
self.conv2 = QuantConv2d(10, 20, kernel_size=5, **kwargs)
self.fc1 = QuantLinear(320, 50, **kwargs)
self.fc2 = QuantLinear(50, 10, **kwargs)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
@pytest.fixture
def resnet18():
import torchvision
return torchvision.models.resnet18()
@pytest.fixture
def quant_lenet():
return QuantLeNet(quant_desc_input=QuantDescriptor(), quant_desc_weight=QuantDescriptor())
| TensorRT-master | tools/pytorch-quantization/tests/fixtures/models.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
@pytest.fixture
def verbose(request):
return request.config.getoption("verbose")
| TensorRT-master | tools/pytorch-quantization/tests/fixtures/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_glpi_theme
from pytorch_quantization import __version__
# -- Project information -----------------------------------------------------
project = 'pytorch-quantization'
copyright = '2021, NVIDIA'
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = 'master'
author = 'NVIDIA'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.8'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = True
napoleon_use_admonition_for_notes = True
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = False
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'sphinx_rtd_theme'
html_theme = 'glpi'
html_theme_path = sphinx_glpi_theme.get_html_themes_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pytorch-quantizationdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pytorch-quantization.tex', 'pytorch-quantization Documentation',
'NVIDIA', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pytorch-quantization', 'pytorch-quantization Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pytorch-quantization', 'pytorch-quantization Documentation',
author, 'pytorch-quantization', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# Disable docstring inheritance
autodoc_inherit_docstrings = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
}
| TensorRT-master | tools/pytorch-quantization/docs/source/conf.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import sys
import time
import argparse
import warnings
import collections
import torch
import torch.utils.data
from torch import nn
from tqdm import tqdm
import torchvision
from torchvision import transforms
from torchvision.models.utils import load_state_dict_from_url
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
from pytorch_quantization import quant_modules
import onnxruntime
import numpy as np
import models
from prettytable import PrettyTable
# The following path assumes running in nvcr.io/nvidia/pytorch:20.08-py3
sys.path.insert(0,"/opt/pytorch/vision/references/classification/")
# Import functions from torchvision reference
try:
from train import evaluate, train_one_epoch, load_data, utils
except Exception as e:
raise ModuleNotFoundError(
"Add https://github.com/pytorch/vision/blob/master/references/classification/ to PYTHONPATH")
def get_parser():
"""
Creates an argument parser.
"""
parser = argparse.ArgumentParser(description='Classification quantization flow script')
parser.add_argument('--data-dir', '-d', type=str, help='input data folder', required=True)
parser.add_argument('--model-name', '-m', default='resnet50', help='model name: default resnet50')
parser.add_argument('--disable-pcq', '-dpcq', action="store_true", help='disable per-channel quantization for weights')
parser.add_argument('--out-dir', '-o', default='/tmp', help='output folder: default /tmp')
parser.add_argument('--print-freq', '-pf', type=int, default=20, help='evaluation print frequency: default 20')
parser.add_argument('--threshold', '-t', type=float, default=-1.0, help='top1 accuracy threshold (less than 0.0 means no comparison): default -1.0')
parser.add_argument('--batch-size-train', type=int, default=128, help='batch size for training: default 128')
parser.add_argument('--batch-size-test', type=int, default=128, help='batch size for testing: default 128')
parser.add_argument('--batch-size-onnx', type=int, default=1, help='batch size for onnx: default 1')
parser.add_argument('--seed', type=int, default=12345, help='random seed: default 12345')
checkpoint = parser.add_mutually_exclusive_group(required=True)
checkpoint.add_argument('--ckpt-path', default='', type=str,
help='path to latest checkpoint (default: none)')
checkpoint.add_argument('--ckpt-url', default='', type=str,
help='url to latest checkpoint (default: none)')
checkpoint.add_argument('--pretrained', action="store_true")
parser.add_argument('--num-calib-batch', default=4, type=int,
help='Number of batches for calibration. 0 will disable calibration. (default: 4)')
parser.add_argument('--num-finetune-epochs', default=0, type=int,
help='Number of epochs to fine tune. 0 will disable fine tune. (default: 0)')
parser.add_argument('--calibrator', type=str, choices=["max", "histogram"], default="max")
parser.add_argument('--percentile', nargs='+', type=float, default=[99.9, 99.99, 99.999, 99.9999])
parser.add_argument('--sensitivity', action="store_true", help="Build sensitivity profile")
parser.add_argument('--evaluate-onnx', action="store_true", help="Evaluate exported ONNX")
return parser
def prepare_model(
model_name,
data_dir,
per_channel_quantization,
batch_size_train,
batch_size_test,
batch_size_onnx,
calibrator,
pretrained=True,
ckpt_path=None,
ckpt_url=None):
"""
Prepare the model for the classification flow.
Arguments:
model_name: name to use when accessing torchvision model dictionary
data_dir: directory with train and val subdirs prepared "imagenet style"
        per_channel_quantization: if True, use per-channel quantization for weights;
            note that this isn't currently supported in ONNX-RT/Pytorch
batch_size_train: batch size to use when training
batch_size_test: batch size to use when testing in Pytorch
batch_size_onnx: batch size to use when testing with ONNX-RT
calibrator: calibration type to use (max/histogram)
pretrained: if true a pretrained model will be loaded from torchvision
ckpt_path: path to load a model checkpoint from, if not pretrained
ckpt_url: url to download a model checkpoint from, if not pretrained and no path was given
* at least one of {pretrained, path, url} must be valid
    The method returns the following list:
[
Model object,
data loader for training,
data loader for Pytorch testing,
data loader for onnx testing
]
"""
# Use 'spawn' to avoid CUDA reinitialization with forked subprocess
torch.multiprocessing.set_start_method('spawn')
## Initialize quantization, model and data loaders
if per_channel_quantization:
quant_desc_input = QuantDescriptor(calib_method=calibrator)
quant_nn.QuantConv2d.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantLinear.set_default_quant_desc_input(quant_desc_input)
else:
## Force per tensor quantization for onnx runtime
quant_desc_input = QuantDescriptor(calib_method=calibrator, axis=None)
quant_nn.QuantConv2d.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantConvTranspose2d.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantLinear.set_default_quant_desc_input(quant_desc_input)
quant_desc_weight = QuantDescriptor(calib_method=calibrator, axis=None)
quant_nn.QuantConv2d.set_default_quant_desc_weight(quant_desc_weight)
quant_nn.QuantConvTranspose2d.set_default_quant_desc_weight(quant_desc_weight)
quant_nn.QuantLinear.set_default_quant_desc_weight(quant_desc_weight)
if model_name in models.__dict__:
model = models.__dict__[model_name](pretrained=pretrained, quantize=True)
else:
quant_modules.initialize()
model = torchvision.models.__dict__[model_name](pretrained=pretrained)
quant_modules.deactivate()
if not pretrained:
if ckpt_path:
checkpoint = torch.load(ckpt_path)
else:
checkpoint = load_state_dict_from_url(ckpt_url)
if 'state_dict' in checkpoint.keys():
checkpoint = checkpoint['state_dict']
elif 'model' in checkpoint.keys():
checkpoint = checkpoint['model']
model.load_state_dict(checkpoint)
model.eval()
model.cuda()
## Prepare the data loaders
traindir = os.path.join(data_dir, 'train')
valdir = os.path.join(data_dir, 'val')
_args = collections.namedtuple("mock_args", ["model", "distributed", "cache_dataset"])
dataset, dataset_test, train_sampler, test_sampler = load_data(
traindir, valdir, _args(model=model_name, distributed=False, cache_dataset=False))
data_loader_train = torch.utils.data.DataLoader(
dataset, batch_size=batch_size_train,
sampler=train_sampler, num_workers=4, pin_memory=True)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=batch_size_test,
sampler=test_sampler, num_workers=4, pin_memory=True)
data_loader_onnx = torch.utils.data.DataLoader(
dataset_test, batch_size=batch_size_onnx,
sampler=test_sampler, num_workers=4, pin_memory=True)
return model, data_loader_train, data_loader_test, data_loader_onnx
def main(cmdline_args):
parser = get_parser()
args = parser.parse_args(cmdline_args)
print(parser.description)
print(args)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
## Prepare the pretrained model and data loaders
model, data_loader_train, data_loader_test, data_loader_onnx = prepare_model(
args.model_name,
args.data_dir,
not args.disable_pcq,
args.batch_size_train,
args.batch_size_test,
args.batch_size_onnx,
args.calibrator,
args.pretrained,
args.ckpt_path,
args.ckpt_url)
## Initial accuracy evaluation
criterion = nn.CrossEntropyLoss()
with torch.no_grad():
print('Initial evaluation:')
top1_initial = evaluate(model, criterion, data_loader_test, device="cuda", print_freq=args.print_freq)
## Calibrate the model
with torch.no_grad():
calibrate_model(
model=model,
model_name=args.model_name,
data_loader=data_loader_train,
num_calib_batch=args.num_calib_batch,
calibrator=args.calibrator,
hist_percentile=args.percentile,
out_dir=args.out_dir)
## Evaluate after calibration
if args.num_calib_batch > 0:
with torch.no_grad():
print('Calibration evaluation:')
top1_calibrated = evaluate(model, criterion, data_loader_test, device="cuda", print_freq=args.print_freq)
else:
top1_calibrated = -1.0
    ## Build sensitivity profile
if args.sensitivity:
build_sensitivity_profile(model, criterion, data_loader_test)
## Finetune the model
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.num_finetune_epochs)
for epoch in range(args.num_finetune_epochs):
        # Train a single epoch
train_one_epoch(model, criterion, optimizer, data_loader_train, "cuda", 0, 100)
lr_scheduler.step()
if args.num_finetune_epochs > 0:
## Evaluate after finetuning
with torch.no_grad():
print('Finetune evaluation:')
top1_finetuned = evaluate(model, criterion, data_loader_test, device="cuda")
else:
top1_finetuned = -1.0
## Export to ONNX
onnx_filename = args.out_dir + '/' + args.model_name + ".onnx"
top1_onnx = -1.0
if export_onnx(model, onnx_filename, args.batch_size_onnx, not args.disable_pcq) and args.evaluate_onnx:
## Validate ONNX and evaluate
top1_onnx = evaluate_onnx(onnx_filename, data_loader_onnx, criterion, args.print_freq)
## Print summary
print("Accuracy summary:")
table = PrettyTable(['Stage','Top1'])
table.align['Stage'] = "l"
table.add_row( [ 'Initial', "{:.2f}".format(top1_initial) ] )
table.add_row( [ 'Calibrated', "{:.2f}".format(top1_calibrated) ] )
table.add_row( [ 'Finetuned', "{:.2f}".format(top1_finetuned) ] )
table.add_row( [ 'ONNX', "{:.2f}".format(top1_onnx) ] )
print(table)
## Compare results
if args.threshold >= 0.0:
if args.evaluate_onnx and top1_onnx < 0.0:
print("Failed to export/evaluate ONNX!")
return 1
if args.num_finetune_epochs > 0:
if top1_finetuned >= (top1_onnx - args.threshold):
print("Accuracy threshold was met!")
else:
print("Accuracy threshold was missed!")
return 1
return 0
def evaluate_onnx(onnx_filename, data_loader, criterion, print_freq):
"""Evaluate accuracy on the given ONNX file using the provided data loader and criterion.
The method returns the average top-1 accuracy on the given dataset.
"""
print("Loading ONNX file: ", onnx_filename)
ort_session = onnxruntime.InferenceSession(onnx_filename)
with torch.no_grad():
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
with torch.no_grad():
for image, target in metric_logger.log_every(data_loader, print_freq, header):
image = image.to("cpu", non_blocking=True)
image_data = np.array(image)
input_data = image_data
# run the data through onnx runtime instead of torch model
input_name = ort_session.get_inputs()[0].name
raw_result = ort_session.run([], {input_name: input_data})
output = torch.tensor((raw_result[0]))
loss = criterion(output, target)
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
batch_size = image.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print(' ONNXRuntime: Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5))
return metric_logger.acc1.global_avg
def export_onnx(model, onnx_filename, batch_onnx, per_channel_quantization):
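    """Export the quantized model to ONNX using PyTorch's fake quantization ops.
    Returns True on success, or False if the export failed (e.g. per-channel quantization on an unsupported opset).
    """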
model.eval()
quant_nn.TensorQuantizer.use_fb_fake_quant = True # We have to shift to pytorch's fake quant ops before exporting the model to ONNX
if per_channel_quantization:
opset_version = 13
else:
opset_version = 12
# Export ONNX for multiple batch sizes
print("Creating ONNX file: " + onnx_filename)
dummy_input = torch.randn(batch_onnx, 3, 224, 224, device='cuda') #TODO: switch input dims by model
try:
torch.onnx.export(model, dummy_input, onnx_filename, verbose=False, opset_version=opset_version, enable_onnx_checker=False, do_constant_folding=True)
except ValueError:
warnings.warn(UserWarning("Per-channel quantization is not yet supported in Pytorch/ONNX RT (requires ONNX opset 13)"))
print("Failed to export to ONNX")
return False
return True
def calibrate_model(model, model_name, data_loader, num_calib_batch, calibrator, hist_percentile, out_dir):
"""
Feed data to the network and calibrate.
Arguments:
model: classification model
model_name: name to use when creating state files
data_loader: calibration data set
        num_calib_batch: number of calibration batches to run; 0 disables calibration
calibrator: type of calibration to use (max/histogram)
        hist_percentile: percentiles to be used for histogram calibration
out_dir: dir to save state files in
"""
if num_calib_batch > 0:
print("Calibrating model")
with torch.no_grad():
collect_stats(model, data_loader, num_calib_batch)
if not calibrator == "histogram":
compute_amax(model, method="max")
calib_output = os.path.join(
out_dir,
F"{model_name}-max-{num_calib_batch*data_loader.batch_size}.pth")
torch.save(model.state_dict(), calib_output)
else:
for percentile in hist_percentile:
print(F"{percentile} percentile calibration")
compute_amax(model, method="percentile")
calib_output = os.path.join(
out_dir,
F"{model_name}-percentile-{percentile}-{num_calib_batch*data_loader.batch_size}.pth")
torch.save(model.state_dict(), calib_output)
for method in ["mse", "entropy"]:
print(F"{method} calibration")
compute_amax(model, method=method)
calib_output = os.path.join(
out_dir,
F"{model_name}-{method}-{num_calib_batch*data_loader.batch_size}.pth")
torch.save(model.state_dict(), calib_output)
def collect_stats(model, data_loader, num_batches):
"""Feed data to the network and collect statistics"""
# Enable calibrators
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
# Feed data to the network for collecting stats
for i, (image, _) in tqdm(enumerate(data_loader), total=num_batches):
model(image.cuda())
if i >= num_batches:
break
# Disable calibrators
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
module.enable_quant()
module.disable_calib()
else:
module.enable()
def compute_amax(model, **kwargs):
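    """Load the calibration statistics collected by collect_stats and compute amax for every quantizer."""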
# Load calib result
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator):
module.load_calib_amax()
else:
module.load_calib_amax(**kwargs)
print(F"{name:40}: {module}")
model.cuda()
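# Note: collect_stats and compute_amax are meant to be used together. A minimal sketch of that
# flow (hypothetical `model` and `data_loader` objects, batch count chosen arbitrarily) is:
#
#     with torch.no_grad():
#         collect_stats(model, data_loader, num_batches=4)
#         compute_amax(model, method="percentile")   # or method="max" for max calibration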
def build_sensitivity_profile(model, criterion, data_loader_test):
quant_layer_names = []
for name, module in model.named_modules():
if name.endswith("_quantizer"):
module.disable()
layer_name = name.replace("._input_quantizer", "").replace("._weight_quantizer", "")
if layer_name not in quant_layer_names:
quant_layer_names.append(layer_name)
for i, quant_layer in enumerate(quant_layer_names):
print("Enable", quant_layer)
for name, module in model.named_modules():
if name.endswith("_quantizer") and quant_layer in name:
module.enable()
print(F"{name:40}: {module}")
with torch.no_grad():
evaluate(model, criterion, data_loader_test, device="cuda")
for name, module in model.named_modules():
if name.endswith("_quantizer") and quant_layer in name:
module.disable()
print(F"{name:40}: {module}")
if __name__ == '__main__':
res = main(sys.argv[1:])
exit(res)
| TensorRT-master | tools/pytorch-quantization/examples/torchvision/classification_flow.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from . import classification
| TensorRT-master | tools/pytorch-quantization/examples/torchvision/models/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .resnet import *
| TensorRT-master | tools/pytorch-quantization/examples/torchvision/models/classification/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from torch import Tensor
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from typing import Type, Any, Callable, Union, List, Optional
from pytorch_quantization import quant_modules
from pytorch_quantization import nn as quant_nn
__all__ = [
'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2'
]
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int,
out_planes: int,
stride: int = 1,
groups: int = 1,
dilation: int = 1,
quantize: bool = False) -> nn.Conv2d:
"""3x3 convolution with padding"""
if quantize:
return quant_nn.QuantConv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
else:
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1, quantize: bool = False) -> nn.Conv2d:
"""1x1 convolution"""
if quantize:
return quant_nn.QuantConv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
else:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
quantize: bool = False) -> None:
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride, quantize=quantize)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, quantize=quantize)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
self._quantize = quantize
if self._quantize:
self.residual_quantizer = quant_nn.TensorQuantizer(quant_nn.QuantConv2d.default_quant_desc_input)
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
if self._quantize:
out += self.residual_quantizer(identity)
else:
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
quantize: bool = False) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width, quantize=quantize)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation, quantize=quantize)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion, quantize=quantize)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self._quantize = quantize
if self._quantize:
self.residual_quantizer = quant_nn.TensorQuantizer(quant_nn.QuantConv2d.default_quant_desc_input)
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
if self._quantize:
out += self.residual_quantizer(identity)
else:
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
quantize: bool = False,
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
super(ResNet, self).__init__()
self._quantize = quantize
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
if quantize:
self.conv1 = quant_nn.QuantConv2d(3,
self.inplanes,
kernel_size=7,
stride=2,
padding=3,
bias=False)
else:
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], quantize=quantize)
self.layer2 = self._make_layer(block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0],
quantize=quantize)
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1],
quantize=quantize)
self.layer4 = self._make_layer(block,
512,
layers[3],
stride=2,
dilate=replace_stride_with_dilation[2],
quantize=quantize)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if quantize:
self.fc = quant_nn.QuantLinear(512 * block.expansion, num_classes)
else:
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self,
block: Type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
dilate: bool = False,
quantize: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride, quantize=quantize),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation,
norm_layer, self._quantize))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
quantize=quantize))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(arch: str, block: Type[Union[BasicBlock, Bottleneck]], layers: List[int], pretrained: bool, progress: bool,
quantize: bool, **kwargs: Any) -> ResNet:
model = ResNet(block, layers, quantize, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, quantize, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, quantize, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, quantize, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, quantize, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, quantize, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, quantize, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, quantize, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
    which is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, quantize, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
    which is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, quantize, **kwargs)
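# Usage sketch (illustration only, not executed at import time): every constructor above accepts
# quantize=True to build the quantized variant, assuming the pretrained weights URL is reachable.
#
#     model = resnet50(pretrained=True, quantize=True)
#     model.eval().cuda()
#
# With quantize=True, each conv/linear layer is a quant_nn module with its own TensorQuantizer,
# and each residual add passes the identity branch through residual_quantizer, as implemented in
# the forward() methods above.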
| TensorRT-master | tools/pytorch-quantization/examples/torchvision/models/classification/resnet.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from absl import logging
from .version import __version__
logging.use_absl_handler()
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Dynamically replace the modules with quantized versions."""
from collections import namedtuple
import torch
from pytorch_quantization import nn as quant_nn
# Definition of the named tuple that is used to store mapping of the quantized modules
_quant_entry = namedtuple('quant_entry', 'orig_mod mod_name replace_mod')
# Global member of the file that contains the mapping of quantized modules
_DEFAULT_QUANT_MAP = [_quant_entry(torch.nn, "Conv1d", quant_nn.QuantConv1d),
_quant_entry(torch.nn, "Conv2d", quant_nn.QuantConv2d),
_quant_entry(torch.nn, "Conv3d", quant_nn.QuantConv3d),
_quant_entry(torch.nn, "ConvTranspose1d", quant_nn.QuantConvTranspose1d),
_quant_entry(torch.nn, "ConvTranspose2d", quant_nn.QuantConvTranspose2d),
_quant_entry(torch.nn, "ConvTranspose3d", quant_nn.QuantConvTranspose3d),
_quant_entry(torch.nn, "Linear", quant_nn.QuantLinear),
_quant_entry(torch.nn, "LSTM", quant_nn.QuantLSTM),
_quant_entry(torch.nn, "LSTMCell", quant_nn.QuantLSTMCell),
_quant_entry(torch.nn, "AvgPool1d", quant_nn.QuantAvgPool1d),
_quant_entry(torch.nn, "AvgPool2d", quant_nn.QuantAvgPool2d),
_quant_entry(torch.nn, "AvgPool3d", quant_nn.QuantAvgPool3d),
_quant_entry(torch.nn, "AdaptiveAvgPool1d", quant_nn.QuantAdaptiveAvgPool1d),
_quant_entry(torch.nn, "AdaptiveAvgPool2d", quant_nn.QuantAdaptiveAvgPool2d),
_quant_entry(torch.nn, "AdaptiveAvgPool3d", quant_nn.QuantAdaptiveAvgPool3d),]
class QuantModuleReplacementHelper():
"""To help replace torch.nn modules with quantized versions.
This module is used to replace (by monkey patching) the torch.nn modules with their
    quantized versions as provided by either the tool's internal implementation or any other
user provided custom module.
Attributes:
        orginal_func_map: A set. Holds the original modules so they can be restored after monkey patching.
        default_quant_map: A list. The default mapping of torch.nn modules to the quantized
            versions provided by the tool.
        quant_map: A set. The final mapping of modules to their quantized versions, including any
            user supplied custom modules. Modules named in the user provided float_module_list are
            left out of this map so they keep their original float implementation.
"""
def __init__(self):
# Will hold the original modules to be replaced back
self.orginal_func_map = set()
# Maintains the list of supported quantized modules by the tool as default
self.default_quant_map = _DEFAULT_QUANT_MAP
# Will hold the final quantized modules after checking if user supplied any
# custom quantized functions.
self.quant_map = set()
def prepare_state(self, float_module_list=None, custom_map=None):
"""
        Prepare the internal variables that will be used by the monkey patching mechanism later.
1. Set up the list of quantized modules that are supported by the tool for torch.nn.
2. Set up the custom mapping for modules other than torch.nn.
3. Use the float_module_list to switch off the monkey patching replacement for user indicated modules
"""
# For the default quantized modules supported, generate the quant_map
for item in self.default_quant_map:
if float_module_list is not None and item.mod_name in float_module_list:
# Skip this module if this is present in the float_module_list
continue
else:
# append the modules into the variable that will be used in monkey patching
self.quant_map.add(item)
# also store the original module to be used in reverse monkey patching
self.orginal_func_map.add(_quant_entry(item.orig_mod, item.mod_name,
getattr(item.orig_mod, item.mod_name)))
# Add custom modules to the quant_map
if custom_map is not None:
for item in custom_map:
# append the custom modules to the list that will be used in monkey patching
# Note that we convert a tuple to a named tuple here
self.quant_map.add(_quant_entry(item[0], item[1], item[2]))
# also store the original module in another list which will be used to reverse monkey patching
self.orginal_func_map.add(_quant_entry(item[0], item[1], getattr(item[0], item[1])))
def apply_quant_modules(self):
"""
For the modules registered in the quant_map, simply monkey patch them and also store the
original modules so that they could be later replaced back.
"""
for entry in self.quant_map:
setattr(entry.orig_mod, entry.mod_name, entry.replace_mod)
def restore_float_modules(self):
"""
        Reverse the effect of the monkey patching by using orginal_func_map to restore the
        original modules.
"""
for entry in self.orginal_func_map:
setattr(entry.orig_mod, entry.mod_name, entry.replace_mod)
def initialize(float_module_list=None, custom_quant_modules=None):
"""Dynamic module replacement using monkey patching.
Dynamically monkey patches the modules with their quantized versions. Internally, the
state is maintained by a helper class object which helps in replacing the original
modules back.
Args:
float_module_list: A list. User supplied list which indicates which modules to not monkey patch.
        custom_quant_modules: A list of tuples. Each tuple provides the parent module, the module name, and
            its corresponding quantized version, for modules other than torch.nn.
Returns:
nothing.
Typical usage example:
# Define the deny list for torch.nn modules and custom map for modules other than torch.nn.
float_module_list = ["Linear"]
custom_quant_modules = [(torch.nn, "Linear", quant_nn.QuantLinear)]
## Monkey patch the modules
        pytorch_quantization.quant_modules.initialize(float_module_list, custom_quant_modules)
        ## Build and calibrate the model while the quantized modules are active, then restore the originals
        pytorch_quantization.quant_modules.deactivate()
"""
_quant_module_helper_object.prepare_state(float_module_list, custom_quant_modules)
_quant_module_helper_object.apply_quant_modules()
def deactivate():
"""Dynamic module replacement which reverses the monkey patching.
Dynamically replaces back the original modules that were monkey patched earlier
in the initialize() function call using helper class object which maintains the state.
"""
_quant_module_helper_object.restore_float_modules()
# Global object that maintains the state of the modules that are replaced.
_quant_module_helper_object = QuantModuleReplacementHelper()
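# Usage sketch (illustration only; assumes torchvision is installed): while the patch is active,
# torch.nn layers constructed anywhere, including inside third-party code, come back as their
# quantized counterparts.
#
#     from pytorch_quantization import quant_modules
#     quant_modules.initialize()                    # monkey patch torch.nn
#     model = torchvision.models.resnet50()         # built with QuantConv2d / QuantLinear
#     quant_modules.deactivate()                    # restore the original torch.nn modules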
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/quant_modules.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Basic tensor quantization functions"""
import numpy as np
import yaml
from absl import logging
import torch
from torch.autograd import Function
class ScaledQuantDescriptor():
    """Descriptor of tensor quantization
    Describes how a tensor should be quantized. A QuantDescriptor together with a tensor defines a quantized tensor.
Args:
num_bits: An integer. Number of bits of quantization. It is used to calculate scaling factor. Default 8.
        name: A string. An optional name used in logging and the string representation. Default None.
Keyword Arguments:
fake_quant: A boolean. If True, use fake quantization mode. Default True.
        axis: None, int or tuple of ints. Axes which will have their own amax for computing the scaling factor.
            If None (the default), use per tensor scale.
            Must be in the range [-rank(input_tensor), rank(input_tensor)).
            e.g. For a KCRS weight tensor, axis=(0) will yield per channel scaling.
            Default None.
        amax: A float or list/ndarray of floats. A user specified absolute max range. If supplied,
            axis is ignored and this value is used for quantization. If learn_amax is True, it is used to
            initialize the learnable amax. Default None.
learn_amax: A boolean. If True, learn amax. Default False.
        scale_amax: A float. If supplied, multiply amax by scale_amax. Default None. It is useful for
            quick experiments.
        calib_method: A string. One of ["max", "histogram"] indicating which calibration to use. Apart from the
            simple max calibration, all other methods are histogram based. Default "max".
        unsigned: A Boolean. If True, use unsigned quantization. Default False.
        narrow_range: A Boolean. If True, use the symmetric integer range, e.g. [-127, 127] instead of
            [-128, 127] for num_bits=8. Default False.
Raises:
TypeError: If unsupported type is passed in.
Read-only properties:
- fake_quant:
- name:
- learn_amax:
- scale_amax:
- axis:
- calib_method:
- num_bits:
- amax:
- unsigned:
"""
def __init__(self, num_bits=8, name=None, **kwargs):
if not isinstance(num_bits, int):
raise TypeError("num_bits must be an integer, not {}.".format(type(num_bits)))
if num_bits < 0:
raise ValueError("num_bits must be >= 0, not {}.".format(num_bits))
if num_bits == 0:
logging.error("num_bits is 0. This will result in the tensor being quantized to all zeros."
" This mode should only be used for debugging purposes.")
self._num_bits = num_bits
if not isinstance(name, str) and name is not None:
raise TypeError("name must be a string or None, not {}.".format(type(name)))
self._name = name
self._fake_quant = kwargs.pop('fake_quant', True)
self._axis = kwargs.pop('axis', None)
if self._axis is not None:
logging.debug("Meaning of axis has changed since v2.0. Make sure to update.")
self._learn_amax = kwargs.pop('learn_amax', False)
if self._learn_amax and self._axis is not None:
raise TypeError(
"axis is ignored and must be None when learn_amax is true, got {}.".format(type(self._axis)))
amax = kwargs.pop('amax', None)
if amax is not None:
if not isinstance(amax, float) and not isinstance(
amax, list) and not isinstance(amax, np.ndarray):
raise TypeError("amax must be float, list or ndarray, not {}".format(type(amax)))
# Make it single precision array
self._amax = np.array(amax, dtype=np.float32)
else:
self._amax = amax
self._scale_amax = kwargs.pop('scale_amax', None)
self._calib_method = kwargs.pop('calib_method', "max")
self._unsigned = kwargs.pop('unsigned', False)
self._narrow_range = kwargs.pop('narrow_range', False)
if kwargs:
raise TypeError("Unused keys: {}".format(kwargs.keys()))
# pylint:disable=missing-docstring
@property
def num_bits(self):
return self._num_bits
@property
def fake_quant(self):
return self._fake_quant
@property
def axis(self):
return self._axis
@property
def amax(self):
return self._amax
@property
def learn_amax(self):
return self._learn_amax
@property
def scale_amax(self):
return self._scale_amax
@property
def name(self):
return self._name
@property
def calib_method(self):
return self._calib_method
@property
def unsigned(self):
return self._unsigned
@property
def narrow_range(self):
return self._narrow_range
# pylint:enable=missing-docstring
def __str__(self):
s = (self._name + ': ') if self._name is not None else 'QuantDescriptor'
s += "({}{}bit".format("unsigned " if self._unsigned else "", self._num_bits)
s += " fake" if self._fake_quant else " real"
s += " axis={}".format(self._axis if self._axis is not None else " per-tensor")
if isinstance(self._amax, torch.Tensor):
s += " amax={}".format(np.array2string(self._amax.cpu().numpy().flatten(), edgeitems=1,
formatter={'all': "{:.2e}".format}))
elif self._amax is not None:
s += " amax={_amax}"
s += " full_range"
if self._learn_amax:
s += " learn_amax"
if self._scale_amax:
s += " scale_amax={_scale_amax}"
s += ")"
return s.format(**self.__dict__)
def __eq__(self, rhs):
"""Compare 2 descriptors"""
return self.__dict__ == rhs.__dict__
def dict(self):
"""Serialize to dict
        The built-in __dict__ attribute returns all attributes, including those that have default values and carry
        the protected prefix "_". This method only returns attributes whose values differ from the defaults, without
        the "_" prefix in the key. Constructing an instance from the dict returned by this method should yield
        exactly the same instance.
"""
obj_dict = {}
obj_dict['num_bits'] = self._num_bits
obj_dict['name'] = self._name
if not self._fake_quant:
obj_dict['fake_quant'] = self._fake_quant
if self._axis is not None:
obj_dict['axis'] = self._axis
if self._amax is not None:
obj_dict['amax'] = self._amax.tolist()
if self._scale_amax is not None:
obj_dict['scale_amax'] = self._scale_amax
if self._learn_amax:
obj_dict['learn_amax'] = self._learn_amax
if self._unsigned:
obj_dict['unsigned'] = self._unsigned
return obj_dict
def to_yaml(self):
"""Create yaml serialization
Some attributes need special treatment to have human readable form, including amax, axis.
"""
obj_dict = self.dict()
if "axis" in obj_dict:
obj_dict['axis'] = list(obj_dict['axis'])
return yaml.dump(obj_dict, width=120)
@classmethod
def from_yaml(cls, yaml_str):
"""Create descriptor from yaml str"""
obj_dict = yaml.safe_load(yaml_str)
if 'axis' in obj_dict:
obj_dict['axis'] = tuple(obj_dict['axis'])
quant_desc = cls(**obj_dict)
return quant_desc
QuantDescriptor = ScaledQuantDescriptor
# Predefined descriptors
QUANT_DESC_8BIT_PER_TENSOR = QuantDescriptor(num_bits=8)
QUANT_DESC_UNSIGNED_8BIT_PER_TENSOR = QuantDescriptor(num_bits=8, unsigned=True)
QUANT_DESC_8BIT_CONV1D_WEIGHT_PER_CHANNEL = QuantDescriptor(num_bits=8, axis=(0))
QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL = QuantDescriptor(num_bits=8, axis=(0))
QUANT_DESC_8BIT_CONV3D_WEIGHT_PER_CHANNEL = QuantDescriptor(num_bits=8, axis=(0))
QUANT_DESC_8BIT_LINEAR_WEIGHT_PER_ROW = QuantDescriptor(num_bits=8, axis=(0))
QUANT_DESC_8BIT_CONVTRANSPOSE1D_WEIGHT_PER_CHANNEL = QuantDescriptor(num_bits=8, axis=(1))
QUANT_DESC_8BIT_CONVTRANSPOSE2D_WEIGHT_PER_CHANNEL = QuantDescriptor(num_bits=8, axis=(1))
QUANT_DESC_8BIT_CONVTRANSPOSE3D_WEIGHT_PER_CHANNEL = QuantDescriptor(num_bits=8, axis=(1))
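# Illustrative usage sketch (not part of the upstream module): a minimal round trip of a
# descriptor through dict()/to_yaml()/from_yaml() using only the API defined above. The
# name and axis values are arbitrary examples.
def _example_quant_descriptor_roundtrip():
    """Illustrative sketch: build a per-channel weight descriptor and round-trip it through yaml."""
    desc = QuantDescriptor(num_bits=8, axis=(0,), name="conv_weight")
    restored = QuantDescriptor.from_yaml(desc.to_yaml())
    assert restored == desc  # dict()/from_yaml() should reconstruct an equal descriptor
    return restored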
class TensorQuantFunction(Function):
"""A universal tensor quantization function
    Takes an input tensor and outputs a quantized tensor. The granularity of scale can be interpreted from the
    shape of amax.
    The quantized value is stored in float rather than integer because the PyTorch functions that consume it
    (e.g. Conv2D) may not accept integer input.
    It uses 2^num_bits - 1 values instead of 2^num_bits, e.g. for num_bits=8 it uses [-127, 127] instead of [-128, 127].
"""
@staticmethod
def forward(ctx, inputs, amax, num_bits=8, unsigned=False, narrow_range=True):
"""
        Follows the TensorFlow convention: the max value is passed in and used to derive the scale, instead of
        passing the scale in directly, even though passing the scale directly may feel more natural.
Args:
ctx: A Context object to store tensors for backward.
inputs: A Tensor of type float32.
amax: A Tensor of type float32. Inputs will be quantized within range [-amax, amax]
amax will be broadcasted to inputs tensor.
            num_bits: An integer used to calculate the scaling factor, scale = (2^(num_bits-1) - 1) / max.
                Effectively, it indicates how many integer bits are used to represent the value. Default 8.
unsigned: A boolean. Use unsigned integer range. E.g. [0, 255] for num_bits=8. Default False.
narrow_range: A boolean. Use symmetric integer range for signed quantization
E.g. [-127,127] instead of [-128,127] for num_bits=8. Default True.
Returns:
            outputs: A Tensor of type float32 holding the quantized integer codes.
            scale: A Tensor of type float32. outputs / scale will dequantize the outputs tensor.
Raises:
ValueError:
"""
ctx.save_for_backward(inputs, amax)
outputs, scale = _tensor_quant(inputs, amax, num_bits, unsigned, narrow_range)
# Check if scale overflows FP16
if outputs.dtype == torch.half and scale.max() > 65504:
raise ValueError("scale is too large for FP16 with amax={}".format(amax))
return outputs, scale.to(inputs.dtype)
@staticmethod
def backward(ctx, grad_outputs, grad_scale):
"""
Implements straight through estimation with clipping. For -amax <= input <= amax
the gradient passes straight through, otherwise the gradient is zero.
Args:
ctx: A Context object with saved tensors from forward.
grad_outputs: A tensor of gradient of outputs.
grad_scale: A tensor of gradient of scale.
Returns:
grad_inputs: A tensor of gradient.
"""
inputs, amax = ctx.saved_tensors
zero = grad_outputs.new_zeros(1) # create a zero tensor with the same type and device
grad_inputs = torch.where(inputs.abs() <= amax, grad_outputs, zero)
return grad_inputs, None, None, None, None
class FakeTensorQuantFunction(Function):
"""Fake version of TensorQuantFunction
See comments of TensorQuantFunction, arguments are the same.
"""
@staticmethod
def forward(ctx, inputs, amax, num_bits=8, unsigned=False, narrow_range=True):
ctx.save_for_backward(inputs, amax)
outputs, scale = _tensor_quant(inputs, amax, num_bits, unsigned, narrow_range)
return outputs / scale.to(inputs.dtype)
@staticmethod
def backward(ctx, grad_outputs):
inputs, amax = ctx.saved_tensors
zero = grad_outputs.new_zeros(1)
grad_inputs = torch.where(inputs.abs() <= amax, grad_outputs, zero)
return grad_inputs, None, None, None, None
def _tensor_quant(inputs, amax, num_bits=8, unsigned=False, narrow_range=True):
"""Shared function body between TensorQuantFunction and FakeTensorQuantFunction"""
    # Per-channel scale will be handled by broadcasting, which could be tricky; log a debug note.
if isinstance(amax, torch.Tensor) and inputs.dim() != amax.dim():
logging.debug("amax %s has different shape than inputs %s. Make sure broadcast works as expected!",
amax.size(), inputs.size())
logging.debug("{} bits quantization on shape {} tensor.".format(num_bits, inputs.size()))
if unsigned:
if inputs.min() < 0.:
raise TypeError("Negative values encountered in unsigned quantization.")
    # Computation must be in FP32 to prevent potential overflow.
input_dtype = inputs.dtype
if inputs.dtype == torch.half:
inputs = inputs.float()
if amax.dtype == torch.half:
amax = amax.float()
min_amax = amax.min()
if min_amax < 0:
raise ValueError("Negative values in amax")
max_bound = torch.tensor((2.0**(num_bits - 1 + int(unsigned))) - 1.0, device=amax.device)
if unsigned:
min_bound = 0
elif narrow_range:
min_bound = -max_bound
else:
min_bound = -max_bound - 1
scale = max_bound / amax
epsilon = 1. / (1<<24)
    if min_amax <= epsilon:  # Treat amax smaller than the minimum representable value of fp16 as 0
zero_amax_mask = (amax <= epsilon)
scale[zero_amax_mask] = 0 # Value quantized with amax=0 should all be 0
outputs = torch.clamp((inputs * scale).round_(), min_bound, max_bound)
if min_amax <= epsilon:
scale[zero_amax_mask] = 1. # Return 1 makes more sense for values quantized to 0 with amax=0
if input_dtype == torch.half:
outputs = outputs.half()
return outputs, scale
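# Illustrative sketch (not part of the upstream module) of the scale/bound arithmetic above:
# for num_bits=8, unsigned=False, narrow_range=True, max_bound = 127 and scale = 127 / amax,
# so with amax = 1.0 an input of 0.5 becomes round(0.5 * 127) = 64 and dequantizes to 64 / 127 ~= 0.504.
def _example_tensor_quant_math():
    """Illustrative sketch of _tensor_quant integer codes and scale."""
    x = torch.tensor([0.5, -1.0, 2.0])
    amax = torch.tensor(1.0)
    outputs, scale = _tensor_quant(x, amax, num_bits=8, unsigned=False, narrow_range=True)
    assert scale.item() == 127.0                      # scale = max_bound / amax
    assert outputs.tolist() == [64.0, -127.0, 127.0]  # 2.0 is clipped to amax, i.e. code 127
    return outputs / scale                            # dequantized approximation of x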
class FakeAffineTensorQuantFunction(Function):
"""Fake version of affine quantization
gemmlowp style scale+shift quantization. See more details in
https://github.com/google/gemmlowp/blob/master/doc/quantization.md.
    We DO NOT recommend affine quantization on weights for performance reasons. There might be value in affine
    quantizing activations, as the shift can be cancelled by the bias and comes with no performance penalty. This
    functionality is only added for experimental purposes.
"""
@staticmethod
def forward(ctx, inputs, min_range, max_range, num_bits=8):
"""
As it will be only applied on activation with per tensor granularity, broadcast is not needed.
Args:
ctx: Pytorch convention.
inputs: A Tensor of type float32.
min_range: A float.
max_range: A float.
num_bits: An integer
Returns:
outputs: A Tensor of type output_dtype
"""
logging.debug("{} bits quantization on shape {} tensor.".format(num_bits, inputs.size()))
ctx.save_for_backward(inputs, min_range, max_range)
step_size = (max_range - min_range) / (2.0**num_bits - 1)
min_bound = -2.0**(num_bits - 1)
max_bound = 2.0**(num_bits - 1) - 1
quant_zero = torch.round(min_range / step_size) - min_bound
quantized = torch.round(inputs / step_size) - quant_zero
quantized = torch.clamp(quantized, min_bound, max_bound)
outputs = (quantized + quant_zero) * step_size
return outputs
@staticmethod
def backward(ctx, grad_outputs):
"""
Args:
ctx: Pytorch convention.
grad_output: A tensor of gradient of outputs
Returns:
grad_inputs: A tensor of gradient
"""
inputs, min_range, max_range = ctx.saved_tensors
zero = grad_outputs.new_zeros(1)
grad_inputs = torch.where((inputs <= max_range)*(inputs >= min_range), grad_outputs, zero)
return grad_inputs, None, None, None
tensor_quant = TensorQuantFunction.apply
fake_tensor_quant = FakeTensorQuantFunction.apply
fake_affine_tensor_quant = FakeAffineTensorQuantFunction.apply
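# Illustrative usage sketch (not part of the upstream module) of the exported entry points above:
# tensor_quant() returns integer codes plus the scale, while fake_tensor_quant() returns the
# already-dequantized ("fake quantized") tensor in the original floating-point range.
def _example_exported_functions():
    """Illustrative sketch comparing tensor_quant and fake_tensor_quant."""
    x = torch.randn(4, 8)
    amax = x.abs().max()
    int_codes, scale = tensor_quant(x, amax)  # codes in [-127, 127], scale = 127 / amax
    fake_q = fake_tensor_quant(x, amax)       # equals int_codes / scale
    assert torch.allclose(int_codes / scale, fake_q)
    return fake_q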
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/tensor_quant.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Calibrator that returns the absolute max of all collected tensors"""
from absl import logging
import torch
from pytorch_quantization.calib.calibrator import _Calibrator
from pytorch_quantization import utils as quant_utils
class MaxCalibrator(_Calibrator):
"""Max calibrator, tracks the maximum value globally
Args:
calib_desc: A MaxCalibDescriptor.
num_bits: An integer. Number of bits of quantization.
axis: A tuple. see QuantDescriptor.
unsigned: A boolean. using unsigned quantization.
Readonly Properties:
        amaxs: A list of amax values. Numpy arrays are saved as they are likely to be used for plotting.
"""
def __init__(self, num_bits, axis, unsigned, track_amax=False):
super(MaxCalibrator, self).__init__(num_bits, axis, unsigned)
self._track_amax = track_amax
if self._track_amax:
self._amaxs = [] # shall we have a better name?
self._calib_amax = None
# pylint:disable=missing-docstring
@property
def amaxs(self):
return self._amaxs
# pylint:enable=missing-docstring
def collect(self, x):
"""Tracks the absolute max of all tensors
Args:
x: A tensor
Raises:
RuntimeError: If amax shape changes
"""
if torch.min(x) < 0.:
logging.log_first_n(
logging.INFO,
("Calibrator encountered negative values. It shouldn't happen after ReLU. "
"Make sure this is the right tensor to calibrate."),
1)
x = x.abs()
# Swap axis to reduce.
axis = self._axis if isinstance(self._axis, (list, tuple)) else [self._axis]
reduce_axis = []
for i in range(x.dim()):
if not i in axis:
reduce_axis.append(i)
local_amax = quant_utils.reduce_amax(x, axis=reduce_axis).detach()
if self._calib_amax is None:
self._calib_amax = local_amax
else:
if local_amax.shape != self._calib_amax.shape:
raise RuntimeError("amax shape changed!")
self._calib_amax.copy_(torch.max(self._calib_amax, local_amax).data)
if self._track_amax:
self._amaxs.append(local_amax.cpu().numpy())
def reset(self):
"""Reset the collected absolute max"""
self._calib_amax = None
def compute_amax(self):
"""Return the absolute max of all tensors collected"""
return self._calib_amax
# pylint:disable=missing-docstring
def __str__(self):
s = "MaxCalibrator("
s += "track_amax={_track_amax}"
s += ")"
return s.format(**self.__dict__)
def __repr__(self):
s = "MaxCalibrator("
s += super(MaxCalibrator, self).__repr__()
s += " calib_amax={_calib_amax}"
s += " track_amax={_track_amax}"
if self._track_amax:
s += " amaxs={_amaxs}"
s += ")"
return s.format(**self.__dict__)
# pylint:enable=missing-docstring
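# Illustrative usage sketch (not part of the upstream module): the intended
# collect()/compute_amax() flow for a per-tensor MaxCalibrator.
def _example_max_calibrator():
    """Illustrative sketch: feed a few activation batches and read back the global amax."""
    calibrator = MaxCalibrator(num_bits=8, axis=None, unsigned=False)
    for _ in range(3):
        calibrator.collect(torch.rand(16, 32))  # e.g. post-ReLU activation batches
    amax = calibrator.compute_amax()            # max absolute value seen across all batches
    calibrator.reset()                          # start a fresh calibration pass
    return amax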
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/calib/max.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Histogram based calibrators"""
from collections import Counter
import numpy as np
from scipy.stats import entropy
from absl import logging
import torch
from pytorch_quantization.calib.calibrator import _Calibrator
from pytorch_quantization.tensor_quant import fake_tensor_quant
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import utils as quant_utils
__all__ = ["HistogramCalibrator", "calibrate_weights"]
class HistogramCalibrator(_Calibrator):
"""Unified histogram calibrator
Histogram will be only collected once. compute_amax() performs entropy, percentile, or mse
calibration based on arguments
Args:
num_bits: An integer. Number of bits of quantization.
axis: A tuple. see QuantDescriptor.
unsigned: A boolean. using unsigned quantization.
num_bins: An integer. Number of histograms bins. Default 2048.
grow_method: A string. DEPRECATED. default None.
skip_zeros: A boolean. If True, skips zeros when collecting data for histogram. Default False.
torch_hist: A boolean. If True, collect histogram by torch.histc instead of np.histogram. If input tensor
is on GPU, histc will also be running on GPU. Default False.
"""
def __init__(self, num_bits, axis, unsigned, num_bins=2048, grow_method=None, skip_zeros=False, torch_hist=False):
super(HistogramCalibrator, self).__init__(num_bits, axis, unsigned)
self._num_bins = num_bins
self._skip_zeros = skip_zeros
self._calib_bin_edges = None
self._calib_hist = None
self._torch_hist = torch_hist
if axis is not None:
raise NotImplementedError("Calibrator histogram collection only supports per tensor scaling")
if grow_method is not None:
logging.warning("grow_method is deprecated. Got %s, ingored!", grow_method)
def collect(self, x):
"""Collect histogram"""
if torch.min(x) < 0.:
logging.log_first_n(
logging.INFO,
("Calibrator encountered negative values. It shouldn't happen after ReLU. "
"Make sure this is the right tensor to calibrate."),
1)
x = x.abs()
x = x.float()
if not self._torch_hist:
x_np = x.cpu().detach().numpy()
if self._skip_zeros:
x_np = x_np[np.where(x_np != 0)]
if self._calib_bin_edges is None and self._calib_hist is None:
# first time it uses num_bins to compute histogram.
self._calib_hist, self._calib_bin_edges = np.histogram(x_np, bins=self._num_bins)
else:
temp_amax = np.max(x_np)
if temp_amax > self._calib_bin_edges[-1]:
# increase the number of bins
width = self._calib_bin_edges[1] - self._calib_bin_edges[0]
# NOTE: np.arange may create an extra bin after the one containing temp_amax
new_bin_edges = np.arange(self._calib_bin_edges[-1] + width, temp_amax + width, width)
self._calib_bin_edges = np.hstack((self._calib_bin_edges, new_bin_edges))
hist, self._calib_bin_edges = np.histogram(x_np, bins=self._calib_bin_edges)
hist[:len(self._calib_hist)] += self._calib_hist
self._calib_hist = hist
else:
# This branch of code is designed to match numpy version as close as possible
with torch.no_grad():
if self._skip_zeros:
x = x[torch.where(x != 0)]
                # Because we collect the histogram on absolute values, setting min=0 simplifies the rare case where
                # the minimum value is not exactly 0 and the first batch collected has a larger min value than later batches
x_max = x.max()
if self._calib_bin_edges is None and self._calib_hist is None:
self._calib_hist = torch.histc(x, bins=self._num_bins, min=0, max=x_max)
self._calib_bin_edges = torch.linspace(0, x_max, self._num_bins + 1)
else:
if x_max > self._calib_bin_edges[-1]:
width = self._calib_bin_edges[1] - self._calib_bin_edges[0]
self._num_bins = int((x_max / width).ceil().item())
self._calib_bin_edges = torch.arange(0, x_max + width, width, device=x.device)
hist = torch.histc(x, bins=self._num_bins, min=0, max=self._calib_bin_edges[-1])
hist[:self._calib_hist.numel()] += self._calib_hist
self._calib_hist = hist
def reset(self):
"""Reset the collected histogram"""
self._calib_bin_edges = None
self._calib_hist = None
def compute_amax(
self, method: str, *, stride: int = 1, start_bin: int = 128, percentile: float = 99.99):
"""Compute the amax from the collected histogram
Args:
method: A string. One of ['entropy', 'mse', 'percentile']
Keyword Arguments:
stride: An integer. Default 1
start_bin: An integer. Default 128
            percentile: A float in the range [0, 100]. Default 99.99.
Returns:
amax: a tensor
"""
if isinstance(self._calib_hist, torch.Tensor):
calib_hist = self._calib_hist.int().cpu().numpy()
calib_bin_edges = self._calib_bin_edges.cpu().numpy()
else:
calib_hist = self._calib_hist
calib_bin_edges = self._calib_bin_edges
if method == 'entropy':
calib_amax = _compute_amax_entropy(
calib_hist, calib_bin_edges, self._num_bits, self._unsigned, stride, start_bin)
elif method == 'mse':
calib_amax = _compute_amax_mse(
calib_hist, calib_bin_edges, self._num_bits, self._unsigned, stride, start_bin)
elif method == 'percentile':
calib_amax = _compute_amax_percentile(calib_hist, calib_bin_edges, percentile)
else:
raise TypeError("Unknown calibration method {}".format(method))
return calib_amax
# pylint:disable=missing-docstring
def __str__(self):
s = "HistogramCalibrator("
if self._calib_bin_edges is None:
bin_edge_str = "None"
else:
bin_edge_str = "[{:.3f}, ..., {:.3f}]({})".format(
self._calib_bin_edges[0], self._calib_bin_edges[-1], len(self._calib_bin_edges))
s += "calib_bin_edges={})".format(bin_edge_str)
return s
def __repr__(self):
s = "HistogramCalibrator("
s += super(HistogramCalibrator, self).__repr__()
s += " calib_bin_edges={_calib_bin_edges}"
s += " calib_hist={_calib_hist})"
return s.format(**self.__dict__)
# pylint:enable=missing-docstring
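# Illustrative usage sketch (not part of the upstream module): one histogram collection pass
# followed by the three supported amax computations on the same collected histogram.
def _example_histogram_calibrator():
    """Illustrative sketch: collect once, then compare entropy/mse/percentile amax."""
    calibrator = HistogramCalibrator(num_bits=8, axis=None, unsigned=False)
    for _ in range(8):
        calibrator.collect(torch.rand(1024))  # non-negative stand-in for post-ReLU activations
    amax_entropy = calibrator.compute_amax("entropy")
    amax_mse = calibrator.compute_amax("mse")
    amax_percentile = calibrator.compute_amax("percentile", percentile=99.9)
    return amax_entropy, amax_mse, amax_percentile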
# Ideally, we want to decouple the collector (which collects the histogram) from the calibrator (which computes amax),
# as opposed to the current calibrator design. The following compute-amax functions are broken out from the calibrator
# as a first step in that direction.
def _compute_amax_entropy(calib_hist, calib_bin_edges, num_bits, unsigned, stride=1, start_bin=128):
"""Returns amax that minimizes KL-Divergence of the collected histogram"""
# If calibrator hasn't collected any data, return none
if calib_bin_edges is None and calib_hist is None:
return None
def _normalize_distr(distr):
summ = np.sum(distr)
if summ != 0:
distr = distr / summ
bins = calib_hist[:]
bins[0] = bins[1]
total_data = np.sum(bins)
divergences = []
arguments = []
# we are quantizing to 128 values + sign if num_bits=8
nbins = 1 << (num_bits - 1 + int(unsigned))
starting = start_bin
stop = len(bins)
new_density_counts = np.zeros(nbins, dtype=np.float64)
for i in range(starting, stop + 1, stride):
new_density_counts.fill(0)
space = np.linspace(0, i, num=nbins + 1)
digitized_space = np.digitize(range(i), space) - 1
digitized_space[bins[:i] == 0] = -1
for idx, digitized in enumerate(digitized_space):
if digitized != -1:
new_density_counts[digitized] += bins[idx]
counter = Counter(digitized_space)
for key, val in counter.items():
if key != -1:
new_density_counts[key] = new_density_counts[key] / val
new_density = np.zeros(i, dtype=np.float64)
for idx, digitized in enumerate(digitized_space):
if digitized != -1:
new_density[idx] = new_density_counts[digitized]
total_counts_new = np.sum(new_density) + np.sum(bins[i:])
_normalize_distr(new_density)
reference_density = np.array(bins[:len(digitized_space)])
reference_density[-1] += np.sum(bins[i:])
total_counts_old = np.sum(reference_density)
if round(total_counts_new) != total_data or round(total_counts_old) != total_data:
raise RuntimeError("Count mismatch! total_counts_new={}, total_counts_old={}, total_data={}".format(
total_counts_new, total_counts_old, total_data))
_normalize_distr(reference_density)
ent = entropy(reference_density, new_density)
divergences.append(ent)
arguments.append(i)
divergences = np.array(divergences)
logging.debug("divergences={}".format(divergences))
last_argmin = len(divergences) - 1 - np.argmin(divergences[::-1])
calib_amax = calib_bin_edges[last_argmin * stride + starting]
calib_amax = torch.tensor(calib_amax.item()) #pylint: disable=not-callable
return calib_amax
def _compute_amax_mse(calib_hist, calib_bin_edges, num_bits, unsigned, stride=1, start_bin=128):
"""Returns amax that minimizes MSE of the collected histogram"""
# If calibrator hasn't collected any data, return none
if calib_bin_edges is None and calib_hist is None:
return None
counts = torch.from_numpy(calib_hist[:]).float()
edges = torch.from_numpy(calib_bin_edges[:]).float()
centers = (edges[1:] + edges[:-1]) / 2
mses = []
arguments = []
for i in range(start_bin, len(centers), stride):
amax = centers[i]
quant_centers = fake_tensor_quant(centers, amax, num_bits, unsigned)
mse = ((quant_centers - centers)**2 * counts).mean()
mses.append(mse)
arguments.append(i)
logging.debug("mses={}".format(mses))
argmin = np.argmin(mses)
calib_amax = centers[arguments[argmin]]
return calib_amax
def _compute_amax_percentile(calib_hist, calib_bin_edges, percentile):
"""Returns amax that clips the percentile fraction of collected data"""
if percentile < 0 or percentile > 100:
raise ValueError("Invalid percentile. Must be in range 0 <= percentile <= 100.")
# If calibrator hasn't collected any data, return none
if calib_bin_edges is None and calib_hist is None:
return None
total = calib_hist.sum()
cdf = np.cumsum(calib_hist / total)
idx = np.searchsorted(cdf, percentile / 100)
calib_amax = calib_bin_edges[idx]
calib_amax = torch.tensor(calib_amax.item()) #pylint: disable=not-callable
return calib_amax
def calibrate_weights(model, method="percentile", perchannel=True, percentile=99.99, num_bins=2048):
"""Calibrate weights of all child quantized modules
    Ideally, we would split the calibration functionality into a histogram collector and a calibrator that
    takes the histogram and computes amax. But since we haven't decoupled the collector and the calibrator, it
    is easier to create a separate function to calibrate weights.
.. note::
This function uses `method` specified by the argument to decide which method to use, NOT the one
specified by the calibrator embedded in weight_quantizer.
    We haven't moved calibration to GPU, so everything is transferred to CPU.
Args:
model: A torch.nn.Module.
method: A string of calibration method. Supports "mse" and "percentile". Default "percentile"
perchannel: A bool. Set channel/neuron axis if True. Default True.
percentile: A float. Default 99.99
        num_bins: An integer. Number of histogram bins. Default 2048.
"""
for name, module in model.named_modules():
if hasattr(module, "weight") and hasattr(module, "weight_quantizer"):
logging.info("Calibrate weight of %s", name)
num_bits = module.weight_quantizer.num_bits
unsigned = module.weight_quantizer.unsigned
channel_second_modules = (
quant_nn.QuantConvTranspose1d,
quant_nn.QuantConvTranspose2d,
quant_nn.QuantConvTranspose3d
)
if perchannel:
axis = 1 if isinstance(module, channel_second_modules) else 0
else:
axis = None
axis_size = module.weight.shape[axis] if axis is not None else 1
# Histogram is always collected even if method is "max". Although "max" is supported here
# but it is not the primary usage of this function
if axis is None:
                calib_hist, calib_bin_edges = np.histogram(module.weight.abs().cpu().detach().numpy(), bins=num_bins)
calib_hist = [calib_hist]
calib_bin_edges = [calib_bin_edges]
else:
calib_hist = []
calib_bin_edges = []
for i in range(axis_size):
hist, bin_edges = np.histogram(
module.weight.index_select(
axis, torch.tensor(i, device=module.weight.device)).abs().cpu().detach().numpy(),
bins=num_bins)
calib_hist.append(hist)
calib_bin_edges.append(bin_edges)
calib_amax = []
if method == "max":
reduce_axis = list(range(module.weight.dim()))
reduce_axis.remove(axis)
calib_amax.append(quant_utils.reduce_amax(module.weight, axis=reduce_axis))
elif method == 'mse':
for i in range(axis_size):
calib_amax.append(_compute_amax_mse(calib_hist[i], calib_bin_edges[i], num_bits, unsigned))
elif method == 'percentile':
for i in range(axis_size):
calib_amax.append(_compute_amax_percentile(calib_hist[i], calib_bin_edges[i], percentile))
else:
raise TypeError("Unsupported calibration method {}".format(method))
if axis is None:
calib_amax = calib_amax[0]
else:
calib_amax_shape = [1] * module.weight.dim()
calib_amax_shape[axis] = module.weight.shape[axis]
calib_amax = torch.stack(calib_amax).reshape(calib_amax_shape)
module.weight_quantizer.amax = calib_amax.detach().cpu().numpy()
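# Illustrative usage sketch (not part of the upstream module). The two layers below are
# placeholders; any module built from pytorch_quantization.nn quantized layers exposes the
# weight / weight_quantizer pair this function expects.
def _example_calibrate_weights():
    """Illustrative sketch: per-channel percentile calibration of quantized-layer weights."""
    model = torch.nn.Sequential(quant_nn.QuantConv2d(3, 8, 3), quant_nn.QuantLinear(8, 4))
    calibrate_weights(model, method="percentile", perchannel=True, percentile=99.99)
    return model  # weight_quantizer.amax of each layer is now set per output channel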
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/calib/histogram.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""``pytorch_quantization.calib`` provides Calibrator classes that
collect data statistics and determine pytorch_quantization parameters.
"""
from .max import MaxCalibrator
from .histogram import *
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/calib/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Abstract base class for calibrators"""
class _Calibrator():
"""Abstract base class of calibrators
Args:
num_bits: An integer. Number of bits of quantization.
axis: A tuple. see QuantDescriptor.
unsigned: A boolean. using unsigned quantization.
Readonly Properties:
axis:
"""
def __init__(self, num_bits, axis, unsigned):
self._num_bits = num_bits
self._axis = axis
self._unsigned = unsigned
def collect(self, x):
"""Abstract method: collect tensor statistics used to compute amax
Args:
x: A tensor
"""
raise NotImplementedError
def reset(self):
"""Abstract method: reset calibrator to initial state"""
raise NotImplementedError
def compute_amax(self, *args, **kwargs):
"""Abstract method: compute the amax from the collected data
Returns:
amax: a tensor
"""
raise NotImplementedError
def __repr__(self):
s = "num_bits={_num_bits}"
s += " axis={_axis}"
s += " unsigned={_unsigned}"
return s.format(**self.__dict__)
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/calib/calibrator.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pytorch_quantization.nn.modules.tensor_quantizer import *
from pytorch_quantization.nn.modules.quant_conv import *
from pytorch_quantization.nn.modules.quant_linear import *
from pytorch_quantization.nn.modules.quant_pooling import *
from pytorch_quantization.nn.modules.clip import *
from pytorch_quantization.nn.modules.quant_rnn import *
from pytorch_quantization.nn.modules.quant_bert import *
from pytorch_quantization.nn.modules.quant_instancenorm import *
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Some supportive functions"""
from absl import logging
import torch
from torch.autograd import Function
class ClipFunction(Function):
"""An universal tensor clip function
Pytorch's clamp() only supports scalar range and doesn't support broadcast. This implementation uses min/max which
is more genaral. The gradient is defined according to IBM's PACT paper https://arxiv.org/abs/1805.06085, which is
also the behavior of Tensorflow's clip_by_value()
"""
@staticmethod
def forward(ctx, input, clip_value_min, clip_value_max):
output = torch.min(input, clip_value_max)
output = torch.max(output, clip_value_min)
ctx.save_for_backward(input, clip_value_min, clip_value_max)
return output
@staticmethod
def backward(ctx, grad_output):
input, clip_value_min, clip_value_max = ctx.saved_tensors
min_mask = (input > clip_value_min).to(grad_output.dtype)
max_mask = (input < clip_value_max).to(grad_output.dtype)
grad_input = grad_output * min_mask * max_mask
if clip_value_min.requires_grad or clip_value_max.requires_grad:
logging.log_first_n(logging.WARNING, "Learning clip min/max is experimental, use at your own risk :).", 1)
if clip_value_min.numel() != 1 or clip_value_max.numel() != 1:
raise ValueError("Learnable min/max can only be scalar, got size %s and %s." % (clip_value_min.size(),
clip_value_max.size()))
# Ensure the dtypes of min/max grads matches the input dtype
# This might be necessary if running w/ AMP which will cast to fp32 before `sum()`
grad_clip_value_min = (grad_output * (1. - min_mask)).sum().to(clip_value_min.dtype) if clip_value_min.requires_grad else None
grad_clip_value_max = (grad_output * (1. - max_mask)).sum().to(clip_value_min.dtype) if clip_value_max.requires_grad else None
return grad_input, grad_clip_value_min, grad_clip_value_max
clip = ClipFunction.apply
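# Illustrative usage sketch (not part of the upstream module) of the PACT-style behaviour
# described above: inside [min, max] the gradient flows to the input, outside it flows to the
# learnable clip bounds instead.
def _example_clip_gradients():
    """Illustrative sketch of clip() with learnable scalar bounds."""
    x = torch.randn(8, requires_grad=True)
    clip_min = torch.tensor(-0.5, requires_grad=True)
    clip_max = torch.tensor(0.5, requires_grad=True)
    y = clip(x, clip_min, clip_max)
    y.sum().backward()
    # x.grad is 1 only where -0.5 < x < 0.5; clipped positions contribute to the bound gradients
    return x.grad, clip_min.grad, clip_max.grad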
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/functional.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/_functions/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""RNN implementation in python
Originally copied from https://github.com/pytorch/pytorch/blob/v0.4.1/torch/nn/_functions/rnn.py
with the following modifications:
fusedBackend is removed
CudnnRNN is removed
Hack for ONNX in RNN() is removed
Only LSTM is quantized. Other paths are excluded in __all__
"""
import warnings
from torch.autograd import NestedIOFunction
from torch.nn import functional as F
import torch
import itertools
from functools import partial
__all__ = ["LSTMCell", "RNN"]
def RNNReLUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
hy = F.relu(F.linear(input, w_ih, b_ih) + F.linear(hidden, w_hh, b_hh))
return hy
def RNNTanhCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
hy = torch.tanh(F.linear(input, w_ih, b_ih) + F.linear(hidden, w_hh, b_hh))
return hy
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None, input_quantizer=None, weight_quantizer=None):
"""Quantized LSTM Cell
    The assumption is that at inference time, only one fused gemm will be launched per time step. The weights of the
    4 gates are fused together, and the activations from the layer and recurrent paths are fused together.
    ``input_quantizer`` is applied on the fused activation tensor, and ``weight_quantizer`` is applied on the fused
    weight tensor.
"""
hx, cx = hidden
if input_quantizer is not None:
input, hx = input_quantizer(torch.cat([input, hx], 1)).split([input.size()[1], hx.size()[1]], 1)
if weight_quantizer is not None:
w_ih, w_hh = weight_quantizer(torch.cat([w_ih, w_hh], 1)).split([w_ih.size()[1], w_hh.size()[1]], 1)
gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
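# Illustrative usage sketch (not part of the upstream module). Shapes follow the standard LSTM
# convention: fused gate weights are (4 * hidden_size, input_size) and (4 * hidden_size, hidden_size).
# The quantizers are optional; passing None (the default) runs the plain floating-point cell.
def _example_lstm_cell_step():
    """Illustrative sketch of a single LSTMCell step without quantizers."""
    batch, input_size, hidden_size = 2, 8, 16
    x = torch.randn(batch, input_size)
    hx = torch.zeros(batch, hidden_size)
    cx = torch.zeros(batch, hidden_size)
    w_ih = torch.randn(4 * hidden_size, input_size)
    w_hh = torch.randn(4 * hidden_size, hidden_size)
    hy, cy = LSTMCell(x, (hx, cx), w_ih, w_hh)
    return hy, cy  # both (batch, hidden_size)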
def GRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
gi = F.linear(input, w_ih, b_ih)
gh = F.linear(hidden, w_hh, b_hh)
i_r, i_i, i_n = gi.chunk(3, 1)
h_r, h_i, h_n = gh.chunk(3, 1)
resetgate = torch.sigmoid(i_r + h_r)
inputgate = torch.sigmoid(i_i + h_i)
newgate = torch.tanh(i_n + resetgate * h_n)
hy = newgate + inputgate * (hidden - newgate)
return hy
def StackedRNN(inners, num_layers, lstm=False, dropout=0, train=True):
num_directions = len(inners)
total_layers = num_layers * num_directions
def forward(input, hidden, weight, batch_sizes, input_quantizers, weight_quantizers):
assert(len(weight) == total_layers)
next_hidden = []
if lstm:
hidden = list(zip(*hidden))
for i in range(num_layers):
all_output = []
for j, inner in enumerate(inners):
l = i * num_directions + j
hy, output = inner(input, hidden[l], weight[l], batch_sizes,
input_quantizer=input_quantizers[l], weight_quantizer=weight_quantizers[l])
next_hidden.append(hy)
all_output.append(output)
input = torch.cat(all_output, input.dim() - 1)
if dropout != 0 and i < num_layers - 1:
input = F.dropout(input, p=dropout, training=train, inplace=False)
if lstm:
next_h, next_c = zip(*next_hidden)
next_hidden = (
torch.cat(next_h, 0).view(total_layers, *next_h[0].size()),
torch.cat(next_c, 0).view(total_layers, *next_c[0].size())
)
else:
next_hidden = torch.cat(next_hidden, 0).view(
total_layers, *next_hidden[0].size())
return next_hidden, input
return forward
def Recurrent(inner, reverse=False):
def forward(input, hidden, weight, batch_sizes, input_quantizer, weight_quantizer):
output = []
steps = range(input.size(0) - 1, -1, -1) if reverse else range(input.size(0))
for i in steps:
hidden = inner(input[i], hidden, *weight,
input_quantizer=input_quantizer, weight_quantizer=weight_quantizer)
# hack to handle LSTM
output.append(hidden[0] if isinstance(hidden, tuple) else hidden)
if reverse:
output.reverse()
output = torch.cat(output, 0).view(input.size(0), *output[0].size())
return hidden, output
return forward
def variable_recurrent_factory(inner, reverse=False):
if reverse:
return VariableRecurrentReverse(inner)
else:
return VariableRecurrent(inner)
def VariableRecurrent(inner):
def forward(input, hidden, weight, batch_sizes, input_quantizer, weight_quantizer):
output = []
input_offset = 0
last_batch_size = batch_sizes[0]
hiddens = []
flat_hidden = not isinstance(hidden, tuple)
if flat_hidden:
hidden = (hidden,)
for batch_size in batch_sizes:
step_input = input[input_offset:input_offset + batch_size]
input_offset += batch_size
dec = last_batch_size - batch_size
if dec > 0:
hiddens.append(tuple(h[-dec:] for h in hidden))
hidden = tuple(h[:-dec] for h in hidden)
last_batch_size = batch_size
if flat_hidden:
hidden = (inner(step_input, hidden[0], *weight,
input_quantizer=input_quantizer, weight_quantizer=weight_quantizer),)
else:
hidden = inner(step_input, hidden, *weight,
input_quantizer=input_quantizer, weight_quantizer=weight_quantizer)
output.append(hidden[0])
hiddens.append(hidden)
hiddens.reverse()
hidden = tuple(torch.cat(h, 0) for h in zip(*hiddens))
assert hidden[0].size(0) == batch_sizes[0]
if flat_hidden:
hidden = hidden[0]
output = torch.cat(output, 0)
return hidden, output
return forward
def VariableRecurrentReverse(inner):
def forward(input, hidden, weight, batch_sizes, input_quantizer, weight_quantizer):
output = []
input_offset = input.size(0)
last_batch_size = batch_sizes[-1]
initial_hidden = hidden
flat_hidden = not isinstance(hidden, tuple)
if flat_hidden:
hidden = (hidden,)
initial_hidden = (initial_hidden,)
hidden = tuple(h[:batch_sizes[-1]] for h in hidden)
for i in reversed(range(len(batch_sizes))):
batch_size = batch_sizes[i]
inc = batch_size - last_batch_size
if inc > 0:
hidden = tuple(torch.cat((h, ih[last_batch_size:batch_size]), 0)
for h, ih in zip(hidden, initial_hidden))
last_batch_size = batch_size
step_input = input[input_offset - batch_size:input_offset]
input_offset -= batch_size
if flat_hidden:
hidden = (inner(step_input, hidden[0], *weight,
input_quantizer=input_quantizer, weight_quantizer=weight_quantizer),)
else:
hidden = inner(step_input, hidden, *weight,
input_quantizer=input_quantizer, weight_quantizer=weight_quantizer)
output.append(hidden[0])
output.reverse()
output = torch.cat(output, 0)
if flat_hidden:
hidden = hidden[0]
return hidden, output
return forward
def AutogradRNN(mode, input_size, hidden_size, num_layers=1, batch_first=False,
dropout=0, train=True, bidirectional=False, variable_length=False,
dropout_state=None, flat_weight=None,
input_quantizers=None, weight_quantizers=None):
if mode == 'RNN_RELU':
cell = RNNReLUCell
elif mode == 'RNN_TANH':
cell = RNNTanhCell
elif mode == 'LSTM':
cell = LSTMCell
elif mode == 'GRU':
cell = GRUCell
else:
raise Exception('Unknown mode: {}'.format(mode))
rec_factory = variable_recurrent_factory if variable_length else Recurrent
if bidirectional:
layer = (rec_factory(cell), rec_factory(cell, reverse=True))
else:
layer = (rec_factory(cell),)
func = StackedRNN(layer,
num_layers,
(mode == 'LSTM'),
dropout=dropout,
train=train)
def forward(input, weight, hidden, batch_sizes, input_quantizers, weight_quantizers):
if batch_first and not variable_length:
input = input.transpose(0, 1)
nexth, output = func(input, hidden, weight, batch_sizes, input_quantizers, weight_quantizers)
if batch_first and not variable_length:
output = output.transpose(0, 1)
return output, nexth
return forward
def RNN(*args, **kwargs):
def forward(input, *fargs, **fkwargs):
func = AutogradRNN(*args, **kwargs)
return func(input, *fargs, **fkwargs)
return forward
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/_functions/quant_rnn.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Quantized Linear"""
from torch import nn
from torch.nn import functional as F
from pytorch_quantization import tensor_quant
from . import _utils
__all__ = ["Linear", "QuantLinear"]
class QuantLinear(nn.Linear, _utils.QuantMixin):
"""Quantized version of nn.Linear
Apply quantized linear to the incoming data, y = dequant(quant(x)quant(A)^T + b).
Keep Module name "Linear" instead of "QuantLinear" so that it can be easily dropped into preexisting model and load
pretrained weights. An alias "QuantLinear" is defined below. The base code is a copy of nn.Linear, see detailed
comment of original arguments there.
Quantization descriptors are passed in in kwargs. If not presents, default_quant_desc_input and
default_quant_desc_weight are used.
Keyword Arguments:
quant_desc_input: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of input.
        quant_desc_weight: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of weight.
Raises:
ValueError: If unsupported arguments are passed in.
KeyError: If unsupported kwargs are passed in.
Readonly properties:
- input_quantizer:
- weight_quantizer:
Static methods:
- set_default_quant_desc_input: Set default_quant_desc_input
- set_default_quant_desc_weight: Set default_quant_desc_weight
"""
default_quant_desc_input = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_LINEAR_WEIGHT_PER_ROW
def __init__(self, in_features, out_features, bias=True, **kwargs):
super(QuantLinear, self).__init__(in_features, out_features, bias)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
self.init_quantizer(quant_desc_input, quant_desc_weight)
def forward(self, input):
quant_input = self._input_quantizer(input)
quant_weight = self._weight_quantizer(self.weight)
output = F.linear(quant_input, quant_weight, bias=self.bias)
return output
Linear = QuantLinear
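# Illustrative usage sketch (not part of the upstream module). Without calibrated amax buffers
# the input and weight quantizers derive amax dynamically from the tensor being quantized, so
# this smoke test runs without a calibration pass.
def _example_quant_linear():
    """Illustrative sketch: one forward pass through a quantized linear layer."""
    import torch  # this module only imports torch.nn, so pull in torch locally
    layer = QuantLinear(16, 8)
    return layer(torch.randn(4, 16))  # (4, 8) output computed from fake-quantized input and weight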
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/modules/quant_linear.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Quantized convolution
Base code is from nn.Conv, details of Module and original argument can be found there.
Module names are intentionally kept the same as the unquantized versions so that they can be dropped into a
preexisting model easily and load pretrained weights. Aliases with the Quant prefix are defined and are encouraged
to be used explicitly when starting from scratch.
"""
import torch
import torch.nn
import torch.nn.functional as F
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.modules.conv import _ConvTransposeNd
from pytorch_quantization import tensor_quant
from . import _utils
__all__ = [
"Conv2d", "QuantConv2d", "Conv3d", "QuantConv3d", "Conv1d", "QuantConv1d", "ConvTranspose1d", "ConvTranspose2d",
"ConvTranspose3d", "QuantConvTranspose1d", "QuantConvTranspose2d", "QuantConvTranspose3d"
]
class _QuantConvNd(torch.nn.modules.conv._ConvNd, _utils.QuantMixin):
"""base class of quantized Conv inherited from _ConvNd
Comments of original arguments can be found in torch.nn.modules.conv
Arguments:
quant_desc_input: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of input.
quant_desc_weight: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of weight.
Raises:
ValueError: If unsupported arguments are passed in.
Readonly properties:
- input_quantizer:
- weight_quantizer:
Static methods:
- set_default_quant_desc_input: Set default_quant_desc_input
- set_default_quant_desc_weight: Set default_quant_desc_weight
"""
default_quant_desc_input = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding,
groups, bias, padding_mode, quant_desc_input, quant_desc_weight):
super(_QuantConvNd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation,
transposed, output_padding, groups, bias, padding_mode)
self.init_quantizer(quant_desc_input, quant_desc_weight)
def _quant(self, input):
"""Apply quantization on input and weight
        Called by the classes lower in the hierarchy; it quantizes input and weight before the derived class
        calls the corresponding functional forward (e.g. F.conv2d).
Arguments:
input: in_features to quantize
Returns:
A tuple: (quant_in_feature, quant_weight)
"""
quant_input = self._input_quantizer(input)
quant_weight = self._weight_quantizer(self.weight)
return (quant_input, quant_weight)
class QuantConv2d(_QuantConvNd):
"""Quantized 2D conv"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
**kwargs):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
_pair(0), groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input):
# the actual quantization happens in the next level of the class hierarchy
quant_input, quant_weight = self._quant(input)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
(self.padding[0] + 1) // 2, self.padding[0] // 2)
output = F.conv2d(F.pad(quant_input, expanded_padding, mode='circular'),
quant_weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
else:
output = F.conv2d(quant_input, quant_weight, self.bias, self.stride, self.padding, self.dilation,
self.groups)
return output
class QuantConv3d(_QuantConvNd):
"""Quantized 3D Conv"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV3D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
**kwargs):
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConv3d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
_triple(0), groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input):
# the actual quantization happens in the next level of the class hierarchy
quant_input, quant_weight = self._quant(input)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[2] + 1) // 2, self.padding[2] // 2,
(self.padding[1] + 1) // 2, self.padding[1] // 2,
(self.padding[0] + 1) // 2, self.padding[0] // 2)
output = F.conv3d(F.pad(quant_input, expanded_padding, mode='circular'),
quant_weight, self.bias, self.stride, _triple(0),
self.dilation, self.groups)
else:
output = F.conv3d(quant_input, quant_weight, self.bias, self.stride, self.padding, self.dilation,
self.groups)
return output
class QuantConv1d(_QuantConvNd):
"""Quantized 1D Conv"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV1D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
**kwargs):
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConv1d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
_single(0), groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input):
# the actual quantization happens in the next level of the class hierarchy
quant_input, quant_weight = self._quant(input)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[0] + 1) // 2, self.padding[0] // 2)
output = F.conv1d(F.pad(quant_input, expanded_padding, mode='circular'),
quant_weight, self.bias, self.stride,
_single(0), self.dilation, self.groups)
else:
output = F.conv1d(quant_input, quant_weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
return output
class _QuantConvTransposeNd(torch.nn.modules.conv._ConvTransposeNd, _utils.QuantMixin):
"""base class of quantized Transposed Conv inherited from _ConvTransposeNd
Comments of original arguments can be found in torch.nn.modules.conv
Arguments:
quant_desc_input: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of input.
quant_desc_weight: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of weight.
Raises:
ValueError: If unsupported arguments are passed in.
Readonly properties:
- input_quantizer:
- weight_quantizer:
Static methods:
- set_default_quant_desc_input: Set default_quant_desc_input
- set_default_quant_desc_weight: Set default_quant_desc_weight
"""
default_quant_desc_input = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding,
groups, bias, padding_mode, quant_desc_input, quant_desc_weight):
super(_QuantConvTransposeNd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation,
transposed, output_padding, groups, bias, padding_mode)
self.init_quantizer(quant_desc_input, quant_desc_weight)
def _quant(self, input):
"""Apply quantization on input and weight
        Called by the classes lower in the hierarchy; it quantizes input and weight before the derived class
        calls the corresponding functional forward (e.g. F.conv_transpose2d).
Arguments:
input: in_features to quantize
Returns:
A tuple: (quant_in_feature, quant_weight)
"""
quant_input = self._input_quantizer(input)
quant_weight = self._weight_quantizer(self.weight)
return (quant_input, quant_weight)
class QuantConvTranspose1d(_QuantConvTransposeNd):
"""Quantized ConvTranspose1d"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE1D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
**kwargs):
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
output_padding = _single(output_padding)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConvTranspose1d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input, output_size=None):
if self.padding_mode != 'zeros':
raise ValueError('Only `zeros` padding mode is supported for QuantConvTranspose1d')
output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
quant_input, quant_weight = self._quant(input)
output = F.conv_transpose1d(quant_input, quant_weight, self.bias, self.stride, self.padding, output_padding,
self.groups, self.dilation)
return output
class QuantConvTranspose2d(_QuantConvTransposeNd):
"""Quantized ConvTranspose2d"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE2D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
**kwargs):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
output_padding = _pair(output_padding)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConvTranspose2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input, output_size=None):
if self.padding_mode != 'zeros':
raise ValueError('Only `zeros` padding mode is supported for QuantConvTranspose2d')
output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
quant_input, quant_weight = self._quant(input)
output = F.conv_transpose2d(quant_input, quant_weight, self.bias, self.stride, self.padding, output_padding,
self.groups, self.dilation)
return output
class QuantConvTranspose3d(_QuantConvTransposeNd):
"""Quantized ConvTranspose3d"""
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE3D_WEIGHT_PER_CHANNEL
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
**kwargs):
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
output_padding = _triple(output_padding)
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
super(QuantConvTranspose3d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
def forward(self, input, output_size=None):
if self.padding_mode != 'zeros':
raise ValueError('Only `zeros` padding mode is supported for QuantConvTranspose3d')
output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
quant_input, quant_weight = self._quant(input)
output = F.conv_transpose3d(quant_input, quant_weight, self.bias, self.stride, self.padding, output_padding,
self.groups, self.dilation)
return output
# Define alias with Quant prefix
_ConvNd = _QuantConvNd
Conv1d = QuantConv1d
Conv2d = QuantConv2d
Conv3d = QuantConv3d
ConvTranspose1d = QuantConvTranspose1d
ConvTranspose2d = QuantConvTranspose2d
ConvTranspose3d = QuantConvTranspose3d
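# Illustrative usage sketch (not part of the upstream module). As with the linear layer, no
# calibration is required for a smoke test: without stored amax buffers the quantizers derive
# amax from the tensors at call time.
def _example_quant_conv2d():
    """Illustrative sketch: quantized 2D convolution on a random batch."""
    conv = QuantConv2d(3, 16, kernel_size=3, padding=1)
    x = torch.randn(2, 3, 32, 32)
    return conv(x)  # shape (2, 16, 32, 32), computed from fake-quantized input and weight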
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/modules/quant_conv.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Quantized instance normalization module
Base code is from nn.InstanceNorm; details of the module can be found in the official repo.
"""
from torch.nn.modules.batchnorm import _NormBase
import torch.nn.functional as F
from torch.nn.modules import instancenorm
from pytorch_quantization.nn import TensorQuantizer
from pytorch_quantization import tensor_quant
from . import _utils
__all__ = [
"QuantInstanceNorm1d", "QuantInstanceNorm2d", "QuantInstanceNorm3d"
]
class QuantInstanceNorm1d(instancenorm.InstanceNorm1d, _utils.QuantInputMixin):
r"""Applies Quantized Instance Normalization over a 3D input
"""
def __init__(
self, num_features: int, eps: float = 1e-5, momentum: float = 0.1, affine: bool = False,
track_running_stats: bool = False, **kwargs):
super(QuantInstanceNorm1d, self).__init__(
num_features, eps, momentum, affine, track_running_stats)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantInstanceNorm1d, self).forward(quant_input)
class QuantInstanceNorm2d(instancenorm.InstanceNorm2d, _utils.QuantInputMixin):
r"""Applies Quantized Instance Normalization over a 4D input
"""
def __init__(
self, num_features: int, eps: float = 1e-5, momentum: float = 0.1, affine: bool = False,
track_running_stats: bool = False, **kwargs):
super(QuantInstanceNorm2d, self).__init__(
num_features, eps, momentum, affine, track_running_stats)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantInstanceNorm2d, self).forward(quant_input)
class QuantInstanceNorm3d(instancenorm.InstanceNorm3d, _utils.QuantInputMixin):
r"""Applies Quantized Instance Normalization over a 5D input
"""
def __init__(
self, num_features: int, eps: float = 1e-5, momentum: float = 0.1, affine: bool = False,
track_running_stats: bool = False, **kwargs):
super(QuantInstanceNorm3d, self).__init__(
num_features, eps, momentum, affine, track_running_stats)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantInstanceNorm3d, self).forward(quant_input)
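# Minimal usage sketch (assumed shapes): the only difference from nn.InstanceNorm2d is
# that the input is fake-quantized before normalization. Guarded so importing this
# module has no side effects.
if __name__ == "__main__":
    import torch
    quant_norm = QuantInstanceNorm2d(8)
    x = torch.randn(2, 8, 16, 16)
    print(quant_norm(x).shape)  # expected: torch.Size([2, 8, 16, 16])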
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/modules/quant_instancenorm.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Quantized Pooling
Base code is from nn.pooling; details of the modules and their original arguments can be found there.
Module names are intentionally kept the same as the unquantized versions so that they can be dropped into a
preexisting model easily and load pretrained weights. The un-prefixed aliases defined at the bottom of this file
exist for that purpose, while the Quant-prefixed names are encouraged when writing a model from scratch.
"""
from torch.nn.modules import pooling
from . import _utils
__all__ = [
"MaxPool1d", "QuantMaxPool1d", "MaxPool2d", "QuantMaxPool2d", "MaxPool3d", "QuantMaxPool3d",
"AvgPool1d", "QuantAvgPool1d", "AvgPool2d", "QuantAvgPool2d", "AvgPool3d", "QuantAvgPool3d",
"AdaptiveAvgPool1d", "QuantAdaptiveAvgPool1d", "AdaptiveAvgPool2d", "QuantAdaptiveAvgPool2d",
"AdaptiveAvgPool3d", "QuantAdaptiveAvgPool3d"
]
class QuantMaxPool1d(pooling.MaxPool1d, _utils.QuantInputMixin):
"""Quantized 1D maxpool"""
def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False, **kwargs):
super(QuantMaxPool1d, self).__init__(kernel_size, stride, padding, dilation,
return_indices, ceil_mode)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantMaxPool1d, self).forward(quant_input)
class QuantMaxPool2d(pooling.MaxPool2d, _utils.QuantInputMixin):
"""Quantized 2D maxpool"""
def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False, **kwargs):
super(QuantMaxPool2d, self).__init__(kernel_size, stride, padding, dilation,
return_indices, ceil_mode)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantMaxPool2d, self).forward(quant_input)
class QuantMaxPool3d(pooling.MaxPool3d, _utils.QuantInputMixin):
"""Quantized 3D maxpool"""
def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False, **kwargs):
super(QuantMaxPool3d, self).__init__(kernel_size, stride, padding, dilation,
return_indices, ceil_mode)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantMaxPool3d, self).forward(quant_input)
class QuantAvgPool1d(pooling.AvgPool1d, _utils.QuantInputMixin):
"""Quantized 1D average pool"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, **kwargs):
super(QuantAvgPool1d, self).__init__(kernel_size, stride, padding, ceil_mode,
count_include_pad)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantAvgPool1d, self).forward(quant_input)
class QuantAvgPool2d(pooling.AvgPool2d, _utils.QuantInputMixin):
"""Quantized 2D average pool"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None, **kwargs):
super(QuantAvgPool2d, self).__init__(kernel_size, stride, padding, ceil_mode,
count_include_pad, divisor_override)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantAvgPool2d, self).forward(quant_input)
class QuantAvgPool3d(pooling.AvgPool3d, _utils.QuantInputMixin):
"""Quantized 3D average pool"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None, **kwargs):
super(QuantAvgPool3d, self).__init__(kernel_size, stride, padding, ceil_mode,
count_include_pad, divisor_override)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantAvgPool3d, self).forward(quant_input)
class QuantAdaptiveAvgPool1d(pooling.AdaptiveAvgPool1d, _utils.QuantInputMixin):
"""Quantized 1D adaptive average pool"""
def __init__(self, output_size, **kwargs):
super(QuantAdaptiveAvgPool1d, self).__init__(output_size)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantAdaptiveAvgPool1d, self).forward(quant_input)
class QuantAdaptiveAvgPool2d(pooling.AdaptiveAvgPool2d, _utils.QuantInputMixin):
"""Quantized 2D adaptive average pool"""
def __init__(self, output_size, **kwargs):
super(QuantAdaptiveAvgPool2d, self).__init__(output_size)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantAdaptiveAvgPool2d, self).forward(quant_input)
class QuantAdaptiveAvgPool3d(pooling.AdaptiveAvgPool3d, _utils.QuantInputMixin):
"""Quantized 3D adaptive average pool"""
def __init__(self, output_size, **kwargs):
super(QuantAdaptiveAvgPool3d, self).__init__(output_size)
quant_desc_input = _utils.pop_quant_desc_in_kwargs(self.__class__, input_only=True, **kwargs)
self.init_quantizer(quant_desc_input)
def forward(self, input):
quant_input = self._input_quantizer(input)
return super(QuantAdaptiveAvgPool3d, self).forward(quant_input)
# Define un-prefixed aliases so the quantized modules can be dropped into existing models under the standard names
MaxPool1d = QuantMaxPool1d
MaxPool2d = QuantMaxPool2d
MaxPool3d = QuantMaxPool3d
AvgPool1d = QuantAvgPool1d
AvgPool2d = QuantAvgPool2d
AvgPool3d = QuantAvgPool3d
AdaptiveAvgPool1d = QuantAdaptiveAvgPool1d
AdaptiveAvgPool2d = QuantAdaptiveAvgPool2d
AdaptiveAvgPool3d = QuantAdaptiveAvgPool3d
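# Illustrative drop-in sketch (assumed shapes): thanks to the un-prefixed aliases above,
# an existing model can switch to quantized pooling by changing only the import. Guarded
# so importing this module stays side-effect free.
if __name__ == "__main__":
    import torch
    pool = QuantMaxPool2d(kernel_size=2, stride=2)
    x = torch.randn(1, 3, 8, 8)
    print(pool(x).shape)  # expected: torch.Size([1, 3, 4, 4])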
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/modules/quant_pooling.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/modules/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Quantized BERT Self Attention
This module has been adapted from the Hugging Face transformers repo:
https://github.com/huggingface/transformers/tree/v2.9.1
"""
import math
import torch
from torch import nn
from pytorch_quantization import nn as quant_nn
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
__all__ = ["QuantBertSelfAttention"]
class QuantBertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
# Quantized implementations of torch.nn.Linear modules
self.query = quant_nn.QuantLinear(config.hidden_size, self.all_head_size)
self.key = quant_nn.QuantLinear(config.hidden_size, self.all_head_size)
self.value = quant_nn.QuantLinear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
# Additional quantizers needed to quantize the inputs to the torch.matmul() operations in the
# forward method. Since matmul is a simple functional op with no quantized module equivalent, its
# inputs are quantized manually here to realize a quantized mat-mul operation.
self.matmul_q_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
self.matmul_k_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
self.matmul_v_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
self.matmul_a_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
attention_mask = encoder_attention_mask
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
# Quantized matrix multiplication. Achieved by quantizing the inputs to torch.matmul().
attention_scores = torch.matmul(
self.matmul_q_input_quantizer(query_layer),
self.matmul_k_input_quantizer(key_layer.transpose(-1, -2)))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
# Quantized matrix multiplication. Achieved by quantizing the inputs to torch.matmul().
context_layer = torch.matmul(
self.matmul_a_input_quantizer(attention_probs),
self.matmul_v_input_quantizer(value_layer))
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
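# Illustrative sketch: the SimpleNamespace below is a minimal stand-in for a real BertConfig
# and only carries the attributes read by __init__ above; all values are arbitrary assumptions.
# Guarded so importing this module has no side effects.
if __name__ == "__main__":
    from types import SimpleNamespace
    config = SimpleNamespace(hidden_size=64, num_attention_heads=4,
                             output_attentions=False, attention_probs_dropout_prob=0.1)
    attn = QuantBertSelfAttention(config)
    hidden_states = torch.randn(2, 10, 64)
    (context,) = attn(hidden_states)
    print(context.shape)  # expected: torch.Size([2, 10, 64])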
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/modules/quant_bert.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""RNN implementation in python
originally copied from https://github.com/pytorch/pytorch/blob/v0.4.1/torch/nn/modules/rnn.py
backend is changed to _functions/quant_rnn.py
"""
import math
import torch
import warnings
import itertools
import numbers
from torch import nn
from torch.nn import Parameter
from torch.nn.utils.rnn import PackedSequence
from pytorch_quantization import tensor_quant
from pytorch_quantization.nn._functions import quant_rnn
from . import _utils
__all__ = ["QuantLSTM", "QuantLSTMCell", "LSTM", "LSTMCell"]
class QuantRNNBase(nn.Module, _utils.QuantMixin):
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_LINEAR_WEIGHT_PER_ROW
def __init__(self, mode, input_size, hidden_size,
num_layers=1, bias=True, batch_first=False,
dropout=0, bidirectional=False, proj_size=0, **kwargs):
super(QuantRNNBase, self).__init__()
self.mode = mode
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.dropout_state = {}
self.bidirectional = bidirectional
self.proj_size = proj_size
num_directions = 2 if bidirectional else 1
if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
isinstance(dropout, bool):
raise ValueError("dropout should be a number in range [0, 1] "
"representing the probability of an element being "
"zeroed")
if dropout > 0 and num_layers == 1:
warnings.warn("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
"num_layers greater than 1, but got dropout={} and "
"num_layers={}".format(dropout, num_layers))
if proj_size < 0:
raise ValueError("proj_size should be a positive integer or zero to disable projections")
if proj_size > 0:
raise ValueError("proj_size is not supported in pytorch-quantization yet")
if mode == 'LSTM':
gate_size = 4 * hidden_size
elif mode == 'GRU':
gate_size = 3 * hidden_size
else:
gate_size = hidden_size
self._all_weights = []
for layer in range(num_layers):
for direction in range(num_directions):
layer_input_size = input_size if layer == 0 else hidden_size * num_directions
w_ih = Parameter(torch.Tensor(gate_size, layer_input_size))
w_hh = Parameter(torch.Tensor(gate_size, hidden_size))
b_ih = Parameter(torch.Tensor(gate_size))
b_hh = Parameter(torch.Tensor(gate_size))
layer_params = (w_ih, w_hh, b_ih, b_hh)
suffix = '_reverse' if direction == 1 else ''
param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
if bias:
param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
param_names = [x.format(layer, suffix) for x in param_names]
for name, param in zip(param_names, layer_params):
setattr(self, name, param)
self._all_weights.append(param_names)
self.flatten_parameters()
self.reset_parameters()
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
self.init_quantizer(quant_desc_input, quant_desc_weight, num_layers=num_layers * (1 + bidirectional))
def flatten_parameters(self):
"""Resets parameter data pointer so that they can use faster code paths.
Right now, this works only if the module is on the GPU and cuDNN is enabled.
Otherwise, it's a no-op.
"""
any_param = next(self.parameters()).data
if not any_param.is_cuda or not torch.backends.cudnn.is_acceptable(any_param):
self._data_ptrs = []
return
# If any parameters alias, we fall back to the slower, copying code path. This is
# a sufficient check, because overlapping parameter buffers that don't completely
# alias would break the assumptions of the uniqueness check in
# Module.named_parameters().
unique_data_ptrs = set(p.data_ptr() for l in self.all_weights for p in l)
if len(unique_data_ptrs) != sum(len(l) for l in self.all_weights):
self._data_ptrs = []
return
with torch.cuda.device_of(any_param):
import torch.backends.cudnn.rnn as rnn
weight_arr = list(itertools.chain.from_iterable(self.all_weights))
weight_stride0 = len(self.all_weights[0])
# NB: This is a temporary hack while we still don't have Tensor
# bindings for ATen functions
with torch.no_grad():
# NB: this is an INPLACE function on weight_arr, that's why the
# no_grad() is necessary.
weight_buf = torch._cudnn_rnn_flatten_weight(weight_arr, weight_stride0, self.input_size,
rnn.get_cudnn_mode(self.mode), self.hidden_size,
self.proj_size, self.num_layers, self.batch_first,
bool(self.bidirectional))
self._param_buf_size = weight_buf.size(0)
self._data_ptrs = list(p.data.data_ptr() for p in self.parameters())
def _apply(self, fn):
ret = super(QuantRNNBase, self)._apply(fn)
self.flatten_parameters()
return ret
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
def check_forward_args(self, input, hidden, batch_sizes):
is_input_packed = batch_sizes is not None
expected_input_dim = 2 if is_input_packed else 3
if input.dim() != expected_input_dim:
raise RuntimeError(
'input must have {} dimensions, got {}'.format(
expected_input_dim, input.dim()))
if self.input_size != input.size(-1):
raise RuntimeError(
'input.size(-1) must be equal to input_size. Expected {}, got {}'.format(
self.input_size, input.size(-1)))
if is_input_packed:
mini_batch = int(batch_sizes[0])
else:
mini_batch = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.hidden_size)
def check_hidden_size(hx, expected_hidden_size, msg='Expected hidden size {}, got {}'):
if tuple(hx.size()) != expected_hidden_size:
raise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))
if self.mode == 'LSTM':
check_hidden_size(hidden[0], expected_hidden_size,
'Expected hidden[0] size {}, got {}')
check_hidden_size(hidden[1], expected_hidden_size,
'Expected hidden[1] size {}, got {}')
else:
check_hidden_size(hidden, expected_hidden_size)
def forward(self, input, hx=None):
is_packed = isinstance(input, PackedSequence)
if is_packed:
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
else:
batch_sizes = None
max_batch_size = input.size(0) if self.batch_first else input.size(1)
if hx is None:
num_directions = 2 if self.bidirectional else 1
hx = input.new_zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
requires_grad=False)
if self.mode == 'LSTM':
hx = (hx, hx)
has_flat_weights = list(p.data.data_ptr() for p in self.parameters()) == self._data_ptrs
if has_flat_weights:
first_data = next(self.parameters()).data
assert first_data.storage().size() == self._param_buf_size
flat_weight = first_data.new().set_(first_data.storage(), 0, torch.Size([self._param_buf_size]))
else:
flat_weight = None
self.check_forward_args(input, hx, batch_sizes)
func = quant_rnn.RNN(
self.mode,
self.input_size,
self.hidden_size,
num_layers=self.num_layers,
batch_first=self.batch_first,
dropout=self.dropout,
train=self.training,
bidirectional=self.bidirectional,
dropout_state=self.dropout_state,
variable_length=is_packed,
flat_weight=flat_weight
)
output, hidden = func(input, self.all_weights, hx, batch_sizes, self._input_quantizers, self._weight_quantizers)
if is_packed:
output = PackedSequence(output, batch_sizes)
return output, hidden
def extra_repr(self):
s = '{input_size}, {hidden_size}'
if self.num_layers != 1:
s += ', num_layers={num_layers}'
if self.bias is not True:
s += ', bias={bias}'
if self.batch_first is not False:
s += ', batch_first={batch_first}'
if self.dropout != 0:
s += ', dropout={dropout}'
if self.bidirectional is not False:
s += ', bidirectional={bidirectional}'
return s.format(**self.__dict__)
def __setstate__(self, d):
super(QuantRNNBase, self).__setstate__(d)
self.__dict__.setdefault('_data_ptrs', [])
if 'all_weights' in d:
self._all_weights = d['all_weights']
if isinstance(self._all_weights[0][0], str):
return
num_layers = self.num_layers
num_directions = 2 if self.bidirectional else 1
self._all_weights = []
for layer in range(num_layers):
for direction in range(num_directions):
suffix = '_reverse' if direction == 1 else ''
weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}', 'bias_hh_l{}{}']
weights = [x.format(layer, suffix) for x in weights]
if self.bias:
self._all_weights += [weights]
else:
self._all_weights += [weights[:2]]
@property
def all_weights(self):
return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
class QuantRNN(QuantRNNBase):
r"""Applies a multi-layer Elman RNN with `tanh` or `ReLU` non-linearity to an
input sequence.
"""
def __init__(self, *args, **kwargs):
if 'proj_size' in kwargs:
raise ValueError("proj_size argument is only supported for LSTM, not RNN or GRU")
if 'nonlinearity' in kwargs:
if kwargs['nonlinearity'] == 'tanh':
mode = 'RNN_TANH'
elif kwargs['nonlinearity'] == 'relu':
mode = 'RNN_RELU'
else:
raise ValueError("Unknown nonlinearity '{}'".format(
kwargs['nonlinearity']))
del kwargs['nonlinearity']
else:
mode = 'RNN_TANH'
super(QuantRNN, self).__init__(mode, *args, **kwargs)
class QuantLSTM(QuantRNNBase):
r"""Applies a multi-layer long short-term memory (LSTM) RNN to an input
sequence.
"""
def __init__(self, *args, **kwargs):
super(QuantLSTM, self).__init__('LSTM', *args, **kwargs)
class GRU(QuantRNNBase):
r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
"""
def __init__(self, *args, **kwargs):
super(GRU, self).__init__('GRU', *args, **kwargs)
class QuantRNNCellBase(nn.Module, _utils.QuantMixin):
default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_LINEAR_WEIGHT_PER_ROW
def extra_repr(self):
s = '{input_size}, {hidden_size}'
if 'bias' in self.__dict__ and self.bias is not True:
s += ', bias={bias}'
if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
s += ', nonlinearity={nonlinearity}'
return s.format(**self.__dict__)
def check_forward_input(self, input):
if input.size(1) != self.input_size:
raise RuntimeError(
"input has inconsistent input_size: got {}, expected {}".format(
input.size(1), self.input_size))
def check_forward_hidden(self, input, hx, hidden_label=''):
if input.size(0) != hx.size(0):
raise RuntimeError(
"Input batch size {} doesn't match hidden{} batch size {}".format(
input.size(0), hidden_label, hx.size(0)))
if hx.size(1) != self.hidden_size:
raise RuntimeError(
"hidden{} has inconsistent hidden_size: got {}, expected {}".format(
hidden_label, hx.size(1), self.hidden_size))
class QuantRNNCell(QuantRNNCellBase):
r"""An Elman RNN cell with tanh or ReLU non-linearity.
"""
def __init__(self, input_size, hidden_size, bias=True, nonlinearity="tanh"):
super(QuantRNNCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.nonlinearity = nonlinearity
self.weight_ih = Parameter(torch.Tensor(hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(hidden_size, hidden_size))
if bias:
self.bias_ih = Parameter(torch.Tensor(hidden_size))
self.bias_hh = Parameter(torch.Tensor(hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
def forward(self, input, hx=None):
self.check_forward_input(input)
if hx is None:
hx = input.new_zeros(input.size(0), self.hidden_size, requires_grad=False)
self.check_forward_hidden(input, hx)
if self.nonlinearity == "tanh":
func = quant_rnn.RNNTanhCell
elif self.nonlinearity == "relu":
func = quant_rnn.RNNReLUCell
else:
raise RuntimeError(
"Unknown nonlinearity: {}".format(self.nonlinearity))
return func(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
)
class QuantLSTMCell(QuantRNNCellBase):
r"""A long short-term memory (LSTM) cell.
"""
def __init__(self, input_size, hidden_size, bias=True, **kwargs):
super(QuantLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, hidden_size))
if bias:
self.bias_ih = Parameter(torch.Tensor(4 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(4 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
self.reset_parameters()
quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
self.init_quantizer(quant_desc_input, quant_desc_weight)
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
def forward(self, input, hx=None):
self.check_forward_input(input)
if hx is None:
hx = input.new_zeros(input.size(0), self.hidden_size, requires_grad=False)
hx = (hx, hx)
self.check_forward_hidden(input, hx[0], '[0]')
self.check_forward_hidden(input, hx[1], '[1]')
return quant_rnn.LSTMCell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
self._input_quantizer, self._weight_quantizer
)
class GRUCell(QuantRNNCellBase):
r"""A gated recurrent unit (GRU) cell
"""
def __init__(self, input_size, hidden_size, bias=True):
super(GRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = Parameter(torch.Tensor(3 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(3 * hidden_size, hidden_size))
if bias:
self.bias_ih = Parameter(torch.Tensor(3 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(3 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
def forward(self, input, hx=None):
self.check_forward_input(input)
if hx is None:
hx = input.new_zeros(input.size(0), self.hidden_size, requires_grad=False)
self.check_forward_hidden(input, hx)
return quant_rnn.GRUCell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
)
LSTM = QuantLSTM
LSTMCell = QuantLSTMCell
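# Illustrative sketch (assumed sizes): QuantLSTM mirrors nn.LSTM but quantizes layer inputs
# and weights through the per-layer quantizers created in init_quantizer. This assumes the
# pure-Python quant_rnn backend handles CPU tensors; guarded so importing this module has
# no side effects.
if __name__ == "__main__":
    lstm = QuantLSTM(input_size=16, hidden_size=32, num_layers=1, batch_first=True)
    x = torch.randn(4, 10, 16)
    output, (h_n, c_n) = lstm(x)
    print(output.shape, h_n.shape)  # expected: torch.Size([4, 10, 32]) torch.Size([1, 4, 32])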
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/modules/quant_rnn.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implement a clip module as pytorch only has a simple clamp function """
import torch
from torch import nn
from torch.nn.parameter import Parameter
from pytorch_quantization.nn import functional as QF
__all__ = ['Clip']
class Clip(nn.Module):
"""Clip tensor
Args:
clip_value_min: A number or tensor of lower bound to clip
clip_value_max: A number or tensor of upper bound to clip
learn_min: A boolean. If True, learn min. clip_value_min will be used to initialize. Default False
learn_max: A boolean. Similar as learn_min but for max.
Raises:
ValueError: If learn_min/learn_max is True but the corresponding bound is not a scalar.
"""
def __init__(self, clip_value_min, clip_value_max, learn_min=False, learn_max=False):
super(Clip, self).__init__()
if learn_min:
if not isinstance(clip_value_min, float) and clip_value_min.size != 1:
raise ValueError("clip_value_min/clip_value_max must be scalar for initilizing learnable range.")
self.clip_value_min = Parameter(torch.tensor(clip_value_min)) # pylint: disable=not-callable
else:
self.clip_value_min = clip_value_min
if learn_max:
if not isinstance(clip_value_max, float) and clip_value_max.size != 1:
raise ValueError("clip_value_min/clip_value_max must be scalar for initilizing learnable range.")
self.clip_value_max = Parameter(torch.tensor(clip_value_max)) # pylint: disable=not-callable
else:
self.clip_value_max = clip_value_max
def forward(self, inputs):
outputs = QF.clip(inputs, self.clip_value_min, self.clip_value_max)
return outputs
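# Minimal usage sketch: with learn_min/learn_max the bounds become Parameters, so the clip
# range can be trained together with the rest of the network. Values are arbitrary
# assumptions; guarded so importing this module has no side effects.
if __name__ == "__main__":
    clip_op = Clip(-1.0, 1.0, learn_min=True, learn_max=True)
    x = torch.randn(8) * 3.0
    y = clip_op(x)
    print(float(y.min()) >= -1.0, float(y.max()) <= 1.0)  # expected: True True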
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/modules/clip.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""TensorQuantizer Module"""
import math
from absl import logging
import torch
from torch import nn
from pytorch_quantization.tensor_quant import QuantDescriptor, tensor_quant, fake_tensor_quant
from pytorch_quantization.nn.modules.clip import Clip
from pytorch_quantization import calib
import pytorch_quantization.utils as quant_utils
__all__ = ['TensorQuantizer']
class TensorQuantizer(nn.Module):
"""Tensor quantizer module
This module uses the tensor_quant or fake_tensor_quant function to quantize a tensor. It also wraps the
variables and moving statistics we'd want when training a quantized network.
Experimental features:
``clip`` stage learns range before enabling quantization.
``calib`` stage runs calibration
Args:
quant_desc: An instance of :func:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
disabled: A boolean. If True, bypass the whole module and return the input. Default False.
if_quant: A boolean. If True, run main quantization body. Default True.
if_clip: A boolean. If True, clip before quantization and learn amax. Default False.
if_calib: A boolean. If True, run calibration in the forward pass. Default False. Settings of calibration are
taken from :func:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Raises:
Readonly Properties:
- axis:
- fake_quant:
- scale:
- step_size:
Mutable Properties:
- num_bits:
- unsigned:
- amax:
"""
# An experimental static switch for using pytorch's native fake quantization
# Primary usage is to export to ONNX
use_fb_fake_quant = False
def __init__(self, quant_desc=QuantDescriptor(), disabled=False, if_quant=True, if_clip=False, if_calib=False):
"""Initialize quantizer and set up required variables"""
super(TensorQuantizer, self).__init__()
# Expand quant_desc. Using quant_desc.dict would be easier, but adding attributes one-by-one explicitly gives more control
self._num_bits = quant_desc.num_bits
self._fake_quant = quant_desc.fake_quant
self._axis = quant_desc.axis
self._scale_amax = quant_desc.scale_amax
self._learn_amax = quant_desc.learn_amax
self._unsigned = quant_desc.unsigned
self._narrow_range = quant_desc.narrow_range
self._scale = None if not quant_desc.fake_quant else 1.
self._disabled = disabled
self._if_quant = if_quant
self._if_clip = False
self._if_calib = if_calib
if quant_desc.amax is not None:
self.register_buffer('_amax', torch.tensor(quant_desc.amax))
# Clip module consumes a lot of memory, so only create it if learn_amax is True
if self._learn_amax:
init_amax = quant_desc.amax if quant_desc.amax is not None else 1.
self.clip = Clip(-init_amax, init_amax, learn_min=True, learn_max=True)
# It makes more sense to enable clip stage (which learns amax) if learn_amax is true
self.enable_clip()
if if_clip:
self.enable_clip()
if quant_desc.calib_method == "histogram":
logging.info("Creating histogram calibrator")
self._calibrator = calib.HistogramCalibrator(
num_bits=self._num_bits, axis=self._axis, unsigned=self._unsigned)
elif quant_desc.calib_method == "max":
logging.info("Creating Max calibrator")
self._calibrator = calib.MaxCalibrator(num_bits=self._num_bits, axis=self._axis, unsigned=self._unsigned)
# pylint:disable=missing-docstring
@property
def num_bits(self):
return self._num_bits
@property
def unsigned(self):
return self._unsigned
@property
def scale(self):
if self._fake_quant:
logging.error("Fake quantize mode doesn't use scale explicitly!")
if self._scale is None:
logging.critical("Accessing scale before quantizing any tensor!")
return self._scale
@property
def amax(self):
if not hasattr(self, "_amax"):
return None
return self._amax
@property
def step_size(self):
if not hasattr(self, "_amax"):
logging.error("step_size is undefined under dynamic amax mode!")
return None
return self._amax / (2.0**(self._num_bits - 1 + int(self._unsigned)) - 1.0)
@property
def axis(self):
return self._axis
@property
def fake_quant(self):
return self._fake_quant
@property
def narrow_range(self):
return self._narrow_range
def disable(self):
"""Bypass the module"""
self._disabled = True
def enable(self):
self._disabled = False
def disable_clip(self):
"""Disable clip stage"""
self._if_clip = False
self.clip.clip_value_min.requires_grad = False
self.clip.clip_value_max.requires_grad = False
def enable_clip(self):
"""Enable clip stage"""
logging.warning("Enable `clip` stage for amax learning.")
if not self._learn_amax:
raise ValueError("learn_amax is False. Cannot enable clip.")
self.clip.clip_value_min.requires_grad = True
self.clip.clip_value_max.requires_grad = True
self._if_clip = True
def disable_calib(self):
logging.warning("Disable {}".format(self._calibrator.__class__.__name__))
self._if_calib = False
def enable_calib(self):
if self._calibrator is None:
raise ValueError("Calibrator was not created, cannot enable calibration.")
logging.info("Enable {}".format(self._calibrator.__class__.__name__))
self._if_calib = True
def disable_quant(self):
logging.info("Disable `quant` stage.")
self._if_quant = False
def enable_quant(self):
logging.info("Enable `quant` stage.")
self._if_quant = True
@amax.setter
def amax(self, value):
if value is None:
logging.error("Setting amax no None is meaningless.")
else:
if isinstance(value, torch.Tensor):
logging.warning("amax setter is not designed to take tensor.")
if not hasattr(self, "_amax"):
self.register_buffer('_amax', torch.tensor(value))
else:
value = torch.tensor(value, device=self._amax.device)
if self._amax.shape != value.shape:
raise TypeError("Changing shape when setting amax is not allowed.")
self._amax.data.copy_(value.data)
@num_bits.setter
def num_bits(self, value):
self._num_bits = value
@unsigned.setter
def unsigned(self, value):
self._unsigned = value
@narrow_range.setter
def narrow_range(self, value):
self._narrow_range = value
# pylint:enable=missing-docstring
def load_calib_amax(self, *args, **kwargs):
"""Load amax from calibrator.
Updates the amax buffer with value computed by the calibrator, creating it if necessary.
*args and **kwargs are directly passed to compute_amax, except "strict" in kwargs. Refer to
compute_amax for more details.
"""
strict = kwargs.pop("strict", True)
if getattr(self, '_calibrator', None) is None:
raise RuntimeError("Calibrator not created.")
calib_amax = self._calibrator.compute_amax(*args, **kwargs)
if calib_amax is None:
err_msg = "Calibrator returned None. This usually happens when calibrator hasn't seen any tensor."
if not strict:
logging.warning(err_msg)
logging.warning("Set amax to NaN!")
calib_amax = torch.tensor(math.nan)
else:
raise RuntimeError(err_msg + " Passing 'strict=False' to `load_calib_amax()` will ignore the error.")
logging.warning("Load calibrated amax, shape={}.".format(calib_amax.shape))
logging.log_first_n(
logging.WARNING, "Call .cuda() if running on GPU after loading calibrated amax.", 1)
if not hasattr(self, '_amax'):
self.register_buffer('_amax', calib_amax.data)
else:
self._amax.copy_(calib_amax)
def init_learn_amax(self):
"""Initialize learned amax from fixed amax"""
if self._learn_amax is False:
raise RuntimeError("Called init_learn_amax with learn_amax=False.")
logging.warning("Load amax as initial value for amax learning!")
if self._amax.numel() != 1:
logging.warning("Per channel learned amax not supported. Initializing with max(amax).")
init_amax = torch.max(self._amax)
else:
init_amax = self._amax
self.clip.clip_value_min.data.copy_(-init_amax.data)
self.clip.clip_value_max.data.copy_(init_amax.data)
def _get_amax(self, inputs):
"""get amax from buffer or compute it dynamically."""
if hasattr(self, '_amax'):
amax = self._amax
else:
if self._axis is None:
reduce_axis = None
else:
reduce_axis = []
# Swap axis to reduce
axis = self._axis if isinstance(self._axis, (list, tuple)) else [self._axis]
for i in range(inputs.dim()):
if not i in axis:
reduce_axis.append(i)
amax = quant_utils.reduce_amax(inputs, axis=reduce_axis, keepdims=True).detach()
if self._scale_amax is not None:
amax = amax.detach() * self._scale_amax
return amax
def _fb_fake_quant(self, inputs, amax):
"""Native pytorch fake quantization."""
logging.log_first_n(logging.WARNING, "Use Pytorch's native experimental fake quantization.", 1)
bound = (1 << (self._num_bits - 1 + int(self._unsigned))) - 1
# To be consistent with ONNX, full range is used. e.g. range is [-128, 127] in int8
if amax.numel() == 1:
outputs = torch.fake_quantize_per_tensor_affine(
inputs, amax.item() / bound, 0,
-bound - 1 if not self._unsigned else 0, bound)
else:
amax_squeeze = amax.squeeze().detach()
if len(amax_squeeze.shape) != 1:
raise TypeError("Pytorch's native quantization doesn't support multiple axes")
quant_dim = list(amax.shape).index(list(amax_squeeze.shape)[0])
scale = amax_squeeze / bound
outputs = torch.fake_quantize_per_channel_affine(
inputs, scale.data, torch.zeros_like(scale, dtype=torch.long).data, quant_dim,
-bound - 1 if not self._unsigned else 0, bound)
return outputs
def _quant_forward(self, inputs):
"""Quantized forward pass."""
if self._learn_amax:
inputs = self.clip(inputs)
amax = torch.max(-self.clip.clip_value_min, self.clip.clip_value_max).detach()
else:
amax = self._get_amax(inputs)
if self._fake_quant:
if not TensorQuantizer.use_fb_fake_quant:
outputs = fake_tensor_quant(inputs, amax, self._num_bits, self._unsigned, self._narrow_range)
else:
if inputs.dtype == torch.half or amax.dtype == torch.half:
raise Exception("Exporting to ONNX in fp16 is not supported. Please export in fp32, i.e. disable AMP.")
outputs = self._fb_fake_quant(inputs, amax)
else:
outputs, self._scale = tensor_quant(inputs, amax, self._num_bits, self._unsigned)
return outputs
def forward(self, inputs):
"""Apply tensor_quant function to inputs
Args:
inputs: A Tensor of type float32.
Returns:
outputs: A Tensor of type output_dtype
"""
if self._disabled:
return inputs
outputs = inputs
if self._if_calib:
if self._calibrator is None:
raise RuntimeError("Calibrator was not created.")
# Shape is only known when it sees the first tensor
self._calibrator.collect(inputs)
if self._if_clip:
if not self._learn_amax:
raise RuntimeError("Clip without learning amax is not implemented.")
outputs = self.clip(inputs)
if self._if_quant:
outputs = self._quant_forward(inputs)
return outputs
def _short_amax(self, fmt='.4f'):
"""Short description of amax
Returns:
'dynamic': if _amax is not registered
'amax': if _amax is per-tensor
'[min, max](size)': if _amax is per-channel
"""
if not hasattr(self, '_amax'):
return 'dynamic'
if self._amax.numel() == 1:
return '{:{fmt}}'.format(self._amax.item(), fmt=fmt)
return '[{:{fmt}}, {:{fmt}}]({})'.format(self._amax.min().item(), self._amax.max().item(),
self._amax.numel(), fmt=fmt)
def extra_repr(self):
if self._disabled:
return "disabled"
s = "{}{}bit".format("unsigned " if self._unsigned else "", self._num_bits)
s += " narrow" if (self._narrow_range) else ""
s += " fake" if (self._fake_quant) else ""
s += " axis={}".format(self._axis) if self._axis is not None else " per-tensor"
s += " amax={}".format(self._short_amax())
s += " *{}".format(self._scale_amax) if self._scale_amax else ""
s += " learned" if (self._learn_amax) else ""
s += " calibrator={}".format(self._calibrator.__class__.__name__) if (self._calibrator is not None) else ""
s += " scale={}".format(self._scale) if self._scale is not None else ""
s += " quant" if (self._if_quant) else ""
s += " clip" if (self._if_clip) else ""
s += " calib" if (self._if_calib) else ""
return s
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
"""Overloaded module function
Adds warnings during state_dict loading.
A workaround is implemented for loading amax from checkpoint and only supports CUDA.
Args:
state_dict: A dict containing the state of the top level module
prefix: A string that prefixes all of this modules state in state_dict, e.g. 'model.conv1.'
"""
dst_has_amax = '_amax' in self._buffers
src_has_amax = prefix + '_amax' in state_dict
if not src_has_amax and dst_has_amax:
logging.error("{}: No amax in state_dict.".format(prefix[:-1]))
elif src_has_amax and not dst_has_amax:
logging.debug(("{}: No '_amax' buffer to load amax into."
" '_amax` will be created as WAR for now. "
"This behavior will change in future.").format(prefix[:-1]))
self.register_buffer("_amax", state_dict[prefix + '_amax'].data.cuda())
elif src_has_amax and dst_has_amax:
logging.warning("{}: Overwriting amax.".format(prefix[:-1]))
super(TensorQuantizer, self)._load_from_state_dict(state_dict, prefix, *args, **kwargs)
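# Illustrative sketch of the two common modes, with arbitrary tensor sizes. Guarded so
# importing this module has no side effects.
if __name__ == "__main__":
    # 1) Dynamic-amax fake quantization: amax is computed from each tensor on the fly.
    quantizer = TensorQuantizer(QuantDescriptor(num_bits=8))
    x = torch.randn(2, 16)
    x_fq = quantizer(x)
    # 2) Calibration flow: collect statistics first, then load amax and quantize.
    calib_quantizer = TensorQuantizer(QuantDescriptor(num_bits=8), if_quant=False, if_calib=True)
    calib_quantizer(x)                 # collect() is called on the calibrator
    calib_quantizer.load_calib_amax()  # creates the _amax buffer from the calibrator
    print(quantizer, calib_quantizer.amax)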
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/modules/tensor_quantizer.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Some helper functions for implementing quantized modules"""
import copy
import inspect
from absl import logging
from torch import nn
from pytorch_quantization.nn import TensorQuantizer
from pytorch_quantization.tensor_quant import QuantDescriptor, QUANT_DESC_8BIT_PER_TENSOR
class QuantMixin():
"""Mixin class for adding basic quantization logic to quantized modules"""
default_quant_desc_input = QUANT_DESC_8BIT_PER_TENSOR
default_quant_desc_weight = QUANT_DESC_8BIT_PER_TENSOR
@classmethod
def set_default_quant_desc_input(cls, value):
"""
Args:
value: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`
"""
if not isinstance(value, QuantDescriptor):
raise ValueError("{} is not an instance of QuantDescriptor!")
cls.default_quant_desc_input = copy.deepcopy(value)
@classmethod
def set_default_quant_desc_weight(cls, value):
"""
Args:
value: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`
"""
if not isinstance(value, QuantDescriptor):
raise ValueError("{} is not an instance of QuantDescriptor!")
cls.default_quant_desc_weight = copy.deepcopy(value)
def init_quantizer(self, quant_desc_input, quant_desc_weight, num_layers=None):
"""Helper function for __init__ of quantized module
Create input and weight quantizer based on quant_desc passed by kwargs, or default of the class.
Args:
quant_desc_input: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`
quant_desc_weight: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`
num_layers: An integer. Default None. If not None, create a list of quantizers.
"""
if not inspect.stack()[1].function == "__init__":
raise TypeError("{} should be only called by __init__ of quantized module.".format(__name__))
self._fake_quant = True
if (not quant_desc_input.fake_quant) or (not quant_desc_weight.fake_quant):
raise ValueError("Only fake quantization is supported!")
logging.info("Input is %squantized to %d bits in %s with axis %s!", ""
if not quant_desc_input.fake_quant else "fake ",
quant_desc_input.num_bits, self.__class__.__name__, quant_desc_input.axis)
logging.info("Weight is %squantized to %d bits in %s with axis %s!", ""
if not quant_desc_weight.fake_quant else "fake ",
quant_desc_weight.num_bits, self.__class__.__name__, quant_desc_weight.axis)
if num_layers is None:
self._input_quantizer = TensorQuantizer(quant_desc_input)
self._weight_quantizer = TensorQuantizer(quant_desc_weight)
else:
self._input_quantizers = nn.ModuleList([TensorQuantizer(quant_desc_input) for _ in range(num_layers)])
self._weight_quantizers = nn.ModuleList([TensorQuantizer(quant_desc_weight) for _ in range(num_layers)])
# pylint:disable=missing-docstring
@property
def input_quantizer(self):
return self._input_quantizer
@property
def weight_quantizer(self):
return self._weight_quantizer
# pylint:enable=missing-docstring
class QuantInputMixin():
"""Mixin class for adding basic quantization logic to quantized modules"""
default_quant_desc_input = QUANT_DESC_8BIT_PER_TENSOR
@classmethod
def set_default_quant_desc_input(cls, value):
"""
Args:
value: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`
"""
if not isinstance(value, QuantDescriptor):
raise ValueError("{} is not an instance of QuantDescriptor!")
cls.default_quant_desc_input = copy.deepcopy(value)
def init_quantizer(self, quant_desc_input):
"""Helper function for __init__ of simple quantized module
Create input quantizer based on quant_desc passed by kwargs, or default of the class.
Args:
quant_desc_input: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`
"""
if not inspect.stack()[1].function == "__init__":
raise TypeError("{} should be only called by __init__ of quantized module.".format(__name__))
self._fake_quant = True
if not quant_desc_input.fake_quant:
raise ValueError("Only fake quantization is supported!")
logging.info("Input is %squantized to %d bits in %s with axis %s!", ""
if not quant_desc_input.fake_quant else "fake ",
quant_desc_input.num_bits, self.__class__.__name__, quant_desc_input.axis)
self._input_quantizer = TensorQuantizer(quant_desc_input)
# pylint:disable=missing-docstring
@property
def input_quantizer(self):
return self._input_quantizer
# pylint:enable=missing-docstring
def pop_quant_desc_in_kwargs(quant_cls, input_only=False, **kwargs):
"""Pop quant descriptors in kwargs
If there is no descriptor in kwargs, the default one in quant_cls will be used
Arguments:
quant_cls: A class that has default quantization descriptors
input_only: A boolean. If True, pop quant_desc_input only, not quant_desc_weight. Default false.
Keyword Arguments:
quant_desc_input: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of input.
quant_desc_weight: An instance of :class:`QuantDescriptor <pytorch_quantization.tensor_quant.QuantDescriptor>`.
Quantization descriptor of weight.
"""
quant_desc_input = kwargs.pop('quant_desc_input', quant_cls.default_quant_desc_input)
if not input_only:
quant_desc_weight = kwargs.pop('quant_desc_weight', quant_cls.default_quant_desc_weight)
# Check if anything is left in **kwargs
if kwargs:
raise TypeError("Unused keys: {}".format(kwargs.keys()))
if input_only:
return quant_desc_input
return quant_desc_input, quant_desc_weight
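# Minimal sketch of how a quantized module's __init__ consumes these helpers. The _DemoMixin
# class below is a hypothetical holder used only to supply class-level defaults; it is not
# part of the library. Guarded so importing this module has no side effects.
if __name__ == "__main__":
    class _DemoMixin(QuantMixin):
        pass
    quant_desc_input, quant_desc_weight = pop_quant_desc_in_kwargs(
        _DemoMixin, quant_desc_weight=QuantDescriptor(axis=0))
    print(quant_desc_input.num_bits, quant_desc_weight.axis)  # expected: 8 0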
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/nn/modules/_utils.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/optim/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper functions for quant optimizer/trainer"""
import re
from absl import logging
def match_parameters(model, patterns):
"""Returns an generator over module parameters if name matches key
It is useful to group parameters, and apply different functions to different group. This function provides an easy
way to group them.
Args:
model: A Module
patterns: A list of strings that will be used to match parameter names. If a parameter name contains any pattern,
it will be yielded
Yields:
param: Module parameters
"""
for name, param in model.named_parameters():
for pattern in patterns:
if re.search(pattern, name):
yield param
def group_parameters(model, patterns_list, lrs=None, momentums=None, weight_decays=None):
"""Group parameters for using per-parameters option in optimizer
Returns a list of dict that matches Pytorch optimizer fashion, see
https://pytorch.org/docs/stable/optim.html#per-parameter-options for more details.
Example:
>>> [
>>> {'params': model.base.parameters()},
>>> {'params': model.classifier.parameters(), 'lr': 1e-3}
>>> ]
Parameters will be grouped w.r.t. the first level of patterns_list, e.g. `patterns_list=[['conv1', 'conv2'], ['conv3']]` will
return 2 groups, one with `conv1` and `conv2` in name, and the other with `conv3` in name.
If lr, momentum or weight_decay are supplied, they will be added to the group as well.
Args:
model: A module
patterns_list: A list of lists of strings. WARNING: patterns must be EXCLUSIVE; the function doesn't
perform an exclusivity check.
lrs: A list of float with the same length as patterns_list, or None.
momentums: A list of float with the same length as patterns_list, or None.
weight_decays: A list of float with the same length as patterns_list, or None.
Returns:
param_group: A list of dict
"""
param_groups = []
for pattern in patterns_list:
if not isinstance(pattern, list):
raise TypeError("patterns_list must be list of list of patterns")
param_groups.append({'params': match_parameters(model, pattern)})
if lrs is not None:
if len(lrs) != len(patterns_list):
raise TypeError("len(lrs) must match len(patterns_list)")
for i, lr in enumerate(lrs):
param_groups[i]['lr'] = lr
if momentums is not None:
if len(momentums) != len(patterns_list):
raise TypeError("len(momentums) must match len(patterns_list)")
for i, momentum in enumerate(momentums):
param_groups[i]['momentum'] = momentum
if weight_decays is not None:
if len(weight_decays) != len(patterns_list):
raise TypeError("len(weight_decays) must match len(patterns_list)")
for i, weight_decay in enumerate(weight_decays):
param_groups[i]['weight_decay'] = weight_decay
return param_groups
def freeze_parameters(model, patterns):
"""Set requires_grad to False if patterns match name
Args:
model: A Module
patterns: A list of strings that will be used to match parameter names. If parameter name contains any pattern,
it will be frozen.
"""
for name, param in model.named_parameters():
for pattern in patterns:
if re.search(pattern, name):
logging.warning("Freeze %s.", name)
param.requires_grad = False
def quant_weight_inplace(model):
"""Make quantization inplace
Search for quantized modules including QuantConvNd and QuantLinear, make weight quantization in place using
weight_quantizer.
Most publications on quantization-aware training use STE by default, which is really an approximation of the
derivative of the non-differentiable quantization function; it works to some extent, but is by no means an exact
model of the problem.
Inplace quantization can be used to implement relax-and-round, which is a common method in discrete optimization
and integer programming.
"""
for name, module in model.named_modules():
if hasattr(module, '_weight_quantizer') and module.weight_quantizer is not None:
if not module.weight_quantizer.fake_quant:
logging.warning(("In-place real quantization is VERY dangerous and should be used for inference only. "
"Make sure that is the desired behavior."))
logging.warning("In-place quantize weight of %s", name)
module.weight.data.copy_(module.weight_quantizer(module.weight))
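# Minimal sketch tying the helpers together on a toy model (names and values are arbitrary
# assumptions). Guarded so importing this module has no side effects.
if __name__ == "__main__":
    from torch import nn, optim
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Linear(8, 4))
    # Put weights and biases into separate optimizer groups with different learning rates.
    groups = group_parameters(model, [["weight"], ["bias"]], lrs=[1e-2, 1e-3])
    optimizer = optim.SGD(groups, lr=1e-2)
    print(len(optimizer.param_groups))  # expected: 2
    # Freeze every bias parameter by name pattern.
    freeze_parameters(model, ["bias"])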
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/optim/helper.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A WAR for codes that messes up logging format"""
import logging
def reset_logger_handler():
"""Remove all handler in root logger"""
root_logger = logging.getLogger()
while root_logger.handlers:
root_logger.removeHandler(root_logger.handlers[0])
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/utils/quant_logging.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Main entry of all utils"""
from .reduce_amax import reduce_amax
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/utils/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Function to get absolute maximum of a tensor
Follow numpy fashion, which is more generic as pytorch's
"""
import torch
def reduce_amax(input, axis=None, keepdims=True):
"""Compute the absolute maximum value of a tensor.
Reduces input along the dimensions given in axis. Unless keepdims is true,
the rank of the tensor is reduced by 1 for each entry in axis. If keepdims is true,
the reduced dimensions are retained with length 1.
.. note::
Gradient computation is disabled, as this function is only meant to compute amax, never to learn it.
Args:
input: Input tensor
axis: The dimensions to reduce. None or int or tuple of ints. If None (the default),
reduces all dimensions. Must be in the range [-rank(input), rank(input)).
keepdims: A boolean. If true, retains reduced dimensions with length 1. Default True
granularity: DEPRECATED. Specifies whether the statistic is calculated at tensor or channel granularity
Returns:
The reduced tensor.
Raises:
ValueError: Any axis which doesn't make sense or is not supported
ValueError: If unknown granularity is passed in.
"""
with torch.no_grad():
output = input.abs()
if axis is None:
output = torch.max(output)
else:
if isinstance(axis, int):
output, _ = torch.max(output, dim=axis, keepdim=keepdims)
else:
if isinstance(axis, tuple) and len(axis) > input.dim():
raise ValueError("Cannot reduce more axes than tensor's dim.")
for i in axis:
output, _ = torch.max(output, dim=i, keepdim=True)
if not keepdims or output.numel() == 1:
output.squeeze_()
return output
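# Usage sketch (illustrative, shapes are assumptions):
#
#     x = torch.randn(2, 3, 4)
#     reduce_amax(x)                          # scalar: max(|x|) over all elements
#     reduce_amax(x, axis=(1, 2))             # shape (2, 1, 1): amax per slice along dim 0
#     reduce_amax(x, axis=0, keepdims=False)  # shape (3, 4)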
| TensorRT-master | tools/pytorch-quantization/pytorch_quantization/utils/reduce_amax.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from setuptools import find_packages, setup
import polygraphy
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
BIN_DIR = os.path.join(ROOT_DIR, "bin")
def no_publish():
blacklist = ["register"]
for cmd in blacklist:
if cmd in sys.argv:
raise RuntimeError('Command "{}" blacklisted'.format(cmd))
REQUIRED_PACKAGES = []
def main():
no_publish()
setup(
name="polygraphy",
version=polygraphy.__version__,
description="Polygraphy: A Deep Learning Inference Prototyping and Debugging Toolkit",
long_description=open("README.md", "r", encoding="utf-8").read(),
url="https://github.com/NVIDIA/TensorRT/tree/master/tools/Polygraphy",
author="NVIDIA",
author_email="[email protected]",
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
],
license="Apache 2.0",
install_requires=REQUIRED_PACKAGES,
packages=find_packages(exclude=("tests", "tests.*")),
scripts=[os.path.join(BIN_DIR, "polygraphy")],
zip_safe=True,
)
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/setup.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import polygraphy
import pytest
ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir))
class TestWheel(object):
def test_install(self, virtualenv):
with pytest.raises(Exception, match="returned non-zero exit"):
virtualenv.run(["python3", "-c", "import polygraphy"])
virtualenv.run(["make", "install"], cwd=ROOT_DIR)
# Check Python package is installed
assert "polygraphy" in virtualenv.installed_packages()
poly_pkg = virtualenv.installed_packages()["polygraphy"]
assert poly_pkg.version == polygraphy.__version__
# Check that we only package things we actually want.
# If tests are packaged, they'll end up in a higher-level directory.
assert not os.path.exists(os.path.join(poly_pkg.source_path, "tests"))
EXCLUDE_FILES = ["__pycache__"]
all_poly_files = glob.glob(os.path.join(poly_pkg.source_path, "polygraphy", "*"))
all_poly_files = [f for f in map(os.path.basename, all_poly_files) if f not in EXCLUDE_FILES]
# NOTE: This should be updated when new files are added to the top-level package.
EXPECTED_FILES = set(
[
"backend",
"mod",
"__init__.py",
"cuda",
"logger",
"constants.py",
"util",
"comparator",
"tools",
"exception",
"func",
"common",
"json",
"config.py",
]
)
assert set(all_poly_files) == EXPECTED_FILES
# Check CLI is installed
bin_path = virtualenv.virtualenv.dirs()[1]
poly_path = os.path.join(bin_path, "polygraphy")
assert os.path.exists(poly_path)
assert polygraphy.__version__ in virtualenv.run([poly_path, "-v"], capture=True)
lib_path = virtualenv.virtualenv.dirs()[0]
output = virtualenv.run(["polygraphy", "-v"], capture=True)
assert polygraphy.__version__ in output
assert lib_path in output # Make sure we're using the binary from the venv.
| TensorRT-master | tools/Polygraphy/tests/test_packaging.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import pytest
import tensorrt as trt
from polygraphy import mod, util
from polygraphy.mod.importer import _version_ok
from tests.models.meta import ONNX_MODELS
# The tests here ensure that no additional dependencies are introduced into
# the various modules under Polygraphy.
ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir))
@pytest.fixture()
def virtualenv_with_poly(virtualenv):
virtualenv.env["PYTHONPATH"] = ROOT_DIR
yield virtualenv
def is_submodule(path):
file_mod = os.path.isfile(path) and path.endswith(".py") and os.path.basename(path) != "__init__.py"
dir_mod = os.path.isdir(path) and os.path.isfile(os.path.join(path, "__init__.py"))
return file_mod or dir_mod
MODULE_PATH = os.path.join(ROOT_DIR, "polygraphy")
SUBMODULE_PATHS = [
os.path.relpath(os.path.splitext(path)[0], ROOT_DIR)
for path in glob.iglob(os.path.join(MODULE_PATH, "**"), recursive=True)
if is_submodule(path)
]
class TestPublicImports(object):
def test_no_extra_submodule_dependencies_required(self, virtualenv_with_poly):
# Submodules should not require any extra dependencies to import.
for submodule_path in SUBMODULE_PATHS:
submodule_name = ".".join(submodule_path.split(os.path.sep))
cmd = ["python3", "-c", "from {:} import *".format(submodule_name)]
print(" ".join(cmd))
output = virtualenv_with_poly.run(cmd, capture=True)
print(output)
def test_can_json_without_numpy(self, virtualenv_with_poly):
cmd = ["python3", "-c", "from polygraphy.json import to_json, from_json; x = to_json(1); x = from_json(x)"]
print(" ".join(cmd))
output = virtualenv_with_poly.run(cmd, capture=True)
print(output)
# CLI tools and all their subtools
TOOLS = {
"run": [],
"convert": [],
"inspect": ["data", "model", "tactics", "capability"],
"surgeon": ["extract", "insert", "sanitize"],
"template": ["trt-network"],
"debug": ["build", "precision", "diff-tactics", "reduce", "repeat"],
}
class TestToolImports(object):
# We should be able to at least launch tools with no dependencies installed.
@pytest.mark.parametrize("tool, subtools", TOOLS.items())
def test_can_run_tool_without_deps(self, virtualenv_with_poly, tool, subtools):
POLYGRAPHY_BIN = os.path.join(ROOT_DIR, "bin", "polygraphy")
BASE_TOOL_CMD = ["python3", POLYGRAPHY_BIN, tool, "-h"]
def check_tool(tool):
output = virtualenv_with_poly.run(tool, capture=True)
assert "This tool could not be loaded due to an error:" not in output
assert "error:" not in output
assert "could not be loaded" not in output
check_tool(BASE_TOOL_CMD)
for subtool in subtools:
check_tool(BASE_TOOL_CMD + [subtool])
class TestAutoinstallDeps(object):
@pytest.mark.parametrize(
"cmd",
[
["run", ONNX_MODELS["identity"].path, "--onnxrt"],
["run", ONNX_MODELS["identity"].path, "--trt"],
[
"surgeon",
"sanitize",
"--fold-constants",
ONNX_MODELS["const_foldable"].path,
"-o",
util.NamedTemporaryFile().name,
],
],
)
def test_can_automatically_install_deps(self, virtualenv_with_poly, cmd):
if "--trt" in cmd and mod.version(trt.__version__) < mod.version("7.0"):
pytest.skip("TRT 6 container has an old version of CUDA")
if "--trt" in cmd:
pytest.xfail("TensorRT 8.0.1.6 wheels are currently broken")
virtualenv_with_poly.env["POLYGRAPHY_AUTOINSTALL_DEPS"] = "1"
POLYGRAPHY_BIN = os.path.join(ROOT_DIR, "bin", "polygraphy")
cmd = ["python3", POLYGRAPHY_BIN] + cmd
print("Running: {:}".format(" ".join(cmd)))
output = virtualenv_with_poly.run(cmd, capture=True)
print(output)
assert "is required, but not installed. Attempting to install now" in output
@pytest.mark.parametrize(
"new_ver, expected",
[
("==1.4.2", "==1.4.2"),
(mod.LATEST_VERSION, ">=1.4.2"),
],
)
def test_can_automatically_upgrade_deps(self, virtualenv_with_poly, new_ver, expected):
virtualenv_with_poly.env["POLYGRAPHY_AUTOINSTALL_DEPS"] = "1"
def get_colored_version():
return virtualenv_with_poly.installed_packages()["colored"].version
virtualenv_with_poly.run(["python3", "-m", "pip", "install", "colored==1.4.0"])
assert get_colored_version() == "1.4.0"
# Insert our own preferred version to make sure it upgrades.
virtualenv_with_poly.run(
[
"python3",
"-c",
"from polygraphy import mod; "
"colored = mod.lazy_import('colored', version='{:}'); "
"print(colored.__version__)".format(new_ver),
]
)
assert _version_ok(get_colored_version(), expected)
# We can import inner modules, and Polygraphy should still autoinstall the outermost one.
def test_can_install_for_nested_import(self, virtualenv_with_poly):
virtualenv_with_poly.env["POLYGRAPHY_AUTOINSTALL_DEPS"] = "1"
virtualenv_with_poly.run(
[
"python3",
"-c",
"from polygraphy import mod; "
"shape_inference = mod.lazy_import('onnx.shape_inference'); "
"print(shape_inference.infer_shapes)",
]
)
assert "onnx" in virtualenv_with_poly.installed_packages()
def test_all_lazy_imports(self):
# NOTE: If this test fails, it means a new lazy dependency has been
# introduced. Please ensure that AUTOINSTALL continues to work with the
# new dependency.
expected = [
"numpy",
"onnx_graphsurgeon",
"onnx.external_data_helper",
"onnx.numpy_helper",
"onnx.shape_inference",
"onnx",
"onnxmltools",
"onnxruntime",
"tensorflow",
"tensorrt",
"tf2onnx",
]
assert mod.importer._all_external_lazy_imports == set(expected)
| TensorRT-master | tools/Polygraphy/tests/test_deps.py |
| TensorRT-master | tools/Polygraphy/tests/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class TestCuda(object):
def test_cuda(self):
from polygraphy.common import cuda
assert cuda.DeviceArray
class TestFunc(object):
def test_func(self):
from polygraphy.common import func
assert hasattr(func, "extend")
class TestException(object):
def test_exception(self):
from polygraphy.common import exception
assert hasattr(exception, "PolygraphyException")
class TestConstants(object):
def test_constants(self):
from polygraphy.common import constants
assert constants.MARK_ALL
def test_config(self):
from polygraphy import constants
assert (constants.INTERNAL_CORRECTNESS_CHECKS, constants.AUTOINSTALL_DEPS)
class TestUtilJson(object):
def test_json(self):
from polygraphy.util import Decoder, Encoder, from_json, load_json, save_json, to_json
class TestCompareFunc(object):
def test_basic_compare_func(self):
from polygraphy.comparator import CompareFunc
CompareFunc.basic_compare_func(atol=1, rtol=1)
| TensorRT-master | tools/Polygraphy/tests/test_deprecated_aliases.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
def get_file_size(path):
return os.stat(path).st_size
def is_file_empty(path):
return get_file_size(path) == 0
def is_file_non_empty(path):
return not is_file_empty(path)
def time_func(func, warm_up=10, iters=100):
for _ in range(warm_up):
func()
total = 0
for _ in range(iters):
start = time.time()
func()
end = time.time()
total += end - start
return total / float(iters)
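# Usage sketch (illustrative): average wall-clock seconds of an arbitrary callable.
#
#     avg_seconds = time_func(lambda: sorted(range(10000)), warm_up=2, iters=10)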
| TensorRT-master | tools/Polygraphy/tests/helper.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import os
import shutil
import subprocess as sp
from textwrap import dedent
import pytest
import tensorrt as trt
from polygraphy import mod
from polygraphy.logger import G_LOGGER
ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir))
EXAMPLES_ROOT = os.path.join(ROOT_DIR, "examples")
IGNORE_START_MARKER = "<!-- Polygraphy Test: Ignore Start -->"
IGNORE_STOP_MARKER = "<!-- Polygraphy Test: Ignore End -->"
# Extract any ``` blocks from the README
# Each block is stored as a separate string in the returned list
def load_code_blocks_from_readme(readme):
with open(readme, "r") as f:
contents = f.read()
# Check that the README has all the expected sections.
assert "## Introduction" in contents, "All example READMEs should have an 'Introduction' section!"
assert "## Running The Example" in contents, "All example READMEs should have a 'Running The Example' section!"
commands = []
with open(readme, "r") as f:
in_command_block = False
should_ignore = False
block = []
for line in f.readlines():
if line.strip() == IGNORE_START_MARKER:
should_ignore = True
elif line.strip() == IGNORE_STOP_MARKER:
should_ignore = False
if should_ignore:
continue
if not in_command_block and "```" in line:
block = [line.rstrip()]
in_command_block = True
elif in_command_block:
if "```" in line:
in_command_block = False
commands.append(copy.copy(block) + [line.rstrip()])
else:
block.append(line.rstrip())
# commands is List[List[str]] - flatten and remove start/end markers:
commands = [dedent("\n".join(block[1:-1])) for block in commands]
return commands
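# Usage sketch (illustrative, path is an assumption): for a README containing a single
# ``` block whose body is "polygraphy run model.onnx --trt", this returns
# ["polygraphy run model.onnx --trt"].
#
#     commands = load_code_blocks_from_readme("examples/cli/run/01_comparing_frameworks/README.md")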
class Example(object):
def __init__(self, path_components, artifact_names=[]):
self.path = os.path.join(EXAMPLES_ROOT, *path_components)
self.artifacts = [os.path.join(self.path, name) for name in artifact_names]
def __enter__(self):
readme = os.path.join(self.path, "README.md")
return load_code_blocks_from_readme(readme)
def run(self, command):
G_LOGGER.info("Running: {:} from cwd: {:}".format(command, self.path))
env = copy.copy(os.environ)
env["PYTHONPATH"] = ROOT_DIR
env["PATH"] = os.path.join(ROOT_DIR, "bin") + os.path.pathsep + env["PATH"]
# Remove whitespace args and escaped newlines
command = [arg for arg in command.strip().split(" ") if arg.strip() and arg != "\\\n"]
print("Running: {:}".format(" ".join(command)))
status = sp.run(command, cwd=self.path, env=env, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)
print(status.stdout)
print(status.stderr)
assert status.returncode == 0, status.stdout + "\n" + status.stderr
return status
def __exit__(self, exc_type, exc_value, traceback):
"""
Checks for and removes artifacts expected by this example
"""
for artifact in self.artifacts:
print("Checking for the existence of artifact: {:}".format(artifact))
assert os.path.exists(artifact), "{:} does not exist!".format(artifact)
if os.path.isdir(artifact):
shutil.rmtree(artifact)
else:
os.remove(artifact)
def __str__(self):
return os.path.relpath(self.path, EXAMPLES_ROOT)
API_EXAMPLES = [
Example(["api", "00_inference_with_tensorrt"], artifact_names=["identity.engine"]),
Example(["api", "01_comparing_frameworks"], artifact_names=["inference_results.json"]),
Example(["api", "02_validating_on_a_dataset"]),
Example(["api", "03_interoperating_with_tensorrt"]),
Example(["api", "04_int8_calibration_in_tensorrt"], artifact_names=["identity-calib.cache"]),
Example(["api", "05_using_tensorrt_network_api"]),
Example(["api", "06_immediate_eval_api"], artifact_names=["identity.engine"]),
Example(["api", "07_tensorrt_and_dynamic_shapes"], artifact_names=["dynamic_identity.engine"]),
]
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.parametrize("example", API_EXAMPLES, ids=lambda case: str(case))
def test_api_examples(example):
if mod.version(trt.__version__) < mod.version("8.0") and (example.path.endswith("07_tensorrt_and_dynamic_shapes")):
pytest.skip("Not intended for older versions of TRT")
with example as commands:
for command in commands:
example.run(command)
CLI_EXAMPLES = [
# Run
Example(["cli", "run", "01_comparing_frameworks"]),
Example(["cli", "run", "02_comparing_across_runs"], artifact_names=["system_a_results.json"]),
Example(["cli", "run", "03_generating_a_comparison_script"], artifact_names=["compare_trt_onnxrt.py"]),
Example(
["cli", "run", "04_defining_a_tensorrt_network_or_config_manually"],
artifact_names=["my_define_network.py", "my_create_config.py"],
),
Example(["cli", "run", "05_comparing_with_custom_data"]),
# Convert
Example(["cli", "convert", "01_int8_calibration_in_tensorrt"], artifact_names=["identity.engine"]),
Example(
["cli", "convert", "02_deterministic_engine_builds_in_tensorrt"],
artifact_names=["0.engine", "1.engine", "replay.json"],
),
Example(["cli", "convert", "03_dynamic_shapes_in_tensorrt"], artifact_names=["dynamic_identity.engine"]),
# Surgeon
Example(["cli", "surgeon", "01_isolating_subgraphs"], artifact_names=["subgraph.onnx"]),
Example(["cli", "surgeon", "02_folding_constants"], artifact_names=["folded.onnx"]),
Example(["cli", "surgeon", "03_modifying_input_shapes"], artifact_names=["dynamic_identity.onnx"]),
# Debug
Example(["cli", "debug", "01_debugging_flaky_trt_tactics"], artifact_names=["replays", "golden.json"]),
Example(
["cli", "debug", "02_reducing_failing_onnx_models"],
artifact_names=[
"inputs.json",
"layerwise_golden.json",
"layerwise_inputs.json",
"initial_reduced.onnx",
"final_reduced.onnx",
],
),
]
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.parametrize("example", CLI_EXAMPLES, ids=lambda case: str(case))
def test_cli_examples(example):
if mod.version(trt.__version__) < mod.version("8.0") and (
example.path.endswith("01_debugging_flaky_trt_tactics")
or example.path.endswith("02_deterministic_engine_builds_in_tensorrt")
):
pytest.skip("Tactic replays are not supported on older versions of TRT")
with example as commands:
for command in commands:
example.run(command)
CLI_INSPECT_EXAMPLES = [
Example(["cli", "inspect", "01_inspecting_a_tensorrt_network"]),
Example(["cli", "inspect", "02_inspecting_a_tensorrt_engine"], artifact_names=["dynamic_identity.engine"]),
Example(["cli", "inspect", "03_inspecting_an_onnx_model"]),
Example(["cli", "inspect", "04_inspecting_a_tensorflow_graph"]),
Example(["cli", "inspect", "05_inspecting_inference_outputs"], artifact_names=["outputs.json"]),
Example(["cli", "inspect", "06_inspecting_input_data"], artifact_names=["inputs.json"]),
]
if mod.version(trt.__version__) >= mod.version("8.0"):
CLI_INSPECT_EXAMPLES += [
Example(["cli", "inspect", "07_inspecting_tactic_replays"], artifact_names=["replay.json"]),
]
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.parametrize("example", CLI_INSPECT_EXAMPLES, ids=lambda case: str(case))
def test_cli_inspect_examples(example):
# Last block should be the expected output, and last command should generate it.
with example as blocks:
commands, expected_output = blocks[:-1], blocks[-1]
for command in commands:
actual_output = example.run(command).stdout
print(actual_output)
# Makes reading the diff way easier
actual_lines = [line for line in actual_output.splitlines() if "[I] Loading " not in line and "[W] " not in line]
expected_lines = expected_output.splitlines()
assert len(actual_lines) == len(expected_lines)
# Indicates lines that may not match exactly
NON_EXACT_LINE_MARKERS = ["---- ", " Layer", " Algorithm:"]
for index, (actual_line, expected_line) in enumerate(zip(actual_lines, expected_lines)):
# Skip whitespace, and lines that include runner names (since those have timestamps)
if expected_line.strip() and all([marker not in expected_line for marker in NON_EXACT_LINE_MARKERS]):
print("Checking line {:}: {:}".format(index, expected_line))
assert actual_line == expected_line
DEV_EXAMPLES = [
Example(["dev", "01_writing_cli_tools"], artifact_names=["data.json"]),
]
@pytest.mark.parametrize("example", DEV_EXAMPLES, ids=lambda case: str(case))
def test_dev_examples(example):
with example as commands:
for command in commands:
example.run(command)
| TensorRT-master | tools/Polygraphy/tests/test_examples.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import numpy as np
from polygraphy.common import TensorMetadata
from polygraphy.comparator import DataLoader
from polygraphy.comparator.data_loader import DataLoaderCache
from tests.models.meta import ONNX_MODELS
import pytest
def meta(dtype):
return TensorMetadata().add("X", dtype=dtype, shape=(4, 4)).add("Y", dtype=dtype, shape=(5, 5))
class TestDataLoader(object):
@pytest.mark.parametrize("dtype", [np.int32, np.bool, np.float32, np.int64])
def test_default_ranges(self, dtype):
data_loader = DataLoader(input_metadata=meta(dtype))
x, y = data_loader[0].values()
assert np.all((x >= 0) & (x <= 1))
assert np.all((y >= 0) & (y <= 1))
def test_can_override_shape(self):
model = ONNX_MODELS["dynamic_identity"]
shape = (1, 1, 4, 5)
custom_input_metadata = TensorMetadata().add("X", dtype=None, shape=shape)
data_loader = DataLoader(input_metadata=custom_input_metadata)
# Simulate what the comparator does
data_loader.input_metadata = model.input_metadata
feed_dict = data_loader[0]
assert tuple(feed_dict["X"].shape) == shape
@pytest.mark.parametrize("dtype", [np.int32, np.bool, np.float32, np.int64])
@pytest.mark.parametrize("range_val", [0, 1])
def test_range_min_max_equal(self, dtype, range_val):
data_loader = DataLoader(input_metadata=meta(dtype), val_range=(range_val, range_val))
feed_dict = data_loader[0]
assert np.all(feed_dict["X"] == range_val)
assert np.all(feed_dict["Y"] == range_val)
@pytest.mark.parametrize(
"range",
[
(0, 1, np.int32),
(5.0, 5.5, np.float32),
(0, 1, np.bool),
],
)
def test_val_ranges(self, range):
min_val, max_val, dtype = range
data_loader = DataLoader(input_metadata=meta(dtype), val_range=(min_val, max_val))
feed_dict = data_loader[0]
assert np.all((feed_dict["X"] >= min_val) & (feed_dict["X"] <= max_val))
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32])
def test_val_range_dict(self, dtype):
val_range = {"X": (2, 5), "Y": (-1, 2)}
data_loader = DataLoader(input_metadata=meta(dtype), val_range=val_range)
feed_dict = data_loader[0]
assert np.all((feed_dict["X"] >= 2) & (feed_dict["X"] <= 5))
assert np.all((feed_dict["Y"] >= -1) & (feed_dict["Y"] <= 2))
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32])
def test_val_range_dict_default(self, dtype):
val_range = {"": (6, 8), "Y": (-3, 4)}
data_loader = DataLoader(input_metadata=meta(dtype), val_range=val_range)
feed_dict = data_loader[0]
assert np.all((feed_dict["X"] >= 6) & (feed_dict["X"] <= 8))
assert np.all((feed_dict["Y"] >= -3) & (feed_dict["Y"] <= 4))
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32])
def test_val_range_dict_fallback(self, dtype):
val_range = {"Y": (-3, 4)}
data_loader = DataLoader(input_metadata=meta(dtype), val_range=val_range)
feed_dict = data_loader[0]
assert np.all((feed_dict["X"] >= 0) & (feed_dict["X"] <= 1))
assert np.all((feed_dict["Y"] >= -3) & (feed_dict["Y"] <= 4))
def test_shape_tensor_detected(self):
INPUT_DATA = (1, 2, 3)
input_meta = TensorMetadata().add("X", dtype=np.int32, shape=(3,))
# This contains the shape values
overriden_meta = TensorMetadata().add("X", dtype=np.int32, shape=INPUT_DATA)
data_loader = DataLoader(input_metadata=overriden_meta)
data_loader.input_metadata = input_meta
feed_dict = data_loader[0]
assert np.all(feed_dict["X"] == INPUT_DATA) # values become INPUT_DATA
def test_no_shape_tensor_false_positive_negative_dims(self):
INPUT_DATA = (-100, 2, 4)
# This should NOT be detected as a shape tensor
input_meta = TensorMetadata().add("X", dtype=np.int32, shape=(3,))
overriden_meta = TensorMetadata().add("X", dtype=np.int32, shape=INPUT_DATA)
data_loader = DataLoader(input_metadata=overriden_meta)
data_loader.input_metadata = input_meta
feed_dict = data_loader[0]
assert feed_dict["X"].shape == (3,) # Shape IS (3, ), because this is NOT a shape tensor
assert np.any(
feed_dict["X"] != INPUT_DATA
) # Contents are not INPUT_DATA, since it's not treated as a shape value
def test_no_shape_tensor_false_positive_float(self):
INPUT_DATA = (-100, -50, 0)
# Float cannot be a shape tensor
input_meta = TensorMetadata().add("X", dtype=np.float32, shape=(3,))
overriden_meta = TensorMetadata().add("X", dtype=np.float32, shape=INPUT_DATA)
data_loader = DataLoader(input_metadata=overriden_meta)
data_loader.input_metadata = input_meta
feed_dict = data_loader[0]
assert feed_dict["X"].shape == (3,) # Values are NOT (3, )
assert np.any(feed_dict["X"] != INPUT_DATA) # Values are NOT (3, )
def test_non_user_provided_inputs_never_shape_tensors(self):
# If the user didn't provide metadata, then the value can never be a shape tensor.
input_meta = TensorMetadata().add("X", dtype=np.int32, shape=(3,))
data_loader = DataLoader()
data_loader.input_metadata = input_meta
feed_dict = data_loader[0]
assert feed_dict["X"].shape == (3,) # Treat as a normal tensor
class TestDataLoaderCache(object):
def test_can_cast_dtype(self):
# Ensure that the data loader can only be used once
def load_data():
yield {"X": np.ones((1, 1), dtype=np.float32)}
cache = DataLoaderCache(load_data())
fp32_meta = TensorMetadata().add("X", dtype=np.float32, shape=(1, 1))
cache.set_input_metadata(fp32_meta)
feed_dict = cache[0]
assert feed_dict["X"].dtype == np.float32
fp64_meta = TensorMetadata().add("X", dtype=np.float64, shape=(1, 1))
cache.set_input_metadata(fp64_meta)
feed_dict = cache[0]
assert feed_dict["X"].dtype == np.float64
# If one input isn't in the cache, we shouldn't give up looking
# for other inputs
def test_will_not_give_up_on_first_cache_miss(self):
SHAPE = (32, 32)
DATA = [OrderedDict()]
DATA[0]["X"] = np.zeros(SHAPE, dtype=np.int64)
DATA[0]["Y"] = np.zeros(SHAPE, dtype=np.int64)
cache = DataLoaderCache(DATA)
cache.set_input_metadata(TensorMetadata().add("X", np.int64, shape=SHAPE).add("Y", np.int64, SHAPE))
# Populate the cache with bad X but good Y
cache.cache[0] = OrderedDict()
cache.cache[0]["X"] = np.ones((64, 64), dtype=np.int64)
cache.cache[0]["Y"] = np.ones(SHAPE, dtype=np.int64)
feed_dict = cache[0]
# Cache cannot reuse X, so it'll reload - we'll get all 0s from the data loader
assert np.all(feed_dict["X"] == 0)
# Cache can reuse Y, even though it's after X, so we'll get ones from the cache
assert np.all(feed_dict["Y"] == 1)
# The cache should ignore extra data generated by the data loader
def test_ignores_extra_data(self):
SHAPE = (32, 32)
DATA = [OrderedDict()]
DATA[0]["X"] = np.zeros(SHAPE, dtype=np.int64)
DATA[0]["Y"] = np.zeros(SHAPE, dtype=np.int64)
cache = DataLoaderCache(DATA)
cache.set_input_metadata(TensorMetadata().add("X", np.int64, shape=SHAPE))
feed_dict = cache[0]
assert list(feed_dict.keys()) == ["X"]
assert np.all(feed_dict["X"] == 0)
| TensorRT-master | tools/Polygraphy/tests/comparator/test_data_loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from polygraphy.comparator import PostprocessFunc, IterationResult
class TestTopK(object):
def test_basic(self):
arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
func = PostprocessFunc.topk_func(k=3)
top_k = func(IterationResult({"x": arr}))
assert np.all(top_k["x"] == [4, 3, 2])
def test_k_can_exceed_array_len(self):
arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
func = PostprocessFunc.topk_func(k=10)
top_k = func(IterationResult({"x": arr}))
assert np.all(top_k["x"] == [4, 3, 2, 1, 0])
def test_per_output_top_k(self):
arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
func = PostprocessFunc.topk_func(k={"": 10, "y": 2})
top_k = func(IterationResult({"x": arr, "y": arr}))
assert np.all(top_k["x"] == [4, 3, 2, 1, 0])
assert np.all(top_k["y"] == [4, 3])
| TensorRT-master | tools/Polygraphy/tests/comparator/test_postprocess.py |
| TensorRT-master | tools/Polygraphy/tests/comparator/__init__.py |
import numpy as np
import pytest
import contextlib
from polygraphy import config
from polygraphy.comparator import IterationResult, RunResults
from polygraphy.comparator.struct import LazyNumpyArray
from polygraphy.exception import PolygraphyException
def make_iter_results(runner_name):
return [IterationResult(outputs={"dummy_out": np.zeros((4, 4))}, runner_name=runner_name)] * 2
@pytest.fixture(scope="session")
def run_results():
results = RunResults()
results.append(("runner0", make_iter_results("runner0")))
results.append(("runner1", make_iter_results("runner1")))
return results
class TestRunResults(object):
def test_items(self, run_results):
for name, iteration_results in run_results.items():
assert isinstance(name, str)
assert isinstance(iteration_results, list)
for iter_res in iteration_results:
assert isinstance(iter_res, IterationResult)
def test_keys(self, run_results):
assert list(run_results.keys()) == ["runner0", "runner1"]
def test_values(self, run_results):
for iteration_results in run_results.values():
for iter_res in iteration_results:
assert isinstance(iter_res, IterationResult)
def test_getitem(self, run_results):
assert isinstance(run_results["runner0"][0], IterationResult)
assert isinstance(run_results[0][1][0], IterationResult)
assert run_results[0][1] == run_results["runner0"]
assert run_results[1][1] == run_results["runner1"]
def test_getitem_out_of_bounds(self, run_results):
with pytest.raises(IndexError):
run_results[2]
with pytest.raises(PolygraphyException, match="does not exist in this"):
run_results["runner2"]
def test_setitem(self, run_results):
def check_results(results, is_none=False):
for iter_res in results["runner1"]:
if is_none:
assert not iter_res
assert iter_res.runner_name == ""
else:
assert iter_res
assert iter_res.runner_name
check_results(run_results)
iter_results = [IterationResult(outputs=None, runner_name=None)]
run_results["runner1"] = iter_results
check_results(run_results, is_none=True)
def test_setitem_out_of_bounds(self, run_results):
iter_results = [IterationResult(outputs=None, runner_name="new")]
run_results["runner2"] = iter_results
assert len(run_results) == 3
assert run_results["runner2"][0].runner_name == "new"
def test_contains(self, run_results):
assert "runner0" in run_results
assert "runner1" in run_results
assert "runner3" not in run_results
class TestLazyNumpyArray(object):
@pytest.mark.parametrize("set_threshold", [True, False])
def test_unswapped_array(self, set_threshold):
with contextlib.ExitStack() as stack:
if set_threshold:
def reset_array_swap():
config.ARRAY_SWAP_THRESHOLD_MB = -1
stack.callback(reset_array_swap)
config.ARRAY_SWAP_THRESHOLD_MB = 8
small_shape = (7 * 1024 * 1024,)
small_array = np.ones(shape=small_shape, dtype=np.byte)
lazy = LazyNumpyArray(small_array)
assert np.array_equal(small_array, lazy.arr)
assert lazy.tmpfile is None
assert np.array_equal(small_array, lazy.numpy())
def test_swapped_array(self):
with contextlib.ExitStack() as stack:
def reset_array_swap():
config.ARRAY_SWAP_THRESHOLD_MB = -1
stack.callback(reset_array_swap)
config.ARRAY_SWAP_THRESHOLD_MB = 8
large_shape = (9 * 1024 * 1024,)
large_array = np.ones(shape=large_shape, dtype=np.byte)
lazy = LazyNumpyArray(large_array)
assert lazy.arr is None
assert lazy.tmpfile is not None
assert np.array_equal(large_array, lazy.numpy())
| TensorRT-master | tools/Polygraphy/tests/comparator/test_struct.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from polygraphy import util
from polygraphy.comparator import CompareFunc, IterationResult
from polygraphy.exception import PolygraphyException
from polygraphy.logger import G_LOGGER
class TestBasicCompareFunc(object):
def test_can_compare_bool(self):
iter_result0 = IterationResult(outputs={"output": np.zeros((4, 4), dtype=np.bool)})
iter_result1 = IterationResult(outputs={"output": np.ones((4, 4), dtype=np.bool)})
compare_func = CompareFunc.simple()
acc = compare_func(iter_result0, iter_result1)
assert not acc["output"]
@pytest.mark.parametrize("mode", ["abs", "rel"])
def test_per_output_tol(self, mode):
OUT0_NAME = "output0"
OUT1_NAME = "output1"
OUT_VALS = np.ones((4, 4))
iter_result0 = IterationResult(outputs={OUT0_NAME: OUT_VALS, OUT1_NAME: OUT_VALS})
iter_result1 = IterationResult(outputs={OUT0_NAME: OUT_VALS, OUT1_NAME: OUT_VALS + 1})
# With default tolerances, out1 is wrong for the second result.
compare_func = CompareFunc.simple()
acc = compare_func(iter_result0, iter_result1)
assert acc[OUT0_NAME]
assert not acc[OUT1_NAME]
# But with custom tolerances, it should pass.
tols = {
OUT0_NAME: 0.0,
OUT1_NAME: 1.0,
}
if mode == "abs":
compare_func = CompareFunc.simple(atol=tols)
else:
compare_func = CompareFunc.simple(rtol=tols)
acc = compare_func(iter_result0, iter_result1)
assert acc[OUT0_NAME]
assert acc[OUT1_NAME]
@pytest.mark.parametrize("mode", ["abs", "rel"])
def test_per_output_tol_fallback(self, mode):
OUT0_NAME = "output0"
OUT1_NAME = "output1"
OUT_VALS = np.ones((4, 4))
iter_result0 = IterationResult(outputs={OUT0_NAME: OUT_VALS + 1, OUT1_NAME: OUT_VALS})
iter_result1 = IterationResult(outputs={OUT0_NAME: OUT_VALS, OUT1_NAME: OUT_VALS + 1})
acc = CompareFunc.simple()(iter_result0, iter_result1)
assert not acc[OUT0_NAME]
assert not acc[OUT1_NAME]
# Do not specify tolerance for OUT0_NAME - it should fail with fallback tolerance
tols = {
OUT1_NAME: 1.0,
}
if mode == "abs":
compare_func = CompareFunc.simple(atol=tols)
else:
compare_func = CompareFunc.simple(rtol=tols)
acc = compare_func(iter_result0, iter_result1)
assert not acc[OUT0_NAME]
assert acc[OUT1_NAME]
@pytest.mark.parametrize("mode", ["abs", "rel"])
def test_default_tol_in_map(self, mode):
# "" can be used to indicate a global tolerance
OUT0_NAME = "output0"
OUT_VALS = np.ones((4, 4))
iter_result0 = IterationResult(outputs={OUT0_NAME: OUT_VALS})
iter_result1 = IterationResult(outputs={OUT0_NAME: OUT_VALS + 1})
tols = {
"": 1.0,
}
if mode == "abs":
compare_func = CompareFunc.simple(atol=tols)
else:
compare_func = CompareFunc.simple(rtol=tols)
acc = compare_func(iter_result0, iter_result1)
assert acc[OUT0_NAME]
@pytest.mark.parametrize(
"shape",
[
tuple(),
(0, 2, 1, 2),
(1,),
(2, 2, 2, 2),
],
)
def test_non_matching_outputs(self, shape):
iter_result0 = IterationResult(outputs={"output": np.zeros(shape, dtype=np.float32)})
iter_result1 = IterationResult(outputs={"output": np.ones(shape, dtype=np.float32)})
compare_func = CompareFunc.simple()
with G_LOGGER.verbosity(G_LOGGER.ULTRA_VERBOSE):
acc = compare_func(iter_result0, iter_result1)
assert util.is_empty_shape(shape) or not acc["output"]
@pytest.mark.parametrize("check_error_stat", ["max", "median", "mean", "elemwise"])
@pytest.mark.parametrize(
"func",
[
np.zeros,
np.ones,
],
)
def test_check_error_stat(self, func, check_error_stat):
iter_result0 = IterationResult(outputs={"output": func((100,), dtype=np.float32)})
iter_result1 = IterationResult(outputs={"output": func((100,), dtype=np.float32)})
iter_result0["output"][0] += 100
# Even though the max diff is 100, atol=1 should cause this to pass since we're checking
# against the mean error.
compare_func = CompareFunc.simple(check_error_stat=check_error_stat, atol=1)
if check_error_stat in ["max", "elemwise"]:
assert not compare_func(iter_result0, iter_result1)["output"]
else:
assert compare_func(iter_result0, iter_result1)["output"]
@pytest.mark.parametrize("check_error_stat", ["max", "median", "mean", "elemwise"])
def test_atol_rtol_either_pass(self, check_error_stat):
# If either rtol/atol is sufficient, the compare_func should pass
res0 = IterationResult(outputs={"output": np.array([1, 2], dtype=np.float32)})
res1 = IterationResult(outputs={"output": np.array((1.25, 2.5), dtype=np.float32)})
assert not CompareFunc.simple(check_error_stat=check_error_stat)(res0, res1)["output"]
assert CompareFunc.simple(check_error_stat=check_error_stat, rtol=0.25)(res0, res1)["output"]
assert CompareFunc.simple(check_error_stat=check_error_stat, atol=0.5)(res0, res1)["output"]
def test_atol_rtol_combined_pass(self):
# We should also be able to mix them - i.e. rtol might enough for some, atol for others.
# If they cover the entire output range, it should pass.
res0 = IterationResult(outputs={"output": np.array([0, 1, 2, 3], dtype=np.float32)})
res1 = IterationResult(outputs={"output": np.array((0.15, 1.25, 2.5, 3.75), dtype=np.float32)})
assert not CompareFunc.simple()(res0, res1)["output"]
assert not CompareFunc.simple(atol=0.3)(res0, res1)["output"]
assert not CompareFunc.simple(rtol=0.25)(res0, res1)["output"]
assert CompareFunc.simple(atol=0.3, rtol=0.25)(res0, res1)["output"]
@pytest.mark.parametrize(
"check_error_stat",
[
{"output0": "mean", "output1": "max"},
{"": "mean", "output1": "elemwise"},
{"output0": "mean"},
{"": "mean"},
],
)
def test_per_output_error_stat(self, check_error_stat):
# output0 will only pass when using check_error_stat=mean
res0 = IterationResult(
outputs={
"output0": np.array([0, 1, 2, 3], dtype=np.float32),
"output1": np.array([0, 1, 2, 3], dtype=np.float32),
}
)
res1 = IterationResult(
outputs={
"output0": np.array((0.15, 1.25, 2.5, 3.75), dtype=np.float32),
"output1": np.array((0, 1, 2, 3), dtype=np.float32),
}
)
atol = 0.4125
assert not CompareFunc.simple(atol=atol)(res0, res1)["output0"]
assert CompareFunc.simple(check_error_stat=check_error_stat, atol=atol)(res0, res1)["output0"]
assert CompareFunc.simple(check_error_stat=check_error_stat, atol=atol)(res0, res1)["output1"]
def test_invalid_error_stat(self):
res0 = IterationResult(outputs={"output": np.array([0, 1, 2, 3], dtype=np.float32)})
res1 = IterationResult(outputs={"output": np.array((0.15, 1.25, 2.5, 3.75), dtype=np.float32)})
with pytest.raises(PolygraphyException, match="Invalid choice"):
CompareFunc.simple(check_error_stat="invalid-stat")(res0, res1)
| TensorRT-master | tools/Polygraphy/tests/comparator/test_compare.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess as sp
import numpy as np
import pytest
import tensorrt as trt
from polygraphy.backend.onnx import BytesFromOnnx, OnnxFromTfGraph, GsFromOnnx
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.pluginref import PluginRefRunner
from polygraphy.backend.tf import SessionFromGraph, TfRunner
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxBytes, TrtRunner
from polygraphy.exception import PolygraphyException
from polygraphy.comparator import Comparator, CompareFunc, DataLoader, IterationResult, PostprocessFunc, RunResults
from polygraphy import mod
from tests.models.meta import ONNX_MODELS, TF_MODELS
class TestComparator(object):
def test_warmup_runs(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = OnnxrtRunner(SessionFromOnnx(onnx_loader))
run_results = Comparator.run([runner], warm_up=2)
assert len(run_results[runner.name]) == 1
def test_list_as_data_loader(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")
data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2
run_results = Comparator.run([runner], data_loader=data)
iter_results = run_results["onnx_runner"]
assert len(iter_results) == 2
for actual, expected in zip(iter_results, data):
assert np.all(actual["y"] == expected["x"])
def test_generator_as_data_loader(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")
def data():
for feed_dict in [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2:
yield feed_dict
run_results = Comparator.run([runner], data_loader=data())
iter_results = run_results["onnx_runner"]
assert len(iter_results) == 2
for actual, expected in zip(iter_results, data()):
assert np.all(actual["y"] == expected["x"])
def test_multiple_runners(self):
load_tf = TF_MODELS["identity"].loader
build_tf_session = SessionFromGraph(load_tf)
onnx_model = OnnxFromTfGraph(load_tf)
load_serialized_onnx = BytesFromOnnx(onnx_model)
build_onnxrt_session = SessionFromOnnx(load_serialized_onnx)
load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))
gs_graph = GsFromOnnx(onnx_model)
runners = [
TfRunner(build_tf_session),
OnnxrtRunner(build_onnxrt_session),
PluginRefRunner(gs_graph),
TrtRunner(load_engine),
]
run_results = Comparator.run(runners)
compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
assert len(list(run_results.values())[0]) == 1 # Default number of iterations
def test_postprocess(self):
onnx_loader = ONNX_MODELS["identity"].loader
run_results = Comparator.run([OnnxrtRunner(SessionFromOnnx(onnx_loader))], use_subprocess=True)
# Output shape is (1, 1, 2, 2)
postprocessed = Comparator.postprocess(run_results, postprocess_func=PostprocessFunc.topk_func(k=1, axis=-1))
for _, results in postprocessed.items():
for result in results:
for _, output in result.items():
assert output.shape == (1, 1, 2, 1)
def test_errors_do_not_hang(self):
# Should error because interface is not implemented correctly.
class FakeRunner(object):
def __init__(self):
self.name = "fake"
runners = [FakeRunner()]
with pytest.raises(PolygraphyException):
Comparator.run(runners, use_subprocess=True, subprocess_polling_interval=1)
def test_segfault_does_not_hang(self):
def raise_called_process_error():
class FakeSegfault(sp.CalledProcessError):
pass
raise FakeSegfault(-11, ["simulate", "segfault"])
runners = [TrtRunner(EngineFromNetwork(raise_called_process_error))]
with pytest.raises(PolygraphyException):
Comparator.run(runners, use_subprocess=True, subprocess_polling_interval=1)
def test_multirun_outputs_are_different(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(onnx_loader)))
run_results = Comparator.run([runner], data_loader=DataLoader(iterations=2))
iteration0 = run_results[runner.name][0]
iteration1 = run_results[runner.name][1]
for name in iteration0.keys():
assert np.any(iteration0[name] != iteration1[name])
def test_validate_nan(self):
run_results = RunResults()
run_results["fake-runner"] = [IterationResult(outputs={"x": np.array(np.nan)})]
assert not Comparator.validate(run_results)
def test_validate_inf(self):
run_results = RunResults()
run_results["fake-runner"] = [IterationResult(outputs={"x": np.array(np.inf)})]
assert not Comparator.validate(run_results, check_inf=True)
def test_dim_param_trt_onnxrt(self):
load_onnx_bytes = ONNX_MODELS["dim_param"].loader
build_onnxrt_session = SessionFromOnnx(load_onnx_bytes)
load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_onnx_bytes))
runners = [
OnnxrtRunner(build_onnxrt_session),
TrtRunner(load_engine),
]
run_results = Comparator.run(runners)
compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
assert len(list(run_results.values())[0]) == 1 # Default number of iterations
| TensorRT-master | tools/Polygraphy/tests/comparator/test_comparator.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import sys
import tempfile
from collections import namedtuple
from textwrap import dedent
import onnx_graphsurgeon as gs
import pytest
import tensorrt as trt
from polygraphy import mod
from polygraphy.backend.onnx import onnx_from_path
from polygraphy.backend.trt import Algorithm, TacticReplayData
from polygraphy.json import save_json
from tests.models.meta import ONNX_MODELS
from tests.tools.common import run_polygraphy_debug
FakeAlgorithmContext = namedtuple("FakeAlgorithmContext", ["name", "num_inputs", "num_outputs"])
FakeAlgorithm = namedtuple("FakeAlgorithm", ["algorithm_variant", "io_info"])
FakeAlgorithm.get_algorithm_io_info = lambda this, index: this.io_info[index]
FakeAlgorithmVariant = namedtuple("FakeAlgorithmVariant", ["implementation", "tactic"])
FakeAlgorithmIOInfo = namedtuple("FakeAlgorithmIOInfo", ["tensor_format", "dtype"])
def fake_context(name, num_inputs=1, num_outputs=1):
return FakeAlgorithmContext(name=name, num_inputs=num_inputs, num_outputs=num_outputs)
def fake_algo(implementation=6, tactic=0, num_io=2, tensor_format=trt.TensorFormat.LINEAR, dtype=trt.float32):
io_info = [FakeAlgorithmIOInfo(tensor_format=tensor_format, dtype=dtype)] * num_io
return FakeAlgorithm(algorithm_variant=FakeAlgorithmVariant(implementation, tactic), io_info=io_info)
@pytest.fixture(scope="session", params=["", "subdir"])
def replay_dir(request):
def make_replay(tactic):
return TacticReplayData().add("layer0", Algorithm.from_trt(fake_context("layer0"), fake_algo(0, tactic)))
with tempfile.TemporaryDirectory() as dir:
def make_path(prefix, *args):
path = os.path.join(dir, prefix)
if request.param:
path = os.path.join(path, request.param)
path = os.path.join(path, *args)
return path
# Good tactics
save_json(make_replay(0), make_path("good", "0.json"))
save_json(make_replay(1), make_path("good", "1.json"))
# Bad tactics
save_json(make_replay(1), make_path("bad", "0.json"))
save_json(make_replay(2), make_path("bad", "1.json"))
EXPECTED_OUTPUT = dedent(
"""
[I] Loaded {num} good tactic replays.
[I] Loaded {num} bad tactic replays.
[I] Found potentially bad tactics:
[I] Layer: layer0
Algorithms: ["(Implementation: 0, Tactic: 2) | Inputs: (('TensorFormat.LINEAR', 'DataType.FLOAT'),) | Outputs: (('TensorFormat.LINEAR', 'DataType.FLOAT'),)"]
"""
)
yield dir, EXPECTED_OUTPUT
class TestDiffTactics(object):
def check_output(self, status, expected_output, expected_num=2):
output = "\n".join(
line for line in status.stdout.strip().splitlines() if "Loading tactic replay file from " not in line
)
assert output == expected_output.format(num=expected_num).strip()
def test_dir(self, replay_dir):
replay_dir, expected_output = replay_dir
status = run_polygraphy_debug(["diff-tactics", "--dir", replay_dir], disable_verbose=True)
self.check_output(status, expected_output)
def test_good_bad(self, replay_dir):
replay_dir, expected_output = replay_dir
good = os.path.join(replay_dir, "good")
bad = os.path.join(replay_dir, "bad")
status = run_polygraphy_debug(["diff-tactics", "--good", good, "--bad", bad], disable_verbose=True)
self.check_output(status, expected_output)
def test_good_bad_file(self, replay_dir):
replay_dir, expected_output = replay_dir
def find_file(dirpath, filename):
return glob.glob(os.path.join(dirpath, "**", filename), recursive=True)[0]
good = find_file(os.path.join(replay_dir, "good"), "0.json")
bad = find_file(os.path.join(replay_dir, "bad"), "1.json")
status = run_polygraphy_debug(["diff-tactics", "--good", good, "--bad", bad], disable_verbose=True)
self.check_output(status, expected_output, expected_num=1)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
class TestBuild(object):
def test_good_bad(self):
with tempfile.TemporaryDirectory() as outdir:
# Also includes --show-output sanity test
status = run_polygraphy_debug(
[
"build",
ONNX_MODELS["identity"].path,
"--save-tactics=replay.json",
"--show-output",
"--artifacts-dir",
outdir,
"--until=good",
"--artifacts",
"replay.json",
"--check",
"true",
],
cwd=outdir,
)
assert "Passed: 1/1 | Pass Rate: 100.0%" in status.stdout
status = run_polygraphy_debug(
[
"build",
ONNX_MODELS["identity"].path,
"--save-tactics=replay.json",
"--artifacts-dir",
outdir,
"--until=bad",
"--artifacts",
"replay.json",
"--check",
"false",
],
cwd=outdir,
)
assert "Passed: 0/1 | Pass Rate: 0.0%" in status.stdout
def check_outdir(subdir):
files = glob.glob(os.path.join(outdir, subdir, "*"))
assert len(files) == 1
basenames = list(map(os.path.basename, files))
assert len([f for f in basenames if f.startswith("replay") and f.endswith(".json")]) == 1
check_outdir("good")
check_outdir("bad")
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
class TestPrecision(object):
@pytest.mark.parametrize("check_status", ["true", "false"])
@pytest.mark.parametrize("mode", ["bisect", "linear"])
@pytest.mark.parametrize("direction", ["forward", "reverse"])
@pytest.mark.parametrize("model", ["reducable", "const_foldable"])
def test_sanity(self, mode, direction, check_status, model):
with tempfile.TemporaryDirectory() as outdir:
run_polygraphy_debug(
[
"precision",
"--mode",
mode,
"--direction",
direction,
ONNX_MODELS[model].path,
"--int8",
"--check",
check_status,
],
cwd=outdir,
)
class TestReduce(object):
FAKE_REDUCE_CHECKER = os.path.join(os.path.dirname(__file__), "fake_reduce_checker.py")
# Test left branch, right branch, at the point of branching, and after the branch.
@pytest.mark.parametrize(
"fail_node",
[
"onnx_graphsurgeon_node_1",
"onnx_graphsurgeon_node_3",
"onnx_graphsurgeon_node_5",
"onnx_graphsurgeon_node_7",
"onnx_graphsurgeon_node_9",
],
)
@pytest.mark.parametrize("mode", ["linear", "bisect"])
def test_can_isolate_node(self, fail_node, mode):
with tempfile.TemporaryDirectory() as outdir:
run_polygraphy_debug(
[
"reduce",
ONNX_MODELS["reducable"].path,
"--output=reduced.onnx",
"--mode",
mode,
"--show-output",
"--min-good=good_reduced.onnx",
"--check",
TestReduce.FAKE_REDUCE_CHECKER,
"polygraphy_debug.onnx",
"--fail-node",
fail_node,
],
disable_verbose=True,
cwd=outdir,
)
model = onnx_from_path(os.path.join(outdir, "reduced.onnx"))
min_good_path = os.path.join(outdir, "good_reduced.onnx")
good_model = None
if os.path.exists(min_good_path):
good_model = onnx_from_path(min_good_path)
# The model should only contain one node - the failing one.
# One exception - since bisect depends on node ordering, it sometimes doesn't
# reduce branches to the maximum possible extent.
if mode == "bisect" and fail_node == "onnx_graphsurgeon_node_1":
assert len(model.graph.node) == 3
elif mode == "bisect" and fail_node == "onnx_graphsurgeon_node_7":
assert len(model.graph.node) == 2
else:
assert len(model.graph.node) == 1
node_names = [node.name for node in model.graph.node]
assert fail_node in node_names
# For now we're just doing a very basic sanity check for --min-good
if good_model:
assert model != good_model
# Run a test where the last node in the model is failing.
# If we're not reducing inputs, then only the outputs should change
def test_no_reduce_inputs(self):
with tempfile.TemporaryDirectory() as outdir:
run_polygraphy_debug(
[
"reduce",
ONNX_MODELS["reducable"].path,
"--output=reduced.onnx",
"--show-output",
"--no-reduce-inputs",
"--mode=linear",
"--check",
TestReduce.FAKE_REDUCE_CHECKER,
"polygraphy_debug.onnx",
"--fail-node",
"onnx_graphsurgeon_node_7",
],
disable_verbose=True,
cwd=outdir,
)
model = onnx_from_path(os.path.join(outdir, "reduced.onnx"))
assert len(model.graph.node) == 4
assert len(model.graph.input) == 2
assert len(model.graph.output) == 1
assert model.graph.output[0].name == "identity_out_6"
node_names = [node.name for node in model.graph.node]
assert "onnx_graphsurgeon_node_7" in node_names
# Run a test where an input node in the model is failing.
# If we're not reducing outputs, then only the inputs should change
def test_no_reduce_outputs(self):
with tempfile.TemporaryDirectory() as outdir:
run_polygraphy_debug(
[
"reduce",
ONNX_MODELS["reducable"].path,
"--output=reduced.onnx",
"--show-output",
"--no-reduce-outputs",
"--mode=linear",
"--check",
TestReduce.FAKE_REDUCE_CHECKER,
"polygraphy_debug.onnx",
"--fail-node",
"onnx_graphsurgeon_node_3",
],
disable_verbose=True,
cwd=outdir,
)
model = onnx_from_path(os.path.join(outdir, "reduced.onnx"))
assert len(model.graph.node) == 4
assert len(model.graph.input) == 1
assert len(model.graph.output) == 2
assert model.graph.input[0].name == "Y0"
node_names = [node.name for node in model.graph.node]
assert "onnx_graphsurgeon_node_7" in node_names
# In this test, we set up the checker to return 1 for the bad node, but 2 in other cases.
# We want to ignore '2's and treat them as successes
def test_reduce_custom_return_code(self):
with tempfile.TemporaryDirectory() as outdir:
run_polygraphy_debug(
[
"reduce",
ONNX_MODELS["reducable"].path,
"--output=reduced.onnx",
"--show-output",
"--fail-code=1", # Only 1s are real failures.
"--check",
TestReduce.FAKE_REDUCE_CHECKER,
"polygraphy_debug.onnx",
"--fail-node",
"onnx_graphsurgeon_node_5",
"--default-return-code=2",
],
disable_verbose=True,
cwd=outdir,
)
model = onnx_from_path(os.path.join(outdir, "reduced.onnx"))
assert len(model.graph.node) == 1
assert model.graph.node[0].name == "onnx_graphsurgeon_node_5"
# Here we set the failure return code to 0, which would normally mark succeeding cases as failing.
# However, since we also set the --fail-regex, it will only regard as failures those runs which print the error message.
@pytest.mark.parametrize(
"fail_code_arg",
[
[],
["--fail-code=0"],
],
)
def test_reduce_custom_fail_message(self, fail_code_arg):
with tempfile.TemporaryDirectory() as outdir:
# fake_reduce_checker will alternate error messages based on whether an arbitrary node is present in the model.
run_polygraphy_debug(
[
"reduce",
ONNX_MODELS["reducable"].path,
"--output=reduced.onnx",
"--show-output",
"--fail-regex",
"REALLY BAD",
"BAD NODE",
]
+ fail_code_arg
+ [
"--check",
TestReduce.FAKE_REDUCE_CHECKER,
"polygraphy_debug.onnx",
"--fail-node",
"onnx_graphsurgeon_node_5",
"--fail-return-code=0",
],
disable_verbose=True,
cwd=outdir,
)
model = onnx_from_path(os.path.join(outdir, "reduced.onnx"))
assert len(model.graph.node) == 1
assert model.graph.node[0].name == "onnx_graphsurgeon_node_5"
# In cases where both sides of a branch are required to reproduce the failure,
# reduce should not remove the branch.
@pytest.mark.parametrize(
"fail_nodes",
[
["onnx_graphsurgeon_node_1", "onnx_graphsurgeon_node_3"],
["onnx_graphsurgeon_node_7", "onnx_graphsurgeon_node_9"],
],
)
def test_no_reduce_required_branches(self, fail_nodes):
with tempfile.TemporaryDirectory() as outdir:
run_polygraphy_debug(
[
"reduce",
ONNX_MODELS["reducable"].path,
"--output=reduced.onnx",
"--show-output",
"--check",
TestReduce.FAKE_REDUCE_CHECKER,
"polygraphy_debug.onnx",
"--fail-node",
]
+ fail_nodes,
disable_verbose=True,
cwd=outdir,
)
model = onnx_from_path(os.path.join(outdir, "reduced.onnx"))
node_names = [node.name for node in model.graph.node]
assert all(fail_node in node_names for fail_node in fail_nodes)
assert len(model.graph.node) <= 3 # The branch on the opposite side of the model should be removed.
@pytest.mark.parametrize("opts", [[], ["--force-fallback-shape-inference"]])
def test_reduce_shape_inference(self, opts):
with tempfile.TemporaryDirectory() as outdir:
status = run_polygraphy_debug(
[
"reduce",
ONNX_MODELS["dynamic_identity"].path,
"--output=reduced.onnx",
"--show-output",
"--model-input-shapes=X:[1,2,5,5]",
]
+ opts
+ ["--check", "false"],
disable_verbose=True,
cwd=outdir,
)
model = onnx_from_path(os.path.join(outdir, "reduced.onnx"))
graph = gs.import_onnx(model)
assert tuple(graph.inputs[0].shape) == (1, 2, 5, 5)
assert tuple(graph.outputs[0].shape) == (1, 2, 5, 5)
def test_reduce_with_constant(self):
# Should be no failure when models including Constant nodes use fallback
# shape inference; Constant nodes will be lowered to constant tensors.
with tempfile.TemporaryDirectory() as outdir:
run_polygraphy_debug(
[
"reduce",
ONNX_MODELS["reducable_with_const"].path,
"--no-shape-inference",
"--mode=linear",
"--output=reduced.onnx",
]
+ [
"--check",
TestReduce.FAKE_REDUCE_CHECKER,
"polygraphy_debug.onnx",
"--fail-node",
"onnx_graphsurgeon_node_3",
],
disable_verbose=True,
cwd=outdir,
)
model = onnx_from_path(os.path.join(outdir, "reduced.onnx"))
graph = gs.import_onnx(model)
assert len(graph.nodes) == 1
assert graph.nodes[0].name == "onnx_graphsurgeon_node_3"
# Outputs of Constant nodes should not become Variables; thus the model should have no inputs.
assert not graph.inputs
class TestRepeat(object):
@pytest.mark.parametrize(
"until, check, expected_iters",
[
("good", "true", 1),
("bad", "false", 1),
("5", "false", 5),
],
)
def test_until(self, until, check, expected_iters):
status = run_polygraphy_debug(["repeat", "--until", until, "--check", check])
assert "Finished {:} iteration(s)".format(expected_iters) in status.stdout
def test_iteration_info(self):
with tempfile.TemporaryDirectory() as outdir:
iter_info = os.path.join(outdir, "iter_info.json")
check_script = os.path.join(outdir, "check.py")
# Hacky Python script to make sure the iteration is actually incremented
check_num = """
import json
import os
iter_info = json.load(open('{:}', "r"))
file_name = str(iter_info["iteration"]) + ".txt"
path = os.path.abspath(file_name)
print(path)
assert not os.path.exists(path)
with open(path, "w") as f:
f.write("File")
""".format(
iter_info
)
with open(check_script, "w") as f:
f.write(dedent(check_num))
status = run_polygraphy_debug(
[
"repeat",
"--until=5",
"--iteration-info",
iter_info,
"--show-output",
"--check",
sys.executable,
check_script,
],
cwd=outdir,
)
assert "FAILED" not in status.stdout
assert "Passed: 5/5 | Pass Rate: 100.0%" in status.stdout
# Iteration info should be cleaned up afterwards
assert not os.path.exists(iter_info)
def test_ignore_fail_code(self):
# Sanity check to make sure the command normally fails.
status = run_polygraphy_debug(["repeat", "--until=5", "--check", "false"])
assert "Passed: 0/5 | Pass Rate: 0.0%" in status.stdout
status = run_polygraphy_debug(["repeat", "--until=5", "--ignore-fail-code=2", "--check", "false"])
assert "Passed: 0/5 | Pass Rate: 0.0%" in status.stdout
status = run_polygraphy_debug(["repeat", "--until=5", "--ignore-fail-code=1", "--check", "false"])
assert "Passed: 5/5 | Pass Rate: 100.0%" in status.stdout
| TensorRT-master | tools/Polygraphy/tests/tools/test_debug.py |
import os
import polygraphy
from tests.tools.common import run_polygraphy
class TestPolygraphyBin(object):
def test_version(self):
status = run_polygraphy(["-v"])
assert status.stdout.strip().replace("\n", " ").replace(
"  ", " "
) == "Polygraphy | Version: {:} | Path: {:}".format(
polygraphy.__version__, list(map(os.path.realpath, polygraphy.__path__))
)
| TensorRT-master | tools/Polygraphy/tests/tools/test_polygraphy.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from polygraphy.exception import PolygraphyInternalException
from polygraphy.tools.script import Script, inline, make_invocable, make_invocable_if_nondefault
def make_test_string():
return Script.String("test")
class TestScript(object):
@pytest.mark.parametrize(
"func",
[
lambda _: inline(make_test_string()),
lambda s: s.add_loader(make_test_string(), make_test_string()),
lambda s: s.add_runner(make_test_string()),
lambda s: s.append_preimport(make_test_string()),
lambda s: s.append_suffix(make_test_string()),
lambda s: s.set_data_loader(make_test_string()),
],
)
def test_add_funcs_fail_on_unsafe(self, func):
script = Script()
with pytest.raises(PolygraphyInternalException, match="was not checked for safety"):
func(script)
@pytest.mark.parametrize(
"case, expected",
[
("should_become_raw", "'should_become_raw'"),
("parens))", r"'parens))'"),
("'squotes'", "\"'squotes'\""),
('"dquotes"', "'\"dquotes\"'"),
(r"braces{}{})", r"'braces{}{})'"),
("commas, ,", r"'commas, ,'"),
("escape_quote_with_backslash'", '"escape_quote_with_backslash\'"'),
("unterm_in_quotes_ok))", r"'unterm_in_quotes_ok))'"),
],
)
def test_non_inlined_strings_escaped(self, case, expected):
out = make_invocable("Dummy", case, x=case)
ex_out = "Dummy({:}, x={:})".format(expected, expected)
assert out.unwrap() == ex_out
def test_invoke_none_args(self):
assert make_invocable("Dummy", None).unwrap() == "Dummy(None)"
assert make_invocable("Dummy", x=None).unwrap() == "Dummy()"
def test_invoke_if_nondefault_none_args(self):
assert make_invocable_if_nondefault("Dummy", None) is None
assert make_invocable_if_nondefault("Dummy", x=None) is None
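# Summarizing the assertions above: make_invocable renders a call expression in which plain
# Python strings are quoted and escaped, a positional None renders literally as None, and
# keyword arguments set to None are dropped; make_invocable_if_nondefault returns None when
# all arguments are defaults.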
| TensorRT-master | tools/Polygraphy/tests/tools/test_script.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import glob
import os
import subprocess as sp
import sys
import tempfile
from textwrap import dedent
import onnx
import pytest
import tensorrt as trt
from polygraphy import mod, util
from polygraphy.json import load_json
from tests.helper import get_file_size, is_file_non_empty
from tests.models.meta import ONNX_MODELS, TF_MODELS
from tests.tools.common import ROOT_DIR, check_subprocess, run_polygraphy_run
class TestGen(object):
def test_polygraphy_run_gen_script(self):
with util.NamedTemporaryFile(mode="w") as f:
run_polygraphy_run(["--gen-script={:}".format(f.name), ONNX_MODELS["identity"].path])
with open(f.name, "r") as script:
print(script.read())
env = copy.deepcopy(os.environ)
env.update({"PYTHONPATH": ROOT_DIR})
check_subprocess(sp.run([sys.executable, f.name], env=env))
class TestLogging(object):
def test_logger_verbosity(self):
run_polygraphy_run(["--silent"])
@pytest.mark.parametrize(
"log_path",
[
os.path.join("example", "example.log"),
"example.log",
],
)
def test_log_file(self, log_path):
with tempfile.TemporaryDirectory() as outdir:
run_polygraphy_run(["--log-file", log_path], cwd=outdir)
assert open(os.path.join(outdir, log_path)).read()
class TestTrtLegacy(object):
def test_uff(self):
run_polygraphy_run([TF_MODELS["identity"].path, "--trt-legacy"])
@pytest.mark.skipif(mod.version(trt.__version__) >= mod.version("7.0"), reason="Unsupported in TRT 7.0 and later")
def test_onnx(self):
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt-legacy"])
class TestTrt(object):
def test_basic(self):
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt"])
def test_plugins(self):
run_polygraphy_run(
[
ONNX_MODELS["identity"].path,
"--trt",
"--plugins",
"nvinfer_plugin.dll" if sys.platform.startswith("win") else "libnvinfer_plugin.so",
]
)
def test_custom_outputs(self):
run_polygraphy_run([ONNX_MODELS["identity_identity"].path, "--trt", "--trt-outputs", "identity_out_0"])
def test_layerwise_outputs(self):
with util.NamedTemporaryFile() as outfile0:
run_polygraphy_run(
[
ONNX_MODELS["identity_identity"].path,
"--trt",
"--trt-outputs",
"mark",
"all",
"--save-outputs",
outfile0.name,
]
)
results = load_json(outfile0.name)
[result] = list(results.values())[0]
assert len(result) == 2
assert "identity_out_0" in result
assert "identity_out_2" in result
def test_exclude_outputs_with_layerwise(self):
with util.NamedTemporaryFile() as outfile0:
run_polygraphy_run(
[
ONNX_MODELS["identity_identity"].path,
"--trt",
"--trt-outputs",
"mark",
"all",
"--trt-exclude-outputs",
"identity_out_2",
"--save-outputs",
outfile0.name,
]
)
results = load_json(outfile0.name)
[result] = list(results.values())[0]
assert len(result) == 1
assert "identity_out_0" in result
def test_int8(self):
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--int8"])
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="API was added after TRT 7.2")
def test_sparse_weights(self):
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--sparse-weights"])
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_input_shape(self):
run_polygraphy_run([ONNX_MODELS["dynamic_identity"].path, "--trt", "--onnxrt", "--input-shapes", "X:[1,2,4,4]"])
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_dynamic_input_shape(self):
run_polygraphy_run(
[ONNX_MODELS["dynamic_identity"].path, "--trt", "--onnxrt", "--input-shapes", "X:[1,2,-1,4]"]
)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_dynamic_input_shape_legacy_syntax(self):
run_polygraphy_run([ONNX_MODELS["dynamic_identity"].path, "--trt", "--onnxrt", "--input-shapes", "X,1x2x-1x4"])
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_explicit_profile(self):
run_polygraphy_run(
[
ONNX_MODELS["dynamic_identity"].path,
"--trt",
"--onnxrt",
"--input-shapes",
"X:[1,2,1,1]",
"--trt-min-shapes",
"X:[1,2,1,1]",
"--trt-opt-shapes",
"X:[1,2,1,1]",
"--trt-max-shapes",
"X:[1,2,1,1]",
]
)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_explicit_profile_implicit_runtime_shape(self):
run_polygraphy_run(
[
ONNX_MODELS["dynamic_identity"].path,
"--trt",
"--onnxrt",
"--trt-min-shapes",
"X:[1,2,1,1]",
"--trt-opt-shapes",
"X:[1,2,1,1]",
"--trt-max-shapes",
"X:[1,2,1,1]",
]
)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_explicit_profile_opt_runtime_shapes_differ(self):
run_polygraphy_run(
[
ONNX_MODELS["dynamic_identity"].path,
"--trt",
"--onnxrt",
"--input-shapes",
"X:[1,2,2,2]",
"--trt-min-shapes",
"X:[1,2,1,1]",
"--trt-opt-shapes",
"X:[1,2,3,3]",
"--trt-max-shapes",
"X:[1,2,4,4]",
]
)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_multiple_profiles(self):
run_polygraphy_run(
[
ONNX_MODELS["dynamic_identity"].path,
"--trt",
"--onnxrt",
"--trt-min-shapes",
"X:[1,2,1,1]",
"--trt-opt-shapes",
"X:[1,2,1,1]",
"--trt-max-shapes",
"X:[1,2,1,1]",
"--trt-min-shapes",
"X:[1,2,4,4]",
"--trt-opt-shapes",
"X:[1,2,4,4]",
"--trt-max-shapes",
"X:[1,2,4,4]",
]
)
def test_int8_calibration_cache(self):
with util.NamedTemporaryFile() as outpath:
cmd = [ONNX_MODELS["identity"].path, "--trt", "--int8", "--calibration-cache", outpath.name]
if mod.version(trt.__version__) >= mod.version("7.0"):
cmd += ["--onnxrt"]
run_polygraphy_run(cmd)
assert is_file_non_empty(outpath.name)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.parametrize("base_class", ["IInt8LegacyCalibrator", "IInt8EntropyCalibrator2"])
def test_int8_calibration_base_class(self, base_class):
cmd = [ONNX_MODELS["identity"].path, "--trt", "--int8", "--calibration-base-class", base_class]
if mod.version(trt.__version__) >= mod.version("7.0"):
cmd += ["--onnxrt"]
run_polygraphy_run(cmd)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
def test_timing_cache(self):
with tempfile.TemporaryDirectory() as dir:
# Test with files that haven't already been created instead of using NamedTemporaryFile().
total_cache = os.path.join(dir, "total.cache")
identity_cache = os.path.join(dir, "identity.cache")
run_polygraphy_run([ONNX_MODELS["const_foldable"].path, "--trt", "--timing-cache", total_cache])
assert is_file_non_empty(total_cache)
const_foldable_cache_size = get_file_size(total_cache)
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--timing-cache", identity_cache])
identity_cache_size = get_file_size(identity_cache)
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--timing-cache", total_cache])
total_cache_size = get_file_size(total_cache)
# The total cache should be larger than either of the individual caches.
assert total_cache_size > const_foldable_cache_size and total_cache_size > identity_cache_size
# The total cache should also be smaller than or equal to the sum of the individual caches since
# header information should not be duplicated.
assert total_cache_size <= (const_foldable_cache_size + identity_cache_size)
def test_save_load_engine(self):
with util.NamedTemporaryFile() as outpath:
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--save-engine", outpath.name])
assert is_file_non_empty(outpath.name)
run_polygraphy_run(["--trt", outpath.name, "--model-type=engine"])
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
def test_tactic_replay(self):
with util.NamedTemporaryFile() as tactic_replay:
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--save-tactics", tactic_replay.name])
assert is_file_non_empty(tactic_replay.name)
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--load-tactics", tactic_replay.name])
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.2"), reason="Unsupported before TRT 7.2")
def test_tactic_sources(self):
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--tactic-sources", "CUBLAS", "CUBLAS_LT"])
def test_data_loader_script_calibration(self):
with util.NamedTemporaryFile("w+", suffix=".py") as f:
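# A --data-loader-script is expected to expose a load_data() callable (the default function
# name) that yields feed_dict mappings of input names to NumPy arrays; here it supplies the
# int8 calibration data.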
f.write(
dedent(
"""
import numpy as np
def load_data():
for _ in range(5):
yield {"x": np.ones((1, 1, 2, 2), dtype=np.float32) * 6.4341}
"""
)
)
f.flush()
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--int8", "--data-loader-script", f.name])
class TestTf(object):
def test_tf(self):
run_polygraphy_run([TF_MODELS["identity"].path, "--tf", "--gpu-memory-fraction=0.5"])
def test_tf_save_pb(self):
with util.NamedTemporaryFile() as outpath:
run_polygraphy_run(
[TF_MODELS["identity"].path, "--tf", "--gpu-memory-fraction=0.5", "--save-pb", outpath.name]
)
assert is_file_non_empty(outpath.name)
def test_tf_save_tensorboard(self):
with tempfile.TemporaryDirectory() as outdir:
run_polygraphy_run(
[TF_MODELS["identity"].path, "--tf", "--gpu-memory-fraction=0.5", "--save-tensorboard", outdir]
)
files = glob.glob("{:}{:}*".format(outdir, os.path.sep))
assert len(files) == 1
@pytest.mark.skip(reason="Non-trivial to set up - requires CUPTI")
def test_tf_save_timeline(self):
with util.NamedTemporaryFile() as outpath:
run_polygraphy_run(
[TF_MODELS["identity"].path, "--tf", "--gpu-memory-fraction=0.5", "--save-timeline", outpath.name]
)
timelines = glob.glob(os.path.join(outpath.name, "*"))
for timeline in timelines:
assert is_file_non_empty(timeline)
@pytest.mark.skip(reason="Non-trivial to set up")
def test_tftrt(self):
run_polygraphy_run([TF_MODELS["identity"].path, "--tf", "--tftrt"])
class TestOnnxrt(object):
def test_tf2onnxrt(self):
run_polygraphy_run([TF_MODELS["identity"].path, "--onnxrt", "--model-type=frozen"])
def test_tf2onnx_save_onnx(self):
with util.NamedTemporaryFile() as outpath:
run_polygraphy_run(
[TF_MODELS["identity"].path, "--onnxrt", "--model-type=frozen", "--save-onnx", outpath.name]
)
assert is_file_non_empty(outpath.name)
assert onnx.load(outpath.name)
def test_onnx_rt(self):
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt"])
def test_onnx_rt_save_onnx(self):
with util.NamedTemporaryFile() as outpath:
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--save-onnx", outpath.name])
assert is_file_non_empty(outpath.name)
assert onnx.load(outpath.name)
def test_onnx_rt_custom_outputs(self):
run_polygraphy_run([ONNX_MODELS["identity_identity"].path, "--onnxrt", "--onnx-outputs", "identity_out_0"])
def test_onnx_rt_layerwise_outputs(self):
with util.NamedTemporaryFile() as outfile0:
run_polygraphy_run(
[
ONNX_MODELS["identity_identity"].path,
"--onnxrt",
"--onnx-outputs",
"mark",
"all",
"--save-outputs",
outfile0.name,
]
)
results = load_json(outfile0.name)
[result] = list(results.values())[0]
assert len(result) == 2
assert "identity_out_0" in result
assert "identity_out_2" in result
def test_onnx_rt_exclude_outputs_with_layerwise(self):
with util.NamedTemporaryFile() as outfile0:
run_polygraphy_run(
[
ONNX_MODELS["identity_identity"].path,
"--onnxrt",
"--onnx-outputs",
"mark",
"all",
"--onnx-exclude-outputs",
"identity_out_2",
"--save-outputs",
outfile0.name,
]
)
results = load_json(outfile0.name)
[result] = list(results.values())[0]
assert len(result) == 1
assert "identity_out_0" in result
def test_external_data(self):
model = ONNX_MODELS["ext_weights"]
assert run_polygraphy_run([model.path, "--onnxrt", "--external-data-dir", model.ext_data])
class TestOther(object):
def test_0_iterations(self):
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--iterations=0"])
def test_subprocess_sanity(self):
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--use-subprocess"])
def test_custom_tolerance(self):
run_polygraphy_run(
[ONNX_MODELS["identity"].path, "--onnxrt", "--onnxrt", "--iterations=0", "--atol=1.0", "--rtol=1.0"]
)
def test_custom_per_output_tolerance(self):
run_polygraphy_run(
[
ONNX_MODELS["identity_identity"].path,
"--onnxrt",
"--onnxrt",
"--onnx-outputs",
"mark",
"all",
"--atol",
"identity_out_0:1.0",
"identity_out_2:3.0",
"0.5",
"--rtol",
"identity_out_0:1.0",
"identity_out_2:3.0",
"0.5",
]
)
def test_custom_input_ranges(self):
run_polygraphy_run(
[ONNX_MODELS["identity_identity"].path, "--onnxrt", "--val-range", "X:[1.0,2.0]", "[0.5,1.5]"]
)
def test_top_k(self):
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--top-k=5"])
@pytest.mark.parametrize("check_error_stat", ["max", "median", "mean"])
def test_check_error_stat(self, check_error_stat):
run_polygraphy_run(
[ONNX_MODELS["identity"].path, "--onnxrt", "--onnxrt", "--check-error-stat", check_error_stat]
)
def test_save_load_outputs(self, tmp_path):
OUTFILE0 = os.path.join(tmp_path, "outputs0.json")
OUTFILE1 = os.path.join(tmp_path, "outputs1.json")
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--save-outputs", OUTFILE0])
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--save-outputs", OUTFILE1])
status = run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--load-outputs", OUTFILE0, OUTFILE1])
assert (
"Difference is within tolerance" in status.stdout + status.stderr
) # Make sure it actually compared stuff.
# Should work with only one file
status = run_polygraphy_run([ONNX_MODELS["identity"].path, "--load-outputs", OUTFILE0])
assert (
"Difference is within tolerance" not in status.stdout + status.stderr
) # Make sure it DIDN'T compare stuff.
# Should work even with no runners specified
status = run_polygraphy_run([ONNX_MODELS["identity"].path, "--load-outputs", OUTFILE0, OUTFILE1])
assert (
"Difference is within tolerance" in status.stdout + status.stderr
) # Make sure it actually compared stuff.
# Should work even when comparing a single runner to itself.
status = run_polygraphy_run([ONNX_MODELS["identity"].path, "--load-outputs", OUTFILE0, OUTFILE0])
assert (
"Difference is within tolerance" in status.stdout + status.stderr
) # Make sure it actually compared stuff.
def test_save_load_inputs(self):
with util.NamedTemporaryFile() as infile0, util.NamedTemporaryFile() as infile1:
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--save-input-data", infile0.name])
run_polygraphy_run(
[
ONNX_MODELS["identity"].path,
"--onnxrt",
"--load-input-data",
infile0.name,
"--save-input-data",
infile1.name,
]
) # Copy
run_polygraphy_run(
[ONNX_MODELS["identity"].path, "--onnxrt", "--load-input-data", infile0.name, infile1.name]
)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_runner_coexistence(self):
run_polygraphy_run([TF_MODELS["identity"].path, "--model-type=frozen", "--tf", "--onnxrt", "--trt"])
class TestPluginRef(object):
def test_basic(self):
run_polygraphy_run([ONNX_MODELS["identity"].path, "--pluginref"])
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.parametrize("model", ["identity", "instancenorm"])
def test_ref_implementations(self, model):
run_polygraphy_run([ONNX_MODELS[model].path, "--pluginref", "--onnxrt", "--trt"])
| TensorRT-master | tools/Polygraphy/tests/tools/test_run.py |
| TensorRT-master | tools/Polygraphy/tests/tools/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from textwrap import dedent
import onnx
import pytest
import tensorrt as trt
from polygraphy import mod, util
from polygraphy.backend.common.loader import BytesFromPath
from polygraphy.backend.trt.loader import EngineFromBytes
from tests.models.meta import ONNX_MODELS, TF_MODELS
from tests.tools.common import run_polygraphy_convert
class TestConvertToOnnx(object):
def test_tf2onnx(self):
with util.NamedTemporaryFile(suffix=".onnx") as outmodel:
run_polygraphy_convert([TF_MODELS["identity"].path, "--model-type=frozen", "-o", outmodel.name])
assert onnx.load(outmodel.name)
def test_fp_to_fp16(self):
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_convert(
[ONNX_MODELS["identity_identity"].path, "--convert-to=onnx", "--fp-to-fp16", "-o", outmodel.name]
)
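# elem_type 10 corresponds to FLOAT16 in ONNX's TensorProto.DataType enum.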
assert onnx.load(outmodel.name).graph.value_info[0].type.tensor_type.elem_type == 10
class TestConvertToTrt(object):
def check_engine(self, path):
loader = EngineFromBytes(BytesFromPath(path))
with loader() as engine:
assert isinstance(engine, trt.ICudaEngine)
def test_onnx_to_trt(self):
with util.NamedTemporaryFile(suffix=".engine") as outmodel:
run_polygraphy_convert([ONNX_MODELS["identity"].path, "--model-type=onnx", "-o", outmodel.name])
self.check_engine(outmodel.name)
@pytest.mark.skipif(
mod.version(trt.__version__) < mod.version("8.0"), reason="Bug in older versions of TRT breaks this test"
)
def test_tf_to_onnx_to_trt(self):
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_convert(
[TF_MODELS["identity"].path, "--model-type=frozen", "--convert-to=trt", "-o", outmodel.name]
)
self.check_engine(outmodel.name)
def test_trt_network_config_script_to_engine(self):
script = dedent(
"""
from polygraphy.backend.trt import CreateNetwork, CreateConfig
from polygraphy import func
import tensorrt as trt
@func.extend(CreateNetwork())
def my_load_network(builder, network):
inp = network.add_input("input", dtype=trt.float32, shape=(1, 1))
out = network.add_identity(inp).get_output(0)
network.mark_output(out)
@func.extend(CreateConfig())
def load_config(config):
config.set_flag(trt.BuilderFlag.FP16)
"""
)
with util.NamedTemporaryFile("w+", suffix=".py") as f, util.NamedTemporaryFile() as outmodel:
f.write(script)
f.flush()
run_polygraphy_convert(
[
f.name,
"--model-type=trt-network-script",
"--trt-network-func-name=my_load_network",
"--trt-config-script",
f.name,
"--convert-to=trt",
"-o",
outmodel.name,
]
)
self.check_engine(outmodel.name)
def test_modify_onnx_outputs(self):
with util.NamedTemporaryFile(suffix=".onnx") as outmodel:
run_polygraphy_convert(
[ONNX_MODELS["identity_identity"].path, "-o", outmodel.name, "--onnx-outputs", "mark", "all"]
)
model = onnx.load(outmodel.name)
assert len(model.graph.output) == 2
class TestConvertToOnnxLikeTrt(object):
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.2"), reason="Unsupported for TRT 7.1 and older")
@pytest.mark.parametrize(
"model_name", ["identity", "empty_tensor_expand", "const_foldable", "and", "scan", "dim_param", "tensor_attr"]
)
def test_onnx_to_trt_to_onnx_like(self, model_name):
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_convert(
[ONNX_MODELS[model_name].path, "--convert-to=onnx-like-trt-network", "-o", outmodel.name]
)
| TensorRT-master | tools/Polygraphy/tests/tools/test_convert.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
import onnx
import onnx_graphsurgeon as gs
import pytest
from polygraphy import util
from tests.helper import is_file_non_empty
from tests.models.meta import ONNX_MODELS
from tests.tools.common import run_polygraphy_run, run_polygraphy_surgeon
def onnx_model_sanity_check(model_path):
run_polygraphy_run([model_path, "--model-type=onnx", "--onnxrt"])
def was_shape_inference_run(status):
return "ONNX Shape Inference completed successfully" in (status.stdout + status.stderr)
class TestSurgeonExtract(object):
def test_no_shape_inference_if_has_metadata(self):
with util.NamedTemporaryFile() as outmodel:
status = run_polygraphy_surgeon(
["extract", ONNX_MODELS["identity_identity"].path, "-o", outmodel.name, "--inputs", "X:auto:auto"]
)
onnx_model_sanity_check(outmodel.name)
assert not was_shape_inference_run(status)
def test_onnx_shape_inference_if_no_metadata(self):
with util.NamedTemporaryFile() as outmodel:
status = run_polygraphy_surgeon(
[
"extract",
ONNX_MODELS["identity_identity"].path,
"-o",
outmodel.name,
"--inputs",
"identity_out_0:auto:auto",
]
)
onnx_model_sanity_check(outmodel.name)
assert was_shape_inference_run(status)
def test_fallback_shape_inference_no_onnx_shape_inference(self):
with util.NamedTemporaryFile() as outmodel:
status = run_polygraphy_surgeon(
[
"extract",
ONNX_MODELS["identity_identity"].path,
"-o",
outmodel.name,
"--inputs",
"identity_out_0:auto:auto",
"--outputs",
"identity_out_2:auto",
"--force-fallback-shape-inference",
]
)
onnx_model_sanity_check(outmodel.name)
assert not was_shape_inference_run(status)
def test_force_fallback_shape_inference_will_override_model_shapes(self):
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_surgeon(
[
"extract",
ONNX_MODELS["dynamic_identity"].path,
"-o",
outmodel.name,
"--outputs",
"Y:auto",
"--force-fallback-shape-inference",
]
)
onnx_model_sanity_check(outmodel.name)
graph = gs.import_onnx(onnx.load(outmodel.name))
# Inputs should become fixed since fallback shape inference is being forced.
for tensor in graph.tensors().values():
assert tensor.shape is not None
assert tuple(graph.inputs[0].shape) == (1, 2, 1, 1)
assert tuple(graph.outputs[0].shape) == (1, 2, 1, 1)
def test_sanity_dim_param(self):
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_surgeon(["extract", ONNX_MODELS["dim_param"].path, "-o", outmodel.name])
onnx_model_sanity_check(outmodel.name)
class TestSurgeonInsert(object):
def check_insert_model(self, path, expected_node_ops, expected_graph_input_names, expected_graph_output_names):
model = onnx.load(path)
assert [node.op_type for node in model.graph.node] == expected_node_ops
graph_output_names = set([out.name for out in model.graph.output])
assert graph_output_names == set(expected_graph_output_names)
graph_input_names = set([out.name for out in model.graph.input])
assert graph_input_names == set(expected_graph_input_names)
return model
def test_insert_at_tensor(self):
# Insert a new node in between existing nodes without replacing any existing nodes.
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_surgeon(
[
"insert",
ONNX_MODELS["identity_identity"].path,
"-o",
outmodel.name,
"--inputs=identity_out_0",
"--outputs=identity_out_0",
"--op=FakeOp",
]
)
self.check_insert_model(outmodel.name, ["Identity", "FakeOp", "Identity"], ["X"], ["identity_out_2"])
def test_graph_output(self):
# FakeOp output tensor should be marked as a graph output. Name should be preserved - identity_out_2
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_surgeon(
[
"insert",
ONNX_MODELS["identity_identity"].path,
"-o",
outmodel.name,
"--inputs=identity_out_2",
"--outputs=identity_out_2",
"--op=FakeOp",
]
)
self.check_insert_model(outmodel.name, ["Identity", "Identity", "FakeOp"], ["X"], ["identity_out_2"])
def test_at_graph_input(self):
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_surgeon(
[
"insert",
ONNX_MODELS["identity_identity"].path,
"-o",
outmodel.name,
"--inputs=X",
"--outputs=X",
"--op=FakeOp",
]
)
self.check_insert_model(outmodel.name, ["FakeOp", "Identity", "Identity"], ["X"], ["identity_out_2"])
# When a specified input tensor is used by multiple other nodes, it should not be
# disconnected from other nodes.
def test_multi_use_input(self):
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_surgeon(
[
"insert",
ONNX_MODELS["reducable"].path,
"-o",
outmodel.name,
"--inputs=add_out_4",
"--outputs=identity_out_8",
"--op=FakeOp",
]
)
model = self.check_insert_model(
outmodel.name,
["Identity", "Identity", "Add", "FakeOp", "Identity"],
["X0", "Y0"],
["identity_out_6", "identity_out_8"],
)
other_branch_node = model.graph.node[-1]
assert other_branch_node.name == "onnx_graphsurgeon_node_7"
assert other_branch_node.input == ["add_out_4"]
def test_with_attributes(self):
with util.NamedTemporaryFile() as outmodel:
# str_attr='0' should be interpreted as a string, not an int
# float_attr=0.0 should be interpreted as a float, not an int
# int_attr=0 should be interpreted as an int
run_polygraphy_surgeon(
[
"insert",
ONNX_MODELS["identity_identity"].path,
"-o",
outmodel.name,
"--inputs=X",
"--outputs=X",
"--op=FakeOp",
"--attrs",
"str_attr='0'",
"int_attr=0",
"float_attr=0.0",
"other_str_attr=name",
"str_list_attr=['0','1']",
"int_list_attr=[1,2,3]",
"float_list_attr=[0.0,-1.0,-2.0]",
]
)
model = self.check_insert_model(
outmodel.name, ["FakeOp", "Identity", "Identity"], ["X"], ["identity_out_2"]
)
node = model.graph.node[0]
attrs = node.attribute
assert attrs[0].name == "str_attr"
assert attrs[0].s == b"0"
assert attrs[1].name == "int_attr"
assert attrs[1].i == 0
assert attrs[2].name == "float_attr"
assert attrs[2].f == 0.0
assert attrs[3].name == "other_str_attr"
assert attrs[3].s == b"name"
assert attrs[4].name == "str_list_attr"
assert attrs[4].strings == [b"0", b"1"]
assert attrs[5].name == "int_list_attr"
assert attrs[5].ints == [1, 2, 3]
assert attrs[6].name == "float_list_attr"
assert attrs[6].floats == [0.0, -1.0, -2.0]
class TestSurgeonSanitize(object):
@pytest.mark.parametrize("no_per_pass_shape_inf", [None, "--no-per-pass-shape-inference"])
@pytest.mark.parametrize("fold_shapes", [None, "--no-fold-shapes"])
@pytest.mark.parametrize("partitioning", [None, "basic", "recursive"])
def test_fold_constants(self, no_per_pass_shape_inf, partitioning, fold_shapes):
with util.NamedTemporaryFile() as outmodel:
cmd = ["sanitize", ONNX_MODELS["const_foldable"].path, "-o", outmodel.name, "--fold-constants"]
if fold_shapes:
cmd += [fold_shapes]
if partitioning:
cmd += ["--partitioning", partitioning]
if no_per_pass_shape_inf:
cmd += [no_per_pass_shape_inf]
run_polygraphy_surgeon(cmd)
onnx_model_sanity_check(outmodel.name)
model = onnx.load(outmodel.name)
assert len(model.graph.node) == 1
def test_fold_constants_single_pass(self):
with util.NamedTemporaryFile() as outmodel:
status = run_polygraphy_surgeon(
[
"sanitize",
ONNX_MODELS["const_foldable"].path,
"-o",
outmodel.name,
"--fold-constants",
"--num-passes=1",
]
)
assert "Pass 1" in status.stdout
assert "Pass 2" not in status.stdout
onnx_model_sanity_check(outmodel.name)
model = onnx.load(outmodel.name)
assert len(model.graph.node) == 1
@pytest.mark.parametrize("new_dim", [1, 2, 3])
def test_override_shapes(self, new_dim):
with util.NamedTemporaryFile() as outmodel:
cmd = [
"sanitize",
ONNX_MODELS["dynamic_identity"].path,
"-o",
outmodel.name,
"--override-input-shapes=X:[1,2,{new_dim},{new_dim}]".format(new_dim=new_dim),
]
run_polygraphy_surgeon(cmd)
onnx_model_sanity_check(outmodel.name)
model = onnx.load(outmodel.name)
shape = []
for dim in model.graph.input[0].type.tensor_type.shape.dim:
assert isinstance(dim.dim_value, int) and dim.dim_value >= 0
shape.append(dim.dim_value)
assert shape == [1, 2, new_dim, new_dim]
def test_override_shapes_no_clear_const_tensors_meta(self):
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_surgeon(
[
"sanitize",
ONNX_MODELS["const_foldable"].path,
"-o",
outmodel.name,
"--override-input-shapes=input:[1,3]",
]
)
def test_override_shapes_partial_inputs(self):
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_surgeon(
[
"sanitize",
ONNX_MODELS["dynamic_identity"].path,
"-o",
outmodel.name,
"--override-input-shapes=Y:[1,2,3,4]",
]
)
model = onnx.load(outmodel.name)
assert model.graph.input[0].type.tensor_type.shape.dim[2].dim_param == "height"
assert model.graph.input[0].type.tensor_type.shape.dim[3].dim_param == "width"
def test_override_shapes_no_reorder(self):
with util.NamedTemporaryFile() as outmodel:
run_polygraphy_surgeon(
[
"sanitize",
ONNX_MODELS["reducable"].path,
"-o",
outmodel.name,
"--override-input-shapes",
"Y0:[5]",
"X0:[5]",
]
)
model = onnx.load(outmodel.name)
assert model.graph.input[0].name == "X0"
assert model.graph.input[1].name == "Y0"
def test_modify_onnx_outputs(self):
with util.NamedTemporaryFile(suffix=".onnx") as outmodel:
run_polygraphy_surgeon(
["sanitize", ONNX_MODELS["identity_identity"].path, "-o", outmodel.name, "--outputs", "mark", "all"]
)
model = onnx.load(outmodel.name)
assert len(model.graph.output) == 2
def test_cleanup(self):
with util.NamedTemporaryFile(suffix=".onnx") as outmodel:
run_polygraphy_surgeon(
[
"sanitize",
ONNX_MODELS["identity_identity"].path,
"-o",
outmodel.name,
"--outputs",
"identity_out_0",
"--cleanup",
]
)
model = onnx.load(outmodel.name)
assert len(model.graph.node) == 1
assert model.graph.output[0].name == "identity_out_0"
def test_external_data(self):
with tempfile.TemporaryDirectory() as outdir:
model = ONNX_MODELS["ext_weights"]
outmodel = os.path.join(outdir, "out_model.onnx")
outdata = "ext_weights.data"
assert run_polygraphy_surgeon(
[
"sanitize",
model.path,
"--external-data-dir",
model.ext_data,
"--fold-constants",
"-o",
outmodel,
"--save-external-data",
outdata,
"--external-data-size-threshold=0",
"-vvvvv",
]
)
assert is_file_non_empty(outmodel)
assert is_file_non_empty(os.path.join(outdir, outdata))
assert run_polygraphy_run([outmodel, "--onnxrt", "--external-data-dir", outdir])
def test_force_fallback_shape_inference_will_override_model_shapes(self):
with util.NamedTemporaryFile() as outmodel:
status = run_polygraphy_surgeon(
[
"sanitize",
ONNX_MODELS["dynamic_identity"].path,
"-o",
outmodel.name,
"--force-fallback-shape-inference",
]
)
onnx_model_sanity_check(outmodel.name)
graph = gs.import_onnx(onnx.load(outmodel.name))
# Inputs should become fixed since fallback shape inference is being forced.
assert tuple(graph.inputs[0].shape) == (1, 2, 1, 1)
for tensor in graph.tensors().values():
assert tensor.shape is not None
assert tuple(graph.outputs[0].shape) == (1, 2, 1, 1)
assert not was_shape_inference_run(status)
| TensorRT-master | tools/Polygraphy/tests/tools/test_surgeon.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess as sp
import sys
import os
from polygraphy.logger import G_LOGGER
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
BIN_DIR = os.path.join(ROOT_DIR, "bin")
polygraphy = os.path.join(BIN_DIR, "polygraphy")
def check_subprocess(status):
if status.returncode:
G_LOGGER.critical(status.stdout + status.stderr)
def run_polygraphy(additional_opts=[], *args, **kwargs):
cmd = [sys.executable, polygraphy] + additional_opts
print("Running command: {:}".format(" ".join(cmd)))
status = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE, *args, **kwargs)
status.stdout = status.stdout.decode()
status.stderr = status.stderr.decode()
print(status.stdout)
print(status.stderr)
check_subprocess(status)
return status
def run_subtool(subtool, additional_opts, disable_verbose=False, *args, **kwargs):
opts = [subtool]
opts += additional_opts
if not disable_verbose:
opts += ["-vvvvv"]
return run_polygraphy(opts, *args, **kwargs)
def run_polygraphy_run(additional_opts=[], disable_verbose=False, *args, **kwargs):
return run_subtool("run", additional_opts, disable_verbose, *args, **kwargs)
def run_polygraphy_convert(additional_opts=[], disable_verbose=False, *args, **kwargs):
return run_subtool("convert", additional_opts, disable_verbose, *args, **kwargs)
def run_polygraphy_inspect(additional_opts=[], disable_verbose=False, *args, **kwargs):
return run_subtool("inspect", additional_opts, disable_verbose, *args, **kwargs)
def run_polygraphy_precision(additional_opts=[], disable_verbose=False, *args, **kwargs):
return run_subtool("precision", additional_opts, disable_verbose, *args, **kwargs)
def run_polygraphy_surgeon(additional_opts=[], disable_verbose=False, *args, **kwargs):
return run_subtool("surgeon", additional_opts, disable_verbose, *args, **kwargs)
def run_polygraphy_template(additional_opts=[], disable_verbose=False, *args, **kwargs):
return run_subtool("template", additional_opts, disable_verbose, *args, **kwargs)
def run_polygraphy_debug(additional_opts=[], disable_verbose=False, *args, **kwargs):
return run_subtool("debug", additional_opts, disable_verbose, *args, **kwargs)
def run_polygraphy_data(additional_opts=[], disable_verbose=False, *args, **kwargs):
return run_subtool("data", additional_opts, disable_verbose, *args, **kwargs)
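# Illustrative usage, mirroring the test files in this directory (the model path below is a
# placeholder, not part of this module):
#   status = run_polygraphy_run(["path/to/identity.onnx", "--onnxrt"])
#   assert "FAILED" not in status.stdout
# Each wrapper returns the CompletedProcess with stdout/stderr already decoded and its
# return code verified by check_subprocess().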
| TensorRT-master | tools/Polygraphy/tests/tools/common.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import tempfile
from textwrap import dedent
import pytest
import tensorrt as trt
from polygraphy import mod, util
from tests.models.meta import ONNX_MODELS, TF_MODELS
from tests.tools.common import run_polygraphy_inspect, run_polygraphy_run
@pytest.fixture(scope="session", params=["none", "basic", "attrs", "full"])
def run_inspect_model(request):
yield lambda additional_opts: run_polygraphy_inspect(
["model"] + ["--mode={:}".format(request.param)] + additional_opts
)
@pytest.fixture(scope="session")
def identity_engine():
with util.NamedTemporaryFile() as outpath:
run_polygraphy_run([ONNX_MODELS["identity"].path, "--trt", "--save-engine", outpath.name])
yield outpath.name
def check_lines_match(actual, expected, should_check_line=lambda x: True):
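"""
Compares actual vs. expected output line by line, skipping lines that contain 'Loading'
and ignoring trailing whitespace. `should_check_line` can be used to exclude
nondeterministic lines (e.g. tactic values) from the comparison.
"""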
print("Actual output:\n{:}".format(actual))
actual = [line for line in actual.splitlines() if "Loading" not in line]
expected = expected.splitlines()
assert len(actual) == len(expected)
for acline, exline in zip(actual, expected):
acline = acline.rstrip()
exline = exline.rstrip()
print("Checking line : {:}".format(acline))
print("Expecting line: {:}".format(exline))
if should_check_line(exline):
assert acline == exline
# List[model, mode, expected_output]
ONNX_CASES = [
[
"identity",
"none",
r"""
[I] ==== ONNX Model ====
Name: test_identity | Opset: 8
---- 1 Graph Input(s) ----
{x [dtype=float32, shape=(1, 1, 2, 2)]}
---- 1 Graph Output(s) ----
{y [dtype=float32, shape=(1, 1, 2, 2)]}
---- 0 Initializer(s) ----
---- 1 Node(s) ----
""",
],
[
"identity",
"basic",
r"""
[I] ==== ONNX Model ====
Name: test_identity | Opset: 8
---- 1 Graph Input(s) ----
{x [dtype=float32, shape=(1, 1, 2, 2)]}
---- 1 Graph Output(s) ----
{y [dtype=float32, shape=(1, 1, 2, 2)]}
---- 0 Initializer(s) ----
{}
---- 1 Node(s) ----
Node 0 | [Op: Identity]
{x [dtype=float32, shape=(1, 1, 2, 2)]}
-> {y [dtype=float32, shape=(1, 1, 2, 2)]}
""",
],
[
"identity_with_initializer",
"basic",
r"""
[I] ==== ONNX Model ====
Name: onnx_graphsurgeon | Opset: 11
---- 0 Graph Input(s) ----
{}
---- 1 Graph Output(s) ----
{Y [dtype=float32, shape=(2, 2)]}
---- 1 Initializer(s) ----
{X [dtype=float32, shape=(2, 2)]}
---- 1 Node(s) ----
Node 0 | [Op: Identity]
{Initializer | X [dtype=float32, shape=(2, 2)]}
-> {Y [dtype=float32, shape=(2, 2)]}
""",
],
[
"identity_with_initializer",
"full",
r"""
[I] ==== ONNX Model ====
Name: onnx_graphsurgeon | Opset: 11
---- 0 Graph Input(s) ----
{}
---- 1 Graph Output(s) ----
{Y [dtype=float32, shape=(2, 2)]}
---- 1 Initializer(s) ----
Initializer | X [dtype=float32, shape=[2, 2]] | Values:
[[1. 1.]
[1. 1.]]
---- 1 Node(s) ----
Node 0 | [Op: Identity]
{Initializer | X [dtype=float32, shape=(2, 2)]}
-> {Y [dtype=float32, shape=(2, 2)]}
""",
],
[
"tensor_attr",
"basic",
r"""
[I] ==== ONNX Model ====
Name: onnx_graphsurgeon | Opset: 11
---- 0 Graph Input(s) ----
{}
---- 1 Graph Output(s) ----
{const_out [dtype=float32, shape=(14, 14)]}
---- 0 Initializer(s) ----
{}
---- 1 Node(s) ----
Node 0 | [Op: Constant]
{} -> {const_out [dtype=float32, shape=(14, 14)]}
""",
],
[
"tensor_attr",
"attrs",
r"""
[I] ==== ONNX Model ====
Name: onnx_graphsurgeon | Opset: 11
---- 0 Graph Input(s) ----
{}
---- 1 Graph Output(s) ----
{const_out [dtype=float32, shape=(14, 14)]}
---- 0 Initializer(s) ----
{}
---- 1 Node(s) ----
Node 0 | [Op: Constant]
{} -> {const_out [dtype=float32, shape=(14, 14)]}
---- Attributes ----
value = Tensor: [dtype=float32, shape=[14, 14]]
""",
],
[
"tensor_attr",
"full",
r"""
[I] ==== ONNX Model ====
Name: onnx_graphsurgeon | Opset: 11
---- 0 Graph Input(s) ----
{}
---- 1 Graph Output(s) ----
{const_out [dtype=float32, shape=(14, 14)]}
---- 0 Initializer(s) ----
{}
---- 1 Node(s) ----
Node 0 | [Op: Constant]
{} -> {const_out [dtype=float32, shape=(14, 14)]}
---- Attributes ----
value = Tensor: [dtype=float32, shape=[14, 14]] | Values:
[[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]]
""",
],
[
"scan",
"full",
r"""
[I] ==== ONNX Model ====
Name: graph | Opset: 10
---- 2 Graph Input(s) ----
{initial [dtype=float32, shape=(2,)],
x [dtype=float32, shape=(3, 2)]}
---- 2 Graph Output(s) ----
{y [dtype=float32, shape=(2,)],
z [dtype=float32, shape=(3, 2)]}
---- 0 Initializer(s) ----
{}
---- 1 Node(s) ----
Node 0 | [Op: Scan]
{initial [dtype=float32, shape=(2,)],
x [dtype=float32, shape=(3, 2)]}
-> {y [dtype=float32, shape=(2,)],
z [dtype=float32, shape=(3, 2)]}
---- Attributes ----
body =
---- 2 Subgraph Input(s) ----
{sum_in [dtype=float32, shape=(2,)],
next [dtype=float32, shape=(2,)]}
---- 2 Subgraph Output(s) ----
{sum_out [dtype=float32, shape=(2,)],
scan_out [dtype=float32, shape=(2,)]}
---- 0 Initializer(s) ----
{}
---- 2 Node(s) ----
Node 0 | [Op: Add]
{sum_in [dtype=float32, shape=(2,)],
next [dtype=float32, shape=(2,)]}
-> {sum_out [dtype=float32, shape=(2,)]}
Node 1 | [Op: Identity]
{sum_out [dtype=float32, shape=(2,)]}
-> {scan_out [dtype=float32, shape=(2,)]}
num_scan_inputs = 1
""",
],
[
"dim_param",
"basic",
r"""
[I] ==== ONNX Model ====
Name: tf2onnx | Opset: 10
---- 1 Graph Input(s) ----
{Input:0 [dtype=float32, shape=('dim0', 16, 128)]}
---- 1 Graph Output(s) ----
{Output:0 [dtype=float32, shape=('dim0', 16, 128)]}
---- 0 Initializer(s) ----
{}
---- 1 Node(s) ----
Node 0 | [Op: Identity]
{Input:0 [dtype=float32, shape=('dim0', 16, 128)]}
-> {Output:0 [dtype=float32, shape=('dim0', 16, 128)]}
""",
],
]
# List[model, expected_files, expected_output]
TEST_CAPABILITY_CASES = [
(
"capability",
[
"results.txt",
"supported_subgraph-nodes-1-1.onnx",
"supported_subgraph-nodes-3-3.onnx",
"unsupported_subgraph-nodes-0-0.onnx",
"unsupported_subgraph-nodes-2-2.onnx",
"unsupported_subgraph-nodes-4-4.onnx",
],
"""
[I] ===== Summary =====
Operator | Count | Reason | Nodes
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
FAKE! | 2 | In node 0 (importFallbackPluginImporter): UNSUPPORTED_NODE: Assertion failed: creator && "Plugin not found, are the plugin name, version, and namespace correct?" | [[0, 1], [2, 3]]
FAKER! | 1 | In node 0 (importFallbackPluginImporter): UNSUPPORTED_NODE: Assertion failed: creator && "Plugin not found, are the plugin name, version, and namespace correct?" | [[4, 5]]
""",
),
(
"identity_identity",
[],
"""
Graph is fully supported by TensorRT; Will not generate subgraphs.
""",
),
]
class TestCapability(object):
@pytest.mark.skipif(
mod.version(trt.__version__) < mod.version("8.0"), reason="supports_model API not available before TRT 8.0"
)
@pytest.mark.parametrize("case", TEST_CAPABILITY_CASES, ids=lambda case: case[0])
def test_capability(self, case):
model, expected_files, expected_summary = case
with tempfile.TemporaryDirectory() as outdir:
status = run_polygraphy_inspect(
["capability", ONNX_MODELS[model].path, "-o", os.path.join(outdir, "subgraphs")],
)
assert sorted(map(os.path.basename, glob.glob(os.path.join(outdir, "subgraphs", "**")))) == sorted(
expected_files
)
assert dedent(expected_summary).strip() in status.stdout
class TestInspectModel(object):
@pytest.mark.parametrize("case", ONNX_CASES, ids=lambda case: "{:}-{:}".format(case[0], case[1]))
def test_model_onnx(self, case):
model, mode, expected = case
status = run_polygraphy_inspect(
["model", ONNX_MODELS[model].path, "--mode={:}".format(mode)], disable_verbose=True
)
expected = dedent(expected).strip()
actual = "\n".join(status.stdout.splitlines()[1:]) # Ignore loading message
check_lines_match(actual, expected)
@pytest.mark.parametrize("model", ["identity", "scan", "tensor_attr"])
def test_model_trt_sanity(self, run_inspect_model, model):
import tensorrt as trt
if model == "tensor_attr" and mod.version(trt.__version__) < mod.version("7.2"):
pytest.skip("Models with constant outputs were not supported before 7.2")
if model == "scan" and mod.version(trt.__version__) < mod.version("7.0"):
pytest.skip("Scan was not supported until 7.0")
run_inspect_model([ONNX_MODELS[model].path, "--display-as=trt"])
def test_model_trt_network_script(self):
script = dedent(
"""
from polygraphy.backend.trt import CreateNetwork
from polygraphy import func
import tensorrt as trt
@func.extend(CreateNetwork())
def load_network(builder, network):
inp = network.add_input("input", dtype=trt.float32, shape=(1, 1))
out = network.add_identity(inp).get_output(0)
network.mark_output(out)
"""
)
with util.NamedTemporaryFile("w+", suffix=".py") as f:
f.write(script)
f.flush()
run_polygraphy_inspect(["model", f.name])
def test_model_trt_engine_sanity(self, run_inspect_model, identity_engine):
run_inspect_model([identity_engine, "--model-type=engine"])
def test_model_tf_sanity(self, run_inspect_model):
run_inspect_model([TF_MODELS["identity"].path, "--model-type=frozen"])
class TestInspectData(object):
@pytest.mark.parametrize("opts", [[], ["--show-values"]])
def test_outputs(self, opts):
with util.NamedTemporaryFile() as outpath:
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--save-outputs", outpath.name])
run_polygraphy_inspect(["data", outpath.name] + opts)
@pytest.mark.parametrize("opts", [[], ["--show-values"]])
def test_inputs(self, opts):
with util.NamedTemporaryFile() as outpath:
run_polygraphy_run([ONNX_MODELS["identity"].path, "--onnxrt", "--save-inputs", outpath.name])
run_polygraphy_inspect(["data", outpath.name] + opts)
TACTIC_REPLAY_CASES = [
[
"identity",
r"""
[I] Layer: node_of_y
Algorithm: (Implementation: -2147483642, Tactic: 0) | Inputs: (('TensorFormat.LINEAR', 'DataType.FLOAT'),) | Outputs: (('TensorFormat.LINEAR', 'DataType.FLOAT'),)
""",
],
]
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
class TestInspectTactics(object):
@pytest.mark.parametrize("case", TACTIC_REPLAY_CASES, ids=lambda case: case[0])
def test_show_tactics(self, case):
with util.NamedTemporaryFile() as replay:
model_name, expected = case
run_polygraphy_run([ONNX_MODELS[model_name].path, "--trt", "--save-tactics", replay.name])
status = run_polygraphy_inspect(["tactics", replay.name], disable_verbose=True)
expected = dedent(expected).strip()
actual = status.stdout
check_lines_match(actual, expected, should_check_line=lambda line: "Algorithm: " not in line)
| TensorRT-master | tools/Polygraphy/tests/tools/test_inspect.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A totally fake checker script that lets us test the functionality
of `debug reduce`, which reduces ONNX models to minimal failing subgraphs,
by simulating failures in arbitrary nodes.
"""
import argparse
import sys
import onnx
def main():
parser = argparse.ArgumentParser(description="Makes Polygraphy think a node in a model is failing")
parser.add_argument("model", help="The ONNX model")
parser.add_argument(
"--fail-node",
help="The name(s) of the node(s) that 'fails'. "
"If multiple nodes are specified, they must all be present to cause a failure.",
required=True,
nargs="+",
)
parser.add_argument(
"--default-return-code", help="The return code to use when there are no failures. ", default=0, type=int
)
parser.add_argument(
"--fail-return-code", help="The return code to use when there is a failure. ", default=1, type=int
)
args = parser.parse_args()
model = onnx.load(args.model)
print(model)
node_names = [node.name for node in model.graph.node]
if all(fail_node in node_names for fail_node in args.fail_node):
# Alternate error messages to test --fail-regex
if "onnx_graphsurgeon_node_1" in node_names:
print("REALLY BAD!")
else:
print("FOUND A BAD NODE!")
return args.fail_return_code
return args.default_return_code
if __name__ == "__main__":
sys.exit(main())
| TensorRT-master | tools/Polygraphy/tests/tools/fake_reduce_checker.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from polygraphy import util
from tests.models.meta import ONNX_MODELS
from tests.tools.common import run_polygraphy_data, run_polygraphy_run
class TestToInput(object):
def test_merge_inputs_outputs(self):
with util.NamedTemporaryFile() as inps, util.NamedTemporaryFile() as outs, util.NamedTemporaryFile() as merged:
run_polygraphy_run(
[ONNX_MODELS["identity"].path, "--onnxrt", "--save-inputs", inps.name, "--save-outputs", outs.name],
disable_verbose=True,
)
run_polygraphy_data(["to-input", inps.name, outs.name, "-o", merged.name])
merged_data = util.load_json(merged.name)
assert len(merged_data) == 1
assert list(merged_data[0].keys()) == ["x", "y"]
assert all(isinstance(val, np.ndarray) for val in merged_data[0].values())
| TensorRT-master | tools/Polygraphy/tests/tools/test_data.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
from polygraphy import util
from polygraphy.backend.common import InvokeFromScript
from polygraphy.backend.trt import create_network
from tests.models.meta import ONNX_MODELS
from tests.tools.common import run_polygraphy_template
class TestTrtNetwork(object):
def test_no_model_file(self):
with util.NamedTemporaryFile("w+", suffix=".py") as template:
run_polygraphy_template(["trt-network", "-o", template.name])
load_network = InvokeFromScript(template.name, "load_network")
builder, network = load_network()
with builder, network:
assert isinstance(builder, trt.Builder)
assert isinstance(network, trt.INetworkDefinition)
def test_with_model_file(self):
with util.NamedTemporaryFile("w+", suffix=".py") as template:
run_polygraphy_template(["trt-network", ONNX_MODELS["identity"].path, "-o", template.name])
load_network = InvokeFromScript(template.name, "load_network")
builder, network, parser = load_network()
with builder, network, parser:
assert isinstance(builder, trt.Builder)
assert isinstance(network, trt.INetworkDefinition)
assert isinstance(parser, trt.OnnxParser)
class TestTrtConfig(object):
def test_no_opts(self):
with util.NamedTemporaryFile("w+", suffix=".py") as template:
run_polygraphy_template(["trt-config", "-o", template.name])
builder, network = create_network()
create_config = InvokeFromScript(template.name, "load_config")
with builder, network, create_config(builder, network) as config:
assert isinstance(config, trt.IBuilderConfig)
def test_opts_basic(self):
with util.NamedTemporaryFile("w+", suffix=".py") as template:
run_polygraphy_template(["trt-config", "--fp16", "--int8", "-o", template.name])
builder, network = create_network()
create_config = InvokeFromScript(template.name, "load_config")
with builder, network, create_config(builder, network) as config:
assert isinstance(config, trt.IBuilderConfig)
assert config.get_flag(trt.BuilderFlag.FP16)
assert config.get_flag(trt.BuilderFlag.INT8)
| TensorRT-master | tools/Polygraphy/tests/tools/test_template.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
from polygraphy.tools.args import ModelArgs
from tests.tools.args.helper import ArgGroupTestHelper
@pytest.fixture()
def group():
return ArgGroupTestHelper(ModelArgs())
class TestModelArgs(object):
def test_model_file(self, group):
group.parse_args([])
assert group.model_file is None
assert group.model_type is None
group.parse_args(["model.onnx"])
assert group.model_file == os.path.abspath("model.onnx")
assert group.model_type.is_onnx()
def test_input_shapes(self, group):
group.parse_args(["--input-shapes", "test0:[1,1]", "test1:[10]", "test:2:[25,301]", "test3:[]"])
assert group.input_shapes["test0"].shape == (1, 1)
assert group.input_shapes["test1"].shape == (10,)
assert group.input_shapes["test:2"].shape == (25, 301)
assert group.input_shapes["test3"].shape == tuple()
def test_fixed_model_type(self):
group = ArgGroupTestHelper(ModelArgs(model_type="onnx"))
group.parse_args(["model.pb"])
assert group.model_type.is_onnx()
| TensorRT-master | tools/Polygraphy/tests/tools/args/test_model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from textwrap import dedent
import numpy as np
import pytest
from polygraphy import util
from polygraphy.common import TensorMetadata
from polygraphy.exception import PolygraphyException
from polygraphy.tools.args import DataLoaderArgs, ModelArgs
from tests.tools.args.helper import ArgGroupTestHelper
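# Each case is (CLI args, attribute names to check, expected values on the arg group[, expected
# values on the data loader]). When the fourth element is omitted, the data loader is expected to
# report the same values as the arg group.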
ARG_CASES = [
(["--seed=123"], ["seed"], [123]),
(["--int-min=23", "--int-max=94"], ["int_range"], [(23, 94)]),
(["--float-min=2.3", "--float-max=9.4"], ["float_range"], [(2.3, 9.4)]),
([], ["val_range"], [None], [(0.0, 1.0)]), # When not specified, this should default to None.
(["--val-range", "[0.0,2.3]"], ["val_range"], [{"": (0.0, 2.3)}]),
(["--val-range", "[1,5]"], ["val_range"], [{"": (1, 5)}]), # Should work for integral quantities
(["--val-range", "inp0:[0.0,2.3]", "inp1:[4.5,9.6]"], ["val_range"], [{"inp0": (0.0, 2.3), "inp1": (4.5, 9.6)}]),
(
["--val-range", "[-1,0]", "inp0:[0.0,2.3]", "inp1:[4.5,9.6]"],
["val_range"],
[{"": (-1, 0), "inp0": (0.0, 2.3), "inp1": (4.5, 9.6)}],
),
(["--val-range", "))):[0.0,2.3]"], ["val_range"], [{")))": (0.0, 2.3)}]),
(["--val-range", "'\"':[0.0,2.3]"], ["val_range"], [{"'\"'": (0.0, 2.3)}]),
(["--iterations=12"], ["iterations"], [12]),
]
class TestDataLoaderArgs(object):
@pytest.mark.parametrize("case", ARG_CASES, ids=lambda c: c[1][0])
def test_parsing(self, case):
arg_group = ArgGroupTestHelper(DataLoaderArgs())
cli_args, attrs, expected, expected_dl = util.unpack_args(case, 4)
expected_dl = expected_dl or expected
arg_group.parse_args(cli_args)
data_loader = arg_group.get_data_loader()
for attr, exp, exp_dl in zip(attrs, expected, expected_dl):
assert getattr(arg_group, attr) == exp
assert getattr(data_loader, attr) == exp_dl
def test_input_metadata(self):
arg_group = ArgGroupTestHelper(DataLoaderArgs(), deps=[ModelArgs()])
arg_group.parse_args(["--input-shapes", "test0:[1,1,1]", "test1:[2,32,2]"])
data_loader = arg_group.get_data_loader()
for feed_dict in data_loader:
assert feed_dict["test0"].shape == (1, 1, 1)
assert feed_dict["test1"].shape == (2, 32, 2)
def test_override_input_metadata(self):
arg_group = ArgGroupTestHelper(DataLoaderArgs(), deps=[ModelArgs()])
arg_group.parse_args([])
data_loader = arg_group.get_data_loader(
user_input_metadata=TensorMetadata().add("test0", dtype=np.float32, shape=(4, 4))
)
for feed_dict in data_loader:
assert feed_dict["test0"].shape == (4, 4)
def test_data_loader_script(self):
arg_group = ArgGroupTestHelper(DataLoaderArgs())
with util.NamedTemporaryFile("w+", suffix=".py") as f:
f.write(
dedent(
"""
import numpy as np
def my_load_data():
for _ in range(5):
yield {"inp": np.ones((3, 5), dtype=np.float32) * 6.4341}
"""
)
)
f.flush()
arg_group.parse_args(["--data-loader-script", f.name, "--data-loader-func-name=my_load_data"])
assert arg_group.data_loader_script == f.name
assert arg_group.data_loader_func_name == "my_load_data"
data_loader = arg_group.get_data_loader()
data = list(data_loader)
assert len(data) == 5
assert all(np.all(d["inp"] == np.ones((3, 5), dtype=np.float32) * 6.4341) for d in data)
@pytest.mark.parametrize(
"opts,expected_err",
[
(["--val-range", "x:[y,2]"], "could not be parsed as a number"),
(["--val-range", "x:[1,2,3]"], "expected to receive exactly 2 values, but received 3"),
],
)
def test_val_range_errors(self, opts, expected_err):
arg_group = ArgGroupTestHelper(DataLoaderArgs())
with pytest.raises(PolygraphyException, match=expected_err):
arg_group.parse_args(opts)
| TensorRT-master | tools/Polygraphy/tests/tools/args/test_data_loader.py |
TensorRT-master | tools/Polygraphy/tests/tools/args/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from polygraphy.exception import PolygraphyException
from polygraphy.tools.args import util as args_util
from polygraphy.tools.script import inline, safe
@pytest.mark.parametrize("name", ["input", "input:0"])
class TestParseMeta(object):
def test_parse_legacy(self, name): # Legacy argument format used comma.
meta_args = ["{:},1x3x224x224".format(name)]
meta = args_util.parse_meta(meta_args, includes_dtype=False)
assert meta[name].shape == (1, 3, 224, 224)
assert meta[name].dtype is None
def test_parse_shape_only(self, name):
meta_args = ["{name}:[1,3,224,224]".format(name=name)]
meta = args_util.parse_meta(meta_args, includes_dtype=False)
assert meta[name].shape == (1, 3, 224, 224)
assert meta[name].dtype is None
def test_parse_empty_shape(self, name):
meta_args = ["{name}:[0,3,0,224]".format(name=name)]
meta = args_util.parse_meta(meta_args, includes_dtype=False)
assert meta[name].shape == (0, 3, 0, 224)
assert meta[name].dtype is None
def test_parse_shape_scalar(self, name):
meta_args = ["{name}:[]".format(name=name)]
meta = args_util.parse_meta(meta_args, includes_dtype=False)
assert meta[name].shape == tuple()
def test_parse_shape_single_dim(self, name):
meta_args = ["{name}:[1]".format(name=name)]
meta = args_util.parse_meta(meta_args, includes_dtype=False)
assert meta[name].shape == (1,)
def test_parse_dtype_only(self, name):
meta_args = ["{name}:float32".format(name=name)]
meta = args_util.parse_meta(meta_args, includes_shape=False)
assert meta[name].shape is None
assert meta[name].dtype == np.float32
def test_parse_shape_dtype(self, name):
meta_args = ["{name}:[1,3,224,224]:float32".format(name=name)]
meta = args_util.parse_meta(meta_args)
assert meta[name].shape == (1, 3, 224, 224)
assert meta[name].dtype == np.float32
def test_parse_shape_dtype_auto(self, name):
meta_args = ["{name}:auto:auto".format(name=name)]
meta = args_util.parse_meta(meta_args)
assert meta[name].shape is None
assert meta[name].dtype is None
@pytest.mark.parametrize("quote", ['"', "'", ""])
def test_parse_shape_with_dim_param_quoted(self, name, quote):
meta_args = ["{name}:[{quote}batch{quote},3,224,224]".format(name=name, quote=quote)]
meta = args_util.parse_meta(meta_args, includes_dtype=False)
assert meta[name].shape == ("batch", 3, 224, 224)
class TestRunScript(object):
def test_default_args(self):
def script_add(script, arg0=0, arg1=0):
result_name = safe("result")
script.append_suffix(safe("{:} = {:} + {:}", inline(result_name), arg0, arg1))
return result_name
assert args_util.run_script(script_add) == 0
assert args_util.run_script(script_add, 1) == 1
assert args_util.run_script(script_add, 1, 2) == 3
class TestParseNumBytes(object):
def test_none(self):
assert args_util.parse_num_bytes(None) is None
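# Size suffixes are case-insensitive: K, M, and G denote multiples of 2**10, 2**20, and 2**30
# bytes, while plain integers and scientific notation are parsed directly.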
@pytest.mark.parametrize(
"arg, expected",
[
("16", 16),
("1e9", 1e9),
("2M", 2 << 20),
("2.3m", int(2.3 * (1 << 20))),
("4.3K", int(4.3 * (1 << 10))),
("7k", 7 << 10),
("1G", 1 << 30),
("2.5g", int(2.5 * (1 << 30))),
],
)
def test_num_bytes(self, arg, expected):
assert args_util.parse_num_bytes(arg) == expected
@pytest.mark.parametrize("arg", ["hi", "4.5x", "2.3.4"])
def test_negative(self, arg):
with pytest.raises(
PolygraphyException,
match="Could not convert {:} to a number of bytes".format(arg),
):
args_util.parse_num_bytes(arg)
| TensorRT-master | tools/Polygraphy/tests/tools/args/test_util.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import util
import argparse
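# Test helper that wires an argument group together with its dependencies and a standalone
# argparse parser, so that individual argument groups can be parsed and inspected in isolation.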
class ArgGroupTestHelper(object):
def __init__(self, arg_group, deps=None):
self.deps = util.default(deps, [])
self.arg_group = arg_group
self.parser = argparse.ArgumentParser()
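# Register every dependency with every other dependency, with the group under test, and vice
# versa, so that cross-group lookups behave the same way they do in the full CLI tools.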
for dep in self.deps:
for other_dep in self.deps:
other_dep.register(dep)
self.arg_group.register(dep)
dep.register(self.arg_group)
self.arg_group.check_registered()
for dep in self.deps:
dep.add_to_parser(self.parser)
self.arg_group.add_to_parser(self.parser)
def parse_args(self, cli_args):
args = self.parser.parse_args(cli_args)
for dep in self.deps:
dep.parse(args)
self.arg_group.parse(args)
return args
def __getattr__(self, name):
if name in ["arg_group", "parser"]:
return super().__getattr__(name)
return getattr(self.arg_group, name)
| TensorRT-master | tools/Polygraphy/tests/tools/args/helper.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from polygraphy.exception import PolygraphyException
from polygraphy.tools.args import ComparatorCompareArgs
from tests.tools.args.helper import ArgGroupTestHelper
class TestComparatorCompare(object):
@pytest.mark.parametrize("check_error_stat", ["max", "median", "mean", "elemwise"])
def test_error_stat(self, check_error_stat):
arg_group = ArgGroupTestHelper(ComparatorCompareArgs())
arg_group.parse_args(["--check-error-stat={:}".format(check_error_stat)])
assert arg_group.check_error_stat == {"": check_error_stat}
@pytest.mark.parametrize(
"args, expected",
[
(["mean", "output0:median", "output1:max"], {"": "mean", "output0": "median", "output1": "max"}),
(["output0:median", "output1:elemwise"], {"output0": "median", "output1": "elemwise"}),
],
)
def test_error_stat_per_output(self, args, expected):
arg_group = ArgGroupTestHelper(ComparatorCompareArgs())
arg_group.parse_args(["--check-error-stat"] + args)
assert arg_group.check_error_stat == expected
@pytest.mark.parametrize(
"args",
[
["not-a-stat"],
["output0:fake"],
],
)
def test_invalid_error_stat(self, args):
with pytest.raises(PolygraphyException, match="Invalid choice"):
arg_group = ArgGroupTestHelper(ComparatorCompareArgs())
arg_group.parse_args(["--check-error-stat"] + args)
| TensorRT-master | tools/Polygraphy/tests/tools/args/test_comparator.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
import pytest
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import LoggerArgs
from tests.tools.args.helper import ArgGroupTestHelper
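# Maps verbosity flags to the expected logger severity: each extra -q makes the logger one level
# quieter, each extra -v makes it one level more verbose, and --silent selects the quietest level.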
VERBOSITY_CASES = {
"--silent": G_LOGGER.CRITICAL,
"-qqqqq": G_LOGGER.CRITICAL,
"-qqqq": G_LOGGER.ERROR,
"-qqq": G_LOGGER.WARNING,
"-qq": G_LOGGER.FINISH,
"-q": G_LOGGER.START,
"-v": G_LOGGER.VERBOSE,
"-vv": G_LOGGER.EXTRA_VERBOSE,
"-vvv": G_LOGGER.SUPER_VERBOSE,
"-vvvv": G_LOGGER.ULTRA_VERBOSE,
}
class TestLoggerArgs(object):
@pytest.mark.parametrize("case", VERBOSITY_CASES.items())
def test_get_logger_verbosities(self, case):
arg_group = ArgGroupTestHelper(LoggerArgs())
flag, sev = case
arg_group.parse_args([flag])
logger = arg_group.get_logger()
assert logger.severity == sev
def test_logger_log_file(self):
arg_group = ArgGroupTestHelper(LoggerArgs())
with tempfile.TemporaryDirectory() as dirname:
log_path = os.path.join(dirname, "fake_log_file.log")
arg_group.parse_args(["--log-file", log_path])
logger = arg_group.get_logger()
assert logger.log_file == log_path
| TensorRT-master | tools/Polygraphy/tests/tools/args/test_logger.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import tempfile
import pytest
from polygraphy import util
from polygraphy.backend.onnx import onnx_from_path
from polygraphy.tools.args import DataLoaderArgs, ModelArgs, OnnxLoaderArgs, OnnxSaveArgs, OnnxShapeInferenceArgs
from polygraphy.tools.script import Script
from tests.helper import is_file_empty, is_file_non_empty
from tests.models.meta import ONNX_MODELS
from tests.tools.args.helper import ArgGroupTestHelper
def _check_ext_weights_model(model):
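# The external-weights model is expected to have exactly 3 nodes, and every initializer should be
# non-empty, i.e. its external data must have been loaded successfully.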
assert len(model.graph.node) == 3
for init in model.graph.initializer:
assert init
class TestOnnxLoaderArgs(object):
def test_basic(self):
arg_group = ArgGroupTestHelper(OnnxLoaderArgs(), deps=[ModelArgs()])
arg_group.parse_args([ONNX_MODELS["identity_identity"].path, "--onnx-outputs=identity_out_0"])
model = arg_group.load_onnx()
assert len(model.graph.output) == 1
assert model.graph.output[0].name == "identity_out_0"
def test_external_data(self):
arg_group = ArgGroupTestHelper(OnnxLoaderArgs(), deps=[ModelArgs()])
model = ONNX_MODELS["ext_weights"]
arg_group.parse_args([model.path, "--external-data-dir", model.ext_data])
model = arg_group.load_onnx()
_check_ext_weights_model(model)
def test_shape_inference(self):
# When using shape inference, we should load directly from the path
arg_group = ArgGroupTestHelper(OnnxLoaderArgs(), deps=[ModelArgs(), OnnxShapeInferenceArgs()])
model = ONNX_MODELS["identity"]
arg_group.parse_args([model.path, "--shape-inference"])
assert arg_group.should_use_onnx_loader()
script = Script()
arg_group.add_onnx_loader(script)
expected_loader = "InferShapes({:})".format(repr(model.path))
assert expected_loader in str(script)
def test_shape_inference_ext_data(self):
arg_group = ArgGroupTestHelper(OnnxLoaderArgs(), deps=[ModelArgs(), OnnxShapeInferenceArgs()])
model = ONNX_MODELS["ext_weights"]
arg_group.parse_args([model.path, "--external-data-dir", model.ext_data, "--shape-inference"])
assert arg_group.should_use_onnx_loader()
script = Script()
arg_group.add_onnx_loader(script)
expected_loader = "InferShapes({:}, external_data_dir={:})".format(repr(model.path), repr(model.ext_data))
assert expected_loader in str(script)
model = arg_group.load_onnx()
_check_ext_weights_model(model)
class TestOnnxSaveArgs(object):
def test_defaults(self):
arg_group = ArgGroupTestHelper(OnnxSaveArgs(), deps=[ModelArgs(), OnnxLoaderArgs()])
arg_group.parse_args([])
assert arg_group.size_threshold is None
def test_external_data(self):
model = onnx_from_path(ONNX_MODELS["const_foldable"].path)
arg_group = ArgGroupTestHelper(OnnxSaveArgs(), deps=[ModelArgs(), OnnxLoaderArgs()])
with util.NamedTemporaryFile() as path, util.NamedTemporaryFile() as data:
arg_group.parse_args(
["-o", path.name, "--save-external-data", data.name, "--external-data-size-threshold=0"]
)
arg_group.save_onnx(model)
assert is_file_non_empty(path.name)
assert is_file_non_empty(data.name)
def test_size_threshold(self):
model = onnx_from_path(ONNX_MODELS["const_foldable"].path)
arg_group = ArgGroupTestHelper(OnnxSaveArgs(), deps=[ModelArgs(), OnnxLoaderArgs()])
with util.NamedTemporaryFile() as path, util.NamedTemporaryFile() as data:
arg_group.parse_args(
["-o", path.name, "--save-external-data", data.name, "--external-data-size-threshold=1024"]
)
arg_group.save_onnx(model)
assert is_file_non_empty(path.name)
assert is_file_empty(data.name)
def test_no_all_tensors_to_one_file(self):
model = onnx_from_path(ONNX_MODELS["const_foldable"].path)
arg_group = ArgGroupTestHelper(OnnxSaveArgs(), deps=[ModelArgs(), OnnxLoaderArgs()])
with tempfile.TemporaryDirectory() as outdir:
path = os.path.join(outdir, "model.onnx")
arg_group.parse_args(
[
"-o",
path,
"--save-external-data",
"--external-data-size-threshold=0",
"--no-save-all-tensors-to-one-file",
]
)
arg_group.save_onnx(model)
assert is_file_non_empty(path)
outfiles = glob.glob(os.path.join(outdir, "*"))
assert len(outfiles) == 4
@pytest.mark.parametrize(
"arg, expected",
[
("16", 16),
("1e9", 1e9),
("2M", 2 << 20),
],
)
def test_size_threshold_parsing(self, arg, expected):
arg_group = ArgGroupTestHelper(OnnxSaveArgs(), deps=[ModelArgs(), OnnxLoaderArgs()])
arg_group.parse_args(["--external-data-size-threshold", arg])
assert arg_group.size_threshold == expected
class TestOnnxShapeInferenceArgs(object):
def test_shape_inference_disabled_on_fallback(self):
arg_group = ArgGroupTestHelper(
OnnxShapeInferenceArgs(default=True, enable_force_fallback=True), deps=[DataLoaderArgs()]
)
arg_group.parse_args([])
assert arg_group.do_shape_inference
arg_group.parse_args(["--force-fallback-shape-inference"])
assert not arg_group.do_shape_inference
| TensorRT-master | tools/Polygraphy/tests/tools/args/onnx/test_loader.py |
TensorRT-master | tools/Polygraphy/tests/tools/args/onnx/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import tensorrt as trt
from polygraphy import util
from polygraphy.backend.trt import create_network, engine_bytes_from_network, network_from_onnx_path
from polygraphy.tools.args import (
ModelArgs,
OnnxLoaderArgs,
TrtConfigArgs,
TrtEngineLoaderArgs,
TrtNetworkLoaderArgs,
TrtPluginLoaderArgs,
)
from tests.models.meta import ONNX_MODELS
from tests.tools.args.helper import ArgGroupTestHelper
class TestTrtNetworkLoaderArgs(object):
def test_load_network(self):
arg_group = ArgGroupTestHelper(
TrtNetworkLoaderArgs(), deps=[ModelArgs(), OnnxLoaderArgs(), TrtPluginLoaderArgs()]
)
arg_group.parse_args([ONNX_MODELS["identity_identity"].path, "--trt-outputs=identity_out_0"])
builder, network, parser = arg_group.load_network()
with builder, network:
assert network.num_outputs == 1
assert network.get_output(0).name == "identity_out_0"
@pytest.fixture()
def engine_loader_args():
return ArgGroupTestHelper(
TrtEngineLoaderArgs(),
deps=[ModelArgs(), OnnxLoaderArgs(), TrtConfigArgs(), TrtPluginLoaderArgs(), TrtNetworkLoaderArgs()],
)
class TestTrtEngineLoaderArgs(object):
def test_build_engine(self, engine_loader_args):
engine_loader_args.parse_args([ONNX_MODELS["identity_identity"].path, "--trt-outputs=identity_out_0"])
with engine_loader_args.build_engine() as engine:
assert isinstance(engine, trt.ICudaEngine)
assert len(engine) == 2
assert engine[1] == "identity_out_0"
def test_build_engine_custom_network(self, engine_loader_args):
engine_loader_args.parse_args([])
builder, network = create_network()
inp = network.add_input("input", dtype=trt.float32, shape=(1, 1))
out = network.add_identity(inp).get_output(0)
out.name = "output"
network.mark_output(out)
with builder, network, engine_loader_args.build_engine(network=(builder, network)) as engine:
assert isinstance(engine, trt.ICudaEngine)
assert len(engine) == 2
assert engine[0] == "input"
assert engine[1] == "output"
def test_load_serialized_engine(self, engine_loader_args):
with util.NamedTemporaryFile() as f, engine_bytes_from_network(
network_from_onnx_path(ONNX_MODELS["identity"].path)
) as engine_bytes:
f.write(engine_bytes)
f.flush()
engine_loader_args.parse_args([f.name, "--model-type=engine"])
with engine_loader_args.load_serialized_engine() as engine:
assert isinstance(engine, trt.ICudaEngine)
assert len(engine) == 2
assert engine[0] == "x"
assert engine[1] == "y"
| TensorRT-master | tools/Polygraphy/tests/tools/args/trt/test_loader.py |
TensorRT-master | tools/Polygraphy/tests/tools/args/trt/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from textwrap import dedent
import pytest
import tensorrt as trt
from polygraphy import mod, util
from polygraphy.backend.trt import TacticRecorder, TacticReplayData, TacticReplayer, create_network
from polygraphy.exception import PolygraphyException
from polygraphy.tools.args import DataLoaderArgs, ModelArgs, TrtConfigArgs
from tests.tools.args.helper import ArgGroupTestHelper
@pytest.fixture()
def trt_config_args():
return ArgGroupTestHelper(TrtConfigArgs(), deps=[ModelArgs(), DataLoaderArgs()])
class TestTrtConfigArgs(object):
def test_defaults(self, trt_config_args):
trt_config_args.parse_args([])
assert trt_config_args.workspace is None
def test_create_config(self, trt_config_args):
trt_config_args.parse_args([])
builder, network = create_network()
with builder, network, trt_config_args.create_config(builder, network=network) as config:
assert isinstance(config, trt.IBuilderConfig)
@pytest.mark.parametrize(
"arg, flag",
[
("--int8", "INT8"),
("--fp16", "FP16"),
("--tf32", "TF32"),
("--allow-gpu-fallback", "GPU_FALLBACK"),
],
)
def test_precision_flags(self, trt_config_args, arg, flag):
if flag == "TF32" and mod.version(trt.__version__) < mod.version("7.1"):
pytest.skip("TF32 support was added in 7.1")
trt_config_args.parse_args([arg])
builder, network = create_network()
with builder, network, trt_config_args.create_config(builder, network=network) as config:
assert config.get_flag(getattr(trt.BuilderFlag, flag))
@pytest.mark.parametrize(
"workspace, expected",
[
("16", 16),
("1e9", 1e9),
("2M", 2 << 20),
],
)
def test_workspace(self, trt_config_args, workspace, expected):
trt_config_args.parse_args(["--workspace", workspace])
assert trt_config_args.workspace == expected
builder, network = create_network()
with builder, network, trt_config_args.create_config(builder, network=network) as config:
assert config.max_workspace_size == expected
def test_dla(self, trt_config_args):
trt_config_args.parse_args(["--use-dla"])
assert trt_config_args.use_dla
builder, network = create_network()
with builder, network, trt_config_args.create_config(builder, network=network) as config:
assert config.default_device_type == trt.DeviceType.DLA
assert config.DLA_core == 0
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="SAFETY_SCOPE was added in TRT 8")
def test_restricted_flags(self, trt_config_args):
trt_config_args.parse_args(["--trt-safety-restricted"])
builder, network = create_network()
with builder, network, trt_config_args.create_config(builder, network=network) as config:
assert config.get_flag(getattr(trt.BuilderFlag, "SAFETY_SCOPE"))
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Bugged before TRT 8")
def test_tactic_replay(self, trt_config_args):
with util.NamedTemporaryFile(suffix=".json") as f:
trt_config_args.parse_args(["--tactic-replay", f.name])
builder, network = create_network()
with builder, network, trt_config_args.create_config(builder, network=network) as config:
recorder = config.algorithm_selector
assert recorder.make_func == TacticRecorder
assert recorder.path == f.name
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Bugged before TRT 8")
@pytest.mark.parametrize(
"opt, cls",
[
("--save-tactics", TacticRecorder),
("--load-tactics", TacticReplayer),
],
)
def test_tactics(self, trt_config_args, opt, cls):
with util.NamedTemporaryFile("w+", suffix=".json") as f:
if opt == "--load-tactics":
TacticReplayData().save(f)
trt_config_args.parse_args([opt, f.name])
builder, network = create_network()
with builder, network, trt_config_args.create_config(builder, network=network) as config:
recorder = config.algorithm_selector
assert recorder.make_func == cls
assert recorder.path == f.name
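# The expected values below are bitmasks over trt.TacticSource: CUBLAS is bit 0, CUBLAS_LT is
# bit 1, and (on TRT 8+) CUDNN is bit 2, so enabling all sources yields 3 on TRT 7.x and 7 on TRT 8+.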
if mod.version(trt.__version__) < mod.version("8.0"):
TACTIC_SOURCES_CASES = [
([], 3), # By default, all sources are enabled.
(["--tactic-sources"], 0),
(["--tactic-sources", "CUBLAS"], 1),
(["--tactic-sources", "CUBLAS_LT"], 2),
(["--tactic-sources", "CUblAS", "cublas_lt"], 3), # Not case sensitive
]
else:
TACTIC_SOURCES_CASES = [
([], 7), # By default, all sources are enabled.
(["--tactic-sources"], 0),
(["--tactic-sources", "CUBLAS"], 1),
(["--tactic-sources", "CUBLAS_LT"], 2),
(["--tactic-sources", "CUDNN"], 4),
(["--tactic-sources", "CUblAS", "cublas_lt"], 3), # Not case sensitive
(["--tactic-sources", "CUBLAS", "cuDNN"], 5),
(["--tactic-sources", "CUBLAS_LT", "CUDNN"], 6),
(["--tactic-sources", "CUDNN", "cuBLAS", "CUBLAS_LT"], 7),
]
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.2"), reason="Not available before 7.2")
@pytest.mark.parametrize("opt, expected", TACTIC_SOURCES_CASES)
def test_tactic_sources(self, trt_config_args, opt, expected):
trt_config_args.parse_args(opt)
builder, network = create_network()
with builder, network, trt_config_args.create_config(builder, network=network) as config:
assert config.get_tactic_sources() == expected
@pytest.mark.parametrize("base_class", ["IInt8LegacyCalibrator", "IInt8EntropyCalibrator2"])
def test_calibration_base_class(self, trt_config_args, base_class):
trt_config_args.parse_args(["--int8", "--calibration-base-class", base_class])
assert trt_config_args.calibration_base_class.unwrap() == "trt.{:}".format(base_class)
builder, network = create_network()
with builder, network, trt_config_args.create_config(builder, network=network) as config:
assert isinstance(config.int8_calibrator, getattr(trt, base_class))
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_legacy_calibrator_params(self, trt_config_args):
quantile = 0.25
regression_cutoff = 0.9
trt_config_args.parse_args(
[
"--int8",
"--calibration-base-class=IInt8LegacyCalibrator",
"--quantile",
str(quantile),
"--regression-cutoff",
str(regression_cutoff),
]
)
assert trt_config_args.quantile == quantile
assert trt_config_args.regression_cutoff == regression_cutoff
builder, network = create_network()
with builder, network, trt_config_args.create_config(builder, network=network) as config:
assert config.int8_calibrator.get_quantile() == quantile
assert config.int8_calibrator.get_regression_cutoff() == regression_cutoff
def test_no_deps_profiles_int8(self):
arg_group = ArgGroupTestHelper(TrtConfigArgs())
arg_group.parse_args(
[
"--trt-min-shapes=input:[1,25,25]",
"--trt-opt-shapes=input:[2,25,25]",
"--trt-max-shapes=input:[4,25,25]",
"--int8",
]
)
for (min_shapes, opt_shapes, max_shapes) in arg_group.profile_dicts:
assert min_shapes["input"] == [1, 25, 25]
assert opt_shapes["input"] == [2, 25, 25]
assert max_shapes["input"] == [4, 25, 25]
builder, network = create_network()
with builder, network, arg_group.create_config(builder, network=network) as config:
assert isinstance(config, trt.IBuilderConfig)
# Unfortunately there is no API to check the contents of the profile in a config.
# The checks above will have to do.
assert config.num_optimization_profiles == 1
assert config.get_flag(trt.BuilderFlag.INT8)
def test_config_script(self):
arg_group = ArgGroupTestHelper(TrtConfigArgs())
with util.NamedTemporaryFile("w+", suffix=".py") as f:
f.write(
dedent(
"""
from polygraphy.backend.trt import CreateConfig
from polygraphy import func
import tensorrt as trt
@func.extend(CreateConfig())
def my_load_config(config):
config.set_flag(trt.BuilderFlag.FP16)
"""
)
)
f.flush()
arg_group.parse_args(["--trt-config-script", f.name, "--trt-config-func-name=my_load_config"])
assert arg_group.trt_config_script == f.name
assert arg_group.trt_config_func_name == "my_load_config"
builder, network = create_network()
with builder, network, arg_group.create_config(builder, network) as config:
assert isinstance(config, trt.IBuilderConfig)
assert config.get_flag(trt.BuilderFlag.FP16)
@pytest.mark.parametrize(
"args",
[
["--int8", "--calibration-base-class", "IInt8LegacyCalibrator'"],
["--int8", "--calibration-base-class", 'IInt8LegacyCalibrator"'],
["--int8", "--calibration-base-class", "IInt8LegacyCalibrator)"],
["--int8", "--calibration-base-class", "IInt8LegacyCalibrator}"],
["--int8", "--calibration-base-class", "IInt8LegacyCalibrator]"],
["--int8", "--calibration-base-class", "IInt8LegacyCalibrator));print(('hi'"],
["--int8", "--calibration-base-class", "IInt8LegacyCalibrator;print(('hi')"],
["--int8", "--calibration-base-class", "IInt8LegacyCalibrator';print('hi')"],
["--tactic-sources", "CUBLAS, fp16=True"],
],
)
def test_code_injection_checks(self, trt_config_args, args):
with pytest.raises(PolygraphyException):
trt_config_args.parse_args(args)
| TensorRT-master | tools/Polygraphy/tests/tools/args/trt/test_config.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from polygraphy.tools.args import ModelArgs, TfLoaderArgs
from tests.models.meta import TF_MODELS
from tests.tools.args.helper import ArgGroupTestHelper
class TestTfLoaderArgs(object):
def test_load_graph(self):
arg_group = ArgGroupTestHelper(TfLoaderArgs(), deps=[ModelArgs()])
arg_group.parse_args([TF_MODELS["identity"].path, "--model-type=frozen"])
graph, outputs = arg_group.load_graph()
assert isinstance(graph, tf.Graph)
assert outputs == ["Identity_2:0"]
| TensorRT-master | tools/Polygraphy/tests/tools/args/tf/test_loader.py |
TensorRT-master | tools/Polygraphy/tests/tools/args/tf/__init__.py |
|
TensorRT-master | tools/Polygraphy/tests/logger/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import util
from polygraphy.logger.logger import Logger
# We don't use the global logger here because we would have to reset the state each time.
class TestLogger(object):
def test_log_file(self):
logger = Logger()
with util.NamedTemporaryFile("w+") as log_file:
logger.log_file = log_file.name
assert logger.log_file == log_file.name
logger.info("Hello")
log_file.seek(0)
assert log_file.read() == "[I] Hello\n"
| TensorRT-master | tools/Polygraphy/tests/logger/test_logger.py |
TensorRT-master | tools/Polygraphy/tests/util/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
import random
import numpy as np
import pytest
from polygraphy import util
VOLUME_CASES = [
((1, 1, 1), 1),
((2, 3, 4), 24),
(tuple(), 1),
]
@pytest.mark.parametrize("case", VOLUME_CASES)
def test_volume(case):
it, vol = case
assert util.volume(it) == vol
class FindInDictCase(object):
def __init__(self, name, map, index, expected):
self.name = name
self.map = map
self.index = index
self.expected = expected
FIND_IN_DICT_CASES = [
FindInDictCase(
"resnet50_v1.5/output/Softmax:0",
map={"resnet50_v1.5/output/Softmax:0": "x"},
index=None,
expected="resnet50_v1.5/output/Softmax:0",
),
FindInDictCase(
"resnet50_v1.5/output/Softmax:0",
map={"resnet50_v1.5/output/softmax:0": "x"},
index=None,
expected="resnet50_v1.5/output/softmax:0",
),
]
@pytest.mark.parametrize("case", FIND_IN_DICT_CASES)
def test_find_in_dict(case):
actual = util.find_in_dict(case.name, case.map, case.index)
assert actual == case.expected
SHAPE_OVERRIDE_CASES = [
((1, 3, 224, 224), (None, 3, 224, 224), True),
]
@pytest.mark.parametrize("case", SHAPE_OVERRIDE_CASES)
def test_is_valid_shape_override(case):
override, shape, expected = case
assert util.is_valid_shape_override(new_shape=override, original_shape=shape) == expected
def arange(shape):
return np.arange(util.volume(shape)).reshape(shape)
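# Each case is (input array, target shape, expected result) for util.try_match_shape, which may
# squeeze, unsqueeze, permute, or reshape the array to match the requested shape.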
SHAPE_MATCHING_CASES = [
(arange((1, 1, 3, 3)), (3, 3), arange((3, 3))), # Squeeze array shape
(
arange((1, 3, 3, 1)),
(1, 1, 3, 3),
arange((1, 1, 3, 3)),
), # Permutation should make no difference as other dimensions are 1s
(arange((3, 3)), (1, 1, 3, 3), arange((1, 1, 3, 3))), # Unsqueeze where needed
(arange((3, 3)), (-1, 3), arange((3, 3))), # Infer dynamic
(arange((3 * 2 * 2,)), (None, 3, 2, 2), arange((1, 3, 2, 2))), # Reshape with inferred dimension
(arange((1, 3, 2, 2)), (None, 2, 2, 3), np.transpose(arange((1, 3, 2, 2)), [0, 2, 3, 1])), # Permute
]
@pytest.mark.parametrize("arr, shape, expected", SHAPE_MATCHING_CASES)
def test_shape_matching(arr, shape, expected):
arr = util.try_match_shape(arr, shape)
assert np.array_equal(arr, expected)
UNPACK_ARGS_CASES = [
((0, 1, 2), 3, (0, 1, 2)), # no extras
((0, 1, 2), 4, (0, 1, 2, None)), # 1 extra
((0, 1, 2), 2, (0, 1)), # 1 fewer
]
@pytest.mark.parametrize("case", UNPACK_ARGS_CASES)
def test_unpack_args(case):
args, num, expected = case
assert util.unpack_args(args, num) == expected
UNIQUE_LIST_CASES = [
([], []),
([3, 1, 2], [3, 1, 2]),
([1, 2, 3, 2, 1], [1, 2, 3]),
([0, 0, 0, 0, 1, 0, 0], [0, 1]),
([5, 5, 5, 5, 5], [5]),
]
@pytest.mark.parametrize("case", UNIQUE_LIST_CASES)
def test_unique_list(case):
lst, expected = case
assert util.unique_list(lst) == expected
def test_find_in_dirs():
with tempfile.TemporaryDirectory() as topdir:
dirs = list(map(lambda x: os.path.join(topdir, x), ["test0", "test1", "test2", "test3", "test4"]))
for subdir in dirs:
os.makedirs(subdir)
path_dir = random.choice(dirs)
path = os.path.join(path_dir, "cudart64_11.dll")
with open(path, "w") as f:
f.write("This file should be found by find_in_dirs")
assert util.find_in_dirs("cudart64_*.dll", dirs) == [path]
@pytest.mark.parametrize(
"val,key,default,expected",
[
(1.0, None, None, 1.0), # Basic
({"inp": "hi"}, "inp", "", "hi"), # Per-key
({"inp": "hi"}, "out", "default", "default"), # Per-key missing
({"inp": 1.0, "": 2.0}, "out", 1.5, 2.0), # Per-key with default
],
)
def test_value_or_from_dict(val, key, default, expected):
actual = util.value_or_from_dict(val, key, default)
assert actual == expected
| TensorRT-master | tools/Polygraphy/tests/util/test_util.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
import tensorrt as trt
from polygraphy import constants, util
from polygraphy.backend.trt import Algorithm, TacticReplayData
from polygraphy.comparator import IterationResult, RunResults
from polygraphy.exception import PolygraphyException
from polygraphy.json import Decoder, Encoder, from_json, load_json, to_json
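# A minimal custom type used to exercise Polygraphy's JSON machinery: registering encode/decode
# functions with Encoder/Decoder lets instances round-trip through to_json()/from_json().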
class Dummy(object):
def __init__(self, x):
self.x = x
@Encoder.register(Dummy)
def encode(dummy):
return {"x": dummy.x}
@Decoder.register(Dummy)
def decode(dct):
assert len(dct) == 1 # Custom type markers should be removed at this point
return Dummy(x=dct["x"])
class TestEncoder(object):
def test_registered(self):
d = Dummy(x=-1)
d_json = to_json(d)
assert encode(d) == {"x": d.x, constants.TYPE_MARKER: "Dummy"}
expected = '{{\n "x": {:},\n "{:}": "Dummy"\n}}'.format(d.x, constants.TYPE_MARKER)
assert d_json == expected
class TestDecoder(object):
def test_object_pairs_hook(self):
d = Dummy(x=-1)
d_json = to_json(d)
new_d = from_json(d_json)
assert new_d.x == d.x
def make_algo():
return Algorithm(
implementation=4,
tactic=5,
inputs=[(trt.TensorFormat.LINEAR, trt.float32)],
outputs=[(trt.TensorFormat.LINEAR, trt.float32)],
)
def make_iter_result():
return IterationResult(
runtime=4.5,
runner_name="test",
outputs={
"out0": np.random.random_sample((1, 2, 1)),
"out1": np.ones((1, 2), dtype=np.float32),
},
)
JSONABLE_CASES = [
RunResults([("runner0", [make_iter_result()]), ("runner0", [make_iter_result()])]),
TacticReplayData().add("hi", algorithm=make_algo()),
]
class TestImplementations(object):
@pytest.mark.parametrize(
"obj",
[
Algorithm(
implementation=4,
tactic=5,
inputs=[(trt.TensorFormat.LINEAR, trt.float32)],
outputs=[(trt.TensorFormat.LINEAR, trt.float32)],
),
Algorithm(
implementation=4,
tactic=5,
inputs=[(trt.TensorFormat.LINEAR, trt.float32), (trt.TensorFormat.CHW32, trt.int8)],
outputs=[(trt.TensorFormat.CHW32, trt.float16)],
),
np.ones((3, 4, 5), dtype=np.int64),
np.ones(5, dtype=np.int64),
np.zeros((4, 5), dtype=np.float32),
np.random.random_sample((3, 5)),
make_iter_result(),
RunResults([("runner0", [make_iter_result()]), ("runner0", [make_iter_result()])]),
],
ids=lambda x: type(x),
)
def test_serde(self, obj):
encoded = to_json(obj)
decoded = from_json(encoded)
if isinstance(obj, np.ndarray):
assert np.array_equal(decoded, obj)
else:
assert decoded == obj
@pytest.mark.parametrize("obj", JSONABLE_CASES)
def test_to_from_json(self, obj):
encoded = obj.to_json()
decoded = type(obj).from_json(encoded)
assert decoded == obj
@pytest.mark.parametrize("obj", JSONABLE_CASES)
def test_save_load(self, obj):
with util.NamedTemporaryFile("w+") as f:
obj.save(f)
decoded = type(obj).load(f)
assert decoded == obj
def test_cannot_save_load_to_different_types(self):
run_result = JSONABLE_CASES[0]
encoded = run_result.to_json()
with pytest.raises(PolygraphyException, match="JSON cannot be decoded into"):
TacticReplayData.from_json(encoded)
def test_load_json_errors_if_file_nonexistent():
with pytest.raises(FileNotFoundError, match="No such file"):
load_json("polygraphy-nonexistent-path")
| TensorRT-master | tools/Polygraphy/tests/util/test_serde.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.logger import G_LOGGER
from polygraphy.util.format import FormatManager, DataFormat
import pytest
class FormatTestCase:
def __init__(self, shape, format):
self.shape = shape
self.format = format
EXPECTED_FORMATS = [
FormatTestCase((1, 3, 480, 960), DataFormat.NCHW),
FormatTestCase((1, 3, 224, 224), DataFormat.NCHW),
FormatTestCase((1, 224, 224, 3), DataFormat.NHWC),
FormatTestCase((1, 9, 9, 3), DataFormat.NHWC),
]
@pytest.mark.parametrize("test_case", EXPECTED_FORMATS)
def test_format_deduction(test_case):
assert test_case.format == FormatManager.determine_format(test_case.shape)
| TensorRT-master | tools/Polygraphy/tests/util/test_format.py |
TensorRT-master | tools/Polygraphy/tests/cuda/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
import tensorrt as trt
from polygraphy import mod, util
from polygraphy.cuda import DeviceArray, Stream, DeviceView, wrapper, MemcpyKind
from tests.helper import time_func
class TestDeviceView(object):
def test_basic(self):
with DeviceArray(shape=(1, 4, 2), dtype=np.float32) as arr:
v = DeviceView(arr.ptr, arr.shape, arr.dtype)
assert v.ptr == arr.ptr
assert v.shape == arr.shape
assert v.dtype == arr.dtype
assert v.nbytes == arr.nbytes
def test_with_int_ptr(self):
ptr = 74892
v = DeviceView(ptr=ptr, shape=(1,), dtype=np.float32)
assert v.ptr == ptr
def test_copy_to(self):
with DeviceArray((2, 2), dtype=np.float32) as arr:
arr.copy_from(np.ones((2, 2), dtype=np.float32) * 4)
v = DeviceView(arr.ptr, arr.shape, arr.dtype)
host_buf = np.zeros((2, 2), dtype=np.float32)
v.copy_to(host_buf)
assert np.all(host_buf == 4)
def test_numpy(self):
with DeviceArray((2, 2), dtype=np.float32) as arr:
arr.copy_from(np.ones((2, 2), dtype=np.float32) * 4)
v = DeviceView(arr.ptr, arr.shape, arr.dtype)
assert np.all(v.numpy() == 4)
class ResizeTestCase(object):
# *_bytes is the size of the allocated buffer, old/new are the apparent shapes of the buffer.
def __init__(self, old, old_size, new, new_size):
self.old = old
self.old_bytes = old_size * np.float32().itemsize
self.new = new
self.new_bytes = new_size * np.float32().itemsize
RESIZES = [
ResizeTestCase(tuple(), 1, (1, 1, 1), 1), # Reshape (no-op)
ResizeTestCase((2, 2, 2), 8, (1, 1), 8), # Resize to smaller buffer
ResizeTestCase((2, 2, 2), 8, (9, 9), 81), # Resize to larger buffer
]
class TestDeviceBuffer(object):
@pytest.mark.parametrize("shapes", RESIZES)
def test_device_buffer_resize(self, shapes):
with DeviceArray(shapes.old) as buf:
assert buf.allocated_nbytes == shapes.old_bytes
assert buf.shape == shapes.old
buf.resize(shapes.new)
assert buf.allocated_nbytes == shapes.new_bytes
assert buf.shape == shapes.new
@pytest.mark.serial # Sometimes the GPU may run out of memory if too many other tests are also running.
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Breaks TRT 6 tests for some reason")
def test_large_allocation(self):
dtype = np.byte
# See if we can alloc 3GB (bigger than value of signed int)
shape = (3 * 1024 * 1024 * 1024,)
with DeviceArray(shape=shape, dtype=dtype) as buf:
assert buf.allocated_nbytes == util.volume(shape) * np.dtype(dtype).itemsize
def test_device_buffer_memcpy_async(self):
arr = np.ones((1, 384), dtype=np.int32)
with DeviceArray() as buf, Stream() as stream:
buf.copy_from(arr)
new_arr = np.empty((1, 384), dtype=np.int32)
buf.copy_to(new_arr, stream)
stream.synchronize()
assert np.all(new_arr == arr)
def test_device_buffer_memcpy_sync(self):
arr = np.ones((1, 384), dtype=np.int32)
with DeviceArray() as buf:
buf.copy_from(arr)
new_arr = np.empty((1, 384), dtype=np.int32)
buf.copy_to(new_arr)
assert np.all(new_arr == arr)
def test_device_buffer_free(self):
buf = DeviceArray(shape=(64, 64), dtype=np.float32)
assert buf.allocated_nbytes == 64 * 64 * np.float32().itemsize
buf.free()
assert buf.allocated_nbytes == 0
assert buf.shape == tuple()
def test_empty_tensor_to_host(self):
with DeviceArray(shape=(5, 2, 0, 3, 0), dtype=np.float32) as buf:
assert util.volume(buf.shape) == 0
host_buf = np.empty(tuple(), dtype=np.float32)
assert util.volume(host_buf.shape) == 1
host_buf = buf.copy_to(host_buf)
assert host_buf.shape == buf.shape
assert host_buf.nbytes == 0
assert util.volume(host_buf.shape) == 0
@pytest.mark.serial
def test_copy_from_overhead(self):
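# DeviceArray.copy_from() should add only negligible overhead (within ~2% here) compared to a raw
# memcpy issued directly through the CUDA wrapper.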
host_buf = np.ones(shape=(1, 2, 1024, 1024), dtype=np.float32)
with DeviceArray(shape=host_buf.shape, dtype=host_buf.dtype) as dev_buf:
memcpy_time = time_func(
lambda: wrapper().memcpy(
dst=dev_buf.ptr,
src=host_buf.ctypes.data,
nbytes=host_buf.nbytes,
kind=MemcpyKind.HostToDevice,
)
)
copy_from_time = time_func(lambda: dev_buf.copy_from(host_buf))
print("memcpy time: {:}, copy_from time: {:}".format(memcpy_time, copy_from_time))
assert copy_from_time <= (memcpy_time * 1.02)
@pytest.mark.serial
def test_copy_to_overhead(self):
host_buf = np.ones(shape=(1, 2, 1024, 1024), dtype=np.float32)
with DeviceArray(shape=host_buf.shape, dtype=host_buf.dtype) as dev_buf:
memcpy_time = time_func(
lambda: wrapper().memcpy(
dst=host_buf.ctypes.data,
src=dev_buf.ptr,
nbytes=host_buf.nbytes,
kind=MemcpyKind.DeviceToHost,
)
)
copy_to_time = time_func(lambda: dev_buf.copy_to(host_buf))
print("memcpy time: {:}, copy_to time: {:}".format(memcpy_time, copy_to_time))
assert copy_to_time <= (memcpy_time * 1.04)
| TensorRT-master | tools/Polygraphy/tests/cuda/test_cuda.py |
TensorRT-master | tools/Polygraphy/tests/backend/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.backend.trt_legacy import TrtLegacyRunner, LoadNetworkFromUff, ConvertToUff, ParseNetworkFromOnnxLegacy
from tests.models.meta import TF_MODELS, ONNX_MODELS
import numpy as np
def test_uff_identity():
model = TF_MODELS["identity"]
loader = model.loader
with TrtLegacyRunner(LoadNetworkFromUff(ConvertToUff(loader))) as runner:
assert runner.is_active
feed_dict = {"Input": np.random.random_sample(size=(1, 15, 25, 30)).astype(np.float32)}
outputs = runner.infer(feed_dict)
assert np.all(outputs["Identity_2"] == feed_dict["Input"])
assert not runner.is_active
def test_can_construct_onnx_loader():
model = ONNX_MODELS["identity"].path
loader = ParseNetworkFromOnnxLegacy(model)
| TensorRT-master | tools/Polygraphy/tests/backend/test_tensorrt_legacy.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
import numpy as np
import onnx_graphsurgeon as gs
import pytest
from polygraphy import constants, util
from polygraphy.backend.onnx import (
ConvertToFp16,
FoldConstants,
ModifyOutputs,
OnnxFromPath,
OnnxFromTfGraph,
SaveOnnx,
extract_subgraph,
gs_from_onnx,
infer_shapes,
onnx_from_path,
)
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER
from tests.helper import is_file_non_empty
from tests.models.meta import ONNX_MODELS, TF_MODELS
import onnx
class TestLoggerCallbacks(object):
@pytest.mark.parametrize("sev", G_LOGGER.SEVERITY_LETTER_MAPPING.keys())
def test_set_severity(self, sev):
G_LOGGER.severity = sev
class TestOnnxFromPath(object):
def test_basic(self):
loader = OnnxFromPath(ONNX_MODELS["identity"].path)
assert isinstance(loader(), onnx.ModelProto)
def test_external_data(self):
model = ONNX_MODELS["ext_weights"]
loader = OnnxFromPath(model.path, model.ext_data)
assert isinstance(loader(), onnx.ModelProto)
class TestGsFromOnnx(object):
def test_basic(self):
graph = gs_from_onnx(OnnxFromPath(ONNX_MODELS["identity"].path))
assert isinstance(graph, gs.Graph)
class TestExportOnnxFromTf(object):
def test_no_optimize(self):
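        # Smoke test: TF-to-ONNX export should work even with graph optimization and constant folding disabled.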
loader = OnnxFromTfGraph(TF_MODELS["identity"].loader, optimize=False, fold_constant=False)
model = loader()
def test_opset(self):
loader = OnnxFromTfGraph(TF_MODELS["identity"].loader, opset=9)
model = loader()
assert model.opset_import[0].version == 9
class TestModifyOnnx(object):
@pytest.mark.parametrize("copy", [True, False])
def test_layerwise(self, copy):
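        # MARK_ALL marks every node output as a model output; with copy=True the original model must stay unmodified.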
original_model = onnx_from_path(ONNX_MODELS["identity_identity"].path)
loader = ModifyOutputs(original_model, outputs=constants.MARK_ALL, copy=copy)
model = loader()
assert len(original_model.graph.output) == 1 or not copy
assert len(model.graph.output) == 2
def test_custom_outputs(self):
loader = ModifyOutputs(OnnxFromPath(ONNX_MODELS["identity_identity"].path), outputs=["identity_out_0"])
model = loader()
assert len(model.graph.output) == 1
assert model.graph.output[0].name == "identity_out_0"
def test_exclude_outputs_with_layerwise(self):
loader = ModifyOutputs(
OnnxFromPath(ONNX_MODELS["identity_identity"].path),
outputs=constants.MARK_ALL,
exclude_outputs=["identity_out_2"],
)
model = loader()
assert len(model.graph.output) == 1
assert model.graph.output[0].name == "identity_out_0"
class TestInferShapes(object):
def check_model(self, model):
# Find all intermediate tensors to check if they have shapes.
tensors = set()
for node in model.graph.node:
tensors.update(node.output)
tensors -= {out.name for out in model.graph.output}
assert len(model.graph.value_info) == len(tensors)
for val in model.graph.value_info:
assert val.type.tensor_type.HasField("shape")
def test_model(self):
original_model = onnx_from_path(ONNX_MODELS["identity_identity"].path)
model = infer_shapes(original_model)
self.check_model(model)
def test_path(self):
model = infer_shapes(ONNX_MODELS["identity_identity"].path)
self.check_model(model)
@pytest.mark.parametrize("set_data_dir", [True, False])
def test_external_data(self, set_data_dir):
model = ONNX_MODELS["ext_weights_same_dir"]
model = infer_shapes(model.path, external_data_dir=model.ext_data if set_data_dir else None)
self.check_model(model)
def test_save_to_disk_on_size_threshold(self):
model = onnx_from_path(ONNX_MODELS["const_foldable"].path)
model = infer_shapes(model, save_to_disk_threshold_bytes=0)
self.check_model(model)
class TestConvertToFp16:
@pytest.mark.parametrize("copy", [True, False])
def test_basic(self, copy):
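        # The converted model's intermediate tensors should be FLOAT16 (elem_type 10); with copy=True the original model's inputs remain FLOAT (elem_type 1).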
original_model = onnx_from_path(ONNX_MODELS["identity_identity"].path)
loader = ConvertToFp16(original_model, copy=copy)
model = loader()
assert original_model.graph.input[0].type.tensor_type.elem_type == 1 or not copy
assert model.graph.value_info[0].type.tensor_type.elem_type == 10
class TestFoldConstants:
@pytest.mark.parametrize("fold_shapes", [True, False])
@pytest.mark.parametrize("partitioning", [None, "basic", "recursive"])
@pytest.mark.parametrize("copy", [True, False])
def test_basic(self, partitioning, fold_shapes, copy):
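        # Constant folding should collapse the const_foldable model down to a single node, regardless of partitioning mode.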
original_model = onnx_from_path(ONNX_MODELS["const_foldable"].path)
loader = FoldConstants(
original_model, partitioning=partitioning, fold_shapes=fold_shapes, copy=copy, error_ok=False
)
model = loader()
assert len(original_model.graph.node) != 1 or not copy
assert len(model.graph.node) == 1
class TestSaveOnnx(object):
def test_save_onnx(self):
with tempfile.TemporaryDirectory() as outdir:
outpath = os.path.join(outdir, "test", "nested")
loader = SaveOnnx(OnnxFromPath(ONNX_MODELS["identity"].path), path=outpath)
loader()
assert is_file_non_empty(outpath)
def test_external_data(self):
with util.NamedTemporaryFile() as path, util.NamedTemporaryFile() as data:
model = OnnxFromPath(ONNX_MODELS["const_foldable"].path)
loader = SaveOnnx(model, path.name, external_data_path=data.name, size_threshold=0)
loader()
assert is_file_non_empty(path.name)
assert is_file_non_empty(data.name)
@pytest.fixture()
def extract_model():
input_metadata = TensorMetadata().add("X", dtype=np.float32, shape=(64, 64))
output_metadata = TensorMetadata().add("identity_out_0", dtype=np.float32, shape=None)
return onnx_from_path(ONNX_MODELS["identity_identity"].path), input_metadata, output_metadata
class TestExtractSubgraph(object):
def check_model(self, model):
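        # The extracted subgraph should contain exactly one node, with input "X" and output "identity_out_0".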
graph = gs_from_onnx(model)
assert len(graph.nodes) == 1
assert len(graph.inputs) == 1
assert graph.inputs[0].name == "X"
assert graph.inputs[0].shape is not None
assert graph.inputs[0].dtype is not None
assert len(graph.outputs) == 1
assert graph.outputs[0].name == "identity_out_0"
assert graph.outputs[0].dtype is not None
def test_extract_onnx_model(self, extract_model):
original_model, input_meta, output_meta = extract_model
model = extract_subgraph(original_model, input_meta, output_meta)
assert original_model.graph.output[0].name == "identity_out_2"
self.check_model(model)
def test_extract_onnx_model_no_input_meta(self, extract_model):
model, _, output_meta = extract_model
model = extract_subgraph(model, output_metadata=output_meta)
self.check_model(model)
def test_extract_onnx_model_no_output_meta(self, extract_model):
model, input_meta, _ = extract_model
model = extract_subgraph(model, input_metadata=input_meta)
assert model.graph.output[0].name == "identity_out_2"
def test_extract_onnx_gs_graph(self, extract_model):
model, input_meta, output_meta = extract_model
graph = gs_from_onnx(model)
subgraph = extract_subgraph(graph, input_meta, output_meta)
# Make sure original graph isn't modified.
assert len(graph.nodes) == 2
assert isinstance(subgraph, gs.Graph)
assert len(subgraph.nodes) == 1
assert len(subgraph.inputs) == 1
assert subgraph.inputs[0].name == "X"
assert len(subgraph.outputs) == 1
assert subgraph.outputs[0].name == "identity_out_0"
def test_extract_passes_no_input_shape(self, extract_model):
model, input_meta, output_meta = extract_model
input_meta["X"].shape = None
model = extract_subgraph(model, input_meta, output_meta)
self.check_model(model)
def test_extract_passes_no_input_dtype(self, extract_model):
model, input_meta, output_meta = extract_model
input_meta["X"].dtype = None
model = extract_subgraph(model, input_meta, output_meta)
self.check_model(model)
def test_extract_passes_no_output_shape(self, extract_model):
model, input_meta, output_meta = extract_model
output_meta["identity_out_0"].shape = None
model = extract_subgraph(model, input_meta, output_meta)
self.check_model(model)
| TensorRT-master | tools/Polygraphy/tests/backend/onnx/test_loader.py |
| TensorRT-master | tools/Polygraphy/tests/backend/onnx/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.backend.onnx import onnx_from_path
from polygraphy.backend.onnx import util as onnx_util
from tests.models.meta import ONNX_MODELS
def test_get_num_nodes():
model = onnx_from_path(ONNX_MODELS["scan"].path)
assert onnx_util.get_num_nodes(model) == 3 # Should count subgraph nodes.
| TensorRT-master | tools/Polygraphy/tests/backend/onnx/test_util.py |