#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import pytest
import tensorrt as trt
from polygraphy import mod, util
from polygraphy.backend.trt import Algorithm, TacticRecorder, TacticReplayData, TacticReplayer
from polygraphy.exception import PolygraphyException
ALGO_EQ_CASES = [
(
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
True,
), # Same
(
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
Algorithm(
7, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
False,
), # Different implementation
(
Algorithm(
6, 2, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
False,
), # Different tactic
(
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
Algorithm(
6, 1, inputs=[(trt.TensorFormat.CHW32, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
False,
), # Different input format
(
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
Algorithm(6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.int8)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]),
False,
), # Different input data type
(
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.CHW32, trt.float32)]
),
False,
), # Different output format
(
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
Algorithm(6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.int8)]),
False,
), # Different output data type
(
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)] * 2, outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
False,
), # Different number of inputs
(
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)] * 2
),
Algorithm(
6, 1, inputs=[(trt.TensorFormat.LINEAR, trt.float32)], outputs=[(trt.TensorFormat.LINEAR, trt.float32)]
),
False,
), # Different number of outputs
]
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
class TestAlgorithm(object):
@pytest.mark.parametrize("left, right, expected", ALGO_EQ_CASES)
def test_equality(self, left, right, expected):
assert (left == right) == expected
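# Lightweight stand-ins for TensorRT's IAlgorithmContext, IAlgorithm, IAlgorithmVariant,
# and IAlgorithmIOInfo, so that Algorithm.from_trt and the replayer/recorder below can be
# exercised without invoking the builder.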
FakeAlgorithmContext = namedtuple("FakeAlgorithmContext", ["name", "num_inputs", "num_outputs"])
FakeAlgorithm = namedtuple("FakeAlgorithm", ["algorithm_variant", "io_info"])
FakeAlgorithm.get_algorithm_io_info = lambda this, index: this.io_info[index]
FakeAlgorithmVariant = namedtuple("FakeAlgorithmVariant", ["implementation", "tactic"])
FakeAlgorithmIOInfo = namedtuple("FakeAlgorithmIOInfo", ["tensor_format", "dtype"])
def fake_context(name):
return FakeAlgorithmContext(name=name, num_inputs=1, num_outputs=1)
def fake_algo(implementation=6, tactic=0, io=None):
io_info = [FakeAlgorithmIOInfo(tensor_format=trt.TensorFormat.LINEAR, dtype=trt.float32)] * 2
if io:
io_info = []
for fmt, dtype in io:
io_info.append(FakeAlgorithmIOInfo(tensor_format=fmt, dtype=dtype))
trt_algo = FakeAlgorithm(algorithm_variant=FakeAlgorithmVariant(implementation, tactic), io_info=io_info)
return trt_algo
@pytest.fixture(params=[True, False], ids=["path", "object"])
def replay(request):
"""
Returns:
Tuple[FakeAlgorithmContext, Algorithm, FakeAlgorithm,
Union[str, TacticReplayData], Union[str, TacticReplayData]]:
This fixture returns 5 things:
1. A fake TensorRT algorithm context
2. A Polygraphy Algorithm instance
3. A fake TensorRT algorithm (with the same information as (2))
4. Input tactic replay data, populated with the Polygraphy Algorithm from (2),
either as a ``TacticReplayData`` instance or a path.
5. Output tactic replay data, initially empty, either as a ``TacticReplayData``
instance or a path.
"""
jsonify = request.param
name = "node_of_y"
context = fake_context(name)
trt_algo = fake_algo()
poly_algo = Algorithm.from_trt(context, trt_algo)
in_replay_data = TacticReplayData().add(name, poly_algo)
out_replay_data = TacticReplayData()
if jsonify:
inpath = util.NamedTemporaryFile("w")
in_replay_data.save(inpath.name)
in_replay_data = inpath.name
outpath = util.NamedTemporaryFile("r")
out_replay_data = outpath.name
yield context, poly_algo, trt_algo, in_replay_data, out_replay_data
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
class TestReplayer(object):
def test_basic(self, replay):
context, _, algo, replay_data, _ = replay
replayer = TacticReplayer(replay_data)
selected = replayer.select_algorithms(context, [fake_algo(implementation=2), algo, fake_algo(tactic=1)])
assert selected == [1]
def test_new_layer_falls_back(self, replay):
_, _, _, replay_data, _ = replay
replayer = TacticReplayer(replay_data)
selected = replayer.select_algorithms(
fake_context(name="new_layer"), [fake_algo(2, 1), fake_algo(3, 4), fake_algo(5, 6)]
)
assert selected == [0, 1, 2]
def test_missing_algo_fails(self, replay):
context, _, _, replay_data, _ = replay
replayer = TacticReplayer(replay_data)
with pytest.raises(PolygraphyException, match="was not provided by TensorRT as a choice"):
assert replayer.select_algorithms(context, [fake_algo(2, 1)]) == [0]
@pytest.mark.parametrize(
"algo",
[
fake_algo(2),
fake_algo(tactic=2),
fake_algo(io=[(trt.TensorFormat.CHW32, trt.float32), (trt.TensorFormat.LINEAR, trt.float32)]),
fake_algo(io=[(trt.TensorFormat.LINEAR, trt.int8), (trt.TensorFormat.LINEAR, trt.float32)]),
fake_algo(io=[(trt.TensorFormat.LINEAR, trt.float32), (trt.TensorFormat.CHW32, trt.float32)]),
fake_algo(io=[(trt.TensorFormat.LINEAR, trt.float32), (trt.TensorFormat.LINEAR, trt.int32)]),
],
)
def test_different_algo_fails(self, replay, algo):
context, _, _, replay_data, _ = replay
replayer = TacticReplayer(replay_data)
with pytest.raises(PolygraphyException, match="was not provided by TensorRT as a choice"):
assert replayer.select_algorithms(context, [algo]) == [0]
def test_fails_if_wrong_selected(self, replay):
context, _, _, replay_data, _ = replay
replayer = TacticReplayer(replay_data)
# We should be able to check tactics even if we're not recording them.
with pytest.raises(PolygraphyException, match="TensorRT selected a tactic different"):
replayer.report_algorithms([context], [fake_algo(implementation=9)])
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
class TestRecorder(object):
def test_basic(self, replay):
context, poly_algo, algo, _, replay_data = replay
assert isinstance(replay_data, str) or not replay_data
replayer = TacticRecorder(replay_data)
replayer.report_algorithms([context], [algo])
assert len(replayer.data) == 1
assert replayer.data[context.name] == poly_algo
# === End of file: tools/Polygraphy/tests/backend/trt/test_algorithm_selector.py (repo: TensorRT-master) ===
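# A minimal sketch, not part of the test suite, of how the tactic replay workflow above
# is typically wired into real engine builds. It assumes CreateConfig accepts an
# ``algorithm_selector`` argument and that "model.onnx" exists; both are illustrative.
from polygraphy.backend.trt import (
    CreateConfig,
    EngineFromNetwork,
    NetworkFromOnnxPath,
    TacticRecorder,
    TacticReplayData,
    TacticReplayer,
)

replay = TacticReplayData()
network = NetworkFromOnnxPath("model.onnx")  # Hypothetical model path.

# First build: record the tactics TensorRT selects into ``replay``.
record_loader = EngineFromNetwork(network, CreateConfig(algorithm_selector=TacticRecorder(replay)))
with record_loader():
    pass

# Second build: constrain TensorRT to re-select exactly the recorded tactics.
replay_loader = EngineFromNetwork(network, CreateConfig(algorithm_selector=TacticReplayer(replay)))
with replay_loader():
    pass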
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import sys
import pytest
import tensorrt as trt
from polygraphy import constants, mod, util
from polygraphy.backend.trt import (
Calibrator,
CreateConfig,
EngineBytesFromNetwork,
EngineFromBytes,
EngineFromNetwork,
LoadPlugins,
ModifyNetworkOutputs,
NetworkFromOnnxBytes,
Profile,
SaveEngine,
bytes_from_engine,
engine_from_network,
modify_network_outputs,
network_from_onnx_bytes,
network_from_onnx_path,
onnx_like_from_network,
)
from polygraphy.comparator import DataLoader
from tests.helper import get_file_size, is_file_non_empty
from tests.models.meta import ONNX_MODELS
##
## Fixtures
##
@pytest.fixture(scope="session")
def identity_engine():
network_loader = NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader)
engine_loader = EngineFromNetwork(network_loader, CreateConfig())
with engine_loader() as engine:
yield engine
@pytest.fixture(scope="session")
def identity_builder_network():
builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity"].loader)
with builder, network, parser:
yield builder, network
@pytest.fixture(scope="session")
def identity_network():
builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity"].loader)
with builder, network, parser:
yield builder, network, parser
@pytest.fixture(scope="session")
def identity_identity_network():
builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity_identity"].loader)
with builder, network, parser:
yield builder, network, parser
@pytest.fixture(scope="session")
def reshape_network():
builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["reshape"].loader)
with builder, network, parser:
yield builder, network, parser
@pytest.fixture(scope="session")
def modifiable_network():
# Must return a loader since the network will be modified each time it's loaded.
return NetworkFromOnnxBytes(ONNX_MODELS["identity_identity"].loader)
@pytest.fixture(scope="session")
def modifiable_reshape_network():
# Must return a loader since the network will be modified each time it's loaded.
return NetworkFromOnnxBytes(ONNX_MODELS["reshape"].loader)
##
## Tests
##
class TestLoadPlugins(object):
def test_can_load_libnvinfer_plugins(self):
def get_plugin_names():
return [pc.name for pc in trt.get_plugin_registry().plugin_creator_list]
loader = LoadPlugins(
plugins=["nvinfer_plugin.dll" if sys.platform.startswith("win") else "libnvinfer_plugin.so"]
)
loader()
assert get_plugin_names()
class TestSerializedEngineLoader(object):
def test_serialized_engine_loader_from_lambda(self, identity_engine):
with util.NamedTemporaryFile() as outpath:
with open(outpath.name, "wb") as f, identity_engine.serialize() as buffer:
f.write(buffer)
loader = EngineFromBytes(lambda: open(outpath.name, "rb").read())
with loader() as engine:
assert isinstance(engine, trt.ICudaEngine)
def test_serialized_engine_loader_from_buffer(self, identity_engine):
with identity_engine.serialize() as buffer:
loader = EngineFromBytes(buffer)
with loader() as engine:
assert isinstance(engine, trt.ICudaEngine)
class TestOnnxNetworkLoader(object):
def test_loader(self):
builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity"].loader)
with builder, network, parser:
assert not network.has_implicit_batch_dimension
assert not network.has_explicit_precision
def test_loader_explicit_precision(self):
builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity"].loader, explicit_precision=True)
with builder, network, parser:
assert not network.has_implicit_batch_dimension
if mod.version(trt.__version__) < mod.version("8.0"):
assert network.has_explicit_precision
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.1.0.0"), reason="API was added in TRT 7.1")
class TestNetworkFromOnnxPath(object):
def test_loader(self):
builder, network, parser = network_from_onnx_path(ONNX_MODELS["identity"].path)
with builder, network, parser:
assert not network.has_implicit_batch_dimension
assert not network.has_explicit_precision
def test_loader_explicit_precision(self):
builder, network, parser = network_from_onnx_path(ONNX_MODELS["identity"].path, explicit_precision=True)
with builder, network, parser:
assert not network.has_implicit_batch_dimension
if mod.version(trt.__version__) < mod.version("8.0"):
assert network.has_explicit_precision
class TestModifyNetwork(object):
def test_mark_layerwise(self, modifiable_network):
load_network = ModifyNetworkOutputs(modifiable_network, outputs=constants.MARK_ALL)
builder, network, parser = load_network()
with builder, network, parser:
for layer in network:
for index in range(layer.num_outputs):
assert layer.get_output(index).is_network_output
def test_mark_custom_outputs(self, modifiable_network):
builder, network, parser = modify_network_outputs(modifiable_network, outputs=["identity_out_0"])
with builder, network, parser:
assert network.num_outputs == 1
assert network.get_output(0).name == "identity_out_0"
def test_exclude_outputs_with_mark_layerwise(self, modifiable_network):
builder, network, parser = modify_network_outputs(
modifiable_network, outputs=constants.MARK_ALL, exclude_outputs=["identity_out_2"]
)
with builder, network, parser:
assert network.num_outputs == 1
assert network.get_output(0).name == "identity_out_0"
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_mark_shape_outputs(self, modifiable_reshape_network):
builder, network, parser = modify_network_outputs(
modifiable_reshape_network, outputs=["output", "reduce_prod_out_gs_2"]
)
with builder, network, parser:
assert network.num_outputs == 2
assert network.get_output(0).name == "reduce_prod_out_gs_2"
assert network.get_output(0).is_shape_tensor
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_unmark_shape_outputs(self, modifiable_reshape_network):
builder, network, parser = modify_network_outputs(
modifiable_reshape_network, outputs=constants.MARK_ALL, exclude_outputs=["reduce_prod_out_gs_2"]
)
with builder, network, parser:
assert network.num_outputs == 1
class TestConfigLoader(object):
def test_defaults(self, identity_builder_network):
builder, network = identity_builder_network
loader = CreateConfig()
assert loader.timing_cache_path is None
with loader(builder, network) as config:
assert config.max_workspace_size == 1 << 24
with contextlib.suppress(AttributeError):
assert not config.get_flag(trt.BuilderFlag.TF32)
with contextlib.suppress(AttributeError):
assert not config.get_flag(trt.BuilderFlag.SPARSE_WEIGHTS)
assert not config.get_flag(trt.BuilderFlag.FP16)
assert not config.get_flag(trt.BuilderFlag.INT8)
assert config.num_optimization_profiles == 1
assert config.int8_calibrator is None
with contextlib.suppress(AttributeError):
if mod.version(trt.__version__) < mod.version("8.0"):
assert config.get_tactic_sources() == 3
else:
assert config.get_tactic_sources() == 7
def test_workspace_size(self, identity_builder_network):
builder, network = identity_builder_network
loader = CreateConfig(max_workspace_size=0)
with loader(builder, network) as config:
assert config.max_workspace_size == 0
@pytest.mark.parametrize("flag", [True, False])
def test_strict_types(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(strict_types=flag)
with loader(builder, network) as config:
assert config.get_flag(trt.BuilderFlag.STRICT_TYPES) == flag
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0.0.0"), reason="API was added in TRT 8.0")
@pytest.mark.parametrize("flag", [True, False])
def test_restricted(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(restricted=flag)
with loader(builder, network) as config:
assert config.get_flag(trt.BuilderFlag.SAFETY_SCOPE) == flag
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.1.0.0"), reason="API was added in TRT 7.1")
@pytest.mark.parametrize("flag", [True, False])
def test_tf32(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(tf32=flag)
with loader(builder, network) as config:
assert config.get_flag(trt.BuilderFlag.TF32) == flag
@pytest.mark.parametrize("flag", [True, False])
def test_fp16(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(fp16=flag)
with loader(builder, network) as config:
assert config.get_flag(trt.BuilderFlag.FP16) == flag
@pytest.mark.parametrize("flag", [True, False])
def test_int8(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(int8=flag)
with loader(builder, network) as config:
assert config.get_flag(trt.BuilderFlag.INT8) == flag
@pytest.mark.parametrize("flag", [True, False])
def test_allow_gpu_fallback(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(allow_gpu_fallback=flag)
with loader(builder, network) as config:
assert config.get_flag(trt.BuilderFlag.GPU_FALLBACK) == flag
@pytest.mark.skipif(
mod.version(trt.__version__) < mod.version("8.0"), reason="API was not available in 7.2 and older"
)
@pytest.mark.parametrize("flag", [True, False])
def test_sparse_weights(self, identity_builder_network, flag):
builder, network = identity_builder_network
loader = CreateConfig(sparse_weights=flag)
with loader(builder, network) as config:
assert config.get_flag(trt.BuilderFlag.SPARSE_WEIGHTS) == flag
def test_use_dla(self, identity_builder_network):
builder, network = identity_builder_network
loader = CreateConfig(use_dla=True)
with loader(builder, network) as config:
assert config.default_device_type == trt.DeviceType.DLA
assert config.DLA_core == 0
with contextlib.suppress(AttributeError):
if mod.version(trt.__version__) < mod.version("8.0"):
TACTIC_SOURCES_CASES = [
(None, 3), # By default, all sources are enabled.
([], 0),
([trt.TacticSource.CUBLAS], 1),
([trt.TacticSource.CUBLAS_LT], 2),
([trt.TacticSource.CUBLAS, trt.TacticSource.CUBLAS_LT], 3),
]
else:
TACTIC_SOURCES_CASES = [
(None, 7), # By default, all sources are enabled.
([], 0),
([trt.TacticSource.CUBLAS], 1),
([trt.TacticSource.CUBLAS_LT], 2),
([trt.TacticSource.CUDNN], 4),
([trt.TacticSource.CUBLAS, trt.TacticSource.CUBLAS_LT], 3),
([trt.TacticSource.CUBLAS, trt.TacticSource.CUDNN], 5),
([trt.TacticSource.CUBLAS_LT, trt.TacticSource.CUDNN], 6),
([trt.TacticSource.CUDNN, trt.TacticSource.CUBLAS, trt.TacticSource.CUBLAS_LT], 7),
]
@pytest.mark.parametrize("sources, expected", TACTIC_SOURCES_CASES)
def test_tactic_sources(self, identity_builder_network, sources, expected):
builder, network = identity_builder_network
loader = CreateConfig(tactic_sources=sources)
with loader(builder, network) as config:
assert config.get_tactic_sources() == expected
def test_calibrator_metadata_set(self, identity_builder_network):
builder, network = identity_builder_network
calibrator = Calibrator(DataLoader())
loader = CreateConfig(int8=True, calibrator=calibrator)
with loader(builder, network) as config:
assert config.int8_calibrator
assert "x" in calibrator.data_loader.input_metadata
def test_multiple_profiles(self, identity_builder_network):
builder, network = identity_builder_network
profiles = [
Profile().add("x", (1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)),
Profile().add("x", (1, 2, 4, 4), (1, 2, 8, 8), (1, 2, 16, 16)),
]
loader = CreateConfig(profiles=profiles)
with loader(builder, network) as config:
assert config.num_optimization_profiles == 2
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
@pytest.mark.parametrize("path_mode", [True, False], ids=["path", "file-like"])
def test_timing_cache(self, identity_builder_network, path_mode):
builder, network = identity_builder_network
with util.NamedTemporaryFile() as cache:
loader = CreateConfig(load_timing_cache=cache.name if path_mode else cache)
with loader(builder, network) as config:
assert config.get_timing_cache()
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
def test_empty_timing_cache_when_default(self, identity_builder_network):
builder, network = identity_builder_network
loader = CreateConfig()
with loader(builder, network) as config:
cache = config.get_timing_cache()
with cache.serialize() as buffer:
cache_size = len(bytes(buffer))
cache.reset()
with cache.serialize() as buffer:
new_cache_size = len(bytes(buffer))
assert cache_size == new_cache_size
class TestEngineBytesFromNetwork(object):
def test_can_build(self, identity_network):
loader = EngineBytesFromNetwork(identity_network)
with loader() as serialized_engine:
assert isinstance(serialized_engine, trt.IHostMemory)
class TestEngineFromNetwork(object):
def test_defaults(self, identity_network):
loader = EngineFromNetwork(identity_network)
assert loader.timing_cache_path is None
def test_can_build_with_parser_owning(self, identity_network):
loader = EngineFromNetwork(identity_network)
with loader():
pass
def test_can_build_without_parser_non_owning(self, identity_builder_network):
builder, network = identity_builder_network
loader = EngineFromNetwork((builder, network))
with loader():
pass
def test_can_build_with_calibrator(self, identity_builder_network):
builder, network = identity_builder_network
calibrator = Calibrator(DataLoader())
create_config = CreateConfig(int8=True, calibrator=calibrator)
loader = EngineFromNetwork((builder, network), create_config)
with loader():
pass
# Calibrator buffers should be freed after the build
assert all([buf.allocated_nbytes == 0 for buf in calibrator.device_buffers.values()])
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
@pytest.mark.parametrize("path_mode", [True, False], ids=["path", "file-like"])
def test_timing_cache_generate_and_append(self, path_mode):
with util.NamedTemporaryFile() as total_cache, util.NamedTemporaryFile() as identity_cache:
def build_engine(model, cache):
if not path_mode:
cache.seek(0)
network_loader = NetworkFromOnnxBytes(ONNX_MODELS[model].loader)
# When saving, path mode passes the cache path while non-path mode passes the
# file-like object directly. The cache must also be loaded via CreateConfig so
# that new timing data is appended instead of overwriting the previous cache.
loader = EngineFromNetwork(
network_loader,
CreateConfig(load_timing_cache=cache.name),
save_timing_cache=cache.name if path_mode else cache,
)
with loader():
pass
if not path_mode:
cache.seek(0)
assert not total_cache.read()
build_engine("const_foldable", total_cache)
const_foldable_cache_size = get_file_size(total_cache.name)
# Build the identity network twice: once into a fresh cache so we can measure its size, then into the total cache.
assert get_file_size(identity_cache.name) == 0
build_engine("identity", identity_cache)
identity_cache_size = get_file_size(identity_cache.name)
build_engine("identity", total_cache)
total_cache_size = get_file_size(total_cache.name)
# The total cache should be larger than either of the individual caches.
assert total_cache_size > const_foldable_cache_size and total_cache_size > identity_cache_size
# The total cache should also be smaller than or equal to the sum of the individual caches since
# header information should not be duplicated.
assert total_cache_size <= (const_foldable_cache_size + identity_cache_size)
class TestBytesFromEngine(object):
def test_serialize_engine(self, identity_network):
with engine_from_network(identity_network) as engine:
serialized_engine = bytes_from_engine(engine)
assert isinstance(serialized_engine, bytes)
class TestSaveEngine(object):
def test_save_engine(self, identity_network):
with util.NamedTemporaryFile() as outpath:
engine_loader = SaveEngine(EngineFromNetwork(identity_network), path=outpath.name)
with engine_loader():
assert is_file_non_empty(outpath.name)
class TestOnnxLikeFromNetwork(object):
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.2"), reason="Unsupported for TRT 7.1 and older")
@pytest.mark.parametrize(
"model_name", ["identity", "empty_tensor_expand", "const_foldable", "and", "scan", "dim_param", "tensor_attr"]
)
def test_onnx_like_from_network(self, model_name):
assert onnx_like_from_network(NetworkFromOnnxBytes(ONNX_MODELS[model_name].loader))
# === End of file: tools/Polygraphy/tests/backend/trt/test_loader.py (repo: TensorRT-master) ===
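# A minimal sketch, not part of the test suite, of the lazy loader chaining these tests
# exercise: each loader is constructed up front, but parsing, building, and saving only
# happen on the final call. "model.onnx" and "model.engine" are hypothetical paths.
from polygraphy.backend.trt import CreateConfig, EngineFromNetwork, NetworkFromOnnxPath, SaveEngine

build_engine = SaveEngine(
    EngineFromNetwork(NetworkFromOnnxPath("model.onnx"), CreateConfig(fp16=True)),
    path="model.engine",
)
with build_engine() as engine:  # Everything runs here.
    pass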
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
import tensorrt as trt
from polygraphy import cuda, mod, util
from polygraphy.backend.trt import (
Calibrator,
CreateConfig,
engine_from_network,
get_trt_logger,
network_from_onnx_bytes,
)
from polygraphy.exception import PolygraphyException
from tests.helper import is_file_non_empty, get_file_size
from tests.models.meta import ONNX_MODELS
@pytest.fixture(scope="session")
def identity_builder_network():
builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity"].loader)
with builder, network, parser:
yield builder, network
@pytest.fixture(scope="session")
def multi_input_builder_network():
builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["reducable"].loader)
with builder, network, parser:
yield builder, network
def generate_data(num_batches):
for item in [np.ones((1, 1, 2, 2), dtype=np.float32)] * num_batches:
yield {"x": item}
class TestCalibrator(object):
def check_calibrator_cleanup(self, calibrator):
# Calibrator buffers should be freed after the build
assert all([buf.allocated_nbytes == 0 for buf in calibrator.device_buffers.values()])
@pytest.mark.parametrize(
"BaseClass",
[
trt.IInt8Calibrator,
trt.IInt8LegacyCalibrator,
trt.IInt8EntropyCalibrator,
trt.IInt8EntropyCalibrator2,
trt.IInt8MinMaxCalibrator,
],
)
def test_calibrator_basic(self, identity_builder_network, BaseClass):
if mod.version(trt.__version__) < mod.version("7.0") and BaseClass == trt.IInt8LegacyCalibrator:
pytest.skip("Bug in TRT 6 causes NaNs with legacy calibrator")
builder, network = identity_builder_network
NUM_BATCHES = 2
data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * NUM_BATCHES
calibrator = Calibrator(data, BaseClass=BaseClass)
create_config = CreateConfig(int8=True, calibrator=calibrator)
with engine_from_network((builder, network), create_config):
assert calibrator.num_batches == NUM_BATCHES
self.check_calibrator_cleanup(calibrator)
def test_host_data_copied_to_device(self):
with Calibrator(generate_data(1)) as calibrator:
[ptr] = calibrator.get_batch(names=["x"])
v = cuda.DeviceView(ptr, shape=(1, 1, 2, 2), dtype=np.float32)
arr = v.numpy()
assert arr.shape == (1, 1, 2, 2)
assert np.all(arr == 1)
def test_calibrator_data_and_ordering_correct(self):
def generate_multidata(num_batches):
for _ in range(num_batches):
yield {
"x0": np.zeros((4, 5), dtype=np.float32),
"x1": cuda.DeviceArray(dtype=np.float32).copy_from(np.ones((4, 5), dtype=np.float32)),
"x2": cuda.DeviceArray(dtype=np.float32).copy_from(np.ones((4, 5), dtype=np.float32) * 2).ptr,
}
NUM_BATCHES = 2
with Calibrator(generate_multidata(NUM_BATCHES)) as calibrator:
for _ in range(NUM_BATCHES):
ptrs = calibrator.get_batch(names=["x0", "x1", "x2"])
for index, ptr in enumerate(ptrs):
v = cuda.DeviceView(ptr, shape=(4, 5), dtype=np.float32)
assert np.all(v.numpy() == index)
def test_calibrator_generator_data(self, identity_builder_network):
builder, network = identity_builder_network
NUM_BATCHES = 2
calibrator = Calibrator(generate_data(NUM_BATCHES))
create_config = CreateConfig(int8=True, calibrator=calibrator)
with engine_from_network((builder, network), create_config):
assert calibrator.num_batches == NUM_BATCHES
self.check_calibrator_cleanup(calibrator)
# We should be able to mix NumPy arrays, DeviceViews, DeviceArrays, and raw device pointers as calibration inputs.
@pytest.mark.parametrize("mode", ["array", "view", "pointer"])
def test_calibrator_device_buffers_multiinput(self, multi_input_builder_network, mode):
def generate_dev_data(num_batches):
with cuda.DeviceArray(shape=(1,), dtype=np.float32) as x:
for _ in range(num_batches):
x.copy_from(np.ones((1,), dtype=np.float32))
xdata = {"array": x, "view": cuda.DeviceView(x.ptr, x.shape, x.dtype), "pointer": x.ptr}[mode]
yield {"X0": xdata, "Y0": np.zeros((1,), dtype=np.float32)}
builder, network = multi_input_builder_network
NUM_BATCHES = 2
calibrator = Calibrator(generate_dev_data(NUM_BATCHES))
create_config = CreateConfig(int8=True, calibrator=calibrator)
with engine_from_network((builder, network), create_config):
assert calibrator.num_batches == NUM_BATCHES
self.check_calibrator_cleanup(calibrator)
# We want the calibrator to inter-op with TRT APIs seamlessly
def test_calibrator_outside_polygraphy(self, identity_builder_network):
builder, network = identity_builder_network
NUM_BATCHES = 2
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)
with Calibrator(generate_data(NUM_BATCHES)) as calibrator:
config.int8_calibrator = calibrator
if mod.version(trt.__version__) < mod.version("8.0"):
engine = builder.build_engine(network, config)
else:
with trt.Runtime(get_trt_logger()) as runtime:
engine = runtime.deserialize_cuda_engine(builder.build_serialized_network(network, config))
with engine:
assert engine
self.check_calibrator_cleanup(calibrator)
def test_cannot_use_calibrator_without_activation(self):
def generate_data():
for item in [np.ones((1, 1, 2, 2), dtype=np.float32)]:
yield {"x": item}
calibrator = Calibrator(generate_data())
assert calibrator.get_batch(["x"]) is None
def test_calibrator_with_path_name_cache(self, identity_builder_network):
builder, network = identity_builder_network
data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}]
with util.NamedTemporaryFile() as cache:
calibrator = Calibrator(data, cache=cache.name)
create_config = CreateConfig(int8=True, calibrator=calibrator)
with engine_from_network((builder, network), create_config):
assert is_file_non_empty(cache.name)
self.check_calibrator_cleanup(calibrator)
@pytest.mark.parametrize("mode", ["wb+", "rb", "wb"])
def test_calibrator_with_file_object_cache(self, identity_builder_network, mode):
builder, network = identity_builder_network
data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}]
with util.NamedTemporaryFile(mode=mode) as cache:
calibrator = Calibrator(data, cache=cache)
create_config = CreateConfig(int8=True, calibrator=calibrator)
with engine_from_network((builder, network), create_config):
if mode != "rb":
assert is_file_non_empty(cache.name)
self.check_calibrator_cleanup(calibrator)
# read_calibration_cache should work even if an explicit cache is not provided.
# This lets repeated calibration runs reuse the internally cached scales and finish quickly.
def test_calibrator_caches_without_explicit_cache(self, identity_builder_network):
builder, network = identity_builder_network
data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}]
calibrator = Calibrator(data)
# First, populate the cache
create_config = CreateConfig(int8=True, calibrator=calibrator)
with engine_from_network((builder, network), create_config):
pass
# Check that the internal cache is populated
assert calibrator.read_calibration_cache()
self.check_calibrator_cleanup(calibrator)
def test_calibrator_rechecks_cache_on_reset(self, identity_builder_network):
builder, network = identity_builder_network
data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}]
with util.NamedTemporaryFile(mode="wb+") as cache:
calibrator = Calibrator(data, cache=cache.name)
# First, populate the cache
create_config = CreateConfig(int8=True, calibrator=calibrator)
with engine_from_network((builder, network), create_config):
pass
# Ensure that the calibrator will now read from the cache when reset
calibrator.reset()
assert not calibrator.has_cached_scales
assert len(calibrator.read_calibration_cache()) == get_file_size(cache.name)
self.check_calibrator_cleanup(calibrator)
@pytest.mark.parametrize(
"names",
[
(["fake-input", "x"]),
(["fake-input"]),
],
)
def test_calibrator_invalid_input_fails(self, identity_builder_network, names):
builder, network = identity_builder_network
data = [{name: np.ones((1, 1, 2, 2), dtype=np.float32) for name in names}]
calibrator = Calibrator(data)
create_config = CreateConfig(int8=True, calibrator=calibrator)
with pytest.raises(PolygraphyException):
with engine_from_network((builder, network), create_config):
pass
# === End of file: tools/Polygraphy/tests/backend/trt/test_calibrator.py (repo: TensorRT-master) ===
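# A minimal sketch, not part of the test suite, of INT8 calibration with a data
# generator and a persistent cache, mirroring the patterns tested above.
# "model.onnx", "calib.cache", and the input name "x" are hypothetical.
import numpy as np
from polygraphy.backend.trt import Calibrator, CreateConfig, NetworkFromOnnxPath, engine_from_network

def calib_data(num_batches=4):
    for _ in range(num_batches):
        yield {"x": np.ones((1, 1, 2, 2), dtype=np.float32)}  # One feed_dict per batch.

calibrator = Calibrator(calib_data(), cache="calib.cache")
with engine_from_network(NetworkFromOnnxPath("model.onnx"), CreateConfig(int8=True, calibrator=calibrator)):
    pass  # Scales are computed during the build, or loaded from the cache if present.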
# === tools/Polygraphy/tests/backend/trt/__init__.py (repo: TensorRT-master): empty file ===
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import tensorrt as trt
from polygraphy import mod
from polygraphy.backend.trt import Profile, network_from_onnx_bytes
from tests.models.meta import ONNX_MODELS
@pytest.fixture(scope="session")
def dynamic_identity_network():
builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["dynamic_identity"].loader)
with builder, network, parser:
yield builder, network, parser
class TestProfile(object):
def test_can_add(self):
profile = Profile()
min, opt, max = (1, 1), (2, 2), (4, 4)
assert profile.add("input", min=min, opt=opt, max=max) is profile
shape_tuple = profile["input"]
assert shape_tuple.min == min
assert shape_tuple.opt == opt
assert shape_tuple.max == max
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_fill_defaults_does_not_overwrite(self, dynamic_identity_network):
_, network, _ = dynamic_identity_network
profile = Profile().add("X", (1, 1, 1, 1), (1, 1, 2, 2), (1, 1, 3, 3))
assert profile.fill_defaults(network) is profile
assert profile["X"].min == (1, 1, 1, 1)
assert profile["X"].opt == (1, 1, 2, 2)
assert profile["X"].max == (1, 1, 3, 3)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_to_trt(self, dynamic_identity_network):
builder, network, _ = dynamic_identity_network
profile = Profile().add("X", (1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4))
trt_profile = profile.to_trt(builder, network)
assert trt_profile.get_shape("X") == [(1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)]
# === End of file: tools/Polygraphy/tests/backend/trt/test_profile.py (repo: TensorRT-master) ===
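# A minimal sketch, not part of the test suite, of how a Profile feeds into an engine
# build for a network with a dynamic input named "X"; "dynamic.onnx" is a hypothetical path.
from polygraphy.backend.trt import CreateConfig, EngineFromNetwork, NetworkFromOnnxPath, Profile

profile = Profile().add("X", min=(1, 2, 1, 1), opt=(1, 2, 2, 2), max=(1, 2, 4, 4))
loader = EngineFromNetwork(NetworkFromOnnxPath("dynamic.onnx"), CreateConfig(profiles=[profile]))
with loader() as engine:
    pass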
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import tensorrt as trt
from polygraphy.backend.trt import util as trt_util
@pytest.fixture(scope="session")
def layer_class_mapping():
return trt_util.get_layer_class_mapping()
@pytest.mark.parametrize("layer_type", trt.LayerType.__members__.values())
def test_all_layer_types_mapped(layer_class_mapping, layer_type):
if layer_type == trt.LayerType.PLUGIN:
pytest.skip("PLUGIN has no corresponding ILayer")
assert layer_type in layer_class_mapping
# === End of file: tools/Polygraphy/tests/backend/trt/test_util.py (repo: TensorRT-master) ===
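# A minimal sketch, not part of the test suite, of what the mapping checked above
# provides: a way to go from a layer's reported trt.LayerType to the corresponding
# ILayer subclass. That the values are the ILayer classes themselves is an assumption
# implied by, but not directly asserted in, the test above.
import tensorrt as trt
from polygraphy.backend.trt import util as trt_util

layer_class_mapping = trt_util.get_layer_class_mapping()
assert layer_class_mapping[trt.LayerType.SHUFFLE] is trt.IShuffleLayer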
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import numpy as np
import pytest
import tensorrt as trt
from polygraphy import cuda, mod
from polygraphy.backend.trt import (
CreateConfig,
EngineFromNetwork,
NetworkFromOnnxBytes,
Profile,
TrtRunner,
engine_from_network,
network_from_onnx_bytes,
)
from polygraphy.exception import PolygraphyException
from polygraphy.logger import G_LOGGER
from tests.models.meta import ONNX_MODELS
from tests.helper import time_func
class TestLoggerCallbacks(object):
@pytest.mark.parametrize("sev", G_LOGGER.SEVERITY_LETTER_MAPPING.keys())
def test_set_severity(self, sev):
G_LOGGER.severity = sev
class TestTrtRunner(object):
def test_can_name_runner(self):
NAME = "runner"
runner = TrtRunner(None, name=NAME)
assert runner.name == NAME
def test_basic(self):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
assert runner.is_active
assert runner.owns_engine
assert runner.owns_context
model.check_runner(runner)
assert runner.last_inference_time() is not None
assert not runner.is_active
def test_context(self):
model = ONNX_MODELS["identity"]
engine = engine_from_network(NetworkFromOnnxBytes(model.loader))
with engine, TrtRunner(engine.create_execution_context) as runner:
model.check_runner(runner)
assert not runner.owns_engine
assert runner.owns_context
def test_device_buffer_order_matches_bindings(self):
model = ONNX_MODELS["reducable"]
engine = engine_from_network(NetworkFromOnnxBytes(model.loader))
with engine, TrtRunner(engine) as runner:
dev_buf_order = list(runner.device_buffers.keys())
for binding, dev_buf_name in zip(engine, dev_buf_order):
assert binding == dev_buf_name
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_shape_output(self):
model = ONNX_MODELS["reshape"]
engine = engine_from_network(NetworkFromOnnxBytes(model.loader))
with engine, TrtRunner(engine.create_execution_context) as runner:
model.check_runner(runner)
def test_multithreaded_runners_from_engine(self):
model = ONNX_MODELS["identity"]
engine = engine_from_network(NetworkFromOnnxBytes(model.loader))
with engine, TrtRunner(engine) as runner0, TrtRunner(engine) as runner1:
t1 = threading.Thread(target=model.check_runner, args=(runner0,))
t2 = threading.Thread(target=model.check_runner, args=(runner1,))
t1.start()
t2.start()
t1.join()
t2.join()
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.skipif(mod.version(trt.__version__)[0:2] == mod.version("7.2"), reason="Bugged in TRT 7.2")
def test_multiple_profiles(self):
model = ONNX_MODELS["dynamic_identity"]
profile0_shapes = [(1, 2, 1, 1), (1, 2, 1, 1), (1, 2, 1, 1)] # Use min==opt==max to fix shapes in the engine.
profile1_shapes = [(1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)]
profile2_shapes = [(1, 2, 4, 4), (1, 2, 8, 8), (1, 2, 16, 16)]
network_loader = NetworkFromOnnxBytes(model.loader)
profiles = [
Profile().add("X", *profile0_shapes),
Profile().add("X", *profile1_shapes),
Profile().add("X", *profile2_shapes),
]
config_loader = CreateConfig(profiles=profiles)
with TrtRunner(EngineFromNetwork(network_loader, config_loader)) as runner:
for index, shapes in enumerate([profile0_shapes, profile1_shapes, profile2_shapes]):
runner.set_profile(index)
for shape in shapes:
model.check_runner(runner, {"X": shape})
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_empty_tensor_with_dynamic_input_shape_tensor(self):
model = ONNX_MODELS["empty_tensor_expand"]
shapes = [(1, 2, 0, 3, 0), (2, 2, 0, 3, 0), (4, 2, 0, 3, 0)]
network_loader = NetworkFromOnnxBytes(model.loader)
profiles = [Profile().add("new_shape", *shapes)]
config_loader = CreateConfig(profiles=profiles)
with TrtRunner(EngineFromNetwork(network_loader, config_loader)) as runner:
for shape in shapes:
model.check_runner(runner, {"new_shape": shape})
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Test not compatible with TRT 6")
@pytest.mark.parametrize(
"names, err",
[
(["fake-input", "x"], "Extra keys in"),
(["fake-input"], "Some keys are missing"),
([], "Some keys are missing"),
],
)
def test_error_on_wrong_name_feed_dict(self, names, err):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
with pytest.raises(PolygraphyException, match=err):
runner.infer({name: np.ones(shape=(1, 1, 2, 2), dtype=np.float32) for name in names})
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Test not compatible with TRT 6")
def test_error_on_wrong_dtype_feed_dict(self):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
with pytest.raises(PolygraphyException, match="unexpected dtype."):
runner.infer({"x": np.ones(shape=(1, 1, 2, 2), dtype=np.int32)})
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Test not compatible with TRT 6")
def test_error_on_wrong_shape_feed_dict(self):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
with pytest.raises(PolygraphyException, match="incompatible shape."):
runner.infer({"x": np.ones(shape=(1, 1, 3, 2), dtype=np.float32)})
@pytest.mark.parametrize("use_view", [True, False]) # We should be able to use DeviceArray in place of DeviceView
def test_device_views(self, use_view):
model = ONNX_MODELS["reducable"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner, cuda.DeviceArray((1,), dtype=np.float32) as x:
x.copy_from(np.ones((1,), dtype=np.float32))
outputs = runner.infer(
{
"X0": x.view() if use_view else x,
"Y0": np.ones((1,), dtype=np.float32),
}
)
assert outputs["identity_out_6"][0] == 2
assert outputs["identity_out_8"][0] == 2
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
def test_no_output_copy(self):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
inp = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)
outputs = runner.infer({"x": inp}, copy_outputs_to_host=False)
assert isinstance(outputs["y"], cuda.DeviceView)
assert np.array_equal(outputs["y"].numpy(), inp)
def test_subsequent_infers_with_different_input_types(self):
model = ONNX_MODELS["identity"]
network_loader = NetworkFromOnnxBytes(model.loader)
with TrtRunner(EngineFromNetwork(network_loader)) as runner:
inp = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)
def check(outputs):
assert np.all(outputs["y"] == inp)
check(runner.infer({"x": inp}))
check(runner.infer({"x": cuda.DeviceArray().copy_from(inp)}))
check(runner.infer({"x": inp}))
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.parametrize("use_view", [True, False]) # We should be able to use DeviceArray in place of DeviceView
def test_device_view_dynamic_shapes(self, use_view):
model = ONNX_MODELS["dynamic_identity"]
profiles = [
Profile().add("X", (1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)),
]
runner = TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(model.loader), CreateConfig(profiles=profiles)))
with runner, cuda.DeviceArray(shape=(1, 2, 3, 3), dtype=np.float32) as arr:
inp = np.random.random_sample(size=(1, 2, 3, 3)).astype(np.float32)
arr.copy_from(inp)
outputs = runner.infer({"X": cuda.DeviceView(arr.ptr, arr.shape, arr.dtype) if use_view else arr})
assert np.all(outputs["Y"] == inp)
assert outputs["Y"].shape == (1, 2, 3, 3)
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported before TRT 8")
def test_cannot_use_device_view_shape_tensor(self):
model = ONNX_MODELS["empty_tensor_expand"]
with TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(model.loader))) as runner, cuda.DeviceArray(
shape=(5,), dtype=np.int32
) as arr:
with pytest.raises(PolygraphyException, match="it must reside in host memory"):
runner.infer({"data": np.ones((2, 0, 3, 0), dtype=np.float32), "new_shape": arr})
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
@pytest.mark.serial
@pytest.mark.parametrize("copy_outputs", [True, False], ids=["output_dtoh", "no_output_copy"])
@pytest.mark.parametrize("copy_inputs", [True, False], ids=["input_htod", "no_input_copy"])
def test_infer_overhead(self, copy_inputs, copy_outputs):
inp = np.ones(shape=(1, 2, 1024, 1024), dtype=np.float32)
dev_inp = cuda.DeviceArray(shape=inp.shape, dtype=inp.dtype).copy_from(inp)
out = np.zeros(shape=(1, 2, 1024, 1024), dtype=np.float32) # Using identity model!
dev_out = cuda.DeviceArray(shape=out.shape, dtype=out.dtype)
stream = cuda.Stream()
model = ONNX_MODELS["dynamic_identity"]
profiles = [
Profile().add("X", (1, 2, 1024, 1024), (1, 2, 1024, 1024), (1, 2, 1024, 1024)),
]
inp_name = list(model.input_metadata.keys())[0]
with engine_from_network(
network_from_onnx_bytes(model.loader), CreateConfig(profiles=profiles)
) as engine, engine.create_execution_context() as context, TrtRunner(context) as runner, dev_inp, dev_out:
# Inference outside the TrtRunner
def infer():
if copy_inputs:
dev_inp.copy_from(inp, stream=stream)
context.execute_async_v2(bindings=[dev_inp.ptr, dev_out.ptr], stream_handle=stream.ptr)
if copy_outputs:
dev_out.copy_to(out, stream=stream)
stream.synchronize()
native_time = time_func(infer)
feed_dict = {inp_name: (inp if copy_inputs else dev_inp)}
runner_time = time_func(
lambda: runner.infer(feed_dict, check_inputs=False, copy_outputs_to_host=copy_outputs)
)
# The overhead should be less than 0.5ms, or the runtime should be within 5%
print("Absolute difference: {:.5g}".format(runner_time - native_time))
print("Relative difference: {:.5g}".format(runner_time / native_time))
assert (runner_time - native_time) < 0.5e-3 or runner_time <= (native_time * 1.05)
# === End of file: tools/Polygraphy/tests/backend/trt/test_runner.py (repo: TensorRT-master) ===
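# A minimal sketch, not part of the test suite, of basic TrtRunner usage as exercised
# above; "model.onnx" and the input name "x" are hypothetical.
import numpy as np
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxPath, TrtRunner

with TrtRunner(EngineFromNetwork(NetworkFromOnnxPath("model.onnx"))) as runner:
    outputs = runner.infer({"x": np.ones((1, 1, 2, 2), dtype=np.float32)})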
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from textwrap import dedent
import pytest
import tensorrt as trt
from polygraphy import util
from polygraphy.backend.common import InvokeFromScript, invoke_from_script
from polygraphy.exception import PolygraphyException
class TestImporter(object):
@pytest.mark.parametrize("loader", [InvokeFromScript, invoke_from_script])
def test_import_from_script(self, loader):
script = dedent(
"""
from polygraphy.backend.trt import CreateNetwork
from polygraphy import func
import tensorrt as trt
@func.extend(CreateNetwork())
def load_network(builder, network):
inp = network.add_input("input", dtype=trt.float32, shape=(1, 1))
out = network.add_identity(inp).get_output(0)
network.mark_output(out)
"""
)
with util.NamedTemporaryFile("w+", suffix=".py") as f:
f.write(script)
f.flush()
if loader == InvokeFromScript:
load_network = loader(f.name, "load_network")
builder, network = load_network()
else:
builder, network = loader(f.name, "load_network")
with builder, network:
assert isinstance(builder, trt.Builder)
assert isinstance(network, trt.INetworkDefinition)
assert network.num_layers == 1
assert network.get_layer(0).type == trt.LayerType.IDENTITY
def test_import_non_existent(self):
script = dedent(
"""
def example():
pass
"""
)
with util.NamedTemporaryFile("w+", suffix=".py") as f:
f.write(script)
f.flush()
with pytest.raises(PolygraphyException, match="Could not import symbol: non_existent from"):
invoke_from_script(f.name, "non_existent")
# === End of file: tools/Polygraphy/tests/backend/common/test_loader.py (repo: TensorRT-master) ===
# === tools/Polygraphy/tests/backend/common/__init__.py (repo: TensorRT-master): empty file ===
# === tools/Polygraphy/tests/backend/onnxrt/__init__.py (repo: TensorRT-master): empty file ===
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.exception import PolygraphyException
from polygraphy.logger import G_LOGGER
from tests.models.meta import ONNX_MODELS
class TestLoggerCallbacks(object):
@pytest.mark.parametrize("sev", G_LOGGER.SEVERITY_LETTER_MAPPING.keys())
def test_set_severity(self, sev):
G_LOGGER.severity = sev
class TestOnnxrtRunner(object):
def test_can_name_runner(self):
NAME = "runner"
runner = OnnxrtRunner(None, name=NAME)
assert runner.name == NAME
def test_basic(self):
model = ONNX_MODELS["identity"]
with OnnxrtRunner(SessionFromOnnx(model.loader)) as runner:
assert runner.is_active
model.check_runner(runner)
assert runner.last_inference_time() is not None
assert not runner.is_active
def test_shape_output(self):
model = ONNX_MODELS["reshape"]
with OnnxrtRunner(SessionFromOnnx(model.loader)) as runner:
model.check_runner(runner)
def test_dim_param_preserved(self):
model = ONNX_MODELS["dim_param"]
with OnnxrtRunner(SessionFromOnnx(model.loader)) as runner:
input_meta = runner.get_input_metadata()
# The ONNX model's dim_param names should be preserved in the input metadata rather than collapsed to None.
assert len(input_meta) == 1
for _, (_, shape) in input_meta.items():
assert shape == ["dim0", 16, 128]
@pytest.mark.parametrize(
"names, err",
[
(["fake-input", "x"], "Extra keys in"),
(["fake-input"], "Some keys are missing"),
([], "Some keys are missing"),
],
)
def test_error_on_wrong_name_feed_dict(self, names, err):
model = ONNX_MODELS["identity"]
with OnnxrtRunner(SessionFromOnnx(model.loader)) as runner:
with pytest.raises(PolygraphyException, match=err):
runner.infer({name: np.ones(shape=(1, 1, 2, 2), dtype=np.float32) for name in names})
def test_error_on_wrong_dtype_feed_dict(self):
model = ONNX_MODELS["identity"]
with OnnxrtRunner(SessionFromOnnx(model.loader)) as runner:
with pytest.raises(PolygraphyException, match="unexpected dtype."):
runner.infer({"x": np.ones(shape=(1, 1, 2, 2), dtype=np.int32)})
def test_error_on_wrong_shape_feed_dict(self):
model = ONNX_MODELS["identity"]
with OnnxrtRunner(SessionFromOnnx(model.loader)) as runner:
with pytest.raises(PolygraphyException, match="incompatible shape."):
runner.infer({"x": np.ones(shape=(1, 1, 3, 2), dtype=np.float32)})
# === End of file: tools/Polygraphy/tests/backend/onnxrt/test_runner.py (repo: TensorRT-master) ===
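# A minimal sketch, not part of the test suite, of the ONNX-Runtime backend used above;
# "model.onnx" and the input name "x" are hypothetical, and passing a path to
# SessionFromOnnx (rather than a loader) is an assumption.
import numpy as np
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx

with OnnxrtRunner(SessionFromOnnx("model.onnx")) as runner:
    outputs = runner.infer({"x": np.ones((1, 1, 2, 2), dtype=np.float32)})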
# === tools/Polygraphy/tests/backend/pluginref/__init__.py (repo: TensorRT-master): empty file ===
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from polygraphy.backend.onnx import GsFromOnnx, OnnxFromPath
from polygraphy.backend.pluginref import PluginRefRunner
from polygraphy.exception import PolygraphyException
from polygraphy.logger import G_LOGGER
from tests.models.meta import ONNX_MODELS
class TestLoggerCallbacks(object):
@pytest.mark.parametrize("sev", G_LOGGER.SEVERITY_LETTER_MAPPING.keys())
def test_set_severity(self, sev):
G_LOGGER.severity = sev
class TestPluginRefRunner(object):
def test_can_name_runner(self):
NAME = "runner"
runner = PluginRefRunner(None, name=NAME)
assert runner.name == NAME
def test_basic(self):
model = ONNX_MODELS["identity"]
with PluginRefRunner(GsFromOnnx(OnnxFromPath(model.path))) as runner:
assert runner.is_active
model.check_runner(runner)
assert not runner.is_active
def test_works_on_multiple_nodes(self):
model = ONNX_MODELS["identity_identity"]
with PluginRefRunner(GsFromOnnx(OnnxFromPath(model.path))) as runner:
model.check_runner(runner)
def test_fail_on_unsupported_node(self):
model = ONNX_MODELS["and"]
with PluginRefRunner(GsFromOnnx(OnnxFromPath(model.path))) as runner:
with pytest.raises(PolygraphyException, match="does not have a reference implementation registered!"):
runner.infer({"x": np.ones(shape=(3, 4), dtype=np.bool), "y": np.ones(shape=(3, 4), dtype=np.bool)})
@pytest.mark.parametrize(
"names, err",
[
(["fake-input", "x"], "Extra keys in"),
(["fake-input"], "Some keys are missing"),
([], "Some keys are missing"),
],
)
def test_error_on_wrong_name_feed_dict(self, names, err):
model = ONNX_MODELS["identity"]
with PluginRefRunner(GsFromOnnx(OnnxFromPath(model.path))) as runner:
with pytest.raises(PolygraphyException, match=err):
runner.infer({name: np.ones(shape=(1, 1, 2, 2), dtype=np.float32) for name in names})
def test_error_on_wrong_dtype_feed_dict(self):
model = ONNX_MODELS["identity"]
with PluginRefRunner(GsFromOnnx(OnnxFromPath(model.path))) as runner:
with pytest.raises(PolygraphyException, match="unexpected dtype."):
runner.infer({"x": np.ones(shape=(1, 1, 2, 2), dtype=np.int32)})
def test_error_on_wrong_shape_feed_dict(self):
model = ONNX_MODELS["identity"]
with PluginRefRunner(GsFromOnnx(OnnxFromPath(model.path))) as runner:
with pytest.raises(PolygraphyException, match="incompatible shape."):
runner.infer({"x": np.ones(shape=(1, 1, 3, 2), dtype=np.float32)})
# === End of file: tools/Polygraphy/tests/backend/pluginref/test_runner.py (repo: TensorRT-master) ===
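# A minimal sketch, not part of the test suite, of the plugin reference backend used
# above; "model.onnx" and the input name "x" are hypothetical.
import numpy as np
from polygraphy.backend.onnx import GsFromOnnx, OnnxFromPath
from polygraphy.backend.pluginref import PluginRefRunner

with PluginRefRunner(GsFromOnnx(OnnxFromPath("model.onnx"))) as runner:
    outputs = runner.infer({"x": np.ones((1, 1, 2, 2), dtype=np.float32)})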
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
import pytest
import tensorflow as tf
from polygraphy import constants, util
from polygraphy.backend.tf import GraphFromFrozen, ModifyGraphOutputs, SaveGraph, graph_from_frozen
from polygraphy.logger import G_LOGGER
from tests.helper import is_file_non_empty
from tests.models.meta import TF_MODELS
class TestLoggerCallbacks(object):
@pytest.mark.parametrize("sev", G_LOGGER.SEVERITY_LETTER_MAPPING.keys())
def test_set_severity(self, sev):
G_LOGGER.severity = sev
class TestFrozenGraphLoader(object):
def test_load_graph(self):
with tf.compat.v1.Graph().as_default() as graph:
            inp = tf.compat.v1.placeholder(shape=(1, 1, 1, 1), dtype=tf.float32)
out = tf.identity(inp)
graph, outputs = graph_from_frozen(graph)
assert graph
assert outputs
def test_load_pb(self):
tf_loader = GraphFromFrozen(TF_MODELS["identity"].path)
tf_loader()
class TestModifyGraph(object):
def test_layerwise(self):
load_frozen = GraphFromFrozen(TF_MODELS["identity"].path)
modify_tf = ModifyGraphOutputs(load_frozen, outputs=constants.MARK_ALL)
graph, outputs = modify_tf()
assert graph
assert outputs
class TestSaveGraph(object):
def test_save_pb(self):
with util.NamedTemporaryFile() as outpath:
tf_loader = SaveGraph(GraphFromFrozen(TF_MODELS["identity"].path), path=outpath.name)
tf_loader()
assert is_file_non_empty(outpath.name)
def test_save_tensorboard(self):
with tempfile.TemporaryDirectory() as outdir:
tf_loader = SaveGraph(GraphFromFrozen(TF_MODELS["identity"].path), tensorboard_dir=outdir)
tf_loader()
assert os.path.exists(tf_loader.tensorboard_dir)
| TensorRT-master | tools/Polygraphy/tests/backend/tf/test_loader.py |
TensorRT-master | tools/Polygraphy/tests/backend/tf/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from polygraphy import util
from polygraphy.backend.tf import SessionFromGraph, TfRunner
from polygraphy.exception import PolygraphyException
from tests.helper import is_file_non_empty
from tests.models.meta import TF_MODELS
class TestTfRunner(object):
def test_can_name_runner(self):
NAME = "runner"
runner = TfRunner(None, name=NAME)
assert runner.name == NAME
def test_basic(self):
model = TF_MODELS["identity"]
with TfRunner(SessionFromGraph(model.loader)) as runner:
assert runner.is_active
model.check_runner(runner)
assert runner.last_inference_time() is not None
assert not runner.is_active
@pytest.mark.skip(reason="Non-trivial to set up - requires CUPTI")
def test_save_timeline(self):
model = TF_MODELS["identity"]
with util.NamedTemporaryFile() as outpath:
with TfRunner(SessionFromGraph(model.loader), allow_growth=True, save_timeline=outpath.name) as runner:
model.check_runner(runner)
assert is_file_non_empty(outpath.name)
@pytest.mark.parametrize(
"names, err",
[
(["fake-input", "Input:0"], "Extra keys in"),
(["fake-input"], "Some keys are missing"),
([], "Some keys are missing"),
],
)
def test_error_on_wrong_name_feed_dict(self, names, err):
model = TF_MODELS["identity"]
with TfRunner(SessionFromGraph(model.loader)) as runner:
with pytest.raises(PolygraphyException, match=err):
runner.infer({name: np.ones(shape=(1, 15, 25, 30), dtype=np.float32) for name in names})
def test_error_on_wrong_dtype_feed_dict(self):
model = TF_MODELS["identity"]
with TfRunner(SessionFromGraph(model.loader)) as runner:
with pytest.raises(PolygraphyException, match="unexpected dtype."):
runner.infer({"Input:0": np.ones(shape=(1, 15, 25, 30), dtype=np.int32)})
def test_error_on_wrong_shape_feed_dict(self):
model = TF_MODELS["identity"]
with TfRunner(SessionFromGraph(model.loader)) as runner:
with pytest.raises(PolygraphyException, match="incompatible shape."):
runner.infer({"Input:0": np.ones(shape=(1, 1, 25, 30), dtype=np.float32)})
| TensorRT-master | tools/Polygraphy/tests/backend/tf/test_runner.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from polygraphy.backend.base import BaseLoader
from polygraphy.exception import PolygraphyInternalException
def test_loader_checks_call_constant_method():
class BadLoader(BaseLoader):
def __init__(self):
self.x = 2
def call_impl(self):
self.x = 3
with pytest.raises(PolygraphyInternalException):
BadLoader()()
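# A positive-path sketch added for contrast (an assumption drawn from the test
# above: `call_impl` may freely *read* attributes and return a value, so long as
# it does not mutate state):
def test_loader_allows_non_mutating_call():
    class GoodLoader(BaseLoader):
        def __init__(self):
            self.x = 2
        def call_impl(self):
            # Reading `self.x` is fine; only mutation trips the constant-method check.
            return self.x + 1
    assert GoodLoader()() == 3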
| TensorRT-master | tools/Polygraphy/tests/backend/base/test_loader.py |
TensorRT-master | tools/Polygraphy/tests/backend/base/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.exception import PolygraphyException
from tests.models.meta import ONNX_MODELS
def test_infer_raises_if_runner_inactive():
runner = OnnxrtRunner(SessionFromOnnx(ONNX_MODELS["identity"].loader))
feed_dict = {"x": np.ones((1, 1, 2, 2), dtype=np.float32)}
with pytest.raises(PolygraphyException, match="Must be activated"):
runner.infer(feed_dict)
| TensorRT-master | tools/Polygraphy/tests/backend/base/test_runner.py |
TensorRT-master | tools/Polygraphy/tests/models/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Helper utility to generate models to help test the `debug reduce`
subtool, which reduces failing ONNX models.
"""
import os
import numpy as np
import onnx
import onnx_graphsurgeon as gs
CURDIR = os.path.dirname(__file__)
@gs.Graph.register()
def identity(self, inp):
return self.layer(op="Identity", inputs=[inp], outputs=["identity_out"])[0]
@gs.Graph.register()
def add(self, a, b):
return self.layer(op="Add", inputs=[a, b], outputs=["add_out"])[0]
@gs.Graph.register()
def constant(self, values: gs.Constant):
return self.layer(op="Constant", outputs=["constant_out"], attrs={"value": values})[0]
def save(graph, model_name):
path = os.path.join(CURDIR, model_name)
print("Writing: {:}".format(path))
onnx.save(gs.export_onnx(graph), path)
# Generates a model with multiple inputs/outputs:
#
# X0 Y0
# | |
# X1 Y1
# \ /
# Z0
# / \
# Z1 Z2
#
def make_multi_input_output():
DTYPE = np.float32
SHAPE = (1,)
X0 = gs.Variable("X0", dtype=DTYPE, shape=SHAPE)
Y0 = gs.Variable("Y0", dtype=DTYPE, shape=SHAPE)
graph = gs.Graph(inputs=[X0, Y0])
X1 = graph.identity(X0)
Y1 = graph.identity(Y0)
Z0 = graph.add(X1, Y1)
Z1 = graph.identity(Z0)
Z1.dtype = DTYPE
Z1.shape = SHAPE
Z2 = graph.identity(Z0)
Z2.dtype = DTYPE
Z2.shape = SHAPE
graph.outputs = [Z1, Z2]
save(graph, "reducable.onnx")
make_multi_input_output()
# Generates a linear model with a Constant node and no inputs:
#
# X0 (Constant)
# |
# X1 (Identity)
# |
# X2 (Identity)
#
def make_constant_linear():
DTYPE = np.float32
SHAPE = (4, 4)
graph = gs.Graph()
X0 = graph.constant(gs.Constant("const", values=np.ones(SHAPE, dtype=DTYPE)))
# Explicitly clear shape to trigger the failure condition in reduce
X0.shape = None
X1 = graph.identity(X0)
X2 = graph.identity(X1)
X2.dtype = DTYPE
X2.shape = SHAPE
graph.outputs = [X2]
save(graph, "reducable_with_const.onnx")
make_constant_linear()
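# Sketch of a possible follow-up (hypothetical invocation; check
# `polygraphy debug reduce -h` for the exact flags in your version):
# the models written above can be fed to the `debug reduce` subtool to
# bisect a failure down to a minimal subgraph, e.g.:
#
#   polygraphy debug reduce reducable.onnx -o minimal.onnx \
#       --check <command that fails on the original model>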
| TensorRT-master | tools/Polygraphy/tests/models/make_reducable.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
from polygraphy import util
from polygraphy.backend.common import BytesFromPath
from polygraphy.backend.onnx import OnnxFromPath
from polygraphy.backend.tf import GraphFromFrozen
from polygraphy.common import TensorMetadata
def model_path(name=None):
path = os.path.abspath(os.path.dirname(__file__))
if name is not None:
path = os.path.join(path, name)
return path
class Model(object):
def __init__(self, path, LoaderType, check_runner, input_metadata=None, ext_data=None):
self.path = path
self.loader = LoaderType(self.path)
self.check_runner = check_runner
self.input_metadata = input_metadata
self.ext_data = ext_data
def check_tf_identity(runner):
feed_dict = {"Input:0": np.random.random_sample(size=(1, 15, 25, 30)).astype(np.float32)}
outputs = runner.infer(feed_dict)
assert np.all(outputs["Identity_2:0"] == feed_dict["Input:0"])
TF_MODELS = {
"identity": Model(path=model_path("tf_identity.pb"), LoaderType=GraphFromFrozen, check_runner=check_tf_identity),
}
def check_identity(runner):
feed_dict = {"x": np.random.random_sample(size=(1, 1, 2, 2)).astype(np.float32)}
outputs = runner.infer(feed_dict)
assert np.all(outputs["y"] == feed_dict["x"])
def check_identity_identity(runner):
feed_dict = {"X": np.random.random_sample(size=(64, 64)).astype(np.float32)}
outputs = runner.infer(feed_dict)
assert np.all(outputs["identity_out_2"] == feed_dict["X"])
def check_dynamic_identity(runner, shapes):
feed_dict = {"X": np.random.random_sample(size=shapes["X"]).astype(np.float32)}
outputs = runner.infer(feed_dict)
assert np.array_equal(outputs["Y"], feed_dict["X"])
def check_empty_tensor_expand(runner, shapes):
shape = shapes["new_shape"]
feed_dict = {"data": np.zeros(shape=(2, 0, 3, 0), dtype=np.float32), "new_shape": np.array(shape, dtype=np.int32)}
outputs = runner.infer(feed_dict)
# Empty tensor will still be empty after broadcast
assert outputs["expanded"].shape == shape
assert util.volume(outputs["expanded"].shape) == 0
def check_reshape(runner):
feed_dict = {"data": np.random.random_sample(size=(1, 3, 5, 5)).astype(np.float32)}
outputs = runner.infer(feed_dict)
assert np.all(outputs["output"] == feed_dict["data"].ravel())
def no_check_implemented(runner):
raise NotImplementedError("No check_runner implemented for this model")
ONNX_MODELS = {
"identity": Model(
path=model_path("identity.onnx"),
LoaderType=BytesFromPath,
check_runner=check_identity,
input_metadata=TensorMetadata().add("x", dtype=np.float32, shape=(1, 1, 2, 2)),
),
"identity_identity": Model(
path=model_path("identity_identity.onnx"), LoaderType=BytesFromPath, check_runner=check_identity_identity
),
"dynamic_identity": Model(
path=model_path("dynamic_identity.onnx"),
LoaderType=BytesFromPath,
check_runner=check_dynamic_identity,
input_metadata=TensorMetadata().add("X", dtype=np.float32, shape=(1, 1, -1, -1)),
),
"empty_tensor_expand": Model(
path=model_path("empty_tensor_expand.onnx"), LoaderType=BytesFromPath, check_runner=check_empty_tensor_expand
),
"and": Model(path=model_path("and.onnx"), LoaderType=BytesFromPath, check_runner=no_check_implemented),
"scan": Model(path=model_path("scan.onnx"), LoaderType=BytesFromPath, check_runner=no_check_implemented),
"pow_scalar": Model(
path=model_path("pow_scalar.onnx"), LoaderType=BytesFromPath, check_runner=no_check_implemented
),
"dim_param": Model(path=model_path("dim_param.onnx"), LoaderType=BytesFromPath, check_runner=no_check_implemented),
"tensor_attr": Model(
path=model_path("tensor_attr.onnx"), LoaderType=BytesFromPath, check_runner=no_check_implemented
),
"identity_with_initializer": Model(
path=model_path("identity_with_initializer.onnx"), LoaderType=BytesFromPath, check_runner=no_check_implemented
),
"const_foldable": Model(
path=model_path("const_foldable.onnx"), LoaderType=BytesFromPath, check_runner=no_check_implemented
),
"reshape": Model(path=model_path("reshape.onnx"), LoaderType=BytesFromPath, check_runner=check_reshape),
"reducable": Model(
path=model_path("reducable.onnx"),
LoaderType=BytesFromPath,
check_runner=no_check_implemented,
input_metadata=TensorMetadata().add("X0", shape=(1,), dtype=np.float32).add("Y0", shape=(1,), dtype=np.float32),
),
"reducable_with_const": Model(
path=model_path("reducable_with_const.onnx"),
LoaderType=BytesFromPath,
check_runner=no_check_implemented,
),
"ext_weights": Model(
path=model_path("ext_weights.onnx"),
LoaderType=OnnxFromPath,
check_runner=no_check_implemented,
ext_data=model_path("data"),
),
"ext_weights_same_dir": Model(
path=model_path(os.path.join("ext_weights_same_dir", "ext_weights.onnx")),
LoaderType=OnnxFromPath,
check_runner=no_check_implemented,
ext_data=model_path("ext_weights_same_dir"),
),
"capability": Model(
path=model_path("capability.onnx"), LoaderType=BytesFromPath, check_runner=no_check_implemented
),
"instancenorm": Model(
path=model_path("instancenorm.onnx"), LoaderType=BytesFromPath, check_runner=no_check_implemented
),
}
| TensorRT-master | tools/Polygraphy/tests/models/meta.py |
TensorRT-master | tools/Polygraphy/tests/common/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from polygraphy.common import TensorMetadata
class TestTensorMetadata(object):
def test_str(self):
meta = TensorMetadata().add("X", dtype=np.float32, shape=(64, 64))
assert str(meta) == "{X [dtype=float32, shape=(64, 64)]}"
def test_str_no_dtype(self):
meta = TensorMetadata().add("X", dtype=None, shape=(64, 64))
assert str(meta) == "{X [shape=(64, 64)]}"
def test_str_no_shape(self):
meta = TensorMetadata().add("X", dtype=np.float32, shape=None)
assert str(meta) == "{X [dtype=float32]}"
def test_str_no_meta(self):
meta = TensorMetadata().add("X", dtype=None, shape=None)
assert str(meta) == "{X}"
| TensorRT-master | tools/Polygraphy/tests/common/test_struct.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from polygraphy.common.interface import TypedDict, TypedList
from polygraphy.exception import PolygraphyException
@pytest.fixture()
def int_to_float():
class IntToFloat(TypedDict(lambda: int, lambda: float)):
pass
return IntToFloat()
class TestTypedDict(object):
def test_wrong_type_set_item_value(self, int_to_float):
with pytest.raises(PolygraphyException, match="Unsupported value type"):
int_to_float[0] = "hi"
def test_wrong_type_set_item_key(self, int_to_float):
with pytest.raises(PolygraphyException, match="Unsupported key type"):
int_to_float["hi"] = 1.0
def test_wrong_type_update(self, int_to_float):
with pytest.raises(PolygraphyException, match="Unsupported key type"):
int_to_float.update({"hi": 1.0})
@pytest.fixture()
def ints():
class Ints(TypedList(lambda: int)):
pass
return Ints()
class TestTypedList(object):
def test_wrong_type_append(self, ints):
with pytest.raises(PolygraphyException, match="Unsupported element type"):
ints.append(1.0)
def test_wrong_type_extend(self, ints):
with pytest.raises(PolygraphyException, match="Unsupported element type"):
ints.extend([0, 1, 2, 3, "surprise"])
def test_wrong_type_iadd(self, ints):
with pytest.raises(PolygraphyException, match="Unsupported element type"):
ints += [0, 1.0]
def test_wrong_type_setitem(self, ints):
ints.append(0)
with pytest.raises(PolygraphyException, match="Unsupported element type"):
ints[0] = 1.0
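# A minimal positive-path sketch (an assumption implied by the error tests above:
# keys, values, and elements of the declared types are accepted without raising):
def test_accepts_matching_types(int_to_float, ints):
    int_to_float[0] = 1.0  # int key, float value - should not raise
    ints.append(5)
    ints.extend([6, 7])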
| TensorRT-master | tools/Polygraphy/tests/common/test_interface.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from polygraphy import mod
from polygraphy.backend.base import BaseLoader
# For test_funcify_with_collision
functor2 = None
class TestExporter(object):
def test_func(self):
@mod.export()
def test_func0():
pass
assert "test_func0" in __all__
def test_class(self):
@mod.export()
class TestClass0:
pass
assert "TestClass0" in __all__
def test_funcify_func_fails(self):
with pytest.raises(AssertionError, match="must be a loader"):
@mod.export(funcify=True)
def test_func1():
pass
def test_funcify_non_base_loader_class(self):
with pytest.raises(AssertionError, match="must derive from BaseLoader"):
@mod.export(funcify=True)
class NonFunctor0(object):
def __init__(self, x):
self.x = x
def test_funcify_duplicate_parameters_in_call_init(self):
with pytest.raises(AssertionError, match="call_impl and __init__ have the same argument names"):
@mod.export(funcify=True)
class DupArgs(BaseLoader):
def __init__(self, x):
self.x = x
def call_impl(self, x):
self.x = x
def test_funcify_takes_docstring(self):
@mod.export(funcify=True)
class DocstringFunctor(BaseLoader):
"""This is a docstring"""
def __init__(self):
pass
def call_impl(self):
pass
assert "DocstringFunctor" in __all__
assert "docstring_functor" in __all__
assert docstring_functor.__doc__ == "Immediately evaluated functional variant of :class:`DocstringFunctor` .\n"
def test_funcify_functor_no_call_args(self):
@mod.export(funcify=True)
class Functor0(BaseLoader):
def __init__(self, x):
self.x = x
def call_impl(self):
return self.x
assert "Functor0" in __all__
assert "functor0" in __all__
assert functor0(0) == 0
def test_funcify_functor_with_call_args(self):
@mod.export(funcify=True)
class Functor1(BaseLoader):
def __init__(self, x):
self.x = x
def call_impl(self, y, z):
return self.x, y, z
assert "Functor1" in __all__
assert "functor1" in __all__
# __init__ arguments always precede __call__ arguments
x, y, z = functor1(0, 1, -1)
assert (x, y, z) == (0, 1, -1)
# Keyword arguments should behave as expected
x, y, z = functor1(y=1, x=0, z=-1)
assert (x, y, z) == (0, 1, -1)
    def test_funcify_functor_with_default_call_args(self):
@mod.export(funcify=True)
class FunctorWithCallArgs(BaseLoader):
def __init__(self, x=0):
self.x = x
def call_impl(self, y, z=-1):
return self.x, y, z
assert "FunctorWithCallArgs" in __all__
assert "functor_with_call_args" in __all__
# __init__ arguments always precede __call__ arguments
x, y, z = functor_with_call_args(y=1)
assert (x, y, z) == (0, 1, -1)
# Keyword arguments should behave as expected
        x, y, z = functor_with_call_args(z=-1, y=1)
assert (x, y, z) == (0, 1, -1)
def test_funcify_with_collision(self):
with pytest.raises(AssertionError, match="symbol is already defined"):
@mod.export(funcify=True)
class Functor2(BaseLoader):
def __init__(self, x):
self.x = x
def call_impl(self, y, z):
return self.x, y, z
def test_funcify_functor_with_dynamic_call_args_kwargs(self):
@mod.export(funcify=True)
class Functor3(BaseLoader):
def __init__(self, f):
self.f = f
def call_impl(self, *args, **kwargs):
return self.f(*args, **kwargs)
assert "Functor3" in __all__
assert "functor3" in __all__
# We should be able to pass arbitrary arguments to call now.
# __init__ arguments are always first.
def f(arg0, arg1, arg2):
return arg0 + arg1 + arg2
assert functor3(f, 1, 2, arg2=4) == 7
def test_funcify_with_inherited_init(self):
class BaseFunctor4(BaseLoader):
def __init__(self, x):
self.x = x
@mod.export(funcify=True)
class Functor4(BaseFunctor4):
def call_impl(self):
return self.x
assert "Functor4" in __all__
assert "functor4" in __all__
assert functor4(-1) == -1
def test_funcify_functor_with_default_vals(self):
@mod.export(funcify=True)
class FunctorWithDefaults(BaseLoader):
def __init__(self, w, x=1):
self.w = w
self.x = x
def call_impl(self, y, z=3):
return self.w, self.x, y, z
assert "FunctorWithDefaults" in __all__
assert "functor_with_defaults" in __all__
# Since x and z have default values, the arguments will be interlaced into:
# w, y, x, z
# __init__ parameters take precedence, and call_impl parameters follow.
w, x, y, z = functor_with_defaults(-1, -2) # Set just w, y
assert (w, x, y, z) == (-1, 1, -2, 3)
w, x, y, z = functor_with_defaults(0, 1, 2, 3) # Set all
assert (w, x, y, z) == (0, 2, 1, 3)
| TensorRT-master | tools/Polygraphy/tests/mod/test_exporter.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
from textwrap import dedent
import pytest
import tensorrt as trt
from polygraphy import mod, util
from polygraphy.exception import PolygraphyException
from polygraphy.mod.importer import _version_ok
class TestImporter(object):
def test_import_from_script(self):
script = dedent(
"""
from polygraphy.backend.trt import CreateNetwork
from polygraphy import func
import tensorrt as trt
@func.extend(CreateNetwork())
def load_network(builder, network):
inp = network.add_input("input", dtype=trt.float32, shape=(1, 1))
out = network.add_identity(inp).get_output(0)
network.mark_output(out)
"""
)
with util.NamedTemporaryFile("w+", suffix=".py") as f:
f.write(script)
f.flush()
orig_sys_path = copy.deepcopy(sys.path)
load_network = mod.import_from_script(f.name, "load_network")
assert sys.path == orig_sys_path
builder, network = load_network()
with builder, network:
assert isinstance(builder, trt.Builder)
assert isinstance(network, trt.INetworkDefinition)
assert network.num_layers == 1
assert network.get_layer(0).type == trt.LayerType.IDENTITY
assert sys.path == orig_sys_path
def test_import_non_existent(self):
script = dedent(
"""
def example():
pass
"""
)
with util.NamedTemporaryFile("w+", suffix=".py") as f:
f.write(script)
f.flush()
orig_sys_path = copy.deepcopy(sys.path)
example = mod.import_from_script(f.name, "example")
assert sys.path == orig_sys_path
assert example is not None
example()
with pytest.raises(PolygraphyException, match="Could not import symbol: non_existent from"):
mod.import_from_script(f.name, "non_existent")
assert sys.path == orig_sys_path
@pytest.mark.parametrize(
"ver, pref, expected",
[
("0.0.0", "==0.0.0", True),
("0.0.0", "== 0.0.1", False),
("0.0.0", ">= 0.0.0", True),
("0.0.0", ">=0.0.1", False),
("0.0.0", "<= 0.0.0", True),
("0.0.2", "<=0.0.1", False),
("0.0.1", "> 0.0.0", True),
("0.0.1", ">0.0.1", False),
("0.0.0", "< 0.0.1", True),
("0.0.0", "< 0.0.0", False),
("0.2.0", mod.LATEST_VERSION, False),
],
)
def test_version_ok(self, ver, pref, expected):
assert _version_ok(ver, pref) == expected
| TensorRT-master | tools/Polygraphy/tests/mod/test_importer.py |
TensorRT-master | tools/Polygraphy/tests/mod/__init__.py |
|
TensorRT-master | tools/Polygraphy/tests/func/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from polygraphy import func
from polygraphy.exception import PolygraphyException, PolygraphyInternalException
class TestExtend(object):
def test_override_rv(self):
def x():
return 1
# Since y explicitly returns something, the return value of x is discarded.
@func.extend(x)
def y(elem):
assert elem == 1
return 2
assert y() == 2
def test_extend_named_parameters(self):
def x(arg0, arg1):
return arg0, arg1
@func.extend(x)
def y(elem0, elem1):
pass
arg0, arg1 = y(arg1=1, arg0=0)
assert arg0 == 0
assert arg1 == 1
def test_extend_0_args_1_rv(self):
def x():
return 1
@func.extend(x)
def y(elem):
assert elem == 1
assert y() == 1
def test_extend_0_args_2_rv(self):
def x():
return 1, 2
@func.extend(x)
def y(elem0, elem1):
assert elem0 == 1
assert elem1 == 2
assert y() == (1, 2)
def test_extend_1_args_0_rv(self):
def x(arg0):
pass
@func.extend(x)
def y():
pass
y(1)
def test_extend_1_args_1_rv(self):
def x(arg0):
assert arg0 == 1
return 3
@func.extend(x)
def y(elem):
assert elem == 3
assert y(1) == 3
def test_extend_2_args_2_rv(self):
def x(arg0, arg1):
assert arg0 == -1
assert arg1 == -1
return 1, 2
@func.extend(x)
def y(elem0, elem1):
assert elem0 == 1
assert elem1 == 2
assert y(-1, -1) == (1, 2)
def test_extend_can_modify_rv(self):
def x():
return []
@func.extend(x)
def y(lst):
lst.extend([1, 2, 3])
assert x() == []
assert y() == [1, 2, 3]
def test_extend_can_modify_rv_objects(self):
class ModifiableObj(object):
def __init__(self):
self.value = 0
def x():
return ModifiableObj()
@func.extend(x)
def y(mo):
mo.value = 1
assert x().value == 0
assert y().value == 1
def test_extend_incorrect_num_args(self):
def x():
return 1, 2
with pytest.raises(
PolygraphyException, match=r"Function: y accepts 1 parameter\(s\), but needs to accept 2 parameter\(s\)"
):
@func.extend(x)
def y(elem0):
assert elem0 == 1
y()
class TestConstantMethod(object):
def test_cannot_modify_attrs(self):
class Dummy(object):
def __init__(self):
self.x = 1
@func.constantmethod
def modify_x(self):
self.x = 2
d = Dummy()
with pytest.raises(PolygraphyInternalException, match="was mutated in a constant method"):
d.modify_x()
def test_cannot_add_attrs(self):
class Dummy(object):
@func.constantmethod
def modify_x(self):
self.x = 2
d = Dummy()
with pytest.raises(PolygraphyInternalException, match="was mutated in a constant method"):
d.modify_x()
| TensorRT-master | tools/Polygraphy/tests/func/test_func.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
ROOT_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir)
sys.path.insert(0, ROOT_DIR)
import polygraphy
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
]
# Want to be able to generate docs with no dependencies installed
autodoc_mock_imports = ["tensorrt", "onnx", "numpy", "tensorflow", "onnx_graphsurgeon", "onnxruntime", "tf2onnx"]
autodoc_default_options = {
"members": True,
"show-inheritance": True,
"exclude-members": "activate_impl, deactivate_impl, get_input_metadata_impl, BaseNetworkFromOnnx, Encoder, Decoder, add_json_methods, constantmethod",
"special-members": "__call__, __getitem__, __bool__, __enter__, __exit__",
}
autodoc_member_order = "bysource"
autodoc_inherit_docstrings = True
add_module_names = False
autosummary_generate = True
source_suffix = [".rst"]
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Polygraphy"
copyright = "2020, NVIDIA"
author = "NVIDIA"
version = polygraphy.__version__
# The full version, including alpha/beta/rc tags.
release = version
# Style
pygments_style = "colorful"
html_theme = "sphinx_rtd_theme"
# Use the TRT theme and NVIDIA logo
html_static_path = ["_static"]
html_logo = "_static/img/nvlogo_white.png"
# Hide source link
html_show_sourcelink = False
# Output file base name for HTML help builder.
htmlhelp_basename = "PolygraphyDoc"
# Template files to extend default Sphinx templates.
# See https://www.sphinx-doc.org/en/master/templating.html for details.
templates_path = ["_templates"]
# For constructor arguments to show up in Sphinx generated doc
autoclass_content = "both"
# Unlimited depth sidebar.
html_theme_options = {"navigation_depth": -1}
html_sidebars = {"**": ["globaltoc.html", "relations.html", "sourcelink.html", "searchbox.html"]}
# Allows us to override the default page width in the Sphinx theme.
def setup(app):
app.add_css_file("style.css")
LATEX_BUILDER = "sphinx.builders.latex"
if LATEX_BUILDER in app.config.extensions:
app.config.extensions.remove(LATEX_BUILDER)
| TensorRT-master | tools/Polygraphy/docs/conf.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Defines a `load_data` function that returns a generator yielding
feed_dicts so that this script can be used as the argument for
the --data-loader-script command-line parameter.
"""
import numpy as np
INPUT_SHAPE = (1, 2, 28, 28)
def load_data():
for _ in range(5):
yield {"x": np.ones(shape=INPUT_SHAPE, dtype=np.float32)} # Still totally real data
| TensorRT-master | tools/Polygraphy/examples/cli/run/05_comparing_with_custom_data/data_loader.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Parses an ONNX model, and then extends it with an Identity layer.
"""
from polygraphy import func
from polygraphy.backend.trt import NetworkFromOnnxPath
parse_onnx = NetworkFromOnnxPath("identity.onnx")
# If we define a function called `load_network`, polygraphy can
# use it directly in place of using a model file.
#
# TIP: If our function isn't called `load_network`, we can explicitly specify
# the name with the `--trt-network-func-name` argument.
@func.extend(parse_onnx)
def load_network(builder, network, parser):
# NOTE: func.extend() causes the signature of this function to be `() -> (builder, network, parser)`
# For details on how this works, see examples/api/03_interoperating_with_tensorrt
# Append an identity layer to the network
prev_output = network.get_output(0)
network.unmark_output(prev_output)
output = network.add_identity(prev_output).get_output(0)
network.mark_output(output)
# Notice that we don't need to return anything - `extend()` takes care of that for us!
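# A minimal usage sketch (hypothetical; consult `polygraphy run -h` for the
# exact spelling of the model-type flag in your version):
#
#   polygraphy run define_network.py --model-type=trt-network-script --trt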
| TensorRT-master | tools/Polygraphy/examples/cli/run/04_defining_a_tensorrt_network_or_config_manually/define_network.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Creates a TensorRT builder configuration and enables FP16 tactics.
"""
import tensorrt as trt
from polygraphy import func
from polygraphy.backend.trt import CreateConfig
# If we define a function called `load_config`, polygraphy can use it to
# create the builder configuration.
#
# TIP: If our function isn't called `load_config`, we can explicitly specify
# the name with the `--trt-config-func-name` argument.
@func.extend(CreateConfig())
def load_config(config):
# NOTE: func.extend() causes the signature of this function to be `(builder, network) -> config`
# For details on how this works, see examples/api/03_interoperating_with_tensorrt
config.set_flag(trt.BuilderFlag.FP16)
# Notice that we don't need to return anything - `extend()` takes care of that for us!
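# A minimal usage sketch (hypothetical; the script is passed wherever a TensorRT
# builder configuration is accepted - see `polygraphy run -h` for details):
#
#   polygraphy run identity.onnx --trt --trt-config-script ./create_config.py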
| TensorRT-master | tools/Polygraphy/examples/cli/run/04_defining_a_tensorrt_network_or_config_manually/create_config.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Defines a `load_data` function that returns a generator yielding
feed_dicts so that this script can be used as the argument for
the --data-loader-script command-line parameter.
"""
import numpy as np
INPUT_SHAPE = (1, 1, 2, 2)
def load_data():
for _ in range(5):
yield {"x": np.ones(shape=INPUT_SHAPE, dtype=np.float32)} # Still totally real data
| TensorRT-master | tools/Polygraphy/examples/cli/convert/01_int8_calibration_in_tensorrt/data_loader.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script builds an engine with 3 separate optimization profiles, each
built for a specific use-case. It then creates 3 separate execution contexts
and corresponding `TrtRunner`s for inference.
"""
import numpy as np
from polygraphy.backend.trt import (
CreateConfig,
Profile,
TrtRunner,
engine_from_network,
network_from_onnx_path,
save_engine,
)
from polygraphy.logger import G_LOGGER
def main():
# A Profile maps each input tensor to a range of shapes.
# The `add()` method can be used to add shapes for a single input.
#
# TIP: To save lines, calls to `add` can be chained:
# profile.add("input0", ...).add("input1", ...)
#
# Of course, you may alternatively write this as:
# profile.add("input0", ...)
# profile.add("input1", ...)
#
profiles = [
# The low-latency case. For best performance, min == opt == max.
Profile().add("X", min=(1, 3, 28, 28), opt=(1, 3, 28, 28), max=(1, 3, 28, 28)),
# The dynamic batching case. We use `4` for the opt batch size since that's our most common case.
Profile().add("X", min=(1, 3, 28, 28), opt=(4, 3, 28, 28), max=(32, 3, 28, 28)),
# The offline case. For best performance, min == opt == max.
Profile().add("X", min=(128, 3, 28, 28), opt=(128, 3, 28, 28), max=(128, 3, 28, 28)),
]
# See examples/api/06_immediate_eval_api for details on immediately evaluated functional loaders like `engine_from_network`.
# Note that we can freely inter-mix lazy and immediately-evaluated loaders.
engine = engine_from_network(
network_from_onnx_path("dynamic_identity.onnx"), config=CreateConfig(profiles=profiles)
)
# We'll save the engine so that we can inspect it with `inspect model`.
# This should make it easy to see how the engine bindings are laid out.
save_engine(engine, "dynamic_identity.engine")
# We'll create, but not activate, three separate runners, each with a separate context.
#
# TIP: By providing a context directly, as opposed to via a lazy loader,
# we can ensure that the runner will *not* take ownership of it.
#
low_latency = TrtRunner(engine.create_execution_context())
# NOTE: The following two lines will cause TensorRT to display errors since profile 0
# is already in use by the first execution context. We'll suppress them using G_LOGGER.verbosity().
#
with G_LOGGER.verbosity(G_LOGGER.CRITICAL):
dynamic_batching = TrtRunner(engine.create_execution_context())
offline = TrtRunner(engine.create_execution_context())
# NOTE: We could update the profile index here (e.g. `context.active_optimization_profile = 2`),
# but instead, we'll use TrtRunner's `set_profile()` API when we later activate the runner.
# Finally, we can activate the runners as we need them.
#
# NOTE: Since the context and engine are already created, the runner will only need to
# allocate input and output buffers during activation.
input_img = np.ones((1, 3, 28, 28), dtype=np.float32) # An input "image"
with low_latency:
outputs = low_latency.infer({"X": input_img})
assert np.array_equal(outputs["Y"], input_img) # It's an identity model!
print("Low latency runner succeeded!")
# While we're serving requests online, we might decide that we need dynamic batching
# for a moment.
#
# NOTE: We're assuming that activating runners will be cheap here, so we can bring up
# the dynamic batching runner just-in-time.
#
# TIP: If activating the runner is not cheap (e.g. input/output buffers are large),
# it might be better to keep the runner active the whole time.
#
with dynamic_batching:
# NOTE: The very first time we activate this runner, we need to set
# the profile index (it's 0 by default). We need to do this *only once*.
# Alternatively, we could have set the profile index in the context directly (see above).
#
dynamic_batching.set_profile(1) # Use the second profile, which is intended for dynamic batching.
# We'll create fake batches by repeating our fake input image.
small_input_batch = np.repeat(input_img, 4, axis=0) # Shape: (4, 3, 28, 28)
outputs = dynamic_batching.infer({"X": small_input_batch})
assert np.array_equal(outputs["Y"], small_input_batch)
# If we need dynamic batching again later, we can activate the runner once more.
#
# NOTE: This time, we do *not* need to set the profile.
#
with dynamic_batching:
# NOTE: We can use any shape that's in the range of the profile without
# additional setup - Polygraphy handles the details behind the scenes!
#
large_input_batch = np.repeat(input_img, 16, axis=0) # Shape: (16, 3, 28, 28)
outputs = dynamic_batching.infer({"X": large_input_batch})
assert np.array_equal(outputs["Y"], large_input_batch)
print("Dynamic batching runner succeeded!")
with offline:
# NOTE: We must set the profile to something other than 0 or 1 since both of those
# are now in use by the `low_latency` and `dynamic_batching` runners respectively.
#
offline.set_profile(2) # Use the third profile, which is intended for the offline case.
large_offline_batch = np.repeat(input_img, 128, axis=0) # Shape: (128, 3, 28, 28)
outputs = offline.infer({"X": large_offline_batch})
assert np.array_equal(outputs["Y"], large_offline_batch)
print("Offline runner succeeded!")
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/examples/api/07_tensorrt_and_dynamic_shapes/example.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script runs an identity model with ONNX-Runtime and TensorRT,
then compares outputs.
"""
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxPath, TrtRunner
from polygraphy.comparator import Comparator, CompareFunc
def main():
# The OnnxrtRunner requires an ONNX-RT session.
# We can use the SessionFromOnnx lazy loader to construct one easily:
build_onnxrt_session = SessionFromOnnx("identity.onnx")
# The TrtRunner requires a TensorRT engine.
# To create one from the ONNX model, we can chain a couple lazy loaders together:
build_engine = EngineFromNetwork(NetworkFromOnnxPath("identity.onnx"))
runners = [
TrtRunner(build_engine),
OnnxrtRunner(build_onnxrt_session),
]
# `Comparator.run()` will run each runner separately using synthetic input data and
# return a `RunResults` instance. See `polygraphy/comparator/struct.py` for details.
#
    # TIP: To use custom input data, you can set the `data_loader` parameter in `Comparator.run()`
# to a generator or iterable that yields `Dict[str, np.ndarray]`.
run_results = Comparator.run(runners)
# `Comparator.compare_accuracy()` checks that outputs match between runners.
#
# TIP: The `compare_func` parameter can be used to control how outputs are compared (see API reference for details).
# The default comparison function is created by `CompareFunc.simple()`, but we can construct it
# explicitly if we want to change the default parameters, such as tolerance.
assert bool(Comparator.compare_accuracy(run_results, compare_func=CompareFunc.simple(atol=1e-8)))
# We can use `RunResults.save()` method to save the inference results to a JSON file.
# This can be useful if you want to generate and compare results separately.
run_results.save("inference_results.json")
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/examples/api/01_comparing_frameworks/example.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script demonstrates how to use Polygraphy in conjunction with APIs
provided by a backend. Specifically, in this case, we use TensorRT APIs
to print the network name and enable FP16 mode.
"""
import numpy as np
import tensorrt as trt
from polygraphy import func
from polygraphy.backend.trt import CreateConfig, EngineFromNetwork, NetworkFromOnnxPath, TrtRunner
# TIP: The immediately evaluated functional API makes it very easy to interoperate
# with backends like TensorRT. For details, see example 06 (`examples/api/06_immediate_eval_api`).
# We can use the `extend` decorator to easily extend lazy loaders provided by Polygraphy
# The parameters our decorated function takes should match the return values of the loader we are extending.
# For `NetworkFromOnnxPath`, we can see from the API documentation that it returns a TensorRT
# builder, network and parser. That is what our function will receive.
@func.extend(NetworkFromOnnxPath("identity.onnx"))
def load_network(builder, network, parser):
# Here we can modify the network. For this example, we'll just set the network name.
network.name = "MyIdentity"
print("Network name: {:}".format(network.name))
# Notice that we don't need to return anything - `extend()` takes care of that for us!
# In case a builder configuration option is missing from Polygraphy, we can easily set it using TensorRT APIs.
# Our function will receive a TensorRT IBuilderConfig since that's what `CreateConfig` returns.
@func.extend(CreateConfig())
def load_config(config):
# Polygraphy supports the fp16 flag, but in case it didn't, we could do this:
config.set_flag(trt.BuilderFlag.FP16)
def main():
# Since we have no further need of TensorRT APIs, we can come back to regular Polygraphy.
#
# NOTE: Since we're using lazy loaders, we provide the functions as arguments - we do *not* call them ourselves.
build_engine = EngineFromNetwork(load_network, config=load_config)
with TrtRunner(build_engine) as runner:
inp_data = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)
# NOTE: The runner owns the output buffers and is free to reuse them between `infer()` calls.
# Thus, if you want to store results from multiple inferences, you should use `copy.deepcopy()`.
outputs = runner.infer({"x": inp_data})
assert np.array_equal(outputs["y"], inp_data) # It's an identity model!
print("Inference succeeded!")
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/examples/api/03_interoperating_with_tensorrt/example.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script demonstrates how to use the Calibrator API provided by Polygraphy
to calibrate a TensorRT engine to run in INT8 precision.
"""
import numpy as np
from polygraphy.backend.trt import Calibrator, CreateConfig, EngineFromNetwork, NetworkFromOnnxPath, TrtRunner
from polygraphy.logger import G_LOGGER
# The data loader argument to `Calibrator` can be any iterable or generator that yields `feed_dict`s.
# A `feed_dict` is just a mapping of input names to corresponding inputs.
def calib_data():
for _ in range(4):
# TIP: If your calibration data is already on the GPU, you can instead provide GPU pointers
# (as `int`s) or Polygraphy `DeviceView`s instead of NumPy arrays.
#
# For details on `DeviceView`, see `polygraphy/cuda/cuda.py`.
yield {"x": np.ones(shape=(1, 1, 2, 2), dtype=np.float32)} # Totally real data
def main():
# We can provide a path or file-like object if we want to cache calibration data.
# This lets us avoid running calibration the next time we build the engine.
#
# TIP: You can use this calibrator with TensorRT APIs directly (e.g. config.int8_calibrator).
# You don't have to use it with Polygraphy loaders if you don't want to.
calibrator = Calibrator(data_loader=calib_data(), cache="identity-calib.cache")
# We must enable int8 mode in addition to providing the calibrator.
build_engine = EngineFromNetwork(
NetworkFromOnnxPath("identity.onnx"), config=CreateConfig(int8=True, calibrator=calibrator)
)
# When we activate our runner, it will calibrate and build the engine. If we want to
# see the logging output from TensorRT, we can temporarily increase logging verbosity:
with G_LOGGER.verbosity(G_LOGGER.VERBOSE), TrtRunner(build_engine) as runner:
# Finally, we can test out our int8 TensorRT engine with some dummy input data:
inp_data = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)
# NOTE: The runner owns the output buffers and is free to reuse them between `infer()` calls.
# Thus, if you want to store results from multiple inferences, you should use `copy.deepcopy()`.
outputs = runner.infer({"x": inp_data})
assert np.array_equal(outputs["y"], inp_data) # It's an identity model!
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/examples/api/04_int8_calibration_in_tensorrt/example.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script demonstrates how to use the extend() API covered in example 03
to construct a TensorRT network using the TensorRT Network API.
"""
import numpy as np
import tensorrt as trt
from polygraphy import func
from polygraphy.backend.trt import CreateNetwork, EngineFromNetwork, TrtRunner
INPUT_NAME = "input"
INPUT_SHAPE = (64, 64)
OUTPUT_NAME = "output"
# Just like in example 03, we can use `extend` to add our own functionality to existing lazy loaders.
# `CreateNetwork` will create an empty network, which we can then populate ourselves.
@func.extend(CreateNetwork())
def create_network(builder, network):
# This network will add 1 to the input tensor.
inp = network.add_input(name=INPUT_NAME, shape=INPUT_SHAPE, dtype=trt.float32)
ones = network.add_constant(shape=INPUT_SHAPE, weights=np.ones(shape=INPUT_SHAPE, dtype=np.float32)).get_output(0)
add = network.add_elementwise(inp, ones, op=trt.ElementWiseOperation.SUM).get_output(0)
add.name = OUTPUT_NAME
network.mark_output(add)
# Notice that we don't need to return anything - `extend()` takes care of that for us!
def main():
# After we've constructed the network, we can go back to using regular Polygraphy APIs.
#
# NOTE: Since we're using lazy loaders, we provide the `create_network` function as
# an argument - we do *not* call it ourselves.
build_engine = EngineFromNetwork(create_network)
with TrtRunner(build_engine) as runner:
feed_dict = {INPUT_NAME: np.random.random_sample(INPUT_SHAPE).astype(np.float32)}
# NOTE: The runner owns the output buffers and is free to reuse them between `infer()` calls.
# Thus, if you want to store results from multiple inferences, you should use `copy.deepcopy()`.
outputs = runner.infer(feed_dict)
assert np.array_equal(outputs[OUTPUT_NAME], (feed_dict[INPUT_NAME] + 1))
print("Inference succeeded!")
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/examples/api/05_using_tensorrt_network_api/example.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script loads the TensorRT engine built by `build_and_run.py` and runs inference.
"""
import numpy as np
from polygraphy.backend.common import BytesFromPath
from polygraphy.backend.trt import EngineFromBytes, TrtRunner
def main():
# Just as we did when building, we can compose multiple loaders together
# to achieve the behavior we want. Specifically, we want to load a serialized
# engine from a file, then deserialize it into a TensorRT engine.
load_engine = EngineFromBytes(BytesFromPath("identity.engine"))
    # Inference remains virtually the same as before:
with TrtRunner(load_engine) as runner:
inp_data = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)
# NOTE: The runner owns the output buffers and is free to reuse them between `infer()` calls.
# Thus, if you want to store results from multiple inferences, you should use `copy.deepcopy()`.
outputs = runner.infer(feed_dict={"x": inp_data})
assert np.array_equal(outputs["y"], inp_data) # It's an identity model!
print("Inference succeeded!")
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/examples/api/00_inference_with_tensorrt/load_and_run.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script builds and runs a TensorRT engine with FP16 precision enabled
starting from an ONNX identity model.
"""
import numpy as np
from polygraphy.backend.trt import CreateConfig, EngineFromNetwork, NetworkFromOnnxPath, SaveEngine, TrtRunner
def main():
# We can compose multiple lazy loaders together to get the desired conversion.
# In this case, we want ONNX -> TensorRT Network -> TensorRT engine (w/ fp16).
#
# NOTE: `build_engine` is a *callable* that returns an engine, not the engine itself.
# To get the engine directly, you can use the immediately evaluated functional API.
# See examples/api/06_immediate_eval_api for details.
build_engine = EngineFromNetwork(
NetworkFromOnnxPath("identity.onnx"), config=CreateConfig(fp16=True)
) # Note that config is an optional argument.
# To reuse the engine elsewhere, we can serialize and save it to a file.
# The `SaveEngine` lazy loader will return the TensorRT engine when called,
# which allows us to chain it together with other loaders.
build_engine = SaveEngine(build_engine, path="identity.engine")
# Once our loader is ready, inference is simply a matter of constructing a runner,
# activating it with a context manager (i.e. `with TrtRunner(...)`) and calling `infer()`.
#
    # NOTE: You can use the activate() function instead of a context manager, but you will need to make sure to
    # call deactivate() to avoid a memory leak. For that reason, a context manager is the safer option.
with TrtRunner(build_engine) as runner:
inp_data = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)
# NOTE: The runner owns the output buffers and is free to reuse them between `infer()` calls.
# Thus, if you want to store results from multiple inferences, you should use `copy.deepcopy()`.
outputs = runner.infer(feed_dict={"x": inp_data})
assert np.array_equal(outputs["y"], inp_data) # It's an identity model!
print("Inference succeeded!")
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/examples/api/00_inference_with_tensorrt/build_and_run.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script uses Polygraphy's immediately evaluated functional APIs
to load the TensorRT engine built by `build_and_run.py` and run inference.
"""
import numpy as np
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import TrtRunner, engine_from_bytes
def main():
engine = engine_from_bytes(bytes_from_path("identity.engine"))
# NOTE: In TensorRT 8.0 and newer, we do *not* need to use a context manager to free `engine`.
with engine, TrtRunner(engine) as runner:
inp_data = np.ones((1, 1, 2, 2), dtype=np.float32)
# NOTE: The runner owns the output buffers and is free to reuse them between `infer()` calls.
# Thus, if you want to store results from multiple inferences, you should use `copy.deepcopy()`.
outputs = runner.infer(feed_dict={"x": inp_data})
assert np.array_equal(outputs["output"], inp_data) # It's an identity model!
print("Inference succeeded!")
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/examples/api/06_immediate_eval_api/load_and_run.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script uses Polygraphy's immediately evaluated functional APIs
to load an ONNX model, convert it into a TensorRT network, add an identity
layer to the end of it, build an engine with FP16 mode enabled,
save the engine, and finally run inference.
"""
import numpy as np
from polygraphy.backend.trt import TrtRunner, create_config, engine_from_network, network_from_onnx_path, save_engine
def main():
# In Polygraphy, loaders and runners take ownership of objects if they are provided
# via the return values of callables. For example, we don't need to worry about object
# lifetimes when we use lazy loaders.
#
# Since we are immediately evaluating, we take ownership of objects, and are responsible for freeing them.
builder, network, parser = network_from_onnx_path("identity.onnx")
# Extend the network with an identity layer (purely for the sake of example).
# Note that unlike with lazy loaders, we don't need to do anything special to modify the network.
# If we were using lazy loaders, we would need to use `func.extend()` as described
# in example 03 and example 05.
prev_output = network.get_output(0)
network.unmark_output(prev_output)
output = network.add_identity(prev_output).get_output(0)
output.name = "output"
network.mark_output(output)
# Create a TensorRT IBuilderConfig so that we can build the engine with FP16 enabled.
config = create_config(builder, network, fp16=True)
# We can free everything we constructed above once we're done building the engine.
# NOTE: In TensorRT 8.0 and newer, we do *not* need to use a context manager here.
with builder, network, parser, config:
engine = engine_from_network((builder, network), config)
# To reuse the engine elsewhere, we can serialize it and save it to a file.
save_engine(engine, path="identity.engine")
# NOTE: In TensorRT 8.0 and newer, we do *not* need to use a context manager to free `engine`.
with engine, TrtRunner(engine) as runner:
inp_data = np.ones((1, 1, 2, 2), dtype=np.float32)
# NOTE: The runner owns the output buffers and is free to reuse them between `infer()` calls.
# Thus, if you want to store results from multiple inferences, you should use `copy.deepcopy()`.
outputs = runner.infer(feed_dict={"x": inp_data})
assert np.array_equal(outputs["output"], inp_data) # It's an identity model!
print("Inference succeeded!")
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/examples/api/06_immediate_eval_api/build_and_run.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script uses the Polygraphy Runner API to validate the outputs
of an identity model using a trivial dataset.
"""
import numpy as np
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxPath, TrtRunner
# Pretend that this is a very large dataset.
REAL_DATASET = [
np.ones((1, 1, 2, 2), dtype=np.float32),
np.zeros((1, 1, 2, 2), dtype=np.float32),
np.ones((1, 1, 2, 2), dtype=np.float32),
np.zeros((1, 1, 2, 2), dtype=np.float32),
] # Definitely real data
# For an identity network, the golden output values are the same as the input values.
# Though such a network appears useless at first glance, it can be very useful in some cases (like here!).
EXPECTED_OUTPUTS = REAL_DATASET
def main():
build_engine = EngineFromNetwork(NetworkFromOnnxPath("identity.onnx"))
with TrtRunner(build_engine) as runner:
for (data, golden) in zip(REAL_DATASET, EXPECTED_OUTPUTS):
# NOTE: The runner owns the output buffers and is free to reuse them between `infer()` calls.
# Thus, if you want to store results from multiple inferences, you should use `copy.deepcopy()`.
outputs = runner.infer(feed_dict={"x": data})
assert np.array_equal(outputs["y"], golden)
print("Validation succeeded!")
if __name__ == "__main__":
main()
| TensorRT-master | tools/Polygraphy/examples/api/02_validating_on_a_dataset/example.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
INTERNAL_CORRECTNESS_CHECKS = bool(os.environ.get("POLYGRAPHY_INTERNAL_CORRECTNESS_CHECKS", "0") != "0")
"""
bool: Whether internal correctness checks are enabled.
This can be configured by setting the 'POLYGRAPHY_INTERNAL_CORRECTNESS_CHECKS' environment variable.
"""
AUTOINSTALL_DEPS = bool(os.environ.get("POLYGRAPHY_AUTOINSTALL_DEPS", "0") != "0")
"""
bool: Whether Polygraphy will automatically install required Python packages at runtime.
This can be configured by setting the 'POLYGRAPHY_AUTOINSTALL_DEPS' environment variable.
"""
INSTALL_CMD = os.environ.get("POLYGRAPHY_INSTALL_CMD", "{:} -m pip install".format(sys.executable)).split()
"""
List[str]: The command to use to automatically install dependencies. Only relevant when
AUTOINSTALL_DEPS is enabled. Defaults to ``["python", "-m", "pip", "install"]``.
This can be configured by setting the 'POLYGRAPHY_INSTALL_CMD' environment variable to a
string containing the command; for example: ``python3 -m pip install``.
"""
ARRAY_SWAP_THRESHOLD_MB = int(os.environ.get("POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB", "-1"))
"""
int: The threshold, in megabytes, above which Polygraphy will evict a NumPy array from memory and swap it to disk.
A negative value disables swapping and a value of 0 causes all arrays to be saved to disk.
Disabled by default.
This can be configured by setting the 'POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB' environment variable.
"""
| TensorRT-master | tools/Polygraphy/polygraphy/config.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For legacy purposes
from polygraphy.config import AUTOINSTALL_DEPS, INTERNAL_CORRECTNESS_CHECKS
DEFAULT_SHAPE_VALUE = 1
DEFAULT_SEED = 1
TAB = " " * 4 # The one true tab
MARK_ALL = "mark-all"
"""
Special value for ModifyOutputs loaders indicating that all values should be marked as outputs
"""
LEGACY_TYPE_MARKER = "polygraphy_serialized_json_type"
TYPE_MARKER = "polygraphy_class"
| TensorRT-master | tools/Polygraphy/polygraphy/constants.py |
import polygraphy.config
__version__ = "0.33.0"
| TensorRT-master | tools/Polygraphy/polygraphy/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import copy
import queue
from multiprocessing import Process, Queue
from polygraphy import mod, util
from polygraphy.common import TensorMetadata
from polygraphy.comparator import util as comp_util
from polygraphy.comparator.compare import CompareFunc
from polygraphy.comparator.data_loader import DataLoader, DataLoaderCache
from polygraphy.comparator.struct import AccuracyResult, IterationResult, RunResults
from polygraphy.logger import G_LOGGER, LogMode
np = mod.lazy_import("numpy")
@mod.export()
class Comparator(object):
"""
Compares inference outputs.
"""
@staticmethod
def run(
runners,
data_loader=None,
warm_up=None,
use_subprocess=None,
subprocess_timeout=None,
subprocess_polling_interval=None,
save_inputs_path=None,
):
"""
Runs the supplied runners sequentially.
Args:
runners (List[BaseRunner]):
A list of runners to run.
data_loader (Generator -> OrderedDict[str, numpy.ndarray]):
A generator or iterable that yields a dictionary that maps input names to input numpy buffers.
In the simplest case, this can be a `List[Dict[str, numpy.ndarray]]` .
In case you don't know details about the inputs ahead of time, you can access the
                `input_metadata` property in your data loader, which will be set to a `TensorMetadata`
instance by this function.
Note that this does not work for generators or lists.
The number of iterations run by this function is controlled by the number of items supplied
by the data loader.
Defaults to an instance of `DataLoader`.
warm_up (int):
The number of warm up runs to perform for each runner before timing.
Defaults to 0.
use_subprocess (bool):
Whether each runner should be run in a subprocess. This allows each runner to have exclusive
access to the GPU. When using a subprocess, runners and loaders will never be modified.
subprocess_timeout (int):
The timeout before a subprocess is killed automatically. This is useful for handling processes
that never terminate. A value of None disables the timeout. Defaults to None.
subprocess_polling_interval (int):
The polling interval, in seconds, for checking whether a subprocess has completed or crashed.
In rare cases, omitting this parameter when subprocesses are enabled may cause this function
to hang indefinitely if the subprocess crashes.
A value of 0 disables polling. Defaults to 30 seconds.
save_inputs_path (str):
[EXPERIMENTAL] Path at which to save inputs used during inference. This will include all inputs generated by
the provided data_loader, and will be saved as a JSON List[Dict[str, numpy.ndarray]].
Returns:
RunResults:
A mapping of runner names to the results of their inference.
The ordering of `runners` is preserved in this mapping.
"""
warm_up = util.default(warm_up, 0)
data_loader = util.default(data_loader, DataLoader())
use_subprocess = util.default(use_subprocess, False)
subprocess_polling_interval = util.default(subprocess_polling_interval, 30)
loader_cache = DataLoaderCache(data_loader, save_inputs_path=save_inputs_path)
def execute_runner(runner, loader_cache):
with runner as active_runner:
# DataLoaderCache will ensure that the feed_dict does not contain any extra entries
# based on the provided input_metadata.
loader_cache.set_input_metadata(active_runner.get_input_metadata())
if warm_up:
G_LOGGER.start("{:35} | Running {:} warm-up run(s)".format(active_runner.name, warm_up))
try:
feed_dict = loader_cache[0]
except IndexError:
G_LOGGER.warning(
"{:} warm-up run(s) were requested, but data loader did not supply any data. "
"Skipping warm-up run(s)".format(warm_up)
)
else:
G_LOGGER.ultra_verbose("Warm-up Input Buffers:\n{:}".format(util.indent_block(feed_dict)))
# First do a few warm-up runs, and don't time them.
for _ in range(warm_up):
active_runner.infer(feed_dict=feed_dict)
G_LOGGER.finish("{:35} | Finished {:} warm-up run(s)".format(active_runner.name, warm_up))
# Then, actual iterations.
index = 0
iteration_results = []
total_runtime = 0
for index, feed_dict in enumerate(loader_cache):
G_LOGGER.info(
"{:35}\n---- Inference Input(s) ----\n{:}".format(
active_runner.name, TensorMetadata().from_feed_dict(feed_dict)
),
mode=LogMode.ONCE,
)
G_LOGGER.extra_verbose(
lambda: "{:35} | Feeding inputs:\n{:}".format(active_runner.name, util.indent_block(feed_dict))
)
outputs = active_runner.infer(feed_dict=feed_dict)
runtime = active_runner.last_inference_time()
total_runtime += runtime
# Without a deep copy here, outputs will always reference the output of the last run
iteration_results.append(
IterationResult(outputs=copy.deepcopy(outputs), runtime=runtime, runner_name=active_runner.name)
)
G_LOGGER.info(
"{:35}\n---- Inference Output(s) ----\n{:}".format(
active_runner.name, TensorMetadata().from_feed_dict(outputs)
),
mode=LogMode.ONCE,
)
G_LOGGER.extra_verbose(
lambda: "{:35} | Inference Time: {:.3f} ms | Received outputs:\n{:}".format(
active_runner.name, runtime * 1000.0, util.indent_block(outputs)
)
)
total_runtime_ms = total_runtime * 1000.0
G_LOGGER.finish(
"{:35} | Completed {:} iteration(s) in {:.4g} ms | Average inference time: {:.4g} ms.".format(
active_runner.name, index + 1, total_runtime_ms, total_runtime_ms / float(index + 1)
)
)
return iteration_results
# Wraps execute_runner to use a queue.
def execute_runner_with_queue(runner_queue, runner, loader_cache):
iteration_results = None
try:
iteration_results = execute_runner(runner, loader_cache)
except:
# Cannot necessarily send the exception back over the queue.
                G_LOGGER.backtrace()
util.try_send_on_queue(runner_queue, iteration_results)
# After finishing, send the updated loader_cache back.
util.try_send_on_queue(runner_queue, loader_cache)
# Do all inferences in one loop, then comparisons at a later stage.
# We run each runner in a separate process so that we can provide exclusive GPU access for each runner.
run_results = RunResults()
if not runners:
G_LOGGER.warning(
"No runners were provided to Comparator.run(). Inference will not be run, and run results will be empty."
)
for runner in runners:
G_LOGGER.start("{:35} | Activating and starting inference".format(runner.name))
if use_subprocess:
runner_queue = Queue()
process = Process(target=execute_runner_with_queue, args=(runner_queue, runner, loader_cache))
process.start()
# If a subprocess hangs in a certain way, then process.join could block forever. Hence,
# we need to keep polling the process to make sure it really is alive.
iteration_results = None
while process.is_alive() and iteration_results is None:
try:
iteration_results = util.try_receive_on_queue(
runner_queue, timeout=subprocess_polling_interval / 2
)
# Receive updated loader cache, or fall back if it could not be sent.
loader_cache = util.try_receive_on_queue(runner_queue, timeout=subprocess_polling_interval / 2)
except queue.Empty:
G_LOGGER.extra_verbose("Polled subprocess - still running")
try:
assert iteration_results is not None
run_results.append((runner.name, iteration_results))
process.join(subprocess_timeout)
except:
G_LOGGER.critical(
"{:35} | Terminated prematurely. Check the exception logged above. "
"If there is no exception logged above, make sure not to use the --use-subprocess "
"flag or set use_subprocess=False in Comparator.run().".format(runner.name)
)
finally:
process.terminate()
if loader_cache is None:
G_LOGGER.critical(
"Could not send data loader cache to runner subprocess. Please try disabling subprocesses "
"by removing the --use-subprocess flag, or setting use_subprocess=False in Comparator.run()"
)
else:
run_results.append((runner.name, execute_runner(runner, loader_cache)))
G_LOGGER.verbose("Successfully ran: {:}".format([r.name for r in runners]))
return run_results
@staticmethod
def postprocess(run_results, postprocess_func):
"""
Applies post processing to all the outputs in the provided run results.
This is a convenience function to avoid the need for manual iteration over the run_results dictionary.
Args:
run_results (RunResults): The result of Comparator.run().
postprocess_func (Callable(IterationResult) -> IterationResult):
The function to apply to each ``IterationResult``.
Returns:
RunResults: The updated run results.
"""
for _, iteration_results in run_results:
for index, iter_res in enumerate(iteration_results):
iteration_results[index] = postprocess_func(iter_res)
return run_results
@staticmethod
def default_comparisons(run_results):
# Sets up default comparisons - which is to compare each runner to the subsequent one.
return [(i, i + 1) for i in range(len(run_results) - 1)]
@staticmethod
def compare_accuracy(run_results, fail_fast=False, comparisons=None, compare_func=None):
"""
Args:
run_results (RunResults): The result of Comparator.run()
fail_fast (bool): Whether to exit after the first failure
comparisons (List[Tuple[int, int]]):
Comparisons to perform, specified by runner indexes. For example, [(0, 1), (1, 2)]
would compare the first runner with the second, and the second with the third.
By default, this compares each result to the subsequent one.
compare_func (Callable(IterationResult, IterationResult) -> OrderedDict[str, bool]):
A function that takes in two IterationResults, and returns a dictionary that maps output
names to a boolean (or anything convertible to a boolean) indicating whether outputs matched.
The order of arguments to this function is guaranteed to be the same as the ordering of the
tuples contained in `comparisons`.
Returns:
AccuracyResult:
A summary of the results of the comparisons. The order of the keys (i.e. runner pairs) is
guaranteed to be the same as the order of `comparisons`. For more details, see the AccuracyResult
docstring (e.g. help(AccuracyResult)).
"""
def find_mismatched(match_dict):
return [name for name, matched in match_dict.items() if not bool(matched)]
compare_func = util.default(compare_func, CompareFunc.simple())
comparisons = util.default(comparisons, Comparator.default_comparisons(run_results))
accuracy_result = AccuracyResult()
for runner0_index, runner1_index in comparisons:
(runner0_name, results0), (runner1_name, results1) = run_results[runner0_index], run_results[runner1_index]
G_LOGGER.start("Accuracy Comparison | {:} vs. {:}".format(runner0_name, runner1_name))
with G_LOGGER.indent():
runner_pair = (runner0_name, runner1_name)
accuracy_result[runner_pair] = []
num_iters = min(len(results0), len(results1))
for iteration, (result0, result1) in enumerate(zip(results0, results1)):
if num_iters > 1:
G_LOGGER.info("Iteration: {:}".format(iteration))
with contextlib.ExitStack() as stack:
if num_iters > 1:
stack.enter_context(G_LOGGER.indent())
iteration_match_dict = compare_func(result0, result1)
accuracy_result[runner_pair].append(iteration_match_dict)
mismatched_outputs = find_mismatched(iteration_match_dict)
if fail_fast and mismatched_outputs:
return accuracy_result
G_LOGGER.extra_verbose(
"Finished comparing {:} with {:}".format(
runner0_name,
runner1_name,
)
)
passed, _, total = accuracy_result.stats(runner_pair)
pass_rate = accuracy_result.percentage(runner_pair) * 100.0
if num_iters > 1 or len(comparisons) > 1:
msg = "Accuracy Summary | {:} vs. {:} | Passed: {:}/{:} iterations | Pass Rate: {:}%".format(
runner0_name, runner1_name, passed, total, pass_rate
)
if passed == total:
G_LOGGER.finish(msg)
else:
G_LOGGER.error(msg)
return accuracy_result
@staticmethod
def validate(run_results, check_inf=None, check_nan=None, fail_fast=None):
"""
Checks output validity.
Args:
run_results (Dict[str, List[IterationResult]]): The result of Comparator.run().
check_inf (bool): Whether to fail on Infs. Defaults to False.
check_nan (bool): Whether to fail on NaNs. Defaults to True.
fail_fast (bool): Whether to fail after the first invalid value. Defaults to False.
Returns:
bool: True if all outputs were valid, False otherwise.
"""
check_inf = util.default(check_inf, False)
check_nan = util.default(check_nan, True)
fail_fast = util.default(fail_fast, False)
def is_finite(output):
non_finite = np.logical_not(np.isfinite(output))
if np.any(non_finite):
G_LOGGER.error("Inf Detected | One or more non-finite values were encountered in this output")
G_LOGGER.info(
"Note: Use -vv or set logging verbosity to EXTRA_VERBOSE to display non-finite values",
mode=LogMode.ONCE,
)
G_LOGGER.extra_verbose("Note: non-finite values at:\n{:}".format(non_finite))
G_LOGGER.extra_verbose("Note: non-finite values:\n{:}".format(output[non_finite]))
return False
return True
def is_not_nan(output):
nans = np.isnan(output)
if np.any(nans):
G_LOGGER.error("NaN Detected | One or more NaNs were encountered in this output")
G_LOGGER.info(
"Note: Use -vv or set logging verbosity to EXTRA_VERBOSE to display locations of NaNs",
mode=LogMode.ONCE,
)
G_LOGGER.extra_verbose("Note: NaNs at:\n{:}".format(nans))
return False
return True
def validate_output(runner_name, output_name, output):
G_LOGGER.start(
"{:35} | Validating output: {:} (check_inf={:}, check_nan={:})".format(
runner_name, output_name, check_inf, check_nan
)
)
with G_LOGGER.indent():
comp_util.log_output_stats(output)
output_valid = True
if check_nan:
output_valid &= is_not_nan(output)
if check_inf:
output_valid &= is_finite(output)
if output_valid:
G_LOGGER.finish("PASSED | Output: {:} is valid".format(output_name))
else:
G_LOGGER.error("FAILED | Errors detected in output: {:}".format(output_name))
return output_valid
all_valid = True
G_LOGGER.start("Output Validation | Runners: {:}".format(list(run_results.keys())))
with G_LOGGER.indent():
for runner_name, results in run_results:
for result in results:
for output_name, output in result.items():
all_valid &= validate_output(runner_name, output_name, output)
if fail_fast and not all_valid:
return False
if all_valid:
G_LOGGER.finish("PASSED | Output Validation")
else:
G_LOGGER.error("FAILED | Output Validation")
return all_valid
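# A minimal sketch of the typical workflow this class enables, comparing TensorRT against
# ONNX-Runtime for a single model (assumes "model.onnx" exists and the onnxruntime backend
# is installed; the names here are illustrative):
#
#     from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
#     from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxPath, TrtRunner
#     from polygraphy.comparator import Comparator, CompareFunc
#
#     runners = [
#         TrtRunner(EngineFromNetwork(NetworkFromOnnxPath("model.onnx"))),
#         OnnxrtRunner(SessionFromOnnx("model.onnx")),
#     ]
#     run_results = Comparator.run(runners)  # Uses the default synthetic DataLoader.
#     accuracy = Comparator.compare_accuracy(run_results, compare_func=CompareFunc.simple(atol=1e-4))
#     assert bool(accuracy)  # AccuracyResult is truthy only if all comparisons passed.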
| TensorRT-master | tools/Polygraphy/polygraphy/comparator/comparator.py |
import functools
from polygraphy import mod, util, config
from polygraphy.logger import G_LOGGER
np = mod.lazy_import("numpy")
def zero_on_empty(func):
@functools.wraps(func)
def wrapped(buffer):
if util.is_empty_shape(buffer.shape):
return 0
return func(buffer)
return wrapped
@zero_on_empty
def compute_max(buffer):
return np.amax(buffer)
# Returns index of max value
@zero_on_empty
def compute_argmax(buffer):
return np.unravel_index(np.argmax(buffer), buffer.shape)
@zero_on_empty
def compute_min(buffer):
return np.amin(buffer)
# Returns index of min value
@zero_on_empty
def compute_argmin(buffer):
return np.unravel_index(np.argmin(buffer), buffer.shape)
@zero_on_empty
def compute_mean(buffer):
return np.mean(buffer)
@zero_on_empty
def compute_stddev(buffer):
return np.std(buffer)
@zero_on_empty
def compute_variance(buffer):
return np.var(buffer)
@zero_on_empty
def compute_median(buffer):
return np.median(buffer)
@zero_on_empty
def compute_average_magnitude(buffer):
return np.mean(np.abs(buffer))
def str_histogram(output, hist_range=None):
if np.issubdtype(output.dtype, np.bool_):
return ""
try:
try:
hist, bin_edges = np.histogram(output, range=hist_range)
except ValueError as err:
G_LOGGER.verbose("Could not generate histogram. Note: Error was: {:}".format(err))
return ""
max_num_elems = compute_max(hist)
if not max_num_elems: # Empty tensor
            return ""  # Keep the return type consistent: an empty histogram string.
bin_edges = ["{:.3g}".format(bin) for bin in bin_edges]
max_start_bin_width = max(len(bin) for bin in bin_edges)
max_end_bin_width = max(len(bin) for bin in bin_edges[1:])
MAX_WIDTH = 40
ret = "---- Histogram ----\n"
ret += "{:{width}}| Num Elems | Visualization\n".format(
"Bin Range", width=max_start_bin_width + max_end_bin_width + 5
)
for num, bin_start, bin_end in zip(hist, bin_edges, bin_edges[1:]):
bar = "#" * int(MAX_WIDTH * float(num) / float(max_num_elems))
ret += "({:<{max_start_bin_width}}, {:<{max_end_bin_width}}) | {:10} | {:}\n".format(
bin_start,
bin_end,
num,
bar,
max_start_bin_width=max_start_bin_width,
max_end_bin_width=max_end_bin_width,
)
return ret
except Exception as err:
G_LOGGER.verbose("Could not generate histogram.\nNote: Error was: {:}".format(err))
if config.INTERNAL_CORRECTNESS_CHECKS:
raise
return ""
def str_output_stats(output, runner_name=None):
ret = ""
if runner_name:
ret += "{:} | Stats: ".format(runner_name)
try:
with np.testing.suppress_warnings() as sup:
sup.filter(RuntimeWarning)
ret += "mean={:.5g}, std-dev={:.5g}, var={:.5g}, median={:.5g}, min={:.5g} at {:}, max={:.5g} at {:}, avg-magnitude={:.5g}\n".format(
compute_mean(output),
compute_stddev(output),
compute_variance(output),
compute_median(output),
compute_min(output),
compute_argmin(output),
compute_max(output),
compute_argmax(output),
compute_average_magnitude(output),
)
except Exception as err:
G_LOGGER.verbose("Could not generate statistics.\nNote: Error was: {:}".format(err))
ret += "<Error while computing statistics>"
if config.INTERNAL_CORRECTNESS_CHECKS:
raise
return ret
def log_output_stats(output, info_hist=False, runner_name=None, hist_range=None):
ret = str_output_stats(output, runner_name)
G_LOGGER.info(ret)
with G_LOGGER.indent():
# Show histogram on failures.
G_LOGGER.log(
lambda: str_histogram(output, hist_range), severity=G_LOGGER.INFO if info_hist else G_LOGGER.VERBOSE
)
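# A minimal sketch of direct usage (these helpers are primarily consumed by the Comparator,
# but can be called on any NumPy buffer):
#
#     import numpy as np
#
#     buf = np.random.rand(4, 256).astype(np.float32)
#     print(str_output_stats(buf, runner_name="trt-runner"))  # mean/std-dev/min/max summary
#     log_output_stats(buf, info_hist=True)  # Also logs the histogram at INFO severity.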
| TensorRT-master | tools/Polygraphy/polygraphy/comparator/util.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
from collections import OrderedDict
from polygraphy import constants, func, mod, util
from polygraphy.json import save_json
from polygraphy.logger import G_LOGGER, LogMode
np = mod.lazy_import("numpy")
@mod.export()
class DataLoader(object):
"""
Generates synthetic input data.
"""
def __init__(
self, seed=None, iterations=None, input_metadata=None, int_range=None, float_range=None, val_range=None
):
"""
Args:
seed (int):
The seed to use when generating random inputs.
Defaults to ``util.constants.DEFAULT_SEED``.
iterations (int):
The number of iterations for which to supply data.
Defaults to 1.
input_metadata (TensorMetadata):
A mapping of input names to their corresponding shapes and data types.
This will be used to determine what shapes to supply for inputs with dynamic shape, as
well as to set the data type of the generated inputs.
If either dtype or shape are None, then the value will be automatically determined.
For input shape tensors, i.e. inputs whose *value* describes a shape in the model, the
provided shape will be used to populate the values of the inputs, rather than to determine
their shape.
val_range (Union[Tuple[number], Dict[str, Tuple[number]]]):
A tuple containing exactly 2 numbers, indicating the minimum and maximum values (inclusive)
the data loader should generate.
If either value in the tuple is None, the default will be used for that value.
If None is provided instead of a tuple, then the default values will be used for both the
minimum and maximum.
This can be specified on a per-input basis using a dictionary. In that case,
use an empty string ("") as the key to specify default range for inputs not explicitly listed.
Defaults to (0.0, 1.0).
int_range (Tuple[int]):
[DEPRECATED - Use val_range instead]
A tuple containing exactly 2 integers, indicating the minimum and maximum integer values (inclusive)
the data loader should generate. If either value in the tuple is None, the default will be used
for that value.
If None is provided instead of a tuple, then the default values will be used for both the
minimum and maximum.
float_range (Tuple[float]):
[DEPRECATED - Use val_range instead]
A tuple containing exactly 2 floats, indicating the minimum and maximum float values (inclusive)
the data loader should generate. If either value in the tuple is None, the default will be used
for that value.
If None is provided instead of a tuple, then the default values will be used for both the
minimum and maximum.
"""
def default_tuple(tup, default):
if tup is None or (not isinstance(tup, tuple) and not isinstance(tup, list)):
return default
new_tup = []
for elem, default_elem in zip(tup, default):
new_tup.append(util.default(elem, default_elem))
return tuple(new_tup)
self.seed = util.default(seed, constants.DEFAULT_SEED)
self.iterations = util.default(iterations, 1)
self.user_input_metadata = util.default(input_metadata, {})
self.int_range_set = int_range is not None
if self.int_range_set:
mod.warn_deprecated("The int_range parameter in DataLoader", "val_range", remove_in="0.35.0")
self.int_range = default_tuple(int_range, (1, 25))
self.float_range_set = float_range is not None
if self.float_range_set:
mod.warn_deprecated("The float_range parameter in DataLoader", "val_range", remove_in="0.35.0")
self.float_range = default_tuple(float_range, (-1.0, 1.0))
self.input_metadata = None
self.default_val_range = default_tuple(val_range, (0.0, 1.0))
self.val_range = util.default(val_range, self.default_val_range)
if self.user_input_metadata:
G_LOGGER.info(
"Will generate inference input data according to provided TensorMetadata: {}".format(
self.user_input_metadata
)
)
def __repr__(self):
return util.make_repr(
"DataLoader",
seed=self.seed,
iterations=self.iterations,
input_metadata=self.user_input_metadata or None,
int_range=self.int_range,
float_range=self.float_range,
val_range=self.val_range,
)[0]
def _get_range(self, name, cast_type):
if cast_type == int and self.int_range_set:
return self.int_range
elif cast_type == float and self.float_range_set:
return self.float_range
tup = util.value_or_from_dict(self.val_range, name, self.default_val_range)
return tuple(cast_type(val) for val in tup)
def __getitem__(self, index):
"""
Generates random input data.
May update the DataLoader's `input_metadata` attribute.
Args:
index (int):
Since this class behaves like an iterable, it takes an index parameter.
Generated data is guaranteed to be the same for the same index.
Returns:
OrderedDict[str, numpy.ndarray]: A mapping of input names to input numpy buffers.
"""
if index >= self.iterations:
raise IndexError()
G_LOGGER.verbose("Generating data using numpy seed: {:}".format(self.seed + index))
rng = np.random.RandomState(self.seed + index)
def get_static_shape(name, shape):
static_shape = shape
if util.is_shape_dynamic(shape):
static_shape = util.override_dynamic_shape(shape)
if static_shape != shape:
if not util.is_valid_shape_override(static_shape, shape):
G_LOGGER.critical(
"Input tensor: {:} | Cannot override original shape: {:} to {:}".format(
name, shape, static_shape
)
)
G_LOGGER.warning(
"Input tensor: {:} | Will generate data of shape: {:}.\n"
"If this is incorrect, please set input_metadata "
"or provide a custom data loader.".format(name, static_shape),
mode=LogMode.ONCE,
)
return static_shape
# Whether the user provided the values for a shape tensor input,
# rather than the shape of the input.
# If the shape is 1D, and has a value equal to the rank of the provided default shape, it is
        # likely to be a shape tensor, and so its value, not shape, should be overridden.
def is_shape_tensor(name, dtype):
if name not in self.input_metadata or name not in self.user_input_metadata:
return False
_, shape = self.input_metadata[name]
is_shape = np.issubdtype(dtype, np.integer) and (not util.is_shape_dynamic(shape)) and (len(shape) == 1)
user_shape = self.user_input_metadata[name].shape
is_shape &= len(user_shape) == shape[0]
is_shape &= not util.is_shape_dynamic(user_shape) # Shape of shape cannot be dynamic.
return is_shape
def generate_buffer(name, dtype, shape):
if is_shape_tensor(name, dtype):
buffer = np.array(shape, dtype=dtype)
G_LOGGER.info(
"Assuming {:} is a shape tensor. Setting input values to: {:}. If this is not correct, "
"please set it correctly in 'input_metadata' or by providing --input-shapes".format(name, buffer),
mode=LogMode.ONCE,
)
elif np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):
imin, imax = self._get_range(name, cast_type=int if np.issubdtype(dtype, np.integer) else bool)
G_LOGGER.verbose(
"Input tensor: {:} | Generating input data in range: [{:}, {:}]".format(name, imin, imax),
mode=LogMode.ONCE,
)
# high is 1 greater than the max int drawn.
buffer = rng.randint(low=imin, high=imax + 1, size=shape, dtype=dtype)
else:
fmin, fmax = self._get_range(name, cast_type=float)
G_LOGGER.verbose(
"Input tensor: {:} | Generating input data in range: [{:}, {:}]".format(name, fmin, fmax),
mode=LogMode.ONCE,
)
buffer = (rng.random_sample(size=shape) * (fmax - fmin) + fmin).astype(dtype)
buffer = np.array(buffer) # To handle scalars, since the above functions return a float if shape is ().
return buffer
if self.input_metadata is None and self.user_input_metadata is not None:
self.input_metadata = self.user_input_metadata
buffers = OrderedDict()
for name, (dtype, shape) in self.input_metadata.items():
if name in self.user_input_metadata:
user_dtype, user_shape = self.user_input_metadata[name]
dtype = util.default(user_dtype, dtype)
is_valid_shape_override = user_shape is not None and util.is_valid_shape_override(user_shape, shape)
if util.is_shape_dynamic(user_shape):
G_LOGGER.warning(
"Input tensor: {:} | Provided input shape: {:} is dynamic.\n"
"Dynamic shapes cannot be used to generate inference data. "
"Will use default shape instead.\n"
"To avoid this, please provide a fixed shape to the data loader. ".format(name, user_shape)
)
elif not is_valid_shape_override and not is_shape_tensor(name, dtype):
G_LOGGER.warning(
"Input tensor: {:} | Cannot use provided custom shape: {:} "
"to override: {:}. Will use default shape instead.".format(name, user_shape, shape),
mode=LogMode.ONCE,
)
else:
shape = util.default(user_shape, shape)
static_shape = get_static_shape(name, shape)
buffers[name] = generate_buffer(name, dtype, shape=static_shape)
# Warn about unused metadata
for name in self.user_input_metadata.keys():
if name not in self.input_metadata:
msg = "Input tensor: {:} | Metadata was provided, but the input does not exist in one or more runners.".format(
name
)
close_match = util.find_in_dict(name, self.input_metadata)
if close_match:
msg += "\nMaybe you meant to set: {:}".format(close_match)
G_LOGGER.warning(msg)
# Warn about unused val_range
if not isinstance(self.val_range, tuple):
util.check_dict_contains(
self.val_range, list(self.input_metadata.keys()) + [""], check_missing=False, dict_name="val_range"
)
return buffers
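# A minimal sketch of direct usage (the input name and shape here are illustrative):
#
#     import numpy as np
#     from polygraphy.common import TensorMetadata
#
#     meta = TensorMetadata().add("x", dtype=np.float32, shape=(1, 3, 28, 28))
#     loader = DataLoader(
#         seed=0,
#         iterations=2,
#         input_metadata=meta,
#         val_range={"x": (-1.0, 1.0), "": (0.0, 1.0)},  # "" supplies the default range.
#     )
#     for feed_dict in loader:  # Each item is an OrderedDict[str, np.ndarray]
#         assert feed_dict["x"].shape == (1, 3, 28, 28)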
# Caches data loaded by a DataLoader for use across multiple runners.
class DataLoaderCache(object):
def __init__(self, data_loader, save_inputs_path=None):
self.data_loader = data_loader
self.cache = [] # List[OrderedDict[str, numpy.ndarray]]
self.save_inputs_path = save_inputs_path
@func.constantmethod
def __getitem__(self, iteration):
"""
Load the specified iteration from the cache if present, or load it from the data loader.
Args:
iteration (int): The iteration whose data to retrieve.
"""
if iteration >= len(self.cache):
raise IndexError()
# Attempts to match existing input buffers to the requested input_metadata
def coerce_cached_input(index, name, dtype, shape):
cached_feed_dict = self.cache[iteration]
cached_name = util.find_in_dict(name, cached_feed_dict, index)
            assert cached_name is not None  # Caught below as AssertionError to trigger a reload.
if cached_name != name:
G_LOGGER.warning(
"Input tensor: {:} | Buffer name ({:}) does not match expected input name ({:}).".format(
name, cached_name, name
)
)
buffer = cached_feed_dict[cached_name]
if dtype != buffer.dtype:
G_LOGGER.warning(
"Input tensor: {:} | Buffer dtype ({:}) does not match expected input dtype ({:}), attempting to cast. ".format(
name, buffer.dtype, np.dtype(dtype).name
)
)
type_info = None
if np.issubdtype(dtype, np.integer):
type_info = np.iinfo(np.dtype(dtype))
elif np.issubdtype(dtype, np.floating):
type_info = np.finfo(np.dtype(dtype))
if type_info is not None and np.any((buffer < type_info.min) | (buffer > type_info.max)):
G_LOGGER.warning(
"Some values in this input are out of range of {:}. Unexpected behavior may ensue!".format(
dtype
)
)
buffer = buffer.astype(dtype)
if not util.is_valid_shape_override(buffer.shape, shape):
G_LOGGER.warning(
"Input tensor: {:} | Buffer shape ({:}) does not match expected input shape ({:}), attempting to transpose/reshape. ".format(
name, buffer.shape, shape
)
)
buffer = util.try_match_shape(buffer, shape)
            assert buffer.dtype == dtype and util.is_valid_shape_override(buffer.shape, shape)
return buffer
feed_dict = OrderedDict()
# Reload from data loader if needed
data_loader_feed_dict = None
for index, (name, (dtype, shape)) in enumerate(self.input_metadata.items()):
try:
buffer = coerce_cached_input(index, name, dtype, shape)
except AssertionError:
G_LOGGER.warning(
"Could not use buffer previously cached from data loader for input: {:}. Attempting to reload "
"inputs from the data loader.\n"
"Note that this will only work if the data loader supports random access.\n"
"Please refer to warnings above for details on why the previously generated input buffer didn't work. ".format(
name
)
)
try:
if data_loader_feed_dict is None:
data_loader_feed_dict = self.data_loader[iteration]
buffer = data_loader_feed_dict[name]
except:
G_LOGGER.critical(
"Could not reload inputs from data loader. Are the runners running the same model? "
"If not, please rewrite the data loader to support random access."
)
feed_dict[name] = buffer
return feed_dict
def set_input_metadata(self, input_metadata):
"""
Set the input metadata for the data loader.
Args:
input_metadata (TensorMetadata):
Input Metadata, including shape and type information. The cache may attempt to transform inputs to
match the specified input_metadata when data already in the cache does not exactly match.
"""
self.input_metadata = input_metadata
with contextlib.suppress(AttributeError):
self.data_loader.input_metadata = input_metadata
if not self.cache:
G_LOGGER.verbose("Loading inputs from data loader")
self.cache = list(self.data_loader)
if not self.cache:
G_LOGGER.warning("Data loader did not yield any input data.")
# Only save inputs the first time the cache is generated
if self.save_inputs_path is not None:
save_json(self.cache, self.save_inputs_path, "inference input data")
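# A minimal sketch of how the cache is driven (see Comparator.run above): input metadata is
# set per-runner before indexing, so that cached buffers can be coerced to each runner's needs:
#
#     cache = DataLoaderCache(DataLoader(iterations=2))
#     cache.set_input_metadata(active_runner.get_input_metadata())  # `active_runner` must be activated.
#     feed_dict = cache[0]  # Coerced to match the metadata set above.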
| TensorRT-master | tools/Polygraphy/polygraphy/comparator/data_loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod, util
np = mod.lazy_import("numpy")
@mod.export()
class PostprocessFunc(object):
"""
Provides functions that can apply post-processing to `IterationResult` s.
"""
@staticmethod
# This function returns a top_k function that can be used as a postprocess_func.
def topk_func(k=10, axis=-1):
"""
Creates a function that applies a Top-K operation to a IterationResult.
Top-K will return the indices of the k largest values in the array.
Args:
k (Union[int, Dict[str, int]]):
The number of indices to keep.
If this exceeds the axis length, it will be clamped.
                This can be specified on a per-output basis by providing a dictionary. In that case,
use an empty string ("") as the key to specify default top-k value for outputs not explicitly listed.
Defaults to 10.
axis (int):
The axis along which to apply the topk.
Defaults to -1.
Returns:
Callable(IterationResult) -> IterationResult: The top-k function.
"""
# Top-K implementation.
def topk(iter_result):
for name, output in iter_result.items():
k_val = util.value_or_from_dict(k, name)
if k_val:
indices = np.argsort(-output, axis=axis, kind="stable")
axis_len = indices.shape[axis]
iter_result[name] = np.take(indices, np.arange(0, min(k_val, axis_len)), axis=axis)
return iter_result
return topk
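# A minimal sketch of applying top-k before accuracy comparison (the output name "scores"
# is illustrative):
#
#     from polygraphy.comparator import Comparator
#
#     run_results = Comparator.run(runners)  # `runners` defined as in Comparator.run's docstring.
#     # Replace "scores" with the indices of its 5 largest values along the last axis;
#     # k=0 for the default ("") entry leaves all other outputs untouched.
#     top5 = PostprocessFunc.topk_func(k={"scores": 5, "": 0})
#     run_results = Comparator.postprocess(run_results, top5)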
| TensorRT-master | tools/Polygraphy/polygraphy/comparator/postprocess.py |
from polygraphy.comparator.comparator import *
from polygraphy.comparator.compare import *
from polygraphy.comparator.data_loader import *
from polygraphy.comparator.postprocess import *
from polygraphy.comparator.struct import *
| TensorRT-master | tools/Polygraphy/polygraphy/comparator/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from collections import OrderedDict
from polygraphy import mod, util
from polygraphy.comparator import util as comp_util
from polygraphy.logger import G_LOGGER, LogMode
np = mod.lazy_import("numpy")
@mod.export()
class OutputCompareResult(object):
"""
Represents the result of comparing a single output of a single iteration
between two runners.
"""
def __init__(self, passed, max_absdiff, max_reldiff, mean_absdiff, mean_reldiff, median_absdiff, median_reldiff):
"""
Records the required tolerances and other statistics gathered during comparison.
Args:
passed (bool):
Whether the error was within acceptable limits.
max_absdiff (float):
The minimum required absolute tolerance to consider the outputs equivalent.
max_reldiff (float):
The minimum required relative tolerance to consider the outputs equivalent.
mean_absdiff (float):
The mean absolute error between the outputs.
mean_reldiff (float):
The mean relative error between the outputs.
median_absdiff (float):
The median absolute error between the outputs.
median_reldiff (float):
The median relative error between the outputs.
"""
self.passed = passed
self.max_absdiff = max_absdiff
self.max_reldiff = max_reldiff
self.mean_absdiff = mean_absdiff
self.mean_reldiff = mean_reldiff
self.median_absdiff = median_absdiff
self.median_reldiff = median_reldiff
def __bool__(self):
"""
Whether the output matched.
Returns:
bool
"""
return self.passed
def __str__(self):
return "(atol={:}, rtol={:})".format(self.max_absdiff, self.max_reldiff)
def check_outputs_match(
out0, out0_name, out1, out1_name, per_out_rtol, per_out_atol, per_out_err_stat, runner0_name, runner1_name
):
"""
Checks whether two outputs matched.
Args:
out0 (np.array): The first output.
out0_name (str): The name of the first output.
out1 (np.array): The second output.
out1_name (str): The name of the second output.
per_out_rtol (float): The relative tolerance to use for comparison.
per_out_atol (float): The absolute tolerance to use for comparison.
per_out_err_stat (str): The error statistic to check. See the docstring of ``simple`` for details.
runner0_name (str): The name of the runner that generated the first output.
runner1_name (str): The name of the runner that generated the second output.
Returns:
OutputCompareResult: Details on whether the outputs matched.
"""
VALID_CHECK_ERROR_STATS = ["max", "mean", "median", "elemwise"]
if per_out_err_stat not in VALID_CHECK_ERROR_STATS:
G_LOGGER.critical(
"Invalid choice for check_error_stat: {:}.\n"
"Note: Valid choices are: {:}".format(per_out_err_stat, VALID_CHECK_ERROR_STATS)
)
G_LOGGER.super_verbose(
"{:35} | Output: {:} (dtype={:}, shape={:}):\n{:}".format(
runner0_name, out0_name, out0.dtype, out0.shape, util.indent_block(out0)
)
)
G_LOGGER.super_verbose(
"{:35} | Output: {:} (dtype={:}, shape={:}):\n{:}".format(
runner1_name, out1_name, out1.dtype, out1.shape, util.indent_block(out1)
)
)
# Check difference vs. tolerances
if np.issubdtype(out0.dtype, np.bool_) and np.issubdtype(out1.dtype, np.bool_):
absdiff = np.logical_xor(out0, out1)
else:
absdiff = np.abs(out0 - out1)
absout1 = np.abs(out1)
with np.testing.suppress_warnings() as sup:
sup.filter(RuntimeWarning)
reldiff = absdiff / absout1
max_reldiff = comp_util.compute_max(reldiff)
mean_reldiff = comp_util.compute_mean(reldiff)
median_reldiff = comp_util.compute_median(reldiff)
max_absdiff = comp_util.compute_max(absdiff)
mean_absdiff = comp_util.compute_mean(absdiff)
median_absdiff = comp_util.compute_median(absdiff)
max_elemwiseabs = "Unknown"
max_elemwiserel = "Unknown"
if per_out_err_stat == "mean":
failed = mean_absdiff > per_out_atol and (np.isnan(mean_reldiff) or mean_reldiff > per_out_rtol)
elif per_out_err_stat == "median":
failed = median_absdiff > per_out_atol and (np.isnan(median_reldiff) or median_reldiff > per_out_rtol)
elif per_out_err_stat == "max":
failed = max_absdiff > per_out_atol and (np.isnan(max_reldiff) or max_reldiff > per_out_rtol)
else:
assert per_out_err_stat == "elemwise", "This branch should be unreachable unless per_out_err_stat is 'elemwise'"
with np.testing.suppress_warnings() as sup:
sup.filter(RuntimeWarning)
mismatches = (absdiff > per_out_atol) & (reldiff > per_out_rtol)
failed = np.any(mismatches)
try:
with np.testing.suppress_warnings() as sup:
sup.filter(RuntimeWarning)
# Special because we need to account for tolerances too.
max_elemwiseabs = comp_util.compute_max(absdiff[mismatches])
max_elemwiserel = comp_util.compute_max(reldiff[mismatches])
with G_LOGGER.indent():
G_LOGGER.super_verbose("Mismatched indices:\n{:}".format(np.argwhere(mismatches)))
G_LOGGER.extra_verbose("{:35} | Mismatched values:\n{:}".format(runner0_name, out0[mismatches]))
G_LOGGER.extra_verbose("{:35} | Mismatched values:\n{:}".format(runner1_name, out1[mismatches]))
except Exception as err:
G_LOGGER.warning("Failing to log mismatches.\nNote: Error was: {:}".format(err))
# Log information about the outputs
hist_bin_range = (
min(comp_util.compute_min(out0), comp_util.compute_min(out1)),
max(comp_util.compute_max(out0), comp_util.compute_max(out1)),
)
comp_util.log_output_stats(out0, failed, runner0_name + ": " + out0_name, hist_range=hist_bin_range)
comp_util.log_output_stats(out1, failed, runner1_name + ": " + out1_name, hist_range=hist_bin_range)
G_LOGGER.info("Error Metrics: {:}".format(out0_name))
with G_LOGGER.indent():
def req_tol(mean_diff, median_diff, max_diff, elemwise_diff):
return {
"mean": mean_diff,
"median": median_diff,
"max": max_diff,
"elemwise": elemwise_diff,
}[per_out_err_stat]
G_LOGGER.info(
"Minimum Required Tolerance: {:} error | [abs={:.5g}] OR [rel={:.5g}]".format(
per_out_err_stat,
req_tol(mean_absdiff, median_absdiff, max_absdiff, max_elemwiseabs),
req_tol(mean_reldiff, median_reldiff, max_reldiff, max_elemwiserel),
)
)
comp_util.log_output_stats(absdiff, failed, "Absolute Difference")
with np.testing.suppress_warnings() as sup:
sup.filter(RuntimeWarning)
comp_util.log_output_stats(reldiff, failed, "Relative Difference")
# Finally show summary.
if failed:
G_LOGGER.error("FAILED | Difference exceeds tolerance (rel={:}, abs={:})".format(per_out_rtol, per_out_atol))
else:
G_LOGGER.finish("PASSED | Difference is within tolerance (rel={:}, abs={:})".format(per_out_rtol, per_out_atol))
G_LOGGER.extra_verbose(
"Finished comparing: '{:}' (dtype={:}, shape={:}) [{:}] and '{:}' (dtype={:}, shape={:}) [{:}]".format(
out0_name,
out0.dtype,
out0.shape,
runner0_name,
out1_name,
out1.dtype,
out1.shape,
runner1_name,
)
)
return OutputCompareResult(
not failed, max_absdiff, max_reldiff, mean_absdiff, mean_reldiff, median_absdiff, median_reldiff
)
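# A minimal sketch of tuning the comparison performed above via CompareFunc.simple (defined
# below); the output name "probabilities" is illustrative:
#
#     compare_func = CompareFunc.simple(
#         rtol={"probabilities": 1e-3, "": 1e-5},  # "" supplies the default tolerance.
#         atol=1e-4,
#         check_error_stat="median",  # Compare median errors instead of element-wise.
#     )
#     accuracy = Comparator.compare_accuracy(run_results, compare_func=compare_func)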
# Provides functions to compare two IterationResults
@mod.export()
class CompareFunc(object):
"""
Provides functions that can be used to compare two `IterationResult` s.
"""
@staticmethod
def basic_compare_func(*args, **kwargs):
mod.warn_deprecated("basic_compare_func", remove_in="0.40.0", use_instead="simple")
return CompareFunc.simple(*args, **kwargs)
@staticmethod
def simple(check_shapes=None, rtol=None, atol=None, fail_fast=None, find_output_func=None, check_error_stat=None):
"""
Creates a function that compares two IterationResults, and can be used as the `compare_func` argument
in ``Comparator.compare_accuracy``.
Args:
check_shapes (bool):
Whether shapes must match exactly. If this is False, this function may
permute or reshape outputs before comparison.
Defaults to True.
rtol (Union[float, Dict[str, float]]):
The relative tolerance to use when checking accuracy.
This can be provided on a per-output basis using a dictionary. In that case,
use an empty string ("") as the key to specify default tolerance for outputs not explicitly listed.
Defaults to 1e-5.
atol (Union[float, Dict[str, float]]):
The absolute tolerance to use when checking accuracy.
This can be provided on a per-output basis using a dictionary. In that case,
use an empty string ("") as the key to specify default tolerance for outputs not explicitly listed.
Defaults to 1e-5.
fail_fast (bool):
Whether the function should exit immediately after the first failure.
Defaults to False.
find_output_func (Callable(str, int, IterationResult) -> List[str]):
A callback that returns a list of output names to compare against from the provided
IterationResult, given an output name and index from another IterationResult.
The comparison function will always iterate over the output names of the
first IterationResult, expecting names from the second. A return value of
`[]` or `None` indicates that the output should be skipped.
check_error_stat (Union[str, Dict[str, str]]):
The error statistic to check. Possible values are:
- "elemwise": Checks each element in the output to determine if it exceeds both tolerances specified.
- "max": Checks the maximum absolute/relative errors against the respective tolerances. This is the strictest possible check.
- "mean" Checks the mean absolute/relative errors against the respective tolerances.
- "median": Checks the median absolute/relative errors against the respective tolerances.
This can be provided on a per-output basis using a dictionary. In that case,
use an empty string ("") as the key to specify default error stat for outputs not explicitly listed.
Defaults to "elemwise".
Returns:
Callable(IterationResult, IterationResult) -> OrderedDict[str, OutputCompareResult]:
A callable that returns a mapping of output names to `OutputCompareResult` s, indicating
whether the corresponding output matched.
"""
check_shapes = util.default(check_shapes, True)
default_rtol = 1e-5
default_atol = 1e-5
rtol = util.default(rtol, default_rtol)
atol = util.default(atol, default_atol)
fail_fast = util.default(fail_fast, False)
default_error_stat = "elemwise"
check_error_stat = util.default(check_error_stat, default_error_stat)
def compare_output(iter_result0, iter_result1):
"""
Compare the outputs of two runners from a single iteration.
This function will always iterate over the output names of the first IterationResult,
and attempt to find corresponding output names in the second.
If no corresponding output name is found, the output is skipped.
If all output names are skipped, then this function raises an error.
Args:
iter_result0 (IterationResult): The result of the first runner.
iter_result1 (IterationResult): The result of the second runner.
Returns:
OrderedDict[str, OutputCompareResult]:
The name of the outputs compared, derived from the first IterationResult,
and whether they matched. If an output name is not found, it is omitted from this dictionary.
Raises:
PolygraphyException: If all output names are skipped, and thus no outputs are compared.
"""
def check_dict(dct, dict_name):
if isinstance(dct, dict):
util.check_dict_contains(
dct,
set(iter_result0.keys()) | set(iter_result1.keys()) | {""},
check_missing=False,
dict_name=dict_name,
)
check_dict(rtol, "the rtol dictionary")
check_dict(atol, "the atol dictionary")
check_dict(check_error_stat, "the check_error_stat dictionary")
output_status = OrderedDict() # OrderedDict[str, bool] Maps output names to whether they matched.
if not check_shapes:
G_LOGGER.info("Strict shape checking disabled. Will attempt to match output shapes before comparisons")
def default_find_output_func(output_name, index, iter_result):
found_name = util.find_in_dict(output_name, iter_result, index)
if found_name is None:
return None
elif found_name != output_name:
exact_match = util.find_in_dict(found_name, iter_result0)
if exact_match == found_name:
G_LOGGER.verbose(
"Will not compare {:} with {:}, since the former already has an exact match: {:}".format(
found_name, output_name, exact_match
)
)
return None # If the found output is being compared against another output already, skip this non-exact match
G_LOGGER.warning(
"Output names did not match exactly. Assuming {:} output: {:} "
"corresponds to output: {:}".format(iter_result.runner_name, found_name, output_name)
)
return [found_name]
nonlocal find_output_func
find_output_func = util.default(find_output_func, default_find_output_func)
for index, (out0_name, output0) in enumerate(iter_result0.items()):
out1_names = util.default(find_output_func(out0_name, index, iter_result1), [])
if len(out1_names) > 1:
G_LOGGER.info(
"Will attempt to compare output: '{:}' [{:}] with multiple outputs: '{:}' [{:}]".format(
out0_name, iter_result0.runner_name, list(out1_names), iter_result1.runner_name
)
)
for out1_name in out1_names:
if out1_name is None or out1_name not in iter_result1:
G_LOGGER.warning(
"For output: '{:}' [{:}], skipping corresponding output: '{:}' [{:}], "
"since the output was not found".format(
out0_name, iter_result0.runner_name, out1_name, iter_result1.runner_name
)
)
continue
per_out_atol = util.value_or_from_dict(atol, out0_name, default_atol)
per_out_rtol = util.value_or_from_dict(rtol, out0_name, default_rtol)
per_out_err_stat = util.value_or_from_dict(check_error_stat, out0_name, default_error_stat)
output1 = iter_result1[out1_name]
G_LOGGER.start(
"Comparing Output: '{:}' (dtype={:}, shape={:}) with '{:}' (dtype={:}, shape={:}) | "
"Tolerance: [abs={:.5g}, rel={:.5g}] | Checking {:} error".format(
out0_name,
output0.dtype,
output0.shape,
out1_name,
output1.dtype,
output1.shape,
per_out_atol,
per_out_rtol,
per_out_err_stat,
)
)
G_LOGGER.extra_verbose(
"Note: Comparing {:} vs. {:}".format(iter_result0.runner_name, iter_result1.runner_name)
)
with G_LOGGER.indent():
if check_shapes and output0.shape != output1.shape:
G_LOGGER.error(
"Will not compare outputs of different shapes. Note: Output shapes are "
"{:} and {:}.".format(output0.shape, output1.shape)
)
G_LOGGER.error(
"Note: Use --no-shape-check or set check_shapes=False to "
"attempt to compare values anyway.",
mode=LogMode.ONCE,
)
outputs_match = False
else:
output1 = util.try_match_shape(output1, output0.shape)
output0 = output0.reshape(output1.shape)
outputs_match = check_outputs_match(
output0,
out0_name,
output1,
out1_name,
per_out_rtol=per_out_rtol,
per_out_atol=per_out_atol,
per_out_err_stat=per_out_err_stat,
runner0_name=iter_result0.runner_name,
runner1_name=iter_result1.runner_name,
)
output_status[out0_name] = outputs_match
if fail_fast and not outputs_match:
return output_status
mismatched_output_names = [name for name, matched in output_status.items() if not matched]
if mismatched_output_names:
G_LOGGER.error("FAILED | Mismatched outputs: {:}".format(mismatched_output_names))
else:
G_LOGGER.finish("PASSED | All outputs matched | Outputs: {:}".format(list(output_status.keys())))
# This is useful for catching cases where Polygraphy does something wrong with the runner output buffers.
if not output_status and (bool(iter_result0.keys()) or bool(iter_result1.keys())):
r0_name = iter_result0.runner_name
r0_outs = list(iter_result0.keys())
r1_name = iter_result1.runner_name
r1_outs = list(iter_result1.keys())
G_LOGGER.critical(
"All outputs were skipped, no common outputs found! Note:\n{:} outputs: "
"{:}\n{:} outputs: {:}".format(r0_name, r0_outs, r1_name, r1_outs)
)
return output_status
return compare_output
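# A minimal usage sketch for the factory above. The exact public entry point
# (e.g. via CompareFunc) and the factory name are assumed here and may differ:
#
#   compare_func = basic_compare_func(
#       rtol={"output0": 1e-3, "": 1e-5},  # "" sets the default for unlisted outputs.
#       check_error_stat="median",
#       fail_fast=True,
#   )
#   match_dict = compare_func(iter_result0, iter_result1)  # OrderedDict[str, OutputCompareResult]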
| TensorRT-master | tools/Polygraphy/polygraphy/comparator/compare.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from polygraphy import mod, util, config
from polygraphy.common.interface import TypedDict, TypedList
from polygraphy.json import Decoder, Encoder, add_json_methods, load_json, save_json
from polygraphy.logger import G_LOGGER
np = mod.lazy_import("numpy")
class LazyNumpyArray(object):
"""
Represents a lazily loaded NumPy array.
For example, large NumPy arrays may be serialized to temporary files on the disk
to save memory.
"""
def __init__(self, arr):
"""
Args:
arr (np.ndarray): The NumPy array.
"""
self.arr = None
self.tmpfile = None
if config.ARRAY_SWAP_THRESHOLD_MB >= 0 and arr.nbytes > (config.ARRAY_SWAP_THRESHOLD_MB << 20):
self.tmpfile = util.NamedTemporaryFile(suffix=".json")
G_LOGGER.extra_verbose(
"Evicting large array ({:.3f} MiB) from memory and saving to {:}".format(
arr.nbytes / (1024.0 ** 2), self.tmpfile.name
)
)
save_json(arr, self.tmpfile.name)
else:
self.arr = arr
def numpy(self):
"""
Get the NumPy array, deserializing from the disk if it was stored earlier.
Returns:
np.ndarray: The NumPy array
"""
if self.arr is not None:
return self.arr
assert self.tmpfile is not None, "Path and NumPy array cannot both be None!"
return load_json(self.tmpfile.name)
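# Usage sketch: arrays whose size exceeds config.ARRAY_SWAP_THRESHOLD_MB are
# transparently evicted to a temporary JSON file and reloaded on demand:
#
#   lazy = LazyNumpyArray(np.zeros((1024, 1024), dtype=np.float32))
#   arr = lazy.numpy()  # Deserializes from disk only if the array was evicted.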
@Encoder.register(LazyNumpyArray)
def encode(lazy_arr):
return {
"values": lazy_arr.numpy(),
}
@Decoder.register(LazyNumpyArray)
def decode(dct):
return LazyNumpyArray(dct["values"])
@mod.export()
class IterationResult(TypedDict(lambda: str, lambda: LazyNumpyArray)):
"""
An ordered dictionary containing the result of running a single iteration of a runner.
This maps output names to NumPy arrays, and preserves the output ordering from the runner.
NOTE: The ``POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB`` environment variable can be set to enable
the arrays to be swapped to the disk.
Also includes additional fields indicating the name of the runner which produced the
outputs, and the time required to do so.
"""
@staticmethod
def _to_lazy(nparray):
if isinstance(nparray, LazyNumpyArray):
return nparray
return LazyNumpyArray(nparray)
@staticmethod
def _to_lazy_dict(nparray_dict):
if nparray_dict is None:
return None
# Converts a Dict[str, np.ndarray] to a Dict[str, LazyNumpyArray]
lazy = OrderedDict()
for name, out in nparray_dict.items():
lazy[name] = IterationResult._to_lazy(out)
return lazy
def __init__(self, outputs=None, runtime=None, runner_name=None):
"""
Args:
outputs (Dict[str, np.array]): The outputs of this iteration, mapped to their names.
runtime (float): The time required for this iteration, in seconds.
runner_name (str): The name of the runner that produced this output.
"""
if outputs and config.ARRAY_SWAP_THRESHOLD_MB < 0:
total_size_gb = sum(arr.nbytes for arr in outputs.values() if isinstance(arr, np.ndarray)) / (1024.0 ** 3)
if total_size_gb >= 1:
G_LOGGER.warning(
"It looks like the outputs of this network are very large ({:.3f} GiB).\n"
"To reduce memory usage, you may want to allow Polygraphy to swap these arrays to the disk using "
"the POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB environment variable.".format(total_size_gb)
)
super().__init__(IterationResult._to_lazy_dict(outputs))
self.runtime = runtime
self.runner_name = util.default(runner_name, "")
# Convenience methods to preserve np.ndarray in the interface.
def update(self, other):
return super().update(IterationResult._to_lazy_dict(other))
def __setitem__(self, name, arr):
return super().__setitem__(name, IterationResult._to_lazy(arr))
def values(self):
for arr in super().values():
yield arr.numpy()
def items(self):
for name, arr in super().items():
yield name, arr.numpy()
def __getitem__(self, name):
return super().__getitem__(name).numpy()
def __eq__(self, other):
if self.runtime != other.runtime or self.runner_name != other.runner_name:
return False
for key, val in self.items():
if key not in other:
return False
if not np.array_equal(val, other[key]):
return False
return True
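# Usage sketch: IterationResult accepts plain NumPy arrays and wraps them lazily:
#
#   iter_result = IterationResult(
#       outputs={"output0": np.ones((1, 3), dtype=np.float32)},
#       runtime=0.01,
#       runner_name="my_runner",
#   )
#   print(iter_result["output0"].shape)  # Accessors return np.ndarray, not LazyNumpyArray.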
@Encoder.register(IterationResult)
def encode(iter_result):
return {
"outputs": iter_result.dct,
"runtime": iter_result.runtime,
"runner_name": iter_result.runner_name,
}
@Decoder.register(IterationResult)
def decode(dct):
return IterationResult(outputs=dct["outputs"], runtime=dct["runtime"], runner_name=dct["runner_name"])
@mod.export()
@add_json_methods("inference results")
class RunResults(TypedList(lambda: tuple)):
"""
Maps runner names to zero or more IterationResults.
Note: Technically, this is a ``List[Tuple[str, List[IterationResult]]]``, but includes
helpers that make it behave like an OrderedDict that can contain duplicates.
"""
def items(self):
"""
Creates a generator that yields ``Tuple[str, List[IterationResult]]`` - runner names
and corresponding outputs.
"""
for name, iteration_results in self.lst:
yield name, iteration_results
def keys(self):
"""
Creates a generator that yields runner names (str).
"""
for name, _ in self.lst:
yield name
def values(self):
"""
Creates a generator that yields runner outputs (List[IterationResult]).
"""
for _, iteration_results in self.lst:
yield iteration_results
def update(self, other):
"""
Updates the results stored in this instance.
Args:
other (Union[Dict[str, List[IterationResult]], RunResults]):
A dictionary or RunResults instance from which to update this one.
"""
for name, iteration_results in other.items():
self[name] = iteration_results  # Route through __setitem__ so existing entries are replaced and new ones appended.
return self
def __getitem__(self, key):
if isinstance(key, int):
return self.lst[key]
for name, iteration_results in self.lst:
if name == key:
return iteration_results
G_LOGGER.critical(
"{:35} does not exist in this RunResults instance. Note: Available runners: {:}".format(
key, list(self.keys())
)
)
def __setitem__(self, key, value):
if isinstance(key, int):
self.lst[key] = value
return
for index, name in enumerate(self.keys()):
if name == key:
self.lst[index] = (key, value)
break
else:
self.append((key, value))
def __contains__(self, val):
if isinstance(val, str) or isinstance(val, bytes):
return val in list(self.keys())
return val in self.lst
def __eq__(self, other):
for (r0, its0), (r1, its1) in zip(self.lst, other.lst):
if r0 != r1:
return False
if its0 != its1:
return False
return True
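# Usage sketch: RunResults acts like an ordered mapping from runner names to
# lists of IterationResults (duplicate names are permitted):
#
#   results = RunResults()
#   results["my_runner"] = [iter_result]  # Replaces an existing entry or appends a new one.
#   for name, iters in results.items():
#       print(name, len(iters))
#   results.save("outputs.json")  # save()/load() are added by @add_json_methods.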
@Encoder.register(RunResults)
def encode(results):
return {"lst": results.lst}
@Decoder.register(RunResults)
def decode(dct):
return RunResults(list(map(tuple, dct["lst"])))
@mod.export()
class AccuracyResult(TypedDict(lambda: tuple, lambda: list)):
"""
An ordered dictionary including details about the result of ``Comparator.compare_accuracy``.
More specifically, it is an ``OrderedDict[Tuple[str, str], List[OrderedDict[str, bool]]]`` which maps a runner
pair (a tuple containing both runner names) to a list of dictionaries of booleans (or anything that can be
converted into a boolean, such as an ``OutputCompareResult``), indicating whether there was a match in the outputs of
the corresponding iteration. The ``List[OrderedDict[str, bool]]`` is constructed from the dictionaries returned
by ``compare_func`` in ``compare_accuracy``.
For example, to see if there's a match between ``runner0`` and
``runner1`` during the 1st iteration for an output called ``output0``:
::
runner_pair = ("runner0", "runner1")
iteration = 0
output_name = "output0"
match = bool(accuracy_result[runner_pair][iteration][output_name])
If there's a mismatch, you can inspect the outputs from
the results of ``Comparator.run()``, assumed here to be called ``run_results``:
::
runner0_output = run_results["runner0"][iteration][output_name]
runner1_output = run_results["runner1"][iteration][output_name]
"""
def __bool__(self):
"""
Whether all outputs matched for every iteration.
You can use this function to avoid manually checking each output. For example:
::
if accuracy_result:
print("All matched!")
Returns:
bool
"""
return all([bool(match) for outs in self.values() for out in outs for match in out.values()])
def _get_runner_pair(self, runner_pair):
return util.default(runner_pair, list(self.keys())[0])
def percentage(self, runner_pair=None):
"""
Returns the percentage of iterations that matched for the given pair of runners,
expressed as a decimal between 0.0 and 1.0.
Always returns 1.0 when the number of iterations is 0, or when there are no runner comparisons.
Args:
runner_pair (Tuple[str, str]):
A pair of runner names describing which runners to check.
Defaults to the first pair in the dictionary.
"""
if not list(self.keys()):
return 1.0 # No data in this result.
matched, _, total = self.stats(runner_pair)
if not total:
return 1.0 # No iterations
return float(matched) / float(total)
def stats(self, runner_pair=None):
"""
Returns the number of iterations that matched, mismatched, and the total number of iterations.
Args:
runner_pair (Tuple[str, str]):
A pair of runner names describing which runners to check.
Defaults to the first pair in the dictionary.
Returns:
Tuple[int, int, int]: Number of iterations that matched, mismatched, and total respectively.
"""
runner_pair = self._get_runner_pair(runner_pair)
outs = self[runner_pair]
matched = sum([all([match for match in out.values()]) for out in outs])
total = len(outs)
return matched, total - matched, total
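# Usage sketch:
#
#   matched, mismatched, total = accuracy_result.stats(("runner0", "runner1"))
#   print(accuracy_result.percentage())  # 1.0 when every iteration matched.
#   if accuracy_result:
#       print("All outputs matched for all iterations!")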
| TensorRT-master | tools/Polygraphy/polygraphy/comparator/struct.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
from polygraphy.logger import G_LOGGER
from polygraphy.tools.base import Tool
TOOL_REGISTRY = []
class MissingTool(Tool):
def __init__(self, name, err):
super().__init__(name)
self.err = err
# NOTE: When modifying this error message, make sure to update the checks in
# tests/test_public_imports.py so that we don't miss errors!
self.__doc__ = (
"[!] This tool could not be loaded due to an error:\n{:}\nRun 'polygraphy {:}' for details.".format(
self.err, self.name
)
)
def __call__(self, args):
G_LOGGER.critical("Encountered an error when loading this tool:\n{:}".format(self.err))
def try_register_tool(module, tool_class):
global TOOL_REGISTRY
try:
toolmod = importlib.import_module(module)
ToolClass = getattr(toolmod, tool_class)
TOOL_REGISTRY.append(ToolClass())
except Exception as err:
G_LOGGER.internal_error(
"Could not load command-line tool: {:}.\nNote: Error was: {:}".format(tool_class.lower(), err)
)
TOOL_REGISTRY.append(MissingTool(tool_class.lower(), err=err))
try_register_tool("polygraphy.tools.run", "Run")
try_register_tool("polygraphy.tools.convert", "Convert")
try_register_tool("polygraphy.tools.inspect", "Inspect")
try_register_tool("polygraphy.tools.surgeon", "Surgeon")
try_register_tool("polygraphy.tools.template", "Template")
try_register_tool("polygraphy.tools.debug", "Debug")
try_register_tool("polygraphy.tools.data", "Data")
# Check that tool names are unique
tool_names = [tool.name for tool in TOOL_REGISTRY]
duplicates = {name for name in tool_names if tool_names.count(name) > 1}
if duplicates:
G_LOGGER.internal_error("Multiple tools have the same name. Duplicate tool names found: {:}".format(duplicates))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/registry.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.logger import G_LOGGER
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
onnx_util = mod.lazy_import("polygraphy.backend.onnx.util")
gs = mod.lazy_import("onnx_graphsurgeon")
@mod.export()
def override_input_shapes(graph, user_input_metadata):
"""
Overrides input shapes in the model according to the provided input metadata.
Inputs omitted from user_input_metadata are not changed.
Shapes of intermediate tensors are cleared.
"""
# We can leverage extract_subgraph if we make sure all the current graph inputs are preserved.
# We need to be careful to preserve the order of graph inputs here.
input_metadata = onnx_util.meta_from_gs_tensors(graph.inputs)
input_metadata.update(user_input_metadata)
graph = onnx_backend.extract_subgraph(graph, input_metadata)
G_LOGGER.info("Overriding input shapes to:\n{:}".format(onnx_util.meta_from_gs_tensors(graph.inputs)))
# Have to unset intermediate shapes as they may cause problems.
tensors = graph.tensors()
for tensor in tensors.values():
if tensor not in graph.inputs and isinstance(tensor, gs.Variable):
tensor.shape = None
return graph
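# Usage sketch (names assumed): user_input_metadata maps input names to
# (dtype, shape), e.g. via polygraphy.common.TensorMetadata:
#
#   meta = TensorMetadata().add("input0", dtype=None, shape=(1, 3, 224, 224))
#   graph = override_input_shapes(graph, meta)  # Inputs omitted from meta are untouched.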
| TensorRT-master | tools/Polygraphy/polygraphy/tools/util.py |
from polygraphy.tools.base import *
from polygraphy.tools.registry import TOOL_REGISTRY
| TensorRT-master | tools/Polygraphy/polygraphy/tools/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
from collections import OrderedDict, defaultdict
import polygraphy
from polygraphy import config, constants, util
from polygraphy.logger import G_LOGGER
def assert_identifier(inp):
"""
Checks if the argument can be a valid Python identifier.
Raises a PolygraphyException if it can't.
"""
if not inp.isidentifier():
G_LOGGER.critical(
"This argument must be a valid identifier. "
"Provided argument cannot be a Python identifier: {:}".format(inp)
)
return inp
def safe(base_str, *args, **kwargs):
"""
Marks a string as being safe.
NOTE: The caller is responsible for checking that the string is actually safe.
Can work with format strings as well. For example:
::
safe("{:} is my name", "polygraphy")
-> "'polygraphy' is my name"
safe("{:} is my name", inline("polygraphy"))
-> "polygraphy is my name"
"""
args = [repr(arg) for arg in args]
kwargs = {key: repr(val) for key, val in kwargs.items()}
return Script.String(base_str.format(*args, **kwargs), safe=True)
def ensure_safe(inp):
"""
Ensures that the input is marked as a safe string (i.e. Script.String(safe=True)).
"""
if not isinstance(inp, Script.String):
G_LOGGER.internal_error("Input to ensure_safe must be of type Script.String, but was: {:}".format(inp))
elif not inp.safe:
G_LOGGER.internal_error(
"Input string: {:} was not checked for safety. " "This is a potential security risk!".format(inp)
)
return inp
def inline(inp):
"""
Marks a safe string as being inline. See Script.Inline for details
on what this means.
Args:
inp (Script.String):
The safe string to inline.
"""
inp = ensure_safe(inp)
inp.inline = True
return inp
def make_invocable_impl(type_str, *args, **kwargs):
"""
Generates a string that would invoke the type specified in
type_str with the specified arguments.
Skips keyword arguments that are set to ``None``.
For example, ``make_invocable_impl("Example", None, "string", w=None, x=2, y=inline("test"))``
would return a string: ``"Example(None, 'string', x=2, y=test)"``
Args:
type_str (str):
The type to invoke.
Returns:
Tuple[str, bool]:
A tuple including the `invoke` string and a boolean
indicating whether all the arguments were default (i.e. None).
"""
# We don't need to check obj_str for safety since we know that any inline
# args/kwargs are already safe - other types need no checks
obj_str, all_defaults = util.make_repr(type_str, *args, **kwargs)
return Script.String(obj_str, safe=True, inline=True), all_defaults
def make_invocable(type_str, *args, **kwargs):
"""
Creates a string representation that will invoke the specified object,
with the specified arguments.
Args:
type_str (str): A string representing the object that should be invoked.
args, kwargs:
Arguments to pass along to the object. If a keyword argument
is set to None, it will be omitted.
Returns:
str: A string representation that invokes the object specified.
Examples:
make_invocable("MyClass", 0, 1, last=3)
-> "MyClass(0, 1, last=3)"
make_invocable("my_func", 0, 1, last=None)
-> "my_func(0, 1)"
"""
return make_invocable_impl(type_str, *args, **kwargs)[0]
def make_invocable_if_nondefault(type_str, *args, **kwargs):
"""
Similar to `make_invocable`, but will return None if all arguments are None.
Examples:
make_invocable_if_nondefault("MyClass", 0, 1, last=3)
-> "MyClass(0, 1, last=3)"
make_invocable_if_nondefault("my_func", None, None, last=None)
-> None
"""
obj_str, all_defaults = make_invocable_impl(type_str, *args, **kwargs)
if all_defaults:
return None
return obj_str
################################# SCRIPT ##################################
# Used to generate a script that uses the Polygraphy API.
class Script(object):
class String(object):
"""
Represents a string that has passed security checks.
This can be spoofed easily - the purpose is to check Polygraphy's implementations,
not external ones.
"""
def __init__(self, s, safe=False, inline=False):
self.s = s
self.safe = safe
self.inline = inline
def __str__(self):
return str(self.s)
def __repr__(self):
if self.inline:
# Since only safe strings can be marked inline, self.safe is always
# True in this branch, so no need to check it.
return str(self.s)
return repr(self.s)
def __iadd__(self, other):
if not isinstance(other, Script.String):
G_LOGGER.internal_error("Cannot concatenate str and Script.String. Note: str was: {:}".format(other))
elif self.safe != other.safe:
G_LOGGER.internal_error(
"Cannot concatenate unsafe string ({:}) to safe string ({:})!".format(other, self.s)
)
self.s += other.s
return self
def unwrap(self):
return self.s
DATA_LOADER_NAME = String("data_loader", safe=True, inline=True)
def __init__(self, summary=None, always_create_runners=True):
"""
Represents a Python script that uses the Polygraphy API.
Args:
summary (str):
A summary of what the script does, which will be included in the script as a comment.
always_create_runners (bool):
Whether to create the list of runners even if it would be empty.
"""
self.imports = set()
self.from_imports = defaultdict(set) # Dict[str, List[str]] Maps from module to imported components
self.loaders = OrderedDict() # Dict[str, str] Maps a string constructing a loader to a name.
self.loader_count = defaultdict(int) # Dict[str, int] Maps loader_id to the number of loaders sharing that ID
self.runners = [] # List[str]
self.preimport = [] # List[str]
self.suffix = [] # List[str]
self.data_loader = "" # str Contains the DataLoader constructor
self.summary = summary
self.always_create_runners = always_create_runners
def add_import(self, imports, frm=None):
"""
Adds imports to this script
Args:
imports (List[str]): List of components to import
frm (str): The module from which to import.
"""
if frm:
self.from_imports[frm].update(imports)
else:
self.imports.update(imports)
def set_data_loader(self, data_loader_str):
"""
Adds a data loader to this script, overwriting
any previous data loader.
Args:
data_loader_str (str): A string constructing the data loader.
Returns:
str:
The name of the data loader in the script, or None if the
provided data loader is empty.
"""
if not data_loader_str:
return None
data_loader_str = ensure_safe(data_loader_str).unwrap()
self.data_loader = data_loader_str
return Script.DATA_LOADER_NAME
def add_loader(self, loader_str, loader_id, suffix=None):
"""
Adds a loader to the script.
If the loader is a duplicate, returns the existing loader instead.
Args:
loader_str (str):
A string constructing the loader.
For security reasons, this must be generated using
`make_invocable` or `make_invocable_if_nondefault`.
loader_id (str): A short human-friendly identifier for the loader
Returns:
str: The name of the loader added.
"""
suffix = util.default(suffix, "")
loader_str = ensure_safe(loader_str).unwrap()
if loader_str in self.loaders:
return self.loaders[loader_str]
unique_name = loader_id + suffix
if self.loader_count[unique_name]:
unique_name = "{:}_{:}".format(unique_name, self.loader_count[loader_id])
unique_name = Script.String(unique_name, safe=True, inline=True)
self.loader_count[loader_id] += 1
self.loaders[loader_str] = unique_name
return unique_name
def get_runners(self):
return Script.String("runners", safe=True, inline=True)
def add_runner(self, runner_str):
"""
Adds a runner to the script.
Args:
runner_str (str): A string constructing the runner
"""
runner_str = ensure_safe(runner_str).unwrap()
self.runners.append(runner_str)
def append_preimport(self, line):
"""
Append a line to the pre-import prefix of the script.
Args:
line (str): The line to append.
"""
line = ensure_safe(line).unwrap()
self.preimport.append(line)
def append_suffix(self, line):
"""
Append a line to the suffix of the script
Args:
line (str): The line to append.
"""
line = ensure_safe(line).unwrap()
self.suffix.append(line)
def __str__(self):
script = "#!/usr/bin/env python3\n"
script += "# Template auto-generated by polygraphy [v{:}] on {:} at {:}\n".format(
polygraphy.__version__, time.strftime("%D"), time.strftime("%H:%M:%S")
)
script += "# Generation Command: {:}\n".format(" ".join(sys.argv))
if self.summary:
script += "# " + "\n# ".join(self.summary.splitlines()) + "\n"
script += ("\n" if self.preimport else "") + "\n".join(self.preimport) + ("\n\n" if self.preimport else "")
imports = []
for imp in self.imports:
imports.append("import {:}".format(imp))
for frm, imps in self.from_imports.items():
imps = sorted(imps)
imports.append("from {:} import {:}".format(frm, ", ".join(imps)))
script += "\n".join(sorted(imports)) + "\n"
if self.data_loader:
script += "\n# Data Loader\n"
script += "{:} = {:}\n".format(Script.DATA_LOADER_NAME, self.data_loader)
script += "\n"
if self.loaders:
script += "# Loaders\n"
for loader, loader_name in self.loaders.items():
script += "{:} = {:}\n".format(loader_name, loader)
script += "\n"
if self.runners or self.always_create_runners:
script += "# Runners\n"
script += "{:} = [".format(self.get_runners())
for runner in self.runners:
script += "\n\t{:},".format(runner)
if self.runners:
script += "\n"
script += "]\n"
script += "\n".join(self.suffix) + "\n"
script = script.replace("\t", constants.TAB).replace("\n\n\n", "\n\n")
G_LOGGER.super_verbose("Created script:\n{:}".format(script))
return script
def save(self, dest):
"""
Save this script to the specified destination.
Args:
dest (file-like):
A file-like object that defines ``write()``, ``isatty``, and has a `name` attribute.
"""
with dest:
dest.write(str(self))
path = dest.name
# Somehow, piping fools isatty, e.g. `polygraphy run --gen-script - | cat`
if not dest.isatty() and path not in ["<stdout>", "<stderr>"]:
G_LOGGER.info("Writing script to: {:}".format(path))
# Make file executable
os.chmod(path, os.stat(path).st_mode | 0o111)
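# Usage sketch: assembling a small script with the helpers above. "MyLoader"
# and "MyRunner" are hypothetical names, not real Polygraphy loaders:
#
#   script = Script(summary="Example generated script")
#   script.add_import(["MyLoader", "MyRunner"], frm="my_module")
#   loader_name = script.add_loader(make_invocable("MyLoader", "model.onnx"), "load_model")
#   script.add_runner(make_invocable("MyRunner", loader_name))
#   print(str(script))  # Or: script.save(open("example.py", "w"))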
| TensorRT-master | tools/Polygraphy/polygraphy/tools/script.py |
from polygraphy.tools.inspect.inspect import Inspect
| TensorRT-master | tools/Polygraphy/polygraphy/tools/inspect/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.base import Tool
from polygraphy.tools.inspect.subtool import Data, Model, Tactics, Capability
class Inspect(Tool):
"""
View information about various types of files.
"""
def __init__(self):
super().__init__("inspect")
def add_parser_args(self, parser):
subparsers = parser.add_subparsers(title="Inspection Subtools", dest="subtool")
subparsers.required = True
SUBTOOLS = [
Model(),
Data(),
Tactics(),
Capability()
]
for subtool in SUBTOOLS:
subtool.setup_parser(subparsers)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/inspect/inspect.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.logger import G_LOGGER
from polygraphy.tools.base import Tool
algorithm_selector = mod.lazy_import("polygraphy.backend.trt.algorithm_selector")
class Tactics(Tool):
"""
Display the contents of tactic replay files in a human readable format.
(for example, those generated by `--save-tactics` from `polygraphy run`)
"""
def __init__(self):
super().__init__("tactics")
def add_parser_args(self, parser):
parser.add_argument("tactic_replay", help="Path to a tactic replay file")
def run(self, args):
replay = algorithm_selector.TacticReplayData.load(args.tactic_replay)
G_LOGGER.info(replay)
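# Example invocation (sketch):
#   polygraphy inspect tactics replay.json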
| TensorRT-master | tools/Polygraphy/polygraphy/tools/inspect/subtool/tactics.py |
from polygraphy.tools.inspect.subtool.model import Model
from polygraphy.tools.inspect.subtool.data import Data
from polygraphy.tools.inspect.subtool.tactics import Tactics
from polygraphy.tools.inspect.subtool.capability import Capability
| TensorRT-master | tools/Polygraphy/polygraphy/tools/inspect/subtool/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
from polygraphy import mod, util
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import (
ModelArgs,
OnnxLoaderArgs,
OnnxShapeInferenceArgs,
TfLoaderArgs,
TrtEngineLoaderArgs,
TrtNetworkLoaderArgs,
TrtPluginLoaderArgs,
)
from polygraphy.tools.base import Tool
trt_util = mod.lazy_import("polygraphy.backend.trt.util")
onnx_util = mod.lazy_import("polygraphy.backend.onnx.util")
tf_util = mod.lazy_import("polygraphy.backend.tf.util")
class Model(Tool):
"""
Display information about a model, including inputs and outputs, as well as layers and their attributes.
"""
def __init__(self):
super().__init__("model")
self.subscribe_args(ModelArgs(model_required=True, inputs=None))
self.subscribe_args(TfLoaderArgs(artifacts=False, outputs=False))
self.subscribe_args(OnnxShapeInferenceArgs())
self.subscribe_args(OnnxLoaderArgs(output_prefix=None))
self.subscribe_args(TrtPluginLoaderArgs())
self.subscribe_args(TrtNetworkLoaderArgs(outputs=False))
self.subscribe_args(TrtEngineLoaderArgs())
def add_parser_args(self, parser):
parser.add_argument(
"--convert-to",
"--display-as",
help="Try to convert the model to the specified format before displaying",
choices=["trt"],
dest="display_as",
)
parser.add_argument(
"--mode",
"--layer-info",
help="Display layers: {{"
"'none': Display no layer information, "
"'basic': Display layer inputs and outputs, "
"'attrs': Display layer inputs, outputs and attributes, "
"'full': Display layer inputs, outputs, attributes, and weights"
"}}",
choices=["none", "basic", "attrs", "full"],
dest="mode",
default="none",
)
def run(self, args):
func = None
if self.arg_groups[ModelArgs].model_type.is_tf():
func = self.inspect_tf
if self.arg_groups[ModelArgs].model_type.is_onnx():
func = self.inspect_onnx
if self.arg_groups[ModelArgs].model_type.is_trt() or args.display_as == "trt":
func = self.inspect_trt
if func is None:
G_LOGGER.critical("Could not determine how to display this model. Maybe you need to specify --display-as?")
func(args)
def inspect_trt(self, args):
if self.arg_groups[ModelArgs].model_type == "engine":
if args.mode != "none":
G_LOGGER.warning("Displaying layer information for TensorRT engines is not currently supported")
with self.arg_groups[TrtEngineLoaderArgs].load_serialized_engine() as engine:
engine_str = trt_util.str_from_engine(engine)
G_LOGGER.info("==== TensorRT Engine ====\n{:}".format(engine_str))
else:
builder, network, parser = util.unpack_args(self.arg_groups[TrtNetworkLoaderArgs].load_network(), 3)
with contextlib.ExitStack() as stack:
stack.enter_context(builder)
stack.enter_context(network)
if parser:
stack.enter_context(parser)
network_str = trt_util.str_from_network(network, mode=args.mode).strip()
G_LOGGER.info("==== TensorRT Network ====\n{:}".format(network_str))
def inspect_onnx(self, args):
onnx_model = self.arg_groups[OnnxLoaderArgs].load_onnx()
model_str = onnx_util.str_from_onnx(onnx_model, mode=args.mode).strip()
G_LOGGER.info("==== ONNX Model ====\n{:}".format(model_str))
def inspect_tf(self, args):
tf_graph, _ = self.arg_groups[TfLoaderArgs].load_graph()
graph_str = tf_util.str_from_graph(tf_graph, mode=args.mode).strip()
G_LOGGER.info("==== TensorFlow Graph ====\n{:}".format(graph_str))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/inspect/subtool/model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from polygraphy import mod
from polygraphy.common.interface import TypedDict
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import ModelArgs, OnnxLoaderArgs, OnnxSaveArgs, OnnxShapeInferenceArgs
from polygraphy.tools.base import Tool
common_backend = mod.lazy_import("polygraphy.backend.common")
gs = mod.lazy_import("onnx_graphsurgeon")
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
onnx_util = mod.lazy_import("polygraphy.backend.onnx.util")
trt = mod.lazy_import("tensorrt")
trt_backend = mod.lazy_import("polygraphy.backend.trt")
trt_util = mod.lazy_import("polygraphy.backend.trt.util")
util = mod.lazy_import("polygraphy.util")
class UnsupportedNodeDict(TypedDict(lambda: str, lambda: dict)):
"""
An ordered dictionary that maps ops to error(s) encountered by TensorRT
while trying to parse them, and the range of node indices for the subgraphs
where these errors were encountered.
More specifically, it is an ``OrderedDict[str, Dict[str, List[Tuple[int]]]]``.
"""
def add(self, op, err_string, node_range):
"""
Add a single entry for a single error in a subgraph.
Multiple node ranges may apply to a single op/error combination.
Args:
op (str): The name of the op that was unsupported.
err_string (str): The error encountered.
node_range (Union[Tuple[int], int]):
The start (inclusive) and end (exclusive) node indices of the subgraph
"""
if op not in self:
self[op] = {}
if err_string not in self[op]:
self[op][err_string] = []
self[op][err_string].append(node_range)
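# Usage sketch (hypothetical op and error message):
#
#   unsupported = UnsupportedNodeDict()
#   unsupported.add("NonMaxSuppression", "No importer registered", (3, 7))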
def supports_model(path):
"""
Invokes the ONNX parser's `supports_model` on the specified model.
Args:
path (str): The path to the ONNX model.
Returns:
Tuple[bool, SubgraphCollection, parser]:
(1) Whether the model is supported.
(2) A List[Tuple[List[int], bool]] mapping groups of node indices to a boolean
indicating whether they are supported.
(3) The TensorRT ONNX parser instance.
"""
_, network = trt_backend.create_network()
parser = trt.OnnxParser(network, trt_backend.get_trt_logger())
try:
parser.supports_model
except AttributeError:
trt_util.fail_unavailable("supports_model in tensorrt.OnnxParser")
supported, nodelists = parser.supports_model(common_backend.bytes_from_path(path), path)
return supported, nodelists, parser
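# Usage sketch:
#
#   supported, nodelists, parser = supports_model("model.onnx")
#   if not supported:
#       for node_indices, is_supported in nodelists:
#           print(node_indices, "supported" if is_supported else "unsupported")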
def save_subgraph(onnx_save_args, graph, start, end, prefix="", use_tmp_file=False):
"""
Extracts a subgraph from the main graph and saves it to disk.
Args:
graph (onnx_graphsurgeon.Graph): The parent/main graph.
start (int): The (inclusive) index of the start node.
end (int): The (exclusive) index of the end node.
prefix (str): The prefix for the model file name.
use_tmp_file (bool):
Whether the subgraph should be written to a temporary file instead of the output directory.
Returns:
str: The full path to the ONNX model of the subgraph.
"""
subgraph_nodes = graph.nodes[start:end]
out_dict = {out.name: out for node in subgraph_nodes for out in node.outputs}
in_dict = {inp.name: inp for node in subgraph_nodes for inp in node.inputs}
# Guess graph inputs/outputs by checking all output tensor names against all input tensor names, and vice-versa.
subgraph_inputs = onnx_util.meta_from_gs_tensors([in_dict[k] for k in in_dict if k not in out_dict])
subgraph_outputs = onnx_util.meta_from_gs_tensors([out_dict[k] for k in out_dict if k not in in_dict])
subgraph = gs.export_onnx(onnx_backend.extract_subgraph(graph, subgraph_inputs, subgraph_outputs))
if use_tmp_file:
path = util.NamedTemporaryFile(prefix=prefix, suffix=".onnx").name
else:
# end is exclusive, so subtract one to make the model names friendlier.
path = os.path.join(onnx_save_args.path, "{:}_subgraph-nodes-{:}-{:}.onnx".format(prefix, start, end - 1))
onnx_save_args.save_onnx(subgraph, path)
return path
def gen_results_summary(final_unsupported):
"""
Generates a results summary given the final unsupported nodes dictionary.
Args:
final_unsupported (UnsupportedNodeDict):
The unsupported ops and corresponding errors and node index ranges.
Returns:
str: A summary of all the unsupported ops in model, along with reasons and node index ranges.
"""
op_width = max(map(len, list(final_unsupported.keys()) + ["Operator "]))
reason_width = max(len(reason) for node_index_map in final_unsupported.values() for reason in node_index_map.keys())
summary = "===== Summary =====\n"
header = "{:{op_width}}| {:7} | {:{reason_width}} | {:}\n".format(
"Operator", "Count", "Reason", "Nodes", op_width=op_width, reason_width=reason_width
)
summary += header + "-" * len(header) + "\n"
for op, node_index_map in final_unsupported.items():
for reason, node_indices in node_index_map.items():
summary += "{:{op_width}}| {:7} | {:{reason_width}} | {:}\n".format(
op, len(node_indices), reason, node_indices, op_width=op_width, reason_width=reason_width
)
return summary
class Capability(Tool):
"""
Determine the capability of TensorRT to run an ONNX graph. The graph will be partitioned into supported and unsupported subgraphs.
"""
def __init__(self):
super().__init__("capability")
self.subscribe_args(ModelArgs(model_required=True, inputs=None, model_type="onnx"))
self.subscribe_args(OnnxShapeInferenceArgs(default=True))
self.subscribe_args(OnnxLoaderArgs(output_prefix=None))
# Disallow ext data path since we're writing multiple models - otherwise, it'll be clobbered each time.
self.subscribe_args(
OnnxSaveArgs(
allow_ext_data_path=False,
custom_help="Directory to write out supported and unsupported subgraphs. "
"Defaults to 'polygraphy_capability_dumps' in the current directory",
default_output_path="polygraphy_capability_dumps",
)
)
def run(self, args):
supported, nodelists, _ = supports_model(self.arg_groups[ModelArgs].model_file)
if supported:
G_LOGGER.info("Graph is fully supported by TensorRT; Will not generate subgraphs.")
return
parent_graph = onnx_backend.gs_from_onnx(self.arg_groups[OnnxLoaderArgs].load_onnx())
def partition(nodelists, offset):
"""
Partitions a set of subgraphs into supported and unsupported subgraphs.
Args:
nodelists (List[Tuple[List[int], bool]]):
A list that maps node indices to a boolean indicating whether they
are supported by TensorRT.
offset (int): The offset to add to node indices so they are relative to the parent graph.
Returns:
List[List[int]]:
A list of subgraphs supported by TensorRT, each described by a list of node indices.
"""
supported_subgraphs = []
for (node_indices, supported) in nodelists:
if supported:
supported_subgraphs.append([index + offset for index in node_indices])
continue
start = node_indices[0] + offset
end = node_indices[-1] + offset + 1
subgraph_path = save_subgraph(
self.arg_groups[OnnxSaveArgs],
parent_graph,
start,
end,
prefix="intermediate_",
use_tmp_file=True,
)
_, new_nodelists, _ = supports_model(subgraph_path)
# Recursively partition each unsupported subgraph.
supported_subgraphs += partition(new_nodelists, start)
return supported_subgraphs
supported_subgraphs = partition(nodelists, offset=0)
unsupported_node_dict = UnsupportedNodeDict()
def save_unsupported_graph(start, end):
"""
Saves an unsupported subgraph, determines the error reason and adds it
to unsupported_node_dict
Args:
start (int): The (inclusive) index of the start node.
end (int): The (exclusive) index of the end node.
"""
subgraph_path = save_subgraph(self.arg_groups[OnnxSaveArgs], parent_graph, start, end, "unsupported")
_, _, parser = supports_model(subgraph_path)
err_string = (
" | ".join([str(parser.get_error(err_idx)) for err_idx in range(parser.num_errors)]) or "UNKNOWN ERROR"
)
unsupported_node_dict.add(parent_graph.nodes[start].op, err_string, [start, end])
# Log errors for all the unsupported graphs between supported subgraphs.
for index, subg_node_idxs in enumerate(supported_subgraphs):
save_subgraph(
self.arg_groups[OnnxSaveArgs],
parent_graph,
subg_node_idxs[0],
subg_node_idxs[-1] + 1,
"supported",
)
if index == 0 and subg_node_idxs[0] != 0:
save_unsupported_graph(0, subg_node_idxs[0])
if index == len(supported_subgraphs) - 1 and supported_subgraphs[-1][-1] != len(parent_graph.nodes) - 1:
save_unsupported_graph(subg_node_idxs[-1] + 1, len(parent_graph.nodes))
if index < len(supported_subgraphs) - 1:
next_subg_node_idxs = supported_subgraphs[index + 1]
save_unsupported_graph(subg_node_idxs[-1] + 1, next_subg_node_idxs[0])
summary = gen_results_summary(unsupported_node_dict)
G_LOGGER.finish(summary)
util.save_file(
summary, os.path.join(self.arg_groups[OnnxSaveArgs].path, "results.txt"), "w", description="results"
)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/inspect/subtool/capability.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import util
from polygraphy.common import TensorMetadata
from polygraphy.comparator import RunResults
from polygraphy.comparator import util as comp_util
from polygraphy.json import load_json
from polygraphy.logger import G_LOGGER
from polygraphy.tools.base import Tool
class Data(Tool):
"""
Display information about inference inputs and outputs saved from Polygraphy's Comparator.run()
(for example, outputs saved by `--save-outputs` or inputs saved by `--save-inputs` from `polygraphy run`).
"""
def __init__(self):
super().__init__("data")
def add_parser_args(self, parser):
parser.add_argument("path", help="Path to a file containing input or output data from Polygraphy")
parser.add_argument(
"-a",
"--all",
help="Show information on all iterations present in the data instead of just the first",
action="store_true",
)
parser.add_argument(
"-s", "--show-values", help="Show values of the tensors instead of just metadata", action="store_true"
)
parser.add_argument("--histogram", help="Show a histogram of the value distribution", action="store_true")
def run(self, args):
# Note: It's important we have encode/decode JSON methods registered
# for the types we care about, e.g. RunResults. Importing the class should generally guarantee this.
data = load_json(args.path)
def meta_from_iter_result(iter_result):
meta = TensorMetadata()
for name, arr in iter_result.items():
meta.add(name, dtype=arr.dtype, shape=arr.shape)
return meta
def str_from_iters(iters):
out_str = ""
for index, iter_result in enumerate(iters):
iter_meta = meta_from_iter_result(iter_result)
indent = 1
if len(iters) > 1 and args.all:
out_str += util.indent_block("\n-- Iteration: {:}\n".format(index), indent - 1)
indent = 2
for name, arr in iter_result.items():
out_str += util.indent_block(
"\n{:} {:} | Stats: {:}".format(name, iter_meta[name], comp_util.str_output_stats(arr)),
indent - 1,
)
if args.histogram:
out_str += "\n{:}".format(util.indent_block(comp_util.str_histogram(arr), indent))
if args.show_values:
out_str += "\n{:}".format(util.indent_block(str(arr), indent))
if indent == 2:
out_str += "\n"
if not args.all:
break
return out_str
def display_results(results):
results_str = ""
results_str += "==== Run Results ({:} runners) ====\n\n".format(len(results))
max_runner_width = max(len(runner_name) for runner_name in results.keys())
for runner_name, iters in results.items():
results_str += "---- {:<{max_runner_width}} ({:} iterations) ----\n".format(
runner_name, len(iters), max_runner_width=max_runner_width
)
results_str += str_from_iters(iters) + "\n"
results_str = util.indent_block(results_str, level=0).strip()
G_LOGGER.info(results_str)
def display_inputs(input_data):
inputs_str = ""
inputs_str += "==== Data ({:} iterations) ====\n".format(len(input_data))
inputs_str += str_from_iters(input_data) + "\n"
inputs_str = util.indent_block(inputs_str, level=0).strip()
G_LOGGER.info(inputs_str)
if isinstance(data, RunResults):
display_results(data)
else:
if not util.is_sequence(data):
data = [data]
display_inputs(data)
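# Example invocations (sketch):
#   polygraphy inspect data outputs.json --show-values
#   polygraphy inspect data inputs.json --all --histogram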
| TensorRT-master | tools/Polygraphy/polygraphy/tools/inspect/subtool/data.py |
from polygraphy.tools.surgeon.surgeon import Surgeon
| TensorRT-master | tools/Polygraphy/polygraphy/tools/surgeon/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.base import Tool
from polygraphy.tools.surgeon.subtool import Extract, Insert, Sanitize
################################# MAIN TOOL #################################
class Surgeon(Tool):
"""
Modify ONNX models.
"""
def __init__(self):
super().__init__("surgeon")
def add_parser_args(self, parser):
subparsers = parser.add_subparsers(title="Surgical Instruments", dest="instrument")
subparsers.required = True
SURGEON_SUBTOOLS = [
Extract(),
Sanitize(),
Insert(),
]
for subtool in SURGEON_SUBTOOLS:
subtool.setup_parser(subparsers)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/surgeon/surgeon.py |
from polygraphy.tools.surgeon.subtool.extract import Extract
from polygraphy.tools.surgeon.subtool.insert import Insert
from polygraphy.tools.surgeon.subtool.sanitize import Sanitize
| TensorRT-master | tools/Polygraphy/polygraphy/tools/surgeon/subtool/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import ModelArgs, OnnxLoaderArgs, OnnxSaveArgs, OnnxShapeInferenceArgs
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.surgeon.subtool.base import BaseSurgeonSubtool
gs = mod.lazy_import("onnx_graphsurgeon")
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
class OnnxNodeArgs(BaseArgs):
def add_to_parser(self, parser):
node_args = parser.add_argument_group("Inserted Node", "Options for the node to insert")
node_args.add_argument(
"--inputs",
help="The names of input tensors for the new node. Order will be preserved. "
"Format: --inputs <name>. For example: --inputs name0 name1",
nargs="+",
required=True,
)
node_args.add_argument(
"--outputs",
help="The names of output tensors for the new node. Order will be preserved. "
"If an output tensor is also specified as an input, a new tensor will be generated for the output"
"Format: --outputs <name>. For example: --outputs name0 name1",
nargs="+",
required=True,
)
node_args.add_argument("--op", help="The ONNX op to use for the new node", required=True)
node_args.add_argument("--name", help="The name to use for the new node", default=None)
node_args.add_argument(
"--attrs",
help="Attributes to set in the new node. "
"Format: --attrs <name>=value. For example: --attrs axis=1 keepdims=1. "
"Attributes of type: float, int, str, and lists of these types are supported. "
"Numbers including a decimal point will always be parsed as floats, and quoted values "
"(e.g. --attrs name='53') will always be parsed as strings. Values enclosed in brackets "
"(e.g. --attrs axes=[0,1]) will be parsed as lists. ",
nargs="+",
default=[],
)
def parse(self, args):
self.op = args_util.get(args, "op")
self.name = args_util.get(args, "name")
self.attrs = args_util.parse_dict_with_default(args_util.get(args, "attrs"), sep="=")
self.inputs = args_util.get(args, "inputs")
self.outputs = args_util.get(args, "outputs")
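# Example invocation of the subtool below (sketch):
#   polygraphy surgeon insert model.onnx -o out.onnx \
#       --inputs x --outputs y --op ReduceSum --attrs axes=[1] keepdims=0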
class Insert(BaseSurgeonSubtool):
"""
[EXPERIMENTAL] Insert a single node into an ONNX model with the specified inputs and outputs.
Any existing subgraph between the inputs and outputs is replaced.
"""
def __init__(self):
super().__init__("insert")
self.subscribe_args(OnnxNodeArgs())
self.subscribe_args(ModelArgs(model_required=True, inputs=None, model_type="onnx"))
self.subscribe_args(OnnxShapeInferenceArgs())
self.subscribe_args(OnnxLoaderArgs(output_prefix=None))
self.subscribe_args(OnnxSaveArgs(infer_shapes=True, required=True))
def run_impl(self, args):
graph = onnx_backend.gs_from_onnx(super().load_model())
TENSOR_MAP = graph.tensors()
def get_tensor(name):
if name not in TENSOR_MAP:
G_LOGGER.critical("Tensor: {:} does not exist in the model.".format(name))
return TENSOR_MAP[name]
TENSOR_NAME_SUFFIX = "_polygraphy_surgeon_insert_output"
output_tensors = []
for name in self.arg_groups[OnnxNodeArgs].outputs:
if name in self.arg_groups[OnnxNodeArgs].inputs:
# When the new node's input == output, we need to generate a new tensor
# If the tensor was a graph output, try to preserve the name.
inp_tensor = get_tensor(name)
if inp_tensor in graph.outputs:
inp_tensor.name += TENSOR_NAME_SUFFIX
tensor = gs.Variable(name=name)
else:
tensor = gs.Variable(name=name + TENSOR_NAME_SUFFIX)
def replace_tensor(tensors):
# This is needed to preserve ordering and handle cases where the tensor shows up more than once.
for index, t in enumerate(tensors):
if t.name == inp_tensor.name:
tensors[index] = tensor
for out_node in inp_tensor.outputs:
replace_tensor(out_node.inputs)
replace_tensor(graph.outputs)
G_LOGGER.verbose("Generating new tensor for output: {:}".format(tensor))
else:
tensor = get_tensor(name)
tensor.inputs.clear()
output_tensors.append(tensor)
input_tensors = [get_tensor(name) for name in self.arg_groups[OnnxNodeArgs].inputs]
new_node = gs.Node(
op=self.arg_groups[OnnxNodeArgs].op,
name=self.arg_groups[OnnxNodeArgs].name,
attrs=self.arg_groups[OnnxNodeArgs].attrs,
inputs=input_tensors,
outputs=output_tensors,
)
G_LOGGER.verbose("Generated new node: {:}".format(new_node))
# Assuming the graph is topologically sorted, the node needs to be inserted
# after its last input node to maintain the sorting.
with graph.node_ids():
# Nodes with no inputs can be inserted at index 0
insert_index = max([node.id + 1 for inp in input_tensors for node in inp.inputs] + [0])
graph.nodes.insert(insert_index, new_node)
super().save_model(super().export_graph(graph.cleanup()))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/surgeon/subtool/insert.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.tools import util as tools_util
from polygraphy.tools.args import DataLoaderArgs, ModelArgs, OnnxLoaderArgs, OnnxSaveArgs, OnnxShapeInferenceArgs
from polygraphy.tools.surgeon.subtool.base import BaseSurgeonSubtool
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
onnx_util = mod.lazy_import("polygraphy.backend.onnx.util")
gs = mod.lazy_import("onnx_graphsurgeon")
class Sanitize(BaseSurgeonSubtool):
"""
Clean up, optimize, and/or change input shapes in an ONNX model.
"""
def __init__(self):
super().__init__("sanitize")
self.subscribe_args(
ModelArgs(
model_required=True,
inputs="--override-inputs",
model_type="onnx",
inputs_doc="Override input shapes in the model for the given inputs",
)
)
self.subscribe_args(DataLoaderArgs())
self.subscribe_args(OnnxShapeInferenceArgs(default=True, enable_force_fallback=True))
self.subscribe_args(OnnxLoaderArgs(output_prefix=""))
self.subscribe_args(OnnxSaveArgs(infer_shapes=True, required=True))
def add_parser_args(self, parser):
const_fold_args = parser.add_argument_group("Constant Folding", "Options for folding constants")
const_fold_args.add_argument(
"--fold-constants",
help="Fold constants in the graph by computing subgraphs whose values "
"are not dependent on runtime inputs.",
action="store_true",
default=None,
)
const_fold_args.add_argument(
"--num-passes",
"--num-const-fold-passes",
help="The number of constant folding passes to run. "
"Sometimes, subgraphs that compute tensor shapes may not be foldable in a single pass. "
"If not specified, Polygraphy will automatically determine the number of passes required. ",
type=int,
default=None,
dest="num_const_fold_passes",
)
const_fold_args.add_argument(
"--partitioning",
help="Controls how to partition the graph during constant folding: {{"
"'basic': Partition the graph so failures in one part do not affect other parts, "
"'recursive': In addition to partitioning the graph, partition partitions where needed}} ",
choices=["basic", "recursive"],
default=None,
)
const_fold_args.add_argument(
"--no-fold-shapes",
help="Disable folding Shape nodes and subgraphs that operate on shapes",
dest="fold_shapes",
default=True,
action="store_false",
)
const_fold_args.add_argument(
"--no-per-pass-shape-inference",
help="Disable shape inference between passes of constant folding",
dest="per_pass_shape_inference",
default=True,
action="store_false",
)
parser.add_argument(
"--cleanup",
help="Run dead layer removal on the graph. This is generally not required if other options are set. ",
action="store_true",
default=False,
)
super().add_parser_args(parser)
def run_impl(self, args):
# First do all processing that requires an ONNX-GraphSurgeon graph, then do everything
# that operates on the ONNX model. This lets us avoid ONNX-GraphSurgeon import if we don't
# need it.
def do_graph_processing(model):
graph = None
rerun_shape_inference = False
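            # Convert to an ONNX-GraphSurgeon graph lazily, so the import cost is only
            # paid when an option actually requires graph-level processing.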
def get_graph():
nonlocal graph
if graph is None:
graph = onnx_backend.gs_from_onnx(model)
return graph
user_input_metadata = self.arg_groups[ModelArgs].input_shapes
if user_input_metadata:
graph = get_graph()
graph = tools_util.override_input_shapes(graph, user_input_metadata)
rerun_shape_inference = True
if self.arg_groups[OnnxShapeInferenceArgs].force_fallback:
_, layerwise_meta = self.arg_groups[OnnxShapeInferenceArgs].fallback_inference(model)
graph = get_graph()
onnx_util.set_shapes_from_layerwise_meta(graph, layerwise_meta)
if args.cleanup:
graph = get_graph()
graph.cleanup()
if graph is not None:
model = gs.export_onnx(graph)
return model, rerun_shape_inference
def do_model_processing(model):
if args.fold_constants:
model = onnx_backend.fold_constants(
model,
num_passes=args.num_const_fold_passes,
do_shape_inference=self.arg_groups[OnnxShapeInferenceArgs].do_shape_inference
if args.per_pass_shape_inference
else False,
fold_shapes=args.fold_shapes,
partitioning=args.partitioning,
)
return model
model = super().load_model()
model, rerun_shape_inference = do_graph_processing(model)
if rerun_shape_inference and self.arg_groups[OnnxShapeInferenceArgs].do_shape_inference:
model = onnx_backend.infer_shapes(model)
model = do_model_processing(model)
super().save_model(model)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/surgeon/subtool/sanitize.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from polygraphy import mod
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import DataLoaderArgs, ModelArgs, OnnxLoaderArgs, OnnxSaveArgs, OnnxShapeInferenceArgs
from polygraphy.tools.args import util as args_util
from polygraphy.tools.surgeon.subtool.base import BaseSurgeonSubtool
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
onnx_util = mod.lazy_import("polygraphy.backend.onnx.util")
class Extract(BaseSurgeonSubtool):
"""
Extract a subgraph from an ONNX model based on the specified inputs and outputs.
"""
def __init__(self):
super().__init__("extract")
self.subscribe_args(
ModelArgs(
model_required=True,
inputs="--model-inputs",
model_type="onnx",
inputs_doc="Input shapes to use when generating data to run fallback shape inference. "
"Has no effect if fallback shape inference is not run",
)
)
self.subscribe_args(DataLoaderArgs())
self.subscribe_args(OnnxShapeInferenceArgs(default=False, enable_force_fallback=True))
self.subscribe_args(OnnxLoaderArgs(output_prefix=None))
self.subscribe_args(OnnxSaveArgs(required=True))
def add_parser_args(self, parser):
parser.add_argument(
"--inputs",
dest="input_meta",
help="Input metadata for subgraph (names, shapes, and data types). "
"Use 'auto' to make `extract` determine these automatically. Format: "
"--inputs <name>:<shape>:<dtype>. "
"For example: --inputs input0:[1,3,224,224]:float32 input1:auto:auto. "
"If omitted, uses the current model inputs. ",
nargs="+",
default=[],
)
parser.add_argument(
"--outputs",
dest="output_meta",
help="Output metadata for subgraph (names and data types). "
"Use 'auto' to make `extract` determine these automatically. Format: "
"--outputs <name>:<dtype>. "
"For example: --outputs output0:float32 output1:auto. "
"If omitted, uses the current model outputs. ",
nargs="+",
default=[],
)
super().add_parser_args(parser)
def run_impl(self, args):
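        # Inputs are considered incomplete if either the dtype or the shape is unknown;
        # outputs only require a dtype.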
def missing_meta_tensors(input_metadata, output_metadata):
missing = TensorMetadata()
for name, (dtype, shape) in input_metadata.items():
if dtype is None or shape is None:
missing.add(name, dtype, shape)
for name, (dtype, shape) in output_metadata.items():
if dtype is None:
missing.add(name, dtype, shape)
return missing
model = super().load_model()
user_input_metadata = args_util.parse_meta(args.input_meta)
user_output_metadata = args_util.parse_meta(args.output_meta, includes_shape=False)
        # Loads an ONNX-GS graph and creates new I/O metadata, filling in info missing from user_input/output_metadata.
def load_graph_and_io_meta(model):
graph = onnx_backend.gs_from_onnx(model)
TENSOR_MAP = graph.tensors()
def get_tensor(name):
if name not in TENSOR_MAP:
G_LOGGER.critical("Tensor: {:} does not exist in the model.".format(name))
return TENSOR_MAP[name]
# Makes a TensorMetadata for inputs/outputs using either the user provided information
# or details derived from tensors.
def make_io_meta(user_meta, tensors):
if not user_meta:
return onnx_util.meta_from_gs_tensors(tensors)
new_meta = copy.copy(user_meta)
for name, (dtype, shape) in new_meta.items():
tensor = get_tensor(name)
new_meta.add(name, dtype or tensor.dtype, shape or tensor.shape)
return new_meta
input_metadata = make_io_meta(user_input_metadata, graph.inputs)
output_metadata = make_io_meta(user_output_metadata, graph.outputs)
return graph, input_metadata, output_metadata
graph, input_metadata, output_metadata = load_graph_and_io_meta(model)
# If we've already done ONNX shape inference, we should not do it again here.
skip_shape_inference = (
self.arg_groups[OnnxShapeInferenceArgs].force_fallback
or self.arg_groups[OnnxShapeInferenceArgs].do_shape_inference
)
if missing_meta_tensors(input_metadata, output_metadata) and not skip_shape_inference:
G_LOGGER.info(
"Running ONNX shape inference to derive shapes and/or data types for `auto` arguments.\n"
"To avoid this, you can specify the shapes and data types explicitly."
)
model = onnx_backend.infer_shapes(model)
graph, input_metadata, output_metadata = load_graph_and_io_meta(model)
missing_tensors = missing_meta_tensors(input_metadata, output_metadata)
if missing_tensors or self.arg_groups[OnnxShapeInferenceArgs].force_fallback:
# Use ONNX runtime with static shapes to infer shapes when all else fails
# Returns a TensorMetadata for all tensors in the graph.
if not self.arg_groups[OnnxShapeInferenceArgs].force_fallback:
G_LOGGER.warning(
"Some tensor shapes or dtypes are missing in the model. Note: Tensors with missing information:\n{:}\n"
"Will run inference to determine shapes. This may cause some dynamic "
"dimensions to become static.\n"
"To avoid this, please provide metadata on the command-line. ".format(missing_tensors)
)
else:
G_LOGGER.info("Forcing fallback shape inference. This will cause dynamic dimensions to become static.")
_, layerwise_meta = self.arg_groups[OnnxShapeInferenceArgs].fallback_inference(model)
def update_meta_from_layerwise(meta, user_meta, set_shapes=True):
for name in meta:
user_dtype, user_shape = None, None
if name in user_meta:
user_dtype, user_shape = user_meta[name].dtype, user_meta[name].shape
# Choose between what the user set, what's in the model, and what
# fallback shape inference said.
def choose_meta(user, model, fallback):
if self.arg_groups[OnnxShapeInferenceArgs].force_fallback:
return user or fallback
return user or model or fallback
if name in layerwise_meta:
meta[name].dtype = choose_meta(user_dtype, meta[name].dtype, layerwise_meta[name].dtype)
if set_shapes:
meta[name].shape = choose_meta(user_shape, meta[name].shape, layerwise_meta[name].shape)
G_LOGGER.verbose("Updated tensor: {:} metadata to: {:}".format(name, meta[name]))
return meta
input_metadata = update_meta_from_layerwise(input_metadata, user_input_metadata)
output_metadata = update_meta_from_layerwise(
output_metadata, user_output_metadata, set_shapes=self.arg_groups[OnnxShapeInferenceArgs].force_fallback
)
graph = onnx_backend.extract_subgraph(graph, input_metadata, output_metadata)
super().save_model(super().export_graph(graph))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/surgeon/subtool/extract.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import LoggerArgs, OnnxLoaderArgs, OnnxSaveArgs
from polygraphy.tools.base import Tool
gs = mod.lazy_import("onnx_graphsurgeon")
onnx_util = mod.lazy_import("polygraphy.backend.onnx.util")
class BaseSurgeonSubtool(Tool):
def __init__(self, name):
super().__init__(name)
def load_model(self, log_model=True):
model = self.arg_groups[OnnxLoaderArgs].load_onnx()
if log_model:
G_LOGGER.info("Original Model:\n{:}\n\n".format(onnx_util.str_from_onnx(model, mode="none")))
return model
# Since new graph outputs may be added, and we don't know the types,
# we skip type checks in ONNX-GraphSurgeon.
def export_graph(self, graph, do_type_check=False):
return gs.export_onnx(graph, do_type_check=do_type_check)
def save_model(self, model, log_model=True):
model = self.arg_groups[OnnxSaveArgs].save_onnx(model)
if log_model:
G_LOGGER.info("New Model:\n{:}\n\n".format(onnx_util.str_from_onnx(model, mode="none")))
def run_impl(self, args):
raise NotImplementedError("Subclasses must implement run_impl!")
def run(self, args):
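        # Keep the ONNX-GraphSurgeon logger in sync with Polygraphy's logger severity
        # and formatting options.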
def set_onnx_gs_logging_level(sev):
ONNX_GS_LOGGER = gs.logger.G_LOGGER
if sev >= G_LOGGER.CRITICAL:
ONNX_GS_LOGGER.severity = ONNX_GS_LOGGER.CRITICAL
elif sev >= G_LOGGER.ERROR:
ONNX_GS_LOGGER.severity = ONNX_GS_LOGGER.ERROR
elif sev >= G_LOGGER.WARNING:
ONNX_GS_LOGGER.severity = ONNX_GS_LOGGER.WARNING
elif sev >= G_LOGGER.INFO:
ONNX_GS_LOGGER.severity = ONNX_GS_LOGGER.INFO
elif sev >= G_LOGGER.EXTRA_VERBOSE:
ONNX_GS_LOGGER.severity = ONNX_GS_LOGGER.DEBUG
elif sev >= G_LOGGER.SUPER_VERBOSE:
ONNX_GS_LOGGER.severity = ONNX_GS_LOGGER.VERBOSE
else:
ONNX_GS_LOGGER.severity = ONNX_GS_LOGGER.ULTRA_VERBOSE
fmts = self.arg_groups[LoggerArgs].log_format
for fmt in fmts:
if fmt == "no-colors":
ONNX_GS_LOGGER.colors = False
elif fmt == "timestamp":
ONNX_GS_LOGGER.timestamp = True
elif fmt == "line-info":
ONNX_GS_LOGGER.line_info = True
G_LOGGER.register_callback(set_onnx_gs_logging_level)
return self.run_impl(args)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/surgeon/subtool/base.py |
from polygraphy.tools.template.template import Template
| TensorRT-master | tools/Polygraphy/polygraphy/tools/template/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.base import Tool
from polygraphy.tools.template.subtool import TrtNetwork, TrtConfig
class Template(Tool):
"""
[EXPERIMENTAL] Generate template files.
"""
def __init__(self):
super().__init__("template")
def add_parser_args(self, parser):
subparsers = parser.add_subparsers(title="Template Subtools", dest="subtool")
subparsers.required = True
SUBTOOLS = [
TrtNetwork(),
TrtConfig(),
]
for subtool in SUBTOOLS:
subtool.setup_parser(subparsers)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/template/template.py |
from polygraphy.tools.template.subtool.trt_network import TrtNetwork
from polygraphy.tools.template.subtool.trt_config import TrtConfig
| TensorRT-master | tools/Polygraphy/polygraphy/tools/template/subtool/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from polygraphy.tools.args import (
ModelArgs,
DataLoaderArgs,
TrtConfigArgs,
)
from polygraphy.tools.base import Tool
from polygraphy.tools.script import Script, inline, safe
class TrtConfig(Tool):
"""
Generate a template script to create a TensorRT builder configuration.
"""
def __init__(self):
super().__init__("trt-config")
self.subscribe_args(ModelArgs(model_required=False))
self.subscribe_args(DataLoaderArgs())
self.subscribe_args(TrtConfigArgs())
def add_parser_args(self, parser):
parser.add_argument(
"-o", "--output", help="Path to save the generated script.", type=argparse.FileType("w"), required=True
)
def run(self, args):
script = Script(summary="Creates a TensorRT Builder Configuration.", always_create_runners=False)
script.add_import(imports=["func"], frm="polygraphy")
script.add_import(imports=["tensorrt as trt"])
loader_name = self.arg_groups[TrtConfigArgs].add_trt_config_loader(script)
if not loader_name:
script.add_import(imports=["CreateConfig"], frm="polygraphy.backend.trt")
loader_name = script.add_loader(safe("CreateConfig()"), "create_trt_config")
params = safe("config")
script.append_suffix(safe("@func.extend({:})", inline(loader_name)))
script.append_suffix(safe("def load_config({:}):", inline(params)))
script.append_suffix(
safe("\tpass # TODO: Set up the builder configuration here. This function should not return anything.")
)
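        # The generated script body is roughly (a sketch, not verbatim output; the
        # decorated loader name depends on the TensorRT config options used):
        #
        #   @func.extend(create_trt_config)
        #   def load_config(config):
        #       pass  # TODO: Set up the builder configuration here.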
script.save(args.output)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/template/subtool/trt_config.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from polygraphy.tools.args import (
ModelArgs,
OnnxLoaderArgs,
Tf2OnnxLoaderArgs,
TfLoaderArgs,
TrtNetworkLoaderArgs,
TrtPluginLoaderArgs,
)
from polygraphy.tools.base import Tool
from polygraphy.tools.script import Script, inline, safe
class TrtNetwork(Tool):
"""
Generate a template script to create a TensorRT network using the TensorRT network API,
optionally starting from an existing model.
"""
def __init__(self):
super().__init__("trt-network")
self.subscribe_args(ModelArgs(model_required=False, inputs=None))
self.subscribe_args(TfLoaderArgs(artifacts=False))
self.subscribe_args(Tf2OnnxLoaderArgs())
self.subscribe_args(OnnxLoaderArgs())
self.subscribe_args(TrtPluginLoaderArgs())
self.subscribe_args(TrtNetworkLoaderArgs())
def add_parser_args(self, parser):
parser.add_argument(
"-o", "--output", help="Path to save the generated script.", type=argparse.FileType("w"), required=True
)
def run(self, args):
script = Script(
summary="Creates a TensorRT Network using the Network API.", always_create_runners=False
)
script.add_import(imports=["func"], frm="polygraphy")
script.add_import(imports=["tensorrt as trt"])
if self.arg_groups[ModelArgs].model_file is not None:
loader_name = self.arg_groups[TrtNetworkLoaderArgs].add_trt_network_loader(script)
params = safe("builder, network, parser")
else:
script.add_import(imports=["CreateNetwork"], frm="polygraphy.backend.trt")
loader_name = safe("CreateNetwork()")
params = safe("builder, network")
script.append_suffix(safe("@func.extend({:})", inline(loader_name)))
script.append_suffix(safe("def load_network({:}):", inline(params)))
script.append_suffix(safe("\tpass # TODO: Set up the network here. This function should not return anything."))
script.save(args.output)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/template/subtool/trt_network.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import copy
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import (
ComparatorCompareArgs,
ComparatorRunArgs,
DataLoaderArgs,
LoggerArgs,
ModelArgs,
OnnxLoaderArgs,
OnnxrtRunnerArgs,
OnnxSaveArgs,
OnnxShapeInferenceArgs,
PluginRefArgs,
Tf2OnnxLoaderArgs,
TfConfigArgs,
TfLoaderArgs,
TfRunnerArgs,
TrtConfigArgs,
TrtEngineLoaderArgs,
TrtEngineSaveArgs,
TrtLegacyArgs,
TrtNetworkLoaderArgs,
TrtPluginLoaderArgs,
TrtRunnerArgs,
)
from polygraphy.tools.base import Tool
from polygraphy.tools.script import Script, inline, safe
# FIXME: This should be moved into tools/args/
def add_runner_args(parser):
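    # Custom argparse action that records runner flags in the order they appear on
    # the command line, so inference runs in the order the user requested.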
class StoreRunnerOrdered(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if not hasattr(namespace, "runners"):
namespace.runners = []
namespace.runners.append(option_string.lstrip("-").replace("-", "_"))
runner_args = parser.add_argument_group(
"Runners", "Options for selecting runners. Zero or more runners may be specified"
)
def add_runner(option, help):
runner_args.add_argument(option, help=help, action=StoreRunnerOrdered, dest="runners", default=[], nargs=0)
add_runner("--trt", help="Run inference using TensorRT")
add_runner(
"--trt-legacy",
help="Run inference using Legacy TensorRT Runner. Only supports networks using implicit batch mode",
)
add_runner("--tf", help="Run inference using TensorFlow")
add_runner("--onnxrt", help="Run inference using ONNX Runtime")
add_runner(
"--pluginref",
help="Run inference for models containing single TensorRT plugins using a CPU reference implementation",
)
# Generate a summary line to add as a comment to the script
def generate_summary(model_file, runners, load_results):
def join_list(lst):
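        # Joins items into a human-readable list, e.g. ["A", "B", "C"] -> "A, B, and C".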
new_list = copy.copy(lst)
if len(new_list) > 1:
new_list[-1] = "and {:}".format(new_list[-1])
return ", ".join(new_list) if len(new_list) > 2 else " ".join(new_list)
summary = ""
if runners:
summary += "This script "
if len(runners) > 1:
summary += "compares "
else:
summary += "runs "
if model_file:
summary += "{:} ".format(model_file)
runner_names = {
"trt": "TensorRT",
"trt_legacy": "TensorRT Legacy",
"tf": "TensorFlow",
"onnxrt": "ONNX Runtime",
"pluginref": "CPU plugin references",
}
runners = [runner_names[runner] for runner in runners]
summary += "between " if len(runners) > 1 else "using "
summary += join_list(runners) + "."
if load_results:
summary += "\nIt will check against outputs stored in {:}\n".format(join_list(load_results))
return summary
################################# TOOL #################################
class Run(Tool):
"""
Run inference and compare results across backends.
"""
def __init__(self):
super().__init__("run")
self.subscribe_args(ModelArgs())
self.subscribe_args(TfLoaderArgs(tftrt=True))
self.subscribe_args(TfConfigArgs())
self.subscribe_args(TfRunnerArgs())
self.subscribe_args(Tf2OnnxLoaderArgs())
self.subscribe_args(OnnxSaveArgs(output="save-onnx", short_opt=None))
self.subscribe_args(OnnxShapeInferenceArgs())
self.subscribe_args(OnnxLoaderArgs(save=True))
self.subscribe_args(OnnxrtRunnerArgs())
self.subscribe_args(PluginRefArgs())
self.subscribe_args(
TrtConfigArgs(random_data_calib_warning=False)
) # We run calibration with the inference-time data
self.subscribe_args(TrtPluginLoaderArgs())
self.subscribe_args(TrtNetworkLoaderArgs())
self.subscribe_args(TrtEngineSaveArgs(output="save-engine", short_opt=None))
self.subscribe_args(TrtEngineLoaderArgs(save=True))
self.subscribe_args(TrtRunnerArgs())
self.subscribe_args(TrtLegacyArgs())
self.subscribe_args(DataLoaderArgs())
self.subscribe_args(ComparatorRunArgs())
self.subscribe_args(ComparatorCompareArgs())
def add_parser_args(self, parser):
parser.add_argument(
"--gen",
"--gen-script",
help="Path to save a generated Python script, that will do exactly "
"what `run` would. When this option is enabled, `run` will just save the script and exit. "
"Use `-` to print the script to the standard output",
type=argparse.FileType("w"),
dest="gen_script",
)
add_runner_args(parser)
def run(self, args):
if self.arg_groups[ModelArgs].model_file is None and args.runners:
G_LOGGER.critical(
"One or more runners was specified, but no model file was provided. Make sure you've specified the model path, "
"and also that it's not being consumed as an argument for another parameter"
)
script = self.build_script(args)
if args.gen_script:
script.save(args.gen_script)
else:
exec(str(script))
# Generates a script based on command-line arguments
def build_script(self, args):
script = Script(
summary=generate_summary(self.arg_groups[ModelArgs].model_file, args.runners, args.load_results)
)
self.arg_groups[LoggerArgs].add_to_script(script)
if not args.runners:
G_LOGGER.warning("No runners have been selected. Inference will not be run!")
for runner_arg in args.runners:
add_runner_func = {
"tf": self.arg_groups[TfRunnerArgs].add_to_script,
"onnxrt": self.arg_groups[OnnxrtRunnerArgs].add_to_script,
"trt": self.arg_groups[TrtRunnerArgs].add_to_script,
"trt_legacy": self.arg_groups[TrtLegacyArgs].add_to_script,
"pluginref": self.arg_groups[PluginRefArgs].add_to_script,
}[runner_arg]
add_runner_func(script)
RESULTS_VAR_NAME = self.arg_groups[ComparatorRunArgs].add_to_script(script)
SUCCESS_VAR_NAME = self.arg_groups[ComparatorCompareArgs].add_to_script(script, results_name=RESULTS_VAR_NAME)
script.add_import(imports=["sys"])
cmd_run = inline(safe("' '.join(sys.argv)"))
exit_status = safe(
"# Report Results\n"
"cmd_run = {cmd}\n"
"if not {success}:\n"
'\tG_LOGGER.critical("FAILED | Command: {{}}".format(cmd_run))\n'
'G_LOGGER.finish("PASSED | Command: {{}}".format(cmd_run))\n',
cmd=cmd_run,
success=SUCCESS_VAR_NAME,
)
script.append_suffix(exit_status)
return script
| TensorRT-master | tools/Polygraphy/polygraphy/tools/run/run.py |
from polygraphy.tools.run.run import Run
| TensorRT-master | tools/Polygraphy/polygraphy/tools/run/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import inline, make_invocable, make_invocable_if_nondefault, safe
@mod.export()
class ComparatorRunArgs(BaseArgs):
def __init__(self, iters=True, write=True):
super().__init__()
self._iters = iters
self._write = write
def add_to_parser(self, parser):
comparator_args = parser.add_argument_group(
"Comparator inference", "Options for running inference via Comparator.run()"
)
if self._iters:
comparator_args.add_argument(
"--warm-up",
metavar="NUM",
help="Number of warm-up runs before timing inference",
type=int,
default=None,
)
comparator_args.add_argument(
"--use-subprocess",
help="Run runners in isolated subprocesses. Cannot be used with a debugger",
action="store_true",
default=None,
)
if self._write:
comparator_args.add_argument(
"--save-inputs",
"--save-input-data",
help="[EXPERIMENTAL] Path to save inference inputs. "
"The inputs (List[Dict[str, numpy.ndarray]]) will be encoded as JSON and saved",
default=None,
dest="save_inputs",
)
comparator_args.add_argument(
"--save-outputs",
"--save-results",
help="Path to save results from runners. " "The results (RunResults) will be encoded as JSON and saved",
default=None,
dest="save_results",
)
def register(self, maker):
from polygraphy.tools.args.data_loader import DataLoaderArgs
if isinstance(maker, DataLoaderArgs):
self.data_loader_args = maker
def check_registered(self):
assert self.data_loader_args is not None, "DataLoaderArgs is required for comparator!"
def parse(self, args):
self.warm_up = args_util.get(args, "warm_up")
self.use_subprocess = args_util.get(args, "use_subprocess")
self.save_inputs = args_util.get(args, "save_inputs")
self.save_results = args_util.get(args, "save_results")
def add_to_script(self, script):
script.add_import(imports=["Comparator"], frm="polygraphy.comparator")
RESULTS_VAR_NAME = inline(safe("results"))
comparator_run = make_invocable(
"Comparator.run",
script.get_runners(),
warm_up=self.warm_up,
data_loader=self.data_loader_args.add_data_loader(script),
use_subprocess=self.use_subprocess,
save_inputs_path=self.save_inputs,
)
script.append_suffix(safe("\n# Runner Execution\n{results} = {:}", comparator_run, results=RESULTS_VAR_NAME))
if self.save_results:
G_LOGGER.verbose("Will save runner results to: {:}".format(self.save_results))
script.add_import(imports=["util"], frm="polygraphy")
script.append_suffix(
safe("\n# Save results\n{results}.save({:})", self.save_results, results=RESULTS_VAR_NAME)
)
return RESULTS_VAR_NAME
@mod.export()
class ComparatorCompareArgs(BaseArgs):
def __init__(self, load=True):
super().__init__()
self._load = load
def add_to_parser(self, parser):
comparator_args = parser.add_argument_group("Comparator comparisons", "Options for comparing inference results")
comparator_args.add_argument(
"--no-shape-check",
help="Disable checking that output shapes match exactly",
action="store_true",
default=None,
)
comparator_args.add_argument(
"--rtol",
"--rel-tol",
dest="rtol",
help="Relative tolerance for output comparison. "
"To specify per-output tolerances, use the format: --rtol [<out_name>:]<rtol>. If no output name is provided, "
"the tolerance is used for any outputs not explicitly specified. For example: "
"--rtol 1e-5 out0:1e-4 out1:1e-3",
nargs="+",
default=None,
)
comparator_args.add_argument(
"--atol",
"--abs-tol",
dest="atol",
help="Absolute tolerance for output comparison. "
"To specify per-output tolerances, use the format: --atol [<out_name>:]<atol>. If no output name is provided, "
"the tolerance is used for any outputs not explicitly specified. For example: "
"--atol 1e-5 out0:1e-4 out1:1e-3",
nargs="+",
default=None,
)
comparator_args.add_argument(
"--validate", help="Check outputs for NaNs and Infs", action="store_true", default=None
)
comparator_args.add_argument(
"--fail-fast", help="Fail fast (stop comparing after the first failure)", action="store_true", default=None
)
comparator_args.add_argument(
"--top-k",
help="[EXPERIMENTAL] Apply Top-K (i.e. find indices of K largest values) to the outputs before comparing them."
"To specify per-output top-k, use the format: --top-k [<out_name>:]<k>. If no output name is provided, "
"top-k is applied to all outputs. For example: "
"--top-k out:5",
nargs="+",
default=None,
)
comparator_args.add_argument(
"--check-error-stat",
help="The error statistic to check. "
"For details on possible values, see the documentation for CompareFunc.simple(). "
"To specify per-output values, use the format: --check-error-stat [<out_name>:]<stat>. If no output name is provided, "
"the value is used for any outputs not explicitly specified. For example: "
"--check-error-stat max out0:mean out1:median",
nargs="+",
default=None,
)
if self._load:
comparator_args.add_argument(
"--load-outputs",
"--load-results",
help="Path(s) to load results from runners prior to comparing. "
"Each file should be a JSON-ified RunResults",
nargs="+",
default=[],
dest="load_results",
)
def parse(self, args):
self.no_shape_check = args_util.get(args, "no_shape_check")
self.rtol = args_util.parse_dict_with_default(args_util.get(args, "rtol"))
self.atol = args_util.parse_dict_with_default(args_util.get(args, "atol"))
self.validate = args_util.get(args, "validate")
self.load_results = args_util.get(args, "load_results")
self.fail_fast = args_util.get(args, "fail_fast")
self.top_k = args_util.parse_dict_with_default(args_util.get(args, "top_k"))
self.check_error_stat = args_util.parse_dict_with_default(args_util.get(args, "check_error_stat"))
if self.check_error_stat:
VALID_CHECK_ERROR_STATS = ["max", "mean", "median", "elemwise"]
for stat in self.check_error_stat.values():
if stat not in VALID_CHECK_ERROR_STATS:
G_LOGGER.critical(
"Invalid choice for check_error_stat: {:}.\n"
"Note: Valid choices are: {:}".format(stat, VALID_CHECK_ERROR_STATS)
)
# FIXME: This should be a proper dependency from a RunnerArgs
self.runners = args_util.get(args, "runners", default=[])
def add_to_script(self, script, results_name):
script.add_import(imports=["Comparator"], frm="polygraphy.comparator")
if self.load_results:
script.add_import(imports=["util"], frm="polygraphy")
script.add_import(imports=["RunResults"], frm="polygraphy.comparator")
script.append_suffix(
safe(
"\n# Load results\nfor load_output in {:}:\n\t{results}.extend(RunResults.load(load_output))",
self.load_results,
results=results_name,
)
)
if self.top_k is not None:
script.add_import(imports=["PostprocessFunc"], frm="polygraphy.comparator")
script.append_suffix(
safe(
"\n# Postprocessing - Apply Top-{top_k}\n"
"{results} = Comparator.postprocess({results}, PostprocessFunc.topk_func(k={top_k}))",
top_k=self.top_k,
results=results_name,
)
)
SUCCESS_VAR_NAME = inline(safe("success"))
script.append_suffix(safe("\n{success} = True", success=SUCCESS_VAR_NAME))
if len(self.runners) > 1 or self.load_results: # Only do comparisons if there's actually something to compare.
script.append_suffix(safe("# Accuracy Comparison"))
compare_func_str = make_invocable_if_nondefault(
"CompareFunc.simple",
rtol=self.rtol,
atol=self.atol,
check_shapes=False if self.no_shape_check else None,
fail_fast=self.fail_fast,
check_error_stat=self.check_error_stat,
)
compare_func = None
if compare_func_str:
script.add_import(imports=["CompareFunc"], frm="polygraphy.comparator")
compare_func = inline(safe("compare_func"))
script.append_suffix(safe("{:} = {:}", compare_func, compare_func_str))
compare_accuracy = make_invocable(
"Comparator.compare_accuracy", results_name, compare_func=compare_func, fail_fast=self.fail_fast
)
script.append_suffix(safe("{success} &= bool({:})\n", compare_accuracy, success=SUCCESS_VAR_NAME))
if self.validate:
script.append_suffix(
safe(
"# Validation\n{success} &= Comparator.validate({results}, check_inf=True, check_nan=True)\n",
success=SUCCESS_VAR_NAME,
results=results_name,
)
)
return SUCCESS_VAR_NAME
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/comparator.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from polygraphy import mod, util
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import Script, make_invocable, make_invocable_if_nondefault, safe
@mod.export()
class DataLoaderArgs(BaseArgs):
def __init__(self):
super().__init__()
self.model_args = None
def add_to_parser(self, parser):
data_loader_args = parser.add_argument_group(
"Data Loader", "Options for controlling how input data is loaded or generated"
)
data_loader_args.add_argument(
"--seed", metavar="SEED", help="Seed to use for random inputs", type=int, default=None
)
data_loader_args.add_argument(
"--val-range",
help="Range of values to generate in the data loader. "
"To specify per-input ranges, use the format: --val-range <out_name>:[min,max]. "
"If no input name is provided, the range is used for any inputs not explicitly specified. "
"For example: --val-range [0,1] inp0:[2,50] inp1:[3.0,4.6]",
nargs="+",
default=None,
)
data_loader_args.add_argument(
"--int-min",
help="[DEPRECATED: Use --val-range] Minimum integer value for random integer inputs",
type=int,
default=None,
)
data_loader_args.add_argument(
"--int-max",
help="[DEPRECATED: Use --val-range] Maximum integer value for random integer inputs",
type=int,
default=None,
)
data_loader_args.add_argument(
"--float-min",
help="[DEPRECATED: Use --val-range] Minimum float value for random float inputs",
type=float,
default=None,
)
data_loader_args.add_argument(
"--float-max",
help="[DEPRECATED: Use --val-range] Maximum float value for random float inputs",
type=float,
default=None,
)
data_loader_args.add_argument(
"--iterations",
"--iters",
metavar="NUM",
help="Number of inference iterations for which to supply data",
type=int,
default=None,
dest="iterations",
)
data_loader_args.add_argument(
"--load-inputs",
"--load-input-data",
help="[EXPERIMENTAL] Path(s) to load inputs. The file(s) should be a JSON-ified "
"List[Dict[str, numpy.ndarray]], i.e. a list where each element is the feed_dict for a single iteration. "
"Other data loader options are ignored when this option is used",
default=[],
dest="load_inputs",
nargs="+",
)
data_loader_args.add_argument(
"--data-loader-script",
help="Path to a Python script that defines a function that loads input data. "
"The function should take no arguments and return a generator or iterable that yields input data (Dict[str, np.ndarray]). "
"When this option is specified, all other data loader arguments are ignored. ",
default=None,
)
data_loader_args.add_argument(
"--data-loader-func-name",
help="When using a data-loader-script, this specifies the name of the function "
"that loads data. Defaults to `load_data`. ",
default="load_data",
)
def register(self, maker):
from polygraphy.tools.args.model import ModelArgs
if isinstance(maker, ModelArgs):
self.model_args = maker
def parse(self, args):
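        # Returns None if every element of the tuple is None; otherwise returns the tuple as-is.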
def omit_none_tuple(tup):
if all([elem is None for elem in tup]):
return None
return tup
self.seed = args_util.get(args, "seed")
self.int_range = omit_none_tuple(tup=(args_util.get(args, "int_min"), args_util.get(args, "int_max")))
self.float_range = omit_none_tuple(tup=(args_util.get(args, "float_min"), args_util.get(args, "float_max")))
if self.int_range or self.float_range:
G_LOGGER.warning(
"The --int-min/--int-max and --float-min/--float-max options are deprecated.\n"
"Please use `--val-range` instead, which allows you to specify per-input data ranges."
)
self.val_range = args_util.parse_dict_with_default(args_util.get(args, "val_range"), cast_to=tuple)
if self.val_range is not None:
for name, vals in self.val_range.items():
if len(vals) != 2:
G_LOGGER.critical(
"In --val-range, for input: {:}, expected to receive exactly 2 values, but received {:}.\n"
"Note: Option was parsed as: input: {:}, range: {:}".format(name, len(vals), name, vals)
)
if any(not isinstance(elem, numbers.Number) for elem in vals):
G_LOGGER.critical(
"In --val-range, for input: {:}, one or more elements of the range could not be parsed as a number.\n"
"Note: Option was parsed as: input: {:}, range: {:}".format(name, name, vals)
)
self.iterations = args_util.get(args, "iterations")
self.load_inputs = args_util.get(args, "load_inputs")
self.data_loader_script = args_util.get(args, "data_loader_script")
self.data_loader_func_name = args_util.get(args, "data_loader_func_name")
def _add_to_script(self, script, user_input_metadata_str=None):
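        # `needs_invoke` tracks whether the expression added to the script is a callable
        # that must be invoked to produce the actual data loader.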
needs_invoke = False
using_random_data = False
if self.data_loader_script:
script.add_import(imports=["mod"], frm="polygraphy")
data_loader = make_invocable(
"mod.import_from_script", self.data_loader_script, name=self.data_loader_func_name
)
needs_invoke = True
elif self.load_inputs:
script.add_import(imports=["load_json"], frm="polygraphy.json")
data_loader = safe(
"[]\nfor input_data_path in {load_inputs}:"
"\n\t{data_loader}.extend(load_json(input_data_path, description='input data'))",
load_inputs=self.load_inputs,
data_loader=Script.DATA_LOADER_NAME,
)
else:
using_random_data = True
if user_input_metadata_str is None and self.model_args is not None and self.model_args.input_shapes:
user_input_metadata_str = self.model_args.input_shapes
if user_input_metadata_str:
script.add_import(imports=["TensorMetadata"], frm="polygraphy.common")
data_loader = make_invocable_if_nondefault(
"DataLoader",
seed=self.seed,
iterations=self.iterations,
input_metadata=user_input_metadata_str,
int_range=self.int_range,
float_range=self.float_range,
val_range=self.val_range,
)
if data_loader:
script.add_import(imports=["DataLoader"], frm="polygraphy.comparator")
if using_random_data != self.is_using_random_data():
G_LOGGER.internal_error("is_using_random_data() reported a false positive!")
return script.set_data_loader(data_loader), needs_invoke
def add_data_loader(self, script, *args, **kwargs):
"""
Adds a DataLoader to the script.
Args:
user_input_metadata_str (str(TensorMetadata)):
The name of a variable containing TensorMetadata.
This will control the shape and data type of the generated
data.
Returns:
str: The data loader, as a string. This may either be the variable name,
or an invocation of the data loader function.
"""
data_loader, needs_invoke = self._add_to_script(script, *args, **kwargs)
if needs_invoke:
data_loader = make_invocable(data_loader)
return data_loader
def get_data_loader(self, user_input_metadata=None):
from polygraphy.comparator import DataLoader
needs_invoke = False
# run_script expects the callable to return just the variable name, but self.add_to_script
# has 2 return values. We wrap it here to create a function with the right signature.
def add_to_script_wrapper(script, *args, **kwargs):
nonlocal needs_invoke
name, needs_invoke = self._add_to_script(script, *args, **kwargs)
return name
data_loader = util.default(args_util.run_script(add_to_script_wrapper, user_input_metadata), DataLoader())
if needs_invoke:
data_loader = data_loader()
return data_loader
def is_using_random_data(self):
"""
Whether this data loader will randomly generate data rather than use real data.
Returns:
bool
"""
return not self.data_loader_script and not self.load_inputs
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/data_loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.args.comparator import *
from polygraphy.tools.args.data_loader import *
from polygraphy.tools.args.logger import *
from polygraphy.tools.args.model import *
from polygraphy.tools.args.onnx import *
from polygraphy.tools.args.onnxrt import *
from polygraphy.tools.args.pluginref import *
from polygraphy.tools.args.tf2onnx import *
from polygraphy.tools.args.tf import *
from polygraphy.tools.args.trt import *
from polygraphy.tools.args.trt_legacy import *
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod, util
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import safe
@mod.export()
class LoggerArgs(BaseArgs):
def add_to_parser(self, parser):
logging_args = parser.add_argument_group("Logging", "Options for logging and debug output")
logging_args.add_argument(
"-v",
"--verbose",
help="Increase logging verbosity. Specify multiple times for higher verbosity",
action="count",
default=0,
)
logging_args.add_argument(
"-q",
"--quiet",
help="Decrease logging verbosity. Specify multiple times for lower verbosity",
action="count",
default=0,
)
logging_args.add_argument("--silent", help="Disable all output", action="store_true", default=None)
logging_args.add_argument(
"--log-format",
help="Format for log messages: {{'timestamp': Include timestamp, 'line-info': Include file and line number, "
"'no-colors': Disable colors}}",
choices=["timestamp", "line-info", "no-colors"],
nargs="+",
default=[],
)
logging_args.add_argument(
"--log-file",
help="Path to a file where Polygraphy logging output should be written. "
"This will not include logging output from dependencies, like TensorRT or ONNX-Runtime. ",
default=None,
)
def parse(self, args):
self.verbosity_count = args_util.get(args, "verbose") - args_util.get(args, "quiet")
self.silent = args_util.get(args, "silent")
self.log_format = args_util.get(args, "log_format", default=[])
self.log_file = args_util.get(args, "log_file")
# Enable logger settings immediately on parsing.
self.get_logger()
def add_to_script(self, script):
# Always required since it is used to print the exit message.
script.append_preimport(safe("from polygraphy.logger import G_LOGGER"))
logger_settings = []
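        # Map the net -v/-q count onto Polygraphy severities: positive counts increase
        # verbosity up to ULTRA_VERBOSE; negative counts decrease it down to CRITICAL.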
if self.verbosity_count >= 4:
logger_settings.append("G_LOGGER.severity = G_LOGGER.ULTRA_VERBOSE")
elif self.verbosity_count == 3:
logger_settings.append("G_LOGGER.severity = G_LOGGER.SUPER_VERBOSE")
elif self.verbosity_count == 2:
logger_settings.append("G_LOGGER.severity = G_LOGGER.EXTRA_VERBOSE")
elif self.verbosity_count == 1:
logger_settings.append("G_LOGGER.severity = G_LOGGER.VERBOSE")
elif self.verbosity_count == -1:
logger_settings.append("G_LOGGER.severity = G_LOGGER.START")
elif self.verbosity_count == -2:
logger_settings.append("G_LOGGER.severity = G_LOGGER.FINISH")
elif self.verbosity_count == -3:
logger_settings.append("G_LOGGER.severity = G_LOGGER.WARNING")
elif self.verbosity_count == -4:
logger_settings.append("G_LOGGER.severity = G_LOGGER.ERROR")
elif self.verbosity_count <= -4:
logger_settings.append("G_LOGGER.severity = G_LOGGER.CRITICAL")
if self.silent:
logger_settings.append("G_LOGGER.severity = G_LOGGER.CRITICAL")
for fmt in self.log_format:
if fmt == "no-colors":
logger_settings.append("G_LOGGER.colors = False")
elif fmt == "timestamp":
logger_settings.append("G_LOGGER.timestamp = True")
elif fmt == "line-info":
logger_settings.append("G_LOGGER.line_info = True")
if self.log_file:
logger_settings.append("G_LOGGER.log_file = {:}".format(repr(self.log_file)))
for setting in logger_settings:
script.append_preimport(safe(setting))
return safe("G_LOGGER")
def get_logger(self):
return args_util.run_script(self.add_to_script)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/logger.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from polygraphy import constants, mod, util
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import assert_identifier, inline, make_invocable, safe
@mod.export()
class TrtLegacyArgs(BaseArgs):
def add_to_parser(self, parser):
trt_legacy_args = parser.add_argument_group(
"TensorRT Legacy",
"[DEPRECATED] Options for TensorRT Legacy. Reuses TensorRT options, but does not support int8 mode, or dynamic shapes",
)
trt_legacy_args.add_argument(
"-p", "--preprocessor", help="The preprocessor to use for the UFF converter", default=None
)
trt_legacy_args.add_argument("--uff-order", help="The order of the input", default=None)
trt_legacy_args.add_argument(
"--batch-size",
metavar="SIZE",
help="The batch size to use in TensorRT when it cannot be automatically determined",
type=int,
default=None,
)
trt_legacy_args.add_argument(
"--model",
help="Model file for Caffe models. The deploy file should be provided as the model_file positional argument",
dest="caffe_model",
)
trt_legacy_args.add_argument(
"--save-uff", help="Save intermediate UFF files", action="store_true", default=None
)
def register(self, maker):
from polygraphy.tools.args.model import ModelArgs
from polygraphy.tools.args.data_loader import DataLoaderArgs
from polygraphy.tools.args.onnx.loader import OnnxLoaderArgs
from polygraphy.tools.args.tf.loader import TfLoaderArgs
from polygraphy.tools.args.trt.config import TrtConfigArgs
from polygraphy.tools.args.trt.loader import TrtEngineLoaderArgs, TrtEngineSaveArgs
from polygraphy.tools.args.trt.runner import TrtRunnerArgs
if isinstance(maker, OnnxLoaderArgs):
self.onnx_loader_args = maker
if isinstance(maker, ModelArgs):
self.model_args = maker
if isinstance(maker, TfLoaderArgs):
self.tf_loader_args = maker
if isinstance(maker, TrtConfigArgs):
self.trt_config_args = maker
if isinstance(maker, TrtEngineLoaderArgs):
self.trt_engine_loader_args = maker
if isinstance(maker, TrtEngineSaveArgs):
self.trt_engine_save_args = maker
if isinstance(maker, TrtRunnerArgs):
self.trt_runner_args = maker
if isinstance(maker, DataLoaderArgs):
self.data_loader_args = maker
def check_registered(self):
assert self.model_args is not None, "ModelArgs is required!"
assert self.trt_engine_loader_args is not None, "TrtEngineLoaderArgs is required!"
def parse(self, args):
self.trt_outputs = args_util.get_outputs(args, "trt_outputs")
self.caffe_model = args_util.get(args, "caffe_model")
self.batch_size = args_util.get(args, "batch_size")
self.save_uff = args_util.get(args, "save_uff")
self.uff_order = args_util.get(args, "uff_order")
self.preprocessor = args_util.get(args, "preprocessor")
self.calibration_cache = args_util.get(args, "calibration_cache")
calib_base = args_util.get(args, "calibration_base_class")
self.calibration_base_class = None
if calib_base is not None:
calib_base = safe(assert_identifier(calib_base))
self.calibration_base_class = inline(safe("trt.{:}", inline(calib_base)))
self.quantile = args_util.get(args, "quantile")
self.regression_cutoff = args_util.get(args, "regression_cutoff")
self.use_dla = args_util.get(args, "use_dla")
self.allow_gpu_fallback = args_util.get(args, "allow_gpu_fallback")
def add_to_script(self, script):
script.add_import(imports=["TrtLegacyRunner"], frm="polygraphy.backend.trt_legacy")
G_LOGGER.warning("Legacy TensorRT runner only supports implicit batch TensorFlow/UFF, ONNX, and Caffe models")
load_engine = self.model_args.model_file if self.model_args.model_type == "engine" else None
loader_name = None
if self.model_args.model_type == "onnx":
script.add_import(imports=["ParseNetworkFromOnnxLegacy"], frm="polygraphy.backend.trt_legacy")
onnx_loader = self.onnx_loader_args.add_onnx_loader(script, disable_custom_outputs=True)
loader_name = script.add_loader(
make_invocable("ParseNetworkFromOnnxLegacy", onnx_loader), "parse_network_from_onnx_legacy"
)
elif self.model_args.model_type == "caffe":
script.add_import(imports=["LoadNetworkFromCaffe"], frm="polygraphy.backend.trt_legacy")
loader_name = script.add_loader(
make_invocable(
"LoadNetworkFromCaffe",
self.model_args.model_file,
self.caffe_model,
self.trt_outputs,
self.batch_size,
),
"parse_network_from_caffe",
)
elif load_engine is None:
script.add_import(imports=["LoadNetworkFromUff"], frm="polygraphy.backend.trt_legacy")
if self.model_args.model_type == "uff":
script.add_import(imports=["LoadUffFile"], frm="polygraphy.backend.trt_legacy")
shapes = {name: shape for name, (_, shape) in self.model_args.input_shapes.items()}
loader_name = script.add_loader(
make_invocable(
"LoadUffFile", self.model_args.model_file, util.default(shapes, {}), self.trt_outputs
),
"load_uff_file",
)
else:
script.add_import(imports=["ConvertToUff"], frm="polygraphy.backend.trt_legacy")
loader_name = script.add_loader(
make_invocable(
"ConvertToUff",
self.tf_loader_args.add_to_script(script),
save_uff=self.save_uff,
preprocessor=self.preprocessor,
),
"convert_to_uff",
)
loader_name = script.add_loader(
make_invocable("LoadNetworkFromUff", loader_name, uff_order=self.uff_order), "uff_network_loader"
)
calibrator = None
if (
self.trt_config_args.int8 and self.data_loader_args is not None
): # We cannot do calibration if there is no data loader.
script.add_import(imports=["Calibrator"], frm="polygraphy.backend.trt")
script.add_import(imports=["DataLoader"], frm="polygraphy.comparator")
data_loader_name = self.data_loader_args.add_data_loader(script)
if self.calibration_base_class:
script.add_import(imports=["tensorrt as trt"])
calibrator = make_invocable(
"Calibrator",
data_loader=data_loader_name if data_loader_name else inline(safe("DataLoader()")),
cache=self.calibration_cache,
BaseClass=self.calibration_base_class,
quantile=self.quantile,
regression_cutoff=self.regression_cutoff,
)
runner_str = make_invocable(
"TrtLegacyRunner",
network_loader=loader_name,
max_workspace_size=self.trt_config_args.workspace,
max_batch_size=self.batch_size,
fp16=self.trt_config_args.fp16,
tf32=self.trt_config_args.tf32,
load_engine=load_engine,
save_engine=self.trt_engine_save_args.path,
layerwise=self.trt_outputs == constants.MARK_ALL,
plugins=self.trt_engine_loader_args.plugins,
int8=self.trt_config_args.int8,
calibrator=calibrator,
use_dla=self.use_dla,
allow_gpu_fallback=self.allow_gpu_fallback,
)
script.add_runner(runner_str)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/trt_legacy.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from polygraphy import mod, util
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
@mod.export()
class ModelArgs(BaseArgs):
EXT_MODEL_TYPE_MAPPING = {
".hdf5": "keras",
".uff": "uff",
".prototxt": "caffe",
".onnx": "onnx",
".engine": "engine",
".plan": "engine",
".graphdef": "frozen",
".py": "trt-network-script",
}
class ModelType(str):
TF_TYPES = ["frozen", "keras", "ckpt"]
ONNX_TYPES = ["onnx"]
TRT_TYPES = ["engine", "uff", "trt-network-script"]
OTHER_TYPES = ["caffe"]
VALID_TYPES = TF_TYPES + ONNX_TYPES + TRT_TYPES + OTHER_TYPES
def __new__(cls, model_type):
assert model_type in ModelArgs.ModelType.VALID_TYPES or model_type is None
return str.__new__(cls, model_type)
def is_tf(self):
return self in ModelArgs.ModelType.TF_TYPES
def is_onnx(self):
return self in ModelArgs.ModelType.ONNX_TYPES
def is_trt(self):
return self in ModelArgs.ModelType.TRT_TYPES
def __init__(self, model_required=False, inputs="--inputs", model_type=None, inputs_doc=None):
super().__init__()
self._model_required = model_required
self._inputs = inputs
# If model type is provided, it means the tool only supports a single type of model.
self._model_type = model_type
self._inputs_doc = util.default(
inputs_doc,
"Model input(s) and their shape(s). "
"Used to determine shapes to use while generating input data for inference",
)
def add_to_parser(self, parser):
model_args = parser.add_argument_group("Model", "Options for the model")
model_args.add_argument("model_file", help="Path to the model", nargs=None if self._model_required else "?")
if self._model_type is None:
model_args.add_argument(
"--model-type",
help="The type of the input model: {{'frozen': TensorFlow frozen graph, 'keras': Keras model, "
"'ckpt': TensorFlow checkpoint directory, 'onnx': ONNX model, 'engine': TensorRT engine, 'trt-network-script': "
"A Python script that defines a `load_network` function that takes no arguments and returns a TensorRT Builder, "
"Network, and optionally Parser, "
"'uff': UFF file [deprecated], 'caffe': Caffe prototxt [deprecated]}}",
choices=ModelArgs.ModelType.VALID_TYPES,
default=None,
)
if self._inputs:
model_args.add_argument(
self._inputs.replace("inputs", "input") + "-shapes",
self._inputs,
help="{:}. Format: {arg_name}-shapes <name>:<shape>. "
"For example: {arg_name}-shapes image:[1,3,224,224] other_input:[10]".format(
self._inputs_doc, arg_name=self._inputs.replace("inputs", "input")
),
nargs="+",
default=None,
dest="input_shapes",
)
def parse(self, args):
def determine_model_type():
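            # Precedence: explicit --model-type, then checkpoint detection, then
            # runner-specific heuristics, and finally the file extension.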
if args_util.get(args, "model_type") is not None:
return args.model_type.lower()
if args_util.get(args, "model_file") is None:
return None
def use_ext(ext_mapping):
file_ext = os.path.splitext(args.model_file)[-1]
if file_ext in ext_mapping:
return ext_mapping[file_ext]
runners = args_util.get(args, "runners", default=[])
if args_util.get(args, "ckpt") or os.path.isdir(args.model_file):
return "ckpt"
elif "tf" in runners or "trt_legacy" in runners:
if args.caffe_model:
return "caffe"
return use_ext(ModelArgs.EXT_MODEL_TYPE_MAPPING) or "frozen"
else:
model_type = use_ext(ModelArgs.EXT_MODEL_TYPE_MAPPING)
if model_type:
return model_type
G_LOGGER.critical(
"Could not automatically determine model type for: {:}\n"
"Please explicitly specify the type with the --model-type option".format(args.model_file)
)
if args_util.get(args, "input_shapes"):
self.input_shapes = args_util.parse_meta(
args_util.get(args, "input_shapes"), includes_dtype=False
) # TensorMetadata
else:
self.input_shapes = TensorMetadata()
self.model_file = args_util.get(args, "model_file")
if self.model_file:
G_LOGGER.verbose("Model: {:}".format(self.model_file))
if not os.path.exists(self.model_file):
G_LOGGER.warning("Model path does not exist: {:}".format(self.model_file))
self.model_file = os.path.abspath(self.model_file)
model_type_str = self._model_type if self._model_type else determine_model_type()
self.model_type = ModelArgs.ModelType(model_type_str) if model_type_str else None
if self.model_type == "trt-network-script" and (not self.model_file or not self.model_file.endswith(".py")):
G_LOGGER.critical(
"TensorRT network scripts must exist and have '.py' extensions.\n"
"Note: Provided network script path was: {:}".format(self.model_file)
)
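        # Illustrative behavior of the extension mapping above (file names hypothetical):
        #   "model.onnx" -> "onnx", "model.plan" -> "engine", "model.graphdef" -> "frozen";
        #   a directory (or --ckpt) is treated as a TensorFlow checkpoint, and any
        #   unrecognized extension requires an explicit --model-type.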
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import util
class BaseArgs(object):
"""
    Adds arguments to a command-line parser and provides the ability to create
    Polygraphy objects based on those arguments.
"""
def __init__(self, disable_abbrev=None):
self.disable_abbrev = util.default(disable_abbrev, False)
def add_to_parser(self, parser):
"""
Add arguments to a command-line parser.
Args:
parser (argparse.ArgumentParser): The argument parser.
"""
pass
def parse(self, args):
"""
Parses relevant arguments from command-line arguments.
Args:
args: Arguments provided by argparse.
"""
pass
def register(self, maker):
"""
Registers another argument group with this one.
        This can be used, for example, to pick up dependencies.
Args:
maker (BaseArgs): Another argument group.
"""
pass
def check_registered(self):
"""
Called after all `register()` calls to make dependency checks easier.
"""
pass
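# A minimal sketch of how a subclass typically plugs into this lifecycle. Note that
# "TimeoutArgs" and its "--timeout" option are hypothetical, purely for illustration:
#
#     class TimeoutArgs(BaseArgs):
#         def add_to_parser(self, parser):
#             group = parser.add_argument_group("Timeout", "Options for timeouts")
#             group.add_argument("--timeout", type=float, default=None)
#
#         def parse(self, args):
#             self.timeout = util.default(getattr(args, "timeout", None), 30.0)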
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/base.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import constants, mod, util
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER, LogMode
from polygraphy.tools.script import Script, ensure_safe, inline, safe
np = mod.lazy_import("numpy")
@mod.export()
def cast(val):
"""
Cast a value from a string to one of:
[int, float, str, List[int], List[float], List[str]]
Args:
val (str): The value to cast.
Returns:
        object: The cast value.
"""
val = str(val.strip())
if val.strip("[]") != val:
return [cast(elem) for elem in val.strip("[]").split(",")]
try:
return int(val) # This fails for float strings like '0.0'
except:
pass
try:
return float(val) # This fails for non-numerical strings like 'isildur'
except:
pass
return val.strip("\"'")
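# Illustrative examples of the casting rules above:
#   cast("16") -> 16, cast("0.5") -> 0.5, cast("'text'") -> "text"
#   cast("[1,2,3]") -> [1, 2, 3], cast("[a,0.5]") -> ["a", 0.5]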
@mod.export()
def run_script(script_func, *args):
"""
Populates a script using the provided callable, then returns
the variable indicated by the return value of the callable.
Args:
script_func (Callable(Script, *args) -> str):
A callable that populates a Script and then returns
the name of an object defined within the script to retrieve.
args:
            Additional positional arguments to pass to script_func.
The script_func should accept these by variable name instead
of taking the values themselves. Values of ``None`` will be
passed directly instead of by variable name.
Returns:
object:
An object defined within the script, or ``None`` if it is not
defined by the script.
"""
script = Script()
arg_names = []
for index, arg in enumerate(args):
if arg is not None:
arg_name = safe("__arg{:}", index)
locals()[arg_name.unwrap()] = arg
arg_names.append(inline(arg_name))
else:
arg_names.append(None)
safe_ret_name = script_func(script, *arg_names)
exec(str(script), globals(), locals())
if safe_ret_name is not None:
ret_name = ensure_safe(safe_ret_name).unwrap()
if ret_name in locals():
return locals()[ret_name]
return None
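# A rough sketch of the expected script_func contract (the loader name and model path
# here are hypothetical; make_invocable comes from polygraphy.tools.script):
#
#     def populate(script, model_path):
#         script.add_import(imports=["OnnxFromPath"], frm="polygraphy.backend.onnx")
#         return script.add_loader(make_invocable("OnnxFromPath", model_path), "loader")
#
#     loader = run_script(populate, "model.onnx")  # "model.onnx" is passed by variable name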
@mod.export()
def get(args, attr, default=None):
"""
Gets a command-line argument if it exists, otherwise returns a default value.
Args:
args: The command-line arguments.
attr (str): The name of the command-line argument.
default (obj): The default value to return if the argument is not found. Defaults to None.
"""
if hasattr(args, attr):
return getattr(args, attr)
return default
@mod.export()
def get_outputs(args, name):
outputs = get(args, name)
if outputs is not None and len(outputs) == 2 and outputs == ["mark", "all"]:
outputs = constants.MARK_ALL
return outputs
@mod.export()
def get_outputs_for_script(script, outputs):
if outputs == constants.MARK_ALL:
script.add_import(["constants"], frm="polygraphy")
outputs = inline(safe("constants.MARK_ALL"))
return outputs
def np_types():
"""
Returns a list of human-readable names of NumPy data types.
"""
return sorted(set(np.dtype(dtype).name for dtype in np.sctypeDict.values()))
def np_type_from_str(dt_str):
"""
Converts a string representation of a data type to a NumPy data type.
Args:
dt_str (str): The string representation of the data type.
Returns:
np.dtype: The NumPy data type.
Raises:
KeyError: If the provided string does not correspond to a NumPy data type.
"""
try:
return {np.dtype(dtype).name: np.dtype(dtype) for dtype in np.sctypeDict.values()}[dt_str]
except KeyError:
G_LOGGER.error(
"Could not understand data type: {:}. Did you forget to specify a data type? "
"Please use one of: {:} or `auto`.".format(dt_str, np_types())
)
raise
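# For instance, np_type_from_str("float32") returns np.dtype("float32"), while an
# unrecognized string such as "float99" logs an error and re-raises the KeyError.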
@mod.export()
def parse_dict_with_default(arg_lst, cast_to=None, sep=None):
"""
Generate a dictionary from a list of arguments of the form:
``<key>:<val>``. If ``<key>`` is empty, the value will be assigned
to an empty string key in the returned mapping.
Args:
arg_lst (List[str]):
The arguments to map.
cast_to (type):
            The type to which to cast the values in the map. By default,
            the value produced by ``cast`` is used as-is.
sep (str):
The separator between the key and value strings.
Returns:
Dict[str, obj]: The mapping.
"""
sep = util.default(sep, ":")
if arg_lst is None:
return
arg_map = {}
for arg in arg_lst:
key, _, val = arg.rpartition(sep)
val = cast(val)
if cast_to:
val = cast_to(val)
arg_map[key] = val
return arg_map
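# Illustrative example: parse_dict_with_default(["layer0:fp16", ":fp32"]) yields
# {"layer0": "fp16", "": "fp32"} -- an argument with no key populates the "" entry.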
@mod.deprecate(
remove_in="0.35.0",
use_instead=": as a separator and write shapes in the form [dim0,...,dimN]",
name="Using , as a separator",
)
def parse_meta_legacy(meta_args, includes_shape=True, includes_dtype=True):
"""
Parses a list of tensor metadata arguments of the form "<name>,<shape>,<dtype>"
`shape` and `dtype` are optional, but `dtype` must always come after `shape` if they are both enabled.
Args:
meta_args (List[str]): A list of tensor metadata arguments from the command-line.
includes_shape (bool): Whether the arguments include shape information.
includes_dtype (bool): Whether the arguments include dtype information.
Returns:
TensorMetadata: The parsed tensor metadata.
"""
SEP = ","
SHAPE_SEP = "x"
meta = TensorMetadata()
for orig_tensor_meta_arg in meta_args:
tensor_meta_arg = orig_tensor_meta_arg
def pop_meta(name):
nonlocal tensor_meta_arg
tensor_meta_arg, _, val = tensor_meta_arg.rpartition(SEP)
if not tensor_meta_arg:
G_LOGGER.critical(
"Could not parse {:} from argument: {:}. Is it separated by a comma "
"(,) from the tensor name?".format(name, orig_tensor_meta_arg)
)
if val.lower() == "auto":
val = None
return val
def parse_dtype(dtype):
if dtype is not None:
dtype = np_type_from_str(dtype)
return dtype
def parse_shape(shape):
if shape is not None:
def parse_shape_dim(buf):
try:
buf = int(buf)
except:
pass
return buf
parsed_shape = []
# Allow for quoted strings in shape dimensions
in_quotes = False
buf = ""
for char in shape.lower():
if char in ['"', "'"]:
in_quotes = not in_quotes
elif not in_quotes and char == SHAPE_SEP:
parsed_shape.append(parse_shape_dim(buf))
buf = ""
else:
buf += char
# For the last dimension
if buf:
parsed_shape.append(parse_shape_dim(buf))
shape = tuple(parsed_shape)
return shape
name = None
dtype = None
shape = None
if includes_dtype:
dtype = parse_dtype(pop_meta("data type"))
if includes_shape:
shape = parse_shape(pop_meta("shape"))
name = tensor_meta_arg
meta.add(name, dtype, shape)
new_style = []
for m_arg in meta_args:
arg = m_arg
if includes_shape:
arg = arg.replace(",", ":[", 1)
if includes_dtype:
arg = arg.replace(",", "]:", 1)
else:
arg += "]"
arg = arg.replace(",auto", ":auto")
arg = arg.replace(",", ":")
if includes_shape:
arg = arg.replace("x", ",")
new_style.append(arg)
G_LOGGER.warning(
"The old shape syntax is deprecated and will be removed in a future version of Polygraphy\n"
"See the CHANGELOG for the motivation behind this deprecation.",
mode=LogMode.ONCE,
)
G_LOGGER.warning("Instead of: '{:}', use: '{:}'\n".format(" ".join(meta_args), " ".join(new_style)))
return meta
def parse_meta_new_impl(meta_args, includes_shape=True, includes_dtype=True):
SEP = ":"
meta = TensorMetadata()
for meta_arg in meta_args:
name, shape, dtype = None, None, None
def pop_meta(func):
nonlocal meta_arg
meta_arg, _, val = meta_arg.rpartition(SEP)
val = cast(val.strip())
if isinstance(val, str) and val.lower() == "auto":
return None
return func(val)
if includes_dtype:
dtype = pop_meta(func=np_type_from_str)
if includes_shape:
shape = pop_meta(func=lambda s: tuple(e for e in s if e != ""))
name = meta_arg
meta.add(name, dtype=dtype, shape=shape)
return meta
@mod.export()
def parse_meta(meta_args, includes_shape=True, includes_dtype=True):
"""
Parses a list of tensor metadata arguments of the form "<name>:<shape>:<dtype>"
`shape` and `dtype` are optional, but `dtype` must always come after `shape` if they are both enabled.
Args:
meta_args (List[str]): A list of tensor metadata arguments from the command-line.
includes_shape (bool): Whether the arguments include shape information.
includes_dtype (bool): Whether the arguments include dtype information.
Returns:
TensorMetadata: The parsed tensor metadata.
"""
if all((includes_shape and "[" in arg) or (includes_dtype and "," not in arg) for arg in meta_args):
return parse_meta_new_impl(meta_args, includes_shape, includes_dtype)
return parse_meta_legacy(meta_args, includes_shape, includes_dtype)
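# Illustrative example of the new-style syntax handled above:
#   parse_meta(["image:[1,3,224,224]:float32"]) yields a TensorMetadata entry named
#   "image" with shape (1, 3, 224, 224) and dtype float32; "auto" maps to None.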
@mod.export()
def parse_num_bytes(num_bytes_arg):
"""
Parses an argument that indicates a number of bytes. The argument may use scientific notation,
or contain a `K`, `M`, or `G` suffix (case-insensitive), indicating `KiB`, `MiB`, or `GiB` respectively.
    If the resulting number is fractional, it will be truncated to an integer.
If the provided argument is `None`, `None` is returned.
Args:
num_bytes_arg (str): The argument indicating the number of bytes.
Returns:
int: The number of bytes.
"""
if num_bytes_arg is None:
return None
num_component = num_bytes_arg # Numerical component of the argument
multiplier = 1
    suffix_multiplier = {"K": 1 << 10, "M": 1 << 20, "G": 1 << 30}
    for suffix, mult in suffix_multiplier.items():
        if num_bytes_arg.upper().endswith(suffix):
            # Slice off the suffix rather than using rstrip(), which would strip repeated trailing characters.
            num_component = num_bytes_arg.upper()[: -len(suffix)]
            multiplier = mult
break
try:
return int(float(num_component) * multiplier)
except:
G_LOGGER.critical(
"Could not convert {:} to a number of bytes. "
"Please use either an integer (e.g. 16000000), scientific notation (e.g. 16e6), "
"or a number with a valid suffix: K, M, or G (e.g. 16M).".format(num_bytes_arg)
)
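# Illustrative conversions: parse_num_bytes("16M") -> 16777216,
# parse_num_bytes("2e6") -> 2000000, parse_num_bytes("1.5K") -> 1536.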
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/util/util.py |
from polygraphy.tools.args.util.util import *
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/util/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.args.onnx.loader import *
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/onnx/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import constants, mod, util
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import Script, make_invocable
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
onnxrt_backend = mod.lazy_import("polygraphy.backend.onnxrt")
@mod.export()
class OnnxSaveArgs(BaseArgs):
def __init__(
self,
infer_shapes=False,
output="output",
short_opt="-o",
required=False,
allow_ext_data_path=True,
custom_help=None,
default_output_path=None,
):
super().__init__()
self._infer_shapes = infer_shapes
self._output = output
self._short_opt = short_opt
self._required = required
self._allow_ext_data_path = allow_ext_data_path
self._custom_help = custom_help
self._default_output_path = default_output_path
self.onnx_shape_inference_args = None
def register(self, maker):
if self._infer_shapes and isinstance(maker, OnnxShapeInferenceArgs):
self.onnx_shape_inference_args = maker
def add_to_parser(self, parser):
self.group = parser.add_argument_group("ONNX Save Options", "Options for saving ONNX models")
if self._output:
flag = "--{:}".format(self._output)
short = self._short_opt or flag
self.group.add_argument(
short,
flag,
help=self._custom_help or "Path to save the ONNX model",
dest="save_onnx",
default=self._default_output_path,
required=self._required,
)
if self._allow_ext_data_path:
ext_data_params = {
"action": "append",
"nargs": "?",
}
else:
ext_data_params = {
"action": "append_const",
"const": "",
}
self.group.add_argument(
"--save-external-data",
help="Whether to save weight data in external file(s). "
+ (
"To use a non-default path, supply the desired path as an argument. This is always a relative path; "
"external data is always written to the same directory as the model. "
if self._allow_ext_data_path
else ""
),
default=None,
**ext_data_params,
)
self.group.add_argument(
"--external-data-size-threshold",
help="The size threshold, in bytes, above which tensor data will be stored in the external file. "
"Tensors smaller that this threshold will remain in the ONNX file. "
"Optionally, use a `K`, `M`, or `G` suffix to indicate KiB, MiB, or GiB respectively."
"For example, `--external-data-size-threshold=16M` is equivalent to `--external-data-size-threshold=16777216`"
"Has no effect if `--save-external-data` is not set",
default=None,
)
self.group.add_argument(
"--no-save-all-tensors-to-one-file",
help="Do not save all tensors to a single file when saving external data. "
"Has no effect if `--save-external-data` is not set",
dest="all_tensors_to_one_file",
default=None,
action="store_false",
)
def parse(self, args):
self.path = args_util.get(args, "save_onnx")
save_external_data = args_util.get(args, "save_external_data")
if save_external_data is not None:
save_external_data = save_external_data[0] or ""
self.save_external_data = save_external_data
self.size_threshold = args_util.parse_num_bytes(args_util.get(args, "external_data_size_threshold"))
self.all_tensors_to_one_file = args_util.get(args, "all_tensors_to_one_file")
def add_save_onnx(self, script, loader_name):
if self.path is None:
return loader_name
# Need to run shape inference again after processing the graph since it may have changed.
if self.onnx_shape_inference_args is not None:
loader_name = self.onnx_shape_inference_args.add_to_script(script, loader_name)
script.add_import(imports=["SaveOnnx"], frm="polygraphy.backend.onnx")
loader_name = script.add_loader(
make_invocable(
"SaveOnnx",
loader_name,
path=self.path,
external_data_path=self.save_external_data,
size_threshold=self.size_threshold,
all_tensors_to_one_file=self.all_tensors_to_one_file,
),
"save_onnx",
)
return loader_name
def save_onnx(self, model, path=None):
with util.TempAttrChange(self, "path", path):
loader = args_util.run_script(self.add_save_onnx, model)
return loader()
@mod.export()
class OnnxShapeInferenceArgs(BaseArgs):
# NOTE: force_fallback is not implemented under add_to_script, and must be implemented
# manually in tools that use this group.
def __init__(self, default=False, enable_force_fallback=False):
super().__init__()
self._default = default
self._enable_force_fallback = enable_force_fallback
        self.onnx_loader_args = None
        self.data_loader_args = None  # Set via register(); check_registered() relies on this attribute existing.
def add_to_parser(self, parser):
self.group = parser.add_argument_group("ONNX Shape Inference", "Options for ONNX Shape Inference")
g = self.group.add_mutually_exclusive_group()
if self._default:
g.add_argument(
"--no-shape-inference",
help="Disable ONNX shape inference when loading the model",
dest="do_shape_inference",
action="store_false",
default=True,
)
else:
g.add_argument(
"--shape-inference",
help="Enable ONNX shape inference when loading the model",
dest="do_shape_inference",
action="store_true",
default=False,
)
if self._enable_force_fallback:
g.add_argument(
"--force-fallback-shape-inference",
help="Force Polygraphy to use ONNX-Runtime to determine metadata for "
"tensors in the graph. This can be useful in cases where ONNX shape inference does not generate correct information. "
"Note that this will cause dynamic dimensions to become fixed. ",
action="store_true",
default=None,
)
def register(self, maker):
from polygraphy.tools.args.data_loader import DataLoaderArgs
if isinstance(maker, DataLoaderArgs):
self.data_loader_args = maker
if isinstance(maker, OnnxLoaderArgs):
self.onnx_loader_args = maker
def check_registered(self):
assert (
not self._enable_force_fallback or self.data_loader_args
), "DataLoaderArgs is required if force fallback shape inference is enabled!"
def parse(self, args):
self.do_shape_inference = args_util.get(args, "do_shape_inference")
self.force_fallback = args_util.get(args, "force_fallback_shape_inference")
        # No point in running ONNX shape inference if we're going to use fallback inference.
if self.force_fallback:
self.do_shape_inference = False
def add_to_script(self, script, loader_name):
if self.do_shape_inference:
script.add_import(imports=["InferShapes"], frm="polygraphy.backend.onnx")
external_data_dir = self.onnx_loader_args.load_external_data if self.onnx_loader_args is not None else None
loader_name = script.add_loader(
make_invocable("InferShapes", loader_name, external_data_dir=external_data_dir), "infer_shapes"
)
return loader_name
def fallback_inference(self, onnx_model):
"""
Run inference with ONNX-Runtime.
This can be used to retrieve values/shapes/data types for all
tensors in the model when other shape inference approaches fail.
        Args:
            onnx_model (onnx.ModelProto):
                The ONNX model in which to infer shapes.
                Input data is generated using the registered ``DataLoaderArgs`` group.
Returns:
(OrderedDict[str, np.ndarray], TensorMetadata):
1. Mapping of values for all tensors in the model, including inputs.
Values are loaded lazily when first accessed so as to save memory.
2. Metadata for every tensor in the model.
"""
from polygraphy.comparator import IterationResult
with G_LOGGER.verbosity(G_LOGGER.severity + 10):
load_model = onnx_backend.ModifyOutputs(onnx_model, outputs=constants.MARK_ALL, copy=True)
with onnxrt_backend.OnnxrtRunner(
onnxrt_backend.SessionFromOnnx(onnx_backend.BytesFromOnnx(load_model))
) as runner:
# We want to set input_metadata only - not user_input_metadata, so that user_input_metadata
# will be populated by the --model-inputs argument.
data_loader = self.data_loader_args.get_data_loader()
data_loader.input_metadata = runner.get_input_metadata()
feed_dict = data_loader[0]
with G_LOGGER.verbosity(G_LOGGER.severity - 10):
G_LOGGER.info(
"Running fallback shape inference using input metadata:\n{:}".format(
TensorMetadata.from_feed_dict(feed_dict)
)
)
outputs = runner.infer(feed_dict)
# We include the inputs here so that we have values for all tensors in the model.
outputs.update(feed_dict)
# Use IterationResult here since it can handle very large tensors by saving to disk.
# Layerwise outputs might otherwise take up too much memory.
return IterationResult(outputs), TensorMetadata.from_feed_dict(outputs)
@mod.export()
class OnnxLoaderArgs(BaseArgs):
def __init__(self, save=False, output_prefix="onnx-"):
super().__init__()
self.tf2onnx_loader_args = None
self.onnx_save_args = None
self.onnx_shape_inference_args = None
self._save = save
self._output_prefix = output_prefix
def add_to_parser(self, parser):
self.group = parser.add_argument_group("ONNX Loader", "Options for the ONNX Loader")
self.group.add_argument(
"--external-data-dir",
"--load-external-data",
"--ext",
dest="load_external_data",
help="Path to a directory containing external data for the model. "
"Generally, this is only required if the external data is not stored in the model directory.",
)
if self._output_prefix is not None:
self.group.add_argument(
"--{:}outputs".format(self._output_prefix),
help="Name(s) of ONNX tensor(s) to mark as output(s). "
"Using the special value 'mark all' indicates that all tensors should be used as outputs",
nargs="+",
default=None,
dest="onnx_outputs",
)
self.group.add_argument(
"--{:}exclude-outputs".format(self._output_prefix),
help="[EXPERIMENTAL] Name(s) of ONNX output(s) to unmark as outputs.",
nargs="+",
default=None,
dest="onnx_exclude_outputs",
)
def register(self, maker):
from polygraphy.tools.args.model import ModelArgs
from polygraphy.tools.args.tf2onnx.loader import Tf2OnnxLoaderArgs
if isinstance(maker, ModelArgs):
self.model_args = maker
if isinstance(maker, Tf2OnnxLoaderArgs):
self.tf2onnx_loader_args = maker
if self._save and isinstance(maker, OnnxSaveArgs):
self.onnx_save_args = maker
if isinstance(maker, OnnxShapeInferenceArgs):
self.onnx_shape_inference_args = maker
def check_registered(self):
assert self.model_args is not None, "ModelArgs is required!"
assert not self._save or self.onnx_save_args is not None, "OnnxSaveArgs is required to use save=True"
def parse(self, args):
self.outputs = args_util.get_outputs(args, "onnx_outputs")
self.exclude_outputs = args_util.get(args, "onnx_exclude_outputs")
self.load_external_data = args_util.get(args, "load_external_data")
def _get_modify_onnx_loader(self, script, loader_name, disable_custom_outputs=None):
if disable_custom_outputs:
outputs = None
exclude_outputs = None
else:
outputs = args_util.get_outputs_for_script(script, self.outputs)
exclude_outputs = self.exclude_outputs
if outputs or exclude_outputs:
script.add_import(imports=["ModifyOutputs as ModifyOnnxOutputs"], frm="polygraphy.backend.onnx")
loader_name = script.add_loader(
make_invocable("ModifyOnnxOutputs", loader_name, outputs=outputs, exclude_outputs=exclude_outputs),
"modify_outputs",
)
return loader_name
def add_onnx_loader(self, script, disable_custom_outputs=None, suffix=None):
model_type = self.model_args.model_type
if model_type.is_onnx():
loader_name = self.model_args.model_file
if self.onnx_shape_inference_args is not None:
loader_name = self.onnx_shape_inference_args.add_to_script(script, loader_name)
if loader_name == self.model_args.model_file: # Shape inference loader isn't being used, have to load.
script.add_import(imports=["OnnxFromPath"], frm="polygraphy.backend.onnx")
loader_str = make_invocable(
"OnnxFromPath", self.model_args.model_file, external_data_dir=self.load_external_data
)
loader_name = script.add_loader(loader_str, "load_onnx", suffix=suffix)
elif model_type.is_tf():
if self.tf2onnx_loader_args is None:
G_LOGGER.critical("Could not load: {:}. Is it an ONNX model?".format(self.model_args.model_file))
loader_name = self.tf2onnx_loader_args.add_to_script(script)
else:
G_LOGGER.critical("Model type: {:} cannot be converted to ONNX.".format(model_type))
loader_name = self._get_modify_onnx_loader(script, loader_name, disable_custom_outputs=disable_custom_outputs)
if self.onnx_save_args is not None:
loader_name = self.onnx_save_args.add_save_onnx(script, loader_name)
return loader_name
def should_use_onnx_loader(self, disable_custom_outputs=None):
"""
Whether this model needs to be loaded via a Polygraphy ONNX loader, e.g., in case it
needs modifications.
"""
tmp_script = Script()
inp_loader = "check_needs_modify"
needs_modify = self._get_modify_onnx_loader(tmp_script, inp_loader, disable_custom_outputs) != inp_loader
needs_shape_inference = (
self.onnx_shape_inference_args is not None and self.onnx_shape_inference_args.do_shape_inference
)
needs_save = self.onnx_save_args is not None and self.onnx_save_args.path is not None
# Currently, other loaders do not support external data, so we must fall back to the ONNX loader if it's present.
return (
not self.model_args.model_type.is_onnx()
or needs_modify
or self.load_external_data
or needs_shape_inference
or needs_save
)
def add_serialized_onnx_loader(self, script, disable_custom_outputs=None):
script.add_import(imports=["BytesFromOnnx"], frm="polygraphy.backend.onnx")
onnx_loader = self.add_onnx_loader(script, disable_custom_outputs=disable_custom_outputs)
return script.add_loader(make_invocable("BytesFromOnnx", onnx_loader), "serialize_onnx")
def load_onnx(self):
loader = args_util.run_script(self.add_onnx_loader)
return loader()
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/onnx/loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import make_invocable
@mod.export()
class TrtRunnerArgs(BaseArgs):
def register(self, maker):
from polygraphy.tools.args.model import ModelArgs
from polygraphy.tools.args.trt.loader import TrtEngineLoaderArgs
if isinstance(maker, ModelArgs):
self.model_args = maker
elif isinstance(maker, TrtEngineLoaderArgs):
self.trt_engine_loader_args = maker
def check_registered(self):
assert self.model_args is not None, "ModelArgs is required!"
assert self.trt_engine_loader_args is not None, "TrtEngineLoaderArgs is required!"
def add_to_script(self, script):
script.add_import(imports=["TrtRunner"], frm="polygraphy.backend.trt")
if self.model_args.model_type == "engine":
loader_name = self.trt_engine_loader_args.add_trt_serialized_engine_loader(script)
else:
loader_name = self.trt_engine_loader_args.add_trt_build_engine_loader(script)
script.add_runner(make_invocable("TrtRunner", loader_name))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/trt/runner.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import os
from polygraphy import mod, util
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER, LogMode
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import assert_identifier, inline, make_invocable, make_invocable_if_nondefault, safe
def parse_profile_shapes(default_shapes, min_args, opt_args, max_args):
"""
Parses TensorRT profile options from command-line arguments.
Args:
        default_shapes (TensorMetadata): The inference input shapes.
        min_args (List[List[str]]): Minimum shape arguments, one entry per profile.
        opt_args (List[List[str]]): Optimum shape arguments, one entry per profile.
        max_args (List[List[str]]): Maximum shape arguments, one entry per profile.
Returns:
List[Tuple[OrderedDict[str, Shape]]]:
A list of profiles with each profile comprised of three dictionaries
(min, opt, max) mapping input names to shapes.
"""
def get_shapes(lst, idx):
nonlocal default_shapes
default_shapes = copy.copy(default_shapes)
if idx < len(lst):
default_shapes.update(args_util.parse_meta(lst[idx], includes_dtype=False))
# Don't care about dtype, and need to override dynamic dimensions
shapes = {name: util.override_dynamic_shape(shape) for name, (_, shape) in default_shapes.items()}
for name, shape in shapes.items():
if tuple(default_shapes[name].shape) != tuple(shape):
G_LOGGER.warning(
"Input tensor: {:} | For TensorRT profile, overriding dynamic shape: {:} to: {:}".format(
name, default_shapes[name].shape, shape
),
mode=LogMode.ONCE,
)
return shapes
num_profiles = max(len(min_args), len(opt_args), len(max_args))
# For cases where input shapes are provided, we have to generate a profile
if not num_profiles and default_shapes:
num_profiles = 1
profiles = []
for idx in range(num_profiles):
min_shapes = get_shapes(min_args, idx)
opt_shapes = get_shapes(opt_args, idx)
max_shapes = get_shapes(max_args, idx)
if sorted(min_shapes.keys()) != sorted(opt_shapes.keys()):
G_LOGGER.critical(
"Mismatch in input names between minimum shapes ({:}) and optimum shapes "
"({:})".format(list(min_shapes.keys()), list(opt_shapes.keys()))
)
elif sorted(opt_shapes.keys()) != sorted(max_shapes.keys()):
G_LOGGER.critical(
"Mismatch in input names between optimum shapes ({:}) and maximum shapes "
"({:})".format(list(opt_shapes.keys()), list(max_shapes.keys()))
)
profiles.append((min_shapes, opt_shapes, max_shapes))
return profiles
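# Illustrative example (hypothetical tensor name; static shapes are assumed to pass
# through util.override_dynamic_shape unchanged):
#   parse_profile_shapes(TensorMetadata(), [["x:[1,3,224,224]"]], [["x:[4,3,224,224]"]], [["x:[8,3,224,224]"]])
#   -> [({"x": (1, 3, 224, 224)}, {"x": (4, 3, 224, 224)}, {"x": (8, 3, 224, 224)})]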
@mod.export()
class TrtConfigArgs(BaseArgs):
def __init__(self, strict_types_default=None, random_data_calib_warning=True):
"""
Args:
strict_types_default (bool): Whether strict types should be enabled by default.
random_data_calib_warning (bool):
Whether to issue a warning when randomly generated data is being used
for calibration.
"""
super().__init__()
self.model_args = None
self.data_loader_args = None
self._strict_types_default = strict_types_default
self._random_data_calib_warning = random_data_calib_warning
def add_to_parser(self, parser):
trt_config_args = parser.add_argument_group(
"TensorRT Builder Configuration", "Options for TensorRT Builder Configuration"
)
trt_config_args.add_argument(
"--trt-min-shapes",
action="append",
help="The minimum shapes the optimization profile(s) will support. "
"Specify this option once for each profile. If not provided, inference-time input shapes are used. "
"Format: --trt-min-shapes <input0>:[D0,D1,..,DN] .. <inputN>:[D0,D1,..,DN]",
nargs="+",
default=[],
)
trt_config_args.add_argument(
"--trt-opt-shapes",
action="append",
help="The shapes for which the optimization profile(s) will be most performant. "
"Specify this option once for each profile. If not provided, inference-time input shapes are used. "
"Format: --trt-opt-shapes <input0>:[D0,D1,..,DN] .. <inputN>:[D0,D1,..,DN]",
nargs="+",
default=[],
)
trt_config_args.add_argument(
"--trt-max-shapes",
action="append",
help="The maximum shapes the optimization profile(s) will support. "
"Specify this option once for each profile. If not provided, inference-time input shapes are used. "
"Format: --trt-max-shapes <input0>:[D0,D1,..,DN] .. <inputN>:[D0,D1,..,DN]",
nargs="+",
default=[],
)
trt_config_args.add_argument(
"--tf32", help="Enable tf32 precision in TensorRT", action="store_true", default=None
)
trt_config_args.add_argument(
"--fp16", help="Enable fp16 precision in TensorRT", action="store_true", default=None
)
trt_config_args.add_argument(
"--int8",
help="Enable int8 precision in TensorRT. "
"If calibration is required but no calibration cache is provided, this option will cause TensorRT to run "
"int8 calibration using the Polygraphy data loader to provide calibration data. ",
action="store_true",
default=None,
)
if self._strict_types_default:
trt_config_args.add_argument(
"--no-strict-types",
help="Disables strict types in TensorRT, allowing it to choose tactics outside the "
"layer precision set.",
action="store_false",
default=True,
dest="strict_types",
)
else:
trt_config_args.add_argument(
"--strict-types",
help="Enable strict types in TensorRT, forcing it to choose tactics based on the "
"layer precision set, even if another precision is faster.",
action="store_true",
default=None,
dest="strict_types",
)
trt_config_args.add_argument(
"--sparse-weights",
help="Enable optimizations for sparse weights in TensorRT",
action="store_true",
default=None,
)
trt_config_args.add_argument(
"--workspace",
metavar="BYTES",
help="Amount of memory, in bytes, to allocate for the TensorRT builder's workspace. "
"Optionally, use a `K`, `M`, or `G` suffix to indicate KiB, MiB, or GiB respectively."
"For example, `--workspace=16M` is equivalent to `--workspace=16777216`",
default=None,
)
trt_config_args.add_argument(
"--calibration-cache",
help="Path to load/save a calibration cache. "
"Used to store calibration scales to speed up the process of int8 calibration. "
"If the provided path does not yet exist, int8 calibration scales will be calculated and written to it during engine building. "
"If the provided path does exist, it will be read and int8 calibration will be skipped during engine building. ",
default=None,
)
trt_config_args.add_argument(
"--calib-base-cls",
"--calibration-base-class",
dest="calibration_base_class",
help="The name of the calibration base class to use. For example, 'IInt8MinMaxCalibrator'. ",
default=None,
)
trt_config_args.add_argument(
"--quantile",
type=float,
help="The quantile to use for IInt8LegacyCalibrator. Has no effect for other calibrator types.",
default=None,
)
trt_config_args.add_argument(
"--regression-cutoff",
type=float,
help="The regression cutoff to use for IInt8LegacyCalibrator. Has no effect for other calibrator types.",
default=None,
)
trt_config_args.add_argument(
"--timing-cache",
help="Path to load/save tactic timing cache. "
"Used to cache tactic timing information to speed up the engine building process. "
"Existing caches will be appended to with any new timing information gathered. ",
default=None,
)
replay = trt_config_args.add_mutually_exclusive_group()
replay.add_argument(
"--tactic-replay",
help="[DEPRECATED - use --load/save-tactics] Path to load/save a tactic replay file. "
"Used to record and replay tactics selected by TensorRT to provide deterministic engine builds. "
"If the provided path does not yet exist, tactics will be recorded and written to it. "
"If the provided path does exist, it will be read and used to replay previously recorded tactics. ",
default=None,
)
replay.add_argument(
"--save-tactics",
help="Path to save a tactic replay file. "
"Tactics selected by TensorRT will be recorded and stored at this location. ",
default=None,
)
replay.add_argument(
"--load-tactics",
help="Path to load a tactic replay file. "
"The tactics specified in the file will be used to override TensorRT's default selections. ",
default=None,
)
trt_config_args.add_argument(
"--tactic-sources",
help="Tactic sources to enable. This controls which libraries "
"(e.g. cudnn, cublas, etc.) TensorRT is allowed to load tactics from. "
"Values come from the names of the values in the trt.TacticSource enum, and are case-insensitive. "
"If no arguments are provided, e.g. '--tactic-sources', then all tactic sources are disabled.",
nargs="*",
default=None,
)
trt_config_args.add_argument(
"--trt-config-script",
help="Path to a Python script that defines a function that creates a "
"TensorRT IBuilderConfig. The function should take a builder and network as parameters and return a "
"TensorRT builder configuration. When this option is specified, all other config arguments are ignored. ",
default=None,
)
trt_config_args.add_argument(
"--trt-config-func-name",
help="When using a trt-config-script, this specifies the name of the function "
"that creates the config. Defaults to `load_config`. ",
default="load_config",
)
trt_config_args.add_argument(
"--trt-safety-restricted",
help="Enable safety scope checking in TensorRT",
action="store_true",
default=None,
dest="restricted",
)
trt_config_args.add_argument(
"--use-dla",
help="[EXPERIMENTAL] Use DLA as the default device type",
action="store_true",
default=None,
)
trt_config_args.add_argument(
"--allow-gpu-fallback",
help="[EXPERIMENTAL] Allow layers unsupported on the DLA to fall back to GPU. Has no effect if --dla is not set.",
action="store_true",
default=None,
)
def register(self, maker):
from polygraphy.tools.args.data_loader import DataLoaderArgs
from polygraphy.tools.args.model import ModelArgs
if isinstance(maker, ModelArgs):
self.model_args = maker
if isinstance(maker, DataLoaderArgs):
self.data_loader_args = maker
def parse(self, args):
trt_min_shapes = args_util.get(args, "trt_min_shapes", default=[])
trt_max_shapes = args_util.get(args, "trt_max_shapes", default=[])
trt_opt_shapes = args_util.get(args, "trt_opt_shapes", default=[])
default_shapes = TensorMetadata()
if self.model_args is not None:
assert hasattr(self.model_args, "input_shapes"), "ModelArgs must be parsed before TrtConfigArgs!"
default_shapes = self.model_args.input_shapes
self.profile_dicts = parse_profile_shapes(default_shapes, trt_min_shapes, trt_opt_shapes, trt_max_shapes)
self.workspace = args_util.parse_num_bytes(args_util.get(args, "workspace"))
self.tf32 = args_util.get(args, "tf32")
self.fp16 = args_util.get(args, "fp16")
self.int8 = args_util.get(args, "int8")
self.strict_types = args_util.get(args, "strict_types")
self.restricted = args_util.get(args, "restricted")
self.calibration_cache = args_util.get(args, "calibration_cache")
calib_base = args_util.get(args, "calibration_base_class")
self.calibration_base_class = None
if calib_base is not None:
calib_base = safe(assert_identifier(calib_base))
self.calibration_base_class = inline(safe("trt.{:}", inline(calib_base)))
self.quantile = args_util.get(args, "quantile")
self.regression_cutoff = args_util.get(args, "regression_cutoff")
self.sparse_weights = args_util.get(args, "sparse_weights")
self.timing_cache = args_util.get(args, "timing_cache")
tactic_replay = args_util.get(args, "tactic_replay")
self.load_tactics = args_util.get(args, "load_tactics")
self.save_tactics = args_util.get(args, "save_tactics")
if tactic_replay is not None:
mod.warn_deprecated("--tactic-replay", "--save-tactics or --load-tactics", remove_in="0.35.0")
G_LOGGER.warning("--tactic-replay is deprecated. Use either --save-tactics or --load-tactics instead.")
if os.path.exists(tactic_replay) and util.get_file_size(tactic_replay) > 0:
self.load_tactics = tactic_replay
else:
self.save_tactics = tactic_replay
tactic_sources = args_util.get(args, "tactic_sources")
self.tactic_sources = None
if tactic_sources is not None:
self.tactic_sources = []
for source in tactic_sources:
source = safe(assert_identifier(source.upper()))
source_str = safe("trt.TacticSource.{:}", inline(source))
self.tactic_sources.append(inline(source_str))
self.trt_config_script = args_util.get(args, "trt_config_script")
self.trt_config_func_name = args_util.get(args, "trt_config_func_name")
self.use_dla = args_util.get(args, "use_dla")
self.allow_gpu_fallback = args_util.get(args, "allow_gpu_fallback")
def add_trt_config_loader(self, script):
profiles = []
for (min_shape, opt_shape, max_shape) in self.profile_dicts:
profile_str = "Profile()"
for name in min_shape.keys():
profile_str += safe(
".add({:}, min={:}, opt={:}, max={:})", name, min_shape[name], opt_shape[name], max_shape[name]
).unwrap()
profiles.append(profile_str)
if profiles:
script.add_import(imports=["Profile"], frm="polygraphy.backend.trt")
profiles = safe("[\n\t{:}\n]", inline(safe(",\n\t".join(profiles))))
profile_name = script.add_loader(profiles, "profiles")
else:
profile_name = None
calibrator = None
if any(arg is not None for arg in [self.calibration_cache, self.calibration_base_class]) and not self.int8:
G_LOGGER.warning(
"Some int8 calibrator options were set, but int8 precision is not enabled. "
"Calibration options will be ignored. Please set --int8 to enable calibration. "
)
if self.int8 and self.data_loader_args is not None: # We cannot do calibration if there is no data loader.
script.add_import(imports=["Calibrator"], frm="polygraphy.backend.trt")
script.add_import(imports=["DataLoader"], frm="polygraphy.comparator")
data_loader_name = self.data_loader_args.add_data_loader(script)
if self.calibration_base_class:
script.add_import(imports=["tensorrt as trt"])
if (
self.data_loader_args.is_using_random_data()
and (not self.calibration_cache or not os.path.exists(self.calibration_cache))
and self._random_data_calib_warning
):
G_LOGGER.warning(
"Int8 Calibration is using randomly generated input data.\n"
"This could negatively impact accuracy if the inference-time input data is dissimilar "
"to the randomly generated calibration data.\n"
"You may want to consider providing real data via the --data-loader-script option."
)
calibrator = make_invocable(
"Calibrator",
data_loader=data_loader_name if data_loader_name else inline(safe("DataLoader()")),
cache=self.calibration_cache,
BaseClass=self.calibration_base_class,
quantile=self.quantile,
regression_cutoff=self.regression_cutoff,
)
algo_selector = None
if self.load_tactics is not None:
script.add_import(imports=["TacticReplayer"], frm="polygraphy.backend.trt")
algo_selector = make_invocable("TacticReplayer", replay=self.load_tactics)
elif self.save_tactics is not None:
script.add_import(imports=["TacticRecorder"], frm="polygraphy.backend.trt")
algo_selector = make_invocable("TacticRecorder", record=self.save_tactics)
if self.tactic_sources is not None:
script.add_import(imports=["tensorrt as trt"])
if self.trt_config_script is not None:
script.add_import(imports=["InvokeFromScript"], frm="polygraphy.backend.common")
config_loader_str = make_invocable(
"InvokeFromScript", self.trt_config_script, name=self.trt_config_func_name
)
else:
config_loader_str = make_invocable_if_nondefault(
"CreateTrtConfig",
max_workspace_size=self.workspace,
tf32=self.tf32,
fp16=self.fp16,
int8=self.int8,
strict_types=self.strict_types,
restricted=self.restricted,
profiles=profile_name,
calibrator=calibrator,
load_timing_cache=(
self.timing_cache if self.timing_cache and os.path.exists(self.timing_cache) else None
),
algorithm_selector=algo_selector,
sparse_weights=self.sparse_weights,
tactic_sources=self.tactic_sources,
use_dla=self.use_dla,
allow_gpu_fallback=self.allow_gpu_fallback,
)
if config_loader_str is not None:
script.add_import(imports=["CreateConfig as CreateTrtConfig"], frm="polygraphy.backend.trt")
if config_loader_str is not None:
config_loader_name = script.add_loader(config_loader_str, "create_trt_config")
else:
config_loader_name = None
return config_loader_name
def create_config(self, builder, network):
from polygraphy.backend.trt import CreateConfig
loader = util.default(args_util.run_script(self.add_trt_config_loader), CreateConfig())
return loader(builder, network)
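    # For reference, a single-profile build driven by this group might use flags like
    # the following (tensor name hypothetical):
    #   --trt-min-shapes x:[1,3,224,224] --trt-opt-shapes x:[4,3,224,224] --trt-max-shapes x:[8,3,224,224] --fp16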
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/trt/config.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.args.trt.config import *
from polygraphy.tools.args.trt.loader import *
from polygraphy.tools.args.trt.runner import *
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/trt/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod, util
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import make_invocable
@mod.export()
class TrtPluginLoaderArgs(BaseArgs):
def add_to_parser(self, parser):
trt_args = parser.add_argument_group("TensorRT Plugin Loader", "Options for TensorRT Plugin Loader")
trt_args.add_argument("--plugins", help="Path(s) of plugin libraries to load", nargs="+", default=None)
def parse(self, args):
self.plugins = args_util.get(args, "plugins")
# If plugins are present, wrap the provided loader/object with LoadPlugins
def wrap_if_plugins(self, script, loader_name):
if self.plugins:
script.add_import(imports=["LoadPlugins"], frm="polygraphy.backend.trt")
loader_str = make_invocable("LoadPlugins", plugins=self.plugins, obj=loader_name)
loader_name = script.add_loader(loader_str, "load_plugins")
return loader_name
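    # For example, with `--plugins ./libfoo.so` (hypothetical path), a loader `L` is
    # rewritten in the generated script as: LoadPlugins(plugins=['./libfoo.so'], obj=L).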
@mod.export()
class TrtNetworkLoaderArgs(BaseArgs):
def __init__(self, outputs=True):
super().__init__()
self.onnx_loader_args = None
self._outputs = outputs
def add_to_parser(self, parser):
trt_args = parser.add_argument_group("TensorRT Network Loader", "Options for TensorRT Network Loader")
trt_args.add_argument(
"--explicit-precision", help="Enable explicit precision mode", action="store_true", default=None
)
if self._outputs:
trt_args.add_argument(
"--trt-outputs",
help="Name(s) of TensorRT output(s). "
"Using '--trt-outputs mark all' indicates that all tensors should be used as outputs",
nargs="+",
default=None,
)
trt_args.add_argument(
"--trt-exclude-outputs",
help="[EXPERIMENTAL] Name(s) of TensorRT output(s) to unmark as outputs.",
nargs="+",
default=None,
)
trt_args.add_argument(
"--trt-network-func-name",
help="When using a trt-network-script instead of other model types, this specifies the name "
"of the function that loads the network. Defaults to `load_network`.",
default="load_network",
)
def register(self, maker):
from polygraphy.tools.args.model import ModelArgs
from polygraphy.tools.args.onnx.loader import OnnxLoaderArgs
from polygraphy.tools.args.trt.config import TrtConfigArgs
if isinstance(maker, ModelArgs):
self.model_args = maker
if isinstance(maker, OnnxLoaderArgs):
self.onnx_loader_args = maker
if isinstance(maker, TrtConfigArgs):
self.trt_config_args = maker
if isinstance(maker, TrtPluginLoaderArgs):
self.trt_plugin_args = maker
def check_registered(self):
assert self.model_args is not None, "ModelArgs is required!"
assert self.trt_plugin_args is not None, "TrtPluginLoaderArgs is required!"
def parse(self, args):
self.outputs = args_util.get_outputs(args, "trt_outputs")
self.explicit_precision = args_util.get(args, "explicit_precision")
self.exclude_outputs = args_util.get(args, "trt_exclude_outputs")
self.trt_network_func_name = args_util.get(args, "trt_network_func_name")
def add_trt_network_loader(self, script):
model_file = self.model_args.model_file
model_type = self.model_args.model_type
outputs = args_util.get_outputs_for_script(script, self.outputs)
if model_type == "trt-network-script":
script.add_import(imports=["InvokeFromScript"], frm="polygraphy.backend.common")
loader_str = make_invocable("InvokeFromScript", model_file, name=self.trt_network_func_name)
loader_name = script.add_loader(loader_str, "load_network")
# When loading from ONNX, we need to disable custom outputs since TRT requires dtypes on outputs, which our marking function doesn't guarantee.
elif self.onnx_loader_args is not None and self.onnx_loader_args.should_use_onnx_loader(
disable_custom_outputs=True
):
script.add_import(imports=["NetworkFromOnnxBytes"], frm="polygraphy.backend.trt")
onnx_loader = self.onnx_loader_args.add_serialized_onnx_loader(script, disable_custom_outputs=True)
loader_str = make_invocable(
"NetworkFromOnnxBytes",
self.trt_plugin_args.wrap_if_plugins(script, onnx_loader),
explicit_precision=self.explicit_precision,
)
loader_name = script.add_loader(loader_str, "parse_network_from_onnx")
else:
script.add_import(imports=["NetworkFromOnnxPath"], frm="polygraphy.backend.trt")
loader_str = make_invocable(
"NetworkFromOnnxPath",
self.trt_plugin_args.wrap_if_plugins(script, model_file),
explicit_precision=self.explicit_precision,
)
loader_name = script.add_loader(loader_str, "parse_network_from_onnx")
MODIFY_NETWORK = "ModifyNetworkOutputs"
modify_network_str = make_invocable(
MODIFY_NETWORK, loader_name, outputs=outputs, exclude_outputs=self.exclude_outputs
)
if str(modify_network_str) != str(make_invocable(MODIFY_NETWORK, loader_name)):
script.add_import(imports=[MODIFY_NETWORK], frm="polygraphy.backend.trt")
loader_name = script.add_loader(modify_network_str, "modify_network")
return loader_name
def get_network_loader(self):
return args_util.run_script(self.add_trt_network_loader)
def load_network(self):
return self.get_network_loader()()
@mod.export()
class TrtEngineSaveArgs(BaseArgs):
def __init__(self, output="output", short_opt="-o"):
super().__init__()
self._output = output
self._short_opt = short_opt
def add_to_parser(self, parser):
if self._output:
self.group = parser.add_argument_group(
"TensorRT Engine Save Options", "Options for saving TensorRT engines"
)
flag = "--{:}".format(self._output)
short = self._short_opt or flag
self.group.add_argument(
short, flag, help="Path to save the TensorRT Engine", dest="save_engine", default=None
)
def parse(self, args):
self.path = args_util.get(args, "save_engine")
def add_save_engine(self, script, loader_name):
if self.path is None:
return loader_name
script.add_import(imports=["SaveEngine"], frm="polygraphy.backend.trt")
return script.add_loader(make_invocable("SaveEngine", loader_name, path=self.path), "save_engine")
def save_engine(self, engine, path=None):
with util.TempAttrChange(self, "path", path):
loader = args_util.run_script(self.add_save_engine, engine)
return loader()
@mod.export()
class TrtEngineLoaderArgs(BaseArgs):
def __init__(self, save=False):
super().__init__()
self.trt_engine_save_args = None
self._save = save
def register(self, maker):
from polygraphy.tools.args.model import ModelArgs
from polygraphy.tools.args.trt.config import TrtConfigArgs
if isinstance(maker, ModelArgs):
self.model_args = maker
if isinstance(maker, TrtConfigArgs):
self.trt_config_args = maker
if isinstance(maker, TrtNetworkLoaderArgs):
self.trt_network_loader_args = maker
if isinstance(maker, TrtPluginLoaderArgs):
self.trt_plugin_args = maker
if self._save and isinstance(maker, TrtEngineSaveArgs):
self.trt_engine_save_args = maker
def check_registered(self):
assert self.model_args is not None, "ModelArgs is required!"
assert self.trt_plugin_args is not None, "TrtPluginLoaderArgs is required!"
assert not self._save or self.trt_engine_save_args is not None, "TrtEngineSaveArgs is required to use save=True"
def parse(self, args):
self.plugins = args_util.get(args, "plugins")
def add_trt_serialized_engine_loader(self, script):
assert self.model_args is not None, "ModelArgs is required for engine deserialization!"
script.add_import(imports=["EngineFromBytes"], frm="polygraphy.backend.trt")
script.add_import(imports=["BytesFromPath"], frm="polygraphy.backend.common")
load_engine = script.add_loader(
make_invocable("BytesFromPath", self.model_args.model_file), "load_engine_bytes"
)
return script.add_loader(
make_invocable("EngineFromBytes", self.trt_plugin_args.wrap_if_plugins(script, load_engine)),
"deserialize_engine",
)
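    # The generated script for a serialized engine is roughly (path hypothetical):
    #   load_engine_bytes = BytesFromPath('model.engine')
    #   deserialize_engine = EngineFromBytes(load_engine_bytes)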
def add_trt_build_engine_loader(self, script, network_name=None):
if network_name:
network_loader_name = network_name
else:
assert self.trt_network_loader_args is not None, "TrtNetworkLoaderArgs is required for engine building!"
network_loader_name = self.trt_network_loader_args.add_trt_network_loader(script)
assert self.trt_config_args is not None, "TrtConfigArgs is required for engine building!"
script.add_import(imports=["EngineFromNetwork"], frm="polygraphy.backend.trt")
config_loader_name = self.trt_config_args.add_trt_config_loader(script)
loader_str = make_invocable(
"EngineFromNetwork",
self.trt_plugin_args.wrap_if_plugins(script, network_loader_name),
config=config_loader_name,
save_timing_cache=self.trt_config_args.timing_cache,
)
loader_name = script.add_loader(loader_str, "build_engine")
if self.trt_engine_save_args is not None:
loader_name = self.trt_engine_save_args.add_save_engine(script, loader_name)
return loader_name
def build_engine(self, network=None):
loader = args_util.run_script(self.add_trt_build_engine_loader, network)
return loader()
def load_serialized_engine(self):
loader = args_util.run_script(self.add_trt_serialized_engine_loader)
return loader()
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/trt/loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.args.tf2onnx.loader import *
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/tf2onnx/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.logger import G_LOGGER, LogMode
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import make_invocable


@mod.export()
class Tf2OnnxLoaderArgs(BaseArgs):
    def add_to_parser(self, parser):
        tf_onnx_args = parser.add_argument_group("TensorFlow-ONNX Loader", "Options for TensorFlow-ONNX conversion")
        tf_onnx_args.add_argument("--opset", help="Opset to use when converting to ONNX", default=None, type=int)
        tf_onnx_args.add_argument(
            "--no-const-folding",
            help="Do not fold constants in the TensorFlow graph prior to conversion",
            action="store_true",
            default=None,
        )

    def register(self, maker):
        from polygraphy.tools.args.tf.loader import TfLoaderArgs

        if isinstance(maker, TfLoaderArgs):
            self.tf_loader_args = maker

    def check_registered(self):
        assert self.tf_loader_args is not None, "TfLoaderArgs is required!"

    def parse(self, args):
        self.opset = args_util.get(args, "opset")
        self.fold_constant = False if args_util.get(args, "no_const_folding") else None

    def add_to_script(self, script, suffix=None):
        G_LOGGER.verbose(
            "Attempting to load as a TensorFlow model, using TF2ONNX to convert to ONNX. "
            "If this is not correct, please specify --model-type",
            mode=LogMode.ONCE,
        )
        script.add_import(imports=["OnnxFromTfGraph"], frm="polygraphy.backend.onnx")
        loader_str = make_invocable(
            "OnnxFromTfGraph",
            self.tf_loader_args.add_to_script(script, disable_custom_outputs=True, suffix=suffix),
            opset=self.opset,
            fold_constant=self.fold_constant,
        )
        loader_name = script.add_loader(loader_str, "export_onnx_from_tf", suffix=suffix)
        return loader_name
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/tf2onnx/loader.py |
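The script that `Tf2OnnxLoaderArgs.add_to_script` generates is roughly equivalent to the sketch below. The `GraphFromFrozen` loader, the frozen-graph path, and the opset value are assumptions for illustration only.

# Rough equivalent of the emitted TF->ONNX conversion script (all inputs hypothetical).
from polygraphy.backend.onnx import OnnxFromTfGraph
from polygraphy.backend.tf import GraphFromFrozen  # assumed TF loader for this sketch

load_tf = GraphFromFrozen("model.pb")  # hypothetical frozen TensorFlow graph
export_onnx_from_tf = OnnxFromTfGraph(load_tf, opset=13, fold_constant=True)  # opset is illustrative

onnx_model = export_onnx_from_tf()  # invokes tf2onnx under the hood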
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import make_invocable


@mod.export()
class OnnxrtRunnerArgs(BaseArgs):
    def register(self, maker):
        from polygraphy.tools.args.model import ModelArgs
        from polygraphy.tools.args.onnx.loader import OnnxLoaderArgs

        if isinstance(maker, OnnxLoaderArgs):
            self.onnx_loader_args = maker
        if isinstance(maker, ModelArgs):
            self.model_args = maker

    def check_registered(self):
        assert self.onnx_loader_args is not None, "OnnxLoaderArgs is required!"
        assert self.model_args is not None, "ModelArgs is required!"

    def add_to_script(self, script):
        script.add_import(imports=["OnnxrtRunner"], frm="polygraphy.backend.onnxrt")
        if self.onnx_loader_args.should_use_onnx_loader():
            onnx_name = self.onnx_loader_args.add_serialized_onnx_loader(script)
        else:
            onnx_name = self.model_args.model_file

        script.add_import(imports=["SessionFromOnnx"], frm="polygraphy.backend.onnxrt")
        loader_name = script.add_loader(make_invocable("SessionFromOnnx", onnx_name), "build_onnxrt_session")
        script.add_runner(make_invocable("OnnxrtRunner", loader_name))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/onnxrt/runner.py |
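The runner setup that `OnnxrtRunnerArgs.add_to_script` emits corresponds roughly to this sketch; the model path, input name, and input shape are assumptions.

# Sketch of the generated ONNX-Runtime runner script (model path and I/O metadata assumed).
import numpy as np

from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx

build_onnxrt_session = SessionFromOnnx("model.onnx")  # hypothetical model path
with OnnxrtRunner(build_onnxrt_session) as runner:
    feed_dict = {"input": np.ones((1, 3, 224, 224), dtype=np.float32)}  # assumed input name/shape
    outputs = runner.infer(feed_dict)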
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.args.onnxrt.runner import *
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/onnxrt/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import make_invocable


@mod.export()
class PluginRefArgs(BaseArgs):
    def register(self, maker):
        from polygraphy.tools.args.model import ModelArgs
        from polygraphy.tools.args.onnx import OnnxLoaderArgs

        if isinstance(maker, OnnxLoaderArgs):
            self.onnx_loader_args = maker
        if isinstance(maker, ModelArgs):
            self.model_args = maker

    def check_registered(self):
        assert self.onnx_loader_args is not None, "OnnxLoaderArgs is required!"
        assert self.model_args is not None, "ModelArgs is required!"

    def add_to_script(self, script):
        script.add_import(imports=["GsFromOnnx"], frm="polygraphy.backend.onnx")
        script.add_import(imports=["PluginRefRunner"], frm="polygraphy.backend.pluginref")

        onnx_name = self.onnx_loader_args.add_onnx_loader(script)
        loader_name = script.add_loader(make_invocable("GsFromOnnx", onnx_name), "pluginref")
        script.add_runner(make_invocable("PluginRefRunner", loader_name))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/pluginref/runner.py |
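The CPU reference-runner script that `PluginRefArgs.add_to_script` emits is sketched below; the model path and input metadata are placeholders, not values from the repo.

# Sketch of the generated plugin-reference runner script (inputs hypothetical).
import numpy as np

from polygraphy.backend.onnx import GsFromOnnx, OnnxFromPath
from polygraphy.backend.pluginref import PluginRefRunner

pluginref = GsFromOnnx(OnnxFromPath("model.onnx"))  # hypothetical model path
with PluginRefRunner(pluginref) as runner:
    outputs = runner.infer({"input": np.ones((1, 3, 224, 224), dtype=np.float32)})  # assumed I/O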