python_code | repo_name | file_path
---|---|---|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.args.pluginref.runner import *
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/pluginref/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import make_invocable
@mod.export()
class TfRunnerArgs(BaseArgs):
def add_to_parser(self, parser):
tf_args = parser.add_argument_group("TensorFlow Runner", "Options for TensorFlow Inference")
tf_args.add_argument(
"--save-timeline",
help="[EXPERIMENTAL] Directory to save timeline JSON files for profiling inference (view at chrome://tracing)",
default=None,
)
def register(self, maker):
from polygraphy.tools.args.tf.config import TfConfigArgs
from polygraphy.tools.args.tf.loader import TfLoaderArgs
if isinstance(maker, TfLoaderArgs):
self.tf_loader_args = maker
if isinstance(maker, TfConfigArgs):
self.tf_config_args = maker
def check_registered(self):
assert self.tf_loader_args is not None, "TfLoaderArgs is required!"
assert self.tf_config_args is not None, "TfConfigArgs is required!"
def parse(self, args):
self.timeline_path = args_util.get(args, "save_timeline")
def add_to_script(self, script):
script.add_import(imports=["TfRunner"], frm="polygraphy.backend.tf")
graph_name = self.tf_loader_args.add_to_script(script)
config_name = self.tf_config_args.add_to_script(script)
script.add_import(imports=["SessionFromGraph"], frm="polygraphy.backend.tf")
loader_name = script.add_loader(
make_invocable("SessionFromGraph", graph_name, config=config_name), "build_tf_session"
)
script.add_runner(make_invocable("TfRunner", loader_name, timeline_path=self.timeline_path))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/tf/runner.py |
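# --- Illustrative sketch (not part of the repository) ---
# Roughly the Python that TfRunnerArgs.add_to_script() emits: a session loader
# wrapped around a graph loader, fed into a TfRunner. The frozen-graph loader,
# the "model.pb" path, and the None arguments are assumptions for this example;
# the real values come from the other TensorFlow argument groups.
from polygraphy.backend.tf import GraphFromFrozen, SessionFromGraph, TfRunner

build_tf_session = SessionFromGraph(GraphFromFrozen("model.pb"), config=None)
runner = TfRunner(build_tf_session, timeline_path=None)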
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import make_invocable_if_nondefault
@mod.export()
class TfConfigArgs(BaseArgs):
def add_to_parser(self, parser):
tf_args = parser.add_argument_group(
"TensorFlow Session Configuration", "Options for the TensorFlow Session Configuration"
)
tf_args.add_argument(
"--gpu-memory-fraction",
help="Maximum percentage of GPU memory TensorFlow can allocate per process",
type=float,
default=None,
)
tf_args.add_argument(
"--allow-growth", help="Allow GPU memory allocated by TensorFlow to grow", action="store_true", default=None
)
tf_args.add_argument(
"--xla", help="[EXPERIMENTAL] Attempt to run graph with xla", action="store_true", default=None
)
def parse(self, args):
self.gpu_memory_fraction = args_util.get(args, "gpu_memory_fraction")
self.allow_growth = args_util.get(args, "allow_growth")
self.xla = args_util.get(args, "xla")
def add_to_script(self, script):
config_loader_str = make_invocable_if_nondefault(
"CreateConfig",
gpu_memory_fraction=self.gpu_memory_fraction,
allow_growth=self.allow_growth,
use_xla=self.xla,
)
if config_loader_str is not None:
script.add_import(imports=["CreateConfig"], frm="polygraphy.backend.tf")
config_loader_name = script.add_loader(config_loader_str, "create_tf_config")
else:
config_loader_name = None
return config_loader_name
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/tf/config.py |
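# --- Illustrative sketch (not part of the repository) ---
# make_invocable_if_nondefault only emits a CreateConfig call when at least one
# option differs from its default, so a script produced with, say,
# `--gpu-memory-fraction 0.5 --allow-growth` would contain roughly the loader
# below. The specific values are assumptions for this example.
from polygraphy.backend.tf import CreateConfig

create_tf_config = CreateConfig(gpu_memory_fraction=0.5, allow_growth=True)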
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.args.tf.config import *
from polygraphy.tools.args.tf.loader import *
from polygraphy.tools.args.tf.runner import *
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/tf/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.logger import G_LOGGER, LogMode
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.script import make_invocable
@mod.export()
class TfLoaderArgs(BaseArgs):
def __init__(self, tftrt=False, artifacts=True, outputs=True):
super().__init__()
self._enable_tftrt = tftrt
self._enable_artifacts = artifacts
self._enable_outputs = outputs
def add_to_parser(self, parser):
tf_args = parser.add_argument_group("TensorFlow Loader", "Options for TensorFlow Loader")
tf_args.add_argument(
"--ckpt",
help="[EXPERIMENTAL] Name of the checkpoint to load. Required if the `checkpoint` file is missing. Should not include file extension "
"(e.g. to load `model.meta` use `--ckpt=model`)",
default=None,
)
if self._enable_outputs:
tf_args.add_argument(
"--tf-outputs",
help="Name(s) of TensorFlow output(s). "
"Using '--tf-outputs mark all' indicates that all tensors should be used as outputs",
nargs="+",
default=None,
)
if self._enable_artifacts:
tf_args.add_argument("--save-pb", help="Path to save the TensorFlow frozen graphdef", default=None)
tf_args.add_argument(
"--save-tensorboard", help="[EXPERIMENTAL] Path to save a TensorBoard visualization", default=None
)
tf_args.add_argument(
"--freeze-graph", help="[EXPERIMENTAL] Attempt to freeze the graph", action="store_true", default=None
)
if self._enable_tftrt:
tftrt_args = parser.add_argument_group(
"TensorFlow-TensorRT", "[UNTESTED] Options for TensorFlow-TensorRT Integration"
)
tftrt_args.add_argument(
"--tftrt",
"--use-tftrt",
help="[UNTESTED] Enable TF-TRT integration",
action="store_true",
default=None,
dest="tftrt",
)
tftrt_args.add_argument(
"--minimum-segment-size",
help="Minimum length of a segment to convert to TensorRT",
type=int,
default=None,
)
tftrt_args.add_argument(
"--dynamic-op",
help="Enable dynamic mode (defers engine build until runtime)",
action="store_true",
default=None,
)
def register(self, maker):
from polygraphy.tools.args.model import ModelArgs
from polygraphy.tools.args.trt import TrtConfigArgs, TrtEngineSaveArgs
from polygraphy.tools.args.trt_legacy import TrtLegacyArgs
if isinstance(maker, ModelArgs):
self.model_args = maker
if isinstance(maker, TrtConfigArgs):
self.trt_config_args = maker
if isinstance(maker, TrtLegacyArgs):
self.trt_legacy_args = maker
if isinstance(maker, TrtEngineSaveArgs):
self.trt_engine_save_args = maker
def check_registered(self):
assert self.model_args is not None, "ModelArgs is required!"
if self._enable_tftrt:
assert self.trt_config_args is not None, "TrtConfigArgs is required when tftrt is enabled!"
def parse(self, args):
self.ckpt = args_util.get(args, "ckpt")
self.outputs = args_util.get_outputs(args, "tf_outputs")
self.save_pb = args_util.get(args, "save_pb")
self.save_tensorboard = args_util.get(args, "save_tensorboard")
self.freeze_graph = args_util.get(args, "freeze_graph")
self.tftrt = args_util.get(args, "tftrt")
self.minimum_segment_size = args_util.get(args, "minimum_segment_size")
self.dynamic_op = args_util.get(args, "dynamic_op")
def add_to_script(self, script, disable_custom_outputs=None, suffix=None):
if disable_custom_outputs:
outputs = None
else:
outputs = args_util.get_outputs_for_script(script, self.outputs)
model_file = self.model_args.model_file
model_type = self.model_args.model_type
if model_type == "ckpt":
G_LOGGER.verbose(
"Loading a TensorFlow checkpoint. Please ensure you are not using the --use-subprocess flag",
mode=LogMode.ONCE,
)
script.add_import(imports=["GraphFromCkpt"], frm="polygraphy.backend.tf")
loader_id = "load_ckpt"
loader_str = make_invocable("GraphFromCkpt", model_file, self.ckpt)
elif model_type == "keras":
script.add_import(imports=["GraphFromKeras"], frm="polygraphy.backend.tf")
loader_id = "load_keras"
loader_str = make_invocable("GraphFromKeras", model_file)
elif model_type == "frozen":
script.add_import(imports=["GraphFromFrozen"], frm="polygraphy.backend.tf")
G_LOGGER.verbose(
"Attempting to load as a frozen graph. If this is not correct, please specify --model-type",
mode=LogMode.ONCE,
)
loader_id = "load_frozen"
loader_str = make_invocable("GraphFromFrozen", model_file)
else:
G_LOGGER.critical("Model type: {:} cannot be imported with TensorFlow.".format(model_type))
loader_name = script.add_loader(loader_str, loader_id, suffix=suffix)
if self.freeze_graph:
script.add_import(imports=["OptimizeGraph"], frm="polygraphy.backend.tf")
loader_name = script.add_loader(
make_invocable("OptimizeGraph", loader_name), "optimize_graph", suffix=suffix
)
if self.tftrt:
script.add_import(imports=["UseTfTrt"], frm="polygraphy.backend.tf")
loader_str = make_invocable(
"UseTfTrt",
loader_name,
max_workspace_size=self.trt_config_args.workspace,
fp16=self.trt_config_args.fp16,
int8=self.trt_config_args.int8,
max_batch_size=self.trt_legacy_args.batch_size,
is_dynamic_op=self.dynamic_op,
minimum_segment_size=self.minimum_segment_size,
)
loader_name = script.add_loader(loader_str, "use_tftrt", suffix=suffix)
MODIFY_TF = "ModifyGraphOutputs"
modify_tf_str = make_invocable(MODIFY_TF, loader_name, outputs=outputs)
if modify_tf_str != make_invocable(MODIFY_TF, loader_name):
script.add_import(imports=[MODIFY_TF], frm="polygraphy.backend.tf")
loader_name = script.add_loader(modify_tf_str, "modify_tf")
engine_dir = None
if self.tftrt:
engine_dir = self.trt_engine_save_args.path
WRITE_TF = "SaveGraph"
write_tf_str = make_invocable(
WRITE_TF, loader_name, path=self.save_pb, tensorboard_dir=self.save_tensorboard, engine_dir=engine_dir
)
if write_tf_str != make_invocable(WRITE_TF, loader_name):
script.add_import(imports=[WRITE_TF], frm="polygraphy.backend.tf")
loader_name = script.add_loader(write_tf_str, "save_tf")
return loader_name
def load_graph(self):
loader = args_util.run_script(self.add_to_script)
return loader()
| TensorRT-master | tools/Polygraphy/polygraphy/tools/args/tf/loader.py |
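# --- Illustrative sketch (not part of the repository) ---
# Approximate loader chain that TfLoaderArgs.add_to_script() builds for a frozen
# graph when custom outputs and --save-pb are given: load, remark outputs, save.
# The model path, output tensor name, and save path are assumptions for this example.
from polygraphy.backend.tf import GraphFromFrozen, ModifyGraphOutputs, SaveGraph

load_frozen = GraphFromFrozen("model.pb")
modify_tf = ModifyGraphOutputs(load_frozen, outputs=["output:0"])
save_tf = SaveGraph(modify_tf, path="frozen_out.pb", tensorboard_dir=None, engine_dir=None)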
from polygraphy.tools.data.data import Data
| TensorRT-master | tools/Polygraphy/polygraphy/tools/data/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.base import Tool
from polygraphy.tools.data.subtool import ToInput
class Data(Tool):
"""
Manipulate input and output data generated by other Polygraphy subtools.
"""
def __init__(self):
super().__init__("data")
def add_parser_args(self, parser):
subparsers = parser.add_subparsers(title="Data Manipulation Subtools", dest="subtool")
subparsers.required = True
SUBTOOLS = [
ToInput(),
]
for subtool in SUBTOOLS:
subtool.setup_parser(subparsers)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/data/data.py |
from polygraphy.tools.data.subtool.to_input import ToInput
| TensorRT-master | tools/Polygraphy/polygraphy/tools/data/subtool/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from polygraphy import util
from polygraphy.comparator import RunResults
from polygraphy.json import load_json
from polygraphy.logger import G_LOGGER
from polygraphy.tools.base import Tool
class ToInput(Tool):
"""
Combines and converts one or more input/output files generated by
Polygraphy into a single file usable with --load-inputs.
"""
def __init__(self):
super().__init__("to-input")
def add_parser_args(self, parser):
parser.add_argument(
"paths", help="Path(s) to file(s) containing input or output data from Polygraphy", nargs="+"
)
parser.add_argument("-o", "--output", help="Path to the file to generate", required=True)
def run(self, args):
inputs = []
def update_inputs(new_inputs, path):
nonlocal inputs
if inputs and len(inputs) != len(new_inputs):
G_LOGGER.warning(
"The provided files have different numbers of iterations.\n"
"Note: Inputs currently contains {:} iterations, but the data in {:} contains {:} iterations. "
"Some iterations will contain incomplete data".format(len(inputs), path, len(new_inputs))
)
# Pad to appropriate length
inputs += [OrderedDict()] * (len(new_inputs) - len(inputs))
for inp, new_inp in zip(inputs, new_inputs):
inp.update(new_inp)
for path in args.paths:
# Note: It's important we have encode/decode JSON methods registered
# for the types we care about, e.g. RunResults. Importing the class should generally guarantee this.
data = load_json(path)
if isinstance(data, RunResults):
for _, iters in data.items():
update_inputs(iters, path)
else:
if not util.is_sequence(data):
data = [data]
update_inputs(data, path)
util.save_json(inputs, args.output, description="input file containing {:} iteration(s)".format(len(inputs)))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/data/subtool/to_input.py |
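# --- Illustrative sketch (not part of the repository) ---
# The merge performed by update_inputs(): iterations from each file are zipped
# together and their tensor dicts are merged, padding with empty dicts when one
# file has more iterations than the other. Plain dicts stand in for Polygraphy's
# per-iteration entries here; the tensor names are made up.
from collections import OrderedDict

inputs = [OrderedDict({"x": 1}), OrderedDict({"x": 2})]
new_inputs = [{"y": 10}, {"y": 20}, {"y": 30}]

inputs += [OrderedDict()] * (len(new_inputs) - len(inputs))  # pad to 3 iterations
for inp, new_inp in zip(inputs, new_inputs):
    inp.update(new_inp)

# inputs is now [{"x": 1, "y": 10}, {"x": 2, "y": 20}, {"y": 30}]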
from polygraphy.tools.base.tool import *
| TensorRT-master | tools/Polygraphy/polygraphy/tools/base/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import sys
from collections import OrderedDict
import polygraphy
from polygraphy import mod
from polygraphy.logger.logger import G_LOGGER
from polygraphy.tools.args.base import BaseArgs
from polygraphy.tools.args.logger import LoggerArgs
@mod.export()
class Tool(object):
"""
Base class for CLI Tools.
"""
def __init__(self, name=None):
self.name = name
# arg_groups is a Dict[type, BaseArgs] - maps each BaseArgs subclass to its instance.
# This will be populated with instances of BaseArgs, and parsing will
# happen in __call__. Child classes can then access the instances directly
# instead of reimplementing argument parsing.
self.arg_groups = OrderedDict()
self.subscribe_args(LoggerArgs())
def subscribe_args(self, maker):
"""
Subscribe to an argument group. The argument group's arguments will be added
to the argument parser, and will be parsed prior to ``run``.
Args:
maker (BaseArgs): The argument group to register.
"""
assert isinstance(maker, BaseArgs)
m_type = type(maker)
self.arg_groups[m_type] = maker
def add_parser_args(self, parser):
# Should be implemented by child classes to add custom arguments.
pass
def setup_parser(self, subparsers=None):
"""
Set up a command-line argument parser.
Args:
subparsers (argparse.SubParsers):
A subparser group from argparse, like that returned by ``ArgumentParser.add_subparsers()``.
If this is omitted, this function will generate a new ``ArgumentParser`` instance.
Defaults to None.
Returns:
argparse.ArgumentParser:
The newly created parser if ``subparsers`` is not provided, or the newly created subparser otherwise.
"""
assert self.__doc__, "No help output was provided for this tool!"
allow_abbrev = all(not maker.disable_abbrev for maker in self.arg_groups.values())
if subparsers is not None:
parser = subparsers.add_parser(
self.name, help=self.__doc__, add_help=True, description=self.__doc__, allow_abbrev=allow_abbrev
)
parser.set_defaults(subcommand=self)
else:
parser = argparse.ArgumentParser(add_help=True, description=self.__doc__, allow_abbrev=allow_abbrev)
for maker in self.arg_groups.values():
# Register each maker with every other maker
for other_maker in self.arg_groups.values():
maker.register(other_maker)
maker.check_registered()
# This must be done after registration, since some argument groups
# may conditionally define arguments based on what other groups are present.
for maker in self.arg_groups.values():
maker.add_to_parser(parser)
try:
self.add_parser_args(parser)
except Exception as err:
G_LOGGER.internal_error(
"Could not register tool argument parser for: {:}\nNote: Error was: {:}".format(self.name, err)
)
return parser
def run(self, args):
raise NotImplementedError("run() must be implemented by child classes")
def __call__(self, args):
"""
Calls this tool with the specified arguments.
Args:
args (Namespace):
The namespace returned by ``parse_args()`` or ``parse_known_args()``.
"""
for maker in self.arg_groups.values():
maker.parse(args)
G_LOGGER.module_info(polygraphy)
return self.run(args)
def main(self):
"""
Set up and run this tool. This function serves as a replacement for a manually
defined ``main`` method.
Runs ``sys.exit()`` with the status code returned by ``run``. If ``run`` does
not return anything, always exits with ``0`` (success).
"""
parser = self.setup_parser()
args = parser.parse_args()
sys.exit(self.__call__(args))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/base/tool.py |
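# --- Illustrative sketch (not part of the repository) ---
# A minimal Tool subclass showing the contract the base class expects: a class
# docstring (setup_parser() asserts one exists and uses it as help text),
# optional add_parser_args(), and run(). The "hello" tool and its --name flag
# are made up for this example.
from polygraphy.tools.base import Tool


class Hello(Tool):
    """
    Print a greeting.
    """

    def __init__(self):
        super().__init__("hello")

    def add_parser_args(self, parser):
        parser.add_argument("--name", default="world")

    def run(self, args):
        print("Hello, {:}!".format(args.name))
        return 0


if __name__ == "__main__":
    Hello().main()  # parses sys.argv and exits with run()'s return code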
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from polygraphy import mod
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import (
DataLoaderArgs,
ModelArgs,
OnnxLoaderArgs,
OnnxSaveArgs,
OnnxShapeInferenceArgs,
Tf2OnnxLoaderArgs,
TfLoaderArgs,
TrtConfigArgs,
TrtEngineLoaderArgs,
TrtEngineSaveArgs,
TrtNetworkLoaderArgs,
TrtPluginLoaderArgs,
)
from polygraphy.tools.base import Tool
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
trt_backend = mod.lazy_import("polygraphy.backend.trt")
class Convert(Tool):
"""
Convert models to other formats.
"""
def __init__(self):
super().__init__("convert")
self.subscribe_args(ModelArgs(model_required=True))
self.subscribe_args(TfLoaderArgs(artifacts=False))
self.subscribe_args(Tf2OnnxLoaderArgs())
self.subscribe_args(OnnxShapeInferenceArgs())
self.subscribe_args(OnnxLoaderArgs())
self.subscribe_args(OnnxSaveArgs(output=False))
self.subscribe_args(DataLoaderArgs()) # For int8 calibration
self.subscribe_args(TrtConfigArgs())
self.subscribe_args(TrtPluginLoaderArgs())
self.subscribe_args(TrtNetworkLoaderArgs())
self.subscribe_args(TrtEngineLoaderArgs())
self.subscribe_args(TrtEngineSaveArgs(output=False))
def add_parser_args(self, parser):
parser.add_argument("-o", "--output", help="Path to save the converted model", required=True)
parser.add_argument(
"--convert-to",
help="The format to attempt to convert the model to."
"'onnx-like-trt-network' is EXPERIMETNAL and converts a TensorRT network to a format usable for visualization. "
"See 'OnnxLikeFromNetwork' for details. ",
choices=["onnx", "trt", "onnx-like-trt-network"],
)
onnx_args = self.arg_groups[OnnxLoaderArgs].group
onnx_args.add_argument(
"--fp-to-fp16",
help="Convert all floating point tensors in an ONNX model to 16-bit precision. "
"This is *not* needed in order to use TensorRT's fp16 precision, but may be useful for other backends. "
"Requires onnxmltools. ",
action="store_true",
default=None,
)
def run(self, args):
if not args.convert_to:
_, ext = os.path.splitext(args.output)
if ext not in ModelArgs.EXT_MODEL_TYPE_MAPPING:
G_LOGGER.critical(
"Could not automatically determine model type based on output path: {:}\n"
"Please specify the desired output format with --convert-to".format(args.output)
)
convert_type = ModelArgs.ModelType(ModelArgs.EXT_MODEL_TYPE_MAPPING[ext])
elif args.convert_to == "onnx-like-trt-network":
convert_type = "onnx-like-trt-network"
else:
CONVERT_TO_MODEL_TYPE_MAPPING = {"onnx": "onnx", "trt": "engine"}
convert_type = ModelArgs.ModelType(CONVERT_TO_MODEL_TYPE_MAPPING[args.convert_to])
if convert_type == "onnx-like-trt-network":
onnx_like = trt_backend.onnx_like_from_network(self.arg_groups[TrtNetworkLoaderArgs].get_network_loader())
onnx_backend.save_onnx(onnx_like, args.output)
elif convert_type.is_onnx():
model = self.arg_groups[OnnxLoaderArgs].load_onnx()
if args.fp_to_fp16:
model = onnx_backend.convert_to_fp16(model)
self.arg_groups[OnnxSaveArgs].save_onnx(model, args.output)
elif convert_type.is_trt():
with self.arg_groups[TrtEngineLoaderArgs].build_engine() as engine:
self.arg_groups[TrtEngineSaveArgs].save_engine(engine, args.output)
else:
G_LOGGER.critical("Cannot convert to model type: {:}".format(convert_type))
| TensorRT-master | tools/Polygraphy/polygraphy/tools/convert/convert.py |
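# --- Illustrative sketch (not part of the repository) ---
# How `convert` picks the target format when --convert-to is omitted: the output
# extension is looked up in ModelArgs.EXT_MODEL_TYPE_MAPPING. The mapping below
# is a trimmed stand-in for illustration, not the real table.
import os

EXT_MODEL_TYPE_MAPPING = {".onnx": "onnx", ".engine": "engine", ".plan": "engine"}


def infer_convert_type(output_path):
    _, ext = os.path.splitext(output_path)
    if ext not in EXT_MODEL_TYPE_MAPPING:
        raise ValueError("Please specify the desired output format with --convert-to")
    return EXT_MODEL_TYPE_MAPPING[ext]


assert infer_convert_type("model.engine") == "engine"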
from polygraphy.tools.convert.convert import Convert
| TensorRT-master | tools/Polygraphy/polygraphy/tools/convert/__init__.py |
from polygraphy.tools.debug.debug import Debug
| TensorRT-master | tools/Polygraphy/polygraphy/tools/debug/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.tools.base import Tool
from polygraphy.tools.debug.subtool import Build, DiffTactics, Precision, Reduce, Repeat
class Debug(Tool):
"""
[EXPERIMENTAL] Debug model accuracy issues.
"""
def __init__(self):
super().__init__("debug")
def add_parser_args(self, parser):
subparsers = parser.add_subparsers(title="Debug Subtools", dest="subtool")
subparsers.required = True
SUBTOOLS = [
Build(),
Precision(),
DiffTactics(),
Reduce(),
Repeat(),
]
for subtool in SUBTOOLS:
subtool.setup_parser(subparsers)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/debug/debug.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.logger import G_LOGGER
from polygraphy.tools.debug.subtool.base import BaseCheckerSubtool
class Build(BaseCheckerSubtool):
"""
Repeatedly build an engine to isolate flaky behavior, sorting generated artifacts
into `good` and `bad` directories.
Each iteration will generate an engine called 'polygraphy_debug.engine' in the current directory.
"""
def __init__(self):
super().__init__("build")
def add_parser_args(self, parser):
parser.add_argument(
"--until",
required=True,
help="Controls when to stop running. "
"Choices are: ['good', 'bad', int]. 'good' will keep running until the first 'good' run. "
"'bad' will run until the first 'bad' run. An integer can be specified to run a set number of iterations. ",
)
def setup(self, args, network):
try:
self.until = int(args.until) - 1
except:
self.until = args.until
if self.until not in ["good", "bad"]:
G_LOGGER.critical("--until value must be an integer, 'good', or 'bad', but was: {:}".format(args.until))
def stop(self, index, success):
if self.until == "good":
return success
elif self.until == "bad":
return not success
return index >= self.until
| TensorRT-master | tools/Polygraphy/polygraphy/tools/debug/subtool/build.py |
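# --- Illustrative sketch (not part of the repository) ---
# The --until handling in Build.setup()/stop(): an integer means "run N
# iterations", while 'good'/'bad' mean "stop at the first passing/failing run".
# Standalone re-implementation of that logic for illustration only.
def parse_until(value):
    try:
        return int(value) - 1  # stop() compares a zero-based index against this
    except ValueError:
        return value  # 'good' or 'bad'


def should_stop(until, index, success):
    if until == "good":
        return success
    elif until == "bad":
        return not success
    return index >= until


assert should_stop(parse_until("3"), index=2, success=False)  # stops after 3 runs
assert should_stop(parse_until("bad"), index=0, success=False)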
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
from polygraphy import mod
from polygraphy.logger.logger import G_LOGGER
from polygraphy.tools import util as tools_util
from polygraphy.tools.args import DataLoaderArgs, ModelArgs, OnnxLoaderArgs, OnnxSaveArgs, OnnxShapeInferenceArgs
from polygraphy.tools.base import Tool
from polygraphy.tools.debug.subtool.artifact_sorter import ArtifactSorterArgs
gs = mod.lazy_import("onnx_graphsurgeon")
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
onnx_util = mod.lazy_import("polygraphy.backend.onnx.util")
class MarkerBase(object):
"""
Controls how layers are marked for reduction.
"""
def __init__(self, num_nodes, node_index):
self.num_nodes = num_nodes
self.iteration = 0
self.node_index = node_index
# The node index value that leads to the fewest nodes while still failing.
self.best_bad_node_index = None
self._least_bad_nodes = self.num_nodes + 1
# Maps num_nodes to node_index for every success. At the end, we can figure out which one is the
# highest value that's still smaller than _least_bad_nodes.
self._good_node_indices = {}
self.best_good_node_index = None
def step(self, success, num_nodes):
self.iteration += 1
if not success and num_nodes <= self._least_bad_nodes:
self._least_bad_nodes = num_nodes
self.best_bad_node_index = self.node_index
if success:
self._good_node_indices[num_nodes] = self.node_index
def _clamp(self, x, min_val, max_val):
return max(min(x, max_val), min_val)
def finish(self):
# Find the node index whose successful run had the largest node count that is still less than _least_bad_nodes.
# Failing that, use the smallest successful subgraph (which will always be >= _least_bad_nodes)
def split_good(cond):
return {num: idx for num, idx in self._good_node_indices.items() if cond(num)}
max_smaller_graph = split_good(lambda num: num < self._least_bad_nodes)
min_larger_graph = split_good(lambda num: num >= self._least_bad_nodes)
if max_smaller_graph:
self.best_good_node_index = max_smaller_graph[max(max_smaller_graph)]
elif min_larger_graph:
self.best_good_node_index = min_larger_graph[min(min_larger_graph)]
class LinearMarker(MarkerBase):
def __init__(self, num_nodes, invert=False):
super().__init__(num_nodes, node_index=num_nodes - 1 if not invert else 0)
self.invert = invert
def step(self, success, num_nodes):
super().step(success, num_nodes)
self.node_index += -1 if not self.invert else 1
return self.node_index
def stop(self):
return (self.node_index < 0) or (self.node_index >= self.num_nodes)
def remaining(self):
return self.num_nodes - self.iteration
class BisectMarker(MarkerBase):
def __init__(self, num_nodes, invert=False):
# Assume the original model doesn't work, and start right in the middle.
super().__init__(num_nodes, node_index=num_nodes // 2)
self.good = 0
self.bad = self.num_nodes
if invert:
self.good, self.bad = self.bad, self.good
# Take a step in bisection.
# This will return the index of the next node to try depending on the status of the previous run.
def step(self, success, num_nodes):
super().step(success, num_nodes)
if success:
self.good = self.node_index
round_func = math.ceil
else:
self.bad = self.node_index
round_func = math.floor
self.node_index = round_func((self.good + self.bad) / 2.0)
return self.node_index
def stop(self):
return abs(self.good - self.bad) <= 1
def remaining(self):
return int(math.log2(self.num_nodes) - self.iteration)
class Reduce(Tool):
"""
[EXPERIMENTAL] Reduce a failing ONNX model to the minimum set of nodes that cause the failure.
Each iteration will generate an ONNX model called 'polygraphy_debug.onnx' in the current directory.
"""
def __init__(self):
super().__init__("reduce")
self.subscribe_args(ArtifactSorterArgs("polygraphy_debug.onnx", prefer_artifacts=False))
self.subscribe_args(ModelArgs(model_required=True, inputs="--model-inputs", model_type="onnx"))
self.subscribe_args(OnnxSaveArgs())
self.subscribe_args(OnnxShapeInferenceArgs(default=True, enable_force_fallback=True))
self.subscribe_args(OnnxLoaderArgs(output_prefix=None))
self.subscribe_args(DataLoaderArgs()) # For fallback shape inference
def add_parser_args(self, parser):
parser.add_argument(
"--min-good",
"--minimal-good",
dest="min_good",
help="Path at which to save an ONNX model close in size to the reduced model "
"that does not have the failure. This is not guaranteed to be generated.",
)
disable_passes = parser.add_mutually_exclusive_group()
disable_passes.add_argument(
"--no-reduce-inputs",
help="Do not attempt to change the graph inputs to reduce the model further. "
"'reduce' will then only attempt to find the earliest failing outputs. ",
action="store_false",
dest="reduce_inputs",
)
disable_passes.add_argument(
"--no-reduce-outputs",
help="Do not attempt to change the graph outputs to reduce the model further. "
"'reduce' will then only attempt to find the latest failing inputs. ",
action="store_false",
dest="reduce_outputs",
)
parser.add_argument(
"--mode",
help="Strategy to use to iteratively remove nodes from the model. "
"'bisect' will use binary search, and 'linear' will delete one node at a time. "
"'linear' mode may be significantly slower, but can offer better results in models with branches. "
"One strategy is to use 'bisect' first, and then further reduce the result with 'linear'. ",
choices=["bisect", "linear"],
default="bisect",
)
def run(self, args):
if not self.arg_groups[OnnxSaveArgs].path and not args.min_good:
G_LOGGER.critical(
"--output (where to write the reduced model) and/or "
"--min-good (where to write a reduced model that passes) must be provided!"
)
model = self.arg_groups[OnnxLoaderArgs].load_onnx()
num_orig_nodes = len(model.graph.node)
# When --model-input-shapes are set, we need to override the shapes in the model, and then run
# shape inference to figure out the new shapes of intermediate tensors.
user_input_metadata = self.arg_groups[ModelArgs].input_shapes
if user_input_metadata:
model = gs.export_onnx(
tools_util.override_input_shapes(onnx_backend.gs_from_onnx(model), user_input_metadata)
)
if self.arg_groups[OnnxShapeInferenceArgs].do_shape_inference:
model = onnx_backend.infer_shapes(model)
# Lower Constant nodes into Constant tensors
# If we don't do this, the outputs of Constant nodes may be incorrectly marked
# as variable inputs. Further, fallback shape inference does not apply to Constant nodes.
GRAPH = onnx_util.lower_constant_nodes(onnx_backend.gs_from_onnx(model))
_layerwise_outputs = None
_layerwise_meta = None
# Get metadata inferred by fallback shape inference. If fallback shape inference was
# never run, then this function runs it.
def layerwise(model, include_data=False):
nonlocal _layerwise_outputs, _layerwise_meta
if _layerwise_outputs is None or _layerwise_meta is None:
G_LOGGER.info(
"Running inference with ONNX-Runtime to determine metadata for intermediate tensors.\n"
"This will cause intermediate models to have static shapes."
)
_layerwise_outputs, _layerwise_meta = self.arg_groups[OnnxShapeInferenceArgs].fallback_inference(model)
return _layerwise_outputs if include_data else _layerwise_meta
if self.arg_groups[OnnxShapeInferenceArgs].force_fallback:
G_LOGGER.info("Freezing shapes in the model according to values determined by fallback shape inference")
onnx_util.set_shapes_from_layerwise_meta(GRAPH, layerwise(model))
def fix_graph(graph, model):
"""
Fix the graph so it is valid ONNX.
"""
def fix_tensor_metadata(tensors, fix_shape=True):
for tensor in tensors:
if not tensor.shape and fix_shape:
tensor.shape = layerwise(model)[tensor.name].shape
if not tensor.dtype:
tensor.dtype = layerwise(model)[tensor.name].dtype
fix_tensor_metadata(graph.inputs)
fix_tensor_metadata(graph.outputs, fix_shape=False)
# If we're marking inputs, there may be cases where some other inputs are required - for
# example, if the model is branchy. If, after cleanup(), there are any Variable tensors in
# the graph without inputs, we'll replace them with constants and fold them away.
tensor_map = graph.tensors()
needs_const_fold = False
for tensor in tensor_map.values():
if isinstance(tensor, gs.Variable) and not tensor.inputs and tensor not in graph.inputs:
needs_const_fold = True
G_LOGGER.info("Freezing model input: {:}".format(tensor))
tensor.to_constant(layerwise(model, include_data=True)[tensor.name])
if needs_const_fold:
G_LOGGER.info("Folding constants to remove extraneous subgraphs")
graph.fold_constants().cleanup()
return graph
def mark_io(graph, attr, tensors, filter_const=True):
if filter_const:
tensors = [t for t in tensors if not isinstance(t, gs.Constant)]
if not tensors:
G_LOGGER.warning(
"No non-constant tensors are available to mark. "
"Try folding constants in the model with `polygraphy surgeon sanitize --fold-constants`"
)
setattr(graph, attr, tensors)
G_LOGGER.info("Marking model {attr}: {:}".format(getattr(graph, attr), attr=attr))
return graph
def names_from_tensors(tensors):
return [t.name for t in tensors]
def lookup_tensors(graph, names):
tensor_map = graph.tensors()
return [tensor_map[name] for name in names]
# Bisect using the given marker, and modifying the given graph attribute.
# attr should be one of ["inputs", "outputs"].
# filter_const indicates whether to filter out constant tensors before updating graph I/O.
def bisect_io(graph, model, marker, attr, filter_const=True):
G_LOGGER.start("Reducing model {:}".format(attr))
iter_graph = graph
while not marker.stop():
G_LOGGER.start(
"RUNNING | Iteration {:} | Approximately {:} iteration(s) remaining".format(
marker.iteration + 1, marker.remaining()
)
)
iter_graph = graph.copy() # This is a very light-weight copy of the entire graph.
with G_LOGGER.indent():
io_list = list(getattr(iter_graph.nodes[marker.node_index], attr))
mark_io(iter_graph, attr, io_list, filter_const)
iter_graph.cleanup()
self.arg_groups[OnnxSaveArgs].save_onnx(
gs.export_onnx(fix_graph(iter_graph, model)), self.arg_groups[ArtifactSorterArgs].iter_artifact
)
num_nodes = len(iter_graph.nodes)
success = self.arg_groups[ArtifactSorterArgs].sort_artifacts(
marker.iteration + 1, suffix="_reduce_{:}_{:}_nodes".format(attr, num_nodes)
)
marker.step(success, num_nodes)
marker.finish()
G_LOGGER.finish("Finished reducing model {attr}".format(attr=attr))
# Find minimal good/bad inputs/outputs, falling back to existing graph inputs/outputs.
def get_io(index):
if index is None:
return names_from_tensors(getattr(graph, attr))
return names_from_tensors(list(getattr(graph.nodes[index], attr)))
return get_io(marker.best_bad_node_index), get_io(marker.best_good_node_index)
# We reduce the model in 2 phases:
# 1. Find the earliest output nodes that cause a failure.
# 2. Find the latest input nodes that cause a failure.
MarkerType = BisectMarker if args.mode == "bisect" else LinearMarker
bad_graph = GRAPH.copy()
good_graph = None
if args.min_good:
good_graph = GRAPH.copy()
# == Phase 1 ==
if args.reduce_outputs:
out_marker = MarkerType(len(bad_graph.nodes))
bad_outputs, good_outputs = bisect_io(bad_graph, model, out_marker, attr="outputs", filter_const=False)
bad_graph = mark_io(bad_graph, "outputs", lookup_tensors(bad_graph, bad_outputs)).cleanup()
if good_graph is not None:
good_graph = mark_io(
good_graph, "outputs", lookup_tensors(good_graph, good_outputs)
) # Defer cleanup where possible.
# Export the model with the reduced outputs so that reducing inputs is faster.
model = gs.export_onnx(fix_graph(bad_graph, model))
# == Phase 2 ==
if args.reduce_inputs:
in_marker = MarkerType(len(bad_graph.nodes), invert=True)
bad_inputs, good_inputs = bisect_io(bad_graph, model, in_marker, attr="inputs")
bad_graph = mark_io(bad_graph, "inputs", lookup_tensors(bad_graph, bad_inputs)).cleanup()
if good_graph is not None:
good_graph = mark_io(
good_graph, "inputs", lookup_tensors(good_graph, good_inputs)
) # Defer cleanup where possible.
# == Write Bad Model ==
reduced_model = gs.export_onnx(fix_graph(bad_graph, model))
if self.arg_groups[OnnxSaveArgs].path:
num_reduced_nodes = len(reduced_model.graph.node)
if (
float(num_reduced_nodes) / float(num_orig_nodes) >= 0.25
and num_reduced_nodes > 1
and args.mode == "bisect"
):
G_LOGGER.warning(
"It looks like this model could potentially be reduced further.\n"
"You may want to reduce {:} again using --mode=linear. ".format(self.arg_groups[OnnxSaveArgs].path)
)
G_LOGGER.info("Minimum Bad Model:\n{:}\n\n".format(onnx_util.str_from_onnx(reduced_model, mode="none")))
self.arg_groups[OnnxSaveArgs].save_onnx(reduced_model)
# == Write Good Model ==
if good_graph is not None:
min_good_model = gs.export_onnx(fix_graph(good_graph.cleanup(), model))
if min_good_model == reduced_model:
G_LOGGER.warning(
"Could not find a minimal model close in size to the reduced model that does not cause a failure."
)
else:
G_LOGGER.info(
"Minimum Good Model:\n{:}\n\n".format(onnx_util.str_from_onnx(min_good_model, mode="none"))
)
self.arg_groups[OnnxSaveArgs].save_onnx(min_good_model, args.min_good)
| TensorRT-master | tools/Polygraphy/polygraphy/tools/debug/subtool/reduce.py |
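# --- Illustrative sketch (not part of the repository) ---
# The search strategy behind BisectMarker in `debug reduce`: binary search over
# node indices, converging on the boundary where the failure first reproduces.
# The fail() predicate below is a stand-in for exporting the subgraph and
# running the --check command on it.
import math


def bisect(num_nodes, fail):
    good, bad = 0, num_nodes
    node_index = num_nodes // 2
    while abs(good - bad) > 1:
        if fail(node_index):
            bad = node_index
            node_index = math.floor((good + bad) / 2.0)
        else:
            good = node_index
            node_index = math.ceil((good + bad) / 2.0)
    return bad  # first index at which the failure still reproduces


# Example: pretend any subgraph cut at node 13 or later fails.
assert bisect(20, fail=lambda idx: idx >= 13) == 13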
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
from collections import OrderedDict, defaultdict
from polygraphy import util
from polygraphy import mod
from polygraphy.logger import G_LOGGER
from polygraphy.tools.base import Tool
algorithm_selector = mod.lazy_import("polygraphy.backend.trt.algorithm_selector")
class DiffTactics(Tool):
"""
Determine potentially bad tactics given sets of good and bad tactic
replay files.
"""
def __init__(self):
super().__init__("diff-tactics")
def add_parser_args(self, parser):
parser.add_argument(
"--dir",
help="A directory containing good and bad tactic replay files. "
"By default, this tool will search for files in directories called 'good' and 'bad'",
default="",
)
parser.add_argument(
"--good",
help="A directory containing good tactic replay files or a single good tactic replay file. ",
default=None,
)
parser.add_argument(
"--bad",
help="A directory containing bad tactic replay files or a single bad tactic replay file. ",
default=None,
)
def run(self, args):
if args.dir is None and (args.good is None or args.bad is None):
G_LOGGER.critical("Either `--dir`, or both `--good` and `--bad` must be specified.")
def load_tactics(dirpath):
"""
Load all tactic replays from the specified directory into a single dictionary.
Args:
dirpath (str): Directory containing zero or more tactic replay files.
Returns:
dict[str, Set[polygraphy.backend.trt.algorithm_selector.Algorithm]]:
Maps layer names to the set of algorithms present in the tactic replays.
"""
def try_load_replay(path):
try:
return algorithm_selector.TacticReplayData.load(path)
except:
return None
tactics = defaultdict(set)
replay_paths = []
search_paths = (
glob.iglob(os.path.join(dirpath, "**"), recursive=True) if os.path.isdir(dirpath) else [dirpath]
)
for path in search_paths:
replay = try_load_replay(path)
if replay is None:
G_LOGGER.verbose("{:} does not look like a tactic replay file, skipping.".format(path))
continue
replay_paths.append(path)
for name, algo in replay.items():
tactics[name].add(algo)
return tactics, replay_paths
good_dir = util.default(args.good, os.path.join(args.dir, "good"))
good_tactics, good_paths = load_tactics(good_dir)
G_LOGGER.info("Loaded {:} good tactic replays.".format(len(good_paths)))
G_LOGGER.verbose("Good tactic replays: {:}".format(good_paths))
bad_dir = util.default(args.bad, os.path.join(args.dir, "bad"))
bad_tactics, bad_paths = load_tactics(bad_dir)
G_LOGGER.info("Loaded {:} bad tactic replays.".format(len(bad_paths)))
G_LOGGER.verbose("Bad tactic replays: {:}".format(bad_paths))
# Walk bad tactics and remove all the known good tactics.
potential_bad_tactics = OrderedDict()
for name, algo_set in bad_tactics.items():
if name in good_tactics:
algo_set -= good_tactics[name]
if algo_set:
potential_bad_tactics[name] = algo_set
if potential_bad_tactics:
G_LOGGER.info("Found potentially bad tactics:")
for name, algo_set in potential_bad_tactics.items():
algo_set_str = list(map(str, algo_set))
G_LOGGER.info("Layer: {:}\n\tAlgorithms: {:}".format(name, algo_set_str))
else:
G_LOGGER.info("Could not determine potentially bad tactics. Try generating more tactic replay files?")
| TensorRT-master | tools/Polygraphy/polygraphy/tools/debug/subtool/diff_tactics.py |
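# --- Illustrative sketch (not part of the repository) ---
# The core of `debug diff-tactics`: for each layer, subtract the set of
# algorithms seen in any good replay from those seen in bad replays; whatever
# remains is a suspect tactic. Layer and algorithm names here are made up.
from collections import OrderedDict

good_tactics = {"conv1": {"algo_a", "algo_b"}, "fc1": {"algo_c"}}
bad_tactics = {"conv1": {"algo_a", "algo_x"}, "fc1": {"algo_c"}}

potential_bad = OrderedDict()
for name, algo_set in bad_tactics.items():
    algo_set = algo_set - good_tactics.get(name, set())
    if algo_set:
        potential_bad[name] = algo_set

assert potential_bad == {"conv1": {"algo_x"}}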
from polygraphy.tools.debug.subtool.diff_tactics import DiffTactics
from polygraphy.tools.debug.subtool.build import Build
from polygraphy.tools.debug.subtool.precision import Precision
from polygraphy.tools.debug.subtool.reduce import Reduce
from polygraphy.tools.debug.subtool.repeat import Repeat
| TensorRT-master | tools/Polygraphy/polygraphy/tools/debug/subtool/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import contextlib
import os
import re
import shutil
import subprocess as sp
import time
from polygraphy import util
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import util as args_util
from polygraphy.tools.args.base import BaseArgs
class ArtifactSorterArgs(BaseArgs):
def __init__(self, iter_art_default=None, prefer_artifacts=True, enable_iter_art=True):
assert (
iter_art_default or not enable_iter_art
), "Must provide iter_art_default if intermediate artifact is enabled"
super().__init__(disable_abbrev=True)
self._iter_art_default = iter_art_default
self._prefer_artifacts = prefer_artifacts
self._enable_iter_art = enable_iter_art
def add_to_parser(self, parser):
artifact_sorter_args = parser.add_argument_group("Artifact Sorting", "Options for sorting artifacts")
artifact_sorter_args.add_argument(
"--artifacts",
help="Path(s) of artifacts to sort. "
"These will be moved into 'good' and 'bad' directories based on the exit status of "
"the `--check` command and suffixed with an iteration number, timestamp and return code. ",
nargs="+",
)
artifact_sorter_args.add_argument(
"--art-dir",
"--artifacts-dir",
metavar="DIR",
dest="artifacts_dir",
help="The directory in which to move artifacts and sort them into 'good' and 'bad'. ",
)
artifact_sorter_args.add_argument(
"--check",
"--check-inference",
dest="check",
help="A command to check the model. "
"The command should return an exit status of 0 for the run to be considered 'good'. "
"Non-zero exit statuses are treated as 'bad' runs.",
required=True,
nargs=argparse.REMAINDER,
)
fail_codes = artifact_sorter_args.add_mutually_exclusive_group()
fail_codes.add_argument(
"--fail-code",
"--fail-returncode",
dest="fail_codes",
help="The return code(s) from the --check command to count as failures. "
"If this is provided, any other return code will be counted as a success. ",
nargs="+",
default=None,
type=int,
)
fail_codes.add_argument(
"--ignore-fail-code",
"--ignore-fail-returncode",
dest="ignore_fail_codes",
help="The return code(s) from the --check command to ignore as failures. ",
nargs="+",
default=None,
type=int,
)
artifact_sorter_args.add_argument(
"--fail-regex",
dest="fail_regex",
help="Regular expression denoting an error in the check command's output. The command "
"is only considered a failure if a matching string is found in the command's output. "
"This can be useful to distinguish among multiple types of failures. "
"Can be specified multiple times to match different regular expressions, in which case any match counts as a failure. "
"When combined with --fail-code, only iterations whose return code is considered a failure are "
"checked for regular expressions.",
default=None,
nargs="+",
)
output_show = artifact_sorter_args.add_mutually_exclusive_group()
output_show.add_argument(
"--show-output",
help="Show output from the --check command even for passing iterations. "
"By default, output from passing iterations is captured. ",
action="store_true",
)
output_show.add_argument(
"--hide-fail-output",
help="Suppress output from the --check command for failing iterations. "
"By default, output from failing iterations is displayed. ",
action="store_true",
)
if self._enable_iter_art:
artifact_sorter_args.add_argument(
"--iter-artifact",
"--intermediate-artifact",
dest="iter_artifact",
help="Path to store the intermediate artifact from each iteration. "
"Defaults to: {:}".format(self._iter_art_default),
default=self._iter_art_default,
)
artifact_sorter_args.add_argument(
"--no-remove-intermediate",
help="Do not remove the intermediate artifact between iterations. "
"This allows you to exit the tool early and still have access to the intermediate artifact. ",
action="store_false",
dest="remove_intermediate",
)
artifact_sorter_args.add_argument(
"--iter-info",
"--iteration-info",
help="Path to write a JSON file containing information about "
"the current iteration. This will include an 'iteration' key specifying the current iteration. ",
dest="iteration_info",
default=None,
)
def parse(self, args):
self.iter_artifact = args_util.get(args, "iter_artifact")
if self.iter_artifact and os.path.exists(self.iter_artifact):
G_LOGGER.critical(
"{:} already exists, refusing to overwrite.\n"
"Please specify a different path for the intermediate artifact with "
"--intermediate-artifact".format(self.iter_artifact)
)
self.artifacts = util.default(args_util.get(args, "artifacts"), [])
self.output = args_util.get(args, "artifacts_dir")
self.show_output = args_util.get(args, "show_output")
self.hide_fail_output = args_util.get(args, "hide_fail_output")
self.remove_intermediate = args_util.get(args, "remove_intermediate")
self.fail_codes = args_util.get(args, "fail_codes")
self.ignore_fail_codes = args_util.get(args, "ignore_fail_codes")
self.fail_regexes = None
fail_regex = args_util.get(args, "fail_regex")
if fail_regex is not None:
self.fail_regexes = []
for regex in fail_regex:
self.fail_regexes.append(re.compile(regex))
if self.artifacts and not self.output:
G_LOGGER.critical(
"An output directory must be specified if artifacts are enabled! "
"Note: Artifacts specified were: {:}".format(self.artifacts)
)
if not self.artifacts and self._prefer_artifacts:
G_LOGGER.warning(
"`--artifacts` was not specified; No artifacts will be stored during this run! "
"Is this what you intended?"
)
self.iteration_info = args_util.get(args, "iteration_info")
self.check = args_util.get(args, "check")
self.start_date = time.strftime("%x").replace("/", "-")
self.start_time = time.strftime("%X").replace(":", "-")
def sort_artifacts(self, iteration, suffix=None):
"""
Run the check command and move artifacts into the correct subdirectory.
Args:
iteration (int):
The current iteration index. This is used to name artifacts
and display logging messages.
suffix (str):
A custom suffix to add to the artifact prior to moving it.
This will be applied in addition to the default suffix.
Returns:
bool: True if the command succeeded, False otherwise.
"""
def move_artifacts(subdir, returncode):
"""
Moves artifacts (args.artifacts) into the specified subdirectory or args.output and
appends an index and timestamp. Creates parent directories as required.
Args:
subdir (str): The destination path as a subdirectory of args.output.
returncode (int): The return code of the --check command, included in the artifact name.
"""
for art in self.artifacts:
basename, ext = os.path.splitext(os.path.basename(art))
if suffix:
basename += suffix
name = "{:}_{:}_{:}_N{:}_ret{:}{:}".format(
basename, self.start_date, self.start_time, iteration, returncode, ext
)
dest = os.path.join(self.output, subdir, name)
if not os.path.exists(art):
G_LOGGER.error(
"Artifact: {:} does not exist, skipping.\n"
"Was the artifact supposed to be generated?".format(art)
)
continue
if os.path.exists(dest):
G_LOGGER.error(
"Destination path: {:} already exists.\n"
"Refusing to overwrite. This artifact will be skipped!".format(dest)
)
continue
G_LOGGER.info("Moving {:} to {:}".format(art, dest))
dir_path = os.path.dirname(dest)
if dir_path:
dir_path = os.path.realpath(dir_path)
os.makedirs(dir_path, exist_ok=True)
shutil.move(art, dest)
def try_remove(path):
def func():
try:
os.remove(path)
except:
G_LOGGER.verbose("Could not remove: {:}".format(path))
return func
def is_success(status):
if self.ignore_fail_codes and status.returncode in self.ignore_fail_codes:
return True
has_fail_regex = None
if self.fail_regexes is not None:
output = status.stdout.decode() + status.stderr.decode()
has_fail_regex = any(regex.search(output) is not None for regex in self.fail_regexes)
if self.fail_codes is not None:
# If a fail-code is specified, then we should also check has_fail_regex if provided.
failed = status.returncode in self.fail_codes
if has_fail_regex is not None:
failed &= has_fail_regex
else:
# If a fail-code is not specified, we should trigger failures even on 0-status
# if the fail regex is found.
failed = status.returncode != 0 if has_fail_regex is None else has_fail_regex
return not failed
with contextlib.ExitStack() as stack, G_LOGGER.indent():
if self.iter_artifact and self.remove_intermediate:
stack.callback(try_remove(self.iter_artifact))
if self.iteration_info:
util.save_json({"iteration": iteration}, self.iteration_info)
stack.callback(try_remove(self.iteration_info))
G_LOGGER.info("Running check command: {:}".format(" ".join(self.check)))
status = sp.run(self.check, stdout=sp.PIPE, stderr=sp.PIPE)
success = is_success(status)
if self.show_output or (not success and not self.hide_fail_output):
stderr_log_level = G_LOGGER.WARNING if success else G_LOGGER.ERROR
G_LOGGER.info("========== CAPTURED STDOUT ==========\n{:}".format(status.stdout.decode()))
G_LOGGER.log(
"========== CAPTURED STDERR ==========\n{:}".format(status.stderr.decode()),
severity=stderr_log_level,
)
if success:
move_artifacts("good", status.returncode)
G_LOGGER.finish("PASSED | Iteration {:}".format(iteration))
return True
else:
move_artifacts("bad", status.returncode)
G_LOGGER.error("FAILED | Iteration {:}".format(iteration))
return False
| TensorRT-master | tools/Polygraphy/polygraphy/tools/debug/subtool/artifact_sorter.py |
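# --- Illustrative sketch (not part of the repository) ---
# How ArtifactSorterArgs.is_success() combines --fail-code and --fail-regex:
# with fail codes, the return code decides (further gated by any regex match);
# without them, a regex match - or any non-zero return code if no regexes are
# given - counts as a failure. Standalone re-implementation for illustration.
import re


def is_success(returncode, output, fail_codes=None, fail_regexes=None, ignore_fail_codes=None):
    if ignore_fail_codes and returncode in ignore_fail_codes:
        return True
    has_fail_regex = None
    if fail_regexes is not None:
        has_fail_regex = any(re.search(regex, output) for regex in fail_regexes)
    if fail_codes is not None:
        failed = returncode in fail_codes
        if has_fail_regex is not None:
            failed &= has_fail_regex
    else:
        failed = returncode != 0 if has_fail_regex is None else has_fail_regex
    return not failed


assert is_success(1, "ok", fail_codes=[2])  # only return code 2 counts as a failure
assert not is_success(0, "FAILED", fail_regexes=["FAILED"])  # regex triggers failure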
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
from polygraphy import mod
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import ModelArgs, TrtConfigArgs
from polygraphy.tools.debug.subtool.base import BaseCheckerSubtool
trt = mod.lazy_import("tensorrt")
trt_util = mod.lazy_import("polygraphy.backend.trt.util")
class BaseMarker(object):
def __init__(self, max_layers, direction, num_layers):
self.max_layers = max_layers
self.direction = direction
self.num_layers = num_layers
self.good = max_layers + 1 # Pretend marking all the layers gives us good accuracy.
self.iteration = 0
def select_layers(self):
self.iteration += 1
if self.direction == "forward":
G_LOGGER.info("Selecting first {:} layer(s) to run in higher precision".format(self.num_layers))
return range(0, self.num_layers)
else:
G_LOGGER.info("Selecting last {:} layer(s) to run in higher precision".format(self.num_layers))
return range(self.max_layers - self.num_layers, self.max_layers)
def success_message(self):
which_layers = "first" if self.direction == "forward" else "last"
G_LOGGER.finish(
"To achieve acceptable accuracy, try running the {:} {:} "
"layer(s) in higher precision".format(which_layers, self.good)
)
class BisectMarker(BaseMarker):
def __init__(self, max_layers, direction) -> None:
super().__init__(max_layers, direction, max_layers)
self.bad = 0
def select_layers(self, prev_success):
if prev_success:
self.good = self.num_layers
# On successes, we want num_layers to go closer to self.bad
round_func = math.floor
else:
self.bad = self.num_layers
round_func = math.ceil
self.num_layers = round_func((self.good + self.bad) / 2.0)
return super().select_layers()
def stop(self, index, success):
# If good and bad are within 1 layer of each other,
# then we already have the information we need.
if abs(self.good - self.bad) <= 1:
if self.good >= self.max_layers:
G_LOGGER.error("Could not find a configuration that satisfied accuracy requirements.")
else:
self.success_message()
return True
if index >= (self.max_layers - 1):
G_LOGGER.error("Could not find a configuration that satisfied accuracy requirements.")
return True
return False
def remaining(self):
return int(math.log2(self.max_layers) - self.iteration)
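# A rough sketch of how BisectMarker converges (illustrative, assuming a 10-layer network marked
# from the inputs): `good` starts at 11 and `bad` at 0, so the first iteration marks roughly half
# of the layers (the first 5). A passing check pulls `good` down to the marked count, a failing
# check pushes `bad` up to it, and each iteration marks the midpoint of [bad, good] until the two
# are within one layer of each other -- about log2(max_layers) iterations in total.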
class LinearMarker(BaseMarker):
def __init__(self, max_layers, direction) -> None:
super().__init__(max_layers, direction, 0)
def select_layers(self, prev_success):
if prev_success:
self.good = self.num_layers
self.num_layers += 1
return super().select_layers()
def stop(self, index, success):
if success:
self.success_message()
return True
if index >= (self.max_layers - 1):
G_LOGGER.error("Could not find a configuration that satisfied accuracy requirements.")
return True
return False
def remaining(self):
return self.max_layers - self.iteration
class Precision(BaseCheckerSubtool):
"""
Iteratively mark layers to run in a higher precision to find a
compromise between performance and quality.
Each iteration will generate an engine called 'polygraphy_debug.engine' in the current directory.
"""
def __init__(self):
super().__init__("precision", strict_types_default=True, prefer_artifacts=False)
def add_parser_args(self, parser):
parser.add_argument(
"--mode",
help="How layers are selected to run in higher precision. "
"'bisect' will use binary search, and 'linear' will iteratively mark one extra layer at a time",
choices=["bisect", "linear"],
default="bisect",
)
parser.add_argument(
"--dir",
"--direction",
help="Order in which layers are marked to run in higher precision. "
"'forward' will start marking layers from network inputs, and 'reverse' will start "
"from the network outputs",
choices=["forward", "reverse"],
default="forward",
dest="direction",
)
parser.add_argument(
"-p",
"--precision",
help="Precision to use when marking layers to run in higher precision",
choices=["fp32", "fp16"],
default="fp32",
)
def setup(self, args, network):
self.precision = {"fp32": trt.float32, "fp16": trt.float16}[args.precision]
if self.precision == trt.float16 and not self.arg_groups[TrtConfigArgs].fp16:
G_LOGGER.critical(
"Cannot mark layers to run in fp16 if it is not enabled in the builder configuration.\n"
"Please also specify `--fp16` as a command-line option"
)
if self.precision == trt.float16 and not self.arg_groups[TrtConfigArgs].int8:
G_LOGGER.warning(
"Using fp16 as the higher precision, but fp16 is also the lowest precision available. "
"Did you mean to set --int8 as well?"
)
if not any(
[
self.arg_groups[TrtConfigArgs].tf32,
self.arg_groups[TrtConfigArgs].fp16,
self.arg_groups[TrtConfigArgs].int8,
]
):
G_LOGGER.critical("Please enable at least one precision besides fp32 (e.g. --int8, --fp16, --tf32)")
if self.arg_groups[ModelArgs].model_type == "engine":
G_LOGGER.critical(
"The precision tool cannot work with engines, as they cannot be modified. "
"Please provide a different format, such as an ONNX or TensorFlow model."
)
G_LOGGER.start("Using {:} as higher precision".format(self.precision))
if args.mode == "linear":
self.layer_marker = LinearMarker(len(network), args.direction)
elif args.mode == "bisect":
self.layer_marker = BisectMarker(len(network), args.direction)
def mark_layers(self, network, indices):
EXCLUDE_LAYER_NAMES = ["CONSTANT"]
EXCLUDE_LAYERS = [getattr(trt.LayerType, attr) for attr in EXCLUDE_LAYER_NAMES if hasattr(trt.LayerType, attr)]
# First, reset, since changes from the previous call will persist.
for layer in network:
layer.reset_precision()
marked_indices = set()
for index in indices:
layer = network.get_layer(index)
def should_exclude():
has_non_execution_output = any(
not layer.get_output(i).is_execution_tensor for i in range(layer.num_outputs)
)
return layer.type in EXCLUDE_LAYERS or has_non_execution_output
if not should_exclude():
G_LOGGER.extra_verbose(
"Running layer in higher precision: {:}".format(trt_util.str_from_layer(layer, index))
)
layer.precision = self.precision
marked_indices.add(index)
G_LOGGER.verbose("Marking layer(s): {:} to run in {:} precision".format(marked_indices, self.precision))
def process_network(self, network, prev_success):
indices = list(self.layer_marker.select_layers(prev_success))
self.mark_layers(network, indices)
def stop(self, index, success):
return self.layer_marker.stop(index, success)
def remaining(self):
return self.layer_marker.remaining()
| TensorRT-master | tools/Polygraphy/polygraphy/tools/debug/subtool/precision.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.logger import G_LOGGER
from polygraphy.tools.base import Tool
from polygraphy.tools.debug.subtool.artifact_sorter import ArtifactSorterArgs
class Repeat(Tool):
"""
[EXPERIMENTAL] Run an arbitrary command repeatedly, sorting generated artifacts
into `good` and `bad` directories.
"""
def __init__(self):
super().__init__("repeat")
self.subscribe_args(ArtifactSorterArgs(enable_iter_art=False))
def add_parser_args(self, parser):
parser.add_argument(
"--until",
required=True,
help="Controls when to stop running. "
"Choices are: ['good', 'bad', int]. 'good' will keep running until the first 'good' run. "
"'bad' will run until the first 'bad' run. An integer can be specified to run a set number of iterations. ",
)
def run(self, args):
try:
until = int(args.until) - 1
except:
until = args.until
if until not in ["good", "bad"]:
G_LOGGER.critical("--until value must be an integer, 'good', or 'bad', but was: {:}".format(args.until))
def stop(index, success):
if until == "good":
return success
elif until == "bad":
return not success
return index >= until
G_LOGGER.start("Starting iterations")
num_passed = 0
num_total = 0
success = True
MAX_COUNT = 100000 # We don't want to loop forever. This many iterations ought to be enough for anybody.
for iteration in range(MAX_COUNT):
G_LOGGER.start("RUNNING | Iteration {:}".format(iteration + 1))
success = self.arg_groups[ArtifactSorterArgs].sort_artifacts(iteration + 1)
num_total += 1
if success:
num_passed += 1
if stop(iteration, success):
break
else:
G_LOGGER.warning(
"Maximum number of iterations reached: {:}.\n"
"Iteration has been halted to prevent an infinite loop!".format(MAX_COUNT)
)
G_LOGGER.finish(
"Finished {:} iteration(s) | Passed: {:}/{:} | Pass Rate: {:}%".format(
iteration + 1, num_passed, num_total, float(num_passed) * 100 / float(num_total)
)
)
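# Example invocation (illustrative only; `--until` is defined above, while `--check` and the
# artifact-related flags come from ArtifactSorterArgs and may differ between Polygraphy versions):
#
#   polygraphy debug repeat --check ./flaky_test.sh --until bad --artifacts test.log
#
# This keeps re-running ./flaky_test.sh until the first failing run, sorting test.log into a
# `bad` directory for that run and into `good` for every passing run.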
| TensorRT-master | tools/Polygraphy/polygraphy/tools/debug/subtool/repeat.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import os
from polygraphy import mod, util
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import (
DataLoaderArgs,
ModelArgs,
OnnxLoaderArgs,
OnnxShapeInferenceArgs,
TrtConfigArgs,
TrtEngineLoaderArgs,
TrtEngineSaveArgs,
TrtNetworkLoaderArgs,
TrtPluginLoaderArgs,
)
from polygraphy.tools.base import Tool
from polygraphy.tools.debug.subtool.artifact_sorter import ArtifactSorterArgs
trt_backend = mod.lazy_import("polygraphy.backend.trt")
class BaseCheckerSubtool(Tool):
def __init__(self, name, strict_types_default=None, prefer_artifacts=True):
super().__init__(name)
self.subscribe_args(ArtifactSorterArgs("polygraphy_debug.engine", prefer_artifacts=prefer_artifacts))
self.subscribe_args(ModelArgs(model_required=True, inputs=None))
self.subscribe_args(OnnxShapeInferenceArgs())
self.subscribe_args(OnnxLoaderArgs(output_prefix=None))
self.subscribe_args(DataLoaderArgs()) # For int8 calibration
self.subscribe_args(TrtConfigArgs(strict_types_default=strict_types_default))
self.subscribe_args(TrtPluginLoaderArgs())
self.subscribe_args(TrtNetworkLoaderArgs())
self.subscribe_args(TrtEngineLoaderArgs())
self.subscribe_args(TrtEngineSaveArgs(output=False))
def setup(self, args, network):
"""
Initialize a subtool.
"""
pass
def stop(self, iteration, success):
"""
Controls when to stop iteration.
Args:
iteration (int): The current iteration, starting at 0.
success (bool): Whether the check command succeeded (True) or failed (False).
Returns:
bool: Whether to stop iteration.
"""
raise NotImplementedError("Must be implemented by child classes!")
def process_network(self, network, prev_success):
"""
Process the TensorRT network prior to engine building.
Args:
network (trt.INetworkDefinition): The network to process.
prev_success (bool):
Whether the previous iteration succeeded.
This value is always True for the 0th iteration.
"""
pass
def remaining(self):
"""
Returns the estimated number of iterations remaining.
"""
pass
def run(self, args):
G_LOGGER.start("Starting iterations")
builder, network, parser = util.unpack_args(self.arg_groups[TrtNetworkLoaderArgs].load_network(), 3)
with contextlib.ExitStack() as stack:
stack.enter_context(builder)
stack.enter_context(network)
if parser:
stack.enter_context(parser)
self.setup(args, network)
num_passed = 0
num_total = 0
success = True
MAX_COUNT = 100000 # We don't want to loop forever. This many iterations ought to be enough for anybody.
for iteration in range(MAX_COUNT):
remaining = self.remaining()
G_LOGGER.start(
"RUNNING | Iteration {:}{:}".format(
iteration + 1,
" | Approximately {:} iteration(s) remaining".format(remaining)
if remaining is not None
else "",
)
)
self.process_network(network, success)
try:
engine = self.arg_groups[TrtEngineLoaderArgs].build_engine((builder, network))
except Exception as err:
G_LOGGER.warning(
"Failed to create network or engine, continuing to the next iteration.\n"
"Note: Error was: {:}".format(err)
)
G_LOGGER.internal_error("Failed to create network or engine. See warning above for details.")
success = False
else:
# Don't need to keep the engine around in memory - just serialize to disk and free it.
with engine:
self.arg_groups[TrtEngineSaveArgs].save_engine(
engine, self.arg_groups[ArtifactSorterArgs].iter_artifact
)
success = self.arg_groups[ArtifactSorterArgs].sort_artifacts(iteration + 1)
num_total += 1
if success:
num_passed += 1
if self.stop(iteration, success):
break
else:
G_LOGGER.warning(
"Maximum number of iterations reached: {:}.\n"
"Iteration has been halted to prevent an infinite loop!".format(MAX_COUNT)
)
G_LOGGER.finish(
"Finished {:} iteration(s) | Passed: {:}/{:} | Pass Rate: {:}%".format(
iteration + 1, num_passed, num_total, float(num_passed) * 100 / float(num_total)
)
)
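# A minimal sketch of how a subtool plugs into this base class (hypothetical subclass; the real
# example in this repository is the Precision subtool, which overrides all of these hooks):
#
#   class OneShot(BaseCheckerSubtool):
#       def __init__(self):
#           super().__init__("one-shot")
#
#       def stop(self, iteration, success):
#           return True  # Build the engine and run the check command exactly once.
#
# With `process_network` and `remaining` left as the default no-ops, `run()` builds a single
# engine, saves it to the default 'polygraphy_debug.engine' artifact, runs the check, and stops.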
| TensorRT-master | tools/Polygraphy/polygraphy/tools/debug/subtool/base.py |
from polygraphy.logger.logger import G_LOGGER, LogMode
| TensorRT-master | tools/Polygraphy/polygraphy/logger/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import enum
import inspect
import os
import sys
import time
import traceback
COLORED_MODULE_PRESENT = None
def has_colors():
global COLORED_MODULE_PRESENT
if COLORED_MODULE_PRESENT is None:
try:
import colored
COLORED_MODULE_PRESENT = True
except:
COLORED_MODULE_PRESENT = False
print(
"[W] 'colored' module is not installed, will not use colors when logging. "
"To enable colors, please install the 'colored' module: python3 -m pip install colored"
)
return COLORED_MODULE_PRESENT
# Context manager to apply indentation to messages
class LoggerIndent(object):
def __init__(self, logger, indent):
self.logger = logger
self.old_indent = self.logger.logging_indent
self.indent = indent
def __enter__(self):
self.logger.logging_indent = self.indent
return self
def __exit__(self, exc_type, exc_value, traceback):
self.logger.logging_indent = self.old_indent
# Context manager to temporarily set verbosity
class LoggerVerbosity(object):
def __init__(self, logger, severity):
self.logger = logger
self.old_severity = self.logger.severity
self.severity = severity
def __enter__(self):
self.logger.severity = self.severity
return self
def __exit__(self, exc_type, exc_value, traceback):
self.logger.severity = self.old_severity
class LogMode(enum.IntEnum):
"""
Specifies how messages should be logged.
"""
EACH = 0
"""Log the message each time"""
ONCE = 1
"""Log the message only once. The same message will not be logged again."""
class Logger(object):
ULTRA_VERBOSE = -20 # Cast it into the flames!
SUPER_VERBOSE = -10
EXTRA_VERBOSE = 0
VERBOSE = 10
INFO = 20
START = 22
FINISH = 28
WARNING = 30
ERROR = 40
CRITICAL = 50
SEVERITY_LETTER_MAPPING = {
ULTRA_VERBOSE: "[U]",
SUPER_VERBOSE: "[S]",
EXTRA_VERBOSE: "[X]",
VERBOSE: "[V]",
INFO: "[I]",
START: "[I]",
FINISH: "[I]",
WARNING: "[W]",
ERROR: "[E]",
CRITICAL: "[!]",
}
SEVERITY_COLOR_MAPPING = {
ULTRA_VERBOSE: "dark_gray",
SUPER_VERBOSE: "medium_violet_red",
EXTRA_VERBOSE: "medium_purple",
VERBOSE: "light_magenta",
INFO: None,
START: "light_cyan",
FINISH: "light_green",
WARNING: "light_yellow",
ERROR: "light_red",
CRITICAL: "light_red",
}
def __init__(self, severity=INFO, colors=True, letter=True, timestamp=False, line_info=False):
"""
Logger.
Args:
severity (Logger.Severity):
Messages below this severity are ignored.
colors (bool):
Whether to use colored output.
Defaults to True.
letter (bool):
                    Whether to prepend each logging message with a letter indicating its severity.
Defaults to True.
timestamp (bool):
Whether to include a timestamp in the logging output.
Defaults to False.
line_info (bool):
Whether to include file and line number information in the logging output.
Defaults to False.
            log_file (str):
                    Path to a log file to which Polygraphy's logging output should be written.
                    Note: This is not a constructor argument; assign to the ``log_file`` property
                    after construction instead. The log file will not include logging messages from
                    libraries used by Polygraphy, like TensorRT or ONNX-Runtime.
"""
self._severity = severity
self._log_path = None
self._log_file = None
self.logging_indent = 0
self.root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
self.once_logged = set()
self.colors = colors
self.letter = letter
self.timestamp = timestamp
self.line_info = line_info
self.logger_callbacks = []
@property
def log_file(self):
return self._log_path
@log_file.setter
def log_file(self, value):
self._log_path = value
dir_path = os.path.dirname(self._log_path)
if dir_path:
dir_path = os.path.realpath(dir_path)
os.makedirs(dir_path, exist_ok=True)
self._log_file = open(self._log_path, "w")
@property
def severity(self):
return self._severity
@severity.setter
def severity(self, value):
self._severity = value
for callback in self.logger_callbacks:
callback(self._severity)
def register_callback(self, callback):
"""
Registers a callback with the logger, which will be invoked when the logging severity is modified.
        The callback is also invoked once immediately, from within ``register_callback`` itself.
Args:
callback (Callable(Logger.Severity)): A callback that accepts the current logger severity.
"""
callback(self._severity)
self.logger_callbacks.append(callback)
def indent(self, level=1):
"""
Returns a context manager that indents all strings logged by the specified amount.
"""
return LoggerIndent(self, level + self.logging_indent)
def verbosity(self, severity=CRITICAL):
"""
Returns a context manager that temporarily changes the severity of the logger for its duration.
Args:
severity (Logger.Severity):
The severity to set the logger to. Defaults to Logger.CRITICAL, which will suppress all messages.
"""
return LoggerVerbosity(self, severity)
def log(self, message, severity, mode=LogMode.EACH, stack_depth=2, error_ok=False):
"""
Logs a message to stdout.
Args:
message (Union[str, Callable() -> str]):
A string or callable which returns a string of the message to log.
severity (Logger.Severity):
The severity with which to log this message. If the severity is less than
the logger's current severity, the message is suppressed. Provided callables
will not be called in that case.
mode (LogMode):
Controls how the message is logged.
See LogMode for details.
stack_depth (int):
The stack depth to use to determine file and line information.
Defaults to 2.
error_ok (bool):
Whether to suppress errors encountered while logging.
When this is True, in the event of an error, the message will not be
logged, but the logger will recover and resume execution.
When False, the logger will re-raise the exception.
"""
from polygraphy import constants, config
def process_message(message, stack_depth):
def get_prefix():
def get_line_info():
adjusted_stack_depth = stack_depth
adjusted_stack_depth += 2
module = inspect.getmodule(sys._getframe(adjusted_stack_depth))
# Handle logging from the top-level of a module.
if not module:
adjusted_stack_depth -= 1
module = inspect.getmodule(sys._getframe(adjusted_stack_depth))
filename = module.__file__
filename = os.path.relpath(filename, self.root_dir)
# If the file is not located in polygraphy, use its basename instead.
if os.pardir in filename:
filename = os.path.basename(filename)
return "[{:}:{:}] ".format(filename, sys._getframe(adjusted_stack_depth).f_lineno)
prefix = ""
if self.letter:
prefix += Logger.SEVERITY_LETTER_MAPPING[severity] + " "
if self.timestamp:
prefix += "({:}) ".format(time.strftime("%X"))
if self.line_info:
prefix += get_line_info()
return prefix
def apply_indentation(prefix, message):
message_lines = str(message).splitlines()
tab = constants.TAB * self.logging_indent
newline_tab = "\n" + tab + " " * len(prefix)
return tab + newline_tab.join([line for line in message_lines])
def apply_color(message):
if self.colors and has_colors():
import colored
color = Logger.SEVERITY_COLOR_MAPPING[severity]
return colored.stylize(message, [colored.fg(color)]) if color else message
return message
prefix = get_prefix()
message = apply_indentation(prefix, message)
return apply_color("{:}{:}".format(prefix, message))
def should_log(message):
should = severity >= self._severity
if should and mode == LogMode.ONCE:
message_hash = hash(message)
should &= message_hash not in self.once_logged
self.once_logged.add(message_hash)
return should
if not should_log(message):
return
if callable(message):
try:
message = message()
except Exception as err:
if not error_ok or config.INTERNAL_CORRECTNESS_CHECKS:
raise
message = "<Error while logging this message: {:}>".format(str(err))
message = str(message)
message = message.replace("\t", constants.TAB)
# Use the warnings module in correctness checking mode so all warnings are
# visible in the test result summary.
if config.INTERNAL_CORRECTNESS_CHECKS and severity == Logger.WARNING:
import warnings
warnings.warn(message)
message = process_message(message, stack_depth=stack_depth)
if self._log_file is not None:
self._log_file.write(message + "\n")
self._log_file.flush()
print(message, file=sys.stdout if severity < Logger.CRITICAL else sys.stderr)
def backtrace(self, depth=0, limit=None, severity=ERROR):
limit = limit if limit is not None else (3 - self.severity // 10) * 2 # Info provides 1 stack frame
limit = max(limit, 0)
self.log(" ".join(traceback.format_stack(f=sys._getframe(depth + 2), limit=limit)), severity=severity)
def ultra_verbose(self, message, mode=LogMode.EACH):
self.log(message, Logger.ULTRA_VERBOSE, mode=mode, stack_depth=3, error_ok=True)
def super_verbose(self, message, mode=LogMode.EACH):
self.log(message, Logger.SUPER_VERBOSE, mode=mode, stack_depth=3, error_ok=True)
def extra_verbose(self, message, mode=LogMode.EACH):
self.log(message, Logger.EXTRA_VERBOSE, mode=mode, stack_depth=3, error_ok=True)
def verbose(self, message, mode=LogMode.EACH):
self.log(message, Logger.VERBOSE, mode=mode, stack_depth=3, error_ok=True)
def info(self, message, mode=LogMode.EACH):
self.log(message, Logger.INFO, mode=mode, stack_depth=3)
def start(self, message, mode=LogMode.EACH):
self.log(message, Logger.START, mode=mode, stack_depth=3)
def finish(self, message, mode=LogMode.EACH):
self.log(message, Logger.FINISH, mode=mode, stack_depth=3)
def warning(self, message, mode=LogMode.EACH):
self.log(message, Logger.WARNING, mode=mode, stack_depth=3)
def error(self, message, mode=LogMode.EACH):
self.log(message, Logger.ERROR, mode=mode, stack_depth=3)
def critical(self, message):
self.log(message, Logger.CRITICAL, stack_depth=3)
from polygraphy.exception import PolygraphyException
raise PolygraphyException(message) from None
def internal_error(self, message):
from polygraphy import config
if config.INTERNAL_CORRECTNESS_CHECKS:
self.log(message, Logger.CRITICAL, stack_depth=3)
from polygraphy.exception import PolygraphyInternalException
raise PolygraphyInternalException(message) from None
def _str_from_module_info(self, module, name=None):
ret = ""
def try_append(func):
nonlocal ret
try:
ret += func()
except:
pass
try_append(lambda: name or "Loaded Module: {:<18}".format(module.__name__))
try_append(lambda: " | Version: {:<8}".format(module.__version__))
try_append(lambda: " | Path: {:}".format(list(map(os.path.realpath, module.__path__))))
return ret
def module_info(self, module, name=None, severity=VERBOSE):
self.log(self._str_from_module_info(module, name), severity=severity, mode=LogMode.ONCE)
def log_exception(self, func):
"""
Decorator that causes exceptions in a function to be logged.
This is useful in cases where the exception is caught by a caller, but should
still be logged.
"""
def wrapped(*args, **kwargs):
from polygraphy.exception import PolygraphyException
try:
return func(*args, **kwargs)
except PolygraphyException:
# `PolygraphyException`s are always logged.
raise
except Exception as err:
G_LOGGER.error(err)
raise
return wrapped
global G_LOGGER
G_LOGGER = Logger()
# For backwards compatibility
G_LOGGER.exit = G_LOGGER.critical
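# Illustrative usage sketch (not part of the original module): exercises the global logger's
# severity, indentation, and log-file features defined above. Assumes the `polygraphy` package is
# importable, since `log()` defers imports of `polygraphy.constants` and `polygraphy.config`.
if __name__ == "__main__":
    G_LOGGER.severity = Logger.VERBOSE
    G_LOGGER.log_file = "polygraphy_example.log"  # Hypothetical path; any writable location works.
    G_LOGGER.start("Starting example")
    with G_LOGGER.indent():
        G_LOGGER.verbose("Shown because the severity threshold was lowered to VERBOSE")
    G_LOGGER.finish("Done")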
| TensorRT-master | tools/Polygraphy/polygraphy/logger/logger.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import glob
import os
import sys
import tempfile
import zlib
from collections import OrderedDict
from polygraphy import constants, mod
from polygraphy.logger import G_LOGGER
np = mod.lazy_import("numpy")
@mod.export()
def check(cond, msg=None):
"""
Like assert, but applies even when optimizations are enabled (i.e. __debug__ is False).
Args:
cond (bool): The condition to check.
msg (str): The error message in case condition is False.
Raises:
AssertionError: If the condition is False.
"""
if not cond:
raise AssertionError(msg)
@mod.export()
def find_in_dict(name, mapping, index=None):
"""
Attempts to partially match keys in a dictionary. Checks for exact matches and
substring matches, falling back to index based matching.
Args:
name (str): The key to search for.
mapping (dict): The dictionary to search in.
index (int): An index to fall back to if the key could not be found by name.
Returns:
str: The key found in the dict, or None if it could not be found.
"""
G_LOGGER.ultra_verbose("Searching for key: {:}. Fallback index is set to {:}".format(name, index))
if name in mapping:
return name
for key in mapping.keys():
if name.lower() in key.lower() or key.lower() in name.lower():
return key
if index is not None and index >= 0 and index < len(mapping.keys()):
return list(mapping.keys())[index]
return None
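# For instance (illustrative): find_in_dict("input", {"input:0": 1}) returns "input:0" via the
# substring check, while find_in_dict("missing", {"a": 1, "b": 2}, index=1) falls back to "b".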
@mod.export()
def check_dict_contains(dct, keys, check_missing=True, dict_name=None, log_func=None):
"""
Checks that a dictionary contains the provided keys and also
that it does not contain any extra items and issues warnings
otherwise.
Args:
dct (Dict[Any, Any]):
The dictionary to check.
keys (Sequence[Any]):
The keys that should be in the dictionary.
check_missing (bool):
Whether to check for missing keys in the dictionary.
Defaults to True.
dict_name (str):
The name to use instead of "the dictionary" when
displaying warnings.
log_func (Logger.method):
The logging method to use to display warnings/errors.
Defaults to G_LOGGER.warning.
Returns:
bool: Whether the dictionary contains exactly the specified keys.
"""
log_func = default(log_func, G_LOGGER.warning)
dict_name = default(dict_name, "the dictionary")
feed_names = set(dct.keys())
keys = set(keys)
missing_in_dct = (keys - feed_names) if check_missing else False
extra_in_dct = feed_names - keys
if missing_in_dct:
log_func(
"Some keys are missing in {:}: {:}.\n"
"Note: Expected keys are: {:}, but keys provided were: {:}".format(
dict_name, missing_in_dct, keys, feed_names
)
)
if extra_in_dct:
log_func(
"Extra keys in {:}: {:}.\n"
"Note: Expected keys are: {:}, but keys provided were: {:}".format(
dict_name, extra_in_dct, keys, feed_names
)
)
return not extra_in_dct and not missing_in_dct
@mod.export()
def value_or_from_dict(obj, key, default=None):
"""
Many Polygraphy APIs can accept a `Union[obj, Dict[str, obj]]` to allow
for specifying either a global value, or a per-key (e.g. input, output, etc.) value.
    When a dictionary is provided, the `""` key indicates a default value to use for keys
not otherwise found.
For example, Polygraphy allows for providing per-output tolerances. Thus, all of the
following are valid arguments:
::
# Value directly
atol = 1.0
# Per-output values
atol = {"out1": 1.0, "out2": 2.0}
# Per-output values with default
atol = {"out1": 1.0, "": 2.0}
Args:
obj (Union[obj, Dict[str, obj]]): The value, or per-key values.
key (str): The key to use when per-key values are provided.
default (obj): The default value to use if it is not found in the dictionary.
Returns:
obj: The value.
"""
if not isinstance(obj, dict):
return obj
if key in obj:
return obj[key]
elif "" in obj:
return obj[""]
return default
@mod.export()
def unique_list(sequence):
"""
Creates a list without duplicate elements, preserving order.
Args:
sequence (Sequence): The sequence to make unique
Returns:
list: A list containing the same elements as sequence, in the same order, but without duplicates.
"""
return list(OrderedDict.fromkeys(sequence))
# default exists to solve issues that might result from Python's normal default arguments.
# Specifically, consider the following class:
#
# class MyClass(object):
# def __init__(self, value=[]):
# self.value = value
#
# This leads to unexpected behavior when the default value is used:
#
# >>> x = MyClass()
# >>> x.value.append("SHOULD NOT BE IN Y")
# >>> y = MyClass()
# >>> y.value
# ['SHOULD NOT BE IN Y']
#
# If we rewrite the class using the default() function defined below:
#
# class MyClass(object):
# def __init__(self, value=None):
# self.value = default(value, [])
#
# Then we get the expected behavior:
#
# >>> x = MyClass()
# >>> x.value.append("SHOULD NOT BE IN Y")
# >>> y = MyClass()
# >>> y.value
# []
@mod.export()
def default(value, default):
"""
Returns a specified default value if the provided value is None.
Args:
value (object): The value.
default (object): The default value to use if value is None.
Returns:
object: Either value, or the default.
"""
return value if value is not None else default
@mod.export()
def is_sequence(obj):
return hasattr(obj, "__iter__") and not isinstance(obj, dict) and not isinstance(obj, set)
@mod.export()
def unpack_args(args, num):
"""
Extracts the specified number of arguments from a tuple, padding with
`None` if the tuple length is insufficient.
Args:
args (Tuple[object]): The tuple of arguments
num (int): The number of elements desired.
Returns:
Tuple[object]: A tuple containing `num` arguments, padded with `None` if `len(args) < num`
"""
args = args if is_sequence(args) else (args,)
args += (None,) * (num - len(args))
return args[0:num]
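# For example (illustrative): unpack_args((builder, network), 3) yields (builder, network, None),
# while unpack_args(engine, 2) first wraps the lone argument and yields (engine, None).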
##
## File I/O
##
@mod.export()
class NamedTemporaryFile(object):
"""
Cross-platform temporary file implementation. Unlike tempfile.NamedTemporaryFile,
it can be opened multiple times without error on Windows.
"""
def __init__(self, mode=None, prefix=None, suffix=None):
"""
Args:
mode (str): The mode to use when opening the file.
prefix (str): The prefix to use for the file path.
suffix (str): The suffix to use for the file path.
"""
self.mode = default(mode, "wb+")
prefix = default(prefix, "")
suffix = default(suffix, "")
def rand_path():
return os.path.join(tempfile.gettempdir(), "{:}{:}{:}".format(prefix, os.urandom(24).hex(), suffix))
# In the unlikely event the path exists, generate a new one. Only try 100 times so
# we don't end up in an infinite loop.
path = rand_path()
for _ in range(100):
if not os.path.exists(path):
break
path = rand_path()
else:
G_LOGGER.critical("Could not create a temporary file under: {:}".format(tempfile.gettempdir()))
self.name = path # Use 'name' to be compatible with tempfile.NamedTemporaryFile
open(self.name, "x").close()
self._fhandle = None
def __enter__(self):
"""
Opens the temporary file using the mode specified in the constructor.
Returns:
file-like: The open file object.
"""
self._fhandle = open(self.name, self.mode)
return self._fhandle
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Closes the file handle.
"""
self._fhandle.close()
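# Illustrative usage (hypothetical values): unlike tempfile.NamedTemporaryFile, the file can be
# reopened by path while the object is still alive, which matters on Windows.
#
#   f = NamedTemporaryFile(mode="w", suffix=".json")
#   with f as fhandle:
#       fhandle.write("{}")
#   contents = load_file(f.name, mode="r")  # Reopen by name later in the same process.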
@mod.export()
def find_in_dirs(name_glob, dirs):
"""
Finds a file, optionally including a glob expression, in the specified directories.
Args:
name_glob (str):
The name of the file, optionally including a glob expression.
Only the first match will be returned.
dirs (Sequence[str]):
The directories in which to search.
Returns:
List[str]: The paths found, or an empty list if it could not be found.
"""
for dir_name in dirs:
paths = glob.glob(os.path.join(dir_name, name_glob))
if paths:
return paths
return []
@mod.export()
def get_file_size(src):
"""
Gets the size of a file or file-like object.
Args:
src (Union[str, file-like]):
The path or file-like object to read from.
Returns:
int: The size of the file if it exists, otherwise 0.
"""
try:
src.fileno
except AttributeError:
path = src
if not os.path.exists(path):
return 0
else:
path = src.fileno()
return os.stat(path).st_size
def warn_if_wrong_mode(file_like, mode):
def binary(mode):
return "b" in mode
def readable(mode):
return "r" in mode or "+" in mode
def writable(mode):
return "w" in mode or "a" in mode or "+" in mode
fmode = file_like.mode
if (
binary(fmode) != binary(mode)
or (readable(mode) and not readable(fmode))
or (writable(mode) and not writable(fmode))
):
G_LOGGER.warning(
"File-like object has a different mode than requested!\n"
"Note: Requested mode was: {:} but file-like object has mode: {:}".format(mode, file_like.mode)
)
def is_file_like(obj):
try:
obj.read
obj.write
except AttributeError:
return False
else:
return True
@mod.export()
def makedirs(path):
dir_path = os.path.dirname(path)
if dir_path:
dir_path = os.path.realpath(dir_path)
if not os.path.exists(dir_path):
G_LOGGER.verbose("{:} does not exist, creating now.".format(dir_path))
os.makedirs(dir_path, exist_ok=True)
@mod.export()
def load_file(src, mode="rb", description=None):
"""
Reads from the specified source path or file-like object.
Args:
src (Union[str, file-like]): The path or file-like object to read from.
mode (str): The mode to use when reading. Defaults to "rb".
description (str): A description of what is being read.
Returns:
Union[str, bytes, None]: The contents read.
Raises:
Exception: If the file or file-like object could not be read.
"""
if description is not None:
G_LOGGER.info("Loading {:} from {:}".format(description, src))
if is_file_like(src):
warn_if_wrong_mode(src, mode)
# Reset cursor position after reading from the beginning of the file.
prevpos = src.tell()
if src.seekable():
src.seek(0)
contents = src.read()
if src.seekable():
src.seek(prevpos)
return contents
else:
with open(src, mode) as f:
return f.read()
@mod.export()
def save_file(contents, dest, mode="wb", description=None):
"""
Writes text or binary data to the specified destination path or file-like object.
Args:
contents (bytes):
A bytes-like object that can be written to disk.
dest (Union[str, file-like]):
The path or file-like object to write to.
mode (str): The mode to use when writing. Defaults to "wb".
description (str): A description of what is being written.
Returns:
Union[str, file-like, None]: The complete file path or file-like object.
Raises:
Exception: If the path could not be written to, or if the file-like object could not be written to.
"""
if description is not None:
G_LOGGER.info("Saving {:} to {:}".format(description, dest))
if is_file_like(dest):
warn_if_wrong_mode(dest, mode)
bytes_written = dest.write(contents)
dest.flush()
try:
content_bytes = len(contents.encode())
except:
pass
else:
if bytes_written != content_bytes:
G_LOGGER.warning(
"Could not write entire file. Note: file contains {:} bytes, but only "
"{:} bytes were written".format(content_bytes, bytes_written)
)
else:
makedirs(dest)
with open(dest, mode) as f:
f.write(contents)
return dest
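# A small round-trip sketch using the helpers above (illustrative paths):
#
#   save_file(b"\x00\x01", "out/data.bin", description="example blob")  # Creates out/ if needed.
#   blob = load_file("out/data.bin", description="example blob")        # Returns the same bytes.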
##
## Compression
##
class Compressed(object):
"""
Represents an object compressed by zlib
"""
def __init__(self, cobj):
self.bytes = cobj
def is_compressed(obj):
return isinstance(obj, Compressed)
def compress(obj):
G_LOGGER.verbose("Compressing {} object".format(type(obj)))
return Compressed(zlib.compress(obj))
def decompress(compressed):
G_LOGGER.verbose("Decompressing bytes")
return zlib.decompress(compressed.bytes)
##
## Subprocess Utils
##
PIPE_MAX_SEND_BYTES = 1 << 31
"""The maximum number of bytes that can be sent at once over a queue"""
def send_on_queue(queue, obj):
if sys.getsizeof(obj) > PIPE_MAX_SEND_BYTES:
G_LOGGER.warning(
"Object size ({:} bytes) exceeds maximum size that can be sent over queues ({:} bytes). "
"Attempting to compress - this may take some time. If this does not work or you want to avoid "
"the compression overhead, you should disable subprocesses by omitting the --use-subprocess flag, "
"or by setting use_subprocess=False in Comparator.run().".format(sys.getsizeof(obj), PIPE_MAX_SEND_BYTES)
)
obj = compress(obj)
assert sys.getsizeof(obj) <= PIPE_MAX_SEND_BYTES
G_LOGGER.ultra_verbose("Sending: {:} on queue".format(obj))
queue.put(obj)
@mod.export()
def try_send_on_queue(queue, obj):
"""
Attempts to send an object over the queue, compressing it if needed.
In the event the object cannot be sent, sends `None` instead.
Args:
queue (queue.Queue): The queue to send the object over.
obj (object): The object to send.
"""
try:
send_on_queue(queue, obj)
except Exception as err:
G_LOGGER.warning("Could not send object on queue: {:}\nSending None instead.".format(err))
queue.put(None)
def receive_on_queue(queue, timeout=None):
G_LOGGER.extra_verbose("Waiting for data to become available on queue")
obj = queue.get(block=True, timeout=timeout)
if is_compressed(obj):
obj = decompress(obj)
G_LOGGER.ultra_verbose("Received {:} on queue".format(obj))
return obj
@mod.export()
def try_receive_on_queue(queue, timeout=None):
try:
obj = receive_on_queue(queue, timeout)
if obj is None:
G_LOGGER.warning(
"Received {:} on the queue. This likely means that there was an error in sending "
"the object over the queue. You may want to run with use_subprocess=False in Comparator.run() "
"or omit the --use-subprocess flag to prevent further issues.".format(obj)
)
return obj
except Exception as err:
G_LOGGER.warning(
"Could not receive on queue: {:}\nYou may want to run with use_subprocess=False in Comparator.run() "
"or omit the --use-subprocess flag to prevent further issues.".format(err)
)
return None
##
## Function Utils
##
@mod.export()
def invoke_if_callable(func, *args, **kwargs):
"""
Attempts to invoke a function with arguments. If `func` is not callable, then returns `func`
The second return value of this function indicates whether the argument was a callable.
"""
if callable(func):
ret = func(*args, **kwargs)
return ret, True
return func, False
##
## Shapes
##
def is_dimension_dynamic(dim):
is_dim_str = not isinstance(dim, int)
return dim is None or is_dim_str or dim < 0
def num_dynamic_dimensions(shape):
return len([dim for dim in shape if is_dimension_dynamic(dim)])
@mod.export()
def is_shape_dynamic(shape):
return num_dynamic_dimensions(shape) > 0
@mod.export()
def is_valid_shape_override(new_shape, original_shape):
ranks_same = len(original_shape) == len(new_shape)
overrides_valid = all([odim == ndim or is_dimension_dynamic(odim) for odim, ndim in zip(original_shape, new_shape)])
return ranks_same and overrides_valid
@mod.export()
def override_dynamic_shape(shape, default_shape_value=None):
default_shape_value = default(default_shape_value, constants.DEFAULT_SHAPE_VALUE)
return [default_shape_value if is_dimension_dynamic(elem) else elem for elem in shape]
@mod.export()
def volume(obj):
vol = 1
for elem in obj:
vol *= elem
return vol
@mod.export()
def is_empty_shape(shape):
return volume(shape) == 0
@mod.export()
def try_match_shape(arr, shape):
"""
Attempts to permute or reshape the array so its shape matches the specified shape.
This is a no-op if the array is already the correct shape.
Args:
arr (numpy.ndarray): The array to reshape.
shape (Tuple[int]): The shape to use. May contain at most 1 dynamic dimension.
Returns:
numpy.ndarray: The reshaped array.
"""
def is_rank_same(arr, shape):
return len(shape) == len(arr.shape)
def try_reshape(arr, shape):
original_shape = arr.shape
try:
arr = arr.reshape(shape)
except ValueError:
G_LOGGER.warning(
"Could not reshape array from shape: {:} to {:}. Skipping reshape.".format(arr.shape, shape)
)
else:
if arr.shape != original_shape:
G_LOGGER.info("Reshaped array from shape: {:} to: {:}".format(original_shape, arr.shape))
return arr
def try_permute(arr, shape):
original_shape = arr.shape
if sorted(arr.shape) != sorted(shape):
G_LOGGER.extra_verbose("Array of shape: {:} cannot be permuted to: {:}".format(arr.shape, shape))
return arr
        # Track which axes of the original shape are still unused so that the permutation
        # never maps two output dimensions onto the same input axis.
arr_shape_indices = {index: dimlen for index, dimlen in enumerate(arr.shape)}
# Find which axis in arr.shape corresponds to the specified size. Never returns duplicates.
def find_axis(dimlen):
nonlocal arr_shape_indices
for index, d in arr_shape_indices.items():
if d == dimlen:
del arr_shape_indices[index]
return index
try:
perm = [find_axis(dimlen) for dimlen in shape]
arr = np.transpose(arr, perm)
except Exception as err:
G_LOGGER.extra_verbose("Skipping permutation due to {:}".format(err))
else:
if arr.shape != original_shape:
G_LOGGER.info(
"Permuted array of shape: {:} to: {:} using permutation {:}".format(original_shape, arr.shape, perm)
)
return arr
# Override any dynamic dimensions in the shape with concrete shapes from the array.
def try_freeze_shape(arr, shape):
if num_dynamic_dimensions(shape) == 1:
try:
static_dims = [dim for dim in shape if not is_dimension_dynamic(dim)]
determined_dim = volume(arr.shape) // volume(static_dims)
except ZeroDivisionError:
determined_dim = 0
shape = [determined_dim if is_dimension_dynamic(elem) else elem for elem in shape]
elif is_rank_same(arr, shape):
shape = [
arr_shape_elem if is_dimension_dynamic(elem) else elem for elem, arr_shape_elem in zip(shape, arr.shape)
]
return shape
if shape == arr.shape:
return arr
if is_shape_dynamic(shape):
shape = try_freeze_shape(arr, shape)
if not is_rank_same(arr, shape):
arr = try_reshape(arr, shape)
if is_rank_same(arr, shape):
arr = try_permute(arr, shape)
arr = try_reshape(arr, shape)
return arr
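# For instance (illustrative): given an array of shape (1, 3, 224, 224), try_match_shape(arr, (1, 224, 224, 3))
# permutes the axes rather than reshaping, while a target of (3, -1) first freezes the dynamic
# dimension to 50176 and then reshapes.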
##
## Logging Utilities
##
@mod.export()
def str_from_layer(prefix, index, name, op, input_info, output_info):
layer_str = "{:} {:<4} | {:} [Op: {:}]\n".format(prefix, index, name, op)
layer_str += indent_block(input_info)
layer_str += "\n" if (input_info and output_info) else ""
indent_level = 1 if (input_info and output_info) else 0
layer_str += (
indent_block(" -> {:}".format(indent_block(output_info, level=indent_level).strip()), level=indent_level) + "\n"
)
return layer_str
@mod.export()
def indent_block(block, level=1):
"""
Indents the provided block of text.
Args:
block (str): The text to indent.
level (int): The number of tabs to indent with.
Returns:
str: The indented block.
"""
tab = "\t" * level
sep = "\n{:}".format(tab)
return tab + sep.join(str(block).splitlines())
@mod.export()
def make_repr(type_str, *args, **kwargs):
"""
Creates a string suitable for use with ``__repr__`` for a given
type with the provided arguments.
Skips keyword arguments that are set to ``None``.
For example, ``make_repr("Example", None, "string", w=None, x=2)``
would return a string: ``"Example(None, 'string', x=2)"``
Args:
type_str (str):
The name of the type to create a representation for.
Returns:
Tuple[str, bool]:
A tuple including the ``__repr__`` string and a boolean
indicating whether all the arguments were default (i.e. None).
"""
all_args = list(map(repr, args))
for key, val in filter(lambda t: t[1] is not None, kwargs.items()):
all_args.append("{:}={:}".format(key, repr(val)))
repr_str = "{:}({:})".format(type_str, ", ".join(all_args))
return repr_str, all(arg == repr(None) for arg in all_args)
##
## Safety
##
@mod.export()
class FreeOnException(object):
def __init__(self, objs):
"""
Frees the specified objects if an exception occurs in this context.
Does nothing otherwise.
Args:
objs (List[object]): List of objects with __enter__/__exit__ methods defined.
"""
assert is_sequence(objs), "FreeOnException requires a sequence of objects!"
self.objs = objs
def __enter__(self):
"""
Returns the objects managed by this context manager.
"""
return self.objs
def __exit__(self, exc_type, exc_value, traceback):
"""
On exception, deletes all tracked objects.
Does nothing if there are no exceptions.
"""
if exc_type is not None:
# Objects are freed in reverse order
with contextlib.ExitStack() as stack:
for obj in self.objs:
if obj is not None:
stack.enter_context(obj)
##
## Attribute Helpers
##
@mod.export()
class TempAttrChange(object):
"""
Temporarily set an instance member to a particular value for the duration
of the context manager.
"""
def __init__(self, arg_group, attr, value):
self.arg_group = arg_group
self.attr = attr
self.old_value = getattr(arg_group, attr)
self.new_value = value
def __enter__(self):
if self.new_value is not None:
setattr(self.arg_group, self.attr, self.new_value)
def __exit__(self, exc_type, exc_value, traceback):
setattr(self.arg_group, self.attr, self.old_value)
@mod.export()
def getattr_nested(obj, attr):
for typ in attr.split("."):
obj = getattr(obj, typ)
return obj
| TensorRT-master | tools/Polygraphy/polygraphy/util/util.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.json import *
mod.warn_deprecated("JSON utilities in polygraphy.util", "polygraphy.json", remove_in="0.35.0")
| TensorRT-master | tools/Polygraphy/polygraphy/util/serde.py |
from polygraphy.util.util import *
from polygraphy.util.serde import *
| TensorRT-master | tools/Polygraphy/polygraphy/util/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import enum
from polygraphy.logger import G_LOGGER
# TRT does not include batch dimension.
class DataFormat(enum.IntEnum):
UNKNOWN = 0
NW = 1
NHW = 2
CHW = 3
NHWC = 4
NCHW = 5
# This class is responsible for deducing the format of a shape,
# and converting it to the desired format (specified as a DataFormat).
class FormatManager(object):
# NOTE: New permutations should be added in this function, as it will automatically generate the inverses.
def _generate_permutations():
def is_invertible(perm):
return min(perm) >= 0 and max(perm) < len(perm)
def inverse_permutation(perm):
            # The true inverse satisfies inverse[perm[i]] = i, i.e. it undoes `perm`.
            inverse = [0] * len(perm)
            for index, axis in enumerate(perm):
                inverse[axis] = index
return inverse
# Inverse permutations are generated automatically below.
# We use -1 to denote that a dummy dimension of 1 should be inserted in the convert function.
initial_permutations = {
(DataFormat.NCHW, DataFormat.NCHW): (0, 1, 2, 3),
(DataFormat.NHWC, DataFormat.NHWC): (0, 1, 2, 3),
(DataFormat.NHWC, DataFormat.NCHW): (0, 3, 1, 2),
(DataFormat.CHW, DataFormat.CHW): (0, 1, 2),
(DataFormat.NCHW, DataFormat.CHW): (1, 2, 3),
(DataFormat.NHWC, DataFormat.CHW): (3, 1, 2),
(DataFormat.NHW, DataFormat.CHW): (-1, 1, 2),
(DataFormat.NW, DataFormat.CHW): (-1, -1, 1),
}
permutations = {}
for (f1, f2), perm in initial_permutations.items():
permutations[(f1, f2)] = perm
if is_invertible(perm):
permutations[(f2, f1)] = inverse_permutation(perm)
return permutations
# Dict[Tuple[DataFormat, DataFormat], Tuple[int]]
# This provides the correct permutation for various data format conversions.
DATA_PERMUTATIONS = _generate_permutations()
@staticmethod
def determine_format(shape):
"""
Guesses the data format of a given shape.
Args:
shape (Tuple[int]): The shape, including batch dimension.
Returns:
DataFormat: The determined data format.
"""
# The smaller this ratio, the closer a and b are.
def minmax_ratio(a, b):
return abs(max(a, b) / min(a, b))
# Assume all shapes include batch dimension
if len(shape) == 4:
            # Typically, H and W are quite close in size. If dims 1 and 2 differ from each other more
            # than dims 2 and 3 do, the trailing pair is likely (H, W), so we assume the layout is NCHW.
if minmax_ratio(shape[1], shape[2]) > minmax_ratio(shape[2], shape[3]):
return DataFormat.NCHW
return DataFormat.NHWC
elif len(shape) == 3:
return DataFormat.NHW
elif len(shape) == 2:
return DataFormat.NW
else:
G_LOGGER.warning(
"Cannot determine format for "
+ str(shape)
+ ". Currently only implemented for input_buffers with 1-3 non-batch dimensions. Please update this function!"
)
return DataFormat.UNKNOWN
# Get the permutation required to transpose old_format to new_format
@staticmethod
def permutation(old_format, new_format):
return FormatManager.DATA_PERMUTATIONS[(old_format, new_format)]
@staticmethod
def convert(shape, new_format):
"""
Permutes a shape from one format to another.
Args:
shape (Tuple[int]): The shape to convert.
new_format (DataFormat): The desired format of the shape.
Returns:
Tuple[int]: A new shape in the correct format.
"""
old_format = FormatManager.determine_format(shape)
perm = FormatManager.permutation(old_format, new_format)
return [shape[index] if index != -1 else 1 for index in perm]
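# Illustrative usage sketch (not part of the original module; assumes the `polygraphy` package is
# importable): a typical NHWC -> NCHW conversion using only the helpers defined above.
if __name__ == "__main__":
    nhwc_shape = [1, 224, 224, 3]
    print(FormatManager.determine_format(nhwc_shape))          # DataFormat.NHWC
    print(FormatManager.convert(nhwc_shape, DataFormat.NCHW))  # [1, 3, 224, 224]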
| TensorRT-master | tools/Polygraphy/polygraphy/util/format.py |
from polygraphy.cuda.cuda import *
| TensorRT-master | tools/Polygraphy/polygraphy/cuda/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import time
import os
import sys
from polygraphy import func, mod, util
from polygraphy.logger import G_LOGGER
np = mod.lazy_import("numpy")
def void_ptr(val=None):
return ctypes.c_void_p(val)
@mod.export()
class MemcpyKind(object):
"""
Enumerates different kinds of copy operations.
"""
HostToHost = ctypes.c_int(0)
"""Copies from host memory to host memory"""
HostToDevice = ctypes.c_int(1)
"""Copies from host memory to device memory"""
DeviceToHost = ctypes.c_int(2)
"""Copies from device memory to host memory"""
DeviceToDevice = ctypes.c_int(3)
"""Copies from device memory to device memory"""
Default = ctypes.c_int(4)
@mod.export()
class Cuda(object):
"""
NOTE: Do *not* construct this class manually.
Instead, use the ``wrapper()`` function to get the global wrapper.
Wrapper that exposes low-level CUDA functionality.
"""
def __init__(self):
self.handle = None
if sys.platform.startswith("win"):
cuda_paths = [os.environ.get("CUDA_PATH", "")]
cuda_paths += os.environ.get("PATH", "").split(os.path.pathsep)
cuda_paths = list(filter(lambda x: x, cuda_paths)) # Filter out empty paths (i.e. "")
candidates = util.find_in_dirs("cudart64_*.dll", cuda_paths)
if not candidates:
G_LOGGER.critical(
"Could not find the CUDA runtime library.\nNote: Paths searched were:\n{:}".format(cuda_paths)
)
self.handle = ctypes.CDLL(candidates[0])
else:
self.handle = ctypes.CDLL("libcudart.so")
if not self.handle:
G_LOGGER.critical("Could not load the CUDA runtime library. Is it on your loader path?")
@func.constantmethod
def check(self, status):
if status != 0:
G_LOGGER.critical(
"CUDA Error: {:}. To figure out what this means, refer to "
"https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html#group__CUDART__TYPES_1g3f51e3575c2178246db0a94a430e0038".format(
status
)
)
@func.constantmethod
def create_stream(self):
# Signature: () -> int
ptr = void_ptr()
self.check(self.handle.cudaStreamCreate(ctypes.byref(ptr)))
return ptr.value
@func.constantmethod
def stream_synchronize(self, ptr):
# Signature: int -> None
self.check(self.handle.cudaStreamSynchronize(void_ptr(ptr)))
@func.constantmethod
def destroy_stream(self, ptr):
# Signature: int -> None
self.check(self.handle.cudaStreamDestroy(void_ptr(ptr)))
@func.constantmethod
def malloc(self, nbytes):
"""
Allocates memory on the GPU.
Args:
nbytes (int): The number of bytes to allocate.
Returns:
int: The memory address of the allocated region, i.e. a device pointer.
Raises:
PolygraphyException: If an error was encountered during the allocation.
"""
ptr = void_ptr()
nbytes = ctypes.c_size_t(nbytes) # Required to prevent overflow
self.check(self.handle.cudaMalloc(ctypes.byref(ptr), nbytes))
return ptr.value
@func.constantmethod
def free(self, ptr):
"""
Frees memory allocated on the GPU.
Args:
ptr (int): The memory address, i.e. a device pointer.
Raises:
PolygraphyException: If an error was encountered during the free.
"""
self.check(self.handle.cudaFree(void_ptr(ptr)))
@func.constantmethod
def memcpy(self, dst, src, nbytes, kind, stream_ptr=None):
"""
Copies data between host and device memory.
Args:
dst (int):
The memory address of the destination, i.e. a pointer.
src (int):
The memory address of the source, i.e. a pointer.
nbytes (int):
The number of bytes to copy.
kind (MemcpyKind):
The kind of copy to perform.
stream_ptr (int):
The memory address of a CUDA stream, i.e. a pointer.
If this is not provided, a synchronous copy is performed.
Raises:
PolygraphyException: If an error was encountered during the copy.
"""
nbytes = ctypes.c_size_t(nbytes) # Required to prevent overflow
if stream_ptr is not None:
self.check(self.handle.cudaMemcpyAsync(void_ptr(dst), void_ptr(src), nbytes, kind, void_ptr(stream_ptr)))
else:
self.check(self.handle.cudaMemcpy(void_ptr(dst), void_ptr(src), nbytes, kind))
G_CUDA = None
@mod.export()
def wrapper():
"""
Returns the global Polygraphy CUDA wrapper.
Returns:
Cuda: The global CUDA wrapper.
"""
global G_CUDA
if G_CUDA is None:
G_CUDA = Cuda()
return G_CUDA
@mod.export()
class Stream(object):
"""
High-level wrapper for a CUDA stream.
"""
def __init__(self):
self.ptr = wrapper().create_stream()
"""int: The memory address of the underlying CUDA stream"""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
Frees the underlying CUDA stream.
"""
self.free()
def free(self):
"""
Frees the underlying CUDA stream.
You can also use a context manager to manage the stream lifetime.
For example:
::
with Stream() as stream:
...
"""
wrapper().destroy_stream(self.ptr)
        self.ptr = None  # The object no longer owns a valid CUDA stream handle.
def synchronize(self):
"""
Synchronizes the stream.
"""
wrapper().stream_synchronize(self.ptr)
def try_get_stream_handle(stream):
if stream is None:
return None
return stream.ptr
# Make a numpy array contiguous if it's not already.
def make_np_contiguous(arr):
if not arr.flags["C_CONTIGUOUS"]:
return np.ascontiguousarray(arr)
return arr
@mod.export()
class DeviceView(object):
"""
A read-only view of a GPU memory region.
"""
def __init__(self, ptr, shape, dtype):
"""
Args:
ptr (int): A pointer to the region of memory.
shape (Tuple[int]): The shape of the region.
dtype (numpy.dtype): The data type of the region.
"""
self.ptr = int(ptr)
"""int: The memory address of the underlying GPU memory"""
self.shape = shape
"""Tuple[int]: The shape of the device buffer"""
self.itemsize = None
self.dtype = dtype
"""np.dtype: The data type of the device buffer"""
def _check_dtype_matches(self, host_buffer):
if host_buffer.dtype != self.dtype:
G_LOGGER.error(
"Host buffer type: {:} does not match the type of this device buffer: {:}. "
"This may cause CUDA errors!".format(host_buffer.dtype, self.dtype)
)
@property
def dtype(self):
return self._dtype
@dtype.setter
def dtype(self, new):
self._dtype = new
self.itemsize = np.dtype(new).itemsize
@property
def nbytes(self):
"""
The number of bytes in the memory region.
"""
return util.volume(self.shape) * self.itemsize
@func.constantmethod
def copy_to(self, host_buffer, stream=None):
"""
Copies from this device buffer to the provided host buffer.
Args:
host_buffer (numpy.ndarray):
The host buffer to copy into. The buffer will be reshaped to match the
shape of this device buffer. If the provided host buffer is too small,
it will be freed and reallocated.
The buffer may also be reallocated if it is not contiguous in
memory (see np.ascontiguousarray).
stream (Stream):
A Stream instance. Performs a synchronous copy if no stream is provided.
Returns:
numpy.ndarray: The host buffer, possibly reallocated.
"""
self._check_dtype_matches(host_buffer)
if self.shape != host_buffer.shape:
try:
host_buffer.resize(self.shape, refcheck=False)
except ValueError as err:
G_LOGGER.warning(
"Could not resize host buffer to shape: {:}. Allocating a new buffer instead.\n"
"Note: Error was: {:}".format(self.shape, err)
)
host_buffer = np.empty(self.shape, dtype=np.dtype(self.dtype))
if not self.nbytes:
return host_buffer
host_buffer = make_np_contiguous(host_buffer)
wrapper().memcpy(
dst=host_buffer.ctypes.data,
src=self.ptr,
nbytes=self.nbytes,
kind=MemcpyKind.DeviceToHost,
stream_ptr=try_get_stream_handle(stream),
)
return host_buffer
@func.constantmethod
def numpy(self):
"""
Create a new NumPy array containing the contents of this device buffer.
Returns:
np.ndarray: The newly created NumPy array.
"""
arr = np.empty(self.shape, dtype=self.dtype)
return self.copy_to(arr)
def __str__(self):
return "DeviceView[(dtype={:}, shape={:}), ptr={:}]".format(
np.dtype(self.dtype).name, self.shape, hex(self.ptr)
)
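# A brief sketch of how DeviceView can wrap GPU memory owned elsewhere, assuming a
# working CUDA installation. The DeviceArray below (defined later in this module)
# stands in for any allocation whose pointer, shape, and dtype are known.
def _example_device_view_usage():
    data = np.arange(4, dtype=np.int32)
    arr = DeviceArray(shape=data.shape, dtype=data.dtype)
    try:
        arr.copy_from(data)
        view = DeviceView(ptr=arr.ptr, shape=arr.shape, dtype=arr.dtype)
        host_copy = view.numpy()  # Copies the viewed region back into a new NumPy array
    finally:
        arr.free()  # The view does not own the memory; the owning array must free it
    return host_copy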
@mod.export()
class DeviceArray(DeviceView):
"""
An array on the GPU.
"""
def __init__(self, shape=None, dtype=None):
"""
Args:
shape (Tuple[int]): The initial shape of the buffer.
dtype (numpy.dtype): The data type of the buffer.
"""
super().__init__(ptr=0, shape=util.default(shape, tuple()), dtype=util.default(dtype, np.float32))
self.allocated_nbytes = 0
self.resize(self.shape)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
Frees the underlying memory of this DeviceArray.
"""
self.free()
def allocate(self, nbytes):
if nbytes:
self.ptr = wrapper().malloc(nbytes)
self.allocated_nbytes = nbytes
def free(self):
"""
Frees the GPU memory associated with this array.
You can also use a context manager to ensure that memory is freed. For example:
::
with DeviceArray(...) as arr:
...
"""
wrapper().free(self.ptr)
self.shape = tuple()
self.allocated_nbytes = 0
self.ptr = 0
def resize(self, shape):
"""
Resizes or reshapes the array to the specified shape.
If the allocated memory region is already large enough,
no reallocation is performed.
Args:
shape (Tuple[int]): The new shape.
"""
nbytes = util.volume(shape) * self.itemsize
if nbytes > self.allocated_nbytes:
self.free()
self.allocate(nbytes)
self.shape = shape
def copy_from(self, host_buffer, stream=None):
"""
Copies from the provided host buffer into this device buffer.
The device array may be resized if the currently allocated memory region
is smaller than the host_buffer.
Args:
host_buffer (numpy.ndarray):
The host buffer to copy from. If the buffer is not contiguous in memory,
an additional copy may be performed.
stream (Stream):
A Stream instance. Performs a synchronous copy if no stream is provided.
Returns:
DeviceArray: Self
"""
if host_buffer.nbytes:
self._check_dtype_matches(host_buffer)
self.resize(host_buffer.shape)
host_buffer = make_np_contiguous(host_buffer)
wrapper().memcpy(
dst=self.ptr,
src=host_buffer.ctypes.data,
nbytes=host_buffer.nbytes,
kind=MemcpyKind.HostToDevice,
stream_ptr=try_get_stream_handle(stream),
)
return self
def view(self):
"""
Creates a read-only DeviceView from this DeviceArray.
Returns:
DeviceView: A view of this array's data on the device.
"""
return DeviceView(self.ptr, self.shape, self.dtype)
def __str__(self):
return "DeviceArray[(dtype={:}, shape={:}), ptr={:}]".format(
np.dtype(self.dtype).name, self.shape, hex(self.ptr)
)
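# A short round-trip sketch tying the pieces above together, assuming a working
# CUDA installation. The shape and values are arbitrary.
def _example_device_array_round_trip():
    host = np.arange(12, dtype=np.float32).reshape(3, 4)
    with DeviceArray(shape=host.shape, dtype=host.dtype) as arr:
        arr.copy_from(host)          # Synchronous host-to-device copy (no stream given)
        round_tripped = arr.numpy()  # Synchronous device-to-host copy into a new array
    assert np.array_equal(host, round_tripped)
    return round_tripped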
| TensorRT-master | tools/Polygraphy/polygraphy/cuda/cuda.py |
TensorRT-master | tools/Polygraphy/polygraphy/backend/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import os
import time
from collections import OrderedDict
from polygraphy import constants, cuda, mod, util
from polygraphy.backend.base import BaseLoader, BaseRunner
from polygraphy.backend.trt import util as trt_util
from polygraphy.backend.trt.loader import BaseNetworkFromOnnx
from polygraphy.backend.trt.util import get_trt_logger
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER
from polygraphy.util.format import DataFormat, FormatManager
np = mod.lazy_import("numpy")
trt = mod.lazy_import("tensorrt")
class LoadUffFile(BaseLoader):
def __init__(self, path, shapes, outputs):
self.path = path
self.shapes = shapes
self.outputs = outputs
def call_impl(self):
input_names = list(self.shapes.keys())
input_shapes = list(self.shapes.values())
with open(self.path, "rb") as f:
return f.read(), input_names, input_shapes, self.outputs
class ConvertToUff(BaseLoader):
def __init__(self, tf_loader, save_uff=None, preprocessor=None):
self.tf_loader = tf_loader
self.uff_path = save_uff
self.preprocessor = preprocessor
def call_impl(self):
"""
save_uff (bool): Whether to write the generated UFF and corresponding PBTXT files.
"""
import uff
from polygraphy.backend.tf import util as tf_util
G_LOGGER.module_info(uff)
graph, output_names = self.tf_loader()
output_names = [name.split(":")[0] for name in output_names]
# GraphDefs don't have names, so we have to name it something generic.
output_filename = None if not self.uff_path else "out.uff"
# Generate the UFF model and get information about the input_buffers/output_buffers.
uff_model, input_nodes, _ = uff.from_tensorflow(
graph.as_graph_def(),
return_graph_info=True,
quiet=(G_LOGGER.severity > G_LOGGER.VERBOSE),
debug_mode=(G_LOGGER.severity == G_LOGGER.EXTRA_VERBOSE),
text=self.uff_path,
save_preprocessed=self.uff_path,
output_filename=output_filename,
preprocessor=self.preprocessor,
)
input_names = [node.name for node in input_nodes]
input_shapes = [tuple(int(dim.size) for dim in node.attr["shape"].shape.dim) for node in input_nodes]
return uff_model, input_names, input_shapes, output_names
class LoadNetworkFromUff(BaseLoader):
def __init__(self, uff_loader, uff_order=None):
self.uff_loader = uff_loader
self.uff_order = None
if uff_order:
self.uff_order = trt.UffInputOrder.NCHW if uff_order.lower() == "nchw" else trt.UffInputOrder.NHWC
def call_impl(self):
uff_model, input_names, input_shapes, output_names = self.uff_loader()
builder = trt.Builder(get_trt_logger())
network = builder.create_network()
parser = trt.UffParser()
# Input names should come from the converter, as a preprocessing script may have been applied to the frozen model.
for name, shape in zip(input_names, input_shapes):
# Default order is NCHW; only set NHWC if we're reasonably certain the data is in that format.
input_order = self.uff_order
if not self.uff_order:
input_order = trt.UffInputOrder.NCHW
if FormatManager.determine_format(shape) == DataFormat.NHWC:
input_order = trt.UffInputOrder.NHWC
shape = shape[1:]
G_LOGGER.verbose(
"Registering UFF input: {:} with shape: {:} and input order: {:}".format(name, shape, input_order)
)
parser.register_input(name, shape, input_order)
if output_names and output_names != constants.MARK_ALL:
for name in output_names:
G_LOGGER.verbose("Registering UFF output: " + str(name))
parser.register_output(name)
G_LOGGER.info("Parsing UFF model with inputs: {:} and outputs: {:}".format(input_names, output_names))
success = parser.parse_buffer(uff_model, network)
if not success:
G_LOGGER.critical("Could not parse UFF correctly")
return builder, network, parser, input_shapes[0][0]
class ParseNetworkFromOnnxLegacy(BaseNetworkFromOnnx):
def __init__(self, onnx_loader):
"""
Parses an ONNX model to create a trt.INetworkDefinition. This loader only supports the
implicit batch version of the parser.
Args:
onnx_loader (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]):
An ONNX model or a callable that returns one.
"""
super().__init__(explicit_precision=False, explicit_batch=False)
self.onnx_loader = onnx_loader
def call_impl(self):
from polygraphy.backend.onnx import util as onnx_util
with util.FreeOnException(super().call_impl()) as (builder, network, parser):
onnx_model, _ = util.invoke_if_callable(self.onnx_loader)
_, shape = list(onnx_util.get_input_metadata(onnx_model.graph).values())[0]
success = parser.parse(onnx_model.SerializeToString())
trt_util.check_onnx_parser_errors(parser, success)
return builder, network, parser, shape[0]
class LoadNetworkFromCaffe(object):
def __init__(self, deploy, model, outputs, batch_size=None, dtype=None):
self.deploy = deploy
self.model = model
if not self.model:
G_LOGGER.warning(
"No model file provided for Caffe model, random weights will be used. To avoid this, "
"please set the model paramater, or --model"
)
if not outputs:
G_LOGGER.critical(
"Please set Caffe model outputs using the outputs parameter, or --trt-outputs. "
"Note: To determine possible outputs, try running: tail -n50 {:}".format(deploy)
)
self.outputs = outputs
self.dtype = util.default(dtype, trt.float32)
self.batch_size = util.default(batch_size, 1)
def __call__(self):
builder = trt.Builder(get_trt_logger())
network = builder.create_network()
parser = trt.CaffeParser()
parser.parse(deploy=self.deploy, model=self.model, network=network, dtype=self.dtype)
if self.outputs and self.outputs != constants.MARK_ALL:
trt_util.mark_outputs(network, self.outputs)
return builder, network, parser, self.batch_size
def _input_metadata_from_network(network):
input_metadata = TensorMetadata()
for index in range(network.num_inputs):
tensor = network.get_input(index)
input_metadata.add(name=tensor.name, dtype=np.dtype(trt.nptype(tensor.dtype)), shape=tensor.shape)
return input_metadata
# Builds and tracks a single engine for a single network.
class TrtLegacyRunner(BaseRunner):
"""
A runner that can perform inference on a single TensorRT engine.
"""
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:" + str(self.host) + ", Device:" + str(self.device)
def __init__(
self,
network_loader=None,
max_workspace_size=None,
max_batch_size=None,
fp16=None,
tf32=None,
load_engine=None,
save_engine=None,
layerwise=False,
plugins=[],
name=None,
int8=None,
calibrator=None,
use_dla=None,
allow_gpu_fallback=None,
):
"""
Creates a runner that manages a single TensorRT engine.
network_loader (BaseModelLoader):
A loader that returns a TRT builder, network, parser and input shapes.
max_workspace_size (int): The maximum workspace size.
max_batch_size (int): The maximum batch size.
fp16 (bool): Whether to run in fp16 mode
layerwise (bool): Whether to retrieve the outputs of every layer in the network.
name (str):
The human-readable name prefix to use for this runner.
A runner count and timestamp will be appended to this prefix.
"""
G_LOGGER.warning("TrtLegacyRunner is deprecated, and will be removed in a future release")
# Load any user-supplied plugin libraries. This must happen before everything else, including engine deserialization.
if plugins:
import ctypes
for plugin in plugins:
path = os.path.abspath(plugin)
G_LOGGER.info("Loading plugin library: {:}".format(path))
ctypes.CDLL(path)
# Choose a unique name for this runner.
super().__init__(name=name, prefix="trt-legacy-runner")
# Save parameters for activate and deactivate.
self.network_loader = network_loader
self.max_workspace_size = util.default(max_workspace_size, 1 << 24)
self.fp16 = util.default(fp16, False)
self.tf32 = util.default(tf32, False)
self.load_engine = load_engine
self.engine_path = save_engine
self.layerwise = layerwise
self.max_batch_size = max_batch_size
self.int8 = util.default(int8, False)
self.calibrator = calibrator
self.use_dla = use_dla
self.allow_gpu_fallback = allow_gpu_fallback
def activate_impl(self):
"""
Vars:
engine (trt.ICudaEngine):
The engine tracked by this runner. The TrtLegacyRunner OWNS the engine it
manages, and therefore is responsible for its destruction. Do not free the engine outside of the
runner, or it will result in a double free.
context (trt.IExecutionContext): The context used for inference.
input_buffers (Dict[str, TrtLegacyRunner.HostDeviceMem]):
A mapping of binding names to HostDeviceMem objects for input buffers.
output_buffers (Dict[str, TrtLegacyRunner.HostDeviceMem]):
A mapping of binding names to HostDeviceMem objects for output buffers.
bindings (List[int]): A list of device pointers for engine bindings.
stream (cuda.Stream): The CUDA stream that this runner will use for inference.
"""
# Only initialize GPU after this runner is activated.
# Allocates all buffers required for an engine, i.e. host/device input_buffers/output_buffers.
def allocate_buffers(engine):
input_buffers = OrderedDict()
output_buffers = OrderedDict()
stream = cuda.Stream()
G_LOGGER.verbose("Using batch size: " + str(engine.max_batch_size) + " during buffer allocation")
for binding in engine:
shape = (engine.max_batch_size,) + tuple(engine.get_binding_shape(binding))
dtype = engine.get_binding_dtype(binding)
device_mem = cuda.DeviceArray(shape=shape, dtype=trt.nptype(dtype))
G_LOGGER.extra_verbose("Tensor: " "{:35} | Allocated: {:}".format(binding, device_mem))
if engine.binding_is_input(binding):
input_buffers[binding] = TrtLegacyRunner.HostDeviceMem(None, device_mem)
else:
host_mem = np.empty(shape=shape, dtype=trt.nptype(dtype))
output_buffers[binding] = TrtLegacyRunner.HostDeviceMem(host_mem, device_mem)
return input_buffers, output_buffers, stream
# Always try reading the engine first, or, failing that, build it.
if self.load_engine:
with open(self.load_engine, "rb") as f, trt.Runtime(get_trt_logger()) as runtime:
G_LOGGER.info("Reading engine from {:}".format(self.load_engine))
self.engine = runtime.deserialize_cuda_engine(f.read())
else:
trt.init_libnvinfer_plugins(get_trt_logger(), "")
builder, network, parser, model_batch_size = self.network_loader()
with builder, network, parser, builder.create_builder_config() as config:
if not network:
G_LOGGER.critical("Invalid network")
G_LOGGER.super_verbose(lambda: trt_util.str_from_network(network) or "Finished logging network")
builder.max_batch_size = int(self.max_batch_size or model_batch_size or 1)
config.max_workspace_size = int(self.max_workspace_size)
if not self.tf32:
with contextlib.suppress(AttributeError):
config.clear_flag(trt.BuilderFlag.TF32)
if self.fp16:
config.set_flag(trt.BuilderFlag.FP16)
if self.int8:
config.set_flag(trt.BuilderFlag.INT8)
input_metadata = _input_metadata_from_network(network)
with contextlib.suppress(AttributeError): # Polygraphy calibrator has a reset method
self.calibrator.reset(input_metadata)
config.int8_calibrator = self.calibrator
if self.use_dla:
config.default_device_type = trt.DeviceType.DLA
config.DLA_core = 0
if self.allow_gpu_fallback:
config.set_flag(trt.BuilderFlag.GPU_FALLBACK)
if self.layerwise:
trt_util.mark_layerwise(network)
G_LOGGER.info(
"Building engine: max workspace size={:} bytes, max batch size={:}, fp16={:}, "
"tf32={:}, int8={:}".format(
config.max_workspace_size, builder.max_batch_size, self.fp16, self.tf32, self.int8
)
)
self.engine = builder.build_engine(network, config)
if not self.engine:
G_LOGGER.critical("Invalid Engine. Please ensure the engine was built correctly")
if self.engine_path:
with open(self.engine_path, "wb") as f:
G_LOGGER.info("Writing engine to {:}".format(self.engine_path))
f.write(self.engine.serialize())
self.context = self.engine.create_execution_context()
self.input_buffers, self.output_buffers, self.stream = allocate_buffers(self.engine)
def get_input_metadata_impl(self):
inputs = TensorMetadata()
for binding in self.engine:
if self.engine.binding_is_input(binding):
# Always prepend a dynamic batch dimension
inputs.add(
binding,
trt.nptype(self.engine.get_binding_dtype(binding)),
[-1] + list(self.engine.get_binding_shape(binding)),
)
return inputs
def deactivate_impl(self):
# Destroy the engine and context.
with self.engine, self.context:
pass
[inp.device.free() for inp in self.input_buffers.values()]
[out.device.free() for out in self.output_buffers.values()]
self.stream.free()
del (self.engine, self.context, self.input_buffers, self.output_buffers, self.stream)
def infer_impl(self, feed_dict):
start = time.time()
[self.input_buffers[name].device.copy_from(buffer, self.stream) for name, buffer in feed_dict.items()]
# We will not run with smaller batch sizes than whatever the builder chose.
bindings = [buf.device.ptr for buf in self.input_buffers.values()] + [
buf.device.ptr for buf in self.output_buffers.values()
]
status = self.context.execute_async(
batch_size=self.context.engine.max_batch_size, bindings=bindings, stream_handle=self.stream.ptr
)
if not status:
G_LOGGER.critical("Model execution failed. Please see the log messages above for details")
for out in self.output_buffers.values():
out.host = out.device.copy_to(out.host, self.stream)
self.stream.synchronize()
end = time.time()
out_dict = OrderedDict()
for (name, out) in self.output_buffers.items():
out_dict[name] = out.host
self.inference_time = end - start
return out_dict
| TensorRT-master | tools/Polygraphy/polygraphy/backend/trt_legacy.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from collections import OrderedDict
from polygraphy import mod, util
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER
gs = mod.lazy_import("onnx_graphsurgeon")
numpy_helper = mod.lazy_import("onnx.numpy_helper")
onnx = mod.lazy_import("onnx")
def get_num_nodes(model):
def _get_num_graph_nodes(graph):
num_nodes = len(graph.node)
for node in graph.node:
for attr in node.attribute:
if attr.type == onnx.AttributeProto.GRAPH:
num_nodes += _get_num_graph_nodes(attr.g)
elif attr.type == onnx.AttributeProto.GRAPHS:
for subgraph in attr.graphs:
num_nodes += _get_num_graph_nodes(subgraph)
return num_nodes
return _get_num_graph_nodes(model.graph)
def all_tensor_names(model):
all_outputs = [output for node in model.graph.node if node.op_type != "Constant" for output in node.output]
all_outputs = util.unique_list(all_outputs)
return all_outputs
def check_outputs_not_found(not_found, all_outputs):
if not_found:
G_LOGGER.critical(
"The following outputs were not found: {:}.\nNote: Available tensors:\n\t{:}".format(
not_found, "\n\t".join(all_outputs)
)
)
def mark_outputs(model, outputs):
# Clear the old outputs
while model.graph.output:
model.graph.output.pop()
all_outputs = all_tensor_names(model)
all_outputs_set = set(all_outputs)
value_info_map = {t.name: t for t in model.graph.value_info}
out_tensors = []
not_found = set()
for output in outputs:
if output in all_outputs_set:
value_info = value_info_map.get(output, onnx.helper.make_empty_tensor_value_info(output))
out_tensors.append(value_info)
else:
not_found.add(output)
check_outputs_not_found(not_found, all_outputs)
model.graph.output.extend(out_tensors)
return model
def mark_layerwise(model):
# Add all non-constant node outputs as graph outputs
model = mark_outputs(model, all_tensor_names(model))
return model
def unmark_outputs(model, outputs):
outputs = set(outputs)
cur_outputs = []
while model.graph.output:
cur_outputs.append(model.graph.output.pop())
cur_outputs = list(reversed(cur_outputs)) # Preserve ordering
unmarked_outputs = set()
for out in cur_outputs:
if out.name not in outputs:
model.graph.output.extend([out])
else:
unmarked_outputs.add(out.name)
not_found = outputs - unmarked_outputs
check_outputs_not_found(not_found, [t.name for t in model.graph.output])
return model
def get_shape(tensor):
shape = []
if isinstance(tensor, onnx.TensorProto):
shape = tensor.dims
else:
for dim in tensor.type.tensor_type.shape.dim:
if dim.dim_param:
shape.append(dim.dim_param)
else:
shape.append(dim.dim_value)
return shape
def get_dtype(tensor):
if isinstance(tensor, onnx.TensorProto):
onnx_type = tensor.data_type
else:
onnx_type = tensor.type.tensor_type.elem_type
if onnx_type in onnx.mapping.TENSOR_TYPE_TO_NP_TYPE:
return onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[onnx_type]
return None
def get_values(tensor):
return numpy_helper.to_array(tensor)
def get_tensor_metadata(tensors):
metadata = TensorMetadata()
for tensor in tensors:
metadata.add(name=tensor.name, dtype=get_dtype(tensor), shape=get_shape(tensor))
return metadata
def get_input_metadata(graph):
# Some "inputs" are actually weights with initalizers, so we need to eliminate those.
initializer_names = {tensor.name for tensor in graph.initializer}
input_tensors = [tensor for tensor in graph.input if tensor.name not in initializer_names]
return get_tensor_metadata(input_tensors)
def get_output_metadata(graph):
return get_tensor_metadata(graph.output)
def str_from_onnx(model, mode="full"):
"""
Converts an ONNX Graph to a human-readable representation
Args:
graph (onnx.GraphProto): The onnx graph.
mode (str): Controls what is displayed. Choices: ["none", "basic", "attrs", "full"]
Returns:
str
"""
def get_opset():
try:
return model.opset_import[0].version
except:
G_LOGGER.warning("Model does not contain opset information!")
return None
onnx_str = ""
onnx_str += "Name: {:} | Opset: {:}\n".format(model.graph.name, get_opset())
onnx_str += "\n"
onnx_str += str_from_onnx_graph(model.graph, mode=mode, tensors={})
return onnx_str
def str_from_onnx_graph(graph, mode, tensors, indent_level=0):
input_metadata = get_input_metadata(graph)
output_metadata = get_output_metadata(graph)
initializer_metadata = get_tensor_metadata(graph.initializer)
# Subgraph inputs should remain separate from each other, hence copy the tensors map
tensors = copy.copy(tensors)
tensors.update(get_tensor_metadata(graph.value_info))
tensors.update(initializer_metadata)
tensors.update(input_metadata)
tensors.update(output_metadata)
graph_type = "Graph" if indent_level == 0 else "Subgraph"
onnx_str = ""
onnx_str += "---- {:} {:} Input(s) ----\n{:}\n\n".format(len(input_metadata), graph_type, input_metadata)
onnx_str += "---- {:} {:} Output(s) ----\n{:}\n\n".format(len(output_metadata), graph_type, output_metadata)
onnx_str += "---- {:} Initializer(s) ----\n".format(len(initializer_metadata))
if mode == "full":
for init in graph.initializer:
onnx_str += "Initializer | {:} [dtype={:}, shape={:}] | Values:\n{:}\n\n".format(
init.name, get_dtype(init), get_shape(init), util.indent_block(str(get_values(init)))
)
if not graph.initializer:
onnx_str += "{}\n\n"
elif mode != "none":
onnx_str += str(initializer_metadata)
onnx_str += "\n\n"
else:
onnx_str += "\n"
def metadata_from_names(names):
metadata = TensorMetadata()
for name in names:
dtype, shape = tensors.get(name, (None, None))
if name in initializer_metadata:
name = "Initializer | {:}".format(name)
metadata.add(name=name, dtype=dtype, shape=shape)
return metadata
# Maps values from the AttributeType enum to their string representations, e.g., {1: "FLOAT"}
ATTR_TYPE_MAPPING = dict(zip(onnx.AttributeProto.AttributeType.values(), onnx.AttributeProto.AttributeType.keys()))
# Maps an ONNX attribute to the corresponding Python property
ONNX_PYTHON_ATTR_MAPPING = {
"FLOAT": "f",
"INT": "i",
"STRING": "s",
"TENSOR": "t",
"GRAPH": "g",
"FLOATS": "floats",
"INTS": "ints",
"STRINGS": "strings",
}
def attrs_to_dict(attrs):
attr_dict = OrderedDict()
for attr in attrs:
def process_attr(attr_str: str):
processed = getattr(attr, ONNX_PYTHON_ATTR_MAPPING[attr_str])
if attr_str == "STRING":
processed = processed.decode()
elif attr_str == "TENSOR":
tensor_str = "Tensor: [dtype={:}, shape={:}]".format(get_dtype(processed), get_shape(processed))
if mode == "full":
tensor_str += " | Values:\n" + util.indent_block(str(get_values(processed)))
processed = tensor_str
elif attr_str == "GRAPH":
processed = "\n" + str_from_onnx_graph(processed, mode, tensors, indent_level=indent_level + 2)
elif attr_str == "FLOATS" or attr_str == "INTS":
# Proto hacky list to normal Python list
processed = [p for p in processed]
elif attr_str == "STRINGS":
processed = [p.decode() for p in processed]
return processed
if attr.type in ATTR_TYPE_MAPPING:
attr_str = ATTR_TYPE_MAPPING[attr.type]
if attr_str in ONNX_PYTHON_ATTR_MAPPING:
attr_dict[attr.name] = process_attr(attr_str)
else:
G_LOGGER.warning(
"Attribute of type {:} is currently unsupported. Skipping attribute.".format(attr_str)
)
else:
G_LOGGER.warning(
"Attribute type: {:} was not recognized. Was the graph generated with a newer IR "
"version than the installed `onnx` package? Skipping attribute.".format(attr.type)
)
return attr_dict
onnx_str += "---- {:} Node(s) ----\n".format(len(graph.node))
if mode != "none":
for index, node in enumerate(graph.node):
input_info = metadata_from_names(node.input)
output_info = metadata_from_names(node.output)
onnx_str += util.str_from_layer("Node", index, node.name, node.op_type, input_info, output_info)
if mode in ["attrs", "full"]:
attrs = attrs_to_dict(node.attribute)
if attrs:
onnx_str += util.indent_block("---- Attributes ----") + "\n"
for key, val in attrs.items():
attr_str = ""
if node.name:
attr_str += "{:}.".format(node.name)
onnx_str += util.indent_block("{:}{:} = {:}".format(attr_str, key, val)) + "\n"
onnx_str += "\n"
return util.indent_block(onnx_str, indent_level)
##
## ONNX-GraphSurgeon utilities
##
def meta_from_gs_tensors(tensors):
"""Get TensorMetadata from a list of ONNX-GraphSurgeon tensors"""
meta = TensorMetadata()
for tensor in tensors:
meta.add(tensor.name, tensor.dtype, tensor.shape)
return meta
def set_shapes_from_layerwise_meta(graph, layerwise_meta):
for tensor in graph.tensors().values():
if isinstance(tensor, gs.Variable) and tensor.name in layerwise_meta:
tensor.shape = layerwise_meta[tensor.name].shape
tensor.dtype = layerwise_meta[tensor.name].dtype
def lower_constant_nodes(graph):
"""Converts the outputs of Constant nodes into constant tensors, removing the nodes"""
remove_nodes = set()
with graph.node_ids():
for node in graph.nodes:
if node.op == "Constant" and "value" in node.attrs:
node.outputs[0].to_constant(node.attrs["value"].values)
remove_nodes.add(node.id)
# Iterate from the end so we don't shift the list under us.
for node_id in sorted(remove_nodes, reverse=True):
del graph.nodes[node_id]
return graph
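# A short sketch of inspecting a loaded model with the helpers above; the path is
# hypothetical. get_input_metadata() skips graph inputs that are actually initializers.
def _example_inspect_model():
    model = onnx.load("/path/to/model.onnx")  # Hypothetical path
    G_LOGGER.info("Inputs: {:}".format(get_input_metadata(model.graph)))
    G_LOGGER.info("Outputs: {:}".format(get_output_metadata(model.graph)))
    return mark_layerwise(model)  # Marks every non-Constant node output as a graph output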
| TensorRT-master | tools/Polygraphy/polygraphy/backend/onnx/util.py |
from polygraphy.backend.onnx.loader import *
| TensorRT-master | tools/Polygraphy/polygraphy/backend/onnx/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import os
import sys
import tempfile
from polygraphy import constants, mod, util
from polygraphy.backend.base import BaseLoader
from polygraphy.backend.onnx import util as onnx_util
from polygraphy.logger import G_LOGGER, LogMode
onnx = mod.lazy_import("onnx", version=">=1.8.1")
onnxrt = mod.lazy_import("onnxruntime")
onnxmltools = mod.lazy_import("onnxmltools")
tf = mod.lazy_import("tensorflow", version="<2.0")
tf2onnx = mod.lazy_import("tf2onnx")
tf_util = mod.lazy_import("polygraphy.backend.tf.util", log=False)
gs = mod.lazy_import("onnx_graphsurgeon", version=mod.LATEST_VERSION)
shape_inference = mod.lazy_import("onnx.shape_inference")
external_data_helper = mod.lazy_import("onnx.external_data_helper")
LARGE_MODEL_THRESHOLD = 512 << 20 # 512 MiB
class BaseLoadOnnxCopy(BaseLoader):
"""
Abstract base class for loaders that require loading an ONNX model and potentially
making a copy.
"""
def __init__(self, model, copy=None):
"""
Args:
model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]):
An ONNX model or a callable that returns one.
copy (bool): Whether to create a copy of the model first. Defaults to False.
"""
self._model = model
self.copy = util.default(copy, False)
def load(self):
model, _ = util.invoke_if_callable(self._model)
if self.copy:
model = copy.copy(model)
return model
class _GSGraphManager(object):
"""
Imports an ONNX-GraphSurgeon graph.
If the provided model is already a graph, the graph is not
exported to ONNX.
"""
def __init__(self, model):
self._model = model
def __enter__(self):
model, _ = util.invoke_if_callable(self._model)
self.USE_GS_GRAPH = isinstance(model, gs.Graph)
if self.USE_GS_GRAPH:
self.graph = model.copy()
else:
self.graph = gs_from_onnx(model)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.USE_GS_GRAPH:
self.retval = self.graph
else:
self.retval = gs.export_onnx(self.graph, do_type_check=False)
@mod.export(funcify=True)
class GsFromOnnx(BaseLoader):
"""
Functor that creates an ONNX-GraphSurgeon graph from an ONNX ModelProto.
"""
def __init__(self, model):
"""
Creates an ONNX-GraphSurgeon graph from an ONNX ModelProto.
Args:
model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]):
An ONNX model or a callable that returns one.
"""
self._model = model
def call_impl(self):
"""
Returns:
onnx_graphsurgeon.Graph: The ONNX-GraphSurgeon representation of the ONNX model
"""
model, _ = util.invoke_if_callable(self._model)
return gs.import_onnx(model)
@mod.export(funcify=True)
class OnnxFromPath(BaseLoader):
"""
Functor that loads an ONNX model from a file.
"""
def __init__(self, path, external_data_dir=None):
"""
Loads an ONNX model from a file.
Args:
path (str): The path from which to load the model.
external_data_dir (str): The directory where external data for the model is stored.
"""
self.path = path
self.external_data_dir = external_data_dir
def call_impl(self):
"""
Returns:
onnx.ModelProto: The ONNX model
"""
G_LOGGER.info("Loading model: {:}".format(self.path))
# If external_data_dir is not None, we'll load external data ourselves
model = onnx.load(self.path, load_external_data=self.external_data_dir is None)
if self.external_data_dir is not None:
G_LOGGER.verbose("Loading external data from: {:}".format(self.external_data_dir))
external_data_helper.load_external_data_for_model(model, self.external_data_dir)
return model
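# A small usage sketch for OnnxFromPath; the path below is hypothetical and only
# illustrates the call pattern. Loaders are callable, so invoking the functor
# performs the actual load.
def _example_onnx_from_path():
    load_model = OnnxFromPath("/path/to/model.onnx")  # Hypothetical path
    model = load_model()  # Returns an onnx.ModelProto
    return model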
@mod.export(funcify=True)
class OnnxFromTfGraph(BaseLoader):
"""
Functor that loads a TensorFlow graph and converts it to ONNX using the tf2onnx converter.
"""
def __init__(self, graph, opset=None, optimize=None, fold_constant=None):
"""
Converts a TensorFlow model into ONNX.
Args:
graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]):
A tuple containing a TensorFlow graph and output names or a callable that returns one.
opset (int): The ONNX opset to use during conversion.
optimize (bool): Whether to use tf2onnx's graph optimization pass.
fold_constant (bool):
Whether to fold constants in the TensorFlow Graph.
Requires that ``optimize`` is also enabled.
Defaults to True.
"""
self._graph = graph
self.opset = util.default(opset, 11)
self.fold_constant = util.default(fold_constant, True)
self.optimize = util.default(optimize, True)
if self.fold_constant and not self.optimize:
G_LOGGER.warning(
"`fold_constant` is enabled, but `optimize` is disabled. Constant folding will not be performed"
)
def call_impl(self):
"""
Returns:
onnx.ModelProto: The ONNX model.
"""
(graph, output_names), _ = util.invoke_if_callable(self._graph)
input_names = list(tf_util.get_input_metadata(graph).keys())
if self.fold_constant:
G_LOGGER.info("Folding constants in graph using tf2onnx.tfonnx.tf_optimize")
graphdef = graph.as_graph_def()
if self.optimize:
graphdef = tf2onnx.tfonnx.tf_optimize(
input_names, output_names, graph.as_graph_def(), fold_constant=self.fold_constant
)
with tf.Graph().as_default() as graph, tf.compat.v1.Session(graph=graph) as sess:
tf.import_graph_def(graphdef, name="")
onnx_graph = tf2onnx.tfonnx.process_tf_graph(
graph, input_names=input_names, output_names=output_names, opset=self.opset
)
if self.optimize:
onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)
return onnx_graph.make_model("model")
@mod.export(funcify=True)
class ModifyOutputs(BaseLoadOnnxCopy):
"""
Functor that modifies the outputs of an ONNX model.
"""
def __init__(self, model, outputs=None, exclude_outputs=None, copy=None):
"""
Modifies outputs of an ONNX model.
Args:
model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]):
An ONNX model or a callable that returns one.
outputs (Sequence[str]):
Names of tensors to mark as outputs. If provided, this will override the
existing model outputs.
If a value of `constants.MARK_ALL` is used instead of a list, all tensors in the network are marked.
exclude_outputs (Sequence[str]):
Names of tensors to exclude as outputs. This can be useful in conjunction with
``outputs=constants.MARK_ALL`` to omit outputs.
copy (bool): Whether to create a copy of the model first. Defaults to False.
"""
super().__init__(model, copy)
self.outputs = outputs
self.exclude_outputs = exclude_outputs
def call_impl(self):
"""
Returns:
onnx.ModelProto: The ONNX model with modified outputs.
"""
model = self.load()
if self.outputs == constants.MARK_ALL:
G_LOGGER.verbose("Marking all ONNX tensors as outputs")
model = onnx_util.mark_layerwise(model)
elif self.outputs is not None:
model = onnx_util.mark_outputs(model, self.outputs)
if self.exclude_outputs is not None:
model = onnx_util.unmark_outputs(model, self.exclude_outputs)
return model
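# A sketch showing how ModifyOutputs composes with another loader; the path and
# tensor name are hypothetical. Passing constants.MARK_ALL instead of a list would
# mark every tensor in the model as an output.
def _example_modify_outputs():
    load_model = OnnxFromPath("/path/to/model.onnx")  # Hypothetical path
    mark_outputs = ModifyOutputs(load_model, outputs=["intermediate_tensor"])  # Hypothetical tensor name
    return mark_outputs()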
@mod.export(funcify=True)
class ConvertToFp16(BaseLoadOnnxCopy):
"""
Functor that converts all floating point tensors in the model to 16-bit precision.
This is *not* needed in order to use TensorRT's fp16 precision, but may be useful for other backends.
"""
def __init__(self, model, copy=None):
"""
Converts all floating point tensors in the model to 16-bit precision.
Args:
model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]):
An ONNX model or a callable that returns one.
copy (bool): Whether to create a copy of the model first. Defaults to False.
"""
super().__init__(model, copy)
def call_impl(self):
"""
Returns:
onnx.ModelProto: The modified ONNX model.
"""
model = self.load()
G_LOGGER.info("Converting float tensors to float16")
try:
model = onnxmltools.utils.float16_converter.convert_float_to_float16(
model, keep_io_types=True, disable_shape_inference=True
)
except TypeError: # Using an old version of onnxmltools
model = onnxmltools.utils.float16_converter.convert_float_to_float16(model)
return model
@mod.export(funcify=True)
class FoldConstants(BaseLoadOnnxCopy):
"""
Functor that folds constants in an ONNX model.
"""
def __init__(
self,
model,
num_passes=None,
do_shape_inference=None,
partitioning=None,
fold_shapes=None,
copy=None,
error_ok=None,
):
"""
Fold constants in an ONNX model.
Args:
model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]):
An ONNX model or a callable that returns one.
num_passes (int):
The number of constant folding passes to run.
Sometimes, subgraphs that compute tensor shapes may not be foldable in a single pass.
By default, Polygraphy will automatically determine the number of passes required.
do_shape_inference (bool):
Whether to run shape inference in the model between passes.
This enables the loader to fold `Shape` nodes.
Only effective if `fold_shapes` is True.
Defaults to True.
partitioning (Union[str, None]):
Whether/How to partition the graph so that errors in folding one
part of a model do not affect other parts. Available modes are:
- None: Do not partition the graph. If inference fails, no constants are folded.
- 'basic': Partition the graph. If inference fails in one partition, other partitions will remain unaffected.
- 'recursive': Partition the graph recursively. If inference fails in a partition, the partition will be further partitioned.
Defaults to None.
fold_shapes (bool):
Whether to fold `Shape` nodes in the graph.
This requires shapes to be inferred in the graph, and can only fold
static shapes.
Defaults to True.
copy (bool):
Whether to create a copy of the model first.
Defaults to False.
error_ok (bool):
Whether to suppress errors during constant folding.
If this is set to `False`, errors will be re-raised.
Defaults to True.
"""
super().__init__(model, copy)
self.num_passes = num_passes
self.do_shape_inference = util.default(do_shape_inference, True)
self.partitioning = partitioning
self.fold_shapes = util.default(fold_shapes, True)
self.error_ok = util.default(error_ok, True)
def call_impl(self):
"""
Returns:
onnx.ModelProto: The new ONNX model with constants folded.
"""
def run_const_fold_pass(model):
graph = gs_from_onnx(model)
del model
try:
graph.fold_constants(fold_shapes=self.fold_shapes, partitioning=self.partitioning)
except TypeError as err: # Using an old version of ONNX-GS
if self.partitioning:
G_LOGGER.critical(
"This version of ONNX-GraphSurgeon may not support partitioning the graph.\n"
"Please upgrade to a newer version of ONNX-GraphSurgeon or disable partitioning.\n"
"Note: Error was:\n{:}".format(err)
)
if self.fold_shapes:
G_LOGGER.critical(
"This version of ONNX-GraphSurgeon may not support folding shapes.\n"
"Please upgrade to a newer version of ONNX-GraphSurgeon or disable shape folding.\n"
"Note: Error was:\n{:}".format(err)
)
graph.fold_constants()
model = gs.export_onnx(graph.cleanup(), do_type_check=False)
del graph
if self.fold_shapes and self.do_shape_inference:
model = infer_shapes(model)
return model
if not mod.has_mod(onnxrt):
G_LOGGER.error(
"ONNX-Runtime is not installed, so constant folding may be suboptimal or not work at all.\n"
"Consider installing ONNX-Runtime: {:} -m pip install onnxruntime".format(sys.executable)
)
model = self.load()
prefold_num_nodes = len(model.graph.node)
postfold_num_nodes = -1
index = 0
while (prefold_num_nodes != postfold_num_nodes) and (self.num_passes is None or index < self.num_passes):
prefold_num_nodes = onnx_util.get_num_nodes(model)
G_LOGGER.start("Folding Constants | Pass {:}".format(index + 1))
try:
model = run_const_fold_pass(model)
except Exception as err:
if not self.error_ok:
raise
G_LOGGER.warning(
"Constant folding pass failed. Skipping subsequent passes.\nNote: Error was:\n{:}".format(err)
)
break
else:
postfold_num_nodes = onnx_util.get_num_nodes(model)
index += 1
G_LOGGER.finish(
"\tTotal Nodes | Original: {:5}, After Folding: {:5} | {:5} Nodes Folded".format(
prefold_num_nodes, postfold_num_nodes, prefold_num_nodes - postfold_num_nodes
)
)
return model
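# A sketch of chaining FoldConstants after a file loader; the path is hypothetical.
# With copy left at its default of False, the loaded model is modified in place.
def _example_fold_constants():
    load_model = OnnxFromPath("/path/to/model.onnx")  # Hypothetical path
    fold = FoldConstants(load_model, fold_shapes=True, error_ok=True)
    return fold()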
@mod.export(funcify=True)
class InferShapes(BaseLoader):
"""
Functor that runs shape inference on an ONNX model.
"""
def __init__(self, model, error_ok=None, external_data_dir=None, save_to_disk_threshold_bytes=None):
"""
Run shape inference on an ONNX model.
Args:
model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]):
An ONNX model or a callable that returns one, or a path to a model.
Supports models larger than the 2 GiB protobuf limit.
error_ok (bool):
Whether errors during shape inference should be suppressed. Defaults to True.
external_data_dir (str):
The directory where external data for the model is stored.
Only used if the model is provided via a path rather than a loader.
save_to_disk_threshold_bytes (int):
The size in bytes above which a ModelProto will be serialized to the disk
before running shape inference.
This can be used to work around the 2 GiB protobuf limitation.
Defaults to ~2 GiB.
"""
self._model = model
self.error_ok = util.default(error_ok, True)
self.external_data_dir = external_data_dir
# Subtract a little so we're below the real threshold
self.save_to_disk_threshold_bytes = util.default(save_to_disk_threshold_bytes, (2 << 30) - 8192)
def call_impl(self):
"""
Returns:
onnx.ModelProto: The new ONNX model with shapes inferred.
"""
model, _ = util.invoke_if_callable(self._model)
external_data_dir = self.external_data_dir
try:
if isinstance(model, onnx.ModelProto):
MODEL_SIZE = model.ByteSize()
if MODEL_SIZE > LARGE_MODEL_THRESHOLD:
G_LOGGER.warning(
"Attempting to run shape inference on a large model. "
"This may require a large amount of memory.\nIf memory consumption becomes too high, "
"the process may be killed. You may want to try disabling shape inference in that case. ",
mode=LogMode.ONCE,
)
if MODEL_SIZE > self.save_to_disk_threshold_bytes:
G_LOGGER.warning(
"Model size ({:.3} MiB) exceeds the in-memory size threshold: {:.3} MiB.\n"
"The model will be saved to a temporary file before shape inference is run.".format(
MODEL_SIZE / (1024.0 ** 2), self.save_to_disk_threshold_bytes / (1024.0 ** 2)
),
mode=LogMode.ONCE,
)
outdir = tempfile.TemporaryDirectory()
outpath = os.path.join(outdir.name, "tmp_model.onnx")
save_onnx(model, outpath, external_data_path="ext.data")
model = outpath
external_data_dir = outdir.name
G_LOGGER.verbose("Starting ONNX shape inference")
if isinstance(model, onnx.ModelProto):
model = shape_inference.infer_shapes(model)
else:
tmp_path = util.NamedTemporaryFile(prefix="tmp_polygraphy_", suffix=".onnx").name
G_LOGGER.verbose("Writing shape-inferred model to: {:}".format(tmp_path))
shape_inference.infer_shapes_path(model, tmp_path)
# When external_data_dir is unset, use the model's current directory
model = onnx_from_path(
tmp_path, external_data_dir=util.default(external_data_dir, os.path.dirname(model) or None)
)
G_LOGGER.verbose("ONNX Shape Inference completed successfully")
except Exception as err:
if not self.error_ok:
raise
G_LOGGER.warning("ONNX shape inference exited with an error:\n{:}".format(err))
G_LOGGER.internal_error("ONNX shape inference exited with an error:\n{:}".format(err))
if not isinstance(model, onnx.ModelProto):
model = onnx_from_path(model, external_data_dir=self.external_data_dir)
return model
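# A sketch of running shape inference on a model that may exceed the protobuf size
# limit; the paths are hypothetical. Providing a path (rather than a loaded
# ModelProto) lets the loader use infer_shapes_path for large models.
def _example_infer_shapes():
    infer = InferShapes("/path/to/large_model.onnx", external_data_dir="/path/to/ext_data")  # Hypothetical paths
    return infer()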
@mod.export(funcify=True)
class ExtractSubgraph(BaseLoader):
"""
Functor that extracts a subgraph from an ONNX model.
"""
def __init__(self, model, input_metadata=None, output_metadata=None, check_meta=None):
"""
Extracts a subgraph from an ONNX model.
Args:
model (Union[Union[onnx.ModelProto, onnx_graphsurgeon.Graph], Callable() -> Union[onnx.ModelProto, onnx_graphsurgeon.Graph]]):
An ONNX model or ONNX-GraphSurgeon Graph or a callable that returns one.
input_metadata (TensorMetadata):
Metadata for the inputs of the subgraph.
Name, shape, and data type are required.
If not provided, the graph inputs are not modified.
output_metadata (TensorMetadata):
Metadata for the outputs of the subgraph.
Name and data type are required.
If not provided, the graph outputs are not modified.
check_meta (bool):
Whether to check that the provided input and output metadata include
all the expected fields.
Defaults to True.
"""
self._model = model
self.input_metadata = input_metadata
self.output_metadata = output_metadata
self.check_meta = util.default(check_meta, True)
def call_impl(self):
"""
Returns:
Union[onnx.ModelProto, onnx_graphsurgeon.Graph]:
The new ONNX model or ONNX-GraphSurgeon Graph.
"""
with _GSGraphManager(self._model) as manager:
graph = manager.graph
TENSOR_MAP = graph.tensors()
def get_tensor(name):
if name not in TENSOR_MAP:
G_LOGGER.critical("Tensor: {:} does not exist in the model.".format(name))
return TENSOR_MAP[name]
def update_tensor(name, dtype, shape):
tensor = get_tensor(name)
# No need to update constants
if isinstance(tensor, gs.Variable):
tensor.dtype, tensor.shape = dtype or tensor.dtype, shape or tensor.shape
return tensor
def check_meta(name, dtype, shape, meta_type, needs_shape=True):
if not self.check_meta:
return
if needs_shape and shape is None:
G_LOGGER.warning(
"{:} metadata should include shape, but no shape was "
"provided for tensor: {:}".format(meta_type, name)
)
if dtype is None:
G_LOGGER.warning(
"{:} metadata should include data type, but no data type was "
"provided for tensor: {:}".format(meta_type, name)
)
if self.input_metadata is not None:
graph.inputs.clear()
for name, (dtype, shape) in self.input_metadata.items():
tensor = update_tensor(name, dtype, shape)
check_meta(name, tensor.dtype, tensor.shape, "Input")
tensor.inputs.clear()
graph.inputs.append(tensor)
if self.output_metadata is not None:
graph.outputs.clear()
for name, (dtype, shape) in self.output_metadata.items():
tensor = update_tensor(name, dtype, shape)
check_meta(name, tensor.dtype, tensor.shape, "Output", needs_shape=False)
graph.outputs.append(tensor)
graph.cleanup().toposort()
return manager.retval
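# A sketch of extracting a subgraph between two tensors; the path, tensor names,
# dtypes, and shapes are hypothetical and must match tensors that actually exist
# in the model being loaded.
def _example_extract_subgraph():
    import numpy as np
    from polygraphy.common import TensorMetadata

    load_model = OnnxFromPath("/path/to/model.onnx")  # Hypothetical path
    inputs = TensorMetadata()
    inputs.add("intermediate_in", dtype=np.float32, shape=(1, 64, 56, 56))  # Hypothetical tensor
    outputs = TensorMetadata()
    outputs.add("intermediate_out", dtype=np.float32, shape=None)  # Hypothetical tensor
    extract = ExtractSubgraph(load_model, input_metadata=inputs, output_metadata=outputs)
    return extract()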
@mod.export(funcify=True)
class SaveOnnx(BaseLoader):
"""
Functor that saves an ONNX model to the specified path.
"""
def __init__(self, model, path, external_data_path=None, size_threshold=None, all_tensors_to_one_file=None):
"""
Saves an ONNX model to the specified path.
Args:
model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]):
An ONNX model or a callable that returns one.
path (str): Path at which to write the ONNX model.
external_data_path (str):
Path to save external data.
This is always a relative path; external data is always written to the same
directory as the model.
Set to an empty string to use the default path.
Set to None to disable.
Defaults to None.
size_threshold (int):
Tensor size threshold, in bytes, above which tensor data will be
stored in the external file.
Tensors smaller than this threshold will remain in the ONNX file.
Has no effect if external_data_path is not set.
Defaults to 1024.
all_tensors_to_one_file (bool):
Whether to write all tensors to one file when saving external data.
Has no effect if external_data_path is not set.
Defaults to True.
"""
self._model = model
self.path = path
self.external_data_path = external_data_path
self.size_threshold = size_threshold
self.all_tensors_to_one_file = all_tensors_to_one_file
def call_impl(self):
"""
Returns:
onnx.ModelProto: The model, after saving it.
"""
model, _ = util.invoke_if_callable(self._model)
G_LOGGER.info("Saving ONNX model to: {:}".format(self.path))
if self.external_data_path is not None:
G_LOGGER.verbose("Saving external data for ONNX model to: {:}".format(self.external_data_path))
try:
external_data_helper.convert_model_to_external_data(
model,
location=self.external_data_path,
all_tensors_to_one_file=util.default(self.all_tensors_to_one_file, True),
size_threshold=util.default(self.size_threshold, 1024),
)
except TypeError:
if self.size_threshold is not None:
G_LOGGER.warning(
"This version of onnx does not support size_threshold in convert_model_to_external_data"
)
external_data_helper.convert_model_to_external_data(
model,
location=self.external_data_path,
all_tensors_to_one_file=util.default(self.all_tensors_to_one_file, True),
)
else:
if self.size_threshold is not None:
G_LOGGER.warning(
"size_threshold is set, but external data path has not been set. "
"No external data will be written."
)
if self.all_tensors_to_one_file is not None:
G_LOGGER.warning(
"all_tensors_to_one_file is set, but external data path has not been set. "
"No external data will be written."
)
util.makedirs(self.path)
onnx.save(model, self.path)
return model
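# A sketch of saving a (possibly large) model with its weights stored as external
# data; the paths are hypothetical. external_data_path is relative, so the weights
# file is written next to the saved model.
def _example_save_onnx():
    load_model = OnnxFromPath("/path/to/model.onnx")  # Hypothetical path
    save = SaveOnnx(load_model, path="/path/to/saved.onnx", external_data_path="saved.weights")  # Hypothetical paths
    return save()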
@mod.export(funcify=True)
class BytesFromOnnx(BaseLoader):
"""
Functor that serializes an ONNX model.
"""
def __init__(self, model):
"""
Serializes an ONNX model.
Args:
model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]):
An ONNX model or a callable that returns one.
"""
self._model = model
def call_impl(self):
"""
Returns:
bytes: The serialized model.
"""
model, _ = util.invoke_if_callable(self._model)
return model.SerializeToString()
| TensorRT-master | tools/Polygraphy/polygraphy/backend/onnx/loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import copy
import time
from collections import OrderedDict
from polygraphy import cuda, mod, util
from polygraphy.backend.base import BaseRunner
from polygraphy.backend.trt import util as trt_util
from polygraphy.logger import G_LOGGER
np = mod.lazy_import("numpy")
trt = mod.lazy_import("tensorrt")
@mod.export()
class TrtRunner(BaseRunner):
"""
Runs inference using TensorRT.
Note that runners are not designed for production deployment and should generally
be used only for prototyping, testing, and debugging.
"""
def __init__(self, engine, name=None):
"""
Args:
engine (Union[Union[trt.ICudaEngine, trt.IExecutionContext], Callable() -> Union[trt.ICudaEngine, trt.IExecutionContext]]):
A TensorRT engine or execution context or a callable that returns one.
If an engine is provided, the runner will create a context automatically.
name (str):
The human-readable name prefix to use for this runner.
A runner count and timestamp will be appended to this prefix.
"""
super().__init__(name=name, prefix="trt-runner")
self._engine_or_context = engine
def activate_impl(self):
def make_buffers(engine):
"""
Creates empty host and device buffers for the specified engine.
Always uses binding names from Profile 0.
"""
device_buffers = OrderedDict()
host_output_buffers = OrderedDict()
for idx in range(trt_util.get_bindings_per_profile(engine)):
binding = engine[idx]
dtype = trt_util.np_dtype_from_trt(engine.get_binding_dtype(binding))
device_buffers[binding] = cuda.DeviceArray(dtype=dtype)
if not engine.binding_is_input(binding):
host_output_buffers[binding] = np.empty(shape=tuple(), dtype=dtype)
G_LOGGER.extra_verbose("Created device buffers: {:}".format(device_buffers))
return device_buffers, host_output_buffers
engine_or_context, owning = util.invoke_if_callable(self._engine_or_context)
if isinstance(engine_or_context, trt.ICudaEngine):
self.engine = engine_or_context
self.owns_engine = owning
self.context = self.engine.create_execution_context()
self.owns_context = True
if not self.context:
G_LOGGER.critical("Invalid Context. See error log for details.")
elif isinstance(engine_or_context, trt.IExecutionContext):
self.engine = None
self.owns_engine = False
self.context = engine_or_context
self.owns_context = owning
else:
G_LOGGER.critical(
"Invalid Engine or Context. Please ensure the engine was built correctly. See error log for details."
)
if not owning:
G_LOGGER.verbose(
"Object was provided directly instead of via a Callable. This runner will not assume ownership. "
"Please ensure it is freed."
)
self.device_buffers, self.host_output_buffers = make_buffers(self.context.engine)
self.stream = cuda.Stream()
def set_profile(self, index):
"""
Sets the active optimization profile for this runner.
The runner must already be active (see ``__enter__()`` or ``activate()``).
This only applies if your engine was built with multiple
optimization profiles.
In TensorRT 8.0 and newer, the profile will be set asynchronously
using this runner's CUDA stream (``runner.stream``).
By default, the runner uses the first profile (profile 0).
Args:
index (int):
The index of the optimization profile to use.
"""
if not self.is_active:
G_LOGGER.critical("{:35} | Must be activated prior to calling set_profile()".format(self.name))
try:
self.context.set_optimization_profile_async
except AttributeError:
self.context.active_optimization_profile = index
else:
self.context.set_optimization_profile_async(index, self.stream.ptr)
def get_input_metadata_impl(self):
start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
# This function always uses binding names of the 0th profile.
return trt_util.get_input_metadata_from_engine(self.context.engine, start_binding, end_binding)
def _set_shapes_from_feed_dict(self, feed_dict):
"""
Sets context shapes according to the provided feed_dict.
Note that ``infer()`` will call this function automatically, and hence
you should only use it if you plan to use this runner's context manually.
Args:
feed_dict (OrderedDict[str, numpy.ndarray]):
A mapping of input tensor names to corresponding input NumPy arrays.
Returns:
Tuple[int, int]: The start and end binding indices of the modified bindings.
"""
def is_dynamic_shape_input(binding):
try:
self.context.engine.get_profile_shape_input(0, binding)
return True
except RuntimeError:
return False
start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
for name, inp in feed_dict.items():
binding = start_binding + self.context.engine[name]
# Only set shapes if required.
# get_shape/get_binding_shape will return what a shape input/data input is currently set to.
if is_dynamic_shape_input(binding): # For input shape tensors
if isinstance(inp, cuda.DeviceView):
G_LOGGER.critical(
"A DeviceView was provided for input: {:}, but since this is a "
"shape tensor, it must reside in host memory. "
"Please use a NumPy array instead. ".format(name)
)
if tuple(self.context.get_shape(binding)) != tuple(inp):
G_LOGGER.verbose("Setting shape binding: {:} (index: {:}) to: {:}".format(name, binding, inp))
self.context.set_shape_input(binding, inp)
elif util.is_shape_dynamic(self.context.engine.get_binding_shape(binding)):
shape = inp.shape
if tuple(self.context.get_binding_shape(binding)) != tuple(shape):
G_LOGGER.verbose("Setting binding: {:} (index: {:}) to shape: {:}".format(name, binding, shape))
self.context.set_binding_shape(binding, shape)
if not self.context.all_binding_shapes_specified:
G_LOGGER.critical(
"Some input shapes were not specified.\n"
"Note: Network inputs are: {:}".format(self.get_input_metadata())
)
if not self.context.all_shape_inputs_specified:
G_LOGGER.critical(
"Some shape inputs were not specified.\n"
"Note: Network inputs are: {:}".format(self.get_input_metadata())
)
return start_binding, end_binding
def infer_impl(self, feed_dict, copy_outputs_to_host=True):
"""
Implementation for running inference with TensorRT.
Do not call this method directly - use ``infer()`` instead,
which will forward unrecognized arguments to this method.
In addition to accepting NumPy arrays in the feed_dict, this runner can also
accept Polygraphy DeviceViews. In that case, no host-to-device copy is necessary for the inputs.
Args:
feed_dict (OrderedDict[str, Union[numpy.ndarray, DeviceView]]):
A mapping of input tensor names to corresponding input NumPy arrays
or Polygraphy DeviceViews.
copy_outputs_to_host (bool):
Whether to copy inference outputs back to the host.
If this is False, Polygraphy DeviceViews are returned
instead of NumPy arrays.
Defaults to True.
"""
start = time.time()
start_binding, end_binding = self._set_shapes_from_feed_dict(feed_dict)
# Resize output device buffers - host buffers will be automatically resized by copy_to
for binding in range(start_binding, end_binding):
if not self.context.engine.binding_is_input(binding):
name = self.context.engine[binding - start_binding] # Use profile 0 binding names for all buffers.
shape = tuple(self.context.get_binding_shape(binding))
self.device_buffers[name].resize(shape)
# Use a shallow copy in case we need to replace our allocated buffers with provided DeviceViews.
dev_bufs = copy.copy(self.device_buffers)
for name, buffer in feed_dict.items():
if isinstance(buffer, cuda.DeviceView):
dev_bufs[name] = buffer
elif isinstance(buffer, np.ndarray):
dev_bufs[name].copy_from(buffer, self.stream)
else:
G_LOGGER.critical(
"For input: {:}, unrecognized type in feed_dict: {:}.\n"
"Please provide either a NumPy array or Polygraphy DeviceView. ".format(name, type(buffer).__name__)
)
# Need to offset bindings in case the active profile is not 0.
bindings = [0] * start_binding + [buf.ptr for buf in dev_bufs.values()]
success = self.context.execute_async_v2(bindings=bindings, stream_handle=self.stream.ptr)
if not success:
G_LOGGER.critical("Model execution failed. Please see the log messages above for details")
output_buffers = OrderedDict()
for name, buffer in self.host_output_buffers.items():
if copy_outputs_to_host:
self.host_output_buffers[name] = dev_bufs[name].copy_to(buffer, self.stream)
output_buffers[name] = self.host_output_buffers[name]
else:
output_buffers[name] = dev_bufs[name].view()
self.stream.synchronize()
end = time.time()
self.inference_time = end - start
return output_buffers
def deactivate_impl(self):
with contextlib.ExitStack() as stack:
if self.owns_engine:
stack.enter_context(self.engine)
if self.owns_context:
stack.enter_context(self.context)
[buf.free() for buf in self.device_buffers.values()]
self.stream.free()
del (
self.engine,
self.owns_engine,
self.context,
self.owns_context,
self.device_buffers,
self.host_output_buffers,
self.stream,
)
# Note: This can be removed once TRT 6 support is dropped.
def infer(self, feed_dict, check_inputs=None, *args, **kwargs):
# Disable checks by default on TRT 6.0 due to implicit batch semantics.
if mod.version(trt.__version__) < mod.version("7.0"):
return super().infer(feed_dict, util.default(check_inputs, False), *args, **kwargs)
return super().infer(feed_dict, util.default(check_inputs, True), *args, **kwargs)
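# --- Illustrative usage sketch (editor's addition, not part of the original runner implementation).
# --- The `engine` argument and the input name "input" below are hypothetical placeholders.
def _example_trt_runner_usage(engine):
    import numpy as np
    from polygraphy.backend.trt import TrtRunner
    # Using the runner as a context manager activates it (allocating buffers) and deactivates it on exit.
    with TrtRunner(engine) as runner:
        feed_dict = {"input": np.zeros((1, 3, 224, 224), dtype=np.float32)}
        # copy_outputs_to_host=False would return Polygraphy DeviceViews instead of NumPy arrays.
        outputs = runner.infer(feed_dict=feed_dict)
    return outputs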
| TensorRT-master | tools/Polygraphy/polygraphy/backend/trt/runner.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
from polygraphy import config, mod, util
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER, LogMode
trt = mod.lazy_import("tensorrt")
np = mod.lazy_import("numpy")
TRT_LOGGER = None
@mod.export()
def get_trt_logger():
"""
Get the global TensorRT logger created by Polygraphy.
Returns:
trt.Logger: The TensorRT logger.
"""
global TRT_LOGGER
if TRT_LOGGER is None:
TRT_LOGGER = trt.Logger()
return TRT_LOGGER
def fail_unavailable(what):
G_LOGGER.backtrace()
G_LOGGER.critical("{:} is not available on TensorRT version {:}.".format(what, trt.__version__))
def check_onnx_parser_errors(parser, success):
if parser.num_errors > 0:
for index in range(parser.num_errors):
G_LOGGER.error(parser.get_error(index))
G_LOGGER.critical("Could not parse ONNX correctly")
if not success:
G_LOGGER.critical("Failed to parse ONNX model. Does the model file exist and contain a valid ONNX model?")
def get_layer_class_mapping():
layer_class_mapping = {}
def try_add(layer_type, layer_cls):
try:
layer_type = getattr(trt.LayerType, layer_type)
layer_cls = getattr(trt, layer_cls)
except AttributeError:
if config.INTERNAL_CORRECTNESS_CHECKS:
G_LOGGER.warning(
"Could not find layer type: {:} or layer class: {:}".format(layer_type, layer_cls)
)
else:
layer_class_mapping[layer_type] = layer_cls
try_add("CONVOLUTION", "IConvolutionLayer")
try_add("FULLY_CONNECTED", "IFullyConnectedLayer")
try_add("ACTIVATION", "IActivationLayer")
try_add("POOLING", "IPoolingLayer")
try_add("LRN", "ILRNLayer")
try_add("SCALE", "IScaleLayer")
try_add("SOFTMAX", "ISoftMaxLayer")
try_add("DECONVOLUTION", "IDeconvolutionLayer")
try_add("CONCATENATION", "IConcatenationLayer")
try_add("ELEMENTWISE", "IElementWiseLayer")
try_add("PLUGIN", "IPluginLayer")
try_add("UNARY", "IUnaryLayer")
try_add("PADDING", "IPaddingLayer")
try_add("SHUFFLE", "IShuffleLayer")
try_add("REDUCE", "IReduceLayer")
try_add("TOPK", "ITopKLayer")
try_add("GATHER", "IGatherLayer")
try_add("MATRIX_MULTIPLY", "IMatrixMultiplyLayer")
try_add("RAGGED_SOFTMAX", "IRaggedSoftMaxLayer")
try_add("CONSTANT", "IConstantLayer")
try_add("RNN", "IRNNLayer")
try_add("RNN_V2", "IRNNv2Layer")
try_add("IDENTITY", "IIdentityLayer")
try_add("PLUGIN_V2", "IPluginV2Layer")
try_add("SLICE", "ISliceLayer")
try_add("SHAPE", "IShapeLayer")
try_add("PARAMETRIC_RELU", "IParametricReLULayer")
try_add("RESIZE", "IResizeLayer")
try_add("TRIP_LIMIT", "ITripLimitLayer")
try_add("RECURRENCE", "IRecurrenceLayer")
try_add("ITERATOR", "IIteratorLayer")
try_add("LOOP_OUTPUT", "ILoopOutputLayer")
try_add("SELECT", "ISelectLayer")
try_add("FILL", "IFillLayer")
try_add("QUANTIZE", "IQuantizeLayer")
try_add("DEQUANTIZE", "IDequantizeLayer")
try_add("CONDITION", "IConditionLayer")
try_add("CONDITIONAL_INPUT", "IIfConditionalInputLayer")
try_add("CONDITIONAL_OUTPUT", "IIfConditionalOutputLayer")
try_add("ASSERTION", "IAssertionLayer")
try_add("SCATTER", "IScatterLayer")
try_add("EINSUM", "IEinsumLayer")
return layer_class_mapping
def np_dtype_from_trt(trt_dtype):
_ = mod.has_mod(np) # Force numpy to be imported
return np.dtype(trt.nptype(trt_dtype))
def get_network_input_metadata(network):
inputs = TensorMetadata()
for i in range(network.num_inputs):
tensor = network.get_input(i)
inputs.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=tensor.shape)
return inputs
def get_network_output_metadata(network):
outputs = TensorMetadata()
for i in range(network.num_outputs):
tensor = network.get_output(i)
outputs.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=tensor.shape)
return outputs
def get_layer_input_metadata(layer):
meta = TensorMetadata()
for i in range(layer.num_inputs):
inp = layer.get_input(i)
if inp:
meta.add(inp.name, np_dtype_from_trt(inp.dtype), inp.shape)
return meta
def get_layer_output_metadata(layer):
meta = TensorMetadata()
for i in range(layer.num_outputs):
outp = layer.get_output(i)
if outp:
meta.add(outp.name, np_dtype_from_trt(outp.dtype), outp.shape)
return meta
def str_from_layer(layer, index):
input_info = get_layer_input_metadata(layer)
output_info = get_layer_output_metadata(layer)
return util.str_from_layer("Layer", index, layer.name, layer.type, input_info, output_info)
def get_layer_attribute_names(layer):
def is_special_attribute(attr):
return attr.startswith("__") and attr.endswith("__")
def is_valid_attribute(attr, layer):
if (
type(layer) == trt.IPoolingLayer
or type(layer) == trt.IConvolutionLayer
or type(layer) == trt.IDeconvolutionLayer
):
if len(layer.get_input(0).shape) > 4:
# 3D pooling uses padding_nd
return attr not in ["padding", "stride", "window_size"]
if type(layer) == trt.IResizeLayer:
if layer.num_inputs > 1:
return attr not in ["scales"]
if type(layer) == trt.ISliceLayer:
if layer.num_inputs > 1:
return attr not in ["shape", "start", "stride"]
return True
return [
attr
for attr in dir(layer)
if not is_special_attribute(attr) and not hasattr(trt.ILayer, attr) and is_valid_attribute(attr, layer)
]
def str_from_network(network, mode="full"):
"""
Converts a TensorRT network to a human-readable representation
Args:
network (trt.INetworkDefinition): The network.
mode (str): Controls what is displayed for each layer. Choices: ["none", "basic", "attrs", "full"]
Returns:
str
"""
LAYER_TYPE_CLASS_MAPPING = get_layer_class_mapping()
network_str = "Name: {:} | {:} Batch Network{:}\n".format(
network.name,
"Implicit"
if hasattr(network, "has_implicit_batch_dimension") and network.has_implicit_batch_dimension
else "Explicit",
" with Explicit Precision "
if hasattr(network, "has_explicit_precision") and network.has_explicit_precision
else "",
)
network_str += "\n"
input_metadata = get_network_input_metadata(network)
network_str += "---- {:} Network Input(s) ----\n{:}\n\n".format(len(input_metadata), input_metadata)
output_metadata = get_network_output_metadata(network)
network_str += "---- {:} Network Output(s) ----\n{:}\n\n".format(len(output_metadata), output_metadata)
network_str += "---- {:} Layer(s) ----\n".format(network.num_layers)
if mode != "none":
for index, layer in enumerate(network):
if layer.type in LAYER_TYPE_CLASS_MAPPING:
layer.__class__ = LAYER_TYPE_CLASS_MAPPING[layer.type]
network_str += str_from_layer(layer, index)
if mode in ["attrs", "full"]:
# Exclude special attributes, as well as any attributes of the base layer class (those can be displayed above).
attrs = get_layer_attribute_names(layer)
if attrs:
network_str += util.indent_block("---- Attributes ----") + "\n"
for attr in attrs:
with G_LOGGER.verbosity():
val = getattr(layer, attr)
if mode == "full" or not isinstance(val, np.ndarray):
attr_str = ""
if layer.name:
attr_str += "{:}.".format(layer.name)
network_str += util.indent_block("{:}{:} = {:}".format(attr_str, attr, val)) + "\n"
network_str += "\n"
return util.indent_block(network_str, level=0)
def _get_network_outputs(network):
return [network.get_output(index).name for index in range(network.num_outputs)]
def check_outputs_not_found(not_found, available_outputs):
if not_found:
available_outputs = util.unique_list(available_outputs)
G_LOGGER.critical(
"The following outputs were not found: {:}.\n"
"Note: Available tensors:\n\t{:}".format(not_found, "\n\t".join(available_outputs))
)
def mark_outputs(network, outputs):
"""
Mark the specified outputs as network outputs.
Args:
network (trt.INetworkDefinition): The network in which to mark outputs.
outputs (Sequence[str]): The names of tensors to mark as outputs.
"""
outputs = set(outputs)
all_outputs = []
for layer in network:
for index in range(layer.num_outputs):
tensor = layer.get_output(index)
all_outputs.append(tensor.name)
# Clear all old outputs
if tensor.is_network_output:
network.unmark_output(tensor)
if tensor.name in outputs:
if not tensor.is_network_output:
G_LOGGER.ultra_verbose("Marking {:} as an output".format(tensor.name))
network.mark_output(tensor)
marked_outputs = set(_get_network_outputs(network))
not_found = outputs - marked_outputs
check_outputs_not_found(not_found, all_outputs)
def mark_layerwise(network):
# Layers within loops cannot be marked as network outputs.
LOOP_START_NAMES = ["TRIP_LIMIT", "ITERATOR", "RECURRENCE"]
LOOP_END_NAMES = ["LOOP_OUTPUT"]
LOOP_START_LAYERS = [getattr(trt.LayerType, attr) for attr in LOOP_START_NAMES if hasattr(trt.LayerType, attr)]
LOOP_END_LAYERS = [getattr(trt.LayerType, attr) for attr in LOOP_END_NAMES if hasattr(trt.LayerType, attr)]
EXCLUDE_LAYERS = [trt.LayerType.SHAPE, trt.LayerType.CONSTANT]
outputs = []
in_loop = False
for layer in network:
if layer.type in LOOP_START_LAYERS:
G_LOGGER.warning(
"Loop detected. Please ensure the network is topologically sorted so that layers within "
"the loop body are not marked as network outputs in layerwise mode",
mode=LogMode.ONCE,
)
in_loop = True
elif layer.type in LOOP_END_LAYERS:
in_loop = False
should_mark_layer = not in_loop and layer.type not in EXCLUDE_LAYERS
if should_mark_layer:
for index in range(layer.num_outputs):
tensor = layer.get_output(index)
outputs.append(tensor.name)
G_LOGGER.verbose("Marking {:} tensors as outputs".format(len(outputs)))
mark_outputs(network, outputs)
def unmark_outputs(network, outputs):
outputs = set(outputs)
unmarked_outputs = set()
for layer in network:
for index in range(layer.num_outputs):
tensor = layer.get_output(index)
if tensor.is_network_output and tensor.name in outputs:
network.unmark_output(tensor)
unmarked_outputs.add(tensor.name)
not_found = outputs - unmarked_outputs
check_outputs_not_found(not_found, _get_network_outputs(network))
def str_from_config(config):
config_str = "{:20} | {:} bytes ({:.2f} MiB)\n".format(
"Workspace", config.max_workspace_size, config.max_workspace_size / (1024.0 ** 2)
)
config_str += "{:20} | ".format("Precision")
with contextlib.suppress(AttributeError):
config_str += "TF32: {:}, ".format(config.get_flag(trt.BuilderFlag.TF32))
config_str += "FP16: {:}, INT8: {:}, Strict Types: {:}\n".format(
config.get_flag(trt.BuilderFlag.FP16),
config.get_flag(trt.BuilderFlag.INT8),
config.get_flag(trt.BuilderFlag.STRICT_TYPES),
)
with contextlib.suppress(AttributeError):
source_vals = [
val.name for val in trt.TacticSource.__members__.values() if (1 << int(val)) & config.get_tactic_sources()
]
config_str += "{:20} | {:}\n".format("Tactic Sources", source_vals)
with contextlib.suppress(AttributeError):
config_str += "{:20} | {:}\n".format("Safety Restricted", config.get_flag(trt.BuilderFlag.SAFETY_SCOPE))
if config.int8_calibrator:
config_str += "{:20} | {:}\n".format("Calibrator", config.int8_calibrator)
config_str += "{:20} | {:} profile(s)".format("Profiles", config.num_optimization_profiles)
return config_str
def check_profile(profile):
if not bool(profile):
G_LOGGER.critical("Profile is not valid, please provide profile data.\nNote: profile was: {:}".format(profile))
return profile
def str_from_tensor(tensor, is_shape_tensor):
ret = "Input "
if is_shape_tensor:
ret += "shape-tensor"
else:
ret += "tensor"
ret += ": {:} (dtype={:}, shape={:})".format(tensor.name, tensor.dtype, tensor.shape)
return ret
def get_input_metadata_from_profile(profile, network):
"""
Returns metadata about the inputs based on the OPT values set in a profile.
Args:
profile (trt.IOptimizationProfile):
            The profile from which to retrieve input metadata.
network (trt.INetworkDefinition):
The network the profile applies to.
Returns:
TensorMetadata:
A mapping of input names to their types and shapes.
Shapes are retrieved from the OPT values in the profile.
"""
input_metadata = TensorMetadata()
for index in range(network.num_inputs):
tensor = network.get_input(index)
if tensor.is_shape_tensor:
shapes = profile.get_shape_input(tensor.name)
else:
shapes = profile.get_shape(tensor.name)
if tuple(shapes[0]) != tuple(shapes[2]):
G_LOGGER.warning(
"Will use `opt` shapes from profile 0 for calibration. "
"Note that even though `min` != `max` in this profile, calibration "
"will use fixed input shapes (this is not necessarily an issue)."
)
# Always use opt shape
input_metadata.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=shapes[1])
return input_metadata
def add_binding_to_metadata(engine, binding, metadata, name_binding):
# name_binding always comes from profile 0, since that's where we
# get all binding names in the runner
metadata.add(
name=engine[name_binding],
dtype=np_dtype_from_trt(engine.get_binding_dtype(binding)),
shape=list(engine.get_binding_shape(binding)),
)
def get_input_metadata_from_engine(engine, start_binding, end_binding):
inputs = TensorMetadata()
for index, binding in enumerate(range(start_binding, end_binding)):
if engine.binding_is_input(binding):
add_binding_to_metadata(engine, binding, inputs, name_binding=index)
return inputs
def get_output_metadata_from_engine(engine, start_binding, end_binding):
outputs = TensorMetadata()
for index, binding in enumerate(range(start_binding, end_binding)):
if not engine.binding_is_input(binding):
add_binding_to_metadata(engine, binding, outputs, name_binding=index)
return outputs
def str_from_engine(engine):
bindings_per_profile = get_bindings_per_profile(engine)
engine_str = "Name: {:} | {:}{:} Batch Engine ({:} layers)\n".format(
engine.name,
"Refittable " if engine.refittable else "",
"Implicit"
if hasattr(engine, "has_implicit_batch_dimension") and engine.has_implicit_batch_dimension
else "Explicit",
engine.num_layers,
)
engine_str += "\n"
# Show metadata for the first profile (i.e. the dynamic shapes)
input_metadata = get_input_metadata_from_engine(engine, 0, bindings_per_profile)
engine_str += "---- {:} Engine Input(s) ----\n{:}\n\n".format(len(input_metadata), input_metadata)
output_metadata = get_output_metadata_from_engine(engine, 0, bindings_per_profile)
engine_str += "---- {:} Engine Output(s) ----\n{:}\n\n".format(len(output_metadata), output_metadata)
engine_str += "---- Memory ----\nDevice Memory: {:} bytes\n\n".format(engine.device_memory_size)
engine_str += "---- {:} Profile(s) ({:} Binding(s) Each) ----\n".format(
engine.num_optimization_profiles, bindings_per_profile
)
for profile_index in range(engine.num_optimization_profiles):
engine_str += "- Profile: {:}\n".format(profile_index)
max_width = max([len(binding) for binding in engine]) + 8
for offset in range(bindings_per_profile):
binding = profile_index * bindings_per_profile + offset
name = "[Name: {:}]".format(engine.get_binding_name(binding))
engine_str += util.indent_block(
"Binding Index: {:} {:} {:<{max_width}}".format(
binding, "(Input) " if engine.binding_is_input(binding) else "(Output)", name, max_width=max_width
)
)
if engine.binding_is_input(binding):
if engine.is_shape_binding(binding):
min_shape, opt_shape, max_shape = engine.get_profile_shape_input(profile_index, binding)
else:
min_shape, opt_shape, max_shape = engine.get_profile_shape(profile_index, binding)
engine_str += " | Shapes: min={:}, opt={:}, max={:}\n".format(min_shape, opt_shape, max_shape)
else:
engine_str += " | Shape: {:}\n".format(engine.get_binding_shape(binding))
engine_str += "\n"
return util.indent_block(engine_str, level=0)
def get_bindings_per_profile(engine):
return engine.num_bindings // engine.num_optimization_profiles
def get_active_profile_bindings(context):
"""
Gets the start and end binding indices for the active optimization profile.
Args:
context (trt.IExecutionContext): The context where the profile is currently set.
Returns:
        Tuple[int, int]: The start and end binding indices, in that order
"""
active_profile = context.active_optimization_profile
bindings_per_profile = get_bindings_per_profile(context.engine)
start_binding = bindings_per_profile * active_profile
end_binding = start_binding + bindings_per_profile
G_LOGGER.ultra_verbose(
"Total # of Profiles: {:}, Bindings Per Profile: {:}, Active Profile: {:}, "
"Start Binding: {:}, End Binding: {:}".format(
context.engine.num_optimization_profiles, bindings_per_profile, active_profile, start_binding, end_binding
)
)
return start_binding, end_binding
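# --- Illustrative sketch (editor's addition): how the helpers above translate the active optimization
# --- profile into a binding-index range. The `context` argument is assumed to be a trt.IExecutionContext.
def _example_active_profile_binding_names(context):
    # For an engine with B bindings per profile, profile k owns binding indices [k * B, (k + 1) * B).
    start_binding, end_binding = get_active_profile_bindings(context)
    # Binding names for profiles other than 0 carry a "[profile k]" suffix in TensorRT.
    return [context.engine[index] for index in range(start_binding, end_binding)]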
| TensorRT-master | tools/Polygraphy/polygraphy/backend/trt/util.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import constants, mod, util
from polygraphy.backend.trt import util as trt_util
from polygraphy.common.interface import TypedDict
from polygraphy.logger import G_LOGGER, LogMode
@mod.export()
class ShapeTuple(object):
"""
Represents a set of shapes for a single binding in a profile.
"""
def __init__(self, min, opt, max):
"""
Args:
min (Tuple[int]): The minimum shape that the profile will support.
opt (Tuple[int]): The shape for which TensorRT will optimize the engine.
max (Tuple[int]): The maximum shape that the profile will support.
"""
self.min = min
self.opt = opt
self.max = max
def __str__(self):
return "(min={:}, opt={:}, max={:})".format(self.min, self.opt, self.max)
def __repr__(self):
return type(self).__name__ + self.__str__()
def __iter__(self):
yield from [self.min, self.opt, self.max]
@mod.export()
class Profile(TypedDict(lambda: str, lambda: ShapeTuple)):
"""
An ordered dictionary that represents a single optimization profile that
can be used to build an engine.
More specifically, it is an ``OrderedDict[str, ShapeTuple]`` which maps binding
names to a set of min/opt/max shapes.
"""
def add(self, name, min, opt, max):
"""
A convenience function to add shapes for a single binding.
Args:
name (str): The name of the binding.
min (Tuple[int]): The minimum shape that the profile will support.
opt (Tuple[int]): The shape for which TensorRT will optimize the engine.
max (Tuple[int]): The maximum shape that the profile will support.
Returns:
Profile:
self, which allows this function to be easily chained to add multiple bindings,
e.g., Profile().add(...).add(...)
"""
self[name] = ShapeTuple(min, opt, max)
return self
def __getitem__(self, key):
"""
Retrieves the shapes registered for a given input name.
Returns:
ShapeTuple:
A named tuple including ``min``, ``opt``, and ``max`` members for the shapes
corresponding to the input.
"""
if key not in self:
G_LOGGER.critical("Binding: {:} does not have shapes set in this profile".format(key))
return super().__getitem__(key)
def fill_defaults(self, network, default_shape_value=None):
"""
Fill this profile with sane default values for any bindings whose
shapes have not been set explicitly.
Args:
network (trt.INetworkDefinition):
The TensorRT network this profile is meant for.
This will be used to determine model inputs and their shapes.
default_shape_value (int):
The value to use to override dynamic dimensions.
Returns:
Profile: Self
"""
default_shape_value = util.default(default_shape_value, constants.DEFAULT_SHAPE_VALUE)
for idx in range(network.num_inputs):
inp = network.get_input(idx)
if inp.name in self:
continue
with G_LOGGER.verbosity(G_LOGGER.CRITICAL): # WAR for spam from TRT
is_shape_tensor = inp.is_shape_tensor
if is_shape_tensor:
rank = inp.shape[0]
shape = (default_shape_value,) * rank
G_LOGGER.warning(
"{:} | No values provided; Will use input values: {:} for min/opt/max in profile.\n".format(
                        trt_util.str_from_tensor(inp, is_shape_tensor), shape
),
mode=LogMode.ONCE,
)
G_LOGGER.warning(
"This will cause the shape-tensor to have static values. If this is incorrect, please "
"set the range of values for this input shape-tensor.",
mode=LogMode.ONCE,
)
else:
shape = util.override_dynamic_shape(inp.shape, default_shape_value)
if shape != inp.shape:
G_LOGGER.warning(
"{:} | No shapes provided; Will use shape: {:} for min/opt/max in profile.\n".format(
trt_util.str_from_tensor(inp, is_shape_tensor), shape
),
mode=LogMode.ONCE,
)
G_LOGGER.warning(
"This will cause the tensor to have a static shape. If this is incorrect, please "
"set the range of shapes for this input tensor.",
mode=LogMode.ONCE,
)
self.add(inp.name, shape, shape, shape)
return self
def to_trt(self, builder, network):
"""
Creates a TensorRT IOptimizationProfile based on the values set in this Profile.
Args:
builder (trt.Builder):
A TensorRT builder. This will be used to construct the IOptimizationProfile.
network (trt.INetworkDefinition):
The TensorRT network the profile applies to.
Returns:
trt.IOptimizationProfile: A TensorRT optimization profile.
"""
trt_profile = builder.create_optimization_profile()
unused_keys = set(self.keys())
available_inputs = set()
for idx in range(network.num_inputs):
inp = network.get_input(idx)
if inp.name in unused_keys:
unused_keys.remove(inp.name)
available_inputs.add(inp.name)
with G_LOGGER.verbosity(): # WAR for spam from TRT
is_shape_tensor = inp.is_shape_tensor
if is_shape_tensor:
if inp.name in self:
shapes = self[inp.name]
trt_profile.set_shape_input(inp.name, shapes.min, shapes.opt, shapes.max)
G_LOGGER.verbose(
"{:} | Setting input shape-tensor value range to: {:}".format(
trt_util.str_from_tensor(inp, is_shape_tensor), shapes
)
)
else:
G_LOGGER.warning(
"{:} | No values provided. "
"Assuming this is not a dynamic shape-tensor.".format(
trt_util.str_from_tensor(inp, is_shape_tensor)
),
mode=LogMode.ONCE,
)
else:
shapes = self[inp.name]
trt_profile.set_shape(inp.name, shapes.min, shapes.opt, shapes.max)
G_LOGGER.verbose(
"{:} | Setting input tensor shapes to: {:}".format(
trt_util.str_from_tensor(inp, is_shape_tensor), shapes
)
)
if unused_keys:
G_LOGGER.error(
"Invalid inputs were provided to the optimization profile: {:}\n"
"Note: Inputs available in the TensorRT network are: {:}".format(unused_keys, available_inputs)
)
return trt_util.check_profile(trt_profile)
def __repr__(self):
ret = "Profile()"
for name, (min, opt, max) in self.items():
ret += ".add({:}, min={:}, opt={:}, max={:})".format(name, min, opt, max)
return ret
def __str__(self):
elems = []
for name, (min, opt, max) in self.items():
elems.append("{:} [min={:}, opt={:}, max={:}]".format(name, min, opt, max))
sep = ",\n "
return "{" + sep.join(elems) + "}"
| TensorRT-master | tools/Polygraphy/polygraphy/backend/trt/profile.py |
from polygraphy.backend.trt.algorithm_selector import *
from polygraphy.backend.trt.calibrator import *
from polygraphy.backend.trt.loader import *
from polygraphy.backend.trt.profile import *
from polygraphy.backend.trt.runner import *
from polygraphy.backend.trt.util import *
def register_logger_callback():
from polygraphy.logger import G_LOGGER
def set_trt_logging_level(sev):
from polygraphy import mod
trt = mod.lazy_import("tensorrt")
if not mod.has_mod(trt):
return
if sev >= G_LOGGER.CRITICAL:
get_trt_logger().min_severity = trt.Logger.INTERNAL_ERROR
elif sev >= G_LOGGER.ERROR:
get_trt_logger().min_severity = trt.Logger.ERROR
elif sev >= G_LOGGER.INFO:
get_trt_logger().min_severity = trt.Logger.WARNING
elif sev >= G_LOGGER.VERBOSE:
get_trt_logger().min_severity = trt.Logger.INFO
else:
get_trt_logger().min_severity = trt.Logger.VERBOSE
G_LOGGER.register_callback(set_trt_logging_level) # Will be registered when this backend is imported.
register_logger_callback()
| TensorRT-master | tools/Polygraphy/polygraphy/backend/trt/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
from collections import OrderedDict
from polygraphy import cuda, mod, util
from polygraphy.logger import G_LOGGER, LogMode
trt = mod.lazy_import("tensorrt")
np = mod.lazy_import("numpy")
@mod.export()
def Calibrator(
data_loader, cache=None, BaseClass=None, batch_size=None, quantile=None, regression_cutoff=None, algo=None
):
"""
Supplies calibration data to TensorRT to calibrate the network for INT8 inference.
Args:
data_loader (Generator -> OrderedDict[str, Union[numpy.ndarray, DeviceView, int]]):
A generator or iterable that yields a dictionary that maps input names to NumPy
arrays, Polygraphy DeviceViews, or GPU pointers.
In case you don't know details about the inputs ahead of time, you can access the
            `input_metadata` property in your data loader, which will be set to a ``TensorMetadata`` instance.
Note that this does not work for generators or lists.
The number of calibration batches is controlled by the number of items supplied
by the data loader.
cache (Union[str, file-like]):
Path or file-like object to save/load the calibration cache.
By default, the calibration cache is not saved.
BaseClass (type):
The type of calibrator to inherit from.
Defaults to ``trt.IInt8MinMaxCalibrator``.
batch_size (int):
[DEPRECATED] The size of each batch provided by the data loader.
quantile (float):
The quantile to use for ``trt.IInt8LegacyCalibrator``.
Has no effect for other calibrator types.
Defaults to 0.5.
regression_cutoff (float):
The regression cutoff to use for ``trt.IInt8LegacyCalibrator``.
Has no effect for other calibrator types.
Defaults to 0.5.
algo (trt.CalibrationAlgoType):
Calibration algorithm to use for ``trt.IInt8Calibrator``.
Has no effect for other calibrator types.
Defaults to ``trt.CalibrationAlgoType.MINMAX_CALIBRATION``.
"""
BaseClass = util.default(BaseClass, trt.IInt8MinMaxCalibrator)
class CalibratorClass(BaseClass):
"""
Calibrator that supplies calibration data to TensorRT to calibrate the network for INT8 inference.
"""
def __init__(self):
# Must explicitly initialize parent for any trampoline class! Will mysteriously segfault without this.
BaseClass.__init__(self)
self.is_active = False
self.data_loader = data_loader
self._cache = cache
self.device_buffers = OrderedDict()
self.reset()
G_LOGGER.verbose("Created calibrator [cache={:}]".format(self._cache))
self.batch_size = util.default(batch_size, 1)
# The function that constructed this instance
self.make_func = Calibrator
def reset(self, input_metadata=None):
"""
Reset this calibrator for reuse.
The calibrator will clear any dynamic ranges cached from previous calibration runs, and will
attempt to rewind the data loader (note that generators cannot be rewound).
Args:
input_metadata (TensorMetadata):
Mapping of input names to their data types and shapes.
Passed along to the data loader if provided. Generally should not be required
unless using Polygraphy's included `DataLoader` for this calibrator.
"""
if input_metadata is not None:
with contextlib.suppress(AttributeError):
self.data_loader.input_metadata = input_metadata
# Attempt to reset data loader
self.data_loader_iter = iter(self.data_loader)
self.num_batches = 0
# Make sure calibrator will check the cache again when reset.
self.cache_contents = None
self.has_cached_scales = False
def get_batch_size(self):
return self.batch_size
def get_batch(self, names):
if not self.is_active:
G_LOGGER.error(
"Calibrator must be activated prior to use. Please use a context manager. "
"For example:\nwith calibrator:\n\t# Use calibrator here"
)
return None
try:
buffers = next(self.data_loader_iter)
except StopIteration:
if not self.num_batches:
G_LOGGER.error(
"Calibrator data loader provided no data.\nPossible reasons for this include:\n(1) data loader "
"has no data to provide\n(2) data loader was a generator, and the calibrator is being "
"used multiple times (generators cannot be rewound)"
)
return None
else:
self.num_batches += 1
if not util.check_dict_contains(buffers, names, dict_name="calibration data", log_func=G_LOGGER.error):
return None
ptrs = []
for name in names:
buf = buffers[name]
if isinstance(buf, cuda.DeviceView):
ptrs.append(buf.ptr)
elif isinstance(buf, np.ndarray):
if name not in self.device_buffers:
self.device_buffers[name] = cuda.DeviceArray(shape=buf.shape, dtype=buf.dtype)
G_LOGGER.verbose("Allocated: {:}".format(self.device_buffers[name]))
ptrs.append(self.device_buffers[name].copy_from(buf).ptr)
elif isinstance(buf, int):
ptrs.append(buf)
else:
G_LOGGER.error(
"Calibration data loader provided an unrecognized type: {:} for input: {:}.\n"
"Please provide either a NumPy array, Polygraphy DeviceView, or GPU pointer. ".format(
type(buf).__name__, name
)
)
return None
return ptrs
def read_calibration_cache(self):
def load_from_cache():
if self._cache is None or not util.get_file_size(self._cache):
return None
try:
return util.load_file(self._cache, description="calibration cache")
except Exception as err:
G_LOGGER.error(
"Could not read from calibration cache: {:}\nNote: Error was: {:}".format(self._cache, err)
)
return None
# Only attempt to read from the cache once.
if self.has_cached_scales:
return self.cache_contents
self.cache_contents = load_from_cache()
if not self.cache_contents:
if self.cache_contents is not None:
G_LOGGER.warning(
"Calibration cache was provided, but is empty. "
"Will regenerate scales by running calibration.",
mode=LogMode.ONCE,
)
self.cache_contents = None
else:
self.has_cached_scales = True
return self.cache_contents
def write_calibration_cache(self, cache):
self.cache_contents = cache.tobytes()
self.has_cached_scales = True
if self._cache is None:
return
try:
util.save_file(contents=self.cache_contents, dest=self._cache, description="calibration cache")
except Exception as err:
G_LOGGER.error(
"Could not write to calibration cache: {:}.\nNote: Error was: {:}".format(self._cache, err)
)
def __enter__(self):
self.is_active = True
return self
def __exit__(self, exc_type, exc_value, traceback):
self.is_active = False
for device_buffer in self.device_buffers.values():
device_buffer.free()
# IInt8LegacyCalibrator methods
def get_quantile(self):
return util.default(quantile, 0.5)
def get_regression_cutoff(self):
return util.default(regression_cutoff, 0.5)
def read_histogram_cache(self, length):
pass
def write_histogram_cache(self, ptr, length):
pass
# IInt8Calibrator methods
def get_algorithm(self):
return util.default(algo, trt.CalibrationAlgoType.MINMAX_CALIBRATION)
def __repr__(self):
return util.make_repr(
"Calibrator",
data_loader,
cache=cache,
BaseClass=BaseClass,
batch_size=batch_size,
quantile=quantile,
regression_cutoff=regression_cutoff,
algo=algo,
)[0]
return CalibratorClass()
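# --- Illustrative usage sketch (editor's addition). The input name "input", the shapes, and the cache
# --- path are hypothetical; any iterable or generator of feed_dict-style mappings works as a data loader.
def _example_calibrator_usage():
    import numpy as np
    def calib_data(num_batches=4):
        for _ in range(num_batches):
            yield {"input": np.random.rand(1, 3, 224, 224).astype(np.float32)}
    # The returned object can be passed to CreateConfig(int8=True, calibrator=...) when building an engine.
    return Calibrator(data_loader=calib_data(), cache="calibration.cache")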
| TensorRT-master | tools/Polygraphy/polygraphy/backend/trt/calibrator.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import copy
import ctypes
import time
from polygraphy import constants, mod, util
from polygraphy.backend.base import BaseLoader
from polygraphy.backend.trt import util as trt_util
from polygraphy.backend.trt.profile import Profile
from polygraphy.logger import G_LOGGER
trt = mod.lazy_import("tensorrt")
gs = mod.lazy_import("onnx_graphsurgeon")
np = mod.lazy_import("numpy")
@mod.export(funcify=True)
class LoadPlugins(BaseLoader):
"""
A passthrough loader that loads plugins from the specified paths.
Passthrough here means that it can be used to wrap any other loader. The purpose of wrapping
another loader is that you can control the order of execution when lazily evaluating.
For immediate evaluation, use `load_plugins` instead:
::
load_plugins(plugins=["/path/to/my/plugin.so", "/path/to/my/other_plugin.so"])
"""
def __init__(self, plugins=None, obj=None):
"""
Loads plugins from the specified paths.
Args:
plugins (List[str]):
A list of paths to plugin libraries to load before inference.
obj (object):
An object or callable to return or call respectively.
If ``obj`` is callable, extra parameters will be forwarded to ``obj``.
If ``obj`` is not callable, it will be returned.
"""
self.plugins = util.default(plugins, [])
self.obj = obj
def call_impl(self, *args, **kwargs):
"""
Returns:
object:
The provided ``obj`` argument, or its return value if it is
callable. Returns ``None`` if ``obj`` was not set.
"""
for plugin in self.plugins:
G_LOGGER.info("Loading plugin library: {:}".format(plugin))
ctypes.CDLL(plugin)
ret, _ = util.invoke_if_callable(self.obj, *args, **kwargs)
return ret
@mod.export(funcify=True)
class CreateNetwork(BaseLoader):
"""
Functor that creates an empty TensorRT network.
"""
def __init__(self, explicit_precision=None, explicit_batch=None):
"""
Creates an empty TensorRT network.
Args:
explicit_precision (bool):
Whether to create the network with explicit precision enabled. Defaults to False
explicit_batch (bool):
Whether to create the network with explicit batch mode. Defaults to True.
"""
self.explicit_precision = util.default(explicit_precision, False)
self.explicit_batch = util.default(explicit_batch, True)
def call_impl(self):
"""
Returns:
(trt.Builder, trt.INetworkDefinition): The builder and empty network.
"""
with util.FreeOnException([trt.Builder(trt_util.get_trt_logger())]) as (builder,):
network_flags = 0
if self.explicit_batch:
network_flags |= 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
if self.explicit_precision:
network_flags |= 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION)
network = builder.create_network(flags=network_flags)
if network is None:
G_LOGGER.critical("Invalid network. See logging output above for details.")
return builder, network
class BaseNetworkFromOnnx(BaseLoader):
def __init__(self, explicit_precision, explicit_batch=None):
"""
Args:
explicit_precision (bool): Whether to create the network with explicit precision enabled.
"""
self.explicit_precision = util.default(explicit_precision, False)
self.explicit_batch = util.default(explicit_batch, True)
def call_impl(self):
with util.FreeOnException(
create_network(explicit_precision=self.explicit_precision, explicit_batch=self.explicit_batch)
) as (builder, network):
parser = trt.OnnxParser(network, trt_util.get_trt_logger())
return builder, network, parser
@mod.export(funcify=True)
class NetworkFromOnnxBytes(BaseNetworkFromOnnx):
"""
Functor that parses an ONNX model to create a trt.INetworkDefinition.
"""
def __init__(self, model_bytes, explicit_precision=None):
"""
Parses an ONNX model.
Args:
model_bytes (Union[bytes, Callable() -> bytes]):
A serialized ONNX model or a callable that returns one.
explicit_precision (bool): Whether to construct the TensorRT network with explicit precision enabled.
"""
super().__init__(explicit_precision)
self._model_bytes = model_bytes
def call_impl(self):
"""
Returns:
(trt.IBuilder, trt.INetworkDefinition, trt.OnnxParser):
A TensorRT network, as well as the builder used to create it, and the parser
used to populate it.
"""
with util.FreeOnException(super().call_impl()) as (builder, network, parser):
success = parser.parse(util.invoke_if_callable(self._model_bytes)[0])
trt_util.check_onnx_parser_errors(parser, success)
return builder, network, parser
@mod.export(funcify=True)
class NetworkFromOnnxPath(BaseNetworkFromOnnx):
"""
Functor that parses an ONNX model to create a trt.INetworkDefinition.
This loader supports models with weights stored in an external location.
"""
def __init__(self, path, explicit_precision=None):
"""
Parses an ONNX model from a file.
Args:
path (str): The path from which to load the model.
"""
super().__init__(explicit_precision)
self.path = path
def call_impl(self):
"""
Returns:
(trt.IBuilder, trt.INetworkDefinition, trt.OnnxParser):
A TensorRT network, as well as the builder used to create it, and the parser
used to populate it.
"""
path = util.invoke_if_callable(self.path)[0]
if mod.version(trt.__version__) >= mod.version("7.1"):
with util.FreeOnException(super().call_impl()) as (builder, network, parser):
# We need to use parse_from_file for the ONNX parser to keep track of the location of the ONNX file for
# potentially parsing any external weights.
success = parser.parse_from_file(path)
trt_util.check_onnx_parser_errors(parser, success)
return builder, network, parser
else:
from polygraphy.backend.common import bytes_from_path
return network_from_onnx_bytes(bytes_from_path(path), self.explicit_precision)
@mod.export(funcify=True)
class ModifyNetworkOutputs(BaseLoader):
"""
Functor that modifies outputs in a TensorRT ``INetworkDefinition``.
"""
def __init__(self, network, outputs=None, exclude_outputs=None):
"""
Modifies outputs in a TensorRT ``INetworkDefinition``.
Args:
network (Union[Tuple[trt.Builder, trt.INetworkDefinition, Optional[parser]], Callable() -> Tuple[trt.Builder, trt.INetworkDefinition, Optional[parser]]):
A tuple containing a TensorRT builder, network and optionally parser or a callable that returns one.
To omit the parser, return a tuple containing just the builder and network.
outputs (Sequence[str]):
Names of tensors to mark as outputs. If provided, this will override the outputs
already marked in the network.
If a value of `constants.MARK_ALL` is used instead of a list, all tensors in the network are marked.
exclude_outputs (Sequence[str]):
Names of tensors to exclude as outputs. This can be useful in conjunction with
``outputs=constants.MARK_ALL`` to omit outputs.
"""
self._network = network
self.outputs = outputs
self.exclude_outputs = exclude_outputs
def call_impl(self):
"""
Returns:
trt.INetworkDefinition: The modified network.
"""
ret, owns_network = util.invoke_if_callable(self._network)
builder, network, parser = util.unpack_args(ret, num=3)
with contextlib.ExitStack() as stack:
if owns_network:
stack.enter_context(util.FreeOnException([builder, network, parser]))
if self.outputs == constants.MARK_ALL:
trt_util.mark_layerwise(network)
elif self.outputs is not None:
trt_util.mark_outputs(network, self.outputs)
if self.exclude_outputs is not None:
trt_util.unmark_outputs(network, self.exclude_outputs)
if parser is None:
return builder, network
return builder, network, parser
@mod.export(funcify=True)
class CreateConfig(BaseLoader):
"""
Functor that creates a TensorRT IBuilderConfig.
"""
def __init__(
self,
max_workspace_size=None,
tf32=None,
fp16=None,
int8=None,
profiles=None,
calibrator=None,
strict_types=None,
load_timing_cache=None,
algorithm_selector=None,
sparse_weights=None,
tactic_sources=None,
restricted=None,
use_dla=None,
allow_gpu_fallback=None,
):
"""
Creates a TensorRT IBuilderConfig that can be used by EngineFromNetwork.
Args:
max_workspace_size (int):
The maximum workspace size, in bytes, when building the engine.
Defaults to 16 MiB.
tf32 (bool):
Whether to build the engine with TF32 precision enabled.
Defaults to False.
fp16 (bool):
Whether to build the engine with FP16 precision enabled.
Defaults to False.
int8 (bool):
Whether to build the engine with INT8 precision enabled.
Defaults to False.
profiles (List[Profile]):
A list of optimization profiles to add to the configuration. Only needed for
networks with dynamic input shapes. If this is omitted for a network with
dynamic shapes, a default profile is created, where dynamic dimensions are
replaced with Polygraphy's DEFAULT_SHAPE_VALUE (defined in constants.py).
A partially populated profile will be automatically filled using values from ``Profile.fill_defaults()``
See ``Profile`` for details.
calibrator (trt.IInt8Calibrator):
An int8 calibrator. Only required in int8 mode when
the network does not have explicit precision. For networks with
dynamic shapes, the last profile provided (or default profile if
no profiles are provided) is used during calibration.
strict_types (bool):
Whether to enable strict types in the builder. This will constrain the builder from
using data types other than those specified in the network.
Defaults to False.
load_timing_cache (Union[str, file-like]):
A path or file-like object from which to load a tactic timing cache.
Providing a tactic timing cache can speed up the engine building process.
Caches can be generated while building an engine with, for example, EngineFromNetwork.
algorithm_selector (trt.IAlgorithmSelector):
An algorithm selector. Allows the user to control how tactics are selected
instead of letting TensorRT select them automatically.
sparse_weights (bool):
Whether to enable optimizations for sparse weights.
Defaults to False.
tactic_sources (List[trt.TacticSource]):
The tactic sources to enable. This controls which libraries (e.g. cudnn, cublas, etc.)
TensorRT is allowed to load tactics from.
Use an empty list to disable all tactic sources.
Defaults to TensorRT's default tactic sources.
restricted (bool):
Whether to enable safety scope checking in the builder. This will check if the network
and builder configuration are compatible with safety scope.
Defaults to False.
use_dla (bool):
[EXPERIMENTAL] Whether to enable DLA as the default device type.
Defaults to False.
allow_gpu_fallback (bool):
[EXPERIMENTAL] When DLA is enabled, whether to allow layers to fall back to GPU if they cannot be run on DLA.
Has no effect if DLA is not enabled.
Defaults to False.
"""
self.max_workspace_size = util.default(max_workspace_size, 1 << 24)
self.tf32 = util.default(tf32, False)
self.fp16 = util.default(fp16, False)
self.int8 = util.default(int8, False)
self.profiles = util.default(profiles, [Profile()])
self.calibrator = calibrator
self.strict_types = util.default(strict_types, False)
self.restricted = util.default(restricted, False)
self.timing_cache_path = load_timing_cache
self.algorithm_selector = algorithm_selector
self.sparse_weights = util.default(sparse_weights, False)
self.tactic_sources = tactic_sources
self.use_dla = util.default(use_dla, False)
self.allow_gpu_fallback = util.default(allow_gpu_fallback, False)
if self.calibrator is not None and not self.int8:
G_LOGGER.warning(
"A calibrator was provided to `CreateConfig`, but int8 mode was not enabled. "
"Did you mean to set `int8=True` to enable building with int8 precision?"
)
def call_impl(self, builder, network):
"""
Args:
builder (trt.Builder):
The TensorRT builder to use to create the configuration.
network (trt.INetworkDefinition):
The TensorRT network for which to create the config. The network is used to
automatically create a default optimization profile if none are provided.
Returns:
trt.IBuilderConfig: The TensorRT builder configuration.
"""
with util.FreeOnException([builder.create_builder_config()]) as (config,):
def try_run(func, name):
try:
return func()
except AttributeError:
trt_util.fail_unavailable("{:} in CreateConfig".format(name))
def try_set_flag(flag_name):
return try_run(lambda: config.set_flag(getattr(trt.BuilderFlag, flag_name)), flag_name.lower())
with G_LOGGER.indent():
G_LOGGER.verbose("Setting TensorRT Optimization Profiles")
profiles = copy.deepcopy(self.profiles)
for profile in profiles:
# Last trt_profile is used for set_calibration_profile.
trt_profile = profile.fill_defaults(network).to_trt(builder, network)
config.add_optimization_profile(trt_profile)
G_LOGGER.info("Configuring with profiles: {:}".format(profiles))
config.max_workspace_size = int(self.max_workspace_size)
if self.strict_types:
try_set_flag("STRICT_TYPES")
if self.restricted:
try_set_flag("SAFETY_SCOPE")
if self.tf32:
try_set_flag("TF32")
else: # TF32 is on by default
with contextlib.suppress(AttributeError):
config.clear_flag(trt.BuilderFlag.TF32)
if self.fp16:
try_set_flag("FP16")
if self.int8:
try_set_flag("INT8")
if not network.has_explicit_precision:
if self.calibrator is not None:
input_metadata = trt_util.get_input_metadata_from_profile(trt_profile, network)
with contextlib.suppress(AttributeError): # Polygraphy calibrator has a reset method
self.calibrator.reset(input_metadata)
config.int8_calibrator = self.calibrator
try:
config.set_calibration_profile(trt_profile)
except:
G_LOGGER.extra_verbose("Cannot set calibration profile on TensorRT 7.0 and older.")
else:
G_LOGGER.warning(
"Network does not have explicit precision and no calibrator was provided. Please ensure "
"that tensors in the network have dynamic ranges set, or provide a calibrator in order to use int8 mode."
)
if self.sparse_weights:
try_set_flag("SPARSE_WEIGHTS")
if self.use_dla:
config.default_device_type = trt.DeviceType.DLA
config.DLA_core = 0
if self.allow_gpu_fallback:
try_set_flag("GPU_FALLBACK")
if self.tactic_sources is not None:
tactic_sources_flag = 0
for source in self.tactic_sources:
tactic_sources_flag |= 1 << int(source)
try_run(lambda: config.set_tactic_sources(tactic_sources_flag), name="tactic_sources")
try:
if self.timing_cache_path:
timing_cache_data = util.load_file(self.timing_cache_path, description="tactic timing cache")
cache = config.create_timing_cache(timing_cache_data)
else:
# Create an empty timing cache by default so it will be populated during engine build.
# This way, consumers of CreateConfig have the option to use the cache later.
cache = config.create_timing_cache(b"")
except AttributeError:
if self.timing_cache_path:
trt_util.fail_unavailable("load_timing_cache in CreateConfig")
else:
config.set_timing_cache(cache, ignore_mismatch=False)
if self.algorithm_selector is not None:
def set_algo_selector():
config.algorithm_selector = self.algorithm_selector
try_run(set_algo_selector, "algorithm_selector")
return config
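# --- Illustrative usage sketch (editor's addition). The input name "x", shapes, and workspace size are
# --- hypothetical; the returned loader is meant to be passed to an engine-building loader below.
def _example_create_config():
    profile = Profile().add("x", min=(1, 3, 224, 224), opt=(4, 3, 224, 224), max=(8, 3, 224, 224))
    # CreateConfig is lazy: the IBuilderConfig is only created once a builder and network are available.
    return CreateConfig(fp16=True, max_workspace_size=1 << 30, profiles=[profile])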
@mod.export(funcify=True)
class EngineBytesFromNetwork(BaseLoader):
"""
Functor that uses a TensorRT ``INetworkDefinition`` to build a serialized engine.
"""
def __init__(self, network, config=None, save_timing_cache=None):
"""
Builds and serializes TensorRT engine.
Args:
network (Union[Tuple[trt.Builder, trt.INetworkDefinition, Optional[parser]], Callable() -> Tuple[trt.Builder, trt.INetworkDefinition, Optional[parser]]):
A tuple containing a TensorRT builder, network and optionally parser or a callable that returns one.
To omit the parser, return a tuple containing just the builder and network.
config (Callable(trt.Builder, trt.INetworkDefinition) -> trt.IBuilderConfig):
A TensorRT builder configuration or a callable that returns one. If not supplied,
a `CreateConfig` instance with default parameters is used.
save_timing_cache (Union[str, file-like]):
A path or file-like object at which to save a tactic timing cache.
Any existing cache will be overwritten. Note that if the provided config includes a tactic
timing cache, the data from that cache will be copied into the new cache.
"""
self._network = network
self._config = util.default(config, CreateConfig())
self.timing_cache_path = save_timing_cache
def call_impl(self):
"""
Returns:
bytes: The serialized engine that was created.
"""
# If network is a callable, then we own its return value
ret, owns_network = util.invoke_if_callable(self._network)
builder, network, parser = util.unpack_args(ret, num=3)
if builder is None or network is None:
G_LOGGER.critical(
"Expected to recevie a (builder, network) tuple for the `network` parameter, "
"but received: ({:}, {:})".format(builder, network)
)
with contextlib.ExitStack() as stack:
if owns_network:
stack.enter_context(builder)
stack.enter_context(network)
if parser is not None:
stack.enter_context(parser)
else:
provided = "Builder and Network" if parser is None else "Builder, Network, and Parser"
G_LOGGER.verbose(
"{:} were provided directly instead of via a Callable. This loader will not assume ownership. "
"Please ensure that they are freed.".format(provided)
)
config, owns_config = util.invoke_if_callable(self._config, builder, network)
if owns_config:
stack.enter_context(config)
else:
G_LOGGER.verbose(
"Builder configuration was provided directly instead of via a Callable. This loader will not assume "
"ownership. Please ensure it is freed."
)
try:
config.int8_calibrator.__enter__ # Polygraphy calibrator frees device buffers on exit.
except AttributeError:
pass
else:
stack.enter_context(config.int8_calibrator)
network_log_mode = "full" if G_LOGGER.severity <= G_LOGGER.ULTRA_VERBOSE else "attrs"
G_LOGGER.super_verbose(
lambda: ("Displaying TensorRT Network:\n" + trt_util.str_from_network(network, mode=network_log_mode))
)
G_LOGGER.start("Building engine with configuration:\n{:}".format(trt_util.str_from_config(config)))
start_time = time.time()
try:
engine_bytes = builder.build_serialized_network(network, config)
except AttributeError:
engine = builder.build_engine(network, config)
if not engine:
G_LOGGER.critical("Invalid Engine. Please ensure the engine was built correctly")
stack.enter_context(engine)
engine_bytes = engine.serialize()
end_time = time.time()
if not engine_bytes:
G_LOGGER.critical("Invalid Engine. Please ensure the engine was built correctly")
G_LOGGER.finish("Finished engine building in {:.3f} seconds".format(end_time - start_time))
try:
timing_cache = config.get_timing_cache()
except AttributeError:
if self.timing_cache_path:
trt_util.fail_unavailable("save_timing_cache in EngineBytesFromNetwork")
else:
if timing_cache and self.timing_cache_path:
with timing_cache.serialize() as buffer:
util.save_file(buffer, self.timing_cache_path, description="tactic timing cache")
return engine_bytes
@mod.export(funcify=True)
class EngineFromNetwork(EngineBytesFromNetwork):
"""
Similar to EngineBytesFromNetwork, but returns an ICudaEngine instance
instead of a serialized engine.
"""
def call_impl(self):
"""
Returns:
trt.ICudaEngine: The engine that was created.
"""
# We do not invoke super().call_impl here because we would otherwise be responsible
        # for freeing its return values.
return engine_from_bytes(super().call_impl)
@mod.export(funcify=True)
class EngineFromBytes(BaseLoader):
"""
Functor that deserializes an engine from a buffer.
"""
def __init__(self, serialized_engine):
"""
Deserializes an engine from a buffer.
Args:
serialized_engine (Union[Union[str, bytes], Callable() -> Union[str, bytes]]):
The serialized engine bytes or a callable that returns them.
"""
self._serialized_engine = serialized_engine
def call_impl(self):
"""
Returns:
trt.ICudaEngine: The deserialized engine.
"""
buffer, owns_buffer = util.invoke_if_callable(self._serialized_engine)
trt.init_libnvinfer_plugins(trt_util.get_trt_logger(), "")
with contextlib.ExitStack() as stack, trt.Runtime(trt_util.get_trt_logger()) as runtime:
if owns_buffer:
try:
buffer.__enter__ # IHostMemory is freed only in __exit__
except AttributeError:
pass
else:
stack.enter_context(buffer)
engine = runtime.deserialize_cuda_engine(buffer)
if not engine:
G_LOGGER.critical("Could not deserialize engine. See log for details.")
return engine
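# --- Illustrative sketch (editor's addition): deserializing a previously saved engine. The path
# --- "model.engine" is a hypothetical placeholder.
def _example_load_saved_engine():
    from polygraphy.backend.common import bytes_from_path
    # bytes_from_path reads the serialized engine from disk; engine_from_bytes then deserializes it.
    return engine_from_bytes(bytes_from_path("model.engine"))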
@mod.export(funcify=True)
class BytesFromEngine(BaseLoader):
"""
Functor that serializes an engine.
"""
def __init__(self, engine):
"""
Serializes an engine.
Args:
engine (Union[trt.ICudaEngine, Callable() -> trt.ICudaEngine]):
An engine or a callable that returns one.
"""
self._engine = engine
def call_impl(self):
"""
Returns:
bytes: The serialized engine.
"""
engine, owns_engine = util.invoke_if_callable(self._engine)
with contextlib.ExitStack() as stack:
if owns_engine:
stack.enter_context(util.FreeOnException([engine]))
with engine.serialize() as buffer:
return bytes(buffer)
@mod.export(funcify=True)
class SaveEngine(BaseLoader):
"""
Functor that saves an engine to the provided path.
"""
def __init__(self, engine, path):
"""
Saves an engine to the provided path.
Args:
engine (Union[trt.ICudaEngine, Callable() -> trt.ICudaEngine]):
An engine or a callable that returns one.
path (str): The path at which to save the engine.
"""
self._engine = engine
self.path = path
def call_impl(self):
"""
Returns:
trt.ICudaEngine: The engine that was saved.
"""
engine, owns_engine = util.invoke_if_callable(self._engine)
with contextlib.ExitStack() as stack:
if owns_engine:
stack.enter_context(util.FreeOnException([engine]))
util.save_file(contents=bytes_from_engine(engine), dest=self.path, description="engine")
return engine
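# --- Illustrative end-to-end sketch (editor's addition): parse an ONNX model, build an engine, and save
# --- it to disk. The paths "model.onnx" and "model.engine" are hypothetical placeholders.
def _example_build_and_save_engine():
    build_engine = EngineFromNetwork(NetworkFromOnnxPath("model.onnx"), config=CreateConfig(fp16=True))
    # SaveEngine is also lazy; calling the returned loader builds the engine and writes it to the path.
    save_engine = SaveEngine(build_engine, path="model.engine")
    return save_engine()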
@mod.export(funcify=True)
class OnnxLikeFromNetwork(BaseLoader):
"""
Functor that creates an ONNX-like, but **not** valid ONNX, model based on a TensorRT network.
"""
def __init__(self, network) -> None:
"""
[HIGHLY EXPERIMENTAL] Creates an ONNX-like, but **not** valid ONNX, model from a TensorRT network.
This uses the ONNX format, but generates nodes that are **not** valid ONNX operators.
Hence, this should be used **only** for visualization or debugging purposes.
The resulting model does **not** include enough information to faithfully reconstruct the TensorRT network,
but does preserve the structure of the network and many of the layer parameters.
Args:
network (Union[Tuple[trt.Builder, trt.INetworkDefinition, Optional[parser]], Callable() -> Tuple[trt.Builder, trt.INetworkDefinition, Optional[parser]]):
A tuple containing a TensorRT builder, network and optionally parser or a callable that returns one.
To omit the parser, return a tuple containing just the builder and network.
"""
self._network = network
def call_impl(self):
"""
Returns:
onnx.ModelProto: The ONNX-like, but **not** valid ONNX, representation of the TensorRT network.
"""
ret, owns_network = util.invoke_if_callable(self._network)
builder, network, parser = util.unpack_args(ret, num=3)
if builder is None or network is None:
G_LOGGER.critical(
"Expected to recevie a (builder, network) tuple for the `network` parameter, "
"but received: ({:}, {:})".format(builder, network)
)
with contextlib.ExitStack() as stack:
if owns_network:
stack.enter_context(builder)
stack.enter_context(network)
if parser is not None:
stack.enter_context(parser)
tensor_map = {}
def tensors_from_meta(meta):
nonlocal tensor_map
tensors = []
for name, (dtype, shape) in meta.items():
if name not in tensor_map:
tensor_map[name] = gs.Variable(name=name, dtype=dtype, shape=shape)
tensors.append(tensor_map[name])
return tensors
nodes = []
graph_inputs = tensors_from_meta(trt_util.get_network_input_metadata(network))
graph_outputs = tensors_from_meta(trt_util.get_network_output_metadata(network))
LAYER_TYPE_CLASS_MAPPING = trt_util.get_layer_class_mapping()
for layer in network:
op_name = layer.type.name
if layer.type in LAYER_TYPE_CLASS_MAPPING:
layer.__class__ = LAYER_TYPE_CLASS_MAPPING[layer.type]
node_inputs = tensors_from_meta(trt_util.get_layer_input_metadata(layer))
node_outputs = tensors_from_meta(trt_util.get_layer_output_metadata(layer))
attrs = {}
attr_names = trt_util.get_layer_attribute_names(layer)
for name in attr_names:
with G_LOGGER.verbosity():
attr = getattr(layer, name)
if util.is_sequence(attr) or any(isinstance(attr, cls) for cls in [trt.Dims, trt.Permutation]):
try:
attr = list(attr)
except ValueError: # Invalid dims
attr = []
if hasattr(attr, "__entries"): # TensorRT Enums
attr = attr.name
if isinstance(attr, trt.ILoop):
attr = attr.name
VALID_TYPES = [np.ndarray, list, int, str, bool, float]
if not any(isinstance(attr, cls) for cls in VALID_TYPES):
G_LOGGER.internal_error(
"Unknown type: {:} for layer attribute: {:}.\n"
"Note: Layer was: {:}".format(type(attr), attr, layer)
)
try:
attr = str(attr)
except:
attr = "<error during conversion>"
attrs[name] = attr
nodes.append(
gs.Node(name=layer.name, op=op_name, attrs=attrs, inputs=node_inputs, outputs=node_outputs)
)
graph = gs.Graph(name=network.name, inputs=graph_inputs, outputs=graph_outputs, nodes=nodes)
return gs.export_onnx(graph)
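# Illustrative usage sketch (not part of the original module): convert a TensorRT
# network into the ONNX-like visualization model and save it for a viewer such as
# Netron. ``NetworkFromOnnxPath`` is assumed to be available from
# ``polygraphy.backend.trt``, and "model.onnx" is a hypothetical input path.
if __name__ == "__main__":
    import onnx

    from polygraphy.backend.trt import NetworkFromOnnxPath

    make_network = NetworkFromOnnxPath("model.onnx")  # Hypothetical model path.
    # The resulting model is for visualization/debugging only; it is not valid ONNX.
    onnx_like_model = OnnxLikeFromNetwork(make_network)()
    onnx.save(onnx_like_model, "trt_network_onnx_like.onnx")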
| TensorRT-master | tools/Polygraphy/polygraphy/backend/trt/loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import func, mod, util
from polygraphy.backend.trt import util as trt_util
from polygraphy.common.interface import TypedDict
from polygraphy.json import Decoder, Encoder, add_json_methods
from polygraphy.logger import G_LOGGER, LogMode
trt = mod.lazy_import("tensorrt")
##
## Data Structures
##
# NOTE: Modifying the structure of the data classes below will break backwards compatibility
@mod.export()
class Algorithm(object):
"""
Represents a TensorRT algorithm variant, which can be uniquely represented
by an implementation ID and tactic ID.
"""
@staticmethod
def from_trt(context, algorithm):
"""
Creates a Polygraphy ``Algorithm`` instance from a TensorRT
``IAlgorithmContext`` and ``IAlgorithm``.
Args:
context (trt.IAlgorithmContext):
The algorithm context corresponding to the layer.
algorithm (trt.IAlgorithm):
The algorithm variant provided by TensorRT.
"""
def unpack_io_info(io_info):
return (io_info.tensor_format, io_info.dtype)
implementation = algorithm.algorithm_variant.implementation
tactic = algorithm.algorithm_variant.tactic
inputs = tuple(unpack_io_info(algorithm.get_algorithm_io_info(i)) for i in range(context.num_inputs))
outputs = tuple(
unpack_io_info(algorithm.get_algorithm_io_info(i))
for i in range(context.num_inputs, context.num_inputs + context.num_outputs)
)
return Algorithm(implementation, tactic, inputs, outputs)
def __init__(self, implementation, tactic, inputs, outputs):
"""
Args:
implementation (int):
The implementation for this Algorithm.
tactic (int):
The tactic for this Algorithm.
inputs (List[Tuple[trt.TensorFormat, trt.DataType]]):
A list of tuples containing a TensorRT tensor format and data type for each input.
outputs (List[Tuple[trt.TensorFormat, trt.DataType]]):
A list of tuples containing a TensorRT tensor format and data type for each output.
"""
def validate_meta(meta):
for (fmt, dtype) in meta:
if not isinstance(fmt, trt.TensorFormat):
G_LOGGER.critical(
"'format' must be an instance of trt.TensorFormat, but is: {:}.\n"
"Note: Provided input/output metadata was: {:}".format(fmt, meta)
)
if not isinstance(dtype, trt.DataType):
G_LOGGER.critical(
"'dtype' must be an instance of trt.DataType, but is: {:}.\n"
"Note: Provided input/output metadata was: {:}".format(dtype, meta)
)
return meta
self.implementation = implementation
self.tactic = tactic
# Use tuples here so the class is hashable.
self.inputs = tuple(validate_meta(inputs))
self.outputs = tuple(validate_meta(outputs))
def __str__(self):
def io_str(io):
return tuple((str(tensor_format), str(dtype)) for tensor_format, dtype in io)
return "(Implementation: {:}, Tactic: {:}) | Inputs: {:} | Outputs: {:}".format(
self.implementation, self.tactic, io_str(self.inputs), io_str(self.outputs)
)
def __eq__(self, other):
tactic_matches = self.implementation == other.implementation and self.tactic == other.tactic
io_matches = self.inputs == other.inputs and self.outputs == other.outputs
return tactic_matches and io_matches
def __hash__(self):
return hash((self.implementation, self.tactic, self.inputs, self.outputs))
@Encoder.register(Algorithm)
def encode(algo):
def encode_algo_io(io_list):
encoded = []
for fmt, dtype in io_list:
encoded.append((str(fmt), str(dtype)))
return encoded
return {
"implementation": algo.implementation,
"tactic": algo.tactic,
"inputs": encode_algo_io(algo.inputs),
"outputs": encode_algo_io(algo.outputs),
}
@Decoder.register(Algorithm)
def decode(dct):
def decode_algo_io(io_list):
decoded = []
for fmt, dtype in io_list:
decoded.append((util.getattr_nested(trt, fmt), util.getattr_nested(trt, dtype)))
return decoded
return Algorithm(
implementation=dct["implementation"],
tactic=dct["tactic"],
inputs=decode_algo_io(dct["inputs"]),
outputs=decode_algo_io(dct["outputs"]),
)
@mod.export()
@add_json_methods("tactic replay file")
class TacticReplayData(TypedDict(lambda: str, lambda: Algorithm)):
"""
Maps layer names to corresponding tactics.
More specifically, it is an ``OrderedDict[str, Algorithm]``.
"""
def add(self, name, algorithm):
"""
Add an entry into the tactic replay data.
Args:
name (str): The name of the layer
algorithm (Algorithm): The algorithm to use for the layer.
Returns:
TacticReplayData: self, to allow for method chaining.
"""
if not isinstance(algorithm, Algorithm):
G_LOGGER.critical("Tactic replay data expects Algorithm instances, not: {:}".format(algorithm))
self.dct[name] = algorithm
return self
def __str__(self):
return "\n".join(["Layer: {:}\n\tAlgorithm: {:}".format(name, algorithm) for (name, algorithm) in self.items()])
@Encoder.register(TacticReplayData)
def encode(replay):
return {"replay": replay.dct}
@Decoder.register(TacticReplayData)
def decode(dct):
return TacticReplayData(dct["replay"])
##
## Algorithm Selectors
##
# Everything is encapsulated in functions so that we don't create a dependency on TensorRT
# when objects from this file are imported.
def get_base_selector_type():
ALGO_SELECTOR_ENABLED = False
if mod.version(trt.__version__) >= mod.version("8.0"):
ALGO_SELECTOR_ENABLED = True
IAlgorithmSelector = trt.IAlgorithmSelector
else:
IAlgorithmSelector = object
class BaseSelector(IAlgorithmSelector):
def __init__(self, data):
if not ALGO_SELECTOR_ENABLED:
trt_util.fail_unavailable("Algorithm selector")
# Must explicitly initialize parent for any trampoline class! Will mysteriously segfault without this.
IAlgorithmSelector.__init__(self)
self.path = None
self.data = TacticReplayData()
if isinstance(data, TacticReplayData):
self.data = data
else:
self.path = data
def select_algorithms(self, context, choices):
return list(range(len(choices)))
return BaseSelector
@mod.export()
def TacticRecorder(record):
"""
A TensorRT algorithm selector that can record tactics selected by TensorRT.
The generated tactic replay file is specific to network and builder configuration.
Changing either of these may render the tactic replay file unusable.
Args:
record (Union[path, file-like, TacticReplayData]):
A path or file-like object or an empty ``TacticReplayData`` instance.
Tactics will be recorded and stored here.
"""
class TacticRecorderClass(get_base_selector_type()):
def __init__(self):
super().__init__(record)
# The function that constructed this instance
self.make_func = TacticRecorder
@G_LOGGER.log_exception
def report_algorithms(self, contexts, choices):
"""
Records algorithms selected by TensorRT into the provided path or
``TacticReplayData`` instance.
Args:
contexts (List[trt.IAlgorithmContext]):
The list of TensorRT algorithm contexts. Generally, there is one per layer.
choices (List[trt.IAlgorithm]):
A list of selected algorithms for each context.
Returns:
None
"""
for (context, choice) in zip(contexts, choices):
self.data.add(context.name, Algorithm.from_trt(context, choice))
if self.path is not None:
self.data.save(self.path)
return TacticRecorderClass()
@mod.export()
def TacticReplayer(replay):
"""
A TensorRT algorithm selector that can replay tactics according to a tactic replay file.
Args:
replay (Union[path, file-like, TacticReplayData]):
A path or file-like object containing a JSON-ified ``TacticReplayData`` instance,
or a ``TacticReplayData`` instance.
"""
class TacticReplayerClass(get_base_selector_type()):
def __init__(self):
super().__init__(replay)
if self.path is not None:
self.data = TacticReplayData.load(self.path)
# The function that constructed this instance
self.make_func = TacticReplayer
@G_LOGGER.log_exception
@func.constantmethod
def select_algorithms(self, context, choices):
"""
Selects an algorithm based on ``self.data`` if possible. Otherwise, returns
default tactics.
Args:
context (trt.IAlgorithmContext):
The TensorRT algorithm context.
choices (List[trt.IAlgorithm]):
A list of TensorRT algorithm choices.
Returns:
List[int]:
The indices of selected tactics. If ``self.data`` includes the layer and
TensorRT provides a matching tactic, this will always be of length 1.
Raises:
PolygraphyException:
If a tactic is set for a layer in ``self.data`` but is not provided by
TensorRT as a choice for that layer.
"""
default_choices = super().select_algorithms(context, choices)
if not self.data: # No replay data, we are in recording mode.
return default_choices
if context.name not in self.data:
G_LOGGER.warning(
"Layer: {:} was not found in the tactic replay. Falling back to default tactics.".format(
context.name
)
)
G_LOGGER.warning(
"Has the network changed since the tactic replay file was generated?\n"
"Note: Layers in the tactic replay are:\n\t{:}".format("\n\t".join(self.data.keys())),
mode=LogMode.ONCE,
)
return default_choices
# Need to find the index of the tactic we want.
to_select = self.data[context.name]
tactic_choices = [Algorithm.from_trt(context, algo) for algo in choices]
if to_select not in tactic_choices:
G_LOGGER.critical(
"Layer: {:} | Tactic in replay was not provided by TensorRT as a choice for this layer.\n"
"Has the network or builder configuration changed since the replay file was generated?\n"
"Note: Tactic in replay was:\n\t{:}\nProvided choices were:\n\t{:}".format(
context.name, to_select, "\n\t".join(map(str, tactic_choices))
)
)
return [tactic_choices.index(to_select)]
@G_LOGGER.log_exception
@func.constantmethod
def report_algorithms(self, contexts, choices):
"""
Checks if the tactics specified in ``self.data`` were selected and raises an exception
if not.
Raises:
PolygraphyException:
If a tactic specified in ``self.data`` was not selected for a layer.
"""
for (context, choice) in zip(contexts, choices):
if context.name in self.data:
to_select = self.data[context.name]
selected = Algorithm.from_trt(context, choice)
if to_select != selected:
G_LOGGER.critical(
"Layer: {:} | TensorRT selected a tactic different than the one specified in the tactic replay.\n"
"Note: Tactic in replay was:\n\t{:}, but TensorRT selected:\n\t{:}".format(
context.name, to_select, selected
)
)
return TacticReplayerClass()
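# Illustrative usage sketch (not part of the original module): record the tactics
# TensorRT selects during one engine build, then replay them on a later build to make
# tactic selection reproducible. ``CreateConfig``, ``EngineFromNetwork``, and
# ``NetworkFromOnnxPath`` are assumed to come from ``polygraphy.backend.trt`` (with
# ``CreateConfig`` accepting an ``algorithm_selector``); the file paths are hypothetical.
if __name__ == "__main__":
    from polygraphy.backend.trt import CreateConfig, EngineFromNetwork, NetworkFromOnnxPath

    network_loader = NetworkFromOnnxPath("model.onnx")

    # First build: record selected tactics into a replay file.
    record_config = CreateConfig(algorithm_selector=TacticRecorder("replay.json"))
    EngineFromNetwork(network_loader, config=record_config)()

    # Second build: force TensorRT to pick the recorded tactics again.
    replay_config = CreateConfig(algorithm_selector=TacticReplayer("replay.json"))
    EngineFromNetwork(network_loader, config=replay_config)()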
| TensorRT-master | tools/Polygraphy/polygraphy/backend/trt/algorithm_selector.py |
from polygraphy.backend.common.loader import *
| TensorRT-master | tools/Polygraphy/polygraphy/backend/common/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod, util
from polygraphy.backend.base import BaseLoader
@mod.export(funcify=True)
class BytesFromPath(BaseLoader):
"""
Functor that can load a file in binary mode ('rb').
"""
def __init__(self, path):
"""
Loads a file in binary mode ('rb').
Args:
path (str): The file path.
"""
self._path = path
def call_impl(self):
"""
Returns:
bytes: The contents of the file.
"""
return util.load_file(self._path, description="bytes")
@mod.export(funcify=True)
class SaveBytes(BaseLoader):
"""
Functor that can save bytes to a file.
"""
def __init__(self, obj, path):
"""
Saves bytes to a file.
Args:
obj (Union[bytes, Callable() -> bytes]):
The bytes to save or a callable that returns them.
path (str): The file path.
"""
self._bytes = obj
self._path = path
def call_impl(self):
"""
Returns:
bytes: The bytes saved.
"""
obj, _ = util.invoke_if_callable(self._bytes)
util.save_file(obj, self._path)
return obj
@mod.export(funcify=True)
class InvokeFromScript(BaseLoader):
"""
Functor that invokes a function from a Python script.
"""
def __init__(self, path, name):
"""
Invokes the specified function from the specified Python script.
If you intend to use the function more than once, you should import
the function using ``polygraphy.mod.import_from_script`` instead.
Args:
path (str): The path to the Python script. The path must include a '.py' extension.
name (str): The name of the function to import and invoke.
"""
self._path = path
self._name = name
def call_impl(self, *args, **kwargs):
"""
Returns:
object:
The return value of the imported function.
"""
return mod.import_from_script(self._path, self._name)(*args, **kwargs)
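# Illustrative usage sketch (not part of the original module): because these loaders are
# exported with ``funcify=True``, immediately-evaluated variants (``bytes_from_path``,
# ``save_bytes``) are assumed to be generated alongside the functors. The file paths and
# the "load_data" function name below are hypothetical.
if __name__ == "__main__":
    # Round-trip some bytes through the filesystem.
    data = bytes_from_path("model.engine")
    save_bytes(data, "model_copy.engine")

    # Invoke a function defined in a user-supplied Python script.
    result = InvokeFromScript("data_loader.py", name="load_data")()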
| TensorRT-master | tools/Polygraphy/polygraphy/backend/common/loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from collections import OrderedDict
from polygraphy import mod, util
from polygraphy.backend.base import BaseRunner
from polygraphy.common import TensorMetadata
np = mod.lazy_import("numpy")
@mod.export()
class OnnxrtRunner(BaseRunner):
"""
Runs inference using an ONNX-Runtime inference session.
"""
def __init__(self, sess, name=None):
"""
Args:
sess (Union[onnxruntime.InferenceSession, Callable() -> onnxruntime.InferenceSession]):
An ONNX-Runtime inference session or a callable that returns one.
"""
super().__init__(name=name, prefix="onnxrt-runner")
self._sess = sess
def activate_impl(self):
self.sess, _ = util.invoke_if_callable(self._sess)
def get_input_metadata_impl(self):
ONNX_RT_TYPE_TO_NP = {
"tensor(double)": np.float64,
"tensor(float)": np.float32,
"tensor(float16)": np.float16,
"tensor(int16)": np.int16,
"tensor(int32)": np.int32,
"tensor(int64)": np.int64,
"tensor(int8)": np.int8,
"tensor(uint16)": np.uint16,
"tensor(uint32)": np.uint32,
"tensor(uint64)": np.uint64,
"tensor(uint8)": np.uint8,
"tensor(bool)": np.bool,
"tensor(string)": np.unicode,
}
meta = TensorMetadata()
for node in self.sess.get_inputs():
dtype = ONNX_RT_TYPE_TO_NP[node.type] if node.type in ONNX_RT_TYPE_TO_NP else None
meta.add(node.name, dtype=dtype, shape=node.shape)
return meta
def infer_impl(self, feed_dict):
start = time.time()
inference_outputs = self.sess.run(None, feed_dict)
end = time.time()
out_dict = OrderedDict()
for node, out in zip(self.sess.get_outputs(), inference_outputs):
out_dict[node.name] = out
self.inference_time = end - start
return out_dict
def deactivate_impl(self):
del self.sess
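# Illustrative usage sketch (not part of the original module): run a single inference
# with ONNX-Runtime. ``SessionFromOnnx`` is defined in ``polygraphy.backend.onnxrt``;
# "model.onnx" and the input name/shape are hypothetical.
if __name__ == "__main__":
    import numpy as np

    from polygraphy.backend.onnxrt import SessionFromOnnx

    with OnnxrtRunner(SessionFromOnnx("model.onnx")) as runner:
        feed_dict = {"input": np.ones((1, 3, 224, 224), dtype=np.float32)}
        outputs = runner.infer(feed_dict)
        print(runner.last_inference_time(), list(outputs.keys()))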
| TensorRT-master | tools/Polygraphy/polygraphy/backend/onnxrt/runner.py |
from polygraphy.backend.onnxrt.loader import *
from polygraphy.backend.onnxrt.runner import *
| TensorRT-master | tools/Polygraphy/polygraphy/backend/onnxrt/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod, util
from polygraphy.backend.base import BaseLoader
onnxruntime = mod.lazy_import("onnxruntime")
@mod.export(funcify=True)
class SessionFromOnnx(BaseLoader):
"""
Functor that builds an ONNX-Runtime inference session.
"""
def __init__(self, model_bytes):
"""
Builds an ONNX-Runtime inference session.
Args:
model_bytes (Union[Union[bytes, str], Callable() -> Union[bytes, str]]):
A serialized ONNX model or a path to a model or a callable that returns one of those.
"""
self._model_bytes_or_path = model_bytes
def call_impl(self):
"""
Returns:
onnxruntime.InferenceSession: The inference session.
"""
model_bytes, _ = util.invoke_if_callable(self._model_bytes_or_path)
return onnxruntime.InferenceSession(model_bytes)
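# Illustrative usage sketch (not part of the original module): the loader accepts either
# a path or serialized model bytes. ``session_from_onnx`` is the immediately-evaluated
# variant assumed to be generated by ``mod.export(funcify=True)``; "model.onnx" is a
# hypothetical path.
if __name__ == "__main__":
    # From a path:
    sess = session_from_onnx("model.onnx")

    # From serialized bytes already in memory:
    with open("model.onnx", "rb") as f:
        sess = session_from_onnx(f.read())
    print([inp.name for inp in sess.get_inputs()])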
| TensorRT-master | tools/Polygraphy/polygraphy/backend/onnxrt/loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import time
from collections import OrderedDict
from polygraphy import mod, util
from polygraphy.backend.base import BaseRunner
from polygraphy.backend.pluginref.references import OP_REGISTRY
from polygraphy.logger import G_LOGGER
np = mod.lazy_import("numpy")
onnx_util = mod.lazy_import("polygraphy.backend.onnx.util")
@mod.export()
class PluginRefRunner(BaseRunner):
"""
Runner for reference checking TensorRT plugins
"""
def __init__(self, graph, name=None):
"""
Args:
graph (Union[onnx_graphsurgeon.Graph, Callable() -> onnx_graphsurgeon.Graph]):
An ONNX-GraphSurgeon graph or a callable that returns one.
name (str):
The human-readable name prefix to use for this runner.
A runner count and timestamp will be appended to this prefix.
"""
super().__init__(name=name, prefix="pluginref-runner")
self._graph = graph
def activate_impl(self):
self.graph, _ = util.invoke_if_callable(self._graph)
def get_input_metadata_impl(self):
return onnx_util.meta_from_gs_tensors(self.graph.inputs)
def infer_impl(self, feed_dict):
start = time.time()
intermediate_tensors = copy.copy(feed_dict)
for node in self.graph.nodes:
if node.op not in OP_REGISTRY:
G_LOGGER.critical("Op: {:} does not have a reference implementation registered!".format(node.op))
intermediate_tensors.update(OP_REGISTRY[node.op](node, intermediate_tensors))
outputs = OrderedDict()
for out in self.graph.outputs:
outputs[out.name] = intermediate_tensors[out.name]
end = time.time()
self.inference_time = end - start
return outputs
def deactivate_impl(self):
del self.graph
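# Illustrative usage sketch (not part of the original module): reference-check an ONNX
# model whose ops all have registered reference implementations (see
# ``polygraphy.backend.pluginref.references``). ``GsFromOnnx`` and ``OnnxFromPath`` are
# assumed to come from ``polygraphy.backend.onnx``; the model path and input are hypothetical.
if __name__ == "__main__":
    import numpy as np

    from polygraphy.backend.onnx import GsFromOnnx, OnnxFromPath

    with PluginRefRunner(GsFromOnnx(OnnxFromPath("model.onnx"))) as runner:
        feed_dict = {"x": np.random.rand(1, 3, 8, 8).astype(np.float32)}
        outputs = runner.infer(feed_dict)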
| TensorRT-master | tools/Polygraphy/polygraphy/backend/pluginref/runner.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.logger import G_LOGGER
np = mod.lazy_import("numpy")
gs = mod.lazy_import("onnx_graphsurgeon")
OP_REGISTRY = {} # Dict[str, Callable]: Maps op names to reference implementations
def register(op):
"""
Registers a function as the reference implementation for a given op.
Args:
op (str): The name of the op for which to register this function.
"""
def register_impl(func):
def wrapped_func(node, intermediate_tensors):
inputs = []
for inp in node.inputs:
if inp.is_empty(): # Optional input
inputs.append(None)
elif isinstance(inp, gs.Constant):
inputs.append(inp.values)
elif inp.name in intermediate_tensors:
inputs.append(intermediate_tensors[inp.name])
else:
G_LOGGER.internal_error(
"Input: {:} was not found in intermediate tensors and is not a constant.\n"
"Note: Intermediate tensors include: {:}".format(inp.name, list(intermediate_tensors.keys()))
)
outputs = func(node.attrs, *inputs)
if len(outputs) != len(node.outputs):
G_LOGGER.internal_error(
"{:} reference implementation returned the wrong number of outputs.\n"
"Note: Expected {:} but recevied {:}".format(op, len(node.outputs), len(outputs))
)
return {out_tensor.name: out for out_tensor, out in zip(node.outputs, outputs)}
OP_REGISTRY[op] = wrapped_func
return wrapped_func
return register_impl
@register("Identity")
def run_identity(attrs, x):
return [x]
@register("InstanceNormalization")
def run_instancenorm(attrs, x, weights, bias):
epsilon = attrs.get("epsilon", 1.0e-5)
rank = len(x.shape)
axis = tuple(range(2, rank))
mean = np.mean(x, axis=axis, keepdims=True)
var = np.var(x, axis=axis, keepdims=True)
# Weights and bias need to be broadcast to the shape of x; the C dimension should be a wildcard.
broadcast_shape = [-1] + [1] * (rank - 2)
weights = weights.reshape(broadcast_shape)
bias = bias.reshape(broadcast_shape)
res = weights * (x - mean) / np.sqrt(var + epsilon) + bias
return [res]
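# Illustrative sketch (not part of the original module): additional reference
# implementations can be registered the same way as the built-ins above. The decorated
# function receives the node attributes followed by one array per input, and must return
# a list with one array per node output.
@register("Relu")
def run_relu(attrs, x):
    # Elementwise max against zero, matching the ONNX Relu definition.
    return [np.maximum(x, 0)]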
| TensorRT-master | tools/Polygraphy/polygraphy/backend/pluginref/references.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy.backend.pluginref.runner import *
| TensorRT-master | tools/Polygraphy/polygraphy/backend/pluginref/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from collections import OrderedDict
from polygraphy import mod, util
from polygraphy.backend.base import BaseRunner
torch = mod.lazy_import("torch")
@mod.export()
class PytRunner(BaseRunner):
"""
Runs inference using PyTorch.
"""
def __init__(self, model, input_metadata, output_names, name=None):
"""
Args:
model (Union[torch.nn.Module, Callable() -> torch.nn.Module]):
A torch.nn.Module or subclass or a callable that returns one.
input_metadata (TensorMetadata): Mapping of input names to their data types and shapes.
output_names (List[str]):
A list of output names of the model. This information is used by the
Comparator to determine which outputs to compare.
name (str):
The human-readable name prefix to use for this runner.
A runner count and timestamp will be appended to this prefix.
"""
super().__init__(name=name, prefix="pytorch-runner")
self._model = model
self.input_metadata = input_metadata
self.output_names = output_names
def activate_impl(self):
self.model, _ = util.invoke_if_callable(self._model)
self.model.eval()
def get_input_metadata_impl(self):
return self.input_metadata
def infer_impl(self, feed_dict):
with torch.no_grad():
inputs = [
torch.from_numpy(val.astype(dtype)).cuda()
for (val, (dtype, _)) in zip(feed_dict.values(), self.input_metadata.values())
]
start = time.time()
outputs = self.model(*inputs)
end = time.time()
out_dict = OrderedDict()
for name, output in zip(self.output_names, outputs):
out_dict[name] = output.cpu().numpy()
self.inference_time = end - start
return out_dict
def deactivate_impl(self):
del self.model
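# Illustrative usage sketch (not part of the original module): wrap a ``torch.nn.Module``
# for use with Polygraphy. ``TensorMetadata`` comes from ``polygraphy.common``; the toy
# model, tensor names, and shapes are hypothetical, and a CUDA-capable GPU is required
# because the runner moves inputs to the GPU.
if __name__ == "__main__":
    import numpy as np
    import torch

    from polygraphy.common import TensorMetadata

    class Identity(torch.nn.Module):
        def forward(self, x):
            # Return a tuple so outputs can be zipped with ``output_names``.
            return (x,)

    input_metadata = TensorMetadata()
    input_metadata.add("x", dtype=np.float32, shape=(1, 4))

    with PytRunner(Identity().cuda(), input_metadata, output_names=["y"]) as runner:
        outputs = runner.infer({"x": np.zeros((1, 4), dtype=np.float32)})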
| TensorRT-master | tools/Polygraphy/polygraphy/backend/pyt/runner.py |
from polygraphy.backend.pyt.runner import *
| TensorRT-master | tools/Polygraphy/polygraphy/backend/pyt/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Sets up everything needed to perform inference in TensorFlow.
import os
import time
from collections import OrderedDict
from polygraphy import mod, util
from polygraphy.backend.base import BaseRunner
from polygraphy.backend.tf import util as tf_util
from polygraphy.logger import G_LOGGER
tf = mod.lazy_import("tensorflow", version="<2.0")
@mod.export()
class TfRunner(BaseRunner):
"""
Runs inference using a TensorFlow session.
"""
def __init__(self, sess, timeline_dir=None, name=None):
"""
Args:
sess (Union[Tuple[tf.Session, Sequence[str]], Callable() -> Tuple[tf.Session, Sequence[str]]]):
A tuple containing a TensorFlow session and output names or a callable that returns one.
timeline_dir (str):
Path to write a TensorFlow timeline.
Note that profiling may affect execution time.
name (str):
The human-readable name prefix to use for this runner.
A runner count and timestamp will be appended to this prefix.
"""
super().__init__(name=name, prefix="tf-runner")
self._sess = sess
self.timeline_dir = timeline_dir
self.num_inferences = 0
self.run_options = None
self.run_metadata = None
if self.timeline_dir is not None:
# Enable profiling
G_LOGGER.warning("Profiling is enabled. This will impact performance")
self.run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
self.run_metadata = tf.RunMetadata()
def activate_impl(self):
(self.sess, self.output_names), _ = util.invoke_if_callable(self._sess)
def get_input_metadata_impl(self):
return tf_util.get_input_metadata(self.sess.graph)
def infer_impl(self, feed_dict):
G_LOGGER.extra_verbose("Received feed_dict: {:}".format(feed_dict))
start = time.time()
inference_outputs = self.sess.run(
self.output_names, feed_dict=feed_dict, options=self.run_options, run_metadata=self.run_metadata
)
end = time.time()
out_dict = OrderedDict()
for name, out in zip(self.output_names, inference_outputs):
out_dict[name] = out
self.inference_time = end - start
if self.timeline_dir is not None:
from tensorflow.python.client import timeline
t1 = timeline.Timeline(self.run_metadata.step_stats)
util.save_file(
contents=t1.generate_chrome_trace_format(),
dest=os.path.join(self.timeline_dir, "run-{:}".format(self.num_inferences)),
mode="w",
)
self.num_inferences += 1
return out_dict
def deactivate_impl(self):
self.sess.close()
del (self.sess, self.output_names)
self.num_inferences = 0
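# Illustrative usage sketch (not part of the original module): run a frozen TensorFlow
# graph. ``GraphFromFrozen`` and ``SessionFromGraph`` are defined in
# ``polygraphy.backend.tf``; "frozen_model.pb" and the input tensor name/shape are hypothetical.
if __name__ == "__main__":
    import numpy as np

    from polygraphy.backend.tf import GraphFromFrozen, SessionFromGraph

    with TfRunner(SessionFromGraph(GraphFromFrozen("frozen_model.pb"))) as runner:
        outputs = runner.infer({"input:0": np.zeros((1, 224, 224, 3), dtype=np.float32)})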
| TensorRT-master | tools/Polygraphy/polygraphy/backend/tf/runner.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
from polygraphy import mod, util
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER
tf = mod.lazy_import("tensorflow", version="<2.0")
def load_graph(path):
"""
Loads a TensorFlow frozen model.
Args:
path (Union[str, tf.Graph, tf.GraphDef]):
A path to the frozen model, or a frozen TensorFlow graph or graphdef.
Returns:
tf.Graph: The TensorFlow graph
"""
if isinstance(path, tf.Graph):
return path
if isinstance(path, str):
graphdef = tf.compat.v1.GraphDef()
import google
try:
graphdef.ParseFromString(util.load_file(path, description="GraphDef"))
except google.protobuf.message.DecodeError:
G_LOGGER.backtrace()
G_LOGGER.critical(
"Could not import TensorFlow GraphDef from: {:}. Is this a valid TensorFlow model?".format(path)
)
elif isinstance(path, tf.compat.v1.GraphDef):
graphdef = path
with tf.Graph().as_default() as graph:
tf.import_graph_def(graphdef, name="")
return graph
def find_nodes_by_ops(graphdef, ops):
ops = set(ops)
return [node for node in graphdef.node if any([op in node.op for op in ops])]
def map_node_outputs(graphdef):
def sanitize_input_name(input_name):
# Strip port information and control symbol
split_input = input_name.split(":")
if len(split_input) > 1:
split_input.pop(-1)
return ":".join(split_input).replace("^", "")
node_outputs = defaultdict(list)
for node in graphdef.node:
for input_name in node.input:
node_outputs[sanitize_input_name(input_name)].append(node)
return node_outputs
def get_tensor_metadata(tensors):
metadata = TensorMetadata()
for tensor in tensors:
try:
shape = [elem.value if hasattr(elem, "value") else elem for elem in tensor.shape]
except ValueError:
# Happens when rank is unknown
shape = None
metadata.add(tensor.name, dtype=tensor.dtype.as_numpy_dtype, shape=shape)
return metadata
def get_input_metadata(graph):
input_tensors = []
input_nodes = find_nodes_by_ops(graph.as_graph_def(), ["Placeholder", "FIFOQueue"])
G_LOGGER.verbose("Found input tensors: {:}".format(["{:}: {:}".format(n.name, n.op) for n in input_nodes]))
for node in input_nodes:
input_tensors.append(graph.get_tensor_by_name(node.name + ":0"))
G_LOGGER.verbose("Retrieved TensorFlow input_tensors: {:}".format(input_tensors))
return get_tensor_metadata(input_tensors)
def get_output_metadata(graph, layerwise=False):
graphdef = graph.as_graph_def()
node_output_map = map_node_outputs(graphdef)
def is_output_node(node):
# Make sure that we're not using hanging nodes as outputs - must have at least one input.
if len(node_output_map[node.name]) != 0 or len(node.input) == 0:
return False
# Tensors with no shape cannot be outputs and TensorFlow doesn't like certain ops as outputs.
EXCLUDE_OPS = [
"Switch",
"FusedBatchNorm",
"Assert",
"NextIteration",
"Enter",
"LoopCond",
"Exit",
"Print",
"Assign",
"NoOp",
"ReadVariableOp",
"VarIsInitializedOp",
"Const",
]
# Additionally, we sometimes need to exclude entire namespaces e.g. while loops.
EXCLUDE_NAMESPACES = ["while", "Assert"]
if any([ex_op in node.op for ex_op in EXCLUDE_OPS]) or any([ns in node.name for ns in EXCLUDE_NAMESPACES]):
G_LOGGER.extra_verbose(
"Excluding {:}, op {:} is not a valid output op or is part of an excluded namespace "
"(Note: excluded namespaces: {:})".format(node.name, node.op, EXCLUDE_NAMESPACES)
)
return False
return True
# For layerwise mode, every layer becomes an output.
if layerwise:
output_nodes = list(graphdef.node)
G_LOGGER.verbose("Running in layerwise mode. Marking {:} layers as potential outputs".format(len(output_nodes)))
else:
output_nodes = [node for node in graphdef.node if is_output_node(node)]
G_LOGGER.extra_verbose("Found likely output nodes: {:}".format(output_nodes))
output_tensors = []
for node in output_nodes:
tensor_name = node.name + ":0"
try:
tensor = graph.get_tensor_by_name(tensor_name)
output_tensors.append(tensor)
except KeyError:
G_LOGGER.warning("Could not import: {:}. Skipping.".format(tensor_name))
if len(output_tensors) != len(output_nodes):
G_LOGGER.warning(
"Excluded {:} ops that don't seem like outputs. Use -vv/--super-verbose, or set "
"logging verbosity to EXTRA_VERBOSE to view them.".format(len(output_nodes) - len(output_tensors))
)
G_LOGGER.extra_verbose(
"Found output op types in graph: {:}".format({tensor.op.type for tensor in output_tensors})
)
G_LOGGER.verbose("Retrieved TensorFlow output_tensors: {:}".format(output_tensors))
return get_tensor_metadata(output_tensors)
def get_graph_output_names(graph):
return list(get_output_metadata(graph).keys())
def str_from_graph(graph, mode):
graph_str = ""
input_metadata = get_input_metadata(graph)
output_metadata = get_output_metadata(graph)
graph_str += "---- {:} Graph Inputs ----\n{:}\n\n".format(len(input_metadata), input_metadata)
graph_str += "---- {:} Graph Outputs ----\n{:}\n\n".format(len(output_metadata), output_metadata)
graph_str += "---- {:} Nodes ----\n".format(len(graph.as_graph_def().node))
if mode == "basic":
G_LOGGER.warning(
"Displaying layer information is unsupported for TensorFlow graphs. "
"Please use --mode=full if you would like to see the raw nodes"
)
if mode == "full":
for node in graph.as_graph_def().node:
graph_str += str(node) + "\n"
graph_str += "\n"
return util.indent_block(graph_str, level=0)
| TensorRT-master | tools/Polygraphy/polygraphy/backend/tf/util.py |
from polygraphy.backend.tf.loader import *
from polygraphy.backend.tf.runner import *
def register_logger_callback():
from polygraphy.logger import G_LOGGER
def set_tf_logging_level(sev):
import os
from polygraphy import mod
tf = mod.lazy_import("tensorflow", version="<2.0")
if not mod.has_mod(tf):
return
if sev > G_LOGGER.WARNING:
tf_sev = tf.compat.v1.logging.ERROR
tf_logging_level = "3"
elif sev > G_LOGGER.INFO:
tf_sev = tf.compat.v1.logging.WARN
tf_logging_level = "2"
elif sev > G_LOGGER.VERBOSE:
tf_sev = tf.compat.v1.logging.INFO
tf_logging_level = "1"
else:
tf_sev = tf.compat.v1.logging.DEBUG
tf_logging_level = "0"
tf.compat.v1.logging.set_verbosity(tf_sev)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = tf_logging_level
G_LOGGER.register_callback(set_tf_logging_level) # Will be registered when this backend is imported.
register_logger_callback()
| TensorRT-master | tools/Polygraphy/polygraphy/backend/tf/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Sets up everything needed to perform inference in TensorFlow.
import os
from polygraphy import constants, mod, util
from polygraphy.backend.base import BaseLoader
from polygraphy.backend.tf import util as tf_util
from polygraphy.logger import G_LOGGER
tf = mod.lazy_import("tensorflow", version="<2.0")
@mod.export(funcify=True)
class OptimizeGraph(BaseLoader):
"""
Functor that freezes a TensorFlow graph, and folds constants.
"""
def __init__(self, graph):
"""
Freezes a TensorFlow graph and folds constants.
Args:
graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]):
A tuple containing a TensorFlow graph and output names or a callable that returns one.
"""
self._graph = graph
def constfold(self, graphdef, output_names):
from tensorflow.core.protobuf import config_pb2, meta_graph_pb2, rewriter_config_pb2
from tensorflow.python.framework import importer, ops
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.training import saver
graph = ops.Graph()
with graph.as_default():
output_collection = meta_graph_pb2.CollectionDef()
output_list = output_collection.node_list.value
for output in output_names:
output_list.append(output.encode("utf-8"))
importer.import_graph_def(graphdef, name="")
metagraph = saver.export_meta_graph(graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
metagraph.collection_def["train_op"].CopyFrom(output_collection)
rewriter_config = rewriter_config_pb2.RewriterConfig()
rewriter_config.optimizers.extend(["constfold"])
rewriter_config.meta_optimizer_iterations = rewriter_config_pb2.RewriterConfig.ONE
session_config = config_pb2.ConfigProto()
session_config.graph_options.rewrite_options.CopyFrom(rewriter_config)
return tf_optimizer.OptimizeGraph(session_config, metagraph, graph_id=b"graph")
def call_impl(self):
"""
Returns:
Tuple[tf.Graph, Sequence[str]]: The TensorFlow graph, and the names of its outputs.
"""
(graph, output_names), _ = util.invoke_if_callable(self._graph)
with tf.Session(graph=graph) as sess:
sess.run(tf.initializers.global_variables())
sess.run(tf.initializers.local_variables())
graphdef = sess.graph.as_graph_def()
removed = tf.graph_util.remove_training_nodes(graphdef)
G_LOGGER.ultra_verbose("Removed nodes: {:}".format(removed))
for node in graphdef.node:
if node.op == "RefSwitch":
node.op = "Switch"
for index in range(len(node.input)):
if "moving_" in node.input[index]:
node.input[index] = node.input[index] + "/read"
elif node.op == "AssignSub":
node.op = "Sub"
if "use_locking" in node.attr:
del node.attr["use_locking"]
elif node.op == "AssignAdd":
node.op = "Add"
if "use_locking" in node.attr:
del node.attr["use_locking"]
elif node.op == "Assign":
node.op = "Identity"
if "use_locking" in node.attr:
del node.attr["use_locking"]
if "validate_shape" in node.attr:
del node.attr["validate_shape"]
if len(node.input) == 2:
# input0: ref: Should be from a Variable node. May be uninitialized.
# input1: value: The value to be assigned to the variable.
node.input[0] = node.input[1]
del node.input[1]
# Strip port information from outputs
output_names = [name.split(":")[0] for name in output_names]
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, graphdef, output_names)
output_graph_def = self.constfold(output_graph_def, output_names)
return graph_from_frozen(output_graph_def)
@mod.export(funcify=True)
class GraphFromKeras(BaseLoader):
"""
Functor that loads a TensorFlow model from Keras.
"""
def __init__(self, path):
"""
Loads a TensorFlow model from Keras.
Args:
path (Union[str, h5py.File]): A path to the saved model, or the file object.
"""
self.path = path
def call_impl(self):
"""
Returns:
Tuple[tf.Graph, Sequence[str]]: The TensorFlow graph, and the names of its outputs.
"""
from tensorflow.python import keras
from tensorflow.python.keras import backend
model = keras.models.load_model(self.path)
graph = backend.get_session().graph
return graph, tf_util.get_graph_output_names(graph)
@mod.export(funcify=True)
class GraphFromFrozen(BaseLoader):
"""
Functor that loads a TensorFlow frozen model.
"""
def __init__(self, path):
"""
Loads a TensorFlow frozen model.
Args:
path (Union[str, tf.Graph, tf.GraphDef]):
A path to the frozen model, or a frozen TensorFlow graph or graphdef.
"""
self.path = path
def call_impl(self):
"""
Returns:
Tuple[tf.Graph, Sequence[str]]: The TensorFlow graph, and the names of its outputs.
"""
graph = tf_util.load_graph(self.path)
return graph, tf_util.get_graph_output_names(graph)
@mod.export(funcify=True)
class GraphFromCkpt(BaseLoader):
"""
Functor that loads a TensorFlow model from a checkpoint. Note that in order to use checkpoints,
you must NOT use subprocesses in the Comparator.
"""
def __init__(self, dir, name=None):
"""
Loads a TensorFlow model from a checkpoint.
Args:
dir (str): Path to a directory containing checkpoints.
name (str):
The name of the checkpoint to load, not including the file extension.
For example, to load `model.meta`, the argument would be `model`.
"""
self.dir = dir
self.name = name
def call_impl(self):
"""
Returns:
Tuple[tf.Graph, Sequence[str]]: The TensorFlow graph, and the names of its outputs.
"""
# If `name` is not provided, this expects that the directory contains a `checkpoint` file with the contents:
#
# model_checkpoint_path: "model"
# all_model_checkpoint_paths: "model"
#
# where "model" is the checkpoint name
if not os.path.isdir(self.dir):
G_LOGGER.warning("Specified checkpoint directory: {:} does not look like a directory.".format(self.dir))
if self.name is None:
G_LOGGER.verbose("Checkpoint name was not explicitly provided, searching for `checkpoint` file")
checkpoint = tf.train.get_checkpoint_state(self.dir)
if checkpoint is None:
ckpt_file_contents = '\nmodel_checkpoint_path: "model"\nall_model_checkpoint_paths: "model"\n'
G_LOGGER.critical(
"Checkpoint directory: {:} does not contain a `checkpoint` file, and the checkpoint name was "
"not provided. Please either create a checkpoint file with the contents:\n{:} "
"\nWhere `model` is the name of the checkpoint, or explicitly provide the name with "
"--ckpt, not including file extensions".format(self.dir, ckpt_file_contents)
)
input_checkpoint = checkpoint.model_checkpoint_path
else:
input_checkpoint = os.path.join(self.dir, self.name)
meta_file = input_checkpoint + ".meta"
with tf.Graph().as_default() as graph, tf.compat.v1.Session(graph=graph).as_default() as sess:
saver = tf.compat.v1.train.import_meta_graph(meta_file, clear_devices=True)
saver.restore(sess, input_checkpoint)
return graph, tf_util.get_graph_output_names(graph)
@mod.export(funcify=True)
class UseTfTrt(BaseLoader):
"""
[UNTESTED] Functor that optimizes a TensorFlow model using TF-TRT.
"""
def __init__(
self,
graph,
max_workspace_size=None,
fp16=None,
int8=None,
max_batch_size=None,
is_dynamic_op=False,
minimum_segment_size=None,
):
"""
Optimizes a TensorFlow model using TF-TRT.
Args:
graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]):
A tuple containing a TensorFlow graph and output names or a callable that returns one.
max_workspace_size (int): The maximum workspace size.
fp16 (bool): Whether to run in FP16 mode.
max_batch_size (int): The maximum batch size.
int8 (bool): Whether to run in INT8 mode.
is_dynamic_op (bool): Whether to build TensorRT engines at runtime rather than during conversion.
minimum_segment_size (int): The minimum number of nodes required for a subgraph to be converted to a TensorRT engine.
"""
self._graph = graph
self.max_workspace_size = util.default(max_workspace_size, 1 << 24)
self.fp16 = util.default(fp16, False)
self.int8 = util.default(int8, False)
self.max_batch_size = util.default(max_batch_size, 1)
self.is_dynamic_op = is_dynamic_op
self.minimum_segment_size = util.default(minimum_segment_size, 3)
def call_impl(self):
"""
Returns:
Tuple[tf.Graph, Sequence[str]]: The TensorFlow graph, and the names of its outputs.
"""
from tensorflow.contrib import tensorrt as tf_trt
(graph, output_names), _ = util.invoke_if_callable(self._graph)
precision_mode = "FP16" if self.fp16 else "FP32"
precision_mode = "INT8" if self.int8 else precision_mode
G_LOGGER.info(
"For TF-TRT, using outputs={:}, max_workspace_size_bytes={:}, max_batch_size={:}, "
"minimum_segment_size={:}, is_dynamic_op={:}, precision_mode={:}".format(
output_names,
self.max_workspace_size,
self.max_batch_size,
self.minimum_segment_size,
self.is_dynamic_op,
precision_mode,
)
)
graphdef = tf_trt.create_inference_graph(
graph.as_graph_def(),
outputs=output_names,
max_workspace_size_bytes=self.max_workspace_size,
max_batch_size=self.max_batch_size,
minimum_segment_size=self.minimum_segment_size,
is_dynamic_op=self.is_dynamic_op,
precision_mode=precision_mode,
)
segment_number = 0
for node in graphdef.node:
if node.op == "TRTEngineOp":
engine = node.attr["serialized_segment"].s
segment_number += 1
G_LOGGER.info("Found {:} engines in TFTRT graph".format(segment_number))
with tf.Graph().as_default() as graph:
tf.import_graph_def(graphdef, name="")
return graph, tf_util.get_graph_output_names(graph)
@mod.export(funcify=True)
class ModifyGraphOutputs(BaseLoader):
"""
Functor that modifies outputs of a TensorFlow graph.
"""
def __init__(self, graph, outputs=None):
"""
Modifies outputs of a TensorFlow graph.
Args:
graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]):
A tuple containing a TensorFlow graph and output names or a callable that returns one.
outputs (List[str]):
Names of output tensors. If provided, this will override the outputs
determined by the loader.
If a value of `constants.MARK_ALL` is used instead of a list, all tensors in the network are marked.
"""
self._graph = graph
self.outputs = outputs
def call_impl(self):
"""
Returns:
Tuple[tf.Graph, Sequence[str]]: The TensorFlow graph, and the names of its outputs.
"""
(graph, outputs), _ = util.invoke_if_callable(self._graph)
if self.outputs == constants.MARK_ALL:
outputs = list(tf_util.get_output_metadata(graph, layerwise=True).keys())
elif self.outputs is not None:
outputs = self.outputs
return graph, outputs
@mod.export(funcify=True)
class SaveGraph(BaseLoader):
"""
Functor that writes out artifacts from a TensorFlow graph.
"""
def __init__(self, graph, path=None, tensorboard_dir=None, engine_dir=None):
"""
Writes out artifacts from a TensorFlow Graph.
Args:
graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]):
A tuple containing a TensorFlow graph and output names or a callable that returns one.
path (str): Path at which to save the frozen graphdef.
tensorboard_dir (str): The directory in which to write TensorBoard visualizations.
engine_dir (str): The directory in which to save TF-TRT engines.
"""
self._graph = graph
self.path = path
self.tensorboard_dir = tensorboard_dir
self.engine_dir = engine_dir
def call_impl(self):
"""
Returns:
Tuple[tf.Graph, Sequence[str]]: The TensorFlow graph, and the names of its outputs.
"""
(graph, outputs), _ = util.invoke_if_callable(self._graph)
if self.path:
util.save_file(graph.as_graph_def().SerializeToString(), dest=self.path)
if self.tensorboard_dir:
G_LOGGER.info("Writing tensorboard events to {:}".format(self.tensorboard_dir))
train_writer = tf.compat.v1.summary.FileWriter(self.tensorboard_dir)
train_writer.add_graph(graph)
if self.engine_dir is not None:
graphdef = graph.as_graph_def()
segment_number = 0
for node in graphdef.node:
if node.op == "TRTEngineOp":
engine = node.attr["serialized_segment"].s
if self.engine_dir is not None:
util.save_file(
contents=engine, dest=os.path.join(self.engine_dir, "segment-{:}".format(segment_number))
)
segment_number += 1
return graph, outputs
@mod.export(funcify=True)
class CreateConfig(BaseLoader):
"""
Functor that creates a TensorFlow config.
"""
def __init__(self, gpu_memory_fraction=None, allow_growth=None, use_xla=None):
"""
Creates a TensorFlow config.
Args:
gpu_memory_fraction (float):
The fraction of GPU memory that will be made available to TensorFlow.
This should be a value between 0.0 and 1.0.
allow_growth (bool): Whether to allow GPU memory allocated by TensorFlow to grow.
use_xla (bool): Whether to attempt to enable XLA.
"""
self.gpu_memory_fraction = util.default(gpu_memory_fraction, 0.9)
self.allow_growth = util.default(allow_growth, False)
self.use_xla = util.default(use_xla, False)
def call_impl(self):
"""
Returns:
tf.ConfigProto: The TensorFlow config.
"""
# Session configuration
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=self.gpu_memory_fraction, allow_growth=self.allow_growth
)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
if self.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
G_LOGGER.verbose("Using gpu memory fraction: {:}, XLA: {:}".format(self.gpu_memory_fraction, self.use_xla))
return config
@mod.export(funcify=True)
class SessionFromGraph(BaseLoader):
"""
Functor that creates a TensorFlow session that can be used for inference.
"""
def __init__(self, graph, config=None):
"""
Creates a TensorFlow session.
Args:
graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]):
A tuple containing a TensorFlow graph and output names or a callable that returns one.
config (Union[tf.ConfigProto, Callable() -> tf.ConfigProto]):
A TensorFlow ConfigProto or a callable that returns one.
"""
self.graph = graph
self.config = util.default(config, CreateConfig())
def call_impl(self):
"""
Returns:
tf.Session: The TensorFlow session.
"""
config, _ = util.invoke_if_callable(self.config)
(graph, output_names), _ = util.invoke_if_callable(self.graph)
with graph.as_default() as graph, tf.compat.v1.Session(graph=graph, config=config).as_default() as sess:
G_LOGGER.verbose("Using TensorFlow outputs: {:}".format(output_names))
G_LOGGER.extra_verbose("Initializing variables in TensorFlow Graph")
sess.run(tf.compat.v1.initializers.global_variables())
return sess, output_names
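# Illustrative usage sketch (not part of the original module): compose the loaders above
# to freeze and fold a graph, save the frozen GraphDef, and build a session with a custom
# config. "model.pb" and "frozen.pb" are hypothetical paths.
if __name__ == "__main__":
    load = GraphFromFrozen("model.pb")
    optimize = OptimizeGraph(load)
    save = SaveGraph(optimize, path="frozen.pb")
    config = CreateConfig(gpu_memory_fraction=0.5, allow_growth=True)

    sess, output_names = SessionFromGraph(save, config=config)()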
| TensorRT-master | tools/Polygraphy/polygraphy/backend/tf/loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import time
from collections import defaultdict
from polygraphy import config, func, mod, util
from polygraphy.logger import G_LOGGER, LogMode
np = mod.lazy_import("numpy")
@mod.export()
class BaseRunner(object):
"""
Base class for Polygraphy runners. All runners should override the functions and attributes specified here.
"""
RUNNER_COUNTS = defaultdict(int)
def __init__(self, name=None, prefix=None):
"""
Args:
name (str):
The name to use for this runner.
prefix (str):
The human-readable name prefix to use for this runner.
A runner count and timestamp will be appended to this prefix.
Only used if name is not provided.
"""
prefix = util.default(prefix, "Runner")
if name is None:
count = BaseRunner.RUNNER_COUNTS[prefix]
BaseRunner.RUNNER_COUNTS[prefix] += 1
name = "{:}-N{:}-{:}-{:}".format(prefix, count, time.strftime("%x"), time.strftime("%X"))
self.name = name
self.inference_time = None
self.is_active = False
"""bool: Whether this runner has been activated, either via context manager, or by calling ``activate()``."""
def __enter__(self):
"""
Activate the runner for inference. For example, this may involve allocating CPU or GPU memory.
"""
self.activate()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
Deactivate the runner. For example, this may involve freeing CPU or GPU memory.
"""
self.deactivate()
# Implementation for runner activation. Derived classes should override this function
# rather than ``activate()``.
def activate_impl(self):
pass
def activate(self):
"""
Activate the runner for inference. For example, this may involve allocating CPU or GPU memory.
Generally, you should use a context manager instead of manually activating and deactivating.
For example:
::
with RunnerType(...) as runner:
runner.infer(...)
"""
if self.is_active:
G_LOGGER.warning(
"{:35} | Already active; will not activate again. If you really want to "
"activate this runner again, call activate_impl() directly".format(self.name)
)
return
if config.INTERNAL_CORRECTNESS_CHECKS:
self._pre_activate_runner_state = copy.copy(vars(self))
self.activate_impl()
self.is_active = True
def get_input_metadata_impl(self):
"""
Implementation for `get_input_metadata`. Derived classes should override this function
rather than `get_input_metadata`.
"""
raise NotImplementedError("BaseRunner is an abstract class")
@func.constantmethod
def get_input_metadata(self):
"""
Returns information about the inputs of the model.
Shapes here may include dynamic dimensions, represented by ``None``.
Must be called only after activate() and before deactivate().
Returns:
TensorMetadata: Input names, shapes, and data types.
"""
return self.get_input_metadata_impl()
# Implementation for runner inference. Derived classes should override this function
# rather than ``infer()``
def infer_impl(self, feed_dict):
raise NotImplementedError("BaseRunner is an abstract class")
def infer(self, feed_dict, check_inputs=True, *args, **kwargs):
"""
Runs inference using the provided feed_dict.
NOTE: Some runners may accept additional parameters in infer().
For details on these, see the documentation for their `infer_impl()` methods.
Args:
feed_dict (OrderedDict[str, numpy.ndarray]):
A mapping of input tensor names to corresponding input NumPy arrays.
check_inputs (bool):
Whether to check that the provided ``feed_dict`` includes the expected inputs
with the expected data types and shapes.
Disabling this may improve performance.
Defaults to True.
Returns:
OrderedDict[str, numpy.ndarray]:
A mapping of output tensor names to their corresponding NumPy arrays.
IMPORTANT: Runners may reuse these output buffers. Thus, if you need to save
outputs from multiple inferences, you should make a copy with ``copy.deepcopy(outputs)``.
"""
if not self.is_active:
G_LOGGER.critical("{:35} | Must be activated prior to calling infer()".format(self.name))
if check_inputs:
input_metadata = self.get_input_metadata()
G_LOGGER.verbose("Runner input metadata is: {:}".format(input_metadata))
util.check_dict_contains(
feed_dict, input_metadata.keys(), dict_name="feed_dict", log_func=G_LOGGER.critical
)
for name, inp in feed_dict.items():
meta = input_metadata[name]
if not np.issubdtype(inp.dtype, meta.dtype):
G_LOGGER.critical(
"Input tensor: {:} | Received unexpected dtype: {:}.\n"
"Note: Expected type: {:}".format(name, inp.dtype, meta.dtype)
)
if not util.is_valid_shape_override(inp.shape, meta.shape):
G_LOGGER.critical(
"Input tensor: {:} | Received incompatible shape: {:}.\n"
"Note: Expected a shape compatible with: {:}".format(name, inp.shape, meta.shape)
)
return self.infer_impl(feed_dict, *args, **kwargs)
@func.constantmethod
def last_inference_time(self):
"""
Returns the total inference time required during the last call to ``infer()``.
Returns:
float: The time in seconds, or None if runtime was not measured by the runner.
"""
if self.inference_time is None:
G_LOGGER.warning(
"{:35} | inference_time was not set. Inference time will be incorrect!"
"To correctly compare runtimes, please set the inference_time property in the"
"infer() function".format(self.name),
mode=LogMode.ONCE,
)
return None
return self.inference_time
# Implementation for runner deactivation. Derived classes should override this function
# rather than ``deactivate()``.
def deactivate_impl(self):
pass
def deactivate(self):
"""
Deactivate the runner. For example, this may involve freeing CPU or GPU memory.
Generally, you should use a context manager instead of manually activating and deactivating.
For example:
::
with RunnerType(...) as runner:
runner.infer(...)
"""
if not self.is_active:
G_LOGGER.warning(
"{:35} | Not active; will not deactivate. If you really want to "
"deactivate this runner, call deactivate_impl() directly".format(self.name)
)
return
self.inference_time = None
self.is_active = None
self.deactivate_impl()
self.is_active = False
if config.INTERNAL_CORRECTNESS_CHECKS:
old_state = self._pre_activate_runner_state
del self._pre_activate_runner_state
if old_state != vars(self):
G_LOGGER.internal_error(
"Runner state was not reset after deactivation. "
"Note:\nOld state: {:}\nNew state: {:}".format(old_state, vars(self))
)
def __del__(self):
if self.is_active:
# __del__ is not guaranteed to be called, but when it is, this could be a useful warning.
print("[W] {:35} | Was activated but never deactivated. This could cause a memory leak!".format(self.name))
| TensorRT-master | tools/Polygraphy/polygraphy/backend/base/runner.py |
from polygraphy.backend.base.runner import *
from polygraphy.backend.base.loader import *
| TensorRT-master | tools/Polygraphy/polygraphy/backend/base/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import func, mod
@mod.export()
class BaseLoader(object):
"""
Base class for Polygraphy Loaders.
"""
def call_impl(self, *args, **kwargs):
"""
Implementation for ``__call__``. Derived classes should implement this
method rather than ``__call__``.
"""
raise NotImplementedError("BaseLoader is an abstract class")
@func.constantmethod
def __call__(self, *args, **kwargs):
"""
Invokes the loader by forwarding arguments to ``call_impl``.
Note: ``call_impl`` should *not* be called directly - use this function instead.
"""
__doc__ = self.call_impl.__doc__
return self.call_impl(*args, **kwargs)
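# Illustrative sketch (added for documentation; not part of the upstream module): a minimal
# BaseLoader subclass. Note that user code invokes the loader via ``__call__``, which forwards
# to ``call_impl``.
if __name__ == "__main__":
    class AddOne(BaseLoader):
        def __init__(self, value):
            self.value = value
        def call_impl(self):
            return self.value + 1
    load_one = AddOne(1)
    print(load_one())  # Prints: 2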
| TensorRT-master | tools/Polygraphy/polygraphy/backend/base/loader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.exception import *
mod.warn_deprecated("polygraphy.common.exception", "polygraphy.exception", remove_in="0.34.0")
| TensorRT-master | tools/Polygraphy/polygraphy/common/exception.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.constants import *
mod.warn_deprecated("polygraphy.common.constants", "polygraphy.constants", remove_in="0.34.0")
| TensorRT-master | tools/Polygraphy/polygraphy/common/constants.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from collections import OrderedDict
from polygraphy.logger.logger import G_LOGGER
from polygraphy import mod
util = mod.lazy_import("polygraphy.util")
def TypedDict(key_type_func, value_type_func):
"""
Returns a class (not an instance) that will provide a dictionary-like
interface with runtime type checks.
Note: The types are provided lazily via a callable to avoid unnecessary dependencies
on types from external packages at import-time.
Args:
key_type_func (Callable() -> type):
A callable that returns the expected key type.
value_type_func (Callable() -> type):
A callable that returns the expected value type.
"""
class Interface(object):
def __init__(self, dct=None):
self.dct = OrderedDict(util.default(dct, {}))
self.key_type = key_type_func()
self.value_type = value_type_func()
def _check_types(self, key, val):
if not isinstance(key, self.key_type):
G_LOGGER.critical(
"Unsupported key type in {:}. Key: {:} is type `{:}` but {:} expects type `{:}`".format(
self, repr(key), type(key).__name__, type(self).__name__, self.key_type.__name__
)
)
if not isinstance(val, self.value_type):
G_LOGGER.critical(
"Unsupported value type in {:}. Value: {:} for key: {:} is type `{:}` but {:} expects type `{:}`".format(
self, repr(val), repr(key), type(val).__name__, type(self).__name__, self.value_type.__name__
)
)
def keys(self):
return self.dct.keys()
def values(self):
return self.dct.values()
def items(self):
return self.dct.items()
def update(self, other):
for key, val in other.items():
self._check_types(key, val)
return self.dct.update(other)
def __contains__(self, key):
return key in self.dct
def __getitem__(self, key):
return self.dct[key]
def __setitem__(self, key, val):
self._check_types(key, val)
self.dct[key] = val
def __str__(self):
return str(self.dct)
def __repr__(self):
return repr(self.dct)
def __len__(self):
return len(self.dct)
def __eq__(self, other):
return self.dct == other.dct
def __iter__(self):
return self.dct.__iter__()
def __copy__(self):
new_dict = type(self)()
new_dict.__dict__.update(self.__dict__)
new_dict.dct = copy.copy(self.dct)
return new_dict
def __deepcopy__(self, memo):
new_dict = type(self)()
new_dict.__dict__.update(self.__dict__)
new_dict.dct = copy.deepcopy(self.dct)
return new_dict
return Interface
def TypedList(elem_type_func):
"""
Returns a class (not an instance) that will provide a list-like
interface with runtime type checks.
Note: The types are provided lazily via a callable to avoid unnecessary dependencies
on types from external packages at import-time.
Args:
elem_type_func (Callable() -> type):
A callable that returns the expected list-element type.
"""
class Interface(object):
def __init__(self, lst=None):
self.lst = util.default(lst, [])
self.elem_type = elem_type_func()
def _check_type(self, elem):
if not isinstance(elem, self.elem_type):
G_LOGGER.critical(
"Unsupported element type type in {:}. Element: {:} is type: {:} but type: {:} was expected".format(
type(self).__name__, repr(elem), type(elem).__name__, self.elem_type.__name__
)
)
def __contains__(self, key):
return key in self.lst
def __getitem__(self, index):
return self.lst[index]
def __setitem__(self, index, elem):
self._check_type(elem)
self.lst[index] = elem
def __str__(self):
return str(self.lst)
def __repr__(self):
return repr(self.lst)
def append(self, elem):
self._check_type(elem)
return self.lst.append(elem)
def extend(self, elems):
for elem in elems:
self._check_type(elem)
return self.lst.extend(elems)
def __iadd__(self, elems):
for elem in elems:
self._check_type(elem)
self.lst += elems
return self
def __len__(self):
return len(self.lst)
def __eq__(self, other):
return self.lst == other.lst
def __iter__(self):
return self.lst.__iter__()
def __copy__(self):
new_list = type(self)()
new_list.__dict__.update(self.__dict__)
new_list.lst = copy.copy(self.lst)
return new_list
def __deepcopy__(self, memo):
new_list = type(self)()
new_list.__dict__.update(self.__dict__)
new_list.lst = copy.deepcopy(self.lst)
return new_list
return Interface
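# Illustrative usage sketch (added for documentation; not part of the upstream module),
# demonstrating the runtime type checks provided by TypedDict and TypedList:
if __name__ == "__main__":
    StrToIntDict = TypedDict(lambda: str, lambda: int)
    dct = StrToIntDict()
    dct["answer"] = 42  # OK: the key is a str and the value is an int
    # dct[0] = "oops"   # Would log a critical error: key is not a str
    print(dct)  # OrderedDict([('answer', 42)])
    IntList = TypedList(lambda: int)
    lst = IntList()
    lst.append(1)
    lst.extend([2, 3])
    print(lst)  # [1, 2, 3]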
| TensorRT-master | tools/Polygraphy/polygraphy/common/interface.py |
from polygraphy.exception import *
from polygraphy.common.struct import *
| TensorRT-master | tools/Polygraphy/polygraphy/common/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.cuda import *
mod.warn_deprecated("polygraphy.common.cuda", "polygraphy.cuda", remove_in="0.34.0")
| TensorRT-master | tools/Polygraphy/polygraphy/common/cuda.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.func import *
mod.warn_deprecated("polygraphy.common.func", "polygraphy.func", remove_in="0.34.0")
| TensorRT-master | tools/Polygraphy/polygraphy/common/func.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.common.interface import TypedDict
np = mod.lazy_import("numpy")
class MetadataTuple(object):
def __init__(self, dtype, shape):
self.dtype = dtype
self.shape = shape
def __iter__(self):
yield from [self.dtype, self.shape]
def __repr__(self):
return "MetadataTuple({:}, {:})".format(self.dtype, self.shape)
def __str__(self):
ret = ""
meta_items = []
if self.dtype is not None:
meta_items.append("dtype={:}".format(np.dtype(self.dtype).name))
if self.shape is not None:
meta_items.append("shape={:}".format(tuple(self.shape)))
if meta_items:
ret += "[" + ", ".join(meta_items) + "]"
return ret
@mod.export()
class TensorMetadata(TypedDict(lambda: str, lambda: MetadataTuple)):
"""
An OrderedDict[str, MetadataTuple] that maps input names to their data types and shapes.
Shapes may include negative values, ``None``, or strings to indicate dynamic dimensions.
Example:
::
shape = tensor_meta["input0"].shape
dtype = tensor_meta["input0"].dtype
"""
@staticmethod
def from_feed_dict(feed_dict):
"""
Constructs a new TensorMetadata using information from the provided feed_dict.
Args:
feed_dict (OrderedDict[str, numpy.ndarray]):
A mapping of input tensor names to corresponding input NumPy arrays.
Returns:
TensorMetadata
"""
meta = TensorMetadata()
for name, arr in feed_dict.items():
meta.add(name, arr.dtype, arr.shape)
return meta
def add(self, name, dtype, shape):
"""
Convenience function for adding entries.
Args:
name (str): The name of the input.
dtype (numpy.dtype): The data type of the input.
shape (Sequence[Union[int, str]]]):
The shape of the input. Dynamic dimensions may
be indicated by negative values, ``None``, or a string.
Returns:
The newly added entry.
"""
self[name] = MetadataTuple(dtype, shape)
return self
def __repr__(self):
ret = "TensorMetadata()"
for name, (dtype, shape) in self.items():
ret += ".add('{:}', {:}, {:})".format(name, dtype, shape)
return ret
def __str__(self):
sep = ",\n "
elems = ["{:} {:}".format(name, meta_tuple).strip() for name, meta_tuple in self.items()]
return "{" + sep.join(elems) + "}"
| TensorRT-master | tools/Polygraphy/polygraphy/common/struct.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import functools
import io
import json
from collections import OrderedDict
from polygraphy import config, constants, mod
from polygraphy.logger import G_LOGGER
np = mod.lazy_import("numpy")
util = mod.lazy_import("polygraphy.util.util")
TYPE_STRING_PREFIX = "__polygraphy_encoded_"
def legacy_str_from_type(typ):
return TYPE_STRING_PREFIX + typ.__name__
def str_from_type(typ):
return typ.__name__
class BaseCustomImpl(object):
"""
Base class for Polygraphy's JSON encoder/decoder.
"""
@classmethod
def register(cls, typ):
"""
Decorator that registers JSON encoding/decoding functions for types.
For the documentation that follows, assume we have a class:
::
class Dummy(object):
def __init__(self, x):
self.x = x
========
Encoders
========
Encoder functions should accept instances of the specified type and
return dictionaries.
For example:
::
@Encoder.register(Dummy)
def encode(dummy):
return {"x": dummy.x}
To use the custom encoder, use the `to_json` helper:
::
d = Dummy(x=1)
d_json = to_json(d)
========
Decoders
========
Decoder functions should accept dictionaries, and return instances of the
type.
For example:
::
@Decoder.register(Dummy)
def decode(dct):
return Dummy(x=dct["x"])
To use the custom decoder, use the `from_json` helper:
::
from_json(d_json)
Args:
typ (type): The type of the class for which to register the function.
"""
def register_impl(func):
def add(key, val):
if key in cls.polygraphy_registered:
G_LOGGER.critical(
"Duplicate serialization function for type: {:}.\n"
"Note: Existing function: {:}, New function: {:}".format(
key, cls.polygraphy_registered[key], func
)
)
cls.polygraphy_registered[key] = val
if cls == Encoder:
def wrapped(obj):
dct = func(obj)
dct[constants.TYPE_MARKER] = str_from_type(typ)
return dct
add(typ, wrapped)
return wrapped
elif cls == Decoder:
def wrapped(dct):
if constants.TYPE_MARKER in dct:
del dct[constants.TYPE_MARKER]
type_name = legacy_str_from_type(typ)
if type_name in dct:
del dct[type_name]
return func(dct)
add(legacy_str_from_type(typ), wrapped)
add(str_from_type(typ), wrapped)
else:
G_LOGGER.critical("Cannot register for unrecognized class type: ")
return register_impl
@mod.export()
class Encoder(BaseCustomImpl, json.JSONEncoder):
"""
Polygraphy's custom JSON Encoder implementation.
"""
polygraphy_registered = {}
def default(self, o):
if type(o) in self.polygraphy_registered:
return self.polygraphy_registered[type(o)](o)
return super().default(o)
@mod.export()
class Decoder(BaseCustomImpl):
"""
Polygraphy's custom JSON Decoder implementation.
"""
polygraphy_registered = {}
def __call__(self, pairs):
dct = OrderedDict(pairs)
if config.INTERNAL_CORRECTNESS_CHECKS:
custom_type_keys = [key for key in dct if key.startswith(TYPE_STRING_PREFIX)]
if custom_type_keys and custom_type_keys[0] not in self.polygraphy_registered:
G_LOGGER.internal_error(
"Custom type has no decode function registered! " "Note: Encoded object is:\n{:}".format(dct)
)
# The encoder will insert special key-value pairs into dictionaries encoded from
# custom types. If we find one, then we know to decode using the corresponding custom
# type function.
type_name = dct.get(constants.TYPE_MARKER)
func = self.polygraphy_registered.get(type_name)
if func:
return func(dct)
for type_str, func in self.polygraphy_registered.items():
if type_str in dct and dct[type_str] == constants.LEGACY_TYPE_MARKER: # Found a custom type!
return func(dct)
return dct
NUMPY_REGISTRATION_SUCCESS = False
def try_register_numpy_json(func):
"""
Decorator that attempts to register JSON encode/decode methods
for numpy arrays if NumPy is available and the methods have not already been registered.
This needs to be attempted multiple times because numpy may become available in the
middle of execution - for example, if using dependency auto-installation.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
global NUMPY_REGISTRATION_SUCCESS
if not NUMPY_REGISTRATION_SUCCESS and mod.has_mod(np):
# We define this alongside load_json/save_json so that it is guaranteed to be
# imported before we need to encode/decode NumPy arrays.
@Encoder.register(np.ndarray)
def encode(array):
outfile = io.BytesIO()
np.save(outfile, array, allow_pickle=False)
outfile.seek(0)
data = base64.b64encode(outfile.read()).decode()
return {"array": data}
@Decoder.register(np.ndarray)
def decode(dct):
def load(mode="base64"):
if mode == "base64":
data = base64.b64decode(dct["array"].encode(), validate=True)
elif mode == "latin-1":
data = dct["array"].encode(mode)
else:
assert False, "Unsupported mode: {:}".format(mode)
infile = io.BytesIO(data)
return np.load(infile, allow_pickle=False)
try:
arr = load()
except:
arr = load("latin-1") # For backwards compatibility
if isinstance(arr, np.ndarray):
return arr
return list(arr.values())[0] # For backwards compatibility
NUMPY_REGISTRATION_SUCCESS = True
return func(*args, **kwargs)
return wrapped
@mod.export()
@try_register_numpy_json
def to_json(obj):
"""
Encode an object to JSON.
NOTE: For Polygraphy objects, you should use the ``to_json()`` method instead.
Returns:
str: A JSON representation of the object.
"""
return json.dumps(obj, cls=Encoder, indent=constants.TAB)
@mod.export()
@try_register_numpy_json
def from_json(src):
"""
Decode a JSON string to an object.
NOTE: For Polygraphy objects, you should use the ``from_json()`` method instead.
Args:
src (str):
The JSON representation of the object
Returns:
object: The decoded instance
"""
return json.loads(src, object_pairs_hook=Decoder())
@mod.export()
@try_register_numpy_json
def save_json(obj, dest, description=None):
"""
Encode an object as JSON and save it to a file.
NOTE: For Polygraphy objects, you should use the ``save()`` method instead.
Args:
obj (object): The object to save.
dest (Union[str, file-like]): The path or file-like object to save to.
"""
util.save_file(to_json(obj), dest, mode="w", description=description)
@mod.export()
@try_register_numpy_json
def load_json(src, description=None):
"""
Loads a file and decodes the JSON contents.
NOTE: For Polygraphy objects, you should use the ``load()`` method instead.
Args:
src (Union[str, file-like]): The path or file-like object to load from.
Returns:
object: The object, or `None` if nothing could be read.
"""
return from_json(util.load_file(src, mode="r", description=description))
@mod.export()
def add_json_methods(description=None):
"""
Decorator that adds 4 JSON helper methods to a class:
- to_json(): Convert to JSON string
- from_json(): Convert from JSON string
- save(): Convert to JSON and save to file
- load(): Load from file and convert from JSON
Args:
description (str):
A description of what is being saved or loaded.
"""
def add_json_methods_impl(cls):
# JSON methods
def check_decoded(obj):
if not isinstance(obj, cls):
G_LOGGER.critical(
"Provided JSON cannot be decoded into a {:}.\n"
"Note: JSON was decoded into a {:}:\n{:}".format(cls.__name__, type(obj), obj)
)
return obj
def _to_json_method(self):
"""
Encode this instance as a JSON object.
Returns:
str: A JSON representation of this instance.
"""
return to_json(self)
def _from_json_method(src):
return check_decoded(from_json(src))
_from_json_method.__doc__ = """
Decode a JSON object and create an instance of this class.
Args:
src (str):
The JSON representation of the object
Returns:
{cls}: The decoded instance
Raises:
PolygraphyException:
If the JSON cannot be decoded to an instance of {cls}
""".format(
cls=cls.__name__
)
cls.to_json = _to_json_method
cls.from_json = staticmethod(_from_json_method)
# Save/Load methods
def _save_method(self, dest):
"""
Encode this instance as a JSON object and save it to the specified path
or file-like object.
Args:
dest (Union[str, file-like]):
The path or file-like object to write to.
"""
save_json(self, dest, description=description)
def _load_method(src):
return check_decoded(load_json(src, description=description))
_load_method.__doc__ = """
Loads an instance of this class from a JSON file.
Args:
src (Union[str, file-like]): The path or file-like object to read from.
Returns:
{cls}: The decoded instance
Raises:
PolygraphyException:
If the JSON cannot be decoded to an instance of {cls}
""".format(
cls=cls.__name__
)
cls.save = _save_method
cls.load = staticmethod(_load_method)
return cls
return add_json_methods_impl
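# Illustrative sketch (added for documentation; not part of the upstream module), mirroring the
# example in the ``BaseCustomImpl.register`` docstring: register encode/decode functions for a
# custom type and round-trip an instance through JSON.
if __name__ == "__main__":
    class Dummy(object):
        def __init__(self, x):
            self.x = x
    @Encoder.register(Dummy)
    def encode(dummy):
        return {"x": dummy.x}
    @Decoder.register(Dummy)
    def decode(dct):
        return Dummy(x=dct["x"])
    d_json = to_json(Dummy(x=1))
    print(from_json(d_json).x)  # Prints: 1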
| TensorRT-master | tools/Polygraphy/polygraphy/json/serde.py |
from polygraphy.json.serde import *
| TensorRT-master | tools/Polygraphy/polygraphy/json/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
# Do not raise this exception manually. Instead, use G_LOGGER.critical().
@mod.export()
class PolygraphyException(Exception):
"""
An exception raised by Polygraphy.
"""
pass
# Do not raise this exception manually. Instead, use G_LOGGER.internal_error().
@mod.export()
class PolygraphyInternalException(Exception):
"""
An exception raised when a Polygraphy internal check is violated.
Polygraphy internal checks can be enabled by setting the ``POLYGRAPHY_INTERNAL_CORRECTNESS_CHECKS``
environment variable to ``1``.
This is *not* a child class of PolygraphyException because it
indicates a bug in Polygraphy itself.
"""
pass
| TensorRT-master | tools/Polygraphy/polygraphy/exception/exception.py |
from polygraphy.exception.exception import *
| TensorRT-master | tools/Polygraphy/polygraphy/exception/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import importlib
import os
import subprocess as sp
import sys
from polygraphy.mod import util as mod_util
# Tracks all of Polygraphy's lazy imports, excluding internal ones.
_all_external_lazy_imports = set()
# Sometimes the Python package name differs from the module name.
_PKG_NAME_FROM_MODULE = {
"tensorrt": "nvidia-tensorrt",
}
# Some packages need additional flags to install correctly.
_EXTRA_FLAGS_FOR_MODULE = {
"tensorrt": ["--extra-index-url=https://pypi.ngc.nvidia.com"],
"onnx_graphsurgeon": ["--extra-index-url=https://pypi.ngc.nvidia.com"],
}
LATEST_VERSION = "latest"
"""Indicates that the latest version of the package is preferred in lazy_import"""
def _version_ok(ver, preferred):
if preferred == LATEST_VERSION:
return False
pref_ver = preferred.lstrip("<=>").strip()
cond = preferred.rstrip(pref_ver).strip()
check = {
"==": lambda x, y: x == y,
">=": lambda x, y: x >= y,
">": lambda x, y: x > y,
"<=": lambda x, y: x <= y,
"<": lambda x, y: x < y,
}[cond]
return check(mod_util.version(ver), mod_util.version(pref_ver))
def lazy_import(name, log=True, version=None):
"""
Lazily import a module.
If the POLYGRAPHY_AUTOINSTALL_DEPS environment variable is set to 1,
missing modules are automatically installed, and existing modules may be
upgraded if newer versions are required.
Args:
name (str):
The name of the module.
log (bool):
Whether to log information about the module.
version (str):
The preferred version of the package, formatted as a version string.
For example, ``'>=0.5.0'`` or ``'==1.8.0'``. Use ``LATEST_VERSION`` to
indicate that the latest version of the package is preferred.
Returns:
LazyModule:
A lazily loaded module. When an attribute is first accessed,
the module will be imported.
"""
assert (
version is None or version == LATEST_VERSION or any(version.startswith(char) for char in ["=", ">", "<"])
), "version must be formatted as a version string!"
if "polygraphy" not in name:
_all_external_lazy_imports.add(name)
def import_mod():
from polygraphy import config
from polygraphy.logger import G_LOGGER, LogMode
def install_mod(raise_error=True):
modname = name.split(".")[0]
pkg = _PKG_NAME_FROM_MODULE.get(modname, modname)
extra_flags = _EXTRA_FLAGS_FOR_MODULE.get(modname, [])
if version == LATEST_VERSION:
extra_flags.append("--upgrade")
elif version is not None:
pkg += version
cmd = config.INSTALL_CMD + [pkg] + extra_flags
G_LOGGER.info("Running installation command: {:}".format(" ".join(cmd)))
status = sp.run(cmd)
if status.returncode != 0:
log_func = G_LOGGER.critical if raise_error else G_LOGGER.warning
log_func(
"Could not automatically install required module: {:}. Please install it manually.".format(pkg)
)
mod = importlib.import_module(name)
return mod
mod = None
try:
mod = importlib.import_module(name)
except ImportError:
if config.AUTOINSTALL_DEPS:
G_LOGGER.info("Module: '{:}' is required, but not installed. Attempting to install now.".format(name))
mod = install_mod()
else:
G_LOGGER.critical(
"Module: '{:}' is required but could not be imported.\n"
"You can try setting POLYGRAPHY_AUTOINSTALL_DEPS=1 in your environment variables "
"to allow Polygraphy to automatically install missing modules.\n"
"Note that this may cause existing modules to be overwritten - hence, it may be "
"desirable to use a Python virtual environment or container. ".format(name)
)
# Auto-upgrade if necessary
if version is not None and hasattr(mod, "__version__") and not _version_ok(mod.__version__, version):
if config.AUTOINSTALL_DEPS:
G_LOGGER.info(
"Note: Module: '{name}' version '{cur_ver}' is installed, but version '{rec_ver}' is recommended.\n"
"Attempting to upgrade now.".format(name=name, cur_ver=mod.__version__, rec_ver=version)
)
mod = install_mod(raise_error=False) # We can try to use the other version if install fails.
elif version != LATEST_VERSION:
G_LOGGER.warning(
"Module: '{name}' version '{cur_ver}' is installed, but version '{rec_ver}' is recommended.\n"
"Consider installing the recommended version or setting POLYGRAPHY_AUTOINSTALL_DEPS=1 in your "
"environment variables to do so automatically. ".format(
name=name, cur_ver=mod.__version__, rec_ver=version
),
mode=LogMode.ONCE,
)
if log:
G_LOGGER.module_info(mod)
return mod
class LazyModule(object):
def __getattr__(self, name):
self = import_mod()
return getattr(self, name)
def __setattr__(self, name, value):
self = import_mod()
return setattr(self, name, value)
return LazyModule()
def has_mod(lazy_mod, with_attr="__version__"):
"""
Checks whether a module is available.
Args:
lazy_mod (LazyModule):
A lazy module, like that returned by ``lazy_import``.
with_attr (str):
The name of an attribute to check for.
This helps distinguish mock modules from real ones.
"""
try:
getattr(lazy_mod, with_attr)
except:
return False
return True
def import_from_script(path, name):
"""
Imports a specified symbol from a Python script.
Args:
path (str): A path to the Python script. The path must include a '.py' extension.
name (str): The name of the symbol to import from the script.
Returns:
object: The loaded symbol.
"""
from polygraphy.logger import G_LOGGER
dir = os.path.dirname(path)
modname = os.path.splitext(os.path.basename(path))[0]
sys.path.insert(0, dir)
with contextlib.ExitStack() as stack:
def reset_sys_path():
del sys.path[0]
stack.callback(reset_sys_path)
try:
mod = importlib.import_module(modname)
return getattr(mod, name)
except Exception as err:
ext = os.path.splitext(path)[1]
err_msg = "Could not import symbol: {:} from script: {:}".format(name, path)
if ext != ".py":
err_msg += "\nThis could be because the extension of the file is not '.py'. Note: The extension is: {:}".format(
ext
)
err_msg += "\nNote: Error was: {:}".format(err)
G_LOGGER.critical(err_msg)
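# Illustrative usage sketch (added for documentation; not part of the upstream module):
if __name__ == "__main__":
    # The real import is deferred until the first attribute access on the returned LazyModule.
    np_lazy = lazy_import("numpy")
    if has_mod(np_lazy):  # Checks availability without raising
        print(np_lazy.zeros(3))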
| TensorRT-master | tools/Polygraphy/polygraphy/mod/importer.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def version(version_str):
return tuple([int(num) for num in version_str.split(".")])
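# Illustrative sketch (added for documentation; not part of the upstream module): version strings
# become tuples of integers, so they can be compared element-wise.
if __name__ == "__main__":
    assert version("1.2.3") == (1, 2, 3)
    assert version("0.33.0") > version("0.32.1")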
| TensorRT-master | tools/Polygraphy/polygraphy/mod/util.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import sys
import warnings
from textwrap import dedent
import polygraphy
from polygraphy import config
from polygraphy.logger import G_LOGGER
from polygraphy.mod.util import version
def _add_to_all(symbol, module):
if hasattr(module, "__all__"):
module.__all__.append(symbol)
else:
module.__all__ = [symbol]
def _define_in_module(name, symbol, module):
assert name not in vars(module), "This symbol is already defined!"
vars(module)[name] = symbol
_add_to_all(name, module)
def export(funcify=False, func_name=None):
"""
Decorator that exports a symbol into the ``__all__`` attribute of
the caller's module. This makes the symbol visible in a ``*`` import
(e.g. ``from module import *``) and hides other symbols unless they are
also present in ``__all__``.
Args:
funcify (bool):
Whether to create and export a function that will call a decorated Polygraphy loader.
The decorated type *must* be a subclass of ``BaseLoader`` if ``funcify=True``.
This is useful to provide convenient short-hands to immediately evaluate loaders.
For example:
::
@mod.export(funcify=True)
class SuperCoolModelFromPath(BaseLoader):
def __init__(self, init_params):
...
def call_impl(self, call_params):
...
# We can now magically access an immediately evaluated functional
# variant of the loader:
model = super_cool_model_from_path(init_params, call_params)
# Which is equivalent to:
load_model = SuperCoolModelFromPath(init_params)
model = load_model(call_params)
The signature of the generated function is a combination of the signatures
of ``__init__`` and ``call_impl``. Specifically, parameters without defaults will
precede those with defaults, and ``__init__`` parameters will precede ``call_impl``
parameters. Special parameters like ``*args`` and ``**kwargs`` will always be the last
parameters in the generated signature if they are present in the loader method signatures.
The return value(s) will always come from ``call_impl``.
For example:
::
# With __init__ signature:
def __init__(a, b=0) -> None:
# And call_impl signature:
def call_impl(c, d=0) -> z:
# The generated function will have a signature:
def generated(a, c, b=0, d=0) -> z:
func_name (str):
If funcify is True, this controls the name of the generated function.
By default, the exported function will use the same name as the loader, but
``snake_case`` instead of ``PascalCase``.
"""
module = inspect.getmodule(sys._getframe(1))
# Find a method by walking the inheritance hierarchy of a type:
def find_method(symbol, method):
hierarchy = inspect.getmro(symbol)
for ancestor in hierarchy:
if method in vars(ancestor):
return vars(ancestor)[method]
assert False, "Could not find method: {:} in the inheritance hierarcy of: {:}".format(method, symbol)
def export_impl(func_or_cls):
_add_to_all(func_or_cls.__name__, module)
if funcify:
# We only support funcify-ing BaseLoaders, and only if __init__ and call_impl
# have no overlapping parameters.
from polygraphy.backend.base import BaseLoader
assert inspect.isclass(func_or_cls), "Decorated type must be a loader to use funcify=True"
assert BaseLoader in inspect.getmro(
func_or_cls
), "Decorated type must derive from BaseLoader to use funcify=True"
loader = func_or_cls
def get_params(method):
return [
p
for p in inspect.signature(find_method(func_or_cls, method)).parameters.values()
if p.name != "self"
]
init_params = get_params("__init__")
call_impl_params = get_params("call_impl")
def param_names(params):
return list(str(p).partition("=")[0] for p in params)
assert (set(param_names(call_impl_params)) - set(param_names(init_params))) == set(
param_names(call_impl_params)
), "Cannot funcify a type where call_impl and __init__ have the same argument names!"
# Dynamically generate a function with the right signature.
# To generate the signature, we use the init and call_impl arguments,
# but move required arguments (i.e. without default values) to the front.
def is_special(param):
return "*" in str(param)
def has_default(param): # Non special arguments that have default values
return "=" in str(param)
def build_arg_list(should_include):
arg_list = [str(p) for p in init_params if should_include(p)]
arg_list += [str(p) for p in call_impl_params if should_include(p)]
return arg_list
non_default_args = build_arg_list(should_include=lambda p: not is_special(p) and not has_default(p))
default_args = build_arg_list(should_include=lambda p: not is_special(p) and has_default(p))
special_args = build_arg_list(should_include=is_special)
signature = ", ".join(non_default_args + default_args + special_args)
init_args = ", ".join(param_names(init_params))
call_impl_args = ", ".join(param_names(call_impl_params))
func_code = dedent(
"""
def func_impl({signature}):
return loader_binding({init_args})({call_impl_args})
func_var = func_impl
""".format(
signature=signature, init_args=init_args, call_impl_args=call_impl_args
)
)
exec(
func_code, {"loader_binding": loader}, locals()
) # Need to bind the loader this way, or it won't be accessible from func_code.
func = locals()["func_var"]
# Next we setup the docstring so that it is a combination of the __init__
# and call_impl docstrings.
func.__doc__ = "Immediately evaluated functional variant of :class:`{}` .\n".format(loader.__name__)
def try_add_method_doc(method):
call_impl = find_method(loader, method)
if call_impl.__doc__:
func.__doc__ += dedent(call_impl.__doc__)
try_add_method_doc("__init__")
try_add_method_doc("call_impl")
# Now that the function has been defined, we just need to add it into the module's
# __dict__ so it is accessible like a normal symbol.
def pascal_to_snake(name):
return "".join("_{:}".format(c.lower()) if c.isupper() else c for c in name).lstrip("_")
nonlocal func_name
func_name = func_name or pascal_to_snake(loader.__name__)
_define_in_module(func_name, func, module)
# We don't actually want to modify the decorated object.
return func_or_cls
return export_impl
def warn_deprecated(name, use_instead, remove_in, module_name=None):
if config.INTERNAL_CORRECTNESS_CHECKS and version(polygraphy.__version__) >= version(remove_in):
G_LOGGER.internal_error("{:} should have been removed in version: {:}".format(name, remove_in))
full_obj_name = "{:}.{:}".format(module_name, name) if module_name else name
warnings.warn(
"{:} is deprecated and will be removed in Polygraphy {:}. "
"Use {:} instead.".format(full_obj_name, remove_in, use_instead),
DeprecationWarning,
stacklevel=3,
)
def deprecate(remove_in, use_instead, module_name=None, name=None):
"""
Decorator that marks a function or class as deprecated.
When the function or class is used, a warning will be issued.
Args:
remove_in (str):
The version in which the decorated type will be removed.
use_instead (str):
The function or class to use instead.
module_name (str):
The name of the containing module. This will be used to
generate more informative warnings.
Defaults to None.
name (str):
The name of the object being deprecated.
If not provided, this is automatically determined based on the decorated type.
Defaults to None.
"""
def deprecate_impl(obj):
if config.INTERNAL_CORRECTNESS_CHECKS and version(polygraphy.__version__) >= version(remove_in):
G_LOGGER.internal_error("{:} should have been removed in version: {:}".format(obj, remove_in))
nonlocal name
name = name or obj.__name__
if inspect.ismodule(obj):
class DeprecatedModule(object):
def __getattr__(self, attr_name):
warn_deprecated(name, use_instead, remove_in, module_name)
self = obj
return getattr(self, attr_name)
def __setattr__(self, attr_name, value):
warn_deprecated(name, use_instead, remove_in, module_name)
self = obj
return setattr(self, attr_name, value)
DeprecatedModule.__doc__ = "Deprecated: Use {:} instead".format(use_instead)
return DeprecatedModule()
elif inspect.isclass(obj):
class Deprecated(obj):
def __init__(self, *args, **kwargs):
warn_deprecated(name, use_instead, remove_in, module_name)
super().__init__(*args, **kwargs)
Deprecated.__doc__ = "Deprecated: Use {:} instead".format(use_instead)
return Deprecated
elif inspect.isfunction(obj):
def wrapped(*args, **kwargs):
warn_deprecated(name, use_instead, remove_in, module_name)
return obj(*args, **kwargs)
wrapped.__doc__ = "Deprecated: Use {:} instead".format(use_instead)
return wrapped
else:
G_LOGGER.internal_error("deprecate is not implemented for: {:}".format(obj))
return deprecate_impl
def export_deprecated_alias(name, remove_in, use_instead=None):
"""
Decorator that creates and exports a deprecated alias for
the decorated class or function.
The alias will behave like the decorated type, except it will
issue a deprecation warning when used.
To create a deprecated alias for an entire module, invoke the
function manually within the module like so:
::
mod.export_deprecated_alias("old_mod_name", remove_in="0.0.0")(sys.modules[__name__])
Args:
name (str):
The name of the deprecated alias.
remove_in (str):
The version, as a string, in which the deprecated alias will be removed.
use_instead (str):
The name of the function, class, or module to use instead.
If this is ``None``, the new name will be automatically determined.
Defaults to None.
"""
module = inspect.getmodule(sys._getframe(1))
def export_deprecated_alias_impl(obj):
new_obj = deprecate(remove_in, use_instead=use_instead or obj.__name__, module_name=module.__name__, name=name)(
obj
)
_define_in_module(name, new_obj, module)
_add_to_all(name, module)
return obj
return export_deprecated_alias_impl
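# Illustrative sketch (added for documentation; not part of the upstream module): deprecating a
# standalone function with ``deprecate``.
if __name__ == "__main__":
    @deprecate(remove_in="9999.0.0", use_instead="new_func", module_name="example_module")
    def old_func(x):
        return x * 2
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        print(old_func(2))  # Prints: 4 (and records a DeprecationWarning)
        assert any(issubclass(w.category, DeprecationWarning) for w in caught)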
| TensorRT-master | tools/Polygraphy/polygraphy/mod/exporter.py |
from polygraphy.mod.importer import *
from polygraphy.mod.exporter import *
from polygraphy.mod.util import version
| TensorRT-master | tools/Polygraphy/polygraphy/mod/__init__.py |
from polygraphy.func.func import *
| TensorRT-master | tools/Polygraphy/polygraphy/func/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import functools
import inspect
from polygraphy import config, mod
from polygraphy.logger import G_LOGGER
def make_iterable(obj):
return obj if type(obj) == tuple else (obj,)
@mod.export()
def extend(extend_func):
"""
A decorator that uses the function it decorates to extend the function
provided as a parameter.
This is best illustrated with an example:
::
def x(a0, a1, a2):
rv0 = [a0, a1, a2]
rv1 = None
return rv0, rv1
@extend(x)
def y(rv0, rv1):
rv0.append(-1)
# We can now call `y` as if it were `x`, and we will receive
# the return values from `x` after any modifications by `y`
rv0, rv1 = y(1, 2, 3)
assert rv0 == [1, 2, 3, -1]
assert rv1 is None
In this case, ``extend`` is essentially syntactic sugar for:
::
def y(a0, a1, a2):
rv0, rv1 = x(a0, a1, a2)
# Body of `y` from previous section
rv0.append(-1)
return rv0, rv1
If ``y`` does not return anything, or returns ``None``, then ``extend`` will
ensure that the return value of ``x`` is forwarded to the caller.
This means that ``y`` will provide exactly the same interface as ``x``.
If `y` returns something other than ``None``, then its return value will be
provided to the caller, and the return value of ``x`` will be discarded.
NOTE: This function will automatically unpack tuples returned by the function
being extended. Thus, the following implementation of ``x`` would behave just like
the one mentioned above:
::
def x(a0, a1, a2):
ret = (rv0, rv1)
return ret # Tuple will be unpacked, and `y` still sees 2 parameters
Args:
extend_func (Callable): A callable to extend.
"""
def extend_decorator(func):
@functools.wraps(func)
def extended_func(*args, **kwargs):
extend_func_retval = extend_func(*args, **kwargs)
extend_func_ret_tuple = make_iterable(extend_func_retval)
func_args = inspect.signature(func).parameters
# Special case for when the extended function does not return anything
if len(func_args) == 0 and len(extend_func_ret_tuple) == 1 and extend_func_ret_tuple[0] is None:
func_retval = func()
elif len(extend_func_ret_tuple) == len(func_args):
func_retval = func(*extend_func_ret_tuple)
else:
def try_get_name(fn):
try:
return fn.__name__
except:
return fn
G_LOGGER.critical(
"Function: {:} accepts {:} parameter(s), but "
"needs to accept {:} parameter(s) from: {:} instead.\nNote: Parameters should be: {:}".format(
try_get_name(func),
len(func_args),
len(extend_func_ret_tuple),
try_get_name(extend_func),
tuple(map(type, extend_func_ret_tuple)),
)
)
if func_retval is not None:
return func_retval
return extend_func_retval
return extended_func
return extend_decorator
@mod.export()
def constantmethod(func):
"""
A decorator that denotes constant methods.
NOTE: This decorator does nothing if the POLYGRAPHY_INTERNAL_CORRECTNESS_CHECKS environment variable is not set to `1`
Example:
::
class Dummy(object):
def __init__(self):
self.x = 1
@func.constantmethod
def modify_x(self):
self.x = 2
d = Dummy()
d.modify_x() # This will fail!
This provides only minimal protection against accidental mutation of instance attributes.
For example, if a class includes references (e.g. a numpy array member), this function cannot
ensure that the contents of that member (e.g. the values in a numpy array) will remain unchanged.
"""
if not config.INTERNAL_CORRECTNESS_CHECKS:
return func
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
old_dict = copy.copy(vars(self))
ret = None
try:
ret = func(self, *args, **kwargs)
finally:
if vars(self) != old_dict:
G_LOGGER.internal_error(
"{:} was mutated in a constant method! Note:\nOld state: {:}\nNew state: {:}".format(
self, old_dict, vars(self)
)
)
return ret
return wrapper
| TensorRT-master | tools/Polygraphy/polygraphy/func/func.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
import os
def is_standalone():
return os.environ.get("STANDALONE") == "1"
def is_dla():
return os.environ.get("ENABLE_DLA") == "1"
def get_requirements():
def get_version_range():
def get_vers(var):
vers = os.environ.get(var).replace("cuda-", "")
major, minor = map(int, vers.split("."))
return major, minor
cuda_major, _ = get_vers("CUDA")
return "-cu{cuda_major}".format(cuda_major=cuda_major)
if is_standalone():
return [
"nvidia-cuda-runtime" + get_version_range(),
"nvidia-cudnn" + get_version_range(),
"nvidia-cublas" + get_version_range(),
]
return []
name = "tensorrt"
if is_standalone():
name = "nvidia-{:}".format(name)
# Only standalone wheels need to be disambiguated. Otherwise, the entire tar/deb/rpm is DLA/non-DLA.
if is_dla():
name += "-dla"
setup(
name=name,
version="##TENSORRT_VERSION##",
description="A high performance deep learning inference library",
long_description="A high performance deep learning inference library",
author="NVIDIA",
license="Proprietary",
classifiers=[
"License :: Other/Proprietary License",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
],
packages=find_packages(),
install_requires=get_requirements(),
extras_require={"numpy": "numpy"},
package_data={"tensorrt": ["*.so*", "*.pyd", "*.pdb"]},
include_package_data=True,
zip_safe=True,
)
| TensorRT-master | python/packaging/setup.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import glob
import os
import sys
import warnings
def try_load(library):
try:
ctypes.CDLL(library)
except OSError:
pass
# Try loading all packaged libraries. This is a nop if there are no libraries packaged.
CURDIR = os.path.realpath(os.path.dirname(__file__))
for lib in glob.iglob(os.path.join(CURDIR, "*.so*")):
try_load(lib)
# On Windows, we need to manually open the TensorRT libraries - otherwise we are unable to
# load the bindings.
def find_lib(name):
paths = os.environ["PATH"].split(os.path.pathsep)
for path in paths:
libpath = os.path.join(path, name)
if os.path.isfile(libpath):
return libpath
raise FileNotFoundError(
"Could not find: {:}. Is it on your PATH?\nNote: Paths searched were:\n{:}".format(name, paths)
)
if sys.platform.startswith("win"):
# Order matters here because of dependencies
LIBRARIES = [
"nvinfer.dll",
"cublas64_##CUDA_MAJOR##.dll",
"cublasLt64_##CUDA_MAJOR##.dll",
"cudnn64_##CUDNN_MAJOR##.dll",
"nvinfer_plugin.dll",
"nvonnxparser.dll",
"nvparsers.dll",
]
for lib in LIBRARIES:
ctypes.CDLL(find_lib(lib))
from .tensorrt import *
__version__ = "##TENSORRT_VERSION##"
# Provides Python's `with` syntax
def common_enter(this):
warnings.warn(
"Context managers for TensorRT types are deprecated. "
"Memory will be freed automatically when the reference count reaches 0.",
DeprecationWarning,
)
return this
def common_exit(this, exc_type, exc_value, traceback):
"""
Context managers are deprecated and have no effect. Objects are automatically freed when
the reference count reaches 0.
"""
pass
# Logger does not have a destructor.
ILogger.__enter__ = common_enter
ILogger.__exit__ = lambda this, exc_type, exc_value, traceback: None
Builder.__enter__ = common_enter
Builder.__exit__ = common_exit
ICudaEngine.__enter__ = common_enter
ICudaEngine.__exit__ = common_exit
IExecutionContext.__enter__ = common_enter
IExecutionContext.__exit__ = common_exit
Runtime.__enter__ = common_enter
Runtime.__exit__ = common_exit
INetworkDefinition.__enter__ = common_enter
INetworkDefinition.__exit__ = common_exit
UffParser.__enter__ = common_enter
UffParser.__exit__ = common_exit
CaffeParser.__enter__ = common_enter
CaffeParser.__exit__ = common_exit
OnnxParser.__enter__ = common_enter
OnnxParser.__exit__ = common_exit
IHostMemory.__enter__ = common_enter
IHostMemory.__exit__ = common_exit
Refitter.__enter__ = common_enter
Refitter.__exit__ = common_exit
IBuilderConfig.__enter__ = common_enter
IBuilderConfig.__exit__ = common_exit
# Add logger severity into the default implementation to preserve backwards compatibility.
Logger.Severity = ILogger.Severity
for attr, value in ILogger.Severity.__members__.items():
setattr(Logger, attr, value)
# Computes the volume of an iterable.
def volume(iterable):
"""
Computes the volume of an iterable.
:arg iterable: Any python iterable, including a :class:`Dims` object.
:returns: The volume of the iterable. This will return 1 for empty iterables, as a scalar has an empty shape and the volume of a tensor with empty shape is 1.
"""
vol = 1
for elem in iterable:
vol *= elem
return vol
# Converts a TensorRT datatype to the equivalent numpy type.
def nptype(trt_type):
"""
Returns the numpy-equivalent of a TensorRT :class:`DataType` .
:arg trt_type: The TensorRT data type to convert.
:returns: The equivalent numpy type.
"""
import numpy as np
mapping = {
float32: np.float32,
float16: np.float16,
int8: np.int8,
int32: np.int32,
bool: np.bool,
}
if trt_type in mapping:
return mapping[trt_type]
raise TypeError("Could not resolve TensorRT datatype to an equivalent numpy datatype.")
# Add a numpy-like itemsize property to the datatype.
def _itemsize(trt_type):
"""
Returns the size in bytes of this :class:`DataType` .
:arg trt_type: The TensorRT data type.
:returns: The size of the type.
"""
mapping = {
float32: 4,
float16: 2,
int8: 1,
int32: 4,
bool: 1,
}
if trt_type in mapping:
return mapping[trt_type]
DataType.itemsize = property(lambda this: _itemsize(this))
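# Illustrative usage sketch (added for documentation; assumes the TensorRT bindings are installed):
#
#   import tensorrt as trt
#   print(trt.volume((2, 3, 4)))    # 24
#   print(trt.nptype(trt.float32))  # <class 'numpy.float32'>
#   print(trt.float32.itemsize)     # 4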
| TensorRT-master | python/packaging/tensorrt/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from PIL import Image
from io import BytesIO
import requests
output_image="input.ppm"
# Read sample image input and save it in ppm format
print("Exporting ppm image {}".format(output_image))
response = requests.get("https://pytorch.org/assets/images/deeplab1.png")
with Image.open(BytesIO(response.content)) as img:
ppm = Image.new("RGB", img.size, (255, 255, 255))
ppm.paste(img, mask=img.split()[3])
ppm.save(output_image)
import torch
import torch.nn as nn
output_onnx="fcn-resnet101.onnx"
# FC-ResNet101 pretrained model from torch-hub extended with argmax layer
class FCN_ResNet101(nn.Module):
def __init__(self):
super(FCN_ResNet101, self).__init__()
self.model = torch.hub.load('pytorch/vision:v0.6.0', 'fcn_resnet101', pretrained=True)
def forward(self, inputs):
x = self.model(inputs)['out']
x = x.argmax(1, keepdim=True)
return x
model = FCN_ResNet101()
model.eval()
# Generate input tensor with random values
input_tensor = torch.rand(4, 3, 224, 224)
# Export torch model to ONNX
print("Exporting ONNX model {}".format(output_onnx))
torch.onnx.export(model, input_tensor, output_onnx,
opset_version=12,
do_constant_folding=True,
input_names=["input"],
output_names=["output"],
dynamic_axes={"input": {0: "batch", 2: "height", 3: "width"},
"output": {0: "batch", 2: "height", 3: "width"}},
verbose=False)
| TensorRT-master | quickstart/SemanticSegmentation/export.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorflow as tf
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
# For ONNX:
class ONNXClassifierWrapper():
def __init__(self, file, num_classes, target_dtype = np.float32):
self.target_dtype = target_dtype
self.num_classes = num_classes
self.load(file)
self.stream = None
def load(self, file):
f = open(file, "rb")
runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))
engine = runtime.deserialize_cuda_engine(f.read())
self.context = engine.create_execution_context()
def allocate_memory(self, batch):
self.output = np.empty(self.num_classes, dtype = self.target_dtype) # Need to set both input and output precisions to FP16 to fully enable FP16
# Allocate device memory
self.d_input = cuda.mem_alloc(1 * batch.nbytes)
self.d_output = cuda.mem_alloc(1 * self.output.nbytes)
self.bindings = [int(self.d_input), int(self.d_output)]
self.stream = cuda.Stream()
def predict(self, batch): # result gets copied into output
if self.stream is None:
self.allocate_memory(batch)
# Transfer input data to device
cuda.memcpy_htod_async(self.d_input, batch, self.stream)
# Execute model
self.context.execute_async_v2(self.bindings, self.stream.handle, None)
# Transfer predictions back
cuda.memcpy_dtoh_async(self.output, self.d_output, self.stream)
# Synchronize threads
self.stream.synchronize()
return self.output
def convert_onnx_to_engine(onnx_filename, engine_filename = None, max_batch_size = 32, max_workspace_size = 1 << 30, fp16_mode = True):
logger = trt.Logger(trt.Logger.WARNING)
with trt.Builder(logger) as builder, builder.create_network() as network, trt.OnnxParser(network, logger) as parser:
builder.max_workspace_size = max_workspace_size
builder.fp16_mode = fp16_mode
builder.max_batch_size = max_batch_size
print("Parsing ONNX file.")
with open(onnx_filename, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
print("Building TensorRT engine. This may take a few minutes.")
engine = builder.build_cuda_engine(network)
if engine_filename:
with open(engine_filename, 'wb') as f:
f.write(engine.serialize())
return engine, logger
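# Illustrative usage sketch (added for documentation; file names and shapes are hypothetical):
#
#   import numpy as np
#   engine, logger = convert_onnx_to_engine("resnet50.onnx", "resnet50.trt", fp16_mode=True)
#   wrapper = ONNXClassifierWrapper("resnet50.trt", num_classes=1000, target_dtype=np.float16)
#   dummy_batch = np.zeros((1, 224, 224, 3), dtype=np.float16)
#   predictions = wrapper.predict(dummy_batch)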
| TensorRT-master | quickstart/IntroNotebooks/onnx_helper.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.compiler.tensorrt import trt_convert as tf_trt
from tensorflow.python.saved_model import tag_constants
import tensorflow as tf
import tensorrt as trt
import numpy as np
precision_dict = {
"FP32": tf_trt.TrtPrecisionMode.FP32,
"FP16": tf_trt.TrtPrecisionMode.FP16,
"INT8": tf_trt.TrtPrecisionMode.INT8,
}
# For TF-TRT:
class OptimizedModel():
def __init__(self, saved_model_dir = None):
self.loaded_model_fn = None
if saved_model_dir is not None:
self.load_model(saved_model_dir)
def predict(self, input_data):
if self.loaded_model_fn is None:
raise(Exception("Haven't loaded a model"))
x = tf.constant(input_data.astype('float32'))
labeling = self.loaded_model_fn(x)
try:
preds = labeling['predictions'].numpy()
except:
try:
preds = labeling['probs'].numpy()
except:
try:
preds = labeling[next(iter(labeling.keys()))]
except:
raise(Exception("Failed to get predictions from saved model object"))
return preds
def load_model(self, saved_model_dir):
saved_model_loaded = tf.saved_model.load(saved_model_dir, tags=[tag_constants.SERVING])
wrapper_fp32 = saved_model_loaded.signatures['serving_default']
self.loaded_model_fn = wrapper_fp32
class ModelOptimizer():
def __init__(self, input_saved_model_dir, calibration_data=None):
self.input_saved_model_dir = input_saved_model_dir
self.calibration_data = None
self.loaded_model = None
if calibration_data is not None:
self.set_calibration_data(calibration_data)
def set_calibration_data(self, calibration_data):
def calibration_input_fn():
yield (tf.constant(calibration_data.astype('float32')), )
self.calibration_data = calibration_input_fn
def convert(self, output_saved_model_dir, precision="FP32", max_workspace_size_bytes=8000000000, **kwargs):
if precision == "INT8" and self.calibration_data is None:
raise(Exception("No calibration data set!"))
trt_precision = precision_dict[precision]
conversion_params = tf_trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(precision_mode=trt_precision,
max_workspace_size_bytes=max_workspace_size_bytes,
use_calibration= precision == "INT8")
converter = tf_trt.TrtGraphConverterV2(input_saved_model_dir=self.input_saved_model_dir,
conversion_params=conversion_params)
if precision == "INT8":
converter.convert(calibration_input_fn=self.calibration_data)
else:
converter.convert()
converter.save(output_saved_model_dir=output_saved_model_dir)
return OptimizedModel(output_saved_model_dir)
def predict(self, input_data):
if self.loaded_model is None:
self.load_default_model()
return self.loaded_model.predict(input_data)
def load_default_model(self):
self.loaded_model = tf.keras.models.load_model('resnet50_saved_model')
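# Illustrative usage sketch for ModelOptimizer. The directory names and the dummy
# batch are assumptions for demonstration, not part of this helper:
#
#   opt_model = ModelOptimizer("resnet50_saved_model")
#   fp16_model = opt_model.convert("resnet50_saved_model_TFTRT_FP16", precision="FP16")
#   preds = fp16_model.predict(np.zeros((1, 224, 224, 3), dtype=np.float32))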
| TensorRT-master | quickstart/IntroNotebooks/helper.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.compiler.tensorrt import trt_convert as tf_trt
from tensorflow.python.saved_model import tag_constants
import tensorflow as tf
import tensorrt as trt
import numpy as np
precision_dict = {
"FP32": tf_trt.TrtPrecisionMode.FP32,
"FP16": tf_trt.TrtPrecisionMode.FP16,
"INT8": tf_trt.TrtPrecisionMode.INT8,
}
# For TF-TRT:
class OptimizedModel():
def __init__(self, saved_model_dir = None):
self.loaded_model_fn = None
if saved_model_dir is not None:
self.load_model(saved_model_dir)
def predict(self, input_data):
if self.loaded_model_fn is None:
raise(Exception("Haven't loaded a model"))
x = tf.constant(input_data.astype('float32'))
labeling = self.loaded_model_fn(x)
try:
preds = labeling['predictions'].numpy()
except:
try:
preds = labeling['probs'].numpy()
except:
try:
preds = labeling[next(iter(labeling.keys()))]
except:
raise(Exception("Failed to get predictions from saved model object"))
return preds
def load_model(self, saved_model_dir):
saved_model_loaded = tf.saved_model.load(saved_model_dir, tags=[tag_constants.SERVING])
wrapper_fp32 = saved_model_loaded.signatures['serving_default']
self.loaded_model_fn = wrapper_fp32
class ModelOptimizer():
def __init__(self, input_saved_model_dir, calibration_data=None):
self.input_saved_model_dir = input_saved_model_dir
self.calibration_data = None
self.loaded_model = None
if calibration_data is not None:
self.set_calibration_data(calibration_data)
def set_calibration_data(self, calibration_data):
def calibration_input_fn():
yield (tf.constant(calibration_data.astype('float32')), )
self.calibration_data = calibration_input_fn
def convert(self, output_saved_model_dir, precision="FP32", max_workspace_size_bytes=8000000000, **kwargs):
if precision == "INT8" and self.calibration_data is None:
raise(Exception("No calibration data set!"))
trt_precision = precision_dict[precision]
conversion_params = tf_trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(precision_mode=trt_precision,
max_workspace_size_bytes=max_workspace_size_bytes,
use_calibration= precision == "INT8")
converter = tf_trt.TrtGraphConverterV2(input_saved_model_dir=self.input_saved_model_dir,
conversion_params=conversion_params)
if precision == "INT8":
converter.convert(calibration_input_fn=self.calibration_data)
else:
converter.convert()
converter.save(output_saved_model_dir=output_saved_model_dir)
return OptimizedModel(output_saved_model_dir)
def predict(self, input_data):
if self.loaded_model is None:
self.load_default_model()
return self.loaded_model.predict(input_data)
def load_default_model(self):
self.loaded_model = tf.keras.models.load_model('resnet50_saved_model')
| TensorRT-master | quickstart/IntroNotebooks/Additional Examples/helper.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import numpy as np
import os
import sys
import argparse
from copy import deepcopy
"""
Converts a checkpoint from the
https://github.com/tensorflow/nmt project.
The conversion was tested with TensorFlow 1.6.
"""
def chpt_to_dict_arrays_simple(file_name):
"""
Convert a checkpoint into a dictionary of numpy arrays
for later use in TensorRT NMT sample.
"""
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
saver = tf.train.import_meta_graph(file_name)
dir_name = os.path.dirname(os.path.abspath(file_name))
saver.restore(sess, tf.train.latest_checkpoint(dir_name))
params = {}
print("\nFound the following trainable variables:")
with sess.as_default():
variables = tf.trainable_variables()
for v in variables:
params[v.name] = v.eval(session=sess)
print("{0} {1}".format(v.name, params[v.name].shape))
# use default value
params["forget_bias"] = 1.0
return params
def chpt_to_dict_arrays():
"""
Convert a checkpoint into a dictionary of numpy arrays
for later use in TensorRT NMT sample.
git clone https://github.com/tensorflow/nmt.git
"""
sys.path.append("./nmt")
from nmt.nmt import add_arguments, create_hparams
from nmt import attention_model
from nmt import model_helper
from nmt.nmt import create_or_load_hparams
from nmt import utils
from nmt import model as nmt_model
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
default_hparams = create_hparams(FLAGS)
hparams = create_or_load_hparams(FLAGS.out_dir, default_hparams, FLAGS.hparams_path, save_hparams=False)
print(hparams)
model_creator = None
if not hparams.attention:
model_creator = nmt_model.Model
elif hparams.attention_architecture == "standard":
model_creator = attention_model.AttentionModel
else:
raise ValueError("Unknown model architecture")
infer_model = model_helper.create_infer_model(model_creator, hparams, scope=None)
params = {}
print("\nFound the following trainable variables:")
with tf.Session(graph=infer_model.graph, config=utils.misc_utils.get_config_proto()) as sess:
loaded_infer_model = model_helper.load_model(infer_model.model, FLAGS.ckpt, sess, "infer")
variables = tf.trainable_variables()
for v in variables:
params[v.name] = v.eval(session=sess)
print("{0} {1}".format(v.name, params[v.name].shape))
params["forget_bias"] = hparams.forget_bias
return params
def concatenate_layers(params):
"""Concatenate weights from multiple layers"""
input_dict_size = params[u"embeddings/encoder/embedding_encoder:0"].shape[0]
output_dict_size = params[u"embeddings/decoder/embedding_decoder:0"].shape[0]
print("Input dictionary size: {0}, Output dictionary size: {1}".format(input_dict_size, output_dict_size))
layers = 0
encoder_type = "unidirectional"
for key in params:
if "bidirectional_rnn" in key:
encoder_type = "bidirectional"
if "basic_lstm_cell" in key:
layers = layers + 1
# we have encoder, decoder, kernel and bias
layers = int(layers / 4)
print("Layers: {0}, Encoder type: {1}".format(layers, encoder_type))
data = {}
encoder_postfix = u"/basic_lstm_cell/"
kernel_alias = u"kernel:0"
bias_alias = u"bias:0"
# weights, concatenate all layers
# process encoder
if encoder_type == "bidirectional":
bi_layers = int(layers / 2)
if bi_layers == 1:
bifw_encoder_prefix = u"dynamic_seq2seq/encoder/bidirectional_rnn/fw/basic_lstm_cell/"
bibw_encoder_prefix = u"dynamic_seq2seq/encoder/bidirectional_rnn/bw/basic_lstm_cell/"
data["encrnnkernel"] = params[bifw_encoder_prefix + kernel_alias]
tmp_weights = params[bibw_encoder_prefix + kernel_alias]
data["encrnnkernel"] = np.concatenate((data["encrnnkernel"], tmp_weights), axis=0)
data["encrnnbias"] = params[bifw_encoder_prefix + bias_alias]
tmp_weights = params[bibw_encoder_prefix + bias_alias]
data["encrnnbias"] = np.concatenate((data["encrnnbias"], tmp_weights), axis=0)
else:
bifw_encoder_prefix = u"dynamic_seq2seq/encoder/bidirectional_rnn/fw/multi_rnn_cell/cell_"
bibw_encoder_prefix = u"dynamic_seq2seq/encoder/bidirectional_rnn/bw/multi_rnn_cell/cell_"
data["encrnnkernel"] = np.concatenate(
tuple(params[bifw_encoder_prefix + str(i) + encoder_postfix + kernel_alias] for i in range(bi_layers)),
axis=0,
)
tmp_weights = np.concatenate(
tuple(params[bibw_encoder_prefix + str(i) + encoder_postfix + kernel_alias] for i in range(bi_layers)),
axis=0,
)
data["encrnnkernel"] = np.concatenate((data["encrnnkernel"], tmp_weights), axis=0)
data["encrnnbias"] = np.concatenate(
tuple(params[bifw_encoder_prefix + str(i) + encoder_postfix + bias_alias] for i in range(bi_layers)),
axis=0,
)
tmp_weights = np.concatenate(
tuple(params[bibw_encoder_prefix + str(i) + encoder_postfix + bias_alias] for i in range(bi_layers)),
axis=0,
)
data["encrnnbias"] = np.concatenate((data["encrnnbias"], tmp_weights), axis=0)
else:
uni_encoder_prefix = u"dynamic_seq2seq/encoder/rnn/multi_rnn_cell/cell_"
data["encrnnkernel"] = np.concatenate(
tuple(params[uni_encoder_prefix + str(i) + encoder_postfix + kernel_alias] for i in range(layers)), axis=0
)
data["encrnnbias"] = np.concatenate(
tuple(params[uni_encoder_prefix + str(i) + encoder_postfix + bias_alias] for i in range(layers)), axis=0
)
data["encembed"] = params[u"embeddings/encoder/embedding_encoder:0"]
# process decoder
data["decembed"] = params[u"embeddings/decoder/embedding_decoder:0"]
data["decmemkernel"] = params[u"dynamic_seq2seq/decoder/memory_layer/kernel:0"]
data["decattkernel"] = params[u"dynamic_seq2seq/decoder/attention/attention_layer/kernel:0"]
data["decprojkernel"] = params[u"dynamic_seq2seq/decoder/output_projection/kernel:0"]
uni_decoder_prefix = u"dynamic_seq2seq/decoder/attention/multi_rnn_cell/cell_"
data["decrnnkernel"] = np.concatenate(
tuple(params[uni_decoder_prefix + str(i) + encoder_postfix + kernel_alias] for i in range(layers)), axis=0
)
data["decrnnbias"] = np.concatenate(
tuple(params[uni_decoder_prefix + str(i) + encoder_postfix + bias_alias] for i in range(layers)), axis=0
)
for key in data:
print("{0} shape: {1}".format(key, data[key].shape))
num_units = int(data["decrnnkernel"].shape[1] / 4)
encoder_type_int = 1 if encoder_type == "bidirectional" else 0
dimensions = {
"layers": layers,
"encoder_type": encoder_type_int,
"num_units": num_units,
"encembed_outputs": data["encembed"].shape[0],
"decembed_outputs": data["decembed"].shape[0],
}
return dimensions, data
def convert_rnn_kernel(weights, dimensions, is_decoder_rnn=False):
"""
In-place weights conversion.
TensorFlow weight parameters for BasicLSTMCell
are formatted as:
Each [WR][icfo] is hiddenSize sequential elements.
CellN Row 0: WiT, WcT, WfT, WoT
CellN Row 1: WiT, WcT, WfT, WoT
...
CellN RowM-1: WiT, WcT, WfT, WoT
CellN RowM+0: RiT, RcT, RfT, RoT
CellN RowM+1: RiT, RcT, RfT, RoT
...
CellNRow(M+P)-1: RiT, RcT, RfT, RoT
M - data size
P - projection size
TensorRT expects the format to be laid out in memory as:
CellN: Wf, Wi, Wc, Wo, Rf, Ri, Rc, Ro
For the purpose of implementing LSTMP all W and R weights become weights from W
CellN: Wf, Rf, Wi, Ri, Wc, Rc, Wo, Ro, Empty states
Update: alternative notation
Tensorflow documents gates' order in e.g.
https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/python/ops/rnn_cell_impl.py#L439
TF: i = input_gate, j = new_input (cell gate), f = forget_gate, o = output_gate - ijfo
Need to convert 'ijfo' to 'fijo'
"""
print("Starting shape: {0}".format(weights.shape))
num_units = dimensions["num_units"]
layers = dimensions["layers"]
new_weights = np.empty([0], dtype=np.float32)
# if is_decoder_rnn == False:
if False:
# we can use decoder path for both, but we leave it for now
input_size = num_units
# case encoder
# (layers * 2 * input_size, 4 * num_units) -> (layers, 2, input_size, 4, num_units))
weights = np.reshape(weights, (layers, 2, input_size, 4, num_units))
print("After reshape: {0}".format(weights.shape))
# reorder/transpose axis to match TensorRT format (layers, 2, 4, num_units, input_size)
weights = np.moveaxis(weights, [2, 3, 4], [4, 2, 3])
print("After moveaxis: {0}".format(weights.shape))
# then we reorder gates from Tensorflow's 'icfo' into TensorRT's 'fico' order
input_perm = [1, 2, 0, 3]
temp_weights = np.empty([layers, 2, 4, num_units, input_size], dtype=np.float32)
for i in range(4):
temp_weights[:, :, input_perm[i], :, :] = weights[:, :, i, :, :]
weights = deepcopy(temp_weights)
else:
offset = 0
for i in range(layers):
# first layer has shape (input_size + num_units, 4 * num_units)
# other layers (num_units + num_units, 4 * num_units)
input_size = 2 * num_units if i == 0 and is_decoder_rnn else num_units
temp_weights_w = np.empty([4, num_units, input_size], dtype=np.float32)
temp_weights_r = np.empty([4, num_units, num_units], dtype=np.float32)
layer_weights_w = np.reshape(weights[offset : (offset + input_size), :], (input_size, 4, num_units))
layer_weights_r = np.reshape(
weights[(offset + input_size) : (offset + input_size + num_units), :], (num_units, 4, num_units)
)
# reorder/transpose axis to match TensorRT format (layers, 2, 4, num_units, input_size)
layer_weights_w = np.moveaxis(layer_weights_w, [0, 1, 2], [2, 0, 1])
layer_weights_r = np.moveaxis(layer_weights_r, [0, 1, 2], [2, 0, 1])
# then we reorder gates from Tensorflow's 'icfo' into TensorRT's 'fico' order
input_perm = [1, 2, 0, 3]
for gate in range(4):
temp_weights_w[input_perm[gate], :, :] = layer_weights_w[gate, :, :]
temp_weights_r[input_perm[gate], :, :] = layer_weights_r[gate, :, :]
layer_weights_w = deepcopy(temp_weights_w.flatten())
layer_weights_r = deepcopy(temp_weights_r.flatten())
new_weights = np.concatenate((new_weights, layer_weights_w, layer_weights_r), axis=0)
offset = offset + input_size + num_units
return new_weights
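# Note on the gate permutation used above: with TensorFlow's gate order (i, c, f, o)
# and input_perm = [1, 2, 0, 3], gate i moves to slot 1, c to slot 2, f to slot 0,
# and o stays at slot 3, which yields TensorRT's expected (f, i, c, o) ordering.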
def convert_rnn_bias(weights, dimensions, forget_bias=1.0):
"""
TensorFlow bias parameters for BasicLSTMCell
are formatted as:
CellN: Bi, Bc, Bf, Bo
TensorRT expects the format to be:
CellN: Wf, Wi, Wc, Wo, Rf, Ri, Rc, Ro
Since Tensorflow already combines U and W,
we double the size and set all of U to zero.
"""
num_units = dimensions["num_units"]
layers = dimensions["layers"]
temp_weights = np.zeros([layers, 2 * 4, num_units], dtype=np.float32)
weights = np.reshape(weights, (layers, 4, num_units))
# then we reorder gates from Tensorflow's 'icfo' into TensorRT's 'fico' order
input_perm = [1, 2, 0, 3]
for i in range(4):
temp_weights[:, input_perm[i], :] = weights[:, i, :]
# Add a value to f bias to be consistent with the Tensorflow model.
print("Adding {0} to forget bias".format(forget_bias))
temp_weights[:, 0, :] = np.add(temp_weights[:, 0, :], forget_bias)
weights = deepcopy(temp_weights)
return weights
def convert_weights(dimensions, data, forget_bias=1.0):
"""Convert weights from Tensorflow to TensorRT format"""
print("Processing encoder RNN kernel")
data["encrnnkernel"] = convert_rnn_kernel(data["encrnnkernel"], dimensions, False)
print("Processing encoder RNN bias")
data["encrnnbias"] = convert_rnn_bias(data["encrnnbias"], dimensions, forget_bias=forget_bias)
print("Processing decoder RNN kernel")
data["decrnnkernel"] = convert_rnn_kernel(data["decrnnkernel"], dimensions, True)
print("Processing decoder RNN bias")
data["decrnnbias"] = convert_rnn_bias(data["decrnnbias"], dimensions, forget_bias=forget_bias)
return data
def save_layer_weights(data, list_keys, dims, footer_string, file_name):
"""
data - dictionary with string names as keys and
numpy weights as values
list_keys - list of dictionary keys to save
dims - list of int32_t values relevant to the layer
e.g. tensor dimensions sufficient to extract all the tensors
footer_string - marker placed at the end of file
file format: data -> meta_data -> footer
"""
data_type = data[list_keys[0]].dtype
# default precision is FP32
# The values should be compatible with DataType from NvInfer.h
data_prec = 1 if data_type == np.dtype("float16") else 0
meta_data = np.int32([data_prec] + dims)
meta_count = np.int32(meta_data.shape[0])
out_file = open(file_name, "wb")
for key in list_keys:
out_file.write(data[key].tobytes())
out_file.write(meta_data.tobytes())
# write footer
out_file.write(meta_count.tobytes() + bytearray(footer_string, "ASCII"))
def main(_):
if len(sys.argv) < 3:
print("\nUsage:")
print("python {0} <NMT inference parameters> --weightsdir=<case_name_dir>".format(sys.argv[0]))
print(
"""e.g. \npython {0} --src=en --tgt=vi \\
--ckpt=/path/to/envi_model/translate.ckpt \\
--hparams_path=nmt/standard_hparams/iwslt15.json \\
--out_dir=/tmp/envi \\
--vocab_prefix=/tmp/nmt_data/vocab \\
--inference_input_file=/tmp/nmt_data/tst2013.en \\
--inference_output_file=/tmp/envi/output_infer \\
--inference_ref_file=/tmp/nmt_data/tst2013.vi \\
--weightsdir=envi""".format(
sys.argv[0]
)
)
print("\nOR\n")
print("python {0} --metafile=</path_to/graph.meta> --weightsdir=<case_name_dir> ".format(sys.argv[0]))
print("e.g.\npython {0} --metafile=./translate.ckpt-12000.meta --weightsdir=envi".format(sys.argv[0]))
sys.exit()
nmt_parser = argparse.ArgumentParser()
nmt_parser.add_argument(
"--metafile", type=str, default=None, help="Path to the metafile (alternative checkpoint restore, may not work)"
)
nmt_parser.add_argument("--weightsdir", type=str, default="weights", help="Output weights directory")
trt_flags, unparsed = nmt_parser.parse_known_args()
if trt_flags.metafile is None:
params = chpt_to_dict_arrays()
else:
# A metafile path is handled by the simple checkpoint loader.
params = chpt_to_dict_arrays_simple(trt_flags.metafile)
print("\nLoading the checkpoint...\n")
print("\nConcatenating the weights...")
dimensions, data = concatenate_layers(params)
print("\nConverting the weights...")
# Convert weights to TensorRT format
data = convert_weights(dimensions, data, params["forget_bias"])
print("\nSaving into binary file...")
case_dir = trt_flags.weightsdir
if not os.path.isdir(case_dir):
os.mkdir(case_dir)
case_dir = case_dir + "/"
trt_string = u"trtsamplenmt"
# save embed weights
save_layer_weights(
data,
["encembed"],
[dimensions["encembed_outputs"], dimensions["num_units"]],
trt_string,
case_dir + "encembed.bin",
)
save_layer_weights(
data,
["decembed"],
[dimensions["decembed_outputs"], dimensions["num_units"]],
trt_string,
case_dir + "decembed.bin",
)
# encrnn
save_layer_weights(
data,
["encrnnkernel", "encrnnbias"],
[dimensions["encoder_type"], dimensions["layers"], dimensions["num_units"]],
trt_string,
case_dir + "encrnn.bin",
)
# decrnn
save_layer_weights(
data,
["decrnnkernel", "decrnnbias"],
[0, dimensions["layers"], dimensions["num_units"]],
trt_string,
case_dir + "decrnn.bin",
)
# decprojkernel
save_layer_weights(
data,
["decprojkernel"],
[dimensions["num_units"], dimensions["decembed_outputs"]],
trt_string,
case_dir + "decproj.bin",
)
# decmemkernel
save_layer_weights(
data, ["decmemkernel"], [dimensions["num_units"], dimensions["num_units"]], trt_string, case_dir + "decmem.bin"
)
# decattkernel
# first dimension is 3 * num_units of bi RNN, 2 * num_units otherwise
save_layer_weights(
data,
["decattkernel"],
[data["decattkernel"].shape[0], dimensions["num_units"]],
trt_string,
case_dir + "decatt.bin",
)
if __name__ == "__main__":
tf.app.run()
| TensorRT-master | samples/sampleNMT/chptToBin.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import errno
import hashlib
import logging
import os
import sys
logger = logging.getLogger('downloader')
class DataFile:
"""Holder of a data file."""
def __init__(self, attr):
self.attr = attr
self.path = attr['path']
self.url = attr['url']
if 'checksum' not in attr:
logger.warning("Checksum of %s not provided!", self.path)
self.checksum = attr.get('checksum', None)
def __str__(self):
return str(self.attr)
class SampleData:
"""Holder of data files of an sample."""
def __init__(self, attr):
self.attr = attr
self.sample = attr['sample']
files = attr.get('files', None)
self.files = [DataFile(f) for f in files]
def __str__(self):
return str(self.attr)
def _loadYAML(yaml_path):
with open(yaml_path, 'rb') as f:
import yaml
y = yaml.load(f, yaml.FullLoader)
return SampleData(y)
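# The YAML layout expected by _loadYAML, sketched below with made-up values
# (the sample name, path, URL, and checksum are illustrative assumptions):
#
#   sample: sample_name
#   files:
#     - path: samples/data/some_file.bin
#       url: https://example.com/some_file.bin
#       checksum: 0123456789abcdef0123456789abcdef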
def _checkMD5(path, refMD5):
md5 = hashlib.md5(open(path, 'rb').read()).hexdigest()
return md5 == refMD5
def _createDirIfNeeded(path):
the_dir = os.path.dirname(path)
try:
os.makedirs(the_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def download(data_dir, yaml_path, overwrite=False):
"""Download the data files specified in YAML file to a directory.
Return False if the downloaded file or the local copy (when not overwriting) has a different checksum.
"""
sample_data = _loadYAML(yaml_path)
logger.info("Downloading data for %s", sample_data.sample)
def _downloadFile(path, url):
logger.info("Downloading %s from %s", path, url)
import requests
r = requests.get(url, stream=True, timeout=5)
size = int(r.headers.get('content-length', 0))
from tqdm import tqdm
progress_bar = tqdm(total=size, unit='iB', unit_scale=True)
with open(path, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
progress_bar.update(len(chunk))
fd.write(chunk)
progress_bar.close()
allGood = True
for f in sample_data.files:
fpath = os.path.join(data_dir, f.path)
if os.path.exists(fpath):
if _checkMD5(fpath, f.checksum):
logger.info("Found local copy %s, skip downloading.", fpath)
continue
else:
logger.warning("Local copy %s has a different checksum!", fpath)
if overwrite:
logging.warning("Removing local copy %s", fpath)
os.remove(fpath)
else:
allGood = False
continue
_createDirIfNeeded(fpath)
_downloadFile(fpath, f.url)
if not _checkMD5(fpath, f.checksum):
logger.error("The downloaded file %s has a different checksum!", fpath)
allGood = False
return allGood
def _parseArgs():
parser = argparse.ArgumentParser(description="Downloader of TensorRT sample data files.")
parser.add_argument('-d', '--data', help="Specify the data directory, data will be downloaded to there. $TRT_DATA_DIR will be overwritten by this argument.")
parser.add_argument('-f', '--file', help="Specify the path to the download.yml, default to `download.yml` in the working directory",
default='download.yml')
parser.add_argument('-o', '--overwrite', help="Force to overwrite if MD5 check failed",
action='store_true', default=False)
parser.add_argument('-v', '--verify', help="Verify if the data has been downloaded. Will not download if specified.",
action='store_true', default=False)
args, _ = parser.parse_known_args()
data = os.environ.get('TRT_DATA_DIR', None) if args.data is None else args.data
if data is None:
raise ValueError("Data directory must be specified by either `-d $DATA` or environment variable $TRT_DATA_DIR.")
return data, args
def verifyChecksum(data_dir, yaml_path):
"""Verify the checksum of the files described by the YAML.
Return False if any of the files does not exist or its checksum differs from the YAML.
"""
sample_data = _loadYAML(yaml_path)
logger.info("Verifying data files and their MD5 for %s", sample_data.sample)
allGood = True
for f in sample_data.files:
fpath = os.path.join(data_dir, f.path)
if os.path.exists(fpath):
if _checkMD5(fpath, f.checksum):
logger.info("MD5 match for local copy %s", fpath)
else:
logger.error("Local file %s has a different checksum!", fpath)
allGood = False
else:
allGood = False
logger.error("Data file %s doesn't have a local copy", f.path)
return allGood
def main():
data, args = _parseArgs()
logging.basicConfig()
logger.setLevel(logging.INFO)
ret = True
if args.verify:
ret = verifyChecksum(data, args.file)
else:
ret = download(data, args.file, args.overwrite)
if not ret:
# Error of downloading or checksum
sys.exit(1)
if __name__ == '__main__':
main()
TRT_DATA_DIR = None
def getFilePath(path):
"""Util to get the full path to the downloaded data files.
It only works when the sample doesn't have any other command line argument.
"""
global TRT_DATA_DIR
if not TRT_DATA_DIR:
parser = argparse.ArgumentParser(description="Helper of data file download tool")
parser.add_argument('-d', '--data', help="Specify the data directory where it is saved in. $TRT_DATA_DIR will be overwritten by this argument.")
args, _ = parser.parse_known_args()
TRT_DATA_DIR = os.environ.get('TRT_DATA_DIR', None) if args.data is None else args.data
if TRT_DATA_DIR is None:
raise ValueError("Data directory must be specified by either `-d $DATA` or environment variable $TRT_DATA_DIR.")
fullpath = os.path.join(TRT_DATA_DIR, path)
if not os.path.exists(fullpath):
raise ValueError("Data file %s doesn't exist!" % fullpath)
return fullpath
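# Illustrative usage sketch for getFilePath; the relative path is an assumption:
#
#   cfg_file = getFilePath("samples/python/yolov3_onnx/yolov3.cfg")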
| TensorRT-master | samples/python/downloader.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
try:
# Sometimes python does not understand FileNotFoundError
FileNotFoundError
except NameError:
FileNotFoundError = IOError
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
def GiB(val):
return val * 1 << 30
def add_help(description):
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args, _ = parser.parse_known_args()
def find_sample_data(description="Runs a TensorRT Python sample", subfolder="", find_files=[], err_msg=""):
'''
Parses sample arguments.
Args:
description (str): Description of the sample.
subfolder (str): The subfolder containing data relevant to this sample
find_files (List[str]): A list of filenames to find. Each filename will be replaced with an absolute path.
Returns:
Tuple[List[str], List[str]]: The list of data directory paths and the absolute paths of the located files.
'''
# Standard command-line arguments for all samples.
kDEFAULT_DATA_ROOT = os.path.join(os.sep, "usr", "src", "tensorrt", "data")
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-d", "--datadir", help="Location of the TensorRT sample data directory, and any additional data directories.", action="append", default=[kDEFAULT_DATA_ROOT])
args, _ = parser.parse_known_args()
def get_data_path(data_dir):
# If the subfolder exists, append it to the path, otherwise use the provided path as-is.
data_path = os.path.join(data_dir, subfolder)
if not os.path.exists(data_path):
if data_dir != kDEFAULT_DATA_ROOT:
print("WARNING: " + data_path + " does not exist. Trying " + data_dir + " instead.")
data_path = data_dir
# Make sure data directory exists.
if not (os.path.exists(data_path)) and data_dir != kDEFAULT_DATA_ROOT:
print("WARNING: {:} does not exist. Please provide the correct data path with the -d option.".format(data_path))
return data_path
data_paths = [get_data_path(data_dir) for data_dir in args.datadir]
return data_paths, locate_files(data_paths, find_files, err_msg)
def locate_files(data_paths, filenames, err_msg=""):
"""
Locates the specified files in the specified data directories.
If a file exists in multiple data directories, the first directory is used.
Args:
data_paths (List[str]): The data directories.
filenames (List[str]): The names of the files to find.
Returns:
List[str]: The absolute paths of the files.
Raises:
FileNotFoundError if a file could not be located.
"""
found_files = [None] * len(filenames)
for data_path in data_paths:
# Find all requested files.
for index, (found, filename) in enumerate(zip(found_files, filenames)):
if not found:
file_path = os.path.abspath(os.path.join(data_path, filename))
if os.path.exists(file_path):
found_files[index] = file_path
# Check that all files were found
for f, filename in zip(found_files, filenames):
if not f or not os.path.exists(f):
raise FileNotFoundError("Could not find {:}. Searched in data paths: {:}\n{:}".format(filename, data_paths, err_msg))
return found_files
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
# This function is generalized for multiple inputs/outputs for full dimension networks.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference_v2(context, bindings, inputs, outputs, stream):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
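# Illustrative usage sketch tying the helpers above together. The engine object and
# input data are assumptions, and error handling is omitted:
#
#   inputs, outputs, bindings, stream = allocate_buffers(engine)
#   with engine.create_execution_context() as context:
#       np.copyto(inputs[0].host, input_data.ravel())
#       [result] = do_inference_v2(context, bindings=bindings, inputs=inputs,
#                                  outputs=outputs, stream=stream)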
| TensorRT-master | samples/python/common.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains functions for training a TensorFlow model
import os
import numpy as np
import tensorflow as tf
def process_dataset():
# Import the data
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Reshape the data
NUM_TRAIN = 60000
NUM_TEST = 10000
x_train = np.reshape(x_train, (NUM_TRAIN, 28, 28, 1))
x_test = np.reshape(x_test, (NUM_TEST, 28, 28, 1))
return x_train, y_train, x_test, y_test
def create_model():
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=[28,28, 1]))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model
def save(model, filename):
# First freeze the graph and remove training nodes.
output_names = model.output.op.name
sess = tf.keras.backend.get_session()
frozen_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [output_names])
frozen_graph = tf.graph_util.remove_training_nodes(frozen_graph)
# Save the model
with open(filename, "wb") as ofile:
ofile.write(frozen_graph.SerializeToString())
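# Note: the frozen graph written here is typically converted to UFF afterwards
# (e.g. with TensorRT's convert-to-uff utility) before being consumed by sample.py.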
def main():
x_train, y_train, x_test, y_test = process_dataset()
model = create_model()
# Train the model on the data
model.fit(x_train, y_train, epochs = 5, verbose = 1)
# Evaluate the model on test data
model.evaluate(x_test, y_test)
model_path = os.environ.get("MODEL_PATH") or os.path.join(os.path.dirname(__file__), "models")
model_file = os.path.join(model_path, "lenet5.pb")
save(model, filename=model_file)
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/end_to_end_tensorflow_mnist/model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This sample uses a UFF MNIST model to create a TensorRT Inference Engine
from random import randint
from PIL import Image
import numpy as np
import pycuda.driver as cuda
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
class ModelData(object):
MODEL_FILE = "lenet5.uff"
INPUT_NAME ="input_1"
INPUT_SHAPE = (1, 28, 28)
OUTPUT_NAME = "dense_1/Softmax"
def build_engine(model_file):
# For more information on TRT basics, refer to the introductory samples.
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, builder.create_builder_config() as config, trt.UffParser() as parser, trt.Runtime(TRT_LOGGER) as runtime:
config.max_workspace_size = common.GiB(1)
# Parse the Uff Network
parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
parser.register_output(ModelData.OUTPUT_NAME)
parser.parse(model_file, network)
# Build and return an engine.
plan = builder.build_serialized_network(network, config)
return runtime.deserialize_cuda_engine(plan)
# Loads a test case into the provided pagelocked_buffer.
def load_normalized_test_case(data_paths, pagelocked_buffer, case_num=randint(0, 9)):
[test_case_path] = common.locate_files(data_paths, [str(case_num) + ".pgm"], err_msg="Please follow the README in the mnist data directory (usually in `/usr/src/tensorrt/data/mnist`) to download the MNIST dataset")
# Flatten the image into a 1D array, normalize, and copy to pagelocked memory.
img = np.array(Image.open(test_case_path)).ravel()
np.copyto(pagelocked_buffer, 1.0 - img / 255.0)
return case_num
def main():
data_paths, _ = common.find_sample_data(description="Runs an MNIST network using a UFF model file", subfolder="mnist")
model_path = os.environ.get("MODEL_PATH") or os.path.join(os.path.dirname(__file__), "models")
model_file = os.path.join(model_path, ModelData.MODEL_FILE)
with build_engine(model_file) as engine:
# Build an engine, allocate buffers and create a stream.
# For more information on buffer allocation, refer to the introductory samples.
inputs, outputs, bindings, stream = common.allocate_buffers(engine)
with engine.create_execution_context() as context:
case_num = load_normalized_test_case(data_paths, pagelocked_buffer=inputs[0].host)
# For more information on performing inference, refer to the introductory samples.
# The common.do_inference function will return a list of outputs - we only have one in this case.
[output] = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
pred = np.argmax(output)
print("Test Case: " + str(case_num))
print("Prediction: " + str(pred))
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/end_to_end_tensorflow_mnist/sample.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import onnx
import numpy as np
import argparse
import onnx_graphsurgeon as gs
from post_processing import *
from packnet_sfm.networks.depth.PackNet01 import PackNet01
def post_process_packnet(model_file, opset=11):
"""
Use ONNX graph surgeon to replace upsample and instance normalization nodes. Refer to post_processing.py for details.
Args:
model_file : Path to ONNX file
"""
# Load the packnet graph
graph = gs.import_onnx(onnx.load(model_file))
if opset>=11:
graph = process_pad_nodes(graph)
# Replace the subgraph of upsample with a single node with input and scale factor.
if torch.__version__ < '1.5.0':
graph = process_upsample_nodes(graph, opset)
# Convert the group normalization subgraph into a single plugin node.
graph = process_groupnorm_nodes(graph)
# Remove unused nodes, and topologically sort the graph.
graph.cleanup().toposort()
# Export the onnx graph from graphsurgeon
onnx.save_model(gs.export_onnx(graph), model_file)
print("Saving the ONNX model to {}".format(model_file))
def build_packnet(model_file, args):
"""
Construct the packnet network and export it to ONNX
"""
input_pyt = torch.randn((1, 3, 192, 640), requires_grad=False)
# Build the model
model_pyt = PackNet01(version='1A')
# Convert the model into ONNX
torch.onnx.export(model_pyt, input_pyt, model_file, verbose=args.verbose, opset_version=args.opset)
def main():
parser = argparse.ArgumentParser(description="Exports PackNet01 to ONNX, and post-processes it to insert TensorRT plugins")
parser.add_argument("-o", "--output", help="Path to save the generated ONNX model", default="model.onnx")
parser.add_argument("-op", "--opset", type=int, help="ONNX opset to use", default=11)
parser.add_argument("-v", "--verbose", action='store_true', help="Flag to enable verbose logging for torch.onnx.export")
args=parser.parse_args()
# Construct the packnet graph and generate the onnx graph
build_packnet(args.output, args)
# Perform post processing on Instance Normalization and upsampling nodes and create a new ONNX graph
post_process_packnet(args.output, args.opset)
if __name__ == '__main__':
main()
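# Example invocation (illustrative; the output path and opset are assumptions):
#
#   python convert_to_onnx.py --output model.onnx --opset 11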
| TensorRT-master | samples/python/onnx_packnet/convert_to_onnx.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import argparse
import onnx
import numpy as np
import torch
# Pad layer subgraph structure in ONNX (specific to opset 11):
# Constant
# |
# Shape
# |
# Mul Gather
# \ /
# Sub
# |
# ConstantOfShape
# |
# Concat
# |
# Reshape
# |
# Slice
# |
# Transpose
# |
# Reshape
# |
# Input Cast Constant
# \ | /
# Pad
def process_pad_nodes(graph):
"""
Fold the pad subgraph into a single layer with pad values as input
Input
|
Pad
|
Conv
"""
pad_nodes = [node for node in graph.nodes if node.op == "Pad"]
for node in pad_nodes:
fold_pad_inputs(node, graph)
return graph
def fold_pad_inputs(node, graph):
# Gather the amount of padding in each dimension from pytorch graph.
if torch.__version__ < '1.5.0':
pad_values_pyt = node.i(1).i(0).i(0).i(0).i(0).i(0).i(0).i(0).attrs['value'].values
else:
pad_values_pyt = node.i(1).i(0).i(0).i(0).i(0).i(0).inputs[0].values
# Assumption a 4d input tensor
onnx_pad_values = [0]*4*2 # 4d tensor and 2 sides padding for each dimension
j=3
for i in range(0, len(pad_values_pyt), 2):
onnx_pad_values[j] = pad_values_pyt[i]
onnx_pad_values[j+4] = pad_values_pyt[i+1]
j-=1
# Change the existing pad tensor to the new onnx_pad values tensor
pads_folded_tensor = gs.Constant(name=node.inputs[1].name, values=np.array(onnx_pad_values))
node.inputs[1] = pads_folded_tensor
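# Worked example for fold_pad_inputs: if PyTorch padded the last two dimensions with
# pad_values_pyt = [1, 1, 2, 2] (W-begin, W-end, H-begin, H-end), the loop above fills
# onnx_pad_values = [0, 0, 2, 1, 0, 0, 2, 1], i.e. ONNX's [begins..., ends...] layout
# for an NCHW tensor.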
# Pytorch-exported Upsample structure in ONNX:
# Mul Mul
# | |
# Cast Cast
# | |
# Floor Floor
# | |
# Unsqueeze Unsqueeze
# \ /
# Concat
# |
# Cast Cast
# \ /
# Div
# |
# Input Concat
# \ /
# Upsample
def process_upsample_nodes(graph, opset=11):
"""
Replace the upsample structure with structure below
Conv scale_factor
| /
Upsample
|
ReLU
"""
if opset>=11:
upsample_layer_name = "Resize"
else:
upsample_layer_name = "Upsample"
upsample_nodes = [node for node in graph.nodes if node.op == upsample_layer_name]
for node in upsample_nodes:
fold_upsample_inputs(node, graph, opset)
return graph
def fold_upsample_inputs(upsample, graph, opset=11):
"""
In-place transformation of the graph. The upsample subgraph is collapsed
to a single upsample node with input and scale factor (constant tensor).
Args:
upsample: upsample node in the original graph.
graph: graph object.
"""
if opset==9:
# Gather the scale factor from mul op in the upsample input subgraph
scale_factor = upsample.i(1).i(1).i(0).i(0).i(0).i(0).i(0).i(0).i(1).attrs['value'].values
# Create the new scales tensor
scales = np.array([1.0, 1.0, scale_factor, scale_factor], dtype=np.float32)
scale_tensor = gs.Constant(name=upsample.inputs[-1].name, values=scales)
# Change the last input to the node to the new constant scales tensor.
upsample.inputs[-1] = scale_tensor
else:
# In opset 11, upsample layer is exported as Resize. We will transform this Resize layer into an Upsample layer
# and collapse the input
sizes_tensor_name = upsample.inputs[3].name
# Create the new scales tensor
scale_factor = upsample.i(3).i(1).i().i().i().i().i(0).i(1).attrs['value'].values
scales = np.array([1.0, 1.0, scale_factor, scale_factor], dtype=np.float32)
scale_tensor = gs.Constant(name=sizes_tensor_name, values=scales)
# Rename the Resize op to upsample and add the data and scales as inputs to the upsample layer.
input_tensor = upsample.inputs[0]
upsample.inputs = [input_tensor, scale_tensor]
upsample.op = 'Upsample'
# Pytorch-exported GroupNorm subgraph in ONNX:
# Conv
# |
# Reshape Scale Bias
# \ | /
# InstanceNormalization
# |
# Reshape Unsqueeze
# \ /
# Mul (scale) Unsqueeze
# \ /
# Add (bias)
# |
# ReLU
def process_groupnorm_nodes(graph):
"""
Gather the instance normalization nodes and the rest of the subgraph
and convert into a single group normalization node.
"""
instancenorms = [node for node in graph.nodes if node.op == "InstanceNormalization"]
for node in instancenorms:
convert_to_groupnorm(node, graph)
return graph
def retrieve_attrs(instancenorm):
"""
Gather the required attributes for the GroupNorm plugin from the subgraph.
Args:
instancenorm: Instance Normalization node in the graph.
"""
attrs = {}
# The 2nd dimension of the Reshape shape is the number of groups
attrs["num_groups"] = instancenorm.i().i(1).attrs["value"].values[1]
attrs["eps"] = instancenorm.attrs["epsilon"]
# 1 is the default plugin version the parser will search for, and therefore can be omitted,
# but we include it here for illustrative purposes.
attrs["plugin_version"] = "1"
# "" is the default plugin namespace the parser will use, included here for illustrative purposes
attrs["plugin_namespace"] = ""
return attrs
def convert_to_groupnorm(instancenorm, graph):
"""
Convert the Pytorch-exported GroupNorm subgraph to the subgraph below
Conv
|
GroupNorm
|
ReLU
Args:
instancenorm: Instance Normalization node in the graph.
graph: Input graph object
"""
# Retrieve the instancenorm attributes and create the replacement node
attrs = retrieve_attrs(instancenorm)
groupnorm = gs.Node(op="GroupNormalizationPlugin", attrs=attrs)
graph.nodes.append(groupnorm)
# The plugin needs to receive an input from the Conv node, and output to the ReLU node
conv_output_tensor = instancenorm.i().inputs[0] # Output of Conv
relu_input_tensor = instancenorm.o().o().o().outputs[0] # Output of Add
# Reconnect inputs/outputs to the groupnorm plugin
conv_output_tensor.outputs[0] = groupnorm
relu_input_tensor.inputs[0] = groupnorm
# Add scale and bias constant tensors to group norm plugin
if torch.__version__ < '1.5.0':
groupnorm.inputs.append(instancenorm.o().o().i(1).inputs[0])
groupnorm.inputs.append(instancenorm.o().o().o().i(1).inputs[0])
else:
groupnorm.inputs.append(instancenorm.o().o().inputs[1])
groupnorm.inputs.append(instancenorm.o().o().o().inputs[1])
| TensorRT-master | samples/python/onnx_packnet/post_processing.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from collections import OrderedDict
import sys
import os
import onnx
from onnx import helper
from onnx import TensorProto
import numpy as np
sys.path.insert(1, os.path.join(sys.path[0], os.path.pardir))
from downloader import getFilePath
class DarkNetParser(object):
"""Definition of a parser for DarkNet-based YOLOv3-608 (only tested for this topology)."""
def __init__(self, supported_layers):
"""Initializes a DarkNetParser object.
Keyword argument:
supported_layers -- a string list of supported layers in DarkNet naming convention,
parameters are only added to the class dictionary if a parsed layer is included.
"""
# A list of YOLOv3 layers containing dictionaries with all layer
# parameters:
self.layer_configs = OrderedDict()
self.supported_layers = supported_layers
self.layer_counter = 0
def parse_cfg_file(self, cfg_file_path):
"""Takes the yolov3.cfg file and parses it layer by layer,
appending each layer's parameters as a dictionary to layer_configs.
Keyword argument:
cfg_file_path -- path to the yolov3.cfg file as string
"""
with open(cfg_file_path) as cfg_file:
remainder = cfg_file.read()
while remainder is not None:
layer_dict, layer_name, remainder = self._next_layer(remainder)
if layer_dict is not None:
self.layer_configs[layer_name] = layer_dict
return self.layer_configs
def _next_layer(self, remainder):
"""Takes in a string and segments it by looking for DarkNet delimiters.
Returns the layer parameters and the remaining string after the last delimiter.
Example for the first Conv layer in yolo.cfg ...
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
... becomes the following layer_dict return value:
{'activation': 'leaky', 'stride': 1, 'pad': 1, 'filters': 32,
'batch_normalize': 1, 'type': 'convolutional', 'size': 3}.
'001_convolutional' is returned as layer_name, and all lines that follow in yolo.cfg
are returned as the next remainder.
Keyword argument:
remainder -- a string with all raw text after the previously parsed layer
"""
remainder = remainder.split('[', 1)
if len(remainder) == 2:
remainder = remainder[1]
else:
return None, None, None
remainder = remainder.split(']', 1)
if len(remainder) == 2:
layer_type, remainder = remainder
else:
return None, None, None
if remainder.replace(' ', '')[0] == '#':
remainder = remainder.split('\n', 1)[1]
layer_param_block, remainder = remainder.split('\n\n', 1)
layer_param_lines = layer_param_block.split('\n')[1:]
layer_name = str(self.layer_counter).zfill(3) + '_' + layer_type
layer_dict = dict(type=layer_type)
if layer_type in self.supported_layers:
for param_line in layer_param_lines:
if param_line[0] == '#':
continue
param_type, param_value = self._parse_params(param_line)
layer_dict[param_type] = param_value
self.layer_counter += 1
return layer_dict, layer_name, remainder
def _parse_params(self, param_line):
"""Identifies the parameters contained in one of the cfg file and returns
them in the required format for each parameter type, e.g. as a list, an int or a float.
Keyword argument:
param_line -- one parsed line within a layer block
"""
param_line = param_line.replace(' ', '')
param_type, param_value_raw = param_line.split('=')
param_value = None
if param_type == 'layers':
layer_indexes = list()
for index in param_value_raw.split(','):
layer_indexes.append(int(index))
param_value = layer_indexes
elif isinstance(param_value_raw, str) and not param_value_raw.isalpha():
condition_param_value_positive = param_value_raw.isdigit()
condition_param_value_negative = param_value_raw[0] == '-' and \
param_value_raw[1:].isdigit()
if condition_param_value_positive or condition_param_value_negative:
param_value = int(param_value_raw)
else:
param_value = float(param_value_raw)
else:
param_value = str(param_value_raw)
return param_type, param_value
class MajorNodeSpecs(object):
"""Helper class used to store the names of ONNX output names,
corresponding to the output of a DarkNet layer and its output channels.
Some DarkNet layers are not created and there is no corresponding ONNX node,
but we still need to track them in order to set up skip connections.
"""
def __init__(self, name, channels):
""" Initialize a MajorNodeSpecs object.
Keyword arguments:
name -- name of the ONNX node
channels -- number of output channels of this node
"""
self.name = name
self.channels = channels
self.created_onnx_node = False
if name is not None and isinstance(channels, int) and channels > 0:
self.created_onnx_node = True
class ConvParams(object):
"""Helper class to store the hyper parameters of a Conv layer,
including its prefix name in the ONNX graph and the expected dimensions
of weights for convolution, bias, and batch normalization.
Additionally acts as a wrapper for generating safe names for all
weights, checking on feasible combinations.
"""
def __init__(self, node_name, batch_normalize, conv_weight_dims):
"""Constructor based on the base node name (e.g. 101_convolutional), the batch
normalization setting, and the convolutional weights shape.
Keyword arguments:
node_name -- base name of this YOLO convolutional layer
batch_normalize -- bool value if batch normalization is used
conv_weight_dims -- the dimensions of this layer's convolutional weights
"""
self.node_name = node_name
self.batch_normalize = batch_normalize
assert len(conv_weight_dims) == 4
self.conv_weight_dims = conv_weight_dims
def generate_param_name(self, param_category, suffix):
"""Generates a name based on two string inputs,
and checks if the combination is valid."""
assert suffix
assert param_category in ['bn', 'conv']
assert(suffix in ['scale', 'mean', 'var', 'weights', 'bias'])
if param_category == 'bn':
assert self.batch_normalize
assert suffix in ['scale', 'bias', 'mean', 'var']
elif param_category == 'conv':
assert suffix in ['weights', 'bias']
if suffix == 'bias':
assert not self.batch_normalize
param_name = self.node_name + '_' + param_category + '_' + suffix
return param_name
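# Illustrative example of the naming scheme above:
# ConvParams('001_convolutional', True, (32, 3, 3, 3)).generate_param_name('bn', 'scale')
# returns '001_convolutional_bn_scale'.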
class ResizeParams(object):
# Helper class to store the scale parameter for a Resize node.
def __init__(self, node_name, value):
"""Constructor based on the base node name (e.g. 86_Resize),
and the value of the scale input tensor.
Keyword arguments:
node_name -- base name of this YOLO Resize layer
value -- the value of the scale input to the Resize layer as numpy array
"""
self.node_name = node_name
self.value = value
def generate_param_name(self):
"""Generates the scale parameter name for the Resize node."""
param_name = self.node_name + '_' + "scale"
return param_name
def generate_roi_name(self):
"""Generates the roi input name for the Resize node."""
param_name = self.node_name + '_' + "roi"
return param_name
class WeightLoader(object):
"""Helper class used for loading the serialized weights of a binary file stream
and returning the initializers and the input tensors required for populating
the ONNX graph with weights.
"""
def __init__(self, weights_file_path):
"""Initialized with a path to the YOLOv3 .weights file.
Keyword argument:
weights_file_path -- path to the weights file.
"""
self.weights_file = self._open_weights_file(weights_file_path)
def load_resize_scales(self, resize_params):
"""Returns the initializers with the value of the scale input
tensor given by resize_params.
Keyword argument:
resize_params -- a ResizeParams object
"""
initializer = list()
inputs = list()
name = resize_params.generate_param_name()
shape = resize_params.value.shape
data = resize_params.value
scale_init = helper.make_tensor(
name, TensorProto.FLOAT, shape, data)
scale_input = helper.make_tensor_value_info(
name, TensorProto.FLOAT, shape)
initializer.append(scale_init)
inputs.append(scale_input)
# In opset 11 an additional input named roi is required. Create a dummy tensor to satisfy this.
# It is a 1D tensor of size of the rank of the input (4)
rank = 4
roi_name = resize_params.generate_roi_name()
roi_input = helper.make_tensor_value_info(roi_name, TensorProto.FLOAT, [rank])
roi_init = helper.make_tensor(roi_name, TensorProto.FLOAT, [rank], [0,0,0,0])
initializer.append(roi_init)
inputs.append(roi_input)
return initializer, inputs
def load_conv_weights(self, conv_params):
"""Returns the initializers with weights from the weights file and
the input tensors of a convolutional layer for all corresponding ONNX nodes.
Keyword argument:
conv_params -- a ConvParams object
"""
initializer = list()
inputs = list()
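# The order of tensor creation below mirrors the order in which DarkNet serializes the weights:
# for batch-normalized layers: bn bias, bn scale, bn running mean, bn running variance, then conv weights;
# for layers without batch normalization: conv bias, then conv weights.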
if conv_params.batch_normalize:
bias_init, bias_input = self._create_param_tensors(
conv_params, 'bn', 'bias')
bn_scale_init, bn_scale_input = self._create_param_tensors(
conv_params, 'bn', 'scale')
bn_mean_init, bn_mean_input = self._create_param_tensors(
conv_params, 'bn', 'mean')
bn_var_init, bn_var_input = self._create_param_tensors(
conv_params, 'bn', 'var')
initializer.extend(
[bn_scale_init, bias_init, bn_mean_init, bn_var_init])
inputs.extend([bn_scale_input, bias_input,
bn_mean_input, bn_var_input])
else:
bias_init, bias_input = self._create_param_tensors(
conv_params, 'conv', 'bias')
initializer.append(bias_init)
inputs.append(bias_input)
conv_init, conv_input = self._create_param_tensors(
conv_params, 'conv', 'weights')
initializer.append(conv_init)
inputs.append(conv_input)
return initializer, inputs
def _open_weights_file(self, weights_file_path):
"""Opens a YOLOv3 DarkNet file stream and skips the header.
Keyword argument:
weights_file_path -- path to the weights file.
"""
weights_file = open(weights_file_path, 'rb')
length_header = 5
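# The 20-byte DarkNet header (version information and an image-seen counter) carries no weights,
# so it is read once here and discarded.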
np.ndarray(
shape=(length_header, ), dtype='int32', buffer=weights_file.read(
length_header * 4))
return weights_file
def _create_param_tensors(self, conv_params, param_category, suffix):
"""Creates the initializers with weights from the weights file together with
the input tensors.
Keyword arguments:
conv_params -- a ConvParams object
param_category -- the category of parameters to be created ('bn' or 'conv')
suffix -- a string determining the sub-type of above param_category (e.g.,
'weights' or 'bias')
"""
param_name, param_data, param_data_shape = self._load_one_param_type(
conv_params, param_category, suffix)
initializer_tensor = helper.make_tensor(
param_name, TensorProto.FLOAT, param_data_shape, param_data)
input_tensor = helper.make_tensor_value_info(
param_name, TensorProto.FLOAT, param_data_shape)
return initializer_tensor, input_tensor
def _load_one_param_type(self, conv_params, param_category, suffix):
"""Deserializes the weights from a file stream in the DarkNet order.
Keyword arguments:
conv_params -- a ConvParams object
param_category -- the category of parameters to be created ('bn' or 'conv')
suffix -- a string determining the sub-type of above param_category (e.g.,
'weights' or 'bias')
"""
param_name = conv_params.generate_param_name(param_category, suffix)
channels_out, channels_in, filter_h, filter_w = conv_params.conv_weight_dims
if param_category == 'bn':
param_shape = [channels_out]
elif param_category == 'conv':
if suffix == 'weights':
param_shape = [channels_out, channels_in, filter_h, filter_w]
elif suffix == 'bias':
param_shape = [channels_out]
param_size = np.prod(np.array(param_shape))
param_data = np.ndarray(
shape=param_shape,
dtype='float32',
buffer=self.weights_file.read(param_size * 4))
param_data = param_data.flatten().astype(float)
return param_name, param_data, param_shape
class GraphBuilderONNX(object):
"""Class for creating an ONNX graph from a previously generated list of layer dictionaries."""
def __init__(self, output_tensors):
"""Initialize with all DarkNet default parameters used creating YOLOv3,
and specify the output tensors as an OrderedDict for their output dimensions
with their names as keys.
Keyword argument:
output_tensors -- an OrderedDict mapping each output tensor name to its
output dimensions
"""
self.output_tensors = output_tensors
self._nodes = list()
self.graph_def = None
self.input_tensor = None
self.epsilon_bn = 1e-5
self.momentum_bn = 0.99
self.alpha_lrelu = 0.1
self.param_dict = OrderedDict()
self.major_node_specs = list()
self.batch_size = 1
def build_onnx_graph(
self,
layer_configs,
weights_file_path,
verbose=True):
"""Iterate over all layer configs (parsed from the DarkNet representation
of YOLOv3-608), create an ONNX graph, populate it with weights from the weights
file and return the graph definition.
Keyword arguments:
layer_configs -- an OrderedDict object with all parsed layers' configurations
weights_file_path -- location of the weights file
verbose -- toggles if the graph is printed after creation (default: True)
"""
for layer_name in layer_configs.keys():
layer_dict = layer_configs[layer_name]
major_node_specs = self._make_onnx_node(layer_name, layer_dict)
if major_node_specs.name is not None:
self.major_node_specs.append(major_node_specs)
outputs = list()
for tensor_name in self.output_tensors.keys():
output_dims = [self.batch_size, ] + \
self.output_tensors[tensor_name]
output_tensor = helper.make_tensor_value_info(
tensor_name, TensorProto.FLOAT, output_dims)
outputs.append(output_tensor)
inputs = [self.input_tensor]
weight_loader = WeightLoader(weights_file_path)
initializer = list()
# If a layer has parameters, add them to the initializer and input lists.
for layer_name in self.param_dict.keys():
_, layer_type = layer_name.split('_', 1)
params = self.param_dict[layer_name]
if layer_type == 'convolutional':
initializer_layer, inputs_layer = weight_loader.load_conv_weights(
params)
initializer.extend(initializer_layer)
inputs.extend(inputs_layer)
elif layer_type == "upsample":
initializer_layer, inputs_layer = weight_loader.load_resize_scales(
params)
initializer.extend(initializer_layer)
inputs.extend(inputs_layer)
del weight_loader
self.graph_def = helper.make_graph(
nodes=self._nodes,
name='YOLOv3-608',
inputs=inputs,
outputs=outputs,
initializer=initializer
)
if verbose:
print(helper.printable_graph(self.graph_def))
model_def = helper.make_model(self.graph_def,
producer_name='NVIDIA TensorRT sample')
return model_def
def _make_onnx_node(self, layer_name, layer_dict):
"""Take in a layer parameter dictionary, choose the correct function for
creating an ONNX node and store the information important to graph creation
as a MajorNodeSpecs object.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
layer_type = layer_dict['type']
if self.input_tensor is None:
if layer_type == 'net':
major_node_output_name, major_node_output_channels = self._make_input_tensor(
layer_name, layer_dict)
major_node_specs = MajorNodeSpecs(major_node_output_name,
major_node_output_channels)
else:
raise ValueError('The first node has to be of type "net".')
else:
node_creators = dict()
node_creators['convolutional'] = self._make_conv_node
node_creators['shortcut'] = self._make_shortcut_node
node_creators['route'] = self._make_route_node
node_creators['upsample'] = self._make_resize_node
if layer_type in node_creators.keys():
major_node_output_name, major_node_output_channels = \
node_creators[layer_type](layer_name, layer_dict)
major_node_specs = MajorNodeSpecs(major_node_output_name,
major_node_output_channels)
else:
print(
'Layer of type %s not supported, skipping ONNX node generation.' %
layer_type)
major_node_specs = MajorNodeSpecs(layer_name,
None)
return major_node_specs
def _make_input_tensor(self, layer_name, layer_dict):
"""Create an ONNX input tensor from a 'net' layer and store the batch size.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
batch_size = layer_dict['batch']
channels = layer_dict['channels']
height = layer_dict['height']
width = layer_dict['width']
self.batch_size = batch_size
input_tensor = helper.make_tensor_value_info(
str(layer_name), TensorProto.FLOAT, [
batch_size, channels, height, width])
self.input_tensor = input_tensor
return layer_name, channels
def _get_previous_node_specs(self, target_index=-1):
"""Get a previously generated ONNX node (skip those that were not generated).
Target index can be passed for jumping to a specific index.
Keyword arguments:
target_index -- optional for jumping to a specific index (default: -1 for jumping
to previous element)
"""
previous_node = None
for node in self.major_node_specs[target_index::-1]:
if node.created_onnx_node:
previous_node = node
break
assert previous_node is not None
return previous_node
def _make_conv_node(self, layer_name, layer_dict):
"""Create an ONNX Conv node with optional batch normalization and
activation nodes.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
previous_node_specs = self._get_previous_node_specs()
inputs = [previous_node_specs.name]
previous_channels = previous_node_specs.channels
kernel_size = layer_dict['size']
stride = layer_dict['stride']
filters = layer_dict['filters']
batch_normalize = False
if 'batch_normalize' in layer_dict.keys(
) and layer_dict['batch_normalize'] == 1:
batch_normalize = True
kernel_shape = [kernel_size, kernel_size]
weights_shape = [filters, previous_channels] + kernel_shape
conv_params = ConvParams(layer_name, batch_normalize, weights_shape)
strides = [stride, stride]
dilations = [1, 1]
weights_name = conv_params.generate_param_name('conv', 'weights')
inputs.append(weights_name)
if not batch_normalize:
bias_name = conv_params.generate_param_name('conv', 'bias')
inputs.append(bias_name)
conv_node = helper.make_node(
'Conv',
inputs=inputs,
outputs=[layer_name],
kernel_shape=kernel_shape,
strides=strides,
auto_pad='SAME_LOWER',
dilations=dilations,
name=layer_name
)
self._nodes.append(conv_node)
inputs = [layer_name]
layer_name_output = layer_name
if batch_normalize:
layer_name_bn = layer_name + '_bn'
bn_param_suffixes = ['scale', 'bias', 'mean', 'var']
for suffix in bn_param_suffixes:
bn_param_name = conv_params.generate_param_name('bn', suffix)
inputs.append(bn_param_name)
batchnorm_node = helper.make_node(
'BatchNormalization',
inputs=inputs,
outputs=[layer_name_bn],
epsilon=self.epsilon_bn,
momentum=self.momentum_bn,
name=layer_name_bn
)
self._nodes.append(batchnorm_node)
inputs = [layer_name_bn]
layer_name_output = layer_name_bn
if layer_dict['activation'] == 'leaky':
layer_name_lrelu = layer_name + '_lrelu'
lrelu_node = helper.make_node(
'LeakyRelu',
inputs=inputs,
outputs=[layer_name_lrelu],
name=layer_name_lrelu,
alpha=self.alpha_lrelu
)
self._nodes.append(lrelu_node)
inputs = [layer_name_lrelu]
layer_name_output = layer_name_lrelu
elif layer_dict['activation'] == 'linear':
pass
else:
print('Activation not supported.')
self.param_dict[layer_name] = conv_params
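# The generated chain is Conv -> (BatchNormalization) -> (LeakyRelu); the name returned below is the
# last node of that chain, so subsequent layers attach to the correct output tensor.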
return layer_name_output, filters
def _make_shortcut_node(self, layer_name, layer_dict):
"""Create an ONNX Add node with the shortcut properties from
the DarkNet-based graph.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
shortcut_index = layer_dict['from']
activation = layer_dict['activation']
assert activation == 'linear'
first_node_specs = self._get_previous_node_specs()
second_node_specs = self._get_previous_node_specs(
target_index=shortcut_index)
assert first_node_specs.channels == second_node_specs.channels
channels = first_node_specs.channels
inputs = [first_node_specs.name, second_node_specs.name]
shortcut_node = helper.make_node(
'Add',
inputs=inputs,
outputs=[layer_name],
name=layer_name,
)
self._nodes.append(shortcut_node)
return layer_name, channels
def _make_route_node(self, layer_name, layer_dict):
"""If the 'layers' parameter from the DarkNet configuration is only one index, continue
node creation at the indicated (negative) index. Otherwise, create an ONNX Concat node
with the route properties from the DarkNet-based graph.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
route_node_indexes = layer_dict['layers']
if len(route_node_indexes) == 1:
split_index = route_node_indexes[0]
assert split_index < 0
# Increment by one because we skipped the YOLO layer:
split_index += 1
self.major_node_specs = self.major_node_specs[:split_index]
layer_name = None
channels = None
else:
inputs = list()
channels = 0
for index in route_node_indexes:
if index > 0:
# Increment by one because we count the input as a node (DarkNet
# does not)
index += 1
route_node_specs = self._get_previous_node_specs(
target_index=index)
inputs.append(route_node_specs.name)
channels += route_node_specs.channels
assert inputs
assert channels > 0
route_node = helper.make_node(
'Concat',
axis=1,
inputs=inputs,
outputs=[layer_name],
name=layer_name,
)
self._nodes.append(route_node)
return layer_name, channels
def _make_resize_node(self, layer_name, layer_dict):
"""Create an ONNX Resize node with the properties from
the DarkNet-based graph.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
resize_scale_factors = float(layer_dict['stride'])
# Create the scale factor array with node parameters
scales=np.array([1.0, 1.0, resize_scale_factors, resize_scale_factors]).astype(np.float32)
previous_node_specs = self._get_previous_node_specs()
inputs = [previous_node_specs.name]
channels = previous_node_specs.channels
assert channels > 0
resize_params = ResizeParams(layer_name, scales)
# roi input is the second input, so append it before scales
roi_name = resize_params.generate_roi_name()
inputs.append(roi_name)
scales_name = resize_params.generate_param_name()
inputs.append(scales_name)
resize_node = helper.make_node(
'Resize',
coordinate_transformation_mode='asymmetric',
mode='nearest',
nearest_mode='floor',
inputs=inputs,
outputs=[layer_name],
name=layer_name,
)
self._nodes.append(resize_node)
self.param_dict[layer_name] = resize_params
return layer_name, channels
def main():
"""Run the DarkNet-to-ONNX conversion for YOLOv3-608."""
cfg_file_path = getFilePath('samples/python/yolov3_onnx/yolov3.cfg')
# These are the only layers DarkNetParser will extract parameters from. The three layers of
# type 'yolo' are not parsed in detail because they are included in the post-processing later:
supported_layers = ['net', 'convolutional', 'shortcut',
'route', 'upsample']
# Create a DarkNetParser object, and then use it to generate an OrderedDict with all
# layers' configs from the cfg file:
parser = DarkNetParser(supported_layers)
layer_configs = parser.parse_cfg_file(cfg_file_path)
# We do not need the parser anymore after we got layer_configs:
del parser
# In the layer_configs above, there are three outputs whose output shapes
# (in CHW format) we need to know:
output_tensor_dims = OrderedDict()
output_tensor_dims['082_convolutional'] = [255, 19, 19]
output_tensor_dims['094_convolutional'] = [255, 38, 38]
output_tensor_dims['106_convolutional'] = [255, 76, 76]
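# 255 channels = 3 anchor boxes per cell * (4 box coordinates + 1 objectness score + 80 class scores).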
# Create a GraphBuilderONNX object with the known output tensor dimensions:
builder = GraphBuilderONNX(output_tensor_dims)
weights_file_path = getFilePath('samples/python/yolov3_onnx/yolov3.weights')
# Now generate an ONNX graph with weights from the previously parsed layer configurations
# and the weights file:
yolov3_model_def = builder.build_onnx_graph(
layer_configs=layer_configs,
weights_file_path=weights_file_path,
verbose=True)
# Once we have the model definition, we do not need the builder anymore:
del builder
# Perform a sanity check on the ONNX model definition:
onnx.checker.check_model(yolov3_model_def)
# Serialize the generated ONNX graph to this file:
output_file_path = 'yolov3.onnx'
onnx.save(yolov3_model_def, output_file_path)
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/yolov3_onnx/yolov3_to_onnx.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
from PIL import Image
import numpy as np
import os
# YOLOv3-608 has been trained with these 80 categories from COCO:
# Lin, Tsung-Yi, et al. "Microsoft COCO: Common Objects in Context."
# European Conference on Computer Vision. Springer, Cham, 2014.
def load_label_categories(label_file_path):
categories = [line.rstrip('\n') for line in open(label_file_path)]
return categories
LABEL_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'coco_labels.txt')
ALL_CATEGORIES = load_label_categories(LABEL_FILE_PATH)
# Let's make sure that there are 80 classes, as expected for the COCO data set:
CATEGORY_NUM = len(ALL_CATEGORIES)
assert CATEGORY_NUM == 80
class PreprocessYOLO(object):
"""A simple class for loading images with PIL and reshaping them to the specified
input resolution for YOLOv3-608.
"""
def __init__(self, yolo_input_resolution):
"""Initialize with the input resolution for YOLOv3, which will stay fixed in this sample.
Keyword arguments:
yolo_input_resolution -- two-dimensional tuple with the target network's (spatial)
input resolution in HW order
"""
self.yolo_input_resolution = yolo_input_resolution
def process(self, input_image_path):
"""Load an image from the specified input path,
and return it together with a pre-processed version required for feeding it into a
YOLOv3 network.
Keyword arguments:
input_image_path -- string path of the image to be loaded
"""
image_raw, image_resized = self._load_and_resize(input_image_path)
image_preprocessed = self._shuffle_and_normalize(image_resized)
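# At this point image_preprocessed is an NCHW float32 array, e.g. shape (1, 3, 608, 608) for a
# (608, 608) input resolution, while image_raw keeps the original PIL image for visualization.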
return image_raw, image_preprocessed
def _load_and_resize(self, input_image_path):
"""Load an image from the specified path and resize it to the input resolution.
Return the input image before resizing as a PIL Image (required for visualization),
and the resized image as a NumPy float array.
Keyword arguments:
input_image_path -- string path of the image to be loaded
"""
image_raw = Image.open(input_image_path)
# yolo_input_resolution is given in (height, width) format; PIL expects the
# (width, height) convention instead:
new_resolution = (
self.yolo_input_resolution[1],
self.yolo_input_resolution[0])
image_resized = image_raw.resize(
new_resolution, resample=Image.BICUBIC)
image_resized = np.array(image_resized, dtype=np.float32, order='C')
return image_raw, image_resized
def _shuffle_and_normalize(self, image):
"""Normalize a NumPy array representing an image to the range [0, 1], and
convert it from HWC format ("channels last") to NCHW format ("channels first"
with leading batch dimension).
Keyword arguments:
image -- image as three-dimensional NumPy float array, in HWC format
"""
image /= 255.0
# HWC to CHW format:
image = np.transpose(image, [2, 0, 1])
# CHW to NCHW format
image = np.expand_dims(image, axis=0)
# Convert the image to row-major order, also known as "C order":
image = np.array(image, dtype=np.float32, order='C')
return image
class PostprocessYOLO(object):
"""Class for post-processing the three outputs tensors from YOLOv3-608."""
def __init__(self,
yolo_masks,
yolo_anchors,
obj_threshold,
nms_threshold,
yolo_input_resolution):
"""Initialize with all values that will be kept when processing several frames.
Assuming 3 outputs of the network in the case of (large) YOLOv3.
Keyword arguments:
yolo_masks -- a list of 3 three-dimensional tuples for the YOLO masks
yolo_anchors -- a list of 9 two-dimensional tuples for the YOLO anchors
obj_threshold -- threshold for object coverage, float value between 0 and 1
nms_threshold -- threshold for non-max suppression algorithm,
float value between 0 and 1
yolo_input_resolution -- two-dimensional tuple with the target network's (spatial)
input resolution in HW order
"""
self.masks = yolo_masks
self.anchors = yolo_anchors
self.object_threshold = obj_threshold
self.nms_threshold = nms_threshold
self.input_resolution_yolo = yolo_input_resolution
def process(self, outputs, resolution_raw):
"""Take the YOLOv3 outputs generated from a TensorRT forward pass, post-process them
and return a list of bounding boxes for detected objects together with their categories
and their confidences in separate lists.
Keyword arguments:
outputs -- outputs from a TensorRT engine in NCHW format
resolution_raw -- the original spatial resolution from the input PIL image in WH order
"""
outputs_reshaped = list()
for output in outputs:
outputs_reshaped.append(self._reshape_output(output))
boxes, categories, confidences = self._process_yolo_output(
outputs_reshaped, resolution_raw)
return boxes, categories, confidences
def _reshape_output(self, output):
"""Reshape a TensorRT output from NCHW to NHWC format (with expected C=255),
and then return it in (height,width,3,85) dimensionality after further reshaping.
Keyword argument:
output -- an output from a TensorRT engine after inference
"""
output = np.transpose(output, [0, 2, 3, 1])
_, height, width, _ = output.shape
dim1, dim2 = height, width
dim3 = 3
# There are CATEGORY_NUM=80 object categories:
dim4 = (4 + 1 + CATEGORY_NUM)
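# i.e. dim4 = 85: 4 box coordinates + 1 objectness score + 80 class scores.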
return np.reshape(output, (dim1, dim2, dim3, dim4))
def _process_yolo_output(self, outputs_reshaped, resolution_raw):
"""Take in a list of three reshaped YOLO outputs in (height,width,3,85) shape and return
a list of bounding boxes for detected objects together with their categories and their
confidences in separate lists.
Keyword arguments:
outputs_reshaped -- list of three reshaped YOLO outputs as NumPy arrays
with shape (height,width,3,85)
resolution_raw -- the original spatial resolution from the input PIL image in WH order
"""
# E.g. in YOLOv3-608, there are three output tensors, which we associate with their
# respective masks. Then we iterate through all output-mask pairs and generate candidates
# for bounding boxes, their corresponding category predictions and their confidences:
boxes, categories, confidences = list(), list(), list()
for output, mask in zip(outputs_reshaped, self.masks):
box, category, confidence = self._process_feats(output, mask)
box, category, confidence = self._filter_boxes(box, category, confidence)
boxes.append(box)
categories.append(category)
confidences.append(confidence)
boxes = np.concatenate(boxes)
categories = np.concatenate(categories)
confidences = np.concatenate(confidences)
# Scale boxes back to original image shape:
width, height = resolution_raw
image_dims = [width, height, width, height]
boxes = boxes * image_dims
# Using the candidates from the previous (loop) step, we apply the non-max suppression
# algorithm that clusters adjacent bounding boxes to a single bounding box:
nms_boxes, nms_categories, nscores = list(), list(), list()
for category in set(categories):
idxs = np.where(categories == category)
box = boxes[idxs]
category = categories[idxs]
confidence = confidences[idxs]
keep = self._nms_boxes(box, confidence)
nms_boxes.append(box[keep])
nms_categories.append(category[keep])
nscores.append(confidence[keep])
if not nms_categories and not nscores:
return None, None, None
boxes = np.concatenate(nms_boxes)
categories = np.concatenate(nms_categories)
confidences = np.concatenate(nscores)
return boxes, categories, confidences
def _process_feats(self, output_reshaped, mask):
"""Take in a reshaped YOLO output in height,width,3,85 format together with its
corresponding YOLO mask and return the detected bounding boxes, the confidence,
and the class probability in each cell/pixel.
Keyword arguments:
output_reshaped -- reshaped YOLO output as NumPy arrays with shape (height,width,3,85)
mask -- 2-dimensional tuple with mask specification for this output
"""
# Two in-line functions required for calculating the bounding box
# descriptors:
def sigmoid(value):
"""Return the sigmoid of the input."""
return 1.0 / (1.0 + math.exp(-value))
def exponential(value):
"""Return the exponential of the input."""
return math.exp(value)
# Vectorized calculation of above two functions:
sigmoid_v = np.vectorize(sigmoid)
exponential_v = np.vectorize(exponential)
grid_h, grid_w, _, _ = output_reshaped.shape
anchors = [self.anchors[i] for i in mask]
# Reshape anchors to (1, 1, num_anchors, 2) so they broadcast over the grid cells:
anchors_tensor = np.reshape(anchors, [1, 1, len(anchors), 2])
box_xy = sigmoid_v(output_reshaped[..., :2])
box_wh = exponential_v(output_reshaped[..., 2:4]) * anchors_tensor
box_confidence = sigmoid_v(output_reshaped[..., 4])
box_confidence = np.expand_dims(box_confidence, axis=-1)
box_class_probs = sigmoid_v(output_reshaped[..., 5:])
col = np.tile(np.arange(0, grid_w), grid_w).reshape(-1, grid_w)
row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_h)
col = col.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
row = row.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
grid = np.concatenate((col, row), axis=-1)
box_xy += grid
box_xy /= (grid_w, grid_h)
box_wh /= self.input_resolution_yolo
box_xy -= (box_wh / 2.)
boxes = np.concatenate((box_xy, box_wh), axis=-1)
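# The computation above implements the YOLOv3 box decoding:
#   b_xy = (sigmoid(t_xy) + grid_offset) / grid_size      (normalized box center)
#   b_wh = anchor_wh * exp(t_wh) / input_resolution       (normalized box size)
# and shifts the center to the top-left corner via b_xy - b_wh / 2.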
# boxes: centroids, box_confidence: confidence level, box_class_probs:
# class confidence
return boxes, box_confidence, box_class_probs
def _filter_boxes(self, boxes, box_confidences, box_class_probs):
"""Take in the unfiltered bounding box descriptors and discard each cell
whose score is lower than the object threshold set during class initialization.
Keyword arguments:
boxes -- bounding box coordinates with shape (height,width,3,4); 4 for
x,y,height,width coordinates of the boxes
box_confidences -- bounding box confidences with shape (height,width,3,1); 1 for a
confidence scalar per element
box_class_probs -- class probabilities with shape (height,width,3,CATEGORY_NUM)
"""
box_scores = box_confidences * box_class_probs
box_classes = np.argmax(box_scores, axis=-1)
box_class_scores = np.max(box_scores, axis=-1)
pos = np.where(box_class_scores >= self.object_threshold)
boxes = boxes[pos]
classes = box_classes[pos]
scores = box_class_scores[pos]
return boxes, classes, scores
def _nms_boxes(self, boxes, box_confidences):
"""Apply the Non-Maximum Suppression (NMS) algorithm on the bounding boxes with their
confidence scores and return an array with the indexes of the bounding boxes we want to
keep (and display later).
Keyword arguments:
boxes -- a NumPy array containing N bounding-box coordinates that survived filtering,
with shape (N,4); 4 for x,y,height,width coordinates of the boxes
box_confidences -- a Numpy array containing the corresponding confidences with shape N
"""
x_coord = boxes[:, 0]
y_coord = boxes[:, 1]
width = boxes[:, 2]
height = boxes[:, 3]
areas = width * height
ordered = box_confidences.argsort()[::-1]
keep = list()
while ordered.size > 0:
# Index of the current element:
i = ordered[0]
keep.append(i)
xx1 = np.maximum(x_coord[i], x_coord[ordered[1:]])
yy1 = np.maximum(y_coord[i], y_coord[ordered[1:]])
xx2 = np.minimum(x_coord[i] + width[i], x_coord[ordered[1:]] + width[ordered[1:]])
yy2 = np.minimum(y_coord[i] + height[i], y_coord[ordered[1:]] + height[ordered[1:]])
width1 = np.maximum(0.0, xx2 - xx1 + 1)
height1 = np.maximum(0.0, yy2 - yy1 + 1)
intersection = width1 * height1
union = (areas[i] + areas[ordered[1:]] - intersection)
# Compute the Intersection over Union (IoU) score:
iou = intersection / union
# The goal of the NMS algorithm is to reduce the number of adjacent bounding-box
# candidates to a minimum. In this step, we keep only those elements whose overlap
# with the current bounding box is lower than the threshold:
indexes = np.where(iou <= self.nms_threshold)[0]
ordered = ordered[indexes + 1]
keep = np.array(keep)
return keep
| TensorRT-master | samples/python/yolov3_onnx/data_processing.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from PIL import ImageDraw
from data_processing import PreprocessYOLO, PostprocessYOLO, ALL_CATEGORIES
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common
from downloader import getFilePath
TRT_LOGGER = trt.Logger()
def draw_bboxes(image_raw, bboxes, confidences, categories, all_categories, bbox_color='blue'):
"""Draw the bounding boxes on the original input image and return it.
Keyword arguments:
image_raw -- a raw PIL Image
bboxes -- NumPy array containing the bounding box coordinates of N objects, with shape (N,4).
categories -- NumPy array containing the corresponding category for each object,
with shape (N,)
confidences -- NumPy array containing the corresponding confidence for each object,
with shape (N,)
all_categories -- a list of all categories in the correct order (required for looking up
the category name)
bbox_color -- an optional string specifying the color of the bounding boxes (default: 'blue')
"""
draw = ImageDraw.Draw(image_raw)
print(bboxes, confidences, categories)
for box, score, category in zip(bboxes, confidences, categories):
x_coord, y_coord, width, height = box
left = max(0, np.floor(x_coord + 0.5).astype(int))
top = max(0, np.floor(y_coord + 0.5).astype(int))
right = min(image_raw.width, np.floor(x_coord + width + 0.5).astype(int))
bottom = min(image_raw.height, np.floor(y_coord + height + 0.5).astype(int))
draw.rectangle(((left, top), (right, bottom)), outline=bbox_color)
draw.text((left, top - 12), '{0} {1:.2f}'.format(all_categories[category], score), fill=bbox_color)
return image_raw
def get_engine(onnx_file_path, engine_file_path=""):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
def build_engine():
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(common.EXPLICIT_BATCH) as network, builder.create_builder_config() as config, trt.OnnxParser(network, TRT_LOGGER) as parser, trt.Runtime(TRT_LOGGER) as runtime:
config.max_workspace_size = 1 << 28 # 256MiB
builder.max_batch_size = 1
# Parse model file
if not os.path.exists(onnx_file_path):
print('ONNX file {} not found, please run yolov3_to_onnx.py first to generate it.'.format(onnx_file_path))
exit(0)
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
print ('ERROR: Failed to parse the ONNX file.')
for error in range(parser.num_errors):
print (parser.get_error(error))
return None
# The actual yolov3.onnx is generated with batch size 64. Reshape input to batch size 1
network.get_input(0).shape = [1, 3, 608, 608]
print('Completed parsing of ONNX file')
print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
plan = builder.build_serialized_network(network, config)
engine = runtime.deserialize_cuda_engine(plan)
print("Completed creating Engine")
with open(engine_file_path, "wb") as f:
f.write(plan)
return engine
if os.path.exists(engine_file_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine()
def main():
"""Create a TensorRT engine for ONNX-based YOLOv3-608 and run inference."""
# Try to load a previously generated YOLOv3-608 network graph in ONNX format:
onnx_file_path = 'yolov3.onnx'
engine_file_path = "yolov3.trt"
# Download a dog image and save it to the following file path:
input_image_path = getFilePath('samples/python/yolov3_onnx/dog.jpg')
# Two-dimensional tuple with the target network's (spatial) input resolution in HW order
input_resolution_yolov3_HW = (608, 608)
# Create a pre-processor object by specifying the required input resolution for YOLOv3
preprocessor = PreprocessYOLO(input_resolution_yolov3_HW)
# Load an image from the specified input path, and return it together with a pre-processed version
image_raw, image = preprocessor.process(input_image_path)
# Store the shape of the original input image in WH format; we will need it later
shape_orig_WH = image_raw.size
# Output shapes expected by the post-processor
output_shapes = [(1, 255, 19, 19), (1, 255, 38, 38), (1, 255, 76, 76)]
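# These are the three YOLOv3 detection scales for a 608x608 input: 608/32=19, 608/16=38, 608/8=76.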
# Do inference with TensorRT
trt_outputs = []
with get_engine(onnx_file_path, engine_file_path) as engine, engine.create_execution_context() as context:
inputs, outputs, bindings, stream = common.allocate_buffers(engine)
# Do inference
print('Running inference on image {}...'.format(input_image_path))
# Set host input to the image. The common.do_inference function will copy the input to the GPU before executing.
inputs[0].host = image
trt_outputs = common.do_inference_v2(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
# Before doing post-processing, we need to reshape the outputs as the common.do_inference will give us flat arrays.
trt_outputs = [output.reshape(shape) for output, shape in zip(trt_outputs, output_shapes)]
postprocessor_args = {"yolo_masks": [(6, 7, 8), (3, 4, 5), (0, 1, 2)], # A list of 3 three-dimensional tuples for the YOLO masks
"yolo_anchors": [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45), # A list of 9 two-dimensional tuples for the YOLO anchors
(59, 119), (116, 90), (156, 198), (373, 326)],
"obj_threshold": 0.6, # Threshold for object coverage, float value between 0 and 1
"nms_threshold": 0.5, # Threshold for non-max suppression algorithm, float value between 0 and 1
"yolo_input_resolution": input_resolution_yolov3_HW}
postprocessor = PostprocessYOLO(**postprocessor_args)
# Run the post-processing algorithms on the TensorRT outputs and get the bounding box details of detected objects
boxes, classes, scores = postprocessor.process(trt_outputs, (shape_orig_WH))
# Draw the bounding boxes onto the original input image and save it as a PNG file
obj_detected_img = draw_bboxes(image_raw, boxes, scores, classes, ALL_CATEGORIES)
output_image_path = 'dog_bboxes.png'
obj_detected_img.save(output_image_path, 'PNG')
print('Saved image with bounding boxes of detected objects to {}.'.format(output_image_path))
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/yolov3_onnx/onnx_to_tensorrt.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import logging
import argparse
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from image_batcher import ImageBatcher
logging.basicConfig(level=logging.INFO)
logging.getLogger("EngineBuilder").setLevel(logging.INFO)
log = logging.getLogger("EngineBuilder")
class EngineCalibrator(trt.IInt8EntropyCalibrator2):
"""
Implements the INT8 Entropy Calibrator 2.
"""
def __init__(self, cache_file):
"""
:param cache_file: The location of the cache file.
"""
super().__init__()
self.cache_file = cache_file
self.image_batcher = None
self.batch_allocation = None
self.batch_generator = None
def set_image_batcher(self, image_batcher: ImageBatcher):
"""
Define the image batcher to use, if any. If using only the cache file, an image batcher doesn't need
to be defined.
:param image_batcher: The ImageBatcher object
"""
self.image_batcher = image_batcher
size = int(np.dtype(self.image_batcher.dtype).itemsize * np.prod(self.image_batcher.shape))
self.batch_allocation = cuda.mem_alloc(size)
self.batch_generator = self.image_batcher.get_batch()
def get_batch_size(self):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Get the batch size to use for calibration.
:return: Batch size.
"""
if self.image_batcher:
return self.image_batcher.batch_size
return 1
def get_batch(self, names):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Get the next batch to use for calibration, as a list of device memory pointers.
:param names: The names of the inputs, if useful to define the order of inputs.
:return: A list of int-casted memory pointers.
"""
if not self.image_batcher:
return None
try:
batch, _, _ = next(self.batch_generator)
log.info("Calibrating image {} / {}".format(self.image_batcher.image_index, self.image_batcher.num_images))
cuda.memcpy_htod(self.batch_allocation, np.ascontiguousarray(batch))
return [int(self.batch_allocation)]
except StopIteration:
log.info("Finished calibration batches")
return None
def read_calibration_cache(self):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Read the calibration cache file stored on disk, if it exists.
:return: The contents of the cache file, if any.
"""
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
log.info("Using calibration cache file: {}".format(self.cache_file))
return f.read()
def write_calibration_cache(self, cache):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Store the calibration cache to a file on disk.
:param cache: The contents of the calibration cache to store.
"""
with open(self.cache_file, "wb") as f:
log.info("Writing calibration cache data to: {}".format(self.cache_file))
f.write(cache)
class EngineBuilder:
"""
Parses an ONNX graph and builds a TensorRT engine from it.
"""
def __init__(self, verbose=False, workspace=8):
"""
:param verbose: If enabled, a higher verbosity level will be set on the TensorRT logger.
:param workspace: Max memory workspace to allow, in GB.
"""
self.trt_logger = trt.Logger(trt.Logger.INFO)
if verbose:
self.trt_logger.min_severity = trt.Logger.Severity.VERBOSE
trt.init_libnvinfer_plugins(self.trt_logger, namespace="")
self.builder = trt.Builder(self.trt_logger)
self.config = self.builder.create_builder_config()
self.config.max_workspace_size = workspace * (2 ** 30)
self.batch_size = None
self.network = None
self.parser = None
def create_network(self, onnx_path):
"""
Parse the ONNX graph and create the corresponding TensorRT network definition.
:param onnx_path: The path to the ONNX graph to load.
"""
network_flags = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
self.network = self.builder.create_network(network_flags)
self.parser = trt.OnnxParser(self.network, self.trt_logger)
onnx_path = os.path.realpath(onnx_path)
with open(onnx_path, "rb") as f:
if not self.parser.parse(f.read()):
log.error("Failed to load ONNX file: {}".format(onnx_path))
for error in range(self.parser.num_errors):
log.error(self.parser.get_error(error))
sys.exit(1)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
outputs = [self.network.get_output(i) for i in range(self.network.num_outputs)]
log.info("Network Description")
for input in inputs:
self.batch_size = input.shape[0]
log.info("Input '{}' with shape {} and dtype {}".format(input.name, input.shape, input.dtype))
for output in outputs:
log.info("Output '{}' with shape {} and dtype {}".format(output.name, output.shape, output.dtype))
assert self.batch_size > 0
self.builder.max_batch_size = self.batch_size
def create_engine(self, engine_path, precision, calib_input=None, calib_cache=None, calib_num_images=5000,
calib_batch_size=8):
"""
Build the TensorRT engine and serialize it to disk.
:param engine_path: The path where to serialize the engine to.
:param precision: The datatype to use for the engine, either 'fp32', 'fp16' or 'int8'.
:param calib_input: The path to a directory holding the calibration images.
:param calib_cache: The path where to write the calibration cache to, or if it already exists, load it from.
:param calib_num_images: The maximum number of images to use for calibration.
:param calib_batch_size: The batch size to use for the calibration process.
"""
engine_path = os.path.realpath(engine_path)
engine_dir = os.path.dirname(engine_path)
os.makedirs(engine_dir, exist_ok=True)
log.info("Building {} Engine in {}".format(precision, engine_path))
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
if precision == "fp16":
if not self.builder.platform_has_fast_fp16:
log.warning("FP16 is not supported natively on this platform/device")
else:
self.config.set_flag(trt.BuilderFlag.FP16)
elif precision == "int8":
if not self.builder.platform_has_fast_int8:
log.warning("INT8 is not supported natively on this platform/device")
else:
if self.builder.platform_has_fast_fp16:
# Also enable fp16, as some layers may be even more efficient in fp16 than int8
self.config.set_flag(trt.BuilderFlag.FP16)
self.config.set_flag(trt.BuilderFlag.INT8)
self.config.int8_calibrator = EngineCalibrator(calib_cache)
if not os.path.exists(calib_cache):
calib_shape = [calib_batch_size] + list(inputs[0].shape[1:])
calib_dtype = trt.nptype(inputs[0].dtype)
self.config.int8_calibrator.set_image_batcher(
ImageBatcher(calib_input, calib_shape, calib_dtype, max_num_images=calib_num_images,
exact_batches=True))
with self.builder.build_engine(self.network, self.config) as engine, open(engine_path, "wb") as f:
log.info("Serializing engine to file: {:}".format(engine_path))
f.write(engine.serialize())
def main(args):
builder = EngineBuilder(args.verbose, args.workspace)
builder.create_network(args.onnx)
builder.create_engine(args.engine, args.precision, args.calib_input, args.calib_cache, args.calib_num_images,
args.calib_batch_size)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--onnx", help="The input ONNX model file to load")
parser.add_argument("-e", "--engine", help="The output path for the TRT engine")
parser.add_argument("-p", "--precision", default="fp16", choices=["fp32", "fp16", "int8"],
help="The precision mode to build in, either 'fp32', 'fp16' or 'int8', default: 'fp16'")
parser.add_argument("-v", "--verbose", action="store_true", help="Enable more verbose log output")
parser.add_argument("-w", "--workspace", default=8, type=int, help="The max memory workspace size to allow in Gb, "
"default: 8")
parser.add_argument("--calib_input", help="The directory holding images to use for calibration")
parser.add_argument("--calib_cache", default="./calibration.cache",
help="The file path for INT8 calibration cache to use, default: ./calibration.cache")
parser.add_argument("--calib_num_images", default=5000, type=int,
help="The maximum number of images to use for calibration, default: 5000")
parser.add_argument("--calib_batch_size", default=8, type=int,
help="The batch size for the calibration process, default: 8")
args = parser.parse_args()
if not all([args.onnx, args.engine]):
parser.print_help()
log.error("These arguments are required: --onnx and --engine")
sys.exit(1)
if args.precision == "int8" and not (args.calib_input or os.path.exists(args.calib_cache)):
parser.print_help()
log.error("When building in int8 precision, --calib_input or an existing --calib_cache file is required")
sys.exit(1)
main(args)
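# Illustrative invocations (paths are hypothetical):
#   python build_engine.py --onnx model.onnx --engine engine.trt --precision fp16
#   python build_engine.py --onnx model.onnx --engine engine.trt --precision int8 --calib_input /path/to/images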
| TensorRT-master | samples/python/efficientdet/build_engine.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import logging
import tensorflow as tf
import onnx_graphsurgeon as gs
import numpy as np
import onnx
from onnx import shape_inference
from tf2onnx import tfonnx, optimizer, tf_loader
import onnx_utils
logging.basicConfig(level=logging.INFO)
logging.getLogger("EfficientDetGraphSurgeon").setLevel(logging.INFO)
log = logging.getLogger("EfficientDetGraphSurgeon")
class EfficientDetGraphSurgeon:
def __init__(self, saved_model_path, legacy_plugins=False):
"""
Constructor of the EfficientDet Graph Surgeon object, used to convert an EfficientDet TF saved model
into an ONNX model that TensorRT can parse.
:param saved_model_path: The path pointing to the TensorFlow saved model to load.
:param legacy_plugins: If using TensorRT version < 8.0.1, set this to True to use older (but slower) plugins.
"""
saved_model_path = os.path.realpath(saved_model_path)
assert os.path.exists(saved_model_path)
# Use tf2onnx to convert saved model to an initial ONNX graph.
graph_def, inputs, outputs = tf_loader.from_saved_model(saved_model_path, None, None, "serve",
["serving_default"])
log.info("Loaded saved model from {}".format(saved_model_path))
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graph_def, name="")
with tf_loader.tf_session(graph=tf_graph):
onnx_graph = tfonnx.process_tf_graph(tf_graph, input_names=inputs, output_names=outputs, opset=11)
onnx_model = optimizer.optimize_graph(onnx_graph).make_model("Converted from {}".format(saved_model_path))
self.graph = gs.import_onnx(onnx_model)
assert self.graph
log.info("TF2ONNX graph created successfully")
# Fold constants via ONNX-GS that TF2ONNX may have missed
self.graph.fold_constants()
# Try to auto-detect by finding if nodes match a specific name pattern expected for either of the APIs.
self.api = None
if len([node for node in self.graph.nodes if "class_net/" in node.name]) > 0:
self.api = "AutoML"
elif len([node for node in self.graph.nodes if "/WeightSharedConvolutionalClassHead/" in node.name]) > 0:
self.api = "TFOD"
assert self.api
log.info("Graph was detected as {}".format(self.api))
self.batch_size = None
self.legacy_plugins = legacy_plugins
def infer(self):
"""
Sanitize the graph by cleaning any unconnected nodes, do a topological re-sort, and fold constant input values.
When possible, run shape inference on the ONNX graph to determine tensor shapes.
"""
for i in range(3):
count_before = len(self.graph.nodes)
self.graph.cleanup().toposort()
try:
for node in self.graph.nodes:
for o in node.outputs:
o.shape = None
model = gs.export_onnx(self.graph)
model = shape_inference.infer_shapes(model)
self.graph = gs.import_onnx(model)
except Exception as e:
log.info("Shape inference could not be performed at this time:\n{}".format(e))
try:
self.graph.fold_constants(fold_shapes=True)
except TypeError as e:
log.error("This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your "
"onnx_graphsurgeon module. Error:\n{}".format(e))
raise
count_after = len(self.graph.nodes)
if count_before == count_after:
# No new folding occurred in this iteration, so we can stop for now.
break
def save(self, output_path):
"""
Save the ONNX model to the given location.
:param output_path: Path pointing to the location where to write out the updated ONNX model.
"""
self.graph.cleanup().toposort()
model = gs.export_onnx(self.graph)
output_path = os.path.realpath(output_path)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
onnx.save(model, output_path)
log.info("Saved ONNX model to {}".format(output_path))
def update_preprocessor(self, input_shape):
"""
Remove all the pre-processing nodes in the ONNX graph and leave only the image normalization essentials.
:param input_shape: The input tensor shape to use for the ONNX graph.
"""
# Update the input and output tensors shape
input_shape = input_shape.split(",")
assert len(input_shape) == 4
for i in range(len(input_shape)):
input_shape[i] = int(input_shape[i])
assert input_shape[i] >= 1
input_format = None
if input_shape[1] == 3:
input_format = "NCHW"
if input_shape[3] == 3:
input_format = "NHWC"
assert input_format in ["NCHW", "NHWC"]
self.batch_size = input_shape[0]
self.graph.inputs[0].shape = input_shape
self.graph.inputs[0].dtype = np.float32
if self.api == "TFOD" and self.batch_size > 1 and self.legacy_plugins:
log.error("TFOD models with a batch size larger than 1 are not currently supported in legacy plugin mode. "
"Please upgrade to TensorRT >= 8.0.1 or use batch size 1 for now.")
sys.exit(1)
self.infer()
log.info("ONNX graph input shape: {} [{} format detected]".format(self.graph.inputs[0].shape, input_format))
# Find the initial nodes of the graph, whatever the input is first connected to, and disconnect them
for node in [node for node in self.graph.nodes if self.graph.inputs[0] in node.inputs]:
node.inputs.clear()
# Convert to NCHW format if needed
input_tensor = self.graph.inputs[0]
if input_format == "NHWC":
input_tensor = self.graph.transpose("preprocessor/transpose", input_tensor, [0, 3, 1, 2])
# RGB Normalizers. The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting
scale_val = 1 / np.asarray([255], dtype=np.float32)
mean_val = -1 * np.expand_dims(np.asarray([0.485, 0.456, 0.406], dtype=np.float32), axis=(0, 2, 3))
stddev_val = 1 / np.expand_dims(np.asarray([0.229, 0.224, 0.225], dtype=np.float32), axis=(0, 2, 3))
# y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev
scale_out = self.graph.elt_const("Mul", "preprocessor/scale", input_tensor, scale_val * stddev_val)
mean_out = self.graph.elt_const("Add", "preprocessor/mean", scale_out, mean_val * stddev_val)
# Find the first stem conv node of the graph, and connect the normalizer directly to it
stem_name = None
if self.api == "AutoML":
stem_name = "/stem/"
if self.api == "TFOD":
stem_name = "/stem_conv2d/"
stem = [node for node in self.graph.nodes if node.op == "Conv" and stem_name in node.name][0]
log.info("Found {} node '{}' as stem entry".format(stem.op, stem.name))
stem.inputs[0] = mean_out[0]
# Reshape nodes tend to update the batch dimension to a fixed value of 1; they should use the batch size instead
for node in [node for node in self.graph.nodes if node.op == "Reshape"]:
if type(node.inputs[1]) == gs.Constant and node.inputs[1].values[0] == 1:
node.inputs[1].values[0] = self.batch_size
self.infer()
def update_network(self):
"""
Updates the graph to replace certain nodes in the main EfficientDet network:
- the global average pooling nodes are optimized when running for TFOD models.
- the nearest neighbor resize ops in the FPN are replaced by a TRT plugin nodes when running in legacy mode.
"""
if self.api == "TFOD":
for reduce in [node for node in self.graph.nodes if node.op == "ReduceMean"]:
# TFOD models have their ReduceMean nodes applied with some redundant transposes that can be
# optimized away for better performance
# Make sure the correct subgraph is being replaced, basically search for this:
# X > Transpose (0,2,3,1) > ReduceMean (1,2) > Reshape (?,1,1,?) > Reshape (?,?,1,1) > Conv > Y
# And change to this:
# X > ReduceMean (2,3) > Conv > Y
transpose = reduce.i()
if transpose.op != "Transpose" or transpose.attrs['perm'] != [0, 2, 3, 1]:
continue
if len(reduce.attrs['axes']) != 2 or reduce.attrs['axes'] != [1, 2]:
continue
reshape1 = reduce.o()
if reshape1.op != "Reshape" or len(reshape1.inputs[1].values) != 4:
continue
if reshape1.inputs[1].values[1] != 1 or reshape1.inputs[1].values[2] != 1:
continue
reshape2 = reshape1.o()
if reshape2.op != "Reshape" or len(reshape2.inputs[1].values) != 4:
continue
if reshape2.inputs[1].values[2] != 1 or reshape2.inputs[1].values[3] != 1:
continue
conv = reshape2.o()
if conv.op != "Conv":
continue
# If all the checks above pass, then this node sequence can be optimized by just the ReduceMean itself
# operating on a different set of axes
input_tensor = transpose.inputs[0] # Input tensor to the Transpose
reduce.inputs[0] = input_tensor # Forward the Transpose input to the ReduceMean node
output_tensor = reduce.outputs[0] # Output tensor of the ReduceMean
conv.inputs[0] = output_tensor # Forward the ReduceMean output to the Conv node
reduce.attrs['axes'] = [2, 3] # Update the axes that ReduceMean operates on
reduce.attrs['keepdims'] = 1 # Keep the reduced dimensions
log.info("Optimized subgraph around ReduceMean node '{}'".format(reduce.name))
if self.legacy_plugins:
self.infer()
count = 1
for node in [node for node in self.graph.nodes if node.op == "Resize" and node.attrs['mode'] == "nearest"]:
# Older versions of TensorRT do not understand nearest neighbor resize ops, so a plugin is used to
# perform this operation.
self.graph.plugin(
op="ResizeNearest_TRT",
name="resize_nearest_{}".format(count),
inputs=[node.inputs[0]],
outputs=node.outputs,
attrs={
'plugin_version': "1",
'scale': 2.0, # All resize ops in the EfficientDet FPN should have an upscale factor of 2.0
})
node.outputs.clear()
log.info(
"Replaced '{}' ({}) with a ResizeNearest_TRT plugin node".format(node.name, count))
count += 1
def update_nms(self, threshold=None, detections=None):
"""
Updates the graph to replace the NMS op by BatchedNMS_TRT TensorRT plugin node.
:param threshold: Override the score threshold attribute. If set to None, use the value in the graph.
:param detections: Override the max detections attribute. If set to None, use the value in the graph.
"""
def find_head_concat(name_scope):
# This will find the concatenation node at the end of either Class Net or Box Net. These concatenation nodes
# bring together prediction data for each of 5 scales.
# The concatenated Class Net node will have shape [batch_size, num_anchors, num_classes],
# and the concatenated Box Net node has the shape [batch_size, num_anchors, 4].
# These concatenation nodes can be found by searching for all Concat nodes and checking if the node two
# steps above in the graph has a name that begins with either "box_net/..." or "class_net/...".
for node in [node for node in self.graph.nodes if node.op == "Transpose" and name_scope in node.name]:
concat = self.graph.find_descendant_by_op(node, "Concat")
assert concat and len(concat.inputs) == 5
log.info("Found {} node '{}' as the tip of {}".format(concat.op, concat.name, name_scope))
return concat
def extract_anchors_tensor(split):
# This will find the anchors that have been hardcoded somewhere within the ONNX graph.
# The function will return a gs.Constant that can be directly used as an input to the NMS plugin.
# The anchor tensor shape will be [1, num_anchors, 4]. Note that '1' is kept as first dim, regardless of
# batch size, as it's not necessary to replicate the anchors for all images in the batch.
# The anchors are available (one per coordinate) hardcoded as constants within certain box decoder nodes.
# Each of these four constants have shape [1, num_anchors], so some numpy operations are used to expand the
# dims and concatenate them as needed.
# These constants can be found by starting from the Box Net's split operation, and for each coordinate,
# walking down in the graph until either an Add or Mul node is found. The second input on these nodes will
# be the anchor data required.
def get_anchor_np(output_idx, op):
node = self.graph.find_descendant_by_op(split.o(0, output_idx), op)
assert node
val = np.squeeze(node.inputs[1].values)
return np.expand_dims(val.flatten(), axis=(0, 2))
anchors_y = get_anchor_np(0, "Add")
anchors_x = get_anchor_np(1, "Add")
anchors_h = get_anchor_np(2, "Mul")
anchors_w = get_anchor_np(3, "Mul")
anchors = np.concatenate([anchors_y, anchors_x, anchors_h, anchors_w], axis=2)
return gs.Constant(name="nms/anchors:0", values=anchors)
self.infer()
head_names = []
if self.api == "AutoML":
head_names = ["class_net/", "box_net/"]
if self.api == "TFOD":
head_names = ["/WeightSharedConvolutionalClassHead/", "/WeightSharedConvolutionalBoxHead/"]
# There are five nodes at the bottom of the graph that provide important connection points:
# 1. Find the concat node at the end of the class net (multi-scale class predictor)
class_net = find_head_concat(head_names[0])
class_net_tensor = class_net.outputs[0]
# 2. Find the concat node at the end of the box net (multi-scale localization predictor)
box_net = find_head_concat(head_names[1])
box_net_tensor = box_net.outputs[0]
# 3. Find the split node that separates the box net coordinates and feeds them into the box decoder.
box_net_split = self.graph.find_descendant_by_op(box_net, "Split")
assert box_net_split and len(box_net_split.outputs) == 4
# 4. Find the concat node at the end of the box decoder.
box_decoder = self.graph.find_descendant_by_op(box_net_split, "Concat")
assert box_decoder and len(box_decoder.inputs) == 4
box_decoder_tensor = box_decoder.outputs[0]
# 5. Find the NMS node.
nms_node = self.graph.find_node_by_op("NonMaxSuppression")
# Extract NMS Configuration
num_detections = int(nms_node.inputs[2].values) if detections is None else detections
iou_threshold = float(nms_node.inputs[3].values)
score_threshold = float(nms_node.inputs[4].values) if threshold is None else threshold
num_classes = class_net.i().inputs[1].values[-1]
normalized = True if self.api == "TFOD" else False
# NMS Inputs and Attributes
# NMS expects these shapes for its input tensors:
# box_net: [batch_size, number_boxes, 4]
# class_net: [batch_size, number_boxes, number_classes]
# anchors: [1, number_boxes, 4] (if used)
nms_op = None
nms_attrs = None
nms_inputs = None
if not self.legacy_plugins:
# EfficientNMS TensorRT Plugin
# Fusing the decoder will always be faster, so this is the default NMS method supported. In this case,
# three inputs are given to the NMS TensorRT node:
# - The box predictions (from the Box Net node found above)
# - The class predictions (from the Class Net node found above)
# - The default anchor coordinates (from the extracted anchor constants)
# As the original tensors from EfficientDet will be used, the NMS code type is set to 1 (Center+Size),
# because this is the internal box coding format used by the network.
anchors_tensor = extract_anchors_tensor(box_net_split)
nms_inputs = [box_net_tensor, class_net_tensor, anchors_tensor]
nms_op = "EfficientNMS_TRT"
nms_attrs = {
'plugin_version': "1",
'background_class': -1,
'max_output_boxes': num_detections,
'score_threshold': max(0.01, score_threshold), # Keep threshold to at least 0.01 for better efficiency
'iou_threshold': iou_threshold,
'score_activation': True,
'box_coding': 1,
}
nms_output_classes_dtype = np.int32
else:
# BatchedNMS TensorRT Plugin
# Alternatively, the ONNX box decoder can be used. This will be slower, as more element-wise and non-fused
# operations will need to be performed by TensorRT. However, it's easier to implement, so it is shown here
# for reference. In this case, only two inputs are given to the NMS TensorRT node:
# - The box predictions (already decoded through the ONNX Box Decoder node)
# - The class predictions (from the Class Net node found above, but also needs to pass through a sigmoid)
# This time, the box predictions will have the coordinate coding from the ONNX box decoder, which matches
# what the BatchedNMS plugin uses.
if self.api == "AutoML":
# The default boxes tensor has shape [batch_size, number_boxes, 4]. This will insert a "1" dimension
# in the second axis, to become [batch_size, number_boxes, 1, 4], the shape that BatchedNMS expects.
box_decoder_tensor = self.graph.unsqueeze("nms/box_net_reshape", box_decoder_tensor, axes=[2])[0]
if self.api == "TFOD":
# The default boxes tensor has shape [4, number_boxes]. This will transpose and insert a "1" dimension
# in the 0 and 2 axes, to become [1, number_boxes, 1, 4], the shape that BatchedNMS expects.
box_decoder_tensor = self.graph.transpose("nms/box_decoder_transpose", box_decoder_tensor, perm=[1, 0])
box_decoder_tensor = self.graph.unsqueeze("nms/box_decoder_reshape", box_decoder_tensor, axes=[0, 2])[0]
            # BatchedNMS also expects the classes tensor to be already activated; in the case of EfficientDet, this
            # is done through a Sigmoid op.
class_net_tensor = self.graph.sigmoid("nms/class_net_sigmoid", class_net_tensor)[0]
nms_inputs = [box_decoder_tensor, class_net_tensor]
nms_op = "BatchedNMS_TRT"
nms_attrs = {
'plugin_version': "1",
'shareLocation': True,
'backgroundLabelId': -1,
'numClasses': num_classes,
'topK': 1024,
'keepTopK': num_detections,
'scoreThreshold': score_threshold,
'iouThreshold': iou_threshold,
'isNormalized': normalized,
'clipBoxes': False,
# 'scoreBits': 10, # Some versions of the plugin may need this parameter. If so, uncomment this line.
}
nms_output_classes_dtype = np.float32
# NMS Outputs
nms_output_num_detections = gs.Variable(name="num_detections", dtype=np.int32, shape=[self.batch_size, 1])
nms_output_boxes = gs.Variable(name="detection_boxes", dtype=np.float32,
shape=[self.batch_size, num_detections, 4])
nms_output_scores = gs.Variable(name="detection_scores", dtype=np.float32,
shape=[self.batch_size, num_detections])
nms_output_classes = gs.Variable(name="detection_classes", dtype=nms_output_classes_dtype,
shape=[self.batch_size, num_detections])
nms_outputs = [nms_output_num_detections, nms_output_boxes, nms_output_scores, nms_output_classes]
# Create the NMS Plugin node with the selected inputs. The outputs of the node will also become the final
# outputs of the graph.
self.graph.plugin(
op=nms_op,
name="nms/non_maximum_suppression",
inputs=nms_inputs,
outputs=nms_outputs,
attrs=nms_attrs)
log.info("Created NMS plugin '{}' with attributes: {}".format(nms_op, nms_attrs))
self.graph.outputs = nms_outputs
self.infer()
def main(args):
effdet_gs = EfficientDetGraphSurgeon(args.saved_model, args.legacy_plugins)
if args.tf2onnx:
effdet_gs.save(args.tf2onnx)
effdet_gs.update_preprocessor(args.input_shape)
effdet_gs.update_network()
effdet_gs.update_nms(args.nms_threshold, args.nms_detections)
effdet_gs.save(args.onnx)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--saved_model", help="The TensorFlow saved model directory to load")
parser.add_argument("-o", "--onnx", help="The output ONNX model file to write")
parser.add_argument("-i", "--input_shape", default="1,512,512,3",
help="Set the input shape of the graph, as comma-separated dimensions in NCHW or NHWC format, "
"default: 1,512,512,3")
parser.add_argument("-t", "--nms_threshold", type=float, help="Override the score threshold for the NMS op, "
"default: use the original value in the model")
parser.add_argument("-d", "--nms_detections", type=int, help="Override the max detections for the NMS op, "
"default: use the original value in the model")
parser.add_argument("--legacy_plugins", action="store_true", help="Use legacy plugins for support on TensorRT "
"versions lower than 8.0.1")
parser.add_argument("--tf2onnx", help="The path where to save the intermediate ONNX graph generated by tf2onnx, "
"useful for debugging purposes, default: not saved")
args = parser.parse_args()
if not all([args.saved_model, args.onnx]):
parser.print_help()
print("\nThese arguments are required: --saved_model and --onnx")
sys.exit(1)
main(args)
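# Example invocation (illustrative only; the saved model directory and ONNX output path below
# are placeholders, not files shipped with this sample):
#   python create_onnx.py \
#       --saved_model /path/to/efficientdet-d0_saved_model \
#       --onnx /path/to/model.onnx \
#       --input_shape 1,512,512,3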
| TensorRT-master | samples/python/efficientdet/create_onnx.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import numpy as np
from PIL import Image
class ImageBatcher:
"""
Creates batches of pre-processed images.
"""
def __init__(self, input, shape, dtype, max_num_images=None, exact_batches=False, preprocessor="EfficientDet"):
"""
:param input: The input directory to read images from.
:param shape: The tensor shape of the batch to prepare, either in NCHW or NHWC format.
:param dtype: The (numpy) datatype to cast the batched data to.
:param max_num_images: The maximum number of images to read from the directory.
:param exact_batches: This defines how to handle a number of images that is not an exact multiple of the batch
size. If false, it will pad the final batch with zeros to reach the batch size. If true, it will *remove* the
last few images in excess of a batch size multiple, to guarantee batches are exact (useful for calibration).
:param preprocessor: Set the preprocessor to use, depending on which network is being used.
"""
# Find images in the given input path
input = os.path.realpath(input)
self.images = []
extensions = [".jpg", ".jpeg", ".png", ".bmp"]
def is_image(path):
return os.path.isfile(path) and os.path.splitext(path)[1].lower() in extensions
if os.path.isdir(input):
self.images = [os.path.join(input, f) for f in os.listdir(input) if is_image(os.path.join(input, f))]
self.images.sort()
elif os.path.isfile(input):
if is_image(input):
self.images.append(input)
self.num_images = len(self.images)
if self.num_images < 1:
print("No valid {} images found in {}".format("/".join(extensions), input))
sys.exit(1)
# Handle Tensor Shape
self.dtype = dtype
self.shape = shape
assert len(self.shape) == 4
self.batch_size = shape[0]
assert self.batch_size > 0
self.format = None
self.width = -1
self.height = -1
if self.shape[1] == 3:
self.format = "NCHW"
self.height = self.shape[2]
self.width = self.shape[3]
elif self.shape[3] == 3:
self.format = "NHWC"
self.height = self.shape[1]
self.width = self.shape[2]
assert all([self.format, self.width > 0, self.height > 0])
# Adapt the number of images as needed
if max_num_images and 0 < max_num_images < len(self.images):
self.num_images = max_num_images
if exact_batches:
self.num_images = self.batch_size * (self.num_images // self.batch_size)
if self.num_images < 1:
print("Not enough images to create batches")
sys.exit(1)
self.images = self.images[0:self.num_images]
# Subdivide the list of images into batches
self.num_batches = 1 + int((self.num_images - 1) / self.batch_size)
self.batches = []
for i in range(self.num_batches):
start = i * self.batch_size
end = min(start + self.batch_size, self.num_images)
self.batches.append(self.images[start:end])
# Indices
self.image_index = 0
self.batch_index = 0
self.preprocessor = preprocessor
def preprocess_image(self, image_path):
"""
The image preprocessor loads an image from disk and prepares it as needed for batching. This includes padding,
resizing, normalization, data type casting, and transposing.
This Image Batcher implements one algorithm for now:
* EfficientDet: Resizes and pads the image to fit the input size.
:param image_path: The path to the image on disk to load.
        :return: Two values: A numpy array holding the image sample, ready to be concatenated into the rest of the
batch, and the resize scale used, if any.
"""
def resize_pad(image, pad_color=(0, 0, 0)):
"""
A subroutine to implement padding and resizing. This will resize the image to fit fully within the input
size, and pads the remaining bottom-right portions with the value provided.
:param image: The PIL image object
            :param pad_color: The RGB values to use for the padded area. Default: Black/Zeros.
:return: Two values: The PIL image object already padded and cropped, and the resize scale used.
"""
width, height = image.size
width_scale = width / self.width
height_scale = height / self.height
scale = 1.0 / max(width_scale, height_scale)
image = image.resize((round(width * scale), round(height * scale)), resample=Image.BILINEAR)
pad = Image.new("RGB", (self.width, self.height))
pad.paste(pad_color, [0, 0, self.width, self.height])
pad.paste(image)
return pad, scale
scale = None
image = Image.open(image_path)
image = image.convert(mode='RGB')
if self.preprocessor == "EfficientDet":
# For EfficientNet V2: Resize & Pad with ImageNet mean values and keep as [0,255] Normalization
image, scale = resize_pad(image, (124, 116, 104))
image = np.asarray(image, dtype=self.dtype)
# [0-1] Normalization, Mean subtraction and Std Dev scaling are part of the EfficientDet graph, so
# no need to do it during preprocessing here
else:
print("Preprocessing method {} not supported".format(self.preprocessor))
sys.exit(1)
if self.format == "NCHW":
image = np.transpose(image, (2, 0, 1))
return image, scale
def get_batch(self):
"""
Retrieve the batches. This is a generator object, so you can use it within a loop as:
for batch, images in batcher.get_batch():
...
        Or outside of a loop with the next() function.
:return: A generator yielding three items per iteration: a numpy array holding a batch of images, the list of
paths to the images loaded within this batch, and the list of resize scales for each image in the batch.
"""
for i, batch_images in enumerate(self.batches):
batch_data = np.zeros(self.shape, dtype=self.dtype)
batch_scales = [None] * len(batch_images)
print("BATCH SCALES: ", batch_scales)
for i, image in enumerate(batch_images):
self.image_index += 1
batch_data[i], batch_scales[i] = self.preprocess_image(image)
self.batch_index += 1
yield batch_data, batch_images, batch_scales
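# Minimal usage sketch (assumes a local "./images" directory containing a few JPEG/PNG files;
# the shape and dtype below mirror a typical EfficientDet-D0 input and are only an example):
if __name__ == "__main__":
    batcher = ImageBatcher("./images", shape=(1, 512, 512, 3), dtype=np.float32)
    for batch, images, scales in batcher.get_batch():
        print("Batched {} image(s), first resize scale: {}".format(len(images), scales[0]))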
| TensorRT-master | samples/python/efficientdet/image_batcher.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import PIL.Image as Image
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
COLORS = ['GoldenRod', 'MediumTurquoise', 'GreenYellow', 'SteelBlue', 'DarkSeaGreen', 'SeaShell', 'LightGrey',
'IndianRed', 'DarkKhaki', 'LawnGreen', 'WhiteSmoke', 'Peru', 'LightCoral', 'FireBrick', 'OldLace',
'LightBlue', 'SlateGray', 'OliveDrab', 'NavajoWhite', 'PaleVioletRed', 'SpringGreen', 'AliceBlue', 'Violet',
'DeepSkyBlue', 'Red', 'MediumVioletRed', 'PaleTurquoise', 'Tomato', 'Azure', 'Yellow', 'Cornsilk',
'Aquamarine', 'CadetBlue', 'CornflowerBlue', 'DodgerBlue', 'Olive', 'Orchid', 'LemonChiffon', 'Sienna',
'OrangeRed', 'Orange', 'DarkSalmon', 'Magenta', 'Wheat', 'Lime', 'GhostWhite', 'SlateBlue', 'Aqua',
'MediumAquaMarine', 'LightSlateGrey', 'MediumSeaGreen', 'SandyBrown', 'YellowGreen', 'Plum', 'FloralWhite',
'LightPink', 'Thistle', 'DarkViolet', 'Pink', 'Crimson', 'Chocolate', 'DarkGrey', 'Ivory', 'PaleGreen',
'DarkGoldenRod', 'LavenderBlush', 'SlateGrey', 'DeepPink', 'Gold', 'Cyan', 'LightSteelBlue', 'MediumPurple',
'ForestGreen', 'DarkOrange', 'Tan', 'Salmon', 'PaleGoldenRod', 'LightGreen', 'LightSlateGray', 'HoneyDew',
'Fuchsia', 'LightSeaGreen', 'DarkOrchid', 'Green', 'Chartreuse', 'LimeGreen', 'AntiqueWhite', 'Beige',
'Gainsboro', 'Bisque', 'SaddleBrown', 'Silver', 'Lavender', 'Teal', 'LightCyan', 'PapayaWhip', 'Purple',
'Coral', 'BurlyWood', 'LightGray', 'Snow', 'MistyRose', 'PowderBlue', 'DarkCyan', 'White', 'Turquoise',
'MediumSlateBlue', 'PeachPuff', 'Moccasin', 'LightSalmon', 'SkyBlue', 'Khaki', 'MediumSpringGreen',
'BlueViolet', 'MintCream', 'Linen', 'SeaGreen', 'HotPink', 'LightYellow', 'BlanchedAlmond', 'RoyalBlue',
'RosyBrown', 'MediumOrchid', 'DarkTurquoise', 'LightGoldenRodYellow', 'LightSkyBlue']
def visualize_detections(image_path, output_path, detections, labels=[]):
image = Image.open(image_path).convert(mode='RGB')
draw = ImageDraw.Draw(image)
line_width = 2
font = ImageFont.load_default()
for d in detections:
color = COLORS[d['class'] % len(COLORS)]
draw.line([(d['xmin'], d['ymin']), (d['xmin'], d['ymax']), (d['xmax'], d['ymax']), (d['xmax'], d['ymin']),
(d['xmin'], d['ymin'])], width=line_width, fill=color)
label = "Class {}".format(d['class'])
if d['class'] < len(labels):
label = "{}".format(labels[d['class']])
score = d['score']
text = "{}: {}%".format(label, int(100 * score))
if score < 0:
text = label
text_width, text_height = font.getsize(text)
text_bottom = max(text_height, d['ymin'])
text_left = d['xmin']
margin = np.ceil(0.05 * text_height)
draw.rectangle([(text_left, text_bottom - text_height - 2 * margin), (text_left + text_width, text_bottom)],
fill=color)
draw.text((text_left + margin, text_bottom - text_height - margin), text, fill='black', font=font)
if output_path is None:
return image
image.save(output_path)
def concat_visualizations(images, names, colors, output_path):
def draw_text(draw, font, text, width, bar_height, offset, color):
text_width, text_height = font.getsize(text)
draw.rectangle([(offset, 0), (offset + width, bar_height)], fill=color)
draw.text((offset + (width - text_width) / 2, text_height - text_height / 2), text, fill='black', font=font)
bar_height = 18
width = 0
height = 0
for im in images:
width += im.width
height = max(height, im.height)
concat = Image.new('RGB', (width, height + bar_height))
draw = ImageDraw.Draw(concat)
font = ImageFont.load_default()
offset = 0
for i, im in enumerate(images):
concat.paste(im, (offset, bar_height))
draw_text(draw, font, names[i], im.width, bar_height, offset, colors[i])
offset += im.width
if output_path is None:
return concat
concat.save(output_path)
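# Minimal usage sketch (illustrative only; "input.jpg" is a placeholder image path and the box
# coordinates are made up, but the detection dictionary layout matches what infer.py produces):
if __name__ == "__main__":
    example_detections = [
        {'xmin': 50, 'ymin': 40, 'xmax': 200, 'ymax': 180, 'score': 0.9, 'class': 0},
    ]
    visualize_detections("input.jpg", "output.png", example_detections, labels=["person"])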
| TensorRT-master | samples/python/efficientdet/visualize.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
import ctypes
import argparse
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from image_batcher import ImageBatcher
from visualize import visualize_detections
class TensorRTInfer:
"""
Implements inference for the EfficientDet TensorRT engine.
"""
def __init__(self, engine_path):
"""
:param engine_path: The path to the serialized engine to load from disk.
"""
# Load TRT engine
self.logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(self.logger, namespace="")
with open(engine_path, "rb") as f, trt.Runtime(self.logger) as runtime:
self.engine = runtime.deserialize_cuda_engine(f.read())
self.context = self.engine.create_execution_context()
assert self.engine
assert self.context
# Setup I/O bindings
self.inputs = []
self.outputs = []
self.allocations = []
for i in range(self.engine.num_bindings):
is_input = False
if self.engine.binding_is_input(i):
is_input = True
name = self.engine.get_binding_name(i)
dtype = self.engine.get_binding_dtype(i)
shape = self.engine.get_binding_shape(i)
if is_input:
self.batch_size = shape[0]
size = np.dtype(trt.nptype(dtype)).itemsize
for s in shape:
size *= s
allocation = cuda.mem_alloc(size)
binding = {
'index': i,
'name': name,
'dtype': np.dtype(trt.nptype(dtype)),
'shape': list(shape),
'allocation': allocation,
}
self.allocations.append(allocation)
if self.engine.binding_is_input(i):
self.inputs.append(binding)
else:
self.outputs.append(binding)
assert self.batch_size > 0
assert len(self.inputs) > 0
assert len(self.outputs) > 0
assert len(self.allocations) > 0
def input_spec(self):
"""
Get the specs for the input tensor of the network. Useful to prepare memory allocations.
:return: Two items, the shape of the input tensor and its (numpy) datatype.
"""
return self.inputs[0]['shape'], self.inputs[0]['dtype']
def output_spec(self):
"""
Get the specs for the output tensors of the network. Useful to prepare memory allocations.
:return: A list with two items per element, the shape and (numpy) datatype of each output tensor.
"""
specs = []
for o in self.outputs:
specs.append((o['shape'], o['dtype']))
return specs
def infer(self, batch, scales=None, nms_threshold=None):
"""
Execute inference on a batch of images. The images should already be batched and preprocessed, as prepared by
the ImageBatcher class. Memory copying to and from the GPU device will be performed here.
:param batch: A numpy array holding the image batch.
        :param scales: The image resize scales for each image in this batch. Default: No scale postprocessing applied.
        :param nms_threshold: Optional score threshold below which detections are discarded. Default: keep all
        detections produced by the engine.
        :return: A nested list for each image in the batch and each detection in the list.
"""
# Prepare the output data
outputs = []
for shape, dtype in self.output_spec():
outputs.append(np.zeros(shape, dtype))
# Process I/O and execute the network
cuda.memcpy_htod(self.inputs[0]['allocation'], np.ascontiguousarray(batch))
self.context.execute_v2(self.allocations)
for o in range(len(outputs)):
cuda.memcpy_dtoh(outputs[o], self.outputs[o]['allocation'])
# Process the results
nums = outputs[0]
boxes = outputs[1]
scores = outputs[2]
classes = outputs[3]
detections = []
normalized = (np.max(boxes) < 2.0)
for i in range(self.batch_size):
detections.append([])
for n in range(int(nums[i])):
scale = self.inputs[0]['shape'][2] if normalized else 1.0
if scales and i < len(scales):
scale /= scales[i]
if nms_threshold and scores[i][n] < nms_threshold:
continue
detections[i].append({
'ymin': boxes[i][n][0] * scale,
'xmin': boxes[i][n][1] * scale,
'ymax': boxes[i][n][2] * scale,
'xmax': boxes[i][n][3] * scale,
'score': scores[i][n],
'class': int(classes[i][n]),
})
return detections
def main(args):
output_dir = os.path.realpath(args.output)
os.makedirs(output_dir, exist_ok=True)
labels = []
if args.labels:
with open(args.labels) as f:
for i, label in enumerate(f):
labels.append(label.strip())
trt_infer = TensorRTInfer(args.engine)
batcher = ImageBatcher(args.input, *trt_infer.input_spec())
for batch, images, scales in batcher.get_batch():
print("Processing Image {} / {}".format(batcher.image_index, batcher.num_images), end="\r")
detections = trt_infer.infer(batch, scales, args.nms_threshold)
for i in range(len(images)):
basename = os.path.splitext(os.path.basename(images[i]))[0]
# Image Visualizations
output_path = os.path.join(output_dir, "{}.png".format(basename))
visualize_detections(images[i], output_path, detections[i], labels)
# Text Results
output_results = ""
for d in detections[i]:
line = [d['xmin'], d['ymin'], d['xmax'], d['ymax'], d['score'], d['class']]
output_results += "\t".join([str(f) for f in line]) + "\n"
with open(os.path.join(args.output, "{}.txt".format(basename)), "w") as f:
f.write(output_results)
print()
print("Finished Processing")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--engine", default=None, help="The serialized TensorRT engine")
parser.add_argument("-i", "--input", default=None, help="Path to the image or directory to process")
parser.add_argument("-o", "--output", default=None, help="Directory where to save the visualization results")
parser.add_argument("-l", "--labels", default="./labels_coco.txt", help="File to use for reading the class labels "
"from, default: ./labels_coco.txt")
parser.add_argument("-t", "--nms_threshold", type=float, help="Override the score threshold for the NMS operation, "
"if higher than the threshold in the engine.")
args = parser.parse_args()
if not all([args.engine, args.input, args.output]):
parser.print_help()
print("\nThese arguments are required: --engine --input and --output")
sys.exit(1)
main(args)
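# Example invocation (illustrative; the engine, input and output paths below are placeholders):
#   python infer.py \
#       --engine /path/to/engine.trt \
#       --input /path/to/images \
#       --output /path/to/visualized_output \
#       --labels ./labels_coco.txt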
| TensorRT-master | samples/python/efficientdet/infer.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import onnx_graphsurgeon as gs
logging.basicConfig(level=logging.INFO)
logging.getLogger("EfficientDetHelper").setLevel(logging.INFO)
log = logging.getLogger("EfficientDetHelper")
@gs.Graph.register()
def elt_const(self, op, name, input, value):
"""
Add an element-wise operation to the graph which will operate on the input tensor with the value(s) given.
:param op: The ONNX operation to perform, i.e. "Add" or "Mul".
:param input: The tensor to operate on.
:param value: The value array to operate with.
:param name: The name to use for the node.
"""
input_tensor = input if type(input) is gs.Variable else input[0]
log.debug("Created {} node '{}': {}".format(op, name, value.squeeze()))
const = gs.Constant(name="{}_value:0".format(name), values=value)
return self.layer(name=name, op=op, inputs=[input_tensor, const], outputs=[name + ":0"])
@gs.Graph.register()
def unsqueeze(self, name, input, axes=[-1]):
"""
Adds to the graph an Unsqueeze node for the given axes and to the given input.
:param self: The gs.Graph object being extended.
:param name: The name to use for the node.
:param input: The tensor to be "unsqueezed".
:param axes: A list of axes on which to add the new dimension(s).
:return: The first output tensor, to allow chained graph construction.
"""
input_tensor = input if type(input) is gs.Variable else input[0]
log.debug("Created Unsqueeze node '{}': {}".format(name, axes))
return self.layer(name=name, op="Unsqueeze", inputs=[input_tensor], outputs=[name + ":0"], attrs={'axes': axes})
@gs.Graph.register()
def transpose(self, name, input, perm):
"""
Adds to the graph a Transpose node for the given axes permutation and to the given input.
:param self: The gs.Graph object being extended.
:param name: The name to use for the node.
:param input: The tensor to be transposed.
:param perm: A list of axes defining their order after transposing occurs.
:return: The first output tensor, to allow chained graph construction.
"""
input_tensor = input if type(input) is gs.Variable else input[0]
log.debug("Created Transpose node '{}': {}".format(name, perm))
return self.layer(name=name, op="Transpose", inputs=[input_tensor], outputs=[name + ":0"], attrs={'perm': perm})
@gs.Graph.register()
def sigmoid(self, name, input):
"""
Adds to the graph a Sigmoid node for the given input.
:param self: The gs.Graph object being extended.
:param name: The name to use for the node.
    :param input: The tensor to apply the Sigmoid activation to.
:return: The first output tensor, to allow chained graph construction.
"""
input_tensor = input if type(input) is gs.Variable else input[0]
log.debug("Created Sigmoid node '{}'".format(name))
return self.layer(name=name, op="Sigmoid", inputs=[input_tensor], outputs=[name + ":0"])
@gs.Graph.register()
def plugin(self, op, name, inputs, outputs, attrs):
"""
Adds to the graph a TensorRT plugin node with the given name, inputs and outputs. The attrs dictionary holds
attributes to be added to the plugin node.
:param self: The gs.Graph object being extended.
:param op: The registered name for the TensorRT plugin.
:param name: The name to use for the node.
    :param inputs: The list of tensors to use as inputs.
:param outputs: The list of tensors to use as outputs.
:param attrs: The dictionary to use as attributes.
:return: The first output tensor, to allow chained graph construction.
"""
input_tensors = inputs if type(inputs) is list else [inputs]
log.debug("Created TRT Plugin node '{}': {}".format(name, attrs))
return self.layer(op=op, name=name, inputs=input_tensors, outputs=outputs, attrs=attrs)
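# Usage sketch for the helpers registered above (illustrative only; "graph" is assumed to be a
# gs.Graph imported from an ONNX model, and the tensor/attribute variables are placeholders).
# This mirrors how create_onnx.py chains them when building the NMS plugin node:
#   scores = graph.sigmoid("nms/class_net_sigmoid", class_net_tensor)[0]
#   boxes = graph.unsqueeze("nms/box_net_reshape", box_decoder_tensor, axes=[2])[0]
#   graph.plugin(op="BatchedNMS_TRT", name="nms/non_maximum_suppression",
#                inputs=[boxes, scores], outputs=nms_outputs, attrs=nms_attrs)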
@gs.Graph.register()
def find_node_by_op(self, op):
"""
Finds the first node in the graph with the given operation name.
:param self: The gs.Graph object being extended.
:param op: The operation name to search for.
    :return: The first node found that performs that op, or None if no such node exists.
"""
for node in self.nodes:
if node.op == op:
return node
return None
@gs.Graph.register()
def find_descendant_by_op(self, node, op, depth=10):
"""
    Starting from the given node, finds a node lower in the graph matching the given operation name. This is not an
    exhaustive graph search; it will take only the first output of each node traversed while searching depth-first.
:param self: The gs.Graph object being extended.
:param node: The node to start searching from.
:param op: The operation name to search for.
    :param depth: Stop searching after traversing this many nodes.
    :return: The first descendant node found that performs that op, or None if none is found within the given depth.
"""
for i in range(depth):
node = node.o()
if node.op == op:
return node
return None
@gs.Graph.register()
def find_ancestor_by_op(self, node, op, depth=10):
"""
    Starting from the given node, finds a node higher in the graph matching the given operation name. This is not an
    exhaustive graph search; it will take only the first input of each node traversed while searching depth-first.
:param self: The gs.Graph object being extended.
:param node: The node to start searching from.
:param op: The operation name to search for.
    :param depth: Stop searching after traversing this many nodes.
    :return: The first ancestor node found that performs that op, or None if none is found within the given depth.
"""
for i in range(depth):
node = node.i()
if node.op == op:
return node
    return None
| TensorRT-master | samples/python/efficientdet/onnx_utils.py
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import numpy as np
from infer import TensorRTInfer
from image_batcher import ImageBatcher
def main(args):
automl_path = os.path.realpath(args.automl_path)
sys.path.insert(1, os.path.join(automl_path, "efficientdet"))
try:
import coco_metric
except ImportError:
print("Could not import the 'coco_metric' module from AutoML. Searching in: {}".format(automl_path))
print("Please clone the repository https://github.com/google/automl and provide its path with --automl_path.")
sys.exit(1)
trt_infer = TensorRTInfer(args.engine)
batcher = ImageBatcher(args.input, *trt_infer.input_spec())
evaluator = coco_metric.EvaluationMetric(filename=args.annotations)
for batch, images, scales in batcher.get_batch():
print("Processing Image {} / {}".format(batcher.image_index, batcher.num_images), end="\r")
detections = trt_infer.infer(batch, scales, args.nms_threshold)
coco_det = np.zeros((len(images), max([len(d) for d in detections]), 7))
coco_det[:, :, -1] = -1
for i in range(len(images)):
for n in range(len(detections[i])):
source_id = int(os.path.splitext(os.path.basename(images[i]))[0])
det = detections[i][n]
coco_det[i][n] = [
source_id,
det['xmin'],
det['ymin'],
det['xmax'] - det['xmin'],
det['ymax'] - det['ymin'],
det['score'],
det['class'] + 1, # The COCO evaluator expects class 0 to be background, so offset by 1
]
evaluator.update_state(None, coco_det)
print()
evaluator.result(100)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--engine", help="The TensorRT engine to infer with")
parser.add_argument("-i", "--input",
help="The input to infer, either a single image path, or a directory of images")
parser.add_argument("-a", "--annotations", help="Set the path to the COCO 'instances_val2017.json' file")
parser.add_argument("-p", "--automl_path", default="./automl",
help="Set the path where to find the AutoML repository, from "
"https://github.com/google/automl. Default: ./automl")
parser.add_argument("-t", "--nms_threshold", type=float, help="Override the score threshold for the NMS operation, "
"if higher than the threshold in the engine.")
args = parser.parse_args()
if not all([args.engine, args.input, args.annotations]):
parser.print_help()
print("\nThese arguments are required: --engine --input and --annotations")
sys.exit(1)
main(args)
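# Example invocation (illustrative; the engine and COCO paths below are placeholders, and the
# AutoML repository must be cloned separately from https://github.com/google/automl):
#   python eval_coco.py \
#       --engine /path/to/engine.trt \
#       --input /path/to/coco/val2017 \
#       --annotations /path/to/coco/annotations/instances_val2017.json \
#       --automl_path ./automl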
| TensorRT-master | samples/python/efficientdet/eval_coco.py |