python_code | repo_name | file_path
---|---|---|
# std
import os
import sys
from typing import Dict, List, Tuple
# Add syspath for custom library
if __name__ == "__main__":
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, os.pardir)
sys.path.append(project_root)
# numpy
import numpy as np
# torch
import torch
# huggingface
from transformers import T5Tokenizer, T5Config
from transformers.modeling_outputs import Seq2SeqLMOutput
from transformers.configuration_utils import PretrainedConfig
from transformers.generation_utils import GenerationMixin
# TensorRT
import tensorrt as trt
# TRT-HuggingFace
from NNDF.interface import TRTInferenceCommand
from NNDF.networks import (
NetworkMetadata,
NetworkModels,
NetworkModel,
NetworkResult,
NetworkRuntime,
Precision,
TimingProfile,
)
from NNDF.tensorrt_utils import TRTNativeRunner
from NNDF.general_utils import NNFolderWorkspace
from T5.frameworks import T5FHuggingFace
from T5.T5ModelConfig import T5ModelTRTConfig
from T5.measurements import decoder_inference, encoder_inference, full_inference_greedy
from T5.export import T5DecoderONNXFile, T5EncoderONNXFile
from NNDF.models import TRTEngineFile
class TRTHFRunner(TRTNativeRunner, GenerationMixin):
"""Runner that adds interop support for HF and HF provided greedy_search functions."""
# Stores the encoder input length received at runtime, which is used to slice decoder inputs.
ENCODER_LENGTH = 0
def _allocate_memory(self, input_dict: Dict[str, np.ndarray], output_dict: Dict[str, np.ndarray]):
"""Helper function for binding several inputs at once and pre-allocating the results."""
bindings = [None] * self.trt_engine.num_bindings
for input_name, input_array in input_dict.items():
# Allocate memory for inputs
input_idx = self.trt_engine.get_binding_index(input_name)
self.trt_context.set_binding_shape(input_idx, input_array.shape)
bindings[input_idx] = input_array.data_ptr()
assert self.trt_context.all_binding_shapes_specified
for output_name, output_array in output_dict.items():
# Output shape should be allocated from context size
output_idx = self.trt_engine.get_binding_index(output_name)
bindings[output_idx] = output_array.data_ptr()
return bindings
def __init__(
self,
trt_engine_file: TRTEngineFile,
network_metadata: NetworkMetadata,
hf_config: PretrainedConfig,
):
super().__init__(trt_engine_file, network_metadata)
self.config = hf_config
class T5TRTEncoder(TRTHFRunner):
"""TRT implemented network interface that can be used to measure inference time."""
def __init__(
self,
trt_engine_file: str,
network_metadata: NetworkMetadata,
hf_config: PretrainedConfig,
):
super().__init__(trt_engine_file, network_metadata, hf_config)
self.max_sequence_length = T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[network_metadata.variant]
# For T5, we only have one profile to optimize
assert len(trt_engine_file.get_dynamic_shape_profiles()) == 1, "T5 should only have one dynamic shapes profile."
# We only have one profile to select so we can just grab the profile at the start of the class
self.profile_idx = self.get_optimization_profile(batch_size=1, sequence_length=1)
self.inputs = {
"input_ids": torch.zeros(1, self.max_sequence_length, dtype=torch.int32).cuda()
}
self.outputs = {
"hidden_states": torch.zeros(1, self.max_sequence_length, self.max_sequence_length, dtype=torch.float32).cuda()
}
self.bindings = self._allocate_memory(self.inputs, self.outputs)
def forward(self, input_ids, *args, **kwargs):
TRTHFRunner.ENCODER_LENGTH = input_ids.shape[1]
self.inputs["input_ids"][:, :input_ids.shape[1]] = input_ids
self.trt_context.set_binding_shape(0, input_ids.shape)
self.trt_context.execute_v2(bindings=self.bindings)
# No need to copy the encoder output back to the CPU; the decoder consumes it directly on the GPU
return self.outputs["hidden_states"]
class T5TRTDecoder(TRTHFRunner):
def __init__(
self,
trt_engine_file: str,
network_metadata: NetworkMetadata,
hf_config: PretrainedConfig,
):
super().__init__(trt_engine_file, network_metadata, hf_config)
self.max_sequence_length = T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[network_metadata.variant]
# For T5, we only have one profile to optimize
assert len(trt_engine_file.get_dynamic_shape_profiles()) == 1, "T5 should only have one dynamic shapes profile."
# We only have one profile to select so we can just grab the profile at the start of the class
self.profile_idx = self.get_optimization_profile(batch_size=1, sequence_length=1)
self.inputs = {
"input_ids": torch.zeros(1, self.max_sequence_length, dtype=torch.int32).cuda(),
"encoder_hidden_states": torch.zeros(1, self.max_sequence_length, self.max_sequence_length, dtype=torch.float32).cuda()
}
self.outputs = {
"hidden_states": torch.zeros(1, self.max_sequence_length, 32128, dtype=torch.float32).cuda()
}
self.bindings = self._allocate_memory(self.inputs, self.outputs)
def forward(self, input_ids, encoder_hidden_states, *args, **kwargs):
self.inputs["input_ids"][:, :input_ids.shape[1]] = input_ids
self.inputs["encoder_hidden_states"][:, :TRTHFRunner.ENCODER_LENGTH, :] = encoder_hidden_states[:, :TRTHFRunner.ENCODER_LENGTH, :]
# TODO: This can be better generalized
self.trt_context.set_binding_shape(0, input_ids.shape)
self.trt_context.set_binding_shape(1, (1, TRTHFRunner.ENCODER_LENGTH, self.max_sequence_length))
# Copy to device
self.trt_context.execute_v2(bindings=self.bindings)
# Transfer predictions back from GPU to do greedy search
return Seq2SeqLMOutput(logits=self.outputs["hidden_states"][:, :input_ids.shape[1]].cpu())
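# GenerationMixin hook: greedy_search() calls prepare_inputs_for_generation() at every decoding step
# to assemble the kwargs passed to forward(), which is how the encoder hidden states are threaded through.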
def prepare_inputs_for_generation(self, input_ids, **kwargs):
return {
"input_ids": input_ids,
"encoder_hidden_states": kwargs["encoder_hidden_states"],
}
class T5TRT(TRTInferenceCommand):
def __init__(self):
super().__init__(
T5ModelTRTConfig,
"Runs trt results for T5 model.",
T5FHuggingFace,
)
self.t5_trt_decoder = None
self.t5_trt_encoder = None
def cleanup(
self,
workspace: NNFolderWorkspace,
keep_trt_engine: bool = False,
keep_onnx_model: bool = False,
keep_torch_model: bool = False,
) -> None:
# Deactivates context
if self.t5_trt_encoder:
self.t5_trt_encoder.release()
if self.t5_trt_decoder:
self.t5_trt_decoder.release()
if not keep_trt_engine:
self.t5_trt_encoder_engine.cleanup()
self.t5_trt_decoder_engine.cleanup()
self.frameworks_cmd.cleanup(workspace, keep_onnx_model, keep_torch_model)
def execute_inference(
self,
metadata: NetworkMetadata,
onnx_fpaths: Dict[str, NetworkModel],
inference_input: str,
timing_profile: TimingProfile,
) -> NetworkResult:
tokenizer = T5Tokenizer.from_pretrained(metadata.variant)
input_ids = tokenizer(inference_input, return_tensors="pt").input_ids
encoder_last_hidden_state, encoder_e2e_median_time = encoder_inference(
self.t5_trt_encoder, input_ids, timing_profile
)
_, decoder_e2e_median_time = decoder_inference(
self.t5_trt_decoder,
input_ids,
encoder_last_hidden_state,
timing_profile,
use_cuda=False,
)
decoder_output_greedy, full_e2e_median_runtime = full_inference_greedy(
self.t5_trt_encoder,
self.t5_trt_decoder,
input_ids,
tokenizer,
timing_profile,
max_length=T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[metadata.variant],
use_cuda=False,
)
# Remove the padding and end tokens.
semantic_outputs = tokenizer.convert_ids_to_tokens(
decoder_output_greedy.tolist()[0]
)[1:-1]
remove_underscore = "".join(
[s.replace("\u2581", " ") for s in semantic_outputs]
)
return NetworkResult(
input=inference_input,
output_tensor=encoder_last_hidden_state,
semantic_output=remove_underscore.strip(),
median_runtime=[
NetworkRuntime(
name=T5ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
runtime=decoder_e2e_median_time,
),
NetworkRuntime(
name=T5ModelTRTConfig.NETWORK_ENCODER_SEGMENT_NAME,
runtime=encoder_e2e_median_time,
),
NetworkRuntime(
name=T5ModelTRTConfig.NETWORK_FULL_NAME,
runtime=full_e2e_median_runtime,
),
],
models=NetworkModels(
torch=None,
onnx=list(onnx_fpaths.values()),
trt=[
NetworkModel(
name=T5ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
fpath=self.t5_trt_decoder_engine.fpath,
),
NetworkModel(
name=T5ModelTRTConfig.NETWORK_ENCODER_SEGMENT_NAME,
fpath=self.t5_trt_encoder_engine.fpath,
),
],
),
)
def run_trt(
self,
metadata: NetworkMetadata,
onnx_fpaths: Tuple[NetworkModel],
network_input: List[str],
working_directory: str,
keep_trt_engine: bool,
keep_onnx_model: bool,
keep_torch_model: bool,
timing_profile: TimingProfile,
) -> List[NetworkResult]:
workspace = NNFolderWorkspace(
self.frameworks_cmd.config.network_name, metadata, working_directory
)
results = []
try:
# no fpath provided for onnx files, download them
if len(onnx_fpaths) == 0:
onnx_fpaths = self.frameworks_cmd.generate_and_download_framework(
metadata, workspace
).onnx
else:
keep_onnx_model = True
keep_torch_model = True
# Output networks shall not exceed the number of network segments explicitly defined by the configuration file.
assert len(onnx_fpaths) == len(
T5ModelTRTConfig.NETWORK_SEGMENTS
), "There should only be {} exported ONNX segments in T5 model.".format(
len(T5ModelTRTConfig.NETWORK_SEGMENTS)
)
hash_onnx_fpath = {v.name: v for v in onnx_fpaths}
decoder_onnx_fpath = hash_onnx_fpath[
T5ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME
].fpath
encoder_onnx_fpath = hash_onnx_fpath[
T5ModelTRTConfig.NETWORK_ENCODER_SEGMENT_NAME
].fpath
self.t5_trt_encoder_engine = T5EncoderONNXFile(
encoder_onnx_fpath, metadata
).as_trt_engine(encoder_onnx_fpath + ".engine")
self.t5_trt_decoder_engine = T5DecoderONNXFile(
decoder_onnx_fpath, metadata
).as_trt_engine(decoder_onnx_fpath + ".engine")
tfm_config = T5Config(
use_cache=metadata.other.kv_cache,
num_layers=T5ModelTRTConfig.NUMBER_OF_LAYERS[metadata.variant],
)
self.t5_trt_encoder = T5TRTEncoder(
self.t5_trt_encoder_engine, metadata, tfm_config
)
self.t5_trt_decoder = T5TRTDecoder(
self.t5_trt_decoder_engine, metadata, tfm_config
)
for ninput in network_input:
results.append(
self.execute_inference(
metadata, hash_onnx_fpath, ninput, timing_profile
)
)
finally:
self.cleanup(workspace, keep_trt_engine, keep_onnx_model, keep_torch_model)
return results
def add_args(self, parser) -> None:
super().add_args(parser)
polygraphy_group = parser.add_argument_group("polygraphy models")
polygraphy_group.add_argument(
"--onnx-decoder-fpath",
default=None,
help="Path to ONNX decoder. If None is supplied, scripts will generate them from HuggingFace.",
)
polygraphy_group.add_argument(
"--onnx-encoder-fpath",
default=None,
help="Path to ONNX encoder. If None is supplied, scripts will generate them from HuggingFace.",
)
def args_to_network_models(self, args) -> List[NetworkModel]:
# Check if both flags are given otherwise error out
decoder_fpath_check = args.onnx_decoder_fpath is None
encoder_fpath_check = args.onnx_encoder_fpath is None
network_models = None
if decoder_fpath_check and encoder_fpath_check:
network_models = tuple()
elif decoder_fpath_check or encoder_fpath_check:
raise self._parser.error(
"Both --onnx-decoder-fpath and --onnx-encoder-fpath must be given. Otherwise neither should be provided for script to download them."
)
else:
onnx_decoder = NetworkModel(
name=T5ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
fpath=args.onnx_decoder_fpath,
)
onnx_encoder = NetworkModel(
name=T5ModelTRTConfig.NETWORK_ENCODER_SEGMENT_NAME,
fpath=args.onnx_encoder_fpath,
)
network_models = (onnx_decoder, onnx_encoder)
return network_models
def args_to_network_metadata(self, args) -> NetworkMetadata:
frameworks_parsed_metadata = self.frameworks_cmd.args_to_network_metadata(args)
return NetworkMetadata(
variant=frameworks_parsed_metadata.variant,
precision=Precision(fp16=args.fp16),
other=frameworks_parsed_metadata.other,
)
RUN_CMD = T5TRT()
if __name__ == "__main__":
result = RUN_CMD()
print("Results: {}".format(result))
| TensorRT-master | demo/HuggingFace/T5/trt.py |
# std
import os
import sys
from typing import List
# huggingface
from transformers import (
T5ForConditionalGeneration,
T5Tokenizer,
T5Config,
)
# Add syspath for custom library
if __name__ == "__main__":
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, os.pardir)
sys.path.append(project_root)
# TRT-HuggingFace
from NNDF.interface import FrameworkCommand
from NNDF.networks import (
NetworkResult,
NetworkMetadata,
NetworkRuntime,
NetworkModels,
NetworkModel,
TimingProfile,
)
from T5.export import T5EncoderTorchFile, T5DecoderTorchFile
from T5.T5ModelConfig import T5ModelTRTConfig
from T5.measurements import decoder_inference, encoder_inference, full_inference_greedy
from NNDF.general_utils import confirm_folder_delete, NNFolderWorkspace
class T5FHuggingFace(FrameworkCommand):
def __init__(self):
super().__init__(
T5ModelTRTConfig, description="Runs framework results for T5 model."
)
self.onnx_t5_encoder = None
self.onnx_t5_decoder = None
self.torch_t5_dir = None
def generate_and_download_framework(
self, metadata: NetworkMetadata, workspace: NNFolderWorkspace
) -> NetworkModels:
cache_variant = False
if metadata.other.kv_cache:
cache_variant = True
trt_t5_config = self.config
metadata_serialized = trt_t5_config.get_metadata_string(metadata)
workspace_dir = workspace.get_path()
pytorch_model_dir = os.path.join(workspace_dir, metadata_serialized)
# We keep track of the generated torch location for cleanup later
self.torch_t5_dir = pytorch_model_dir
model = None
tfm_config = T5Config(
use_cache=cache_variant,
num_layers=T5ModelTRTConfig.NUMBER_OF_LAYERS[metadata.variant],
)
if not os.path.exists(pytorch_model_dir):
# Generate the pre-trained weights
model = T5ForConditionalGeneration(tfm_config).from_pretrained(
metadata.variant
)
model.save_pretrained(pytorch_model_dir)
print("Pytorch Model saved to {}".format(pytorch_model_dir))
else:
print(
"Frameworks file already exists, skipping generation and loading from file instead."
)
model = T5ForConditionalGeneration(tfm_config).from_pretrained(
pytorch_model_dir
)
# These ONNX models can be converted using special encoder and decoder classes.
root_onnx_model_name = "{}.onnx".format(metadata_serialized)
root_onnx_model_fpath = os.path.join(
os.getcwd(), workspace_dir, root_onnx_model_name
)
encoder_onnx_model_fpath = root_onnx_model_fpath + "-encoder.onnx"
decoder_onnx_model_fpath = root_onnx_model_fpath + "-decoder-with-lm-head.onnx"
t5_encoder = T5EncoderTorchFile(model, metadata)
t5_decoder = T5DecoderTorchFile(model, metadata)
self.onnx_t5_encoder = t5_encoder.as_onnx_model(
encoder_onnx_model_fpath, force_overwrite=False
)
self.onnx_t5_decoder = t5_decoder.as_onnx_model(
decoder_onnx_model_fpath, force_overwrite=False
)
onnx_models = [
NetworkModel(
name=T5ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
fpath=self.onnx_t5_decoder.fpath,
),
NetworkModel(
name=T5ModelTRTConfig.NETWORK_ENCODER_SEGMENT_NAME,
fpath=self.onnx_t5_encoder.fpath,
),
]
torch_models = [
NetworkModel(
name=T5ModelTRTConfig.NETWORK_FULL_NAME, fpath=pytorch_model_dir
)
]
return NetworkModels(torch=torch_models, onnx=onnx_models, trt=None)
def cleanup(
self,
workspace: NNFolderWorkspace,
keep_onnx_model: bool = True,
keep_pytorch_model: bool = True,
) -> None:
"""
Cleans up the working directory and leaves models if available.
Should not assume any functions from the framework class have been called.
Return:
None
"""
# Clean-up generated files
if not keep_onnx_model:
if self.onnx_t5_decoder is not None:
self.onnx_t5_decoder.cleanup()
if self.onnx_t5_encoder is not None:
self.onnx_t5_encoder.cleanup()
# Remove any onnx external files by removing integer named values and weight files
workspace_path = workspace.get_path()
for d in os.listdir(workspace_path):
fpath = os.path.join(workspace_path, d)
if os.path.isfile(fpath) and os.path.splitext(d)[1] == ".weight":
os.remove(fpath)
elif d.isnumeric():
os.remove(fpath)
if not keep_pytorch_model:
# Using rmtree can be dangerous, have user confirm before deleting.
confirm_folder_delete(
self.torch_t5_dir,
prompt="Confirm you want to delete downloaded pytorch model folder?",
)
if not keep_pytorch_model and not keep_onnx_model:
workspace.cleanup(force_remove=False)
def execute_inference(
self,
metadata: NetworkMetadata,
network_fpaths: NetworkModels,
inference_input: str,
timing_profile: TimingProfile,
) -> NetworkResult:
# Execute some tests
tokenizer = T5Tokenizer.from_pretrained(metadata.variant)
input_ids = tokenizer(inference_input, return_tensors="pt").input_ids
# By default, huggingface model structure is one giant file.
t5_torch_fpath = network_fpaths.torch[0].fpath
config = T5Config(
use_cache=metadata.other.kv_cache,
num_layers=T5ModelTRTConfig.NUMBER_OF_LAYERS[metadata.variant],
)
t5_model = T5ForConditionalGeneration(config).from_pretrained(t5_torch_fpath)
t5_torch_encoder = T5EncoderTorchFile.TorchModule(t5_model.encoder)
t5_torch_decoder = T5DecoderTorchFile.TorchModule(
t5_model.decoder, t5_model.lm_head, t5_model.config
)
encoder_last_hidden_state, encoder_e2e_median_time = encoder_inference(
t5_torch_encoder, input_ids, timing_profile
)
_, decoder_e2e_median_time = decoder_inference(
t5_torch_decoder, input_ids, encoder_last_hidden_state, timing_profile
)
decoder_output_greedy, full_e2e_median_runtime = full_inference_greedy(
t5_torch_encoder,
t5_torch_decoder,
input_ids,
tokenizer,
timing_profile,
max_length=T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[metadata.variant],
)
# Remove the padding and end tokens.
semantic_outputs = tokenizer.convert_ids_to_tokens(
decoder_output_greedy.tolist()[0]
)[1:-1]
remove_underscore = "".join(
[s.replace("\u2581", " ") for s in semantic_outputs]
)
return NetworkResult(
input=inference_input,
output_tensor=encoder_last_hidden_state,
semantic_output=remove_underscore.strip(),
median_runtime=[
NetworkRuntime(
name=T5ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
runtime=decoder_e2e_median_time,
),
NetworkRuntime(
name=T5ModelTRTConfig.NETWORK_ENCODER_SEGMENT_NAME,
runtime=encoder_e2e_median_time,
),
NetworkRuntime(
name=T5ModelTRTConfig.NETWORK_FULL_NAME,
runtime=full_e2e_median_runtime,
),
],
models=network_fpaths,
)
def run_framework(
self,
metadata: NetworkMetadata,
network_input: List[str],
working_directory: str,
keep_onnx_model: bool,
keep_pytorch_model: bool,
timing_profile: TimingProfile,
) -> List[NetworkResult]:
"""
Main entry point of our function which compiles and generates our model data.
"""
results = []
workspace = NNFolderWorkspace(
self.config.network_name, metadata, working_directory
)
try:
network_fpaths = self.generate_and_download_framework(metadata, workspace)
for ninput in network_input:
results.append(
self.execute_inference(
metadata, network_fpaths, ninput, timing_profile
)
)
finally:
self.cleanup(workspace, keep_onnx_model, keep_pytorch_model)
return results
# Entry point
RUN_CMD = T5FHuggingFace()
if __name__ == "__main__":
result = RUN_CMD()
print("Results: {}".format(result))
| TensorRT-master | demo/HuggingFace/T5/frameworks.py |
"""
Utils specific to T5 network.
"""
# torch
import torch
# numpy
import numpy as np
# huggingface
from transformers.generation_stopping_criteria import (
MaxLengthCriteria,
StoppingCriteriaList,
)
# TRT-HuggingFace
from NNDF.general_utils import measure_python_inference_code
from NNDF.torch_utils import use_cuda
@use_cuda
def decoder_inference(
t5_decoder, input_ids, encoder_last_hidden_state, timing_profile, use_cuda=True
):
decoder_stmt = lambda: t5_decoder(
input_ids=input_ids, encoder_hidden_states=encoder_last_hidden_state
)
decoder_e2e_median_time = measure_python_inference_code(
decoder_stmt, number=timing_profile.number, iterations=timing_profile.iterations
)
return (decoder_stmt(), decoder_e2e_median_time)
@use_cuda
def encoder_inference(t5_encoder, input_ids, timing_profile, use_cuda=True):
encoder_stmt = lambda: t5_encoder(input_ids=input_ids)
encoder_e2e_median_time = measure_python_inference_code(
encoder_stmt, number=timing_profile.number, iterations=timing_profile.iterations
)
return (encoder_stmt(), encoder_e2e_median_time)
# Code specifically for Pythonic inference measurement used across all T5 related scripts
@use_cuda
def full_inference_greedy(
t5_encoder,
t5_decoder,
input_ids,
tokenizer,
timing_profile,
max_length,
use_cuda=True,
):
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length)])
decoder_input_ids = torch.full(
(1, 1), tokenizer.convert_tokens_to_ids(tokenizer.pad_token), dtype=torch.int32
)
if use_cuda:
decoder_input_ids = decoder_input_ids.to("cuda")
def _e2e():
encoder_last_hidden_state = t5_encoder(input_ids=input_ids)
return t5_decoder.greedy_search(
input_ids=decoder_input_ids,
encoder_hidden_states=encoder_last_hidden_state,
stopping_criteria=stopping_criteria,
)
full_e2e_median_time = measure_python_inference_code(
_e2e,
number=timing_profile.number,
iterations=timing_profile.iterations,
)
return (_e2e(), full_e2e_median_time)
| TensorRT-master | demo/HuggingFace/T5/measurements.py |
"""Utilities related to Polygraphy"""
# std
from typing import List
# polygraphy
from polygraphy.backend.trt import engine_from_bytes, TrtRunner
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.common import bytes_from_path
from polygraphy.logger import G_LOGGER as PG_LOGGER
# tensorrt
import tensorrt as trt
# ONNX
import onnx
import onnx_graphsurgeon as gs
# numpy
import numpy as np
# NNDF
from NNDF.networks import NetworkMetadata
from NNDF.models import TRTEngineFile
from NNDF.logger import G_LOGGER
# Helper Functions
def clamp_weights_onnx(onnx_input_fpath: str, onnx_output_fpath: str, min: float, max: float, ignore_nodes: List = None):
"""
Clamps given onnx model to targeted upper and lower bounds.
"""
graph = gs.import_onnx(onnx.load(onnx_input_fpath))
if ignore_nodes is None:
ignore_nodes = {}
else:
ignore_nodes = {k: True for k in ignore_nodes}
for tensor in graph.tensors().values():
if tensor.name in ignore_nodes or isinstance(tensor, gs.ir.tensor.Variable):
continue
np.clip(tensor.values, min, max, out=tensor.values)
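# Constant nodes store their weights in a "value" attribute rather than as graph tensors, so clamp those too.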
for node in graph.nodes:
node_attr = node.attrs.get("value", None)
if node.name in ignore_nodes:
continue
if node_attr is not None:
np.clip(node_attr.values, min, max, out=node_attr.values)
model = gs.export_onnx(graph)
onnx.save(model, onnx_output_fpath)
def clamp_weights_onnx_to_fp16_bounds(onnx_input_fpath: str, onnx_output_fpath: str, ignore_nodes: List = None):
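# 65504 is the largest finite value representable in IEEE 754 half precision, hence the clamp bounds.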
upper_bound = 65504
return clamp_weights_onnx(onnx_input_fpath, onnx_output_fpath, -upper_bound, upper_bound, ignore_nodes)
# Helper Classes
class TRTNativeRunner:
"""TRTNativeRunner avoids the high overheads with Polygraphy runner providing performance comparable to C++ implementation."""
def __init__(self, trt_engine_file: TRTEngineFile, network_metadata: NetworkMetadata):
self.trt_engine_file = trt_engine_file
trt_logger = trt.Logger(trt.Logger.VERBOSE if G_LOGGER.root.level == G_LOGGER.DEBUG else trt.Logger.WARNING)
G_LOGGER.info("Reading and loading engine file {} using trt native runner.".format(self.trt_engine_file.fpath))
with open(self.trt_engine_file.fpath, "rb") as f:
self.trt_runtime = trt.Runtime(trt_logger)
self.trt_engine = self.trt_runtime.deserialize_cuda_engine(f.read())
self.trt_context = self.trt_engine.create_execution_context()
# By default set optimization profile to 0
self.profile_idx = 0
# Other metadata required by the profile
self._num_bindings_per_profile = self.trt_engine.num_bindings // self.trt_engine.num_optimization_profiles
G_LOGGER.debug("Number of profiles detected in engine: {}".format(self._num_bindings_per_profile))
def release(self):
pass
def get_optimization_profile(self, batch_size, sequence_length):
"""Provided helper function to obtain a profile optimization."""
# Select an optimization profile
# inspired by demo/BERT/inference.py script
selected_profile_idx = None
for idx in range(self.trt_engine.num_optimization_profiles):
profile_shape = self.trt_engine.get_profile_shape(profile_index=idx, binding=idx * self._num_bindings_per_profile)
if profile_shape[0][0] <= batch_size and profile_shape[2][0] >= batch_size \
and profile_shape[0][1] <= sequence_length and profile_shape[2][1] >= sequence_length:
G_LOGGER.debug("Selected profile: {}".format(profile_shape))
selected_profile_idx = idx
break
if selected_profile_idx is None:
raise RuntimeError("Could not find any profile that matches batch_size={}, sequence_length={}".format(batch_size, sequence_length))
return selected_profile_idx
def __call__(self, *args, **kwargs):
self.trt_context.active_optimization_profile = self.profile_idx
return self.forward(*args, **kwargs)
class PolygraphyOnnxRunner:
def __init__(self, onnx_fpath: str, network_metadata: NetworkMetadata):
self.network_metadata = network_metadata
self.trt_session = SessionFromOnnx(onnx_fpath)
self.trt_context = OnnxrtRunner(self.trt_session)
self.trt_context.activate()
def __call__(self, *args, **kwargs):
# hook polygraphy verbosity for inference
g_logger_verbosity = (
G_LOGGER.EXTRA_VERBOSE
if G_LOGGER.root.level == G_LOGGER.DEBUG
else G_LOGGER.WARNING
)
with PG_LOGGER.verbosity(g_logger_verbosity):
return self.forward(*args, **kwargs)
def release(self):
self.trt_context.deactivate()
class TRTPolygraphyRunner:
"""
TRT implemented network interface that can be used to measure inference time.
Easier to use but carries higher runtime overhead. Recommend using TRTNativeRunner for better performance.
"""
def __init__(self, engine_fpath: str, network_metadata: NetworkMetadata):
self.network_metadata = network_metadata
self.trt_engine = engine_from_bytes(bytes_from_path(engine_fpath))
self.trt_context = TrtRunner(self.trt_engine.create_execution_context())
self.trt_context.activate()
def __call__(self, *args, **kwargs):
# hook polygraphy verbosity for inference
g_logger_verbosity = (
G_LOGGER.EXTRA_VERBOSE
if G_LOGGER.root.level == G_LOGGER.DEBUG
else G_LOGGER.WARNING
)
with PG_LOGGER.verbosity(g_logger_verbosity):
return self.forward(*args, **kwargs)
def release(self):
self.trt_context.deactivate()
| TensorRT-master | demo/HuggingFace/NNDF/tensorrt_utils.py |
"""Common utils used by demo folder."""
# std
import os
import shutil
import timeit
from shutil import rmtree
from typing import Callable, Union, List
from collections import defaultdict
from statistics import mean, median
from glob import glob
# NNDF
from NNDF.networks import NNConfig, NetworkResult, NetworkMetadata
from NNDF.logger import G_LOGGER
# Used for HuggingFace setting random seed
RANDOM_SEED = 42
# Networks #
def register_network_folders(
root_dir: str, config_file_str: str = "*Config.py"
) -> List[str]:
networks = []
for network_configs in glob(os.path.join(root_dir, "*", config_file_str)):
network_name = os.path.split(os.path.split(network_configs)[0])[1]
networks.append(network_name)
return networks
def process_results(category: List[str], results: List[NetworkResult], nconfig: NNConfig):
"""
Calculate and process results across multiple runs.
"""
general_stats = ["script", "accuracy"]
runtime_result_row_names = list(nconfig.NETWORK_SEGMENTS)
if nconfig.NETWORK_FULL_NAME not in nconfig.NETWORK_SEGMENTS:
runtime_result_row_names.append(nconfig.NETWORK_FULL_NAME)
rows = []
row_entry = []
for cat, result in zip(category, results):
# Process runtime results for each group
runtime_results = defaultdict(list)
for runtimes in [nr.median_runtime for nr in result.network_results]:
for runtime in runtimes:
runtime_results[runtime.name].append(runtime.runtime)
# Calculate average runtime for each group
average_group_runtime = {k: mean(v) for k, v in runtime_results.items()}
row_entry = [cat, result.accuracy] + [
average_group_runtime[n] for n in runtime_result_row_names
]
rows.append(row_entry)
headers = general_stats + [r + " (sec)" for r in runtime_result_row_names]
return headers, rows
# IO #
def confirm_folder_delete(
fpath: str, prompt: str = "Confirm you want to delete entire folder?"
) -> None:
"""
Confirms whether or not user wants to delete given folder path.
Args:
fpath (str): Path to folder.
prompt (str): Prompt to display
Returns:
None
"""
msg = prompt + " {} [Y/n] ".format(fpath)
confirm = input(msg)
if confirm == "Y":
rmtree(fpath)
else:
G_LOGGER.info("Skipping file removal.")
def remove_if_empty(
fpath: str,
success_msg: str = "Folder successfully removed.",
error_msg: str = "Folder cannot be removed, there are files.",
) -> None:
"""
Removes an entire folder if folder is empty. Provides print info statements.
Args:
fpath: Location to folder
success_msg: Success message.
error_msg: Error message.
Returns:
None
"""
if len(os.listdir(fpath)) == 0:
os.rmdir(fpath)
G_LOGGER.info(success_msg + " {}".format(fpath))
else:
G_LOGGER.info(error_msg + " {}".format(fpath))
def measure_python_inference_code(
stmt: Union[Callable, str], warmup: int = 3, number: int = 10, iterations: int = 10
) -> float:
"""
Measures the time it takes to run Pythonic inference code.
Statement given should be the actual model inference like forward() in torch.
See timeit for more details on how stmt works.
Args:
stmt (Union[Callable, str]): Callable or string of the inference code to measure.
warmup (int): Number of untimed warmup repetitions executed before measurement.
number (int): Number of times to call the function per iteration.
iterations (int): Number of measurement cycles.
Returns:
float: Median runtime of a single inference call in seconds.
"""
G_LOGGER.debug(
"Measuring inference call with warmup: {} and number: {} and iterations {}".format(
warmup, number, iterations
)
)
# Warmup
warmup_mintime = timeit.repeat(stmt, number=number, repeat=warmup)
G_LOGGER.debug("Warmup times: {}".format(warmup_mintime))
return median(timeit.repeat(stmt, number=number, repeat=iterations)) / number
class NNFolderWorkspace:
"""For keeping track of workspace folder and for cleaning them up."""
def __init__(
self, network_name: str, metadata: NetworkMetadata, working_directory: str
):
self.rootdir = working_directory
self.metadata = metadata
self.network_name = network_name
self.dpath = os.path.join(self.rootdir, self.network_name)
os.makedirs(self.dpath, exist_ok=True)
def get_path(self) -> str:
dpath = os.path.join(self.rootdir, self.network_name)
return dpath
def cleanup(self, force_remove: bool = False) -> None:
fpath = self.get_path()
if force_remove:
return shutil.rmtree(fpath)
remove_if_empty(
fpath,
success_msg="Sucessfully removed workspace.",
error_msg="Unable to remove workspace.",
)
| TensorRT-master | demo/HuggingFace/NNDF/general_utils.py |
"""
File for containing model file abstraction. Useful for generating models.
"""
# std
import os
from abc import ABCMeta, abstractmethod
from typing import Union
from shutil import copytree, rmtree
# polygraphy
from polygraphy.backend.trt import (
network_from_onnx_path,
engine_from_network,
save_engine,
)
from polygraphy.backend.trt import CreateConfig
from polygraphy.logger import G_LOGGER as PG_LOGGER
# torch
from torch import load, save
from torch.nn import Module
# TRT-HuggingFace
from NNDF.networks import NetworkMetadata
from NNDF.logger import G_LOGGER
class ModelFileConverter:
"""Abstract class for converting one model format to another."""
def __init__(self, onnx_class, torch_class, trt_engine_class):
self.onnx_class = onnx_class
self.torch_class = torch_class
self.trt_engine_class = trt_engine_class
def torch_to_onnx(
self, output_fpath: str, model: Module, network_metadata: NetworkMetadata
):
"""
Converts a torch.Model into an ONNX model on disk specified at output_fpath.
Args:
output_fpath (str): File location of the generated ONNX file.
model (Module): Loaded torch model to convert.
network_metadata (NetworkMetadata): Network metadata of the network being converted.
Returns:
ONNXModelFile: Newly generated ONNXModelFile
"""
raise NotImplementedError(
"Current model does not support exporting to ONNX model."
)
def onnx_to_torch(
self, output_fpath: str, input_fpath: str, network_metadata: NetworkMetadata
):
"""
Converts ONNX file into torch.Model which is written to disk.
Args:
output_fpath (str): File location of the generated torch model.
input_fpath (str): Location of the ONNX file to convert.
network_metadata (NetworkMetadata): Network metadata of the network being converted.
Returns:
TorchModelFile: Newly generated TorchModelFile
"""
raise NotImplementedError(
"Current model does not support exporting to torch model."
)
def onnx_to_trt(
self, output_fpath: str, input_fpath: str, network_metadata: NetworkMetadata
):
"""
Converts ONNX file to TRT engine.
Since TensorRT already supplies converter functions and scripts,
a default implementation is already provided.
Args:
output_fpath (str): File location of the generated TRT engine.
input_fpath (str): Location of the ONNX file to convert.
network_metadata (NetworkMetadata): Network metadata of the network being converted.
Returns:
TRTEngineFile: Newly generated engine.
"""
result = self.trt_engine_class(output_fpath, network_metadata)
self.trt_inference_config = CreateConfig(
fp16=network_metadata.precision.fp16,
max_workspace_size=result.DEFAULT_TRT_WORKSPACE_MB * 1024 * 1024,
profiles=result.get_dynamic_shape_profiles(),
strict_types=result.use_strict_types()
)
g_logger_verbosity = (
PG_LOGGER.EXTRA_VERBOSE
if G_LOGGER.level == G_LOGGER.DEBUG
else PG_LOGGER.WARNING
)
with PG_LOGGER.verbosity(g_logger_verbosity):
network_definition = result.get_network_definition(network_from_onnx_path(input_fpath))
trt_engine = engine_from_network(
network_definition, config=self.trt_inference_config
)
save_engine(trt_engine, output_fpath)
return result
class NNModelFile(metaclass=ABCMeta):
"""
Model abstraction. Allows for loading model as various formats.
The class assumes models live on the disk in order to reduce complexity of model loading into memory.
The class guarantees that once export functions are called, models exist on the disk for other
code to parse or use in other libraries.
"""
def __init__(
self,
default_converter: ModelFileConverter = None,
network_metadata: NetworkMetadata = None,
):
"""
Since torch functions often allow for models to either be from disk as fpath or from a loaded object,
we provide a similar option here. Arguments can either be a path on disk or from model itself.
Args:
model (Union[str, torch.Model]): Location of the model as fpath OR loaded torch.Model object.
"""
if default_converter is not None:
self.default_converter = default_converter()
else:
self.default_converter = NullConverter()
self.network_metadata = network_metadata
def as_torch_model(
self,
output_fpath: str,
converter: ModelFileConverter = None,
force_overwrite: bool = False,
):
"""
Converts ONNX file into torch.Model which is written to disk.
Uses provided converter to convert object or default_convert is used instead if available.
Arg:
output_fpath (str): File location of the generated torch file.
converter (ModelFileConverter): Class to convert current model instance into another.
force_overwrite (bool): If the file already exists, tell whether or not to overwrite.
Since torch models are folders, this can potentially erase entire folders.
Returns:
TorchModelFile: Newly generated TorchModelFile
"""
raise NotImplementedError(
"Current model does not support exporting to pytorch model."
)
def as_onnx_model(
self,
output_fpath: str,
converter: ModelFileConverter = None,
force_overwrite: bool = False,
):
"""
Converts current model into an ONNX model.
Uses provided converter to convert object or default_convert is used instead if available.
Args:
output_fpath (str): File location of the generated ONNX file.
converter (ModelFileConverter): Class to convert current model instance into another.
force_overwrite (bool): If the file already exists, tell whether or not to overwrite.
Since torch models are folders, this can potentially erase entire folders.
Returns:
ONNXModelFile: Newly generated ONNXModelFile
"""
raise NotImplementedError(
"Current model does not support exporting to onnx model."
)
def as_trt_engine(
self,
output_fpath: str,
converter: ModelFileConverter = None,
force_overwrite: bool = False,
):
"""
Converts current model into a TRT engine.
Uses provided converter to convert object or default_convert is used instead if available.
Args:
output_fpath (str): File location of the generated TRT engine.
converter (ModelFileConverter): Class to convert current model instance into another.
force_overwrite (bool): If the file already exists, tell whether or not to overwrite.
Since torch models are folders, this can potentially erase entire folders.
Returns:
TRTEngineFile: Newly generated TRTEngineFile
"""
raise NotImplementedError(
"Current model does not support exporting to trt engine."
)
@abstractmethod
def cleanup(self) -> None:
"""Cleans up any saved models or loaded models from memory."""
class TorchModelFile(NNModelFile):
def __init__(
self,
model: Union[str, Module],
default_converter: ModelFileConverter = None,
network_metadata: NetworkMetadata = None,
):
"""
Since torch functions often allow for models to either be from disk as fpath or from a loaded object,
we provide a similar option here. Arguments can either be a path on disk or from model itself.
Args:
model (Union[str, torch.Model]): Location of the model as fpath OR loaded torch.Model object.
"""
super().__init__(default_converter, network_metadata)
if isinstance(model, Module):
self.is_loaded = True
self.fpath = None
self.model = model
else:
self.is_loaded = False
self.fpath = model
self.model = None
def load_model(self) -> Module:
"""
Loads the model from disk if isn't already loaded.
Does not attempt to load if given model is already loaded and instead returns original instance.
Use as_torch_model() instead to always guarantee a new instance and location on disk.
Args:
None
Returns:
torch.Model: Loaded torch model.
"""
if self.is_loaded:
return self.model
return load(self.fpath)
def as_onnx_model(
self,
output_fpath: str,
converter: ModelFileConverter = None,
force_overwrite: bool = False,
):
"""
Converts the torch model into an onnx model.
Args:
output_fpath (str): File location of the generated ONNX file.
converter (ModelFileConverter): Class to convert current model instance into another.
force_overwrite (bool): If the file already exists, tell whether or not to overwrite.
Since torch models are folders, this can potentially erase entire folders.
Return:
(converter.onnx_class): Returns a converted instance of ONNXModelFile.
"""
converter = self.default_converter if converter is None else converter()
if not force_overwrite and os.path.exists(output_fpath):
return converter.onnx_class(output_fpath, self.network_metadata)
return converter.torch_to_onnx(
output_fpath, self.load_model(), self.network_metadata
)
def as_torch_model(
self,
output_fpath: str,
converter: ModelFileConverter = None,
force_overwrite: bool = False,
):
"""
Since the model is already a torch model, forces a save to the specified folder and returns a new TorchModelFile object from that file location.
Args:
output_fpath (str): File location of the generated torch model.
converter (ModelFileConverter): Class to convert current model instance into another.
force_overwrite (bool): If the file already exists, tell whether or not to overwrite.
Since torch models are folders, this can potentially erase entire folders.
Return:
(converter.torch_class): Returns a converted instance of TorchModelFile.
"""
converter = self.default_converter if converter is None else converter()
if not force_overwrite and os.path.exists(output_fpath):
return converter.torch_class(output_fpath, self.network_metadata)
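# A model already loaded in memory is serialized with torch.save; an on-disk model directory is copied wholesale.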
if self.is_loaded:
save(self.model, output_fpath)
else:
copytree(self.fpath, output_fpath)
return converter.torch_class(output_fpath, self.network_metadata)
def cleanup(self) -> None:
if self.model:
G_LOGGER.debug("Freeing model from memory: {}".format(self.model))
del self.model
if self.fpath:
G_LOGGER.debug("Removing saved torch model from location: {}".format(self.fpath))
rmtree(self.fpath)
class ONNXModelFile(NNModelFile):
def __init__(
self,
model: str,
default_converter: ModelFileConverter = None,
network_metadata: NetworkMetadata = None,
):
"""
Keeps track of ONNX model file. Does not support loading into memory. Only reads and writes to disk.
Args:
model (str): Location of the model as fpath OR loaded torch.Model object.
"""
super().__init__(default_converter, network_metadata)
self.fpath = model
def as_onnx_model(
self,
output_fpath: str,
converter: ModelFileConverter = None,
force_overwrite: bool = False,
):
"""
Since the model is already an ONNX model, forces a save to the specified folder and returns a new ONNXModelFile object from that file location.
Args:
output_fpath (str): File location of the generated ONNX file.
converter (ModelFileConverter): Class to convert current model instance into another.
force_overwrite (bool): If the file already exists, tell whether or not to overwrite.
Return:
(converter.onnx_class): Returns a converted instance of ONNXModelFile.
"""
converter = self.default_converter if converter is None else converter()
if not force_overwrite and os.path.exists(output_fpath):
return converter.onnx_class(output_fpath, self.network_metadata)
else:
copytree(self.fpath, output_fpath)
return converter.onnx_class(output_fpath, self.network_metadata)
def as_torch_model(
self,
output_fpath: str,
converter: ModelFileConverter = None,
force_overwrite: bool = False,
):
"""
Converts the ONNX model into a torch model.
Args:
output_fpath (str): File location of the generated torch model.
converter (ModelFileConverter): Class to convert current model instance into another.
force_overwrite (bool): If the file already exists, tell whether or not to overwrite.
Since torch models are folders, this can potentially erase entire folders.
Return:
(converter.torch_class): Returns a converted instance of TorchModelFile.
"""
converter = self.default_converter if converter is None else converter()
if not force_overwrite and os.path.exists(output_fpath):
return converter.torch_class(output_fpath, self.network_metadata)
return converter.onnx_to_torch(output_fpath, self.fpath, self.network_metadata)
def cleanup(self) -> None:
G_LOGGER.debug("Removing saved ONNX model from location: {}".format(self.fpath))
# Does not cleanup external data and weights.
os.remove(self.fpath)
def as_trt_engine(
self,
output_fpath: str,
converter: ModelFileConverter = None,
force_overwrite: bool = False,
):
"""
Converts the ONNX model into a TRT engine.
Args:
output_fpath (str): File location of the generated TRT engine.
converter (ModelFileConverter): Class to convert current model instance into another.
force_overwrite (bool): If the file already exists, tell whether or not to overwrite.
Since torch models are folders, this can potentially erase entire folders.
Return:
(converter.trt_engine_class): Returns a converted instance of TRTEngineFile.
"""
converter = self.default_converter if converter is None else converter()
# TODO: Need to check if the old engine file is compatible with current setting
if not force_overwrite and os.path.exists(output_fpath):
return converter.trt_engine_class(output_fpath, self.network_metadata)
return converter.onnx_to_trt(output_fpath, self.fpath, self.network_metadata)
class TRTEngineFile(NNModelFile):
DEFAULT_TRT_WORKSPACE_MB = 3072
@abstractmethod
def get_dynamic_shape_profiles(self):
pass
@abstractmethod
def use_strict_types(self):
pass
# get_network_definition can be overloaded to alter the network definition.
# For example, this function can be used to change the precisions of ops or
# data type of intermediate tensors.
def get_network_definition(self, network_definition):
return network_definition
def __init__(
self,
model: str,
default_converter: ModelFileConverter = None,
network_metadata: NetworkMetadata = None,
):
super().__init__(default_converter, network_metadata)
self.fpath = model
if os.path.exists(self.fpath):
# Engine already exists, do nothing
return
def cleanup(self) -> None:
G_LOGGER.debug("Removing saved engine model from location: {}".format(self.fpath))
os.remove(self.fpath)
class NullConverter(ModelFileConverter):
def __init__(self):
super().__init__(ONNXModelFile, TorchModelFile, TRTEngineFile)
| TensorRT-master | demo/HuggingFace/NNDF/models.py |
"""
Interface classes required for each registered network script.
"""
# std
import argparse
from abc import ABCMeta, abstractmethod
from typing import List, Tuple
# NNDF
from NNDF.networks import (
NetworkResult,
NetworkMetadata,
NetworkCheckpointResult,
NNConfig,
NetworkModel,
TimingProfile,
)
from NNDF.logger import G_LOGGER
# externals
# None, there should be no external dependencies for testing purposes.
class MetadataArgparseInteropMixin:
"""Add argparse support where the class can add new arguments to an argparse object."""
@staticmethod
@abstractmethod
def add_args(parser):
pass
@staticmethod
@abstractmethod
def from_args(args):
pass
@staticmethod
@abstractmethod
def add_inference_args(parser):
pass
@staticmethod
@abstractmethod
def from_inference_args(args):
pass
class NetworkCommand(metaclass=ABCMeta):
"""Base class that each network script's command module should inherit."""
description = "NetworkCommand"
DEFAULT_ITERATIONS = 10
DEFAULT_NUMBER = 1
DEFAULT_WARMUP = 3
def __init__(self, network_config: NNConfig, description: str):
self.config = network_config()
self.description = description
self._parser = argparse.ArgumentParser(description=description, conflict_handler="resolve")
def __call__(self):
self.add_args(self._parser)
self.config.MetadataClass.add_args(self._parser)
self._args = self._parser.parse_args()
if self._args.verbose:
G_LOGGER.setLevel(level=G_LOGGER.DEBUG)
self.metadata = self.args_to_network_metadata(self._args)
self.check_network_metadata_is_supported(self.metadata)
def add_args(self, parser) -> None:
general_group = parser.add_argument_group("general")
general_group.add_argument(
"--verbose", help="Display verbose logs.", action="store_true"
)
general_group.add_argument(
"--cleanup",
help="Cleans up user-specified workspace. Can not be cleaned if external files exist in workspace.",
action="store_false",
)
general_group.add_argument(
"--working-dir",
help="Location of where to save the model and other downloaded files.",
required=True,
)
timing_group = parser.add_argument_group("inference measurement")
timing_group.add_argument(
"--iterations", help="Number of iterations to measure.", default=self.DEFAULT_ITERATIONS
)
timing_group.add_argument(
"--number",
help="Number of actual inference cycles per iterations.",
default=self.DEFAULT_NUMBER,
)
timing_group.add_argument(
"--warmup",
help="Number of warmup iterations before actual measurement occurs.",
default=self.DEFAULT_WARMUP,
)
def check_network_metadata_is_supported(self, metadata: NetworkMetadata) -> None:
"""
Checks if current command supports the given metadata as defined by the NNConfig.
Args:
metadata (NetworkMetadata): NetworkMetadata to check if input is supported.
Throws:
NotImplementedError: If the given metadata is not a valid configuration for this network.
Returns:
None
"""
if metadata not in self.config.variants:
raise NotImplementedError(
"The following network config is not yet supported by our scripts: {}".format(
metadata
)
)
def args_to_network_metadata(self, args) -> NetworkMetadata:
return self.config.MetadataClass.from_args(args)
class FrameworkCommand(NetworkCommand):
"""Base class that is associated with Frameworks related scripts."""
@abstractmethod
def run_framework(
self,
metadata: NetworkMetadata,
network_input: List[str],
working_directory: str,
keep_onnx_model: bool,
keep_pytorch_model: bool,
timing_profile: TimingProfile,
) -> List[NetworkResult]:
pass
def __call__(self):
super().__call__()
# Defer import so that the interface file can be used without
# dependency installs for our testing.
from NNDF.checkpoints import NNSemanticCheckpoint
checkpoint = NNSemanticCheckpoint(
"checkpoint.toml",
framework="native",
network_name=self.config.network_name,
metadata=self.metadata,
)
network_results = self.run_framework(
metadata=self.metadata,
network_input=list(checkpoint.inputs()),
working_directory=self._args.working_dir,
keep_onnx_model=self._args.cleanup,
keep_pytorch_model=self._args.cleanup,
timing_profile=TimingProfile(
iterations=int(self._args.iterations),
number=int(self._args.number),
warmup=int(self._args.warmup),
),
)
return NetworkCheckpointResult(
network_results=network_results,
accuracy=checkpoint.accuracy(network_results),
)
def add_args(self, parser) -> argparse.ArgumentParser:
super().add_args(parser)
class TRTInferenceCommand(NetworkCommand):
"""Base class that is associated with Polygraphy related scripts."""
def __init__(
self,
network_config: NNConfig,
description: str,
frameworks_cmd: FrameworkCommand,
):
super().__init__(network_config, description)
# Should be set by
self.frameworks_cmd = frameworks_cmd()
@abstractmethod
def run_trt(
self,
metadata: NetworkMetadata,
onnx_fpaths: Tuple[NetworkModel],
network_input: List[str],
working_directory: str,
keep_trt_engine: bool,
keep_onnx_model: bool,
keep_torch_model: bool,
timing_profile: TimingProfile,
) -> List[NetworkResult]:
pass
def __call__(self):
self.config.MetadataClass.add_inference_args(self._parser)
super().__call__()
onnx_fpaths = self.args_to_network_models(self._args)
# Defer import so that the interface file can be used without
# dependency installs for our testing.
from NNDF.checkpoints import NNSemanticCheckpoint
checkpoint = NNSemanticCheckpoint(
"checkpoint.toml",
framework="native",
network_name=self.config.network_name,
metadata=self.metadata,
)
network_results = self.run_trt(
metadata=self.metadata,
onnx_fpaths=onnx_fpaths,
network_input=list(checkpoint.inputs()),
working_directory=self._args.working_dir,
keep_trt_engine=self._args.cleanup,
keep_onnx_model=self._args.cleanup,
keep_torch_model=self._args.cleanup,
timing_profile=TimingProfile(
iterations=int(self._args.iterations),
number=int(self._args.number),
warmup=int(self._args.warmup),
),
)
return NetworkCheckpointResult(
network_results=network_results,
accuracy=checkpoint.accuracy(network_results),
)
def args_to_network_metadata(self, args) -> NetworkMetadata:
return self.config.MetadataClass.from_inference_args(args)
@abstractmethod
def args_to_network_models(self, args) -> Tuple[NetworkModel]:
"""
Converts argparse arguments into a list of valid NetworkModel fpaths. Specifically for ONNX.
Invokes conversion scripts if they are not provided.
Return:
List[NetworkModel]: List of network model names.
"""
class OnnxRTCommand(NetworkCommand):
"""ONNX Runtime command."""
def __init__(
self,
network_config: NNConfig,
description: str,
frameworks_cmd: FrameworkCommand,
):
super().__init__(network_config, description)
# Should be set by
self.frameworks_cmd = frameworks_cmd()
@abstractmethod
def run_onnxrt(
self,
metadata: NetworkMetadata,
onnx_fpaths: Tuple[NetworkModel],
network_input: List[str],
working_directory: str,
keep_onnx_model: bool,
keep_torch_model: bool,
timing_profile: TimingProfile,
) -> List[NetworkResult]:
pass
def __call__(self):
self.config.MetadataClass.add_inference_args(self._parser)
super().__call__()
onnx_fpaths = self.args_to_network_models(self._args)
# Defer import so that the interface file can be used without
# dependency installs for our testing.
from NNDF.checkpoints import NNSemanticCheckpoint
checkpoint = NNSemanticCheckpoint(
"checkpoint.toml",
framework="native",
network_name=self.config.network_name,
metadata=self.metadata,
)
network_results = self.run_onnxrt(
metadata=self.metadata,
onnx_fpaths=onnx_fpaths,
network_input=list(checkpoint.inputs()),
working_directory=self._args.working_dir,
keep_onnx_model=self._args.cleanup,
keep_torch_model=self._args.cleanup,
timing_profile=TimingProfile(
iterations=int(self._args.iterations),
number=int(self._args.number),
warmup=int(self._args.warmup),
),
)
return NetworkCheckpointResult(
network_results=network_results,
accuracy=checkpoint.accuracy(network_results),
)
def args_to_network_metadata(self, args) -> NetworkMetadata:
return self.config.MetadataClass.from_inference_args(args)
@abstractmethod
def args_to_network_models(self, args) -> Tuple[NetworkModel]:
"""
Converts argparse arguments into a list of valid NetworkModel fpaths. Specifically for ONNX.
Invokes conversion scripts if they are not provided.
Return:
List[NetworkModel]: List of network model names.
"""
| TensorRT-master | demo/HuggingFace/NNDF/interface.py |
import logging
G_LOGGER = logging.getLogger("OSS")
G_LOGGER.DEBUG = logging.DEBUG
G_LOGGER.INFO = logging.INFO
G_LOGGER.WARNING = logging.WARNING
G_LOGGER.ERROR = logging.ERROR
formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s] %(message)s")
stream = logging.StreamHandler()
stream.setFormatter(formatter)
G_LOGGER.addHandler(stream)
| TensorRT-master | demo/HuggingFace/NNDF/logger.py |
"""
Helper file for generating common checkpoints.
"""
# std
from typing import List
# TRT-HuggingFace
from NNDF.networks import NetworkMetadata, NetworkResult
# externals
import toml
class NNTomlCheckpoint:
"""Loads a toml checkpoint file for comparing labels and inputs."""
def __init__(self, fpath: str, framework: str, network_name: str, metadata: NetworkMetadata):
"""Loads the toml file for processing."""
data = {}
with open(fpath) as f:
data = toml.load(f)
# Select the current input data
# try to get the base data
network = data.get(network_name, {})
self.baseline = network.get("all", {}).get("default", {})
specific_general_data = network.get("all", {}).get(metadata.variant, {})
# Defaults are also used as baselines for the network in case there are deviations known in variants.
# then apply specific data
addendum = network.get(framework, {})
addendum_default = addendum.get("default", {})
addendum_specific = addendum.get(metadata.variant, {})
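# Merge precedence (lowest to highest): baseline defaults, variant-specific overrides, framework defaults, framework + variant overrides.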
self.data = {
k: {**self.baseline[k],
**specific_general_data.get(k, {}),
**addendum_default.get(k, {}),
**addendum_specific.get(k, {})} for k in self.baseline.keys()
}
# Used when accuracy() is called
self._lookup_cache = None
def _iterate_data(self, slice: List[str], skip_keyword: str = "skip"):
"""
Helper for child classes to iterate through a slice of data.
Return:
(Union[Dict[str, str], List[str]]): Yields the value for the single key given in 'slice', or a dictionary of the requested keys when more than one key is given.
"""
returns_dict = len(slice) > 1
for value in self.data.values():
if "skip" in value:
continue
if returns_dict:
yield {s: value[s] for s in slice}
else:
yield value[slice[0]]
class NNSemanticCheckpoint(NNTomlCheckpoint):
"""Requires the following data structure:
[<network>.<framework>.<variant>]
[input_a]
label = "sample_label"
input = "sample_input"
[input_b]
label = "sample_label"
input = "sample_input"
Following are reserved keywords:
<framework> = "all" indicates rules apply to all frameworks
<variant> = "default" indicates rules apply to all networks.
"""
def __iter__(self):
return self._iterate_data(["label", "input"])
def labels(self):
return self._iterate_data(["label"])
def inputs(self):
return self._iterate_data(["input"])
def accuracy(self, results: List[NetworkResult]) -> float:
# Hash checkpoints by their input
if self._lookup_cache is None:
self._lookup_cache = {}
for k, v in self.data.items():
self._lookup_cache[v["input"]] = k
correct_count = 0
for r in results:
# Find the data the corresponds to input
key = self._lookup_cache[r.input]
# remove new line characters
r_new = r.semantic_output[0] if isinstance(r.semantic_output, list) else r.semantic_output
correct_count += int(self.data[key]["label"].replace('\\n','').replace('\n','') == r_new.replace('\\n','').replace('\n',''))
return correct_count / len(results)
| TensorRT-master | demo/HuggingFace/NNDF/checkpoints.py |
"""
Helpers for abstracting high-level neural network concepts. Different from 'models.py' which deals
with IO abstraction. This file deals with high level network configurations.
"""
# std
import string
from typing import Dict, Union, Tuple
from collections import namedtuple, OrderedDict
# externals
# None. Should not have any external dependencies.
FILENAME_VALID_CHARS = "-~_.() {}{}".format(string.ascii_letters, string.digits)
"""NetworkResult(input: str, output_tensor: np.array, semantic_output: np.array, median_runtime: NetworkRuntime, models: [str])"""
NetworkResult = namedtuple(
"NetworkResult",
["input", "output_tensor", "semantic_output", "median_runtime", "models"],
)
"""CheckpointResult(network_results: List[NetworkResult], accuracy: float)"""
NetworkCheckpointResult = namedtuple(
"NetworkCheckpointResult", ["network_results", "accuracy"]
)
# Tracks TRT Precision Config
"""Precision(fp16: Bool)"""
Precision = namedtuple("Precision", ["fp16"])
"""NetworkMetadata(variant: str, precision: Precision, other: Union[namedtuple, None])"""
NetworkMetadata = namedtuple("NetworkMetadata", ["variant", "precision", "other"])
"""TimingProfile(iterations: int, repeat: int)"""
TimingProfile = namedtuple("TimingProfile", ["iterations", "number", "warmup"])
"""NetworkModel(name: str, fpath: str)"""
NetworkModel = namedtuple("NetworkModel", ["name", "fpath"])
"""
String encodings of generated network models.
NetworkModels(torch: Tuple[NetworkModel], onnx: Tuple[NetworkModel], trt: Tuple[NetworkModel])
"""
NetworkModels = namedtuple("NetworkModels", ["torch", "onnx", "trt"])
"""
Args:
name: Name of the network / parts of the network timed.
    runtime: Measured runtime of the named segment.
NetworkRuntime(name: str, runtime: float)
"""
NetworkRuntime = namedtuple("NetworkRuntime", ["name", "runtime"])
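# Illustrative sketch (values are example assumptions): constructing the namedtuples defined above.
#
#     metadata = NetworkMetadata(variant="gpt2", precision=Precision(fp16=True), other=None)
#     timing = TimingProfile(iterations=10, number=1, warmup=3)
#     runtime = NetworkRuntime(name="full", runtime=0.42)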
class Dims:
"""Helper class for interfacing dimension constructs with Polygraphy and PyTorch."""
BATCH = "batch"
SEQUENCE = "sequence"
def __init__(self, encoding: OrderedDict):
self.encoding = encoding
    @staticmethod
    def create_new_sequence_dim(dim_type: str) -> str:
"""
Returns a new sequence dimension.
Return:
            str: Returns a new sequence dimension name: Dims.SEQUENCE with dim_type appended.
"""
return Dims.SEQUENCE + dim_type
def get_dims(self):
"""
Returns the encoding dimensions.
Return:
OrderedDict[str, Union[int, str]]: Returns dimensional encoding. Example: {'input_ids': (1, SEQUENCE_DIM)}
"""
return self.encoding
def get_names(self) -> Tuple[str]:
return tuple(self.encoding.keys())
def get_lengths(self) -> Tuple[Union[int, str]]:
return tuple(self.encoding.values())
def get_torch_dynamic_axis_encoding(self) -> dict:
"""
Returns a Pytorch "dynamic_axes" encoding for onnx.export.
Returns:
dict: Returns a 'dynamic' index with corresponding names according to:
https://pytorch.org/docs/stable/onnx.html
"""
dynamic_axes = {}
for k, v in self.encoding.items():
encodings = []
for e in v:
if isinstance(e, str) and (e == self.BATCH or self.SEQUENCE in e):
encodings.append(e)
            dynamic_axes[k] = {idx: name for idx, name in enumerate(encodings)}
return dynamic_axes
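# Illustrative sketch (example encoding assumed): how Dims is typically used for ONNX export.
#
#     dims = Dims(OrderedDict({"input_ids": (Dims.BATCH, Dims.SEQUENCE)}))
#     dims.get_names()                        # -> ("input_ids",)
#     dims.get_torch_dynamic_axis_encoding()  # -> {"input_ids": {0: "batch", 1: "sequence"}}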
# Config Class
class NNConfig:
"""Contains info for a given network that we support."""
NETWORK_SEGMENTS = ["full"]
def __init__(self, network_name, variants=None):
assert self._is_valid_filename(
network_name
), "Network name: {} is not filename friendly.".format(network_name)
self.network_name = network_name
self.variants = variants
        # Due to limitations of namedtuple and pickle, the namedtuple class must be tracked as an
        # instance attribute that refers to a module-level (global) definition.
if len(self.variants) > 0:
self.MetadataClass = type(self.variants[0].other)
else:
self.MetadataClass = None
def get_network_segments(self):
"""
Returns exportable segments for the given network.
Used in the case where a single network needs to
be exported into multiple parts.
"""
return self.NETWORK_SEGMENTS
@staticmethod
def get_output_dims(metadata) -> Dict:
"""
Returns the output dimensions of the current network.
Since some networks can have multiple parts, should be a dictionary encoding.
Returns:
(Dict): {"network_section": Dims}
"""
raise NotImplementedError("Output dims not yet defined.")
@staticmethod
def get_input_dims(metadata) -> Dict:
"""
Returns the input dimensions of the current network.
Since some networks can have multiple parts, should be a dictionary encoding.
Returns:
(Dict): {"network_section": Dims} example:
{"encoder": Dims(...), "decoder": Dims(...)}
"""
raise NotImplementedError("Input dims not yet defined.")
def _is_valid_filename(self, filename: str) -> bool:
"""
Checks if a given filename is valid, helpful for cross platform dependencies.
"""
return all(c in FILENAME_VALID_CHARS for c in filename)
    def get_python_requirements(self):
return []
def get_metadata_string(self, metadata: NetworkMetadata) -> str:
"""
Serializes a Metadata object into string.
String will be checked if friendly to filenames across Windows and Linux operating systems.
        Returns:
string: <network>-<variant-name>-<precision>-<others>
"""
precision_str = "-".join(
[k for k, v in metadata.precision._asdict().items() if v]
)
result = [self.network_name, metadata.variant]
if precision_str:
result.append(precision_str)
other_result = [
"{}~{}".format(k, str(v)) for k, v in metadata.other._asdict().items()
]
# Remove all boolean values that are False and remove True if exists
true_length = len("~True")
other_result_filtered = [v[:-true_length] if v.endswith("~True") else v for v in other_result if "~False" not in v]
if len(other_result_filtered) != 0:
result.append("-".join(other_result_filtered))
final_str = "-".join(result)
assert self._is_valid_filename(
final_str
), "Metadata for current network {} is not filename friendly: {}.".format(
self.network_name, final_str
)
return final_str
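# Illustrative note (example values): for a config named "GPT2" with variant "gpt2", fp16 enabled
# and other=GPT2Metadata(kv_cache=False) (from GPT2/GPT2ModelConfig.py), get_metadata_string()
# drops the False flag and yields "GPT2-gpt2-fp16"; with kv_cache=True it would yield
# "GPT2-gpt2-fp16-kv_cache".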
| TensorRT-master | demo/HuggingFace/NNDF/networks.py |
"""Torch utils used by demo folder."""
# std
import inspect
from typing import Callable
# pytorch
import torch
# Function Decorators #
def use_cuda(func: Callable):
"""
    Tries to move every argument of the wrapped function onto the CUDA device when available.
    Each argument exposing a "to(device: str)" method (the standard torch convention) is moved
    to the "cuda" device; all other arguments are passed through unchanged.
    Wrapped functions must have a keyword argument "use_cuda: bool" which enables
    or disables this behavior.
"""
def wrapper(*args, **kwargs):
caller_kwargs = inspect.getcallargs(func, *args, **kwargs)
assert (
"use_cuda" in caller_kwargs
), "Function must have 'use_cuda' as a parameter."
if caller_kwargs["use_cuda"] and torch.cuda.is_available():
new_kwargs = {}
for k, v in caller_kwargs.items():
if getattr(v, "to", False):
new_kwargs[k] = v.to("cuda")
else:
new_kwargs[k] = v
return func(**new_kwargs)
else:
return func(**caller_kwargs)
return wrapper
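# Illustrative sketch (hypothetical function): a callable decorated with @use_cuda must expose a
# "use_cuda" keyword so the wrapper can decide whether to move its tensor arguments to the GPU.
#
#     @use_cuda
#     def run_model(model, input_ids, use_cuda=True):
#         return model(input_ids)
#
#     # run_model(model, input_ids, use_cuda=True) moves both arguments onto "cuda" when available.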
| TensorRT-master | demo/HuggingFace/NNDF/torch_utils.py |
# std
import argparse
from collections import namedtuple, OrderedDict
from itertools import product
from typing import Dict
# TRT-HuggingFace
from NNDF.networks import Precision, NetworkMetadata, NNConfig, Dims
from NNDF.interface import MetadataArgparseInteropMixin
# Limitation of namedtuples. You must declare namedtuples in module scope and not in classes.
# Otherwise pickle doesn't work.
# See: https://stackoverflow.com/questions/4677012/python-cant-pickle-type-x-attribute-lookup-failed
_GPT2Metadata = namedtuple("GPT2Metadata", ["kv_cache"])
class GPT2Metadata(_GPT2Metadata, MetadataArgparseInteropMixin):
@staticmethod
def add_args(parser: argparse.ArgumentParser) -> None:
"""Add commandline interface parser."""
network_group = parser.add_argument_group("GPT2 network")
network_group.add_argument(
"--variant",
help="GPT2 variant to generate",
choices=GPT2ModelTRTConfig.TARGET_MODELS,
required=True,
)
network_group.add_argument(
"--enable-kv-cache",
help="GPT2 enable KV cache",
action="store_true",
default=False,
)
@staticmethod
def from_args(args: argparse.Namespace):
return NetworkMetadata(
variant=args.variant,
precision=Precision(fp16=False),
other=GPT2Metadata(kv_cache=args.enable_kv_cache),
)
@staticmethod
def add_inference_args(parser: argparse.ArgumentParser) -> None:
inference_group = parser.add_argument_group("inference group")
inference_group.add_argument(
"--fp16", action="store_true", help="Enables fp16 TensorRT tactics."
)
@staticmethod
def from_inference_args(args: argparse.Namespace):
base_metadata = GPT2Metadata.from_args(args)
return base_metadata._replace(precision=Precision(fp16=args.fp16))
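# Illustrative sketch (standalone usage assumed): wiring GPT2Metadata into an argparse parser.
#
#     parser = argparse.ArgumentParser()
#     GPT2Metadata.add_args(parser)
#     args = parser.parse_args(["--variant", "gpt2"])
#     metadata = GPT2Metadata.from_args(args)
#     # -> NetworkMetadata(variant="gpt2", precision=Precision(fp16=False),
#     #                    other=GPT2Metadata(kv_cache=False))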
class GPT2ModelTRTConfig(NNConfig):
VOCAB_SIZE = 50257 # Vocabulary size of the GPT-2 model
TARGET_MODELS = ["gpt2", "gpt2-large"]
NETWORK_DECODER_SEGMENT_NAME = "gpt2_decoder"
NETWORK_SEGMENTS = [NETWORK_DECODER_SEGMENT_NAME]
NETWORK_FULL_NAME = "full"
MAX_SEQUENCE_LENGTH = {
TARGET_MODELS[0]: 64,
TARGET_MODELS[1]: 64,
}
def __init__(self):
precision_fp16 = [False, True]
kv_caches = [False]
variants = []
for variant, fp16, kv_cache in product(
GPT2ModelTRTConfig.TARGET_MODELS, precision_fp16, kv_caches
):
variants.append(
NetworkMetadata(
variant=variant,
precision=Precision(fp16=fp16),
other=GPT2Metadata(kv_cache=kv_cache),
)
)
super().__init__("GPT2", variants=variants)
def get_python_requirements(self):
base_requirements = super().get_python_requirements()
base_requirements.append("transformers==4.6.1")
return base_requirements
@staticmethod
def get_input_dims(metadata) -> Dict:
"""
Returns dictionary encoding of input dimensions.
Returns:
(Dict[str, Dims]): {"decoder": Dims}
"""
return {
GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME: Dims(
OrderedDict({"input_ids": (Dims.BATCH, Dims.SEQUENCE)})
),
}
@staticmethod
def get_output_dims(metadata) -> Dict:
"""
Returns dictionary encoding of output dimensions.
Returns:
(Dict[str, Dims]): {"decoder": Dims, "encoder": Dims}
"""
return {
GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME: Dims(
OrderedDict(
{
"logits": (
Dims.BATCH,
Dims.SEQUENCE,
GPT2ModelTRTConfig.VOCAB_SIZE,
)
}
)
),
}
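# Illustrative sketch (metadata value assumed): the dimension encodings above feed directly into
# the ONNX export in GPT2/export.py.
#
#     dims = GPT2ModelTRTConfig.get_input_dims(metadata)[GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME]
#     dims.get_names()    # -> ("input_ids",)
#     dims.get_lengths()  # -> (("batch", "sequence"),)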
| TensorRT-master | demo/HuggingFace/GPT2/GPT2ModelConfig.py |
"""
Contains logic that captures GPT2 HuggingFace models into ONNX models and TRT engines.
"""
# std
from itertools import tee
# tensorrt
import tensorrt as trt
# polygraphy
from polygraphy.backend.trt import Profile
# torch
import torch
from torch.nn import Module
# # huggingface
from transformers.generation_utils import GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
from transformers import GPT2Tokenizer
# TRT-HuggingFace
from GPT2.GPT2ModelConfig import GPT2ModelTRTConfig
from NNDF.networks import NetworkMetadata
from NNDF.models import TRTEngineFile, TorchModelFile, ONNXModelFile, ModelFileConverter
class GPT2TorchFile(TorchModelFile):
class TorchModule(Module, GenerationMixin):
"""
        A simplified definition of GPT2 with an LM head.
"""
def __init__(self, transformer, lm_head, config):
super().__init__()
self.transformer = transformer
self.lm_head = lm_head
self.config = config
def prepare_inputs_for_generation(self, input_ids, **kwargs):
# Todo (@pchadha): add position_ids, token_type_ids support
return {
"input_ids": input_ids,
}
def forward(self, input_ids, **kwargs):
transformer_outputs = self.transformer(input_ids)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
return CausalLMOutputWithCrossAttentions(logits=lm_logits)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def __init__(self, model, network_metadata):
super().__init__(model, GPT2Converter, network_metadata)
class GPT2ONNXFile(ONNXModelFile):
def __init__(self, model, network_metadata):
super().__init__(model, GPT2Converter, network_metadata)
# TRT Engine File Encoding #
class GPT2TRTEngine(TRTEngineFile):
def __init__(self, model, network_metadata):
super().__init__(model, GPT2Converter, network_metadata)
def use_strict_types(self):
return self.network_metadata.precision.fp16
def get_dynamic_shape_profiles(self):
max_sequence_length = GPT2ModelTRTConfig.MAX_SEQUENCE_LENGTH[
self.network_metadata.variant
]
profile = Profile()
profile.add(
"input_ids",
min=(1, 1),
opt=(1, max_sequence_length // 2),
max=(1, max_sequence_length),
)
return [profile]
def get_network_definition(self, network_definition):
def pairwise(iterable):
a, b = tee(iterable)
next(b, None)
return zip(a, b)
indices = list(range(0, network_definition[1].num_layers))
for i, i_next in pairwise(indices):
l = network_definition[1].get_layer(i)
l_next = network_definition[1].get_layer(i_next)
if not all([l.get_output(i).is_execution_tensor for i in range(l.num_outputs)]):
continue
if l.get_output_type(0) != trt.float32:
continue
if l.type == trt.LayerType.ELEMENTWISE and l_next.type == trt.LayerType.REDUCE:
l.__class__ = getattr(trt, "IElementWiseLayer")
if l.op == trt.ElementWiseOperation.POW:
l.precision = trt.float32
l.set_output_type(0, trt.float32)
l_next.precision = trt.float32
l_next.set_output_type(0, trt.float32)
return network_definition
# Converters
class GPT2Converter(ModelFileConverter):
def __init__(self):
super().__init__(GPT2TorchFile, GPT2ONNXFile, GPT2TRTEngine)
def torch_to_onnx(
self, output_fpath: str, model: Module, network_metadata: NetworkMetadata
):
"""
Exports a GPT2LMHead model to ONNX.
Args:
            output_fpath (str): Path to the output ONNX file.
            model (torch.nn.Module): Loaded GPT2 torch model to export.
Returns:
GPT2ONNXFile: ONNX GPT2 decoder object.
"""
tokenizer = GPT2Tokenizer.from_pretrained(network_metadata.variant)
input_ids = torch.tensor(
[
tokenizer.encode(
"Here is some text to encode Hello World", add_special_tokens=True
)
]
)
gpt2_model = GPT2TorchFile.TorchModule(
model.transformer, model.lm_head, model.config
)
inputs = GPT2ModelTRTConfig.get_input_dims(network_metadata)[
GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME
]
outputs = GPT2ModelTRTConfig.get_output_dims(network_metadata)[
GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME
]
# Exports to ONNX
torch.onnx._export(
gpt2_model,
input_ids,
output_fpath,
opset_version=12,
input_names=inputs.get_names(),
output_names=outputs.get_names(),
dynamic_axes={
**inputs.get_torch_dynamic_axis_encoding(),
**outputs.get_torch_dynamic_axis_encoding(),
},
training=False,
use_external_data_format=True
)
return GPT2ONNXFile(output_fpath, network_metadata)
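# Illustrative end-to-end sketch (variant and paths are example assumptions; a NetworkMetadata
# instance `metadata` is assumed to be in scope): converting a HuggingFace GPT2 checkpoint into
# an ONNX file and then a TensorRT engine via the file classes above.
#
#     from transformers import GPT2LMHeadModel
#     model = GPT2LMHeadModel.from_pretrained("gpt2")
#     torch_file = GPT2TorchFile(model, metadata)
#     onnx_file = torch_file.as_onnx_model("gpt2.onnx", force_overwrite=False)
#     engine = onnx_file.as_trt_engine("gpt2.onnx.engine")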
| TensorRT-master | demo/HuggingFace/GPT2/export.py |
# std
import os
import sys
from typing import Dict, List, Tuple
# Add syspath for custom library
if __name__ == "__main__":
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, os.pardir)
sys.path.append(project_root)
# numpy
import numpy as np
# torch
import torch
# huggingface
from transformers import GPT2Tokenizer, GPT2Config
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
from transformers.configuration_utils import PretrainedConfig
from transformers.generation_utils import GenerationMixin
# TRT-HuggingFace
from NNDF.interface import TRTInferenceCommand
from NNDF.networks import (
NetworkMetadata,
NetworkModels,
NetworkModel,
NetworkResult,
NetworkRuntime,
Precision,
TimingProfile,
)
from NNDF.tensorrt_utils import TRTNativeRunner, TRTPolygraphyRunner
from GPT2.frameworks import GPT2HuggingFace
from NNDF.general_utils import NNFolderWorkspace
from GPT2.GPT2ModelConfig import GPT2ModelTRTConfig
from GPT2.measurements import gpt2_inference, full_inference_greedy
from GPT2.export import GPT2ONNXFile, GPT2TRTEngine
class TRTHFRunner(TRTNativeRunner, GenerationMixin):
"""Runner that adds interop support for HF and HF provided greedy_search functions."""
def _allocate_memory(self, input_dict: Dict[str, np.ndarray], output_dict: Dict[str, np.ndarray]):
"""Helper function for binding several inputs at once and pre-allocating the results."""
bindings = [None] * self.trt_engine.num_bindings
for input_name, input_array in input_dict.items():
# Allocate memory for inputs
input_idx = self.trt_engine.get_binding_index(input_name)
self.trt_context.set_binding_shape(input_idx, input_array.shape)
bindings[input_idx] = input_array.data_ptr()
assert self.trt_context.all_binding_shapes_specified
for output_name, output_array in output_dict.items():
# Output shape should be allocated from context size
output_idx = self.trt_engine.get_binding_index(output_name)
bindings[output_idx] = output_array.data_ptr()
return bindings
def __init__(
self,
trt_engine_file: str,
network_metadata: NetworkMetadata,
hf_config: PretrainedConfig,
):
super().__init__(trt_engine_file, network_metadata)
self.config = hf_config
class GPT2TRTDecoder(TRTHFRunner):
def __init__(
self,
trt_engine_file: str,
network_metadata: NetworkMetadata,
hf_config: PretrainedConfig,
):
super().__init__(trt_engine_file, network_metadata, hf_config)
self.max_sequence_length = GPT2ModelTRTConfig.MAX_SEQUENCE_LENGTH[network_metadata.variant]
assert len(trt_engine_file.get_dynamic_shape_profiles()) == 1, "GPT2 should only have one dynamic shapes profile."
# We only have one profile to select so we can just grab the profile at the start of the class
self.profile_idx = self.get_optimization_profile(batch_size=1, sequence_length=1)
self.inputs = {
"input_ids": torch.zeros(1, self.max_sequence_length, dtype=torch.int32).cuda(),
}
self.outputs = {
"logits": torch.zeros(1, self.max_sequence_length, GPT2ModelTRTConfig.VOCAB_SIZE, dtype=torch.float32).cuda()
}
self.bindings = self._allocate_memory(self.inputs, self.outputs)
def prepare_inputs_for_generation(self, input_ids, **kwargs):
# Todo (@pchadha): add position_ids, token_type_ids support
return {
"input_ids": input_ids,
}
def forward(self, input_ids, **kwargs):
self.inputs["input_ids"][:, :input_ids.shape[1]] = input_ids
self.trt_context.set_binding_shape(0, input_ids.shape)
self.trt_context.execute_v2(bindings=self.bindings)
return CausalLMOutputWithCrossAttentions(logits=self.outputs["logits"][:, :input_ids.shape[1], :])
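# Illustrative sketch (prompt and length are example values): because GPT2TRTDecoder mixes in
# GenerationMixin and provides forward()/prepare_inputs_for_generation(), HuggingFace's greedy
# search can drive the TensorRT engine directly, which is what full_inference_greedy relies on.
#
#     tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
#     input_ids = tokenizer("TensorRT is", return_tensors="pt").input_ids
#     output_ids = gpt2_trt.generate(input_ids, max_length=64)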
class GPT2Polygraphy(TRTInferenceCommand):
def __init__(self):
super().__init__(
GPT2ModelTRTConfig, "Runs polygraphy results for GPT2 model.", GPT2HuggingFace
)
self.gpt2_trt = None
def cleanup(
self,
workspace: NNFolderWorkspace,
keep_trt_engine: bool = False,
keep_onnx_model: bool = False,
keep_torch_model: bool = False,
) -> None:
# Deactivates context
if self.gpt2_trt is not None:
self.gpt2_trt.release()
if not keep_trt_engine:
self.gpt2_engine.cleanup()
self.frameworks_cmd.cleanup(workspace, keep_onnx_model, keep_torch_model)
def execute_inference(
self,
metadata: NetworkMetadata,
onnx_fpaths: Dict[str, NetworkModel],
inference_input: str,
timing_profile: TimingProfile,
) -> NetworkResult:
tokenizer = GPT2Tokenizer.from_pretrained(metadata.variant)
input_ids = tokenizer(inference_input, return_tensors="pt").input_ids
# get single decoder iteration inference timing profile
_, decoder_e2e_median_time = gpt2_inference(
self.gpt2_trt, input_ids, timing_profile
)
# get complete decoder inference result and its timing profile
sample_output, full_e2e_median_runtime = full_inference_greedy(
self.gpt2_trt, input_ids, timing_profile,
max_length=GPT2ModelTRTConfig.MAX_SEQUENCE_LENGTH[metadata.variant]
)
semantic_outputs = []
for i, sample_output in enumerate(sample_output):
semantic_outputs.append(tokenizer.decode(sample_output, skip_special_tokens=True))
return NetworkResult(
input=inference_input,
output_tensor=sample_output,
semantic_output=semantic_outputs,
median_runtime=[
NetworkRuntime(
name=GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
runtime=decoder_e2e_median_time,
),
NetworkRuntime(
name=GPT2ModelTRTConfig.NETWORK_FULL_NAME,
runtime=full_e2e_median_runtime,
),
],
models=NetworkModels(
torch=None,
onnx=list(onnx_fpaths.values()),
trt=[
NetworkModel(
name=GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
fpath=self.gpt2_engine.fpath,
),
],
),
)
def run_trt(
self,
metadata: NetworkMetadata,
onnx_fpaths: Tuple[NetworkModel],
network_input: List[str],
working_directory: str,
keep_trt_engine: bool,
keep_onnx_model: bool,
keep_torch_model: bool,
timing_profile: TimingProfile,
) -> List[NetworkResult]:
workspace = NNFolderWorkspace(
self.frameworks_cmd.config.network_name, metadata, working_directory
)
results = []
try:
# no fpath provided for onnx files, download them
if len(onnx_fpaths) == 0:
onnx_fpaths = self.frameworks_cmd.generate_and_download_framework(
metadata, workspace
).onnx
else:
keep_onnx_model = True
keep_torch_model = True
            # Output networks shall not exceed the number of network segments explicitly defined by the configuration file.
            assert len(onnx_fpaths) == len(
                GPT2ModelTRTConfig.NETWORK_SEGMENTS
            ), "There should only be {} exported ONNX segments in GPT2 model.".format(
                len(GPT2ModelTRTConfig.NETWORK_SEGMENTS)
            )
hash_onnx_fpath = {v.name: v for v in onnx_fpaths}
gpt2_onnx_fpath = hash_onnx_fpath[
GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME
].fpath
self.gpt2_engine = GPT2ONNXFile(gpt2_onnx_fpath, metadata).as_trt_engine(gpt2_onnx_fpath + ".engine")
tfm_config = GPT2Config(
use_cache=metadata.other.kv_cache,
)
self.gpt2_trt = GPT2TRTDecoder(self.gpt2_engine, metadata, tfm_config)
for ninput in network_input:
results.append(
self.execute_inference(
metadata, hash_onnx_fpath, ninput, timing_profile
)
)
finally:
self.cleanup(workspace, keep_trt_engine, keep_onnx_model, keep_torch_model)
return results
def add_args(self, parser) -> None:
# use the same args as frameworks.py
self.frameworks_cmd.add_args(parser)
polygraphy_group = parser.add_argument_group("polygraphy")
polygraphy_group.add_argument(
"--onnx-fpath",
default=None,
help="Path to GPT2 ONNX model. If None is supplied, scripts will generate them from HuggingFace.",
)
polygraphy_group.add_argument(
"--fp16", action="store_true", help="Enables fp16 TensorRT tactics."
)
polygraphy_group.add_argument(
"--save-trt-engine",
action="store_true",
help="Saves TensorRT runtime engine in working directory.",
)
def args_to_network_models(self, args) -> List[NetworkModel]:
gpt2_fpath_check = args.onnx_fpath is None
network_models = None
if gpt2_fpath_check:
network_models = tuple()
else:
onnx_decoder = NetworkModel(
name=GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
fpath=args.onnx_fpath,
)
            network_models = (onnx_decoder,)
return network_models
def args_to_network_metadata(self, args) -> NetworkMetadata:
frameworks_parsed_metadata = self.frameworks_cmd.args_to_network_metadata(args)
return NetworkMetadata(
variant=frameworks_parsed_metadata.variant,
precision=Precision(fp16=args.fp16),
other=frameworks_parsed_metadata.other,
)
RUN_CMD = GPT2Polygraphy()
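# Illustrative invocation (only flags registered in this file are shown; additional options come
# from GPT2/frameworks.py and NNDF.interface and may also be required):
#
#     python3 GPT2/trt.py --variant gpt2 --fp16 --save-trt-engine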
if __name__ == "__main__":
result = RUN_CMD()
print("Results: {}".format(result))
| TensorRT-master | demo/HuggingFace/GPT2/trt.py |
# std
import os
import sys
import argparse
from typing import List
# huggingface
from transformers import (
GPT2LMHeadModel,
GPT2Tokenizer,
GPT2Config,
)
# Add syspath for custom library
if __name__ == "__main__":
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, os.pardir)
sys.path.append(project_root)
# helpers
from NNDF.interface import FrameworkCommand
from NNDF.general_utils import confirm_folder_delete, NNFolderWorkspace
from NNDF.networks import (
NetworkResult,
NetworkMetadata,
NetworkRuntime,
Precision,
NetworkModel,
NetworkModels,
TimingProfile,
)
from GPT2.export import GPT2TorchFile
from GPT2.GPT2ModelConfig import GPT2ModelTRTConfig
from GPT2.measurements import gpt2_inference, full_inference_greedy
class GPT2HuggingFace(FrameworkCommand):
def __init__(self):
super().__init__(
GPT2ModelTRTConfig, description="Runs framework results for GPT2 model."
)
# Default inference input used during inference stage
self.onnx_gpt2 = None
self.torch_gpt2_dir = None
def generate_and_download_framework(
self, metadata: NetworkMetadata, workspace: NNFolderWorkspace
) -> NetworkModels:
cache_variant = False
if metadata.other.kv_cache:
cache_variant = True
trt_gpt2_config = self.config
metadata_serialized = trt_gpt2_config.get_metadata_string(metadata)
workspace_dir = workspace.get_path()
pytorch_model_dir = os.path.join(workspace_dir, metadata_serialized)
# We keep track of the generated torch location for cleanup later
self.torch_gpt2_dir = pytorch_model_dir
model = None
tfm_config = GPT2Config(use_cache=cache_variant)
if not os.path.exists(pytorch_model_dir):
# Generate the pre-trained weights
model = GPT2LMHeadModel(tfm_config).from_pretrained(metadata.variant)
model.save_pretrained(pytorch_model_dir)
print("Pytorch Model saved to {}".format(pytorch_model_dir))
else:
print(
"Frameworks file already exists, skipping generation and loading from file instead."
)
model = GPT2LMHeadModel(tfm_config).from_pretrained(pytorch_model_dir)
root_onnx_model_name = "{}.onnx".format(metadata_serialized)
root_onnx_model_fpath = os.path.join(
os.getcwd(), workspace_dir, root_onnx_model_name
)
onnx_model_fpath = root_onnx_model_fpath
gpt2 = GPT2TorchFile(model, metadata)
self.onnx_gpt2 = gpt2.as_onnx_model(onnx_model_fpath, force_overwrite=False)
onnx_models = [
NetworkModel(
name=GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
fpath=self.onnx_gpt2.fpath,
)
]
torch_models = [
NetworkModel(
name=GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
fpath=pytorch_model_dir,
)
]
return NetworkModels(torch=torch_models, onnx=onnx_models, trt=None)
def cleanup(
self,
workspace: NNFolderWorkspace,
save_onnx_model: bool = True,
keep_pytorch_model: bool = True,
) -> None:
"""
Cleans up the working directory and leaves models if available.
        Should not assume any functions from the framework class have been called.
Returns:
None
"""
# Clean-up generated files
if not save_onnx_model and self.onnx_gpt2 is not None:
self.onnx_gpt2.cleanup()
# Remove any onnx external files by removing integer named values and weight files
workspace_path = workspace.get_path()
for d in os.listdir(workspace_path):
fpath = os.path.join(workspace_path, d)
if os.path.isfile(fpath) and os.path.splitext(d)[1] == ".weight":
os.remove(fpath)
elif d.isnumeric():
os.remove(fpath)
if not keep_pytorch_model:
# Using rmtree can be dangerous, have user confirm before deleting.
confirm_folder_delete(
self.torch_gpt2_dir,
prompt="Confirm you want to delete downloaded pytorch model folder?",
)
if not keep_pytorch_model and not save_onnx_model:
workspace.cleanup(force_remove=False)
def execute_inference(
self,
metadata: NetworkMetadata,
network_fpaths: NetworkModels,
inference_input: str,
timing_profile: TimingProfile,
) -> NetworkResult:
# Execute some tests
tokenizer = GPT2Tokenizer.from_pretrained(metadata.variant)
input_ids = tokenizer(inference_input, return_tensors="pt").input_ids
# By default, HuggingFace model structure is one giant file.
gpt2_torch_fpath = network_fpaths.torch[0].fpath
config = GPT2Config(use_cache=metadata.other.kv_cache)
gpt2_model = GPT2LMHeadModel(config).from_pretrained(gpt2_torch_fpath)
gpt2_torch = GPT2TorchFile.TorchModule(
gpt2_model.transformer, gpt2_model.lm_head, gpt2_model.config
)
greedy_output = gpt2_torch.generate(input_ids) #greedy search
# get single decoder iteration inference timing profile
_, decoder_e2e_median_time = gpt2_inference(
gpt2_torch, input_ids, timing_profile
)
# get complete decoder inference result and its timing profile
sample_output, full_e2e_median_runtime = full_inference_greedy(
gpt2_torch,
input_ids,
timing_profile,
max_length=GPT2ModelTRTConfig.MAX_SEQUENCE_LENGTH[metadata.variant],
)
semantic_outputs = []
for i, sample_output in enumerate(sample_output):
semantic_outputs.append(
tokenizer.decode(sample_output, skip_special_tokens=True)
)
return NetworkResult(
input=inference_input,
output_tensor=greedy_output,
semantic_output=semantic_outputs,
median_runtime=[
NetworkRuntime(
name=GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME,
runtime=decoder_e2e_median_time,
),
NetworkRuntime(
name=GPT2ModelTRTConfig.NETWORK_FULL_NAME,
runtime=full_e2e_median_runtime,
),
],
models=network_fpaths,
)
def run_framework(
self,
metadata: NetworkMetadata,
network_input: List[str],
working_directory: str,
keep_onnx_model: bool,
keep_pytorch_model: bool,
timing_profile: TimingProfile,
) -> List[NetworkResult]:
"""
Main entry point of our function which compiles and generates our model data.
"""
results = []
workspace = NNFolderWorkspace(
self.config.network_name, metadata, working_directory
)
try:
network_fpaths = self.generate_and_download_framework(metadata, workspace)
for ninput in network_input:
results.append(
self.execute_inference(
metadata, network_fpaths, ninput, timing_profile
)
)
finally:
self.cleanup(workspace, keep_onnx_model, keep_pytorch_model)
return results
def args_to_network_metadata(self, args: argparse.Namespace) -> NetworkMetadata:
return NetworkMetadata(
variant=args.variant,
precision=Precision(fp16=False),
other=self.config.MetadataClass(kv_cache=args.enable_kv_cache),
)
# Entry point
RUN_CMD = GPT2HuggingFace()
if __name__ == "__main__":
result = RUN_CMD()
print("Results: {}".format(result))
| TensorRT-master | demo/HuggingFace/GPT2/frameworks.py |
"""
Utils specific to GPT2 network.
"""
# torch
import torch
# TRT-HuggingFace
from NNDF.general_utils import measure_python_inference_code
from NNDF.torch_utils import use_cuda
@use_cuda
def gpt2_inference(gpt2, input_ids, timing_profile, use_cuda=True):
gpt2_stmt = lambda: gpt2(input_ids=input_ids)
gpt2_e2e_median_time = measure_python_inference_code(
gpt2_stmt, number=timing_profile.number, iterations=timing_profile.iterations
)
return (gpt2_stmt(), gpt2_e2e_median_time)
# Code specifically for Pythonic inference measurement used across all GPT2 related scripts
@use_cuda
def full_inference_greedy(gpt2, input_ids, timing_profile, max_length, use_cuda=True):
def _e2e():
return gpt2.generate(input_ids, max_length=max_length) # greedy search
full_e2e_median_time = measure_python_inference_code(
_e2e,
number=timing_profile.number,
iterations=timing_profile.iterations,
)
return (_e2e(), full_e2e_median_time)
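# Illustrative sketch (example values; TimingProfile comes from NNDF.networks): timing one full
# greedy-generation run with the helpers above.
#
#     timing_profile = TimingProfile(iterations=10, number=1, warmup=3)
#     output, median_s = full_inference_greedy(
#         gpt2, input_ids, timing_profile, max_length=64, use_cuda=True
#     )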
| TensorRT-master | demo/HuggingFace/GPT2/measurements.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
from tacotron2.loss_function import Tacotron2Loss
from waveglow.loss_function import WaveGlowLoss
def get_loss_function(loss_function, sigma=1.0):
if loss_function == 'Tacotron2':
loss = Tacotron2Loss()
elif loss_function == 'WaveGlow':
loss = WaveGlowLoss(sigma=sigma)
else:
raise NotImplementedError(
"unknown loss function requested: {}".format(loss_function))
loss.cuda()
return loss
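# Illustrative sketch: selecting the training criterion (sigma only applies to WaveGlow).
#
#     criterion = get_loss_function('WaveGlow', sigma=1.0)
#     loss = criterion(y_pred, y)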
| TensorRT-master | demo/Tacotron2/loss_functions.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from os.path import abspath, dirname
# enabling modules discovery from global entrypoint
sys.path.append(abspath(dirname(__file__)+'/'))
from tacotron2.model import Tacotron2
from waveglow.model import WaveGlow
import torch
def parse_model_args(model_name, parser, add_help=False):
if model_name == 'Tacotron2':
from tacotron2.arg_parser import parse_tacotron2_args
return parse_tacotron2_args(parser, add_help)
if model_name == 'WaveGlow':
from waveglow.arg_parser import parse_waveglow_args
return parse_waveglow_args(parser, add_help)
else:
raise NotImplementedError(model_name)
def batchnorm_to_float(module):
"""Converts batch norm to FP32"""
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.float()
for child in module.children():
batchnorm_to_float(child)
return module
def init_bn(module):
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
if module.affine:
module.weight.data.uniform_()
for child in module.children():
init_bn(child)
def get_model(model_name, model_config, to_cuda,
uniform_initialize_bn_weight=False, forward_is_infer=False):
""" Code chooses a model based on name"""
model = None
if model_name == 'Tacotron2':
if forward_is_infer:
class Tacotron2__forward_is_infer(Tacotron2):
def forward(self, inputs, input_lengths):
return self.infer(inputs, input_lengths)
model = Tacotron2__forward_is_infer(**model_config)
else:
model = Tacotron2(**model_config)
elif model_name == 'WaveGlow':
if forward_is_infer:
class WaveGlow__forward_is_infer(WaveGlow):
def forward(self, spect, sigma=1.0):
return self.infer(spect, sigma)
model = WaveGlow__forward_is_infer(**model_config)
else:
model = WaveGlow(**model_config)
else:
raise NotImplementedError(model_name)
if uniform_initialize_bn_weight:
init_bn(model)
if to_cuda:
model = model.cuda()
return model
def get_model_config(model_name, args):
""" Code chooses a model based on name"""
if model_name == 'Tacotron2':
model_config = dict(
# optimization
mask_padding=args.mask_padding,
# audio
n_mel_channels=args.n_mel_channels,
# symbols
n_symbols=args.n_symbols,
symbols_embedding_dim=args.symbols_embedding_dim,
# encoder
encoder_kernel_size=args.encoder_kernel_size,
encoder_n_convolutions=args.encoder_n_convolutions,
encoder_embedding_dim=args.encoder_embedding_dim,
# attention
attention_rnn_dim=args.attention_rnn_dim,
attention_dim=args.attention_dim,
# attention location
attention_location_n_filters=args.attention_location_n_filters,
attention_location_kernel_size=args.attention_location_kernel_size,
# decoder
n_frames_per_step=args.n_frames_per_step,
decoder_rnn_dim=args.decoder_rnn_dim,
prenet_dim=args.prenet_dim,
max_decoder_steps=args.max_decoder_steps,
gate_threshold=args.gate_threshold,
p_attention_dropout=args.p_attention_dropout,
p_decoder_dropout=args.p_decoder_dropout,
# postnet
postnet_embedding_dim=args.postnet_embedding_dim,
postnet_kernel_size=args.postnet_kernel_size,
postnet_n_convolutions=args.postnet_n_convolutions,
decoder_no_early_stopping=args.decoder_no_early_stopping
)
return model_config
elif model_name == 'WaveGlow':
model_config = dict(
n_mel_channels=args.n_mel_channels,
n_flows=args.flows,
n_group=args.groups,
n_early_every=args.early_every,
n_early_size=args.early_size,
WN_config=dict(
n_layers=args.wn_layers,
kernel_size=args.wn_kernel_size,
n_channels=args.wn_channels
)
)
return model_config
else:
raise NotImplementedError(model_name)
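# Illustrative sketch (argument values come from the parsed command line): building a Tacotron2
# instance whose forward() aliases infer(), as the inference scripts expect.
#
#     model_config = get_model_config('Tacotron2', args)
#     model = get_model('Tacotron2', model_config, to_cuda=True, forward_is_infer=True)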
| TensorRT-master | demo/Tacotron2/models.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tacotron2.text import text_to_sequence
import models
import torch
import argparse
import numpy as np
from scipy.io.wavfile import write
import sys
from inference import checkpoint_from_distributed, unwrap_distributed, MeasureTime, prepare_input_sequence, load_and_setup_model
import time
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from apex import amp
from waveglow.denoiser import Denoiser
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str,
help='Full path to the Tacotron2 model checkpoint file')
parser.add_argument('--waveglow', type=str,
help='Full path to the WaveGlow model checkpoint file')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float,
help='Standard deviation of the Gaussian distribution')
parser.add_argument('-d', '--denoising-strength', default=0.01, type=float,
help='Denoising strength for removing model bias')
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
run_mode = parser.add_mutually_exclusive_group()
run_mode.add_argument('--fp16', action='store_true',
help='Run inference with FP16')
run_mode.add_argument('--cpu', action='store_true',
help='Run inference on CPU')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--num-iters', type=int, default=10,
help='Number of iterations')
parser.add_argument('-il', '--input-length', type=int, default=64,
help='Input length')
parser.add_argument('-bs', '--batch-size', type=int, default=1,
help='Batch size')
return parser
def print_stats(measurements_all):
throughput = measurements_all['throughput']
preprocessing = measurements_all['pre_processing']
type_conversion = measurements_all['type_conversion']
storage = measurements_all['storage']
data_transfer = measurements_all['data_transfer']
postprocessing = [sum(p) for p in zip(type_conversion,storage,data_transfer)]
latency = measurements_all['latency']
waveglow_latency = measurements_all['waveglow_latency']
tacotron2_latency = measurements_all['tacotron2_latency']
denoiser_latency = measurements_all['denoiser_latency']
num_mels_per_audio = measurements_all['num_mels_per_audio']
latency.sort()
cf_50 = max(latency[:int(len(latency)*0.50)])
cf_90 = max(latency[:int(len(latency)*0.90)])
cf_95 = max(latency[:int(len(latency)*0.95)])
cf_99 = max(latency[:int(len(latency)*0.99)])
cf_100 = max(latency[:int(len(latency)*1.0)])
print("Throughput average (samples/sec) = {:.0f}".format(np.mean(throughput)))
print("Preprocessing average (seconds) = {:.4f}".format(np.mean(preprocessing)))
print("Postprocessing average (seconds) = {:.4f}".format(np.mean(postprocessing)))
print("Number of mels per audio average = {:.0f}".format(np.mean(num_mels_per_audio)))
print("Tacotron2 latency average (seconds) = {:.2f}".format(np.mean(tacotron2_latency)))
print("WaveGlow latency average (seconds) = {:.2f}".format(np.mean(waveglow_latency)))
print("Denoiser latency average (seconds) = {:.4f}".format(np.mean(denoiser_latency)))
print("Latency average (seconds) = {:.2f}".format(np.mean(latency)))
print("Latency std (seconds) = {:.2f}".format(np.std(latency)))
print("Latency cl 50 (seconds) = {:.2f}".format(cf_50))
print("Latency cl 90 (seconds) = {:.2f}".format(cf_90))
print("Latency cl 95 (seconds) = {:.2f}".format(cf_95))
print("Latency cl 99 (seconds) = {:.2f}".format(cf_99))
print("Latency cl 100 (seconds) = {:.2f}".format(cf_100))
def main():
"""
Launches text to speech (inference).
Inference is executed on a single GPU or CPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, unknown_args = parser.parse_known_args()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
measurements_all = {"pre_processing": [],
"tacotron2_latency": [],
"waveglow_latency": [],
"denoiser_latency": [],
"latency": [],
"type_conversion": [],
"data_transfer": [],
"storage": [],
"tacotron2_items_per_sec": [],
"waveglow_items_per_sec": [],
"num_mels_per_audio": [],
"throughput": []}
print("args:", args, unknown_args)
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
args.fp16, args.cpu, forward_is_infer=True)
waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
args.fp16, args.cpu, forward_is_infer=True)
denoiser = Denoiser(waveglow)
if not args.cpu:
denoiser.cuda()
texts = ["The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves."]
texts = [texts[0][:args.input_length]]
texts = texts*args.batch_size
warmup_iters = 3
for iter in range(args.num_iters):
measurements = {}
with MeasureTime(measurements, "pre_processing", args.cpu):
sequences_padded, input_lengths = prepare_input_sequence(texts, args.cpu)
with torch.no_grad():
with MeasureTime(measurements, "latency", args.cpu):
with MeasureTime(measurements, "tacotron2_latency", args.cpu):
mel, mel_lengths, _ = tacotron2.infer(sequences_padded, input_lengths)
with MeasureTime(measurements, "waveglow_latency", args.cpu):
audios = waveglow.infer(mel, sigma=args.sigma_infer)
num_mels = mel.size(0)*mel.size(2)
num_samples = audios.size(0)*audios.size(1)
with MeasureTime(measurements, "type_conversion", args.cpu):
audios = audios.float()
with torch.no_grad(), MeasureTime(measurements, "denoiser_latency", args.cpu):
audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)
with MeasureTime(measurements, "data_transfer", args.cpu):
audios = audios.cpu()
with MeasureTime(measurements, "storage", args.cpu):
audios = audios.numpy()
for i, audio in enumerate(audios):
audio_path = "audio_"+str(i)+".wav"
write(audio_path, args.sampling_rate,
audio[:mel_lengths[i]*args.stft_hop_length])
measurements['tacotron2_items_per_sec'] = num_mels/measurements['tacotron2_latency']
measurements['waveglow_items_per_sec'] = num_samples/measurements['waveglow_latency']
measurements['num_mels_per_audio'] = mel.size(2)
measurements['throughput'] = num_samples/measurements['latency']
if iter >= warmup_iters:
for k,v in measurements.items():
measurements_all[k].append(v)
DLLogger.log(step=(iter-warmup_iters), data={k: v})
DLLogger.flush()
print_stats(measurements_all)
if __name__ == '__main__':
main()
| TensorRT-master | demo/Tacotron2/test_infer.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import models
import torch
import argparse
import numpy as np
import json
import time
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model, MeasureTime
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from apex import amp
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-m', '--model-name', type=str, default='', required=True,
help='Model to train')
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--amp-run', action='store_true',
help='Inference with Automatic Mixed Precision')
parser.add_argument('-bs', '--batch-size', type=int, default=1,
help='Batch size')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save results')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
return parser
def main():
"""
Launches inference benchmark.
Inference is executed on a single GPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
log_file = args.log_file
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT,
args.output+'/'+args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
model = load_and_setup_model(args.model_name, parser, None, args.amp_run,
forward_is_infer=True)
if args.model_name == "Tacotron2":
model = torch.jit.script(model)
warmup_iters = 3
num_iters = 1+warmup_iters
for i in range(num_iters):
measurements = {}
if args.model_name == 'Tacotron2':
text_padded = torch.randint(low=0, high=148, size=(args.batch_size, 140),
dtype=torch.long).cuda()
input_lengths = torch.IntTensor([text_padded.size(1)]*args.batch_size).cuda().long()
with torch.no_grad(), MeasureTime(measurements, "inference_time"):
mels, _, _ = model(text_padded, input_lengths)
num_items = mels.size(0)*mels.size(2)
if args.model_name == 'WaveGlow':
n_mel_channels = model.upsample.in_channels
num_mels = 895
mel_padded = torch.zeros(args.batch_size, n_mel_channels,
num_mels).normal_(-5.62, 1.98).cuda()
if args.amp_run:
mel_padded = mel_padded.half()
with torch.no_grad(), MeasureTime(measurements, "inference_time"):
audios = model(mel_padded)
audios = audios.float()
num_items = audios.size(0)*audios.size(1)
if i >= warmup_iters:
DLLogger.log(step=(i-warmup_iters,), data={"latency": measurements['inference_time']})
DLLogger.log(step=(i-warmup_iters,), data={"items_per_sec": num_items/measurements['inference_time']})
DLLogger.log(step=tuple(),
data={'infer_latency': measurements['inference_time']})
DLLogger.log(step=tuple(),
data={'infer_items_per_sec': num_items/measurements['inference_time']})
DLLogger.flush()
if __name__ == '__main__':
main()
| TensorRT-master | demo/Tacotron2/inference_perf.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from tacotron2.data_function import TextMelCollate
from tacotron2.data_function import TextMelLoader
from waveglow.data_function import MelAudioLoader
from tacotron2.data_function import batch_to_gpu as batch_to_gpu_tacotron2
from waveglow.data_function import batch_to_gpu as batch_to_gpu_waveglow
def get_collate_function(model_name, n_frames_per_step):
if model_name == 'Tacotron2':
collate_fn = TextMelCollate(n_frames_per_step)
elif model_name == 'WaveGlow':
collate_fn = torch.utils.data.dataloader.default_collate
else:
raise NotImplementedError(
"unknown collate function requested: {}".format(model_name))
return collate_fn
def get_data_loader(model_name, dataset_path, audiopaths_and_text, args):
if model_name == 'Tacotron2':
data_loader = TextMelLoader(dataset_path, audiopaths_and_text, args)
elif model_name == 'WaveGlow':
data_loader = MelAudioLoader(dataset_path, audiopaths_and_text, args)
else:
raise NotImplementedError(
"unknown data loader requested: {}".format(model_name))
return data_loader
def get_batch_to_gpu(model_name):
if model_name == 'Tacotron2':
batch_to_gpu = batch_to_gpu_tacotron2
elif model_name == 'WaveGlow':
batch_to_gpu = batch_to_gpu_waveglow
else:
raise NotImplementedError(
"unknown batch_to_gpu requested: {}".format(model_name))
return batch_to_gpu
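# Illustrative sketch (paths are example assumptions): assembling the Tacotron2 data pipeline.
#
#     collate_fn = get_collate_function('Tacotron2', n_frames_per_step=1)
#     trainset = get_data_loader('Tacotron2', './',
#                                'filelists/ljs_audio_text_train_filelist.txt', args)
#     batch_to_gpu = get_batch_to_gpu('Tacotron2')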
| TensorRT-master | demo/Tacotron2/data_functions.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch
from tacotron2.data_function import TextMelLoader
from common.utils import load_filepaths_and_text
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-d', '--dataset-path', type=str,
default='./', help='Path to dataset')
parser.add_argument('--wav-files', required=True,
type=str, help='Path to filelist with audio paths and text')
parser.add_argument('--mel-files', required=True,
type=str, help='Path to filelist with mel paths and text')
parser.add_argument('--text-cleaners', nargs='*',
default=['english_cleaners'], type=str,
help='Type of text cleaners for input text')
parser.add_argument('--max-wav-value', default=32768.0, type=float,
help='Maximum audiowave value')
parser.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
parser.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
parser.add_argument('--win-length', default=1024, type=int,
help='Window length')
parser.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
parser.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
parser.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
return parser
def audio2mel(dataset_path, audiopaths_and_text, melpaths_and_text, args):
melpaths_and_text_list = load_filepaths_and_text(dataset_path, melpaths_and_text)
audiopaths_and_text_list = load_filepaths_and_text(dataset_path, audiopaths_and_text)
data_loader = TextMelLoader(dataset_path, audiopaths_and_text, args)
for i in range(len(melpaths_and_text_list)):
if i%100 == 0:
print("done", i, "/", len(melpaths_and_text_list))
mel = data_loader.get_mel(audiopaths_and_text_list[i][0])
torch.save(mel, melpaths_and_text_list[i][0])
def main():
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Training')
parser = parse_args(parser)
args = parser.parse_args()
args.load_mel_from_disk = False
audio2mel(args.dataset_path, args.wav_files, args.mel_files, args)
if __name__ == '__main__':
main()
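# Illustrative invocation (dataset path and mel filelist name are example assumptions; each mel
# filelist entry must point at the output mel path for the corresponding wav entry):
#
#     python3 preprocess_audio2mel.py --dataset-path ./LJSpeech-1.1 \
#         --wav-files filelists/ljs_audio_text_train_filelist.txt \
#         --mel-files filelists/ljs_mel_text_train_filelist.txt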
| TensorRT-master | demo/Tacotron2/preprocess_audio2mel.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import argparse
import numpy as np
from contextlib import contextmanager
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from apex.parallel import DistributedDataParallel as DDP
import models
import loss_functions
import data_functions
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from scipy.io.wavfile import write as write_wav
from apex import amp
amp.lists.functional_overrides.FP32_FUNCS.remove('softmax')
amp.lists.functional_overrides.FP16_FUNCS.append('softmax')
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save checkpoints')
parser.add_argument('-d', '--dataset-path', type=str,
default='./', help='Path to dataset')
parser.add_argument('-m', '--model-name', type=str, default='', required=True,
help='Model to train')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--anneal-steps', nargs='*',
help='Epochs after which decrease learning rate')
parser.add_argument('--anneal-factor', type=float, choices=[0.1, 0.3], default=0.1,
help='Factor for annealing learning rate')
# training
training = parser.add_argument_group('training setup')
training.add_argument('--epochs', type=int, required=True,
help='Number of total epochs to run')
training.add_argument('--epochs-per-checkpoint', type=int, default=50,
help='Number of epochs per checkpoint')
training.add_argument('--checkpoint-path', type=str, default='',
help='Checkpoint path to resume training')
training.add_argument('--resume-from-last', action='store_true',
help='Resumes training from the last checkpoint; uses the directory provided with \'--output\' option to search for the checkpoint \"checkpoint_<model_name>_last.pt\"')
training.add_argument('--dynamic-loss-scaling', type=bool, default=True,
help='Enable dynamic loss scaling')
training.add_argument('--amp', action='store_true',
help='Enable AMP')
training.add_argument('--cudnn-enabled', action='store_true',
help='Enable cudnn')
training.add_argument('--cudnn-benchmark', action='store_true',
help='Run cudnn benchmark')
training.add_argument('--disable-uniform-initialize-bn-weight', action='store_true',
help='disable uniform initialization of batchnorm layer weight')
optimization = parser.add_argument_group('optimization setup')
optimization.add_argument(
'--use-saved-learning-rate', default=False, type=bool)
optimization.add_argument('-lr', '--learning-rate', type=float, required=True,
                              help='Learning rate')
optimization.add_argument('--weight-decay', default=1e-6, type=float,
help='Weight decay')
optimization.add_argument('--grad-clip-thresh', default=1.0, type=float,
help='Clip threshold for gradients')
optimization.add_argument('-bs', '--batch-size', type=int, required=True,
help='Batch size per GPU')
optimization.add_argument('--grad-clip', default=5.0, type=float,
help='Enables gradient clipping and sets maximum gradient norm value')
# dataset parameters
dataset = parser.add_argument_group('dataset parameters')
dataset.add_argument('--load-mel-from-disk', action='store_true',
help='Loads mel spectrograms from disk instead of computing them on the fly')
dataset.add_argument('--training-files',
default='filelists/ljs_audio_text_train_filelist.txt',
type=str, help='Path to training filelist')
dataset.add_argument('--validation-files',
default='filelists/ljs_audio_text_val_filelist.txt',
type=str, help='Path to validation filelist')
dataset.add_argument('--text-cleaners', nargs='*',
default=['english_cleaners'], type=str,
help='Type of text cleaners for input text')
# audio parameters
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--max-wav-value', default=32768.0, type=float,
help='Maximum audiowave value')
audio.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
audio.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win-length', default=1024, type=int,
help='Window length')
audio.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
distributed = parser.add_argument_group('distributed setup')
# distributed.add_argument('--distributed-run', default=True, type=bool,
# help='enable distributed run')
distributed.add_argument('--rank', default=0, type=int,
help='Rank of the process, do not set! Done by multiproc module')
distributed.add_argument('--world-size', default=1, type=int,
help='Number of processes, do not set! Done by multiproc module')
distributed.add_argument('--dist-url', type=str, default='tcp://localhost:23456',
help='Url used to set up distributed training')
distributed.add_argument('--group-name', type=str, default='group_name',
required=False, help='Distributed group name')
distributed.add_argument('--dist-backend', default='nccl', type=str, choices={'nccl'},
help='Distributed run backend')
benchmark = parser.add_argument_group('benchmark')
benchmark.add_argument('--bench-class', type=str, default='')
return parser
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= num_gpus
return rt
def init_distributed(args, world_size, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(
backend=args.dist_backend, init_method=args.dist_url,
world_size=world_size, rank=rank, group_name=group_name)
print("Done initializing distributed")
def save_checkpoint(model, optimizer, epoch, config, amp_run, output_dir, model_name,
local_rank, world_size):
random_rng_state = torch.random.get_rng_state().cuda()
cuda_rng_state = torch.cuda.get_rng_state(local_rank).cuda()
random_rng_states_all = [torch.empty_like(random_rng_state) for _ in range(world_size)]
cuda_rng_states_all = [torch.empty_like(cuda_rng_state) for _ in range(world_size)]
if world_size > 1:
dist.all_gather(random_rng_states_all, random_rng_state)
dist.all_gather(cuda_rng_states_all, cuda_rng_state)
else:
random_rng_states_all = [random_rng_state]
cuda_rng_states_all = [cuda_rng_state]
random_rng_states_all = torch.stack(random_rng_states_all).cpu()
cuda_rng_states_all = torch.stack(cuda_rng_states_all).cpu()
if local_rank == 0:
checkpoint = {'epoch': epoch,
'cuda_rng_state_all': cuda_rng_states_all,
'random_rng_states_all': random_rng_states_all,
'config': config,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict()}
if amp_run:
checkpoint['amp'] = amp.state_dict()
checkpoint_filename = "checkpoint_{}_{}.pt".format(model_name, epoch)
checkpoint_path = os.path.join(
output_dir, checkpoint_filename)
print("Saving model and optimizer state at epoch {} to {}".format(
epoch, checkpoint_path))
torch.save(checkpoint, checkpoint_path)
symlink_src = checkpoint_filename
symlink_dst = os.path.join(
output_dir, "checkpoint_{}_last.pt".format(model_name))
if os.path.exists(symlink_dst) and os.path.islink(symlink_dst):
print("|||| Updating symlink", symlink_dst, "to point to", symlink_src)
os.remove(symlink_dst)
os.symlink(symlink_src, symlink_dst)
def get_last_checkpoint_filename(output_dir, model_name):
symlink = os.path.join(output_dir, "checkpoint_{}_last.pt".format(model_name))
if os.path.exists(symlink):
print("|||| Loading checkpoint from symlink", symlink)
return os.path.join(output_dir, os.readlink(symlink))
else:
print("|||| No last checkpoint available - starting from epoch 0 ")
return ""
def load_checkpoint(model, optimizer, epoch, config, amp_run, filepath, local_rank):
checkpoint = torch.load(filepath, map_location='cpu')
epoch[0] = checkpoint['epoch']+1
device_id = local_rank % torch.cuda.device_count()
torch.cuda.set_rng_state(checkpoint['cuda_rng_state_all'][device_id])
torch.random.set_rng_state(checkpoint['random_rng_states_all'][device_id])
config = checkpoint['config']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
if amp_run:
amp.load_state_dict(checkpoint['amp'])
# adapted from: https://discuss.pytorch.org/t/opinion-eval-should-be-a-context-manager/18998/3
# Following snippet is licensed under MIT license
@contextmanager
def evaluating(model):
'''Temporarily switch to evaluation mode.'''
istrain = model.training
try:
model.eval()
yield model
finally:
if istrain:
model.train()
def validate(model, criterion, valset, epoch, batch_iter, batch_size,
world_size, collate_fn, distributed_run, rank, batch_to_gpu):
"""Handles all the validation scoring and printing"""
with evaluating(model), torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, num_workers=1, shuffle=False,
sampler=val_sampler,
batch_size=batch_size, pin_memory=False,
collate_fn=collate_fn)
val_loss = 0.0
num_iters = 0
val_items_per_sec = 0.0
for i, batch in enumerate(val_loader):
torch.cuda.synchronize()
iter_start_time = time.perf_counter()
x, y, num_items = batch_to_gpu(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, world_size).item()
reduced_num_items = reduce_tensor(num_items.data, 1).item()
            else:
reduced_val_loss = loss.item()
reduced_num_items = num_items.item()
val_loss += reduced_val_loss
torch.cuda.synchronize()
iter_stop_time = time.perf_counter()
iter_time = iter_stop_time - iter_start_time
items_per_sec = reduced_num_items/iter_time
DLLogger.log(step=(epoch, batch_iter, i), data={'val_items_per_sec': items_per_sec})
val_items_per_sec += items_per_sec
num_iters += 1
val_loss = val_loss/(i + 1)
DLLogger.log(step=(epoch,), data={'val_loss': val_loss})
DLLogger.log(step=(epoch,), data={'val_items_per_sec':
(val_items_per_sec/num_iters if num_iters > 0 else 0.0)})
return val_loss
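# adjust_learning_rate implements step-wise annealing: p counts how many entries
# of --anneal-steps the current epoch has already passed. For anneal_factor 0.3,
# the rate is scaled by 0.1 for every two passed steps with an extra 0.3 factor
# on odd counts; otherwise lr = learning_rate * anneal_factor ** p. The change is
# logged only when the value actually moves.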
def adjust_learning_rate(iteration, epoch, optimizer, learning_rate,
anneal_steps, anneal_factor, rank):
p = 0
if anneal_steps is not None:
for i, a_step in enumerate(anneal_steps):
if epoch >= int(a_step):
p = p+1
if anneal_factor == 0.3:
lr = learning_rate*((0.1 ** (p//2))*(1.0 if p % 2 == 0 else 0.3))
else:
lr = learning_rate*(anneal_factor ** p)
if optimizer.param_groups[0]['lr'] != lr:
DLLogger.log(step=(epoch, iteration), data={'learning_rate changed': str(optimizer.param_groups[0]['lr'])+" -> "+str(lr)})
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def main():
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Training')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
if 'LOCAL_RANK' in os.environ and 'WORLD_SIZE' in os.environ:
local_rank = int(os.environ['LOCAL_RANK'])
world_size = int(os.environ['WORLD_SIZE'])
else:
local_rank = args.rank
world_size = args.world_size
distributed_run = world_size > 1
if local_rank == 0:
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT,
args.output+'/'+args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
else:
DLLogger.init(backends=[])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
model_name = args.model_name
parser = models.parse_model_args(model_name, parser)
args, _ = parser.parse_known_args()
torch.backends.cudnn.enabled = args.cudnn_enabled
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if distributed_run:
init_distributed(args, world_size, local_rank, args.group_name)
torch.cuda.synchronize()
run_start_time = time.perf_counter()
model_config = models.get_model_config(model_name, args)
model = models.get_model(model_name, model_config,
to_cuda=True,
uniform_initialize_bn_weight=not args.disable_uniform_initialize_bn_weight)
if not args.amp and distributed_run:
model = DDP(model)
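    # With AMP enabled, DDP wrapping is deferred until after amp.initialize()
    # below (Apex's recommended ordering); without AMP the model is wrapped here.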
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate,
weight_decay=args.weight_decay)
if args.amp:
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
if distributed_run:
model = DDP(model)
try:
sigma = args.sigma
except AttributeError:
sigma = None
start_epoch = [0]
if args.resume_from_last:
args.checkpoint_path = get_last_checkpoint_filename(args.output, model_name)
    if args.checkpoint_path != "":
load_checkpoint(model, optimizer, start_epoch, model_config,
args.amp, args.checkpoint_path, local_rank)
start_epoch = start_epoch[0]
criterion = loss_functions.get_loss_function(model_name, sigma)
try:
n_frames_per_step = args.n_frames_per_step
except AttributeError:
n_frames_per_step = None
collate_fn = data_functions.get_collate_function(
model_name, n_frames_per_step)
trainset = data_functions.get_data_loader(
model_name, args.dataset_path, args.training_files, args)
if distributed_run:
train_sampler = DistributedSampler(trainset)
shuffle = False
else:
train_sampler = None
shuffle = True
train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle,
sampler=train_sampler,
batch_size=args.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
valset = data_functions.get_data_loader(
model_name, args.dataset_path, args.validation_files, args)
batch_to_gpu = data_functions.get_batch_to_gpu(model_name)
iteration = 0
train_epoch_items_per_sec = 0.0
val_loss = 0.0
num_iters = 0
model.train()
for epoch in range(start_epoch, args.epochs):
torch.cuda.synchronize()
epoch_start_time = time.perf_counter()
# used to calculate avg items/sec over epoch
reduced_num_items_epoch = 0
train_epoch_items_per_sec = 0.0
num_iters = 0
reduced_loss = 0
# if overflow at the last iteration then do not save checkpoint
overflow = False
if distributed_run:
train_loader.sampler.set_epoch(epoch)
for i, batch in enumerate(train_loader):
torch.cuda.synchronize()
iter_start_time = time.perf_counter()
DLLogger.log(step=(epoch, i),
data={'glob_iter/iters_per_epoch': str(iteration)+"/"+str(len(train_loader))})
adjust_learning_rate(iteration, epoch, optimizer, args.learning_rate,
args.anneal_steps, args.anneal_factor, local_rank)
model.zero_grad()
x, y, num_items = batch_to_gpu(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_loss = reduce_tensor(loss.data, world_size).item()
reduced_num_items = reduce_tensor(num_items.data, 1).item()
else:
reduced_loss = loss.item()
reduced_num_items = num_items.item()
if np.isnan(reduced_loss):
raise Exception("loss is NaN")
DLLogger.log(step=(epoch,i), data={'train_loss': reduced_loss})
num_iters += 1
# accumulate number of items processed in this epoch
reduced_num_items_epoch += reduced_num_items
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.grad_clip_thresh)
else:
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip_thresh)
optimizer.step()
torch.cuda.synchronize()
iter_stop_time = time.perf_counter()
iter_time = iter_stop_time - iter_start_time
items_per_sec = reduced_num_items/iter_time
train_epoch_items_per_sec += items_per_sec
DLLogger.log(step=(epoch, i), data={'train_items_per_sec': items_per_sec})
DLLogger.log(step=(epoch, i), data={'train_iter_time': iter_time})
iteration += 1
torch.cuda.synchronize()
epoch_stop_time = time.perf_counter()
epoch_time = epoch_stop_time - epoch_start_time
DLLogger.log(step=(epoch,), data={'train_items_per_sec':
(train_epoch_items_per_sec/num_iters if num_iters > 0 else 0.0)})
DLLogger.log(step=(epoch,), data={'train_loss': reduced_loss})
DLLogger.log(step=(epoch,), data={'train_epoch_time': epoch_time})
val_loss = validate(model, criterion, valset, epoch, iteration,
args.batch_size, world_size, collate_fn,
distributed_run, local_rank, batch_to_gpu)
if (epoch % args.epochs_per_checkpoint == 0) and args.bench_class == "":
save_checkpoint(model, optimizer, epoch, model_config,
args.amp, args.output, args.model_name,
local_rank, world_size)
if local_rank == 0:
DLLogger.flush()
torch.cuda.synchronize()
run_stop_time = time.perf_counter()
run_time = run_stop_time - run_start_time
DLLogger.log(step=tuple(), data={'run_time': run_time})
DLLogger.log(step=tuple(), data={'val_loss': val_loss})
DLLogger.log(step=tuple(), data={'train_items_per_sec':
(train_epoch_items_per_sec/num_iters if num_iters > 0 else 0.0)})
if local_rank == 0:
DLLogger.flush()
if __name__ == '__main__':
main()
| TensorRT-master | demo/Tacotron2/train.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tacotron2.text import text_to_sequence
import models
import torch
import argparse
import numpy as np
from scipy.io.wavfile import write
import matplotlib
import matplotlib.pyplot as plt
import sys
import time
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from waveglow.denoiser import Denoiser
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-i', '--input', type=str, required=True,
                        help='Full path to the input text (phrases separated by new lines)')
parser.add_argument('-o', '--output', required=True,
help='Output folder to save audio (file per phrase)')
parser.add_argument('--suffix', type=str, default="",
help="Output filename suffix")
parser.add_argument('--tacotron2', type=str,
help='Full path to the Tacotron2 model checkpoint file')
parser.add_argument('--waveglow', type=str,
help='Full path to the WaveGlow model checkpoint file')
parser.add_argument('-s', '--sigma-infer', default=0.9, type=float,
help='Standard deviation of the Gaussian distribution')
parser.add_argument('-d', '--denoising-strength', default=0.01, type=float,
help='Denoising strength for removing model bias')
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
run_mode = parser.add_mutually_exclusive_group()
run_mode.add_argument('--fp16', action='store_true',
help='Run inference with mixed precision')
run_mode.add_argument('--cpu', action='store_true',
help='Run inference on CPU')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--include-warmup', action='store_true',
help='Include warmup')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
return parser
def checkpoint_from_distributed(state_dict):
"""
    Checks whether the checkpoint was generated by DistributedDataParallel. DDP
    wraps the model in an additional "module." prefix, which needs to be removed
    for single-GPU inference.
:param state_dict: model's state dict
"""
ret = False
for key, _ in state_dict.items():
if key.find('module.') != -1:
ret = True
break
return ret
def unwrap_distributed(state_dict):
"""
    Unwraps a model checkpoint produced by DistributedDataParallel.
    DDP wraps the model in an additional "module." prefix, which needs to be
    removed for single-GPU inference.
:param state_dict: model's state dict
"""
new_state_dict = {}
for key, value in state_dict.items():
new_key = key.replace('module.', '')
new_state_dict[new_key] = value
return new_state_dict
def load_and_setup_model(model_name, parser, checkpoint, fp16_run, cpu_run, forward_is_infer=False):
model_parser = models.parse_model_args(model_name, parser, add_help=False)
model_args, _ = model_parser.parse_known_args()
model_config = models.get_model_config(model_name, model_args)
model = models.get_model(model_name, model_config, to_cuda=(not cpu_run),
forward_is_infer=forward_is_infer)
if checkpoint is not None:
if cpu_run:
state_dict = torch.load(checkpoint, map_location=torch.device('cpu'))['state_dict']
else:
state_dict = torch.load(checkpoint)['state_dict']
if checkpoint_from_distributed(state_dict):
state_dict = unwrap_distributed(state_dict)
model.load_state_dict(state_dict)
if model_name == "WaveGlow":
model = model.remove_weightnorm(model)
model.eval()
if fp16_run:
model.half()
return model
# taken from tacotron2/data_function.py:TextMelCollate.__call__
def pad_sequences(batch):
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]]
text_padded[i, :text.size(0)] = text
return text_padded, input_lengths
def prepare_input_sequence(texts, cpu_run=False):
d = []
for i,text in enumerate(texts):
d.append(torch.IntTensor(
text_to_sequence(text, ['english_cleaners'])[:]))
text_padded, input_lengths = pad_sequences(d)
if not cpu_run:
text_padded = text_padded.cuda().long()
input_lengths = input_lengths.cuda().long()
else:
text_padded = text_padded.long()
input_lengths = input_lengths.long()
return text_padded, input_lengths
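# Example (sketch): prepare_input_sequence(["Hello world."]) tokenizes each phrase
# with english_cleaners, zero-pads to the longest sequence (sorted by decreasing
# length), and returns LongTensors of ids and lengths, on GPU unless cpu_run=True.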
class MeasureTime():
def __init__(self, measurements, key, cpu_run=False):
self.measurements = measurements
self.key = key
self.cpu_run = cpu_run
def __enter__(self):
if not self.cpu_run:
torch.cuda.synchronize()
self.t0 = time.perf_counter()
def __exit__(self, exc_type, exc_value, exc_traceback):
if not self.cpu_run:
torch.cuda.synchronize()
self.measurements[self.key] = time.perf_counter() - self.t0
def main():
"""
Launches text to speech (inference).
Inference is executed on a single GPU or CPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT,
args.output+'/'+args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
args.fp16, args.cpu, forward_is_infer=True)
waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
args.fp16, args.cpu, forward_is_infer=True)
denoiser = Denoiser(waveglow)
if not args.cpu:
denoiser.cuda()
jitted_tacotron2 = torch.jit.script(tacotron2)
texts = []
    try:
        with open(args.input, 'r') as f:
            texts = f.readlines()
    except IOError:
        print("Could not read file", args.input)
        sys.exit(1)
if args.include_warmup:
sequence = torch.randint(low=0, high=148, size=(1,50)).long()
input_lengths = torch.IntTensor([sequence.size(1)]).long()
if not args.cpu:
sequence = sequence.cuda()
input_lengths = input_lengths.cuda()
for i in range(3):
with torch.no_grad():
mel, mel_lengths, _ = jitted_tacotron2(sequence, input_lengths)
_ = waveglow(mel)
measurements = {}
sequences_padded, input_lengths = prepare_input_sequence(texts, args.cpu)
with torch.no_grad(), MeasureTime(measurements, "tacotron2_time", args.cpu):
mel, mel_lengths, alignments = jitted_tacotron2(sequences_padded, input_lengths)
with torch.no_grad(), MeasureTime(measurements, "waveglow_time", args.cpu):
audios = waveglow(mel, sigma=args.sigma_infer)
audios = audios.float()
with torch.no_grad(), MeasureTime(measurements, "denoiser_time", args.cpu):
audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)
print("Stopping after",mel.size(2),"decoder steps")
tacotron2_infer_perf = mel.size(0)*mel.size(2)/measurements['tacotron2_time']
waveglow_infer_perf = audios.size(0)*audios.size(1)/measurements['waveglow_time']
DLLogger.log(step=0, data={"tacotron2_items_per_sec": tacotron2_infer_perf})
DLLogger.log(step=0, data={"tacotron2_latency": measurements['tacotron2_time']})
DLLogger.log(step=0, data={"waveglow_items_per_sec": waveglow_infer_perf})
DLLogger.log(step=0, data={"waveglow_latency": measurements['waveglow_time']})
DLLogger.log(step=0, data={"denoiser_latency": measurements['denoiser_time']})
DLLogger.log(step=0, data={"latency": (measurements['tacotron2_time']+measurements['waveglow_time']+measurements['denoiser_time'])})
for i, audio in enumerate(audios):
plt.imshow(alignments[i].float().data.cpu().numpy().T, aspect="auto", origin="lower")
        figure_path = args.output+"/alignment_"+str(i)+"_"+args.suffix+".png"
plt.savefig(figure_path)
audio = audio[:mel_lengths[i]*args.stft_hop_length]
audio = audio/torch.max(torch.abs(audio))
        audio_path = args.output+"/audio_"+str(i)+"_"+args.suffix+".wav"
write(audio_path, args.sampling_rate, audio.cpu().numpy())
DLLogger.flush()
if __name__ == '__main__':
main()
| TensorRT-master | demo/Tacotron2/inference.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from train import main as main_train
from inference_perf import main as main_infer
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--bench-class', type=str, choices=['train', 'perf-infer', 'perf-train'], required=True, help='Choose test class')
return parser
def main():
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Testing')
parser = parse_args(parser)
args, unknown_args = parser.parse_known_args()
if "train" in args.bench_class:
main_train()
else:
main_infer()
if __name__ == '__main__':
main()
| TensorRT-master | demo/Tacotron2/main.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import subprocess
import torch
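# Launcher for multi-GPU training: spawns one copy of the given command line per
# visible GPU, filling in --world-size and a distinct --rank for each subprocess.
# Only rank 0 keeps its stdout; if any worker fails, the rest are terminated.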
def main():
argslist = list(sys.argv)[1:]
world_size = torch.cuda.device_count()
if '--world-size' in argslist:
argslist[argslist.index('--world-size') + 1] = str(world_size)
else:
argslist.append('--world-size')
argslist.append(str(world_size))
workers = []
for i in range(world_size):
if '--rank' in argslist:
argslist[argslist.index('--rank') + 1] = str(i)
else:
argslist.append('--rank')
argslist.append(str(i))
stdout = None if i == 0 else subprocess.DEVNULL
worker = subprocess.Popen(
[str(sys.executable)] + argslist, stdout=stdout)
workers.append(worker)
returncode = 0
try:
pending = len(workers)
while pending > 0:
for worker in workers:
try:
worker_returncode = worker.wait(1)
except subprocess.TimeoutExpired:
continue
pending -= 1
if worker_returncode != 0:
if returncode != 1:
for worker in workers:
worker.terminate()
returncode = 1
except KeyboardInterrupt:
print('Pressed CTRL-C, TERMINATING')
for worker in workers:
worker.terminate()
for worker in workers:
worker.wait()
raise
sys.exit(returncode)
if __name__ == "__main__":
main()
| TensorRT-master | demo/Tacotron2/multiproc.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import random
import common.layers as layers
from common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
class MelAudioLoader(torch.utils.data.Dataset):
"""
1) loads audio,text pairs
2) computes mel-spectrograms from audio files.
"""
def __init__(self, dataset_path, audiopaths_and_text, args):
self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)
self.max_wav_value = args.max_wav_value
self.sampling_rate = args.sampling_rate
self.stft = layers.TacotronSTFT(
args.filter_length, args.hop_length, args.win_length,
args.n_mel_channels, args.sampling_rate, args.mel_fmin,
args.mel_fmax)
self.segment_length = args.segment_length
random.seed(1234)
random.shuffle(self.audiopaths_and_text)
def get_mel_audio_pair(self, filename):
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
            raise ValueError("{} SR doesn't match target {} SR".format(
                sampling_rate, self.stft.sampling_rate))
# Take segment
if audio.size(0) >= self.segment_length:
max_audio_start = audio.size(0) - self.segment_length
audio_start = random.randint(0, max_audio_start)
audio = audio[audio_start:audio_start+self.segment_length]
else:
audio = torch.nn.functional.pad(
audio, (0, self.segment_length - audio.size(0)), 'constant').data
audio = audio / self.max_wav_value
audio_norm = audio.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = melspec.squeeze(0)
return (melspec, audio, len(audio))
def __getitem__(self, index):
return self.get_mel_audio_pair(self.audiopaths_and_text[index][0])
def __len__(self):
return len(self.audiopaths_and_text)
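# batch_to_gpu packages a (mel, audio, length) batch for WaveGlow training: the
# model consumes (mel, audio), the training target is the audio itself, and
# len_y is the total number of samples, used for items/sec accounting.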
def batch_to_gpu(batch):
x, y, len_y = batch
x = to_gpu(x).float()
y = to_gpu(y).float()
len_y = to_gpu(torch.sum(len_y))
return ((x, y), y, len_y)
| TensorRT-master | demo/Tacotron2/waveglow/data_function.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from torch.autograd import Variable
import torch.nn.functional as F
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
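# fused_add_tanh_sigmoid_multiply implements WaveNet's gated activation unit:
# the summed inputs are split channel-wise into a tanh half and a sigmoid half,
# and the two halves are multiplied element-wise. Scripted with torch.jit for speed.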
class Invertible1x1Conv(torch.nn.Module):
"""
    The layer outputs both the convolution and the log determinant of its
    weight matrix. The infer() method applies the inverse convolution.
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
W = W.contiguous()
self.conv.weight.data = W
def forward(self, z):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W.unsqueeze(0).float()).squeeze()
z = self.conv(z)
return z, log_det_W
def infer(self, z):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor' or z.type() == 'torch.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
class WN(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary
difference from WaveNet is the convolutions need not be causal. There is
also no dilation size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,
kernel_size):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.cond_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels, 1)
cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
self.cond_layers.append(cond_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(
res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
for i in range(self.n_layers):
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
self.cond_layers[i](spect),
torch.IntTensor([self.n_channels]))
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = res_skip_acts[:, :self.n_channels, :] + audio
skip_acts = res_skip_acts[:, self.n_channels:, :]
else:
skip_acts = res_skip_acts
if i == 0:
output = skip_acts
else:
output = skip_acts + output
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,
n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,
n_mel_channels,
1024, stride=256)
assert(n_group % 2 == 0)
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group / 2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size / 2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels * n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert(spect.size(2) >= audio.size(1))
if spect.size(2) > audio.size(1):
spect = spect[:, :, :audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:, :self.n_early_size, :])
audio = audio[:, self.n_early_size:, :]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s) * audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1], 1)
output_audio.append(audio)
return torch.cat(output_audio, 1), log_s_list, log_det_W_list
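    # infer() runs the flow in reverse: starting from Gaussian noise scaled by
    # sigma, each flow step undoes the affine coupling (subtract b, divide by
    # exp(s)) and the invertible 1x1 convolution, re-injecting fresh noise for
    # the channels that were output early during the forward pass.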
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = torch.randn(spect.size(0),
self.n_remaining_channels,
spect.size(2), device=spect.device).to(spect.dtype)
audio = torch.autograd.Variable(sigma * audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k].infer(audio)
if k % self.n_early_every == 0 and k > 0:
z = torch.randn(spect.size(0), self.n_early_size, spect.size(
2), device=spect.device).to(spect.dtype)
audio = torch.cat((sigma * z, audio), 1)
audio = audio.permute(
0, 2, 1).contiguous().view(
audio.size(0), -1).data
return audio
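    # infer_onnx is an export-friendly variant: it takes pre-sampled noise z as
    # an explicit input, keeps tensors 4-D for ONNX/TensorRT compatibility, and
    # otherwise mirrors the reverse-flow logic of infer().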
def infer_onnx(self, spect, z, sigma=0.9):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
length_spect_group = spect.size(2)//8
mel_dim = 80
batch_size = spect.size(0)
spect = torch.squeeze(spect, 3)
spect = spect.view((batch_size, mel_dim, length_spect_group, self.n_group))
spect = spect.permute(0, 2, 1, 3)
spect = spect.contiguous()
spect = spect.view((batch_size, length_spect_group, self.n_group*mel_dim))
spect = spect.permute(0, 2, 1)
spect = torch.unsqueeze(spect, 3)
spect = spect.contiguous()
audio = z[:, :self.n_remaining_channels, :, :]
z = z[:, self.n_remaining_channels:self.n_group, :, :]
audio = sigma*audio
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) // 2)
audio_0 = audio[:, :n_half, :, :]
audio_1 = audio[:, n_half:(n_half+n_half), :, :]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:(n_half+n_half), :, :]
b = output[:, :n_half, :, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k](audio)
if k % self.n_early_every == 0 and k > 0:
audio = torch.cat((z[:, :self.n_early_size, :, :], audio), 1)
z = z[:, self.n_early_size:self.n_group, :, :]
audio = torch.squeeze(audio, 3)
audio = audio.permute(0,2,1).contiguous().view(batch_size, (length_spect_group * self.n_group))
return audio
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layers = remove(WN.cond_layers)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
| TensorRT-master | demo/Tacotron2/waveglow/model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
sys.path.append('tacotron2')
import torch
from common.layers import STFT
class Denoiser(torch.nn.Module):
""" Removes model bias from audio produced with waveglow """
def __init__(self, waveglow, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros'):
super(Denoiser, self).__init__()
device = waveglow.upsample.weight.device
dtype = waveglow.upsample.weight.dtype
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).to(device)
if mode == 'zeros':
mel_input = torch.zeros((1, 80, 88), dtype=dtype, device=device)
elif mode == 'normal':
mel_input = torch.randn((1, 80, 88), dtype=dtype, device=device)
else:
raise Exception("Mode {} if not supported".format(mode))
with torch.no_grad():
bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio)
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
| TensorRT-master | demo/Tacotron2/waveglow/denoiser.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
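# WaveGlowLoss is the negative log-likelihood of the flow: a Gaussian prior term
# on z (sum(z^2) / (2*sigma^2)) minus the accumulated log|s| and log|det W|
# terms, normalized by the total number of elements in z.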
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output, clean_audio):
# clean_audio is unused;
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(
z * z) / (2 * self.sigma * self.sigma) - log_s_total - log_det_W_total # noqa: E501
return loss / (z.size(0) * z.size(1) * z.size(2))
| TensorRT-master | demo/Tacotron2/waveglow/loss_function.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
def parse_waveglow_args(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help)
# misc parameters
parser.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
# glow parameters
parser.add_argument('--flows', default=12, type=int,
help='Number of steps of flow')
parser.add_argument('--groups', default=8, type=int,
help='Number of samples in a group processed by the steps of flow')
parser.add_argument('--early-every', default=4, type=int,
help='Determines how often (i.e., after how many coupling layers) \
a number of channels (defined by --early-size parameter) are output\
to the loss function')
parser.add_argument('--early-size', default=2, type=int,
help='Number of channels output to the loss function')
parser.add_argument('--sigma', default=1.0, type=float,
help='Standard deviation used for sampling from Gaussian')
parser.add_argument('--segment-length', default=4000, type=int,
help='Segment length (audio samples) processed per iteration')
# wavenet parameters
wavenet = parser.add_argument_group('WaveNet parameters')
wavenet.add_argument('--wn-kernel-size', default=3, type=int,
                         help='Kernel size for dilated convolution in the affine coupling layer (WN)')
wavenet.add_argument('--wn-channels', default=512, type=int,
help='Number of channels in WN')
wavenet.add_argument('--wn-layers', default=8, type=int,
help='Number of layers in WN')
return parser
| TensorRT-master | demo/Tacotron2/waveglow/arg_parser.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
| TensorRT-master | demo/Tacotron2/common/audio_processing.py |
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from common.audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :].astype(np.float32))
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose2d(
recombine_magnitude_phase.unsqueeze(-1),
Variable(self.inverse_basis.unsqueeze(-1), requires_grad=False),
stride=(self.hop_length,1),
padding=(0,0))
inverse_transform = inverse_transform.squeeze(-1)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
| TensorRT-master | demo/Tacotron2/common/stft.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from scipy.io.wavfile import read
import torch
import os
import argparse
import json
class ParseFromConfigFile(argparse.Action):
def __init__(self, option_strings, type, dest, help=None, required=False):
super(ParseFromConfigFile, self).__init__(option_strings=option_strings, type=type, dest=dest, help=help, required=required)
def __call__(self, parser, namespace, values, option_string):
with open(values, 'r') as f:
data = json.load(f)
for group in data.keys():
for k,v in data[group].items():
underscore_k = k.replace('-', '_')
setattr(namespace, underscore_k, v)
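# get_mask_from_lengths returns a (batch, max_len) mask that is True at padded
# positions, i.e. wherever the index is greater than or equal to the sequence length.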
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, device=lengths.device, dtype=lengths.dtype)
mask = (ids < lengths.unsqueeze(1)).byte()
mask = torch.le(mask, 0)
return mask
def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def load_filepaths_and_text(dataset_path, filename, split="|"):
with open(filename, encoding='utf-8') as f:
def split_line(root, line):
parts = line.strip().split(split)
if len(parts) > 2:
raise Exception(
"incorrect line format for file: {}".format(filename))
path = os.path.join(root, parts[0])
text = parts[1]
return path,text
filepaths_and_text = [split_line(dataset_path, line) for line in f]
return filepaths_and_text
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return x
| TensorRT-master | demo/Tacotron2/common/utils.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from librosa.filters import mel as librosa_mel_fn
from common.audio_processing import dynamic_range_compression, dynamic_range_decompression
from common.stft import STFT
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
return self.conv(signal)
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
| TensorRT-master | demo/Tacotron2/common/layers.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
sys.path.append('./')
from tacotron2.text import text_to_sequence
import models
import torch
import argparse
import numpy as np
from scipy.io.wavfile import write
from inference import checkpoint_from_distributed, unwrap_distributed, MeasureTime, prepare_input_sequence, load_and_setup_model
from inference_trt import infer_tacotron2_trt, infer_waveglow_trt
from trt_utils import load_engine
from waveglow.denoiser import Denoiser
import tensorrt as trt
import time
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
# from apex import amp
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--encoder', type=str, required=True,
help='full path to the Encoder engine')
parser.add_argument('--decoder', type=str, required=True,
help='full path to the DecoderIter engine')
parser.add_argument('--postnet', type=str, required=True,
help='full path to the Postnet engine')
parser.add_argument('--waveglow', type=str, required=True,
help='full path to the WaveGlow engine')
parser.add_argument('--waveglow-ckpt', type=str, default="",
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--fp16', action='store_true',
help='inference with FP16')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--num-iters', type=int, default=10,
help='Number of iterations')
parser.add_argument('-il', '--input-length', type=int, default=64,
help='Input length')
parser.add_argument('-bs', '--batch-size', type=int, default=1,
help='Batch size')
return parser
def print_stats(measurements_all):
print(np.mean(measurements_all['latency'][1:]),
np.mean(measurements_all['throughput'][1:]),
np.mean(measurements_all['pre_processing'][1:]),
np.mean(measurements_all['type_conversion'][1:])+
np.mean(measurements_all['storage'][1:])+
np.mean(measurements_all['data_transfer'][1:]),
np.mean(measurements_all['num_mels_per_audio'][1:]))
throughput = measurements_all['throughput']
preprocessing = measurements_all['pre_processing']
type_conversion = measurements_all['type_conversion']
storage = measurements_all['storage']
data_transfer = measurements_all['data_transfer']
postprocessing = [sum(p) for p in zip(type_conversion,storage,data_transfer)]
latency = measurements_all['latency']
num_mels_per_audio = measurements_all['num_mels_per_audio']
latency.sort()
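    # Latency confidence levels: max of the fastest 50/90/95/99/100% of the
    # sorted latencies, i.e. approximate percentile latencies.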
cf_50 = max(latency[:int(len(latency)*0.50)])
cf_90 = max(latency[:int(len(latency)*0.90)])
cf_95 = max(latency[:int(len(latency)*0.95)])
cf_99 = max(latency[:int(len(latency)*0.99)])
cf_100 = max(latency[:int(len(latency)*1.0)])
print("Throughput average (samples/sec) = {:.4f}".format(np.mean(throughput)))
print("Preprocessing average (seconds) = {:.4f}".format(np.mean(preprocessing)))
print("Postprocessing average (seconds) = {:.4f}".format(np.mean(postprocessing)))
print("Number of mels per audio average = {}".format(np.mean(num_mels_per_audio))) #
print("Latency average (seconds) = {:.4f}".format(np.mean(latency)))
print("Latency std (seconds) = {:.4f}".format(np.std(latency)))
print("Latency cl 50 (seconds) = {:.4f}".format(cf_50))
print("Latency cl 90 (seconds) = {:.4f}".format(cf_90))
print("Latency cl 95 (seconds) = {:.4f}".format(cf_95))
print("Latency cl 99 (seconds) = {:.4f}".format(cf_99))
print("Latency cl 100 (seconds) = {:.4f}".format(cf_100))
def main():
"""
Launches text to speech (inference).
Inference is executed on a single GPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, unknown_args = parser.parse_known_args()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
measurements_all = {"pre_processing": [],
"tacotron2_encoder_time": [],
"tacotron2_decoder_time": [],
"tacotron2_postnet_time": [],
"tacotron2_latency": [],
"waveglow_latency": [],
"latency": [],
"type_conversion": [],
"data_transfer": [],
"storage": [],
"tacotron2_items_per_sec": [],
"waveglow_items_per_sec": [],
"num_mels_per_audio": [],
"throughput": []}
print("args:", args, unknown_args)
torch.cuda.init()
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
encoder = load_engine(args.encoder, TRT_LOGGER)
decoder_iter = load_engine(args.decoder, TRT_LOGGER)
postnet = load_engine(args.postnet, TRT_LOGGER)
waveglow = load_engine(args.waveglow, TRT_LOGGER)
if args.waveglow_ckpt != "":
# setup denoiser using WaveGlow PyTorch checkpoint
waveglow_ckpt = load_and_setup_model('WaveGlow', parser,
args.waveglow_ckpt,
fp16_run=args.fp16,
cpu_run=False,
forward_is_infer=True)
denoiser = Denoiser(waveglow_ckpt).cuda()
# after initialization, we don't need WaveGlow PyTorch checkpoint
# anymore - deleting
del waveglow_ckpt
torch.cuda.empty_cache()
# create TRT contexts for each engine
encoder_context = encoder.create_execution_context()
decoder_context = decoder_iter.create_execution_context()
postnet_context = postnet.create_execution_context()
waveglow_context = waveglow.create_execution_context()
texts = ["The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves."]
texts = [texts[0][:args.input_length]]
texts = texts*args.batch_size
warmup_iters = 3
for iter in range(args.num_iters):
measurements = {}
with MeasureTime(measurements, "pre_processing"):
sequences_padded, input_lengths = prepare_input_sequence(texts)
sequences_padded = sequences_padded.to(torch.int32)
input_lengths = input_lengths.to(torch.int32)
with torch.no_grad():
with MeasureTime(measurements, "latency"):
with MeasureTime(measurements, "tacotron2_latency"):
mel, mel_lengths = infer_tacotron2_trt(encoder, decoder_iter, postnet,
encoder_context, decoder_context, postnet_context,
sequences_padded, input_lengths, measurements, args.fp16, True)
with MeasureTime(measurements, "waveglow_latency"):
audios = infer_waveglow_trt(waveglow, waveglow_context, mel, measurements, args.fp16)
num_mels = mel.size(0)*mel.size(2)
num_samples = audios.size(0)*audios.size(1)
with MeasureTime(measurements, "type_conversion"):
audios = audios.float()
with MeasureTime(measurements, "data_transfer"):
audios = audios.cpu()
with MeasureTime(measurements, "storage"):
audios = audios.numpy()
for i, audio in enumerate(audios):
audio_path = "audio_"+str(i)+".wav"
write(audio_path, args.sampling_rate,
audio[:mel_lengths[i]*args.stft_hop_length])
measurements['tacotron2_items_per_sec'] = num_mels/measurements['tacotron2_latency']
measurements['waveglow_items_per_sec'] = num_samples/measurements['waveglow_latency']
measurements['num_mels_per_audio'] = mel.size(2)
measurements['throughput'] = num_samples/measurements['latency']
if iter >= warmup_iters:
for k,v in measurements.items():
if k in measurements_all.keys():
measurements_all[k].append(v)
DLLogger.log(step=(iter-warmup_iters), data={k: v})
DLLogger.flush()
print_stats(measurements_all)
if __name__ == '__main__':
main()
| TensorRT-master | demo/Tacotron2/tensorrt/test_infer_trt.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
# For a single dimension this will return the min, opt, and max size when given
# input of either one or three (comma delimited) values
# dim="1" or dim=1 returns (1, 1, 1)
# dim="1,4,5" returns (1, 4, 5)
def parse_dynamic_size(dim):
split = str(dim).split(',')
assert len(split) in (1,3) , "Dynamic size input must be either 1 or 3 comma-separated integers"
ints = [int(i) for i in split]
if len(ints) == 1:
ints *= 3
assert ints[0] <= ints[1] <= ints[2]
return tuple(ints)
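# A minimal usage sketch (illustrative only, not part of the original utility module):
# parse_dynamic_size accepts either a single value or a "min,opt,max" triple and always
# returns a (min, opt, max) tuple.
def _example_parse_dynamic_size():
    assert parse_dynamic_size(1) == (1, 1, 1)
    assert parse_dynamic_size("1,4,5") == (1, 4, 5)
    assert parse_dynamic_size("32,768,1664") == (32, 768, 1664)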
def is_dimension_dynamic(dim):
return dim is None or dim <= 0
def is_shape_dynamic(shape):
return any([is_dimension_dynamic(dim) for dim in shape])
def run_trt_engine(context, engine, tensors):
bindings = [None]*engine.num_bindings
for name,tensor in tensors['inputs'].items():
idx = engine.get_binding_index(name)
bindings[idx] = tensor.data_ptr()
if engine.is_shape_binding(idx) and is_shape_dynamic(context.get_shape(idx)):
context.set_shape_input(idx, tensor)
elif is_shape_dynamic(engine.get_binding_shape(idx)):
context.set_binding_shape(idx, tensor.shape)
for name,tensor in tensors['outputs'].items():
idx = engine.get_binding_index(name)
bindings[idx] = tensor.data_ptr()
context.execute_v2(bindings=bindings)
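# A minimal usage sketch (illustrative only; the engine file name and tensor names are
# assumptions based on the postnet export elsewhere in this demo): run_trt_engine expects
# CUDA torch tensors keyed by binding name, with output buffers pre-allocated by the caller.
def _example_run_postnet(postnet_engine_path="postnet_fp32.engine"):
    import torch  # local import: trt_utils itself does not depend on torch
    engine = load_engine(postnet_engine_path, trt.Logger(trt.Logger.WARNING))
    context = engine.create_execution_context()
    mel = torch.randn(1, 80, 620).cuda()           # (batch, n_mel_channels, mel_seq)
    mel_post = torch.zeros_like(mel)               # pre-allocated output buffer
    tensors = {"inputs": {"mel_outputs": mel},
               "outputs": {"mel_outputs_postnet": mel_post}}
    run_trt_engine(context, engine, tensors)
    return mel_post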
def load_engine(engine_filepath, trt_logger):
with open(engine_filepath, "rb") as f, trt.Runtime(trt_logger) as runtime:
engine = runtime.deserialize_cuda_engine(f.read())
return engine
def engine_info(engine_filepath):
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
engine = load_engine(engine_filepath, TRT_LOGGER)
binding_template = r"""
{btype} {{
name: "{bname}"
data_type: {dtype}
dims: {dims}
}}"""
type_mapping = {"DataType.HALF": "TYPE_FP16",
"DataType.FLOAT": "TYPE_FP32",
"DataType.INT32": "TYPE_INT32",
"DataType.BOOL" : "TYPE_BOOL"}
print("engine name", engine.name)
print("has_implicit_batch_dimension", engine.has_implicit_batch_dimension)
start_dim = 0 if engine.has_implicit_batch_dimension else 1
print("num_optimization_profiles", engine.num_optimization_profiles)
print("max_batch_size:", engine.max_batch_size)
print("device_memory_size:", engine.device_memory_size)
print("max_workspace_size:", engine.max_workspace_size)
print("num_layers:", engine.num_layers)
for i in range(engine.num_bindings):
btype = "input" if engine.binding_is_input(i) else "output"
bname = engine.get_binding_name(i)
dtype = engine.get_binding_dtype(i)
bdims = engine.get_binding_shape(i)
config_values = {
"btype": btype,
"bname": bname,
"dtype": type_mapping[str(dtype)],
"dims": list(bdims[start_dim:])
}
final_binding_str = binding_template.format_map(config_values)
print(final_binding_str)
def build_engine(model_file, shapes, max_ws=512*1024*1024, fp16=False):
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(TRT_LOGGER)
config = builder.create_builder_config()
config.max_workspace_size = max_ws
if fp16:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
profile = builder.create_optimization_profile()
for s in shapes:
profile.set_shape(s['name'], min=s['min'], opt=s['opt'], max=s['max'])
config.add_optimization_profile(profile)
explicit_batch = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(explicit_batch)
with trt.OnnxParser(network, TRT_LOGGER) as parser:
with open(model_file, 'rb') as model:
parsed = parser.parse(model.read())
for i in range(parser.num_errors):
print("TensorRT ONNX parser error:", parser.get_error(i))
engine = builder.build_engine(network, config=config)
return engine
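# A minimal usage sketch (illustrative only; the file names are assumptions, and the shape
# profile mirrors the encoder profile used by the ONNX-to-TensorRT conversion script in this
# demo): build an engine with one dynamic-shape profile and serialize it to disk.
def _example_build_encoder_engine(onnx_path="encoder.onnx", engine_path="encoder_fp16.engine"):
    shapes = [{"name": "sequences", "min": (1, 4), "opt": (1, 128), "max": (1, 256)},
              {"name": "sequence_lengths", "min": (1,), "opt": (1,), "max": (1,)}]
    engine = build_engine(onnx_path, shapes=shapes, fp16=True)
    if engine is not None:
        with open(engine_path, 'wb') as f:
            f.write(engine.serialize())
    return engine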
| TensorRT-master | demo/Tacotron2/tensorrt/trt_utils.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
import numpy as np
from scipy.io.wavfile import write
import time
import torch
import argparse
import os.path as path
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parents[1]))
from common.utils import to_gpu, get_mask_from_lengths
from tacotron2.text import text_to_sequence
from inference import MeasureTime, prepare_input_sequence, load_and_setup_model
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from trt_utils import load_engine, run_trt_engine
from waveglow.denoiser import Denoiser
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-i', '--input', type=str, required=True,
                        help='full path to the input text (phrases separated by newlines)')
parser.add_argument('-o', '--output', required=True,
help='output folder to save audio (file per phrase)')
parser.add_argument('--encoder', type=str, required=True,
help='full path to the Encoder engine')
parser.add_argument('--decoder', type=str, required=True,
help='full path to the DecoderIter engine')
parser.add_argument('--postnet', type=str, required=True,
help='full path to the Postnet engine')
parser.add_argument('--waveglow', type=str, required=True,
help='full path to the WaveGlow engine')
parser.add_argument('--waveglow-ckpt', type=str, default="",
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('-d', '--denoising-strength', default=0.01, type=float)
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--fp16', action='store_true',
help='inference with FP16')
    parser.add_argument('--loop', dest='loop', action='store_true',
                        help='Use the decoder model that includes the outer decoder loop. Only supported on TensorRT 8.0 or later, where it is the default.')
    parser.add_argument('--no-loop', dest='loop', action='store_false',
                        help='Use the per-iteration decoder model without the outer loop. Required for TensorRT 7.2 or earlier, where it is the default.')
parser.set_defaults(loop=int(trt.__version__[0]) >= 8)
parser.add_argument('--waveglow-onnxruntime', action='store_true',
help='Specify this option to use ONNX runtime instead of TRT for running Waveglow')
parser.add_argument('--decoder-onnxruntime', action='store_true',
help='Specify this option to use ONNX runtime instead of TRT for running the TT2 Decoder with loop. When using this option, pass the decoder ONNX model to the --decoder argument')
return parser
def init_decoder_inputs(memory, processed_memory, memory_lengths):
device = memory.device
dtype = memory.dtype
bs = memory.size(0)
seq_len = memory.size(1)
attention_rnn_dim = 1024
decoder_rnn_dim = 1024
encoder_embedding_dim = 512
n_mel_channels = 80
attention_hidden = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
attention_cell = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
decoder_hidden = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
decoder_cell = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
attention_weights = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_weights_cum = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_context = torch.zeros(bs, encoder_embedding_dim, device=device, dtype=dtype)
mask = get_mask_from_lengths(memory_lengths).to(device)
decoder_input = torch.zeros(bs, n_mel_channels, device=device, dtype=dtype)
return (decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory, mask)
def init_decoder_outputs(memory, memory_lengths):
device = memory.device
dtype = memory.dtype
bs = memory.size(0)
seq_len = memory.size(1)
attention_rnn_dim = 1024
decoder_rnn_dim = 1024
encoder_embedding_dim = 512
n_mel_channels = 80
attention_hidden = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
attention_cell = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
decoder_hidden = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
decoder_cell = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
attention_weights = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_weights_cum = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_context = torch.zeros(bs, encoder_embedding_dim, device=device, dtype=dtype)
decoder_output = torch.zeros(bs, n_mel_channels, device=device, dtype=dtype)
gate_prediction = torch.zeros(bs, 1, device=device, dtype=dtype)
return (attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, decoder_output, gate_prediction)
def init_decoder_tensors(decoder_inputs, decoder_outputs):
decoder_tensors = {
"inputs" : {
'decoder_input': decoder_inputs[0],
'attention_hidden': decoder_inputs[1],
'attention_cell': decoder_inputs[2],
'decoder_hidden': decoder_inputs[3],
'decoder_cell': decoder_inputs[4],
'attention_weights': decoder_inputs[5],
'attention_weights_cum': decoder_inputs[6],
'attention_context': decoder_inputs[7],
'memory': decoder_inputs[8],
'processed_memory': decoder_inputs[9],
'mask': decoder_inputs[10]
},
"outputs" : {
'out_attention_hidden': decoder_outputs[0],
'out_attention_cell': decoder_outputs[1],
'out_decoder_hidden': decoder_outputs[2],
'out_decoder_cell': decoder_outputs[3],
'out_attention_weights': decoder_outputs[4],
'out_attention_weights_cum': decoder_outputs[5],
'out_attention_context': decoder_outputs[6],
'decoder_output': decoder_outputs[7],
'gate_prediction': decoder_outputs[8]
}
}
return decoder_tensors
def swap_inputs_outputs(decoder_inputs, decoder_outputs):
new_decoder_inputs = (decoder_outputs[7], # decoder_output
decoder_outputs[0], # attention_hidden
decoder_outputs[1], # attention_cell
decoder_outputs[2], # decoder_hidden
decoder_outputs[3], # decoder_cell
decoder_outputs[4], # attention_weights
decoder_outputs[5], # attention_weights_cum
decoder_outputs[6], # attention_context
decoder_inputs[8], # memory
decoder_inputs[9], # processed_memory
decoder_inputs[10]) # mask
new_decoder_outputs = (decoder_inputs[1], # attention_hidden
decoder_inputs[2], # attention_cell
decoder_inputs[3], # decoder_hidden
decoder_inputs[4], # decoder_cell
decoder_inputs[5], # attention_weights
decoder_inputs[6], # attention_weights_cum
decoder_inputs[7], # attention_context
decoder_inputs[0], # decoder_input
decoder_outputs[8])# gate_output
return new_decoder_inputs, new_decoder_outputs
def infer_tacotron2_trt(encoder, decoder_iter, postnet,
encoder_context, decoder_context, postnet_context,
sequences, sequence_lengths, measurements, fp16, loop):
batch_size = len(sequence_lengths)
max_sequence_len = sequence_lengths[0]
memory = torch.zeros((batch_size, max_sequence_len, 512)).cuda()
if fp16:
memory = memory.half()
device = memory.device
dtype = memory.dtype
processed_memory = torch.zeros((batch_size, max_sequence_len, 128), device=device, dtype=dtype)
lens = torch.zeros_like(sequence_lengths)
print(f"batch_size: {batch_size}, max sequence length: {max_sequence_len}")
encoder_tensors = {
"inputs" :
{'sequences': sequences, 'sequence_lengths': sequence_lengths},
"outputs" :
{'memory': memory, 'lens': lens, 'processed_memory': processed_memory}
}
print("Running Tacotron2 Encoder")
with MeasureTime(measurements, "tacotron2_encoder_time"):
run_trt_engine(encoder_context, encoder, encoder_tensors)
max_decoder_steps = 1024
device = memory.device
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device = device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device = device)
mel_outputs = torch.ones((batch_size, 80, max_decoder_steps), device = device, dtype=dtype).cuda()
gate_threshold = 0.5
first_iter = True
decoder_inputs = init_decoder_inputs(memory, processed_memory, sequence_lengths)
decoder_outputs = init_decoder_outputs(memory, sequence_lengths)
if loop:
if decoder_context is None:
print("Running Tacotron2 Decoder with loop with ONNX-RT")
decoder_inputs_onnxrt = [x.cpu().numpy().copy() for x in decoder_inputs]
import onnx
import onnxruntime
sess = onnxruntime.InferenceSession(decoder_iter)
with MeasureTime(measurements, "tacotron2_decoder_time"):
result = sess.run(["mel_outputs", "mel_lengths_t"], {
'decoder_input_0': decoder_inputs_onnxrt[0],
'attention_hidden_0': decoder_inputs_onnxrt[1],
'attention_cell_0': decoder_inputs_onnxrt[2],
'decoder_hidden_0': decoder_inputs_onnxrt[3],
'decoder_cell_0': decoder_inputs_onnxrt[4],
'attention_weights_0': decoder_inputs_onnxrt[5],
'attention_weights_cum_0': decoder_inputs_onnxrt[6],
'attention_context_0': decoder_inputs_onnxrt[7],
'memory': decoder_inputs_onnxrt[8],
'processed_memory': decoder_inputs_onnxrt[9],
'mask': decoder_inputs_onnxrt[10]
})
mel_outputs = torch.tensor(result[0], device=device)
mel_lengths = torch.tensor(result[1], device=device)
else:
print("Running Tacotron2 Decoder with loop")
decoder_tensors = {
"inputs" :
{
'decoder_input_0': decoder_inputs[0],
'attention_hidden_0': decoder_inputs[1],
'attention_cell_0': decoder_inputs[2],
'decoder_hidden_0': decoder_inputs[3],
'decoder_cell_0': decoder_inputs[4],
'attention_weights_0': decoder_inputs[5],
'attention_weights_cum_0': decoder_inputs[6],
'attention_context_0': decoder_inputs[7],
'memory': decoder_inputs[8],
'processed_memory': decoder_inputs[9],
'mask': decoder_inputs[10]
},
"outputs" :
{'mel_outputs': mel_outputs, 'mel_lengths_t': mel_lengths}
}
with MeasureTime(measurements, "tacotron2_decoder_time"):
run_trt_engine(decoder_context, decoder_iter, decoder_tensors)
mel_outputs = mel_outputs[:,:,:torch.max(mel_lengths)]
else:
print("Running Tacotron2 Decoder")
measurements_decoder = {}
while True:
decoder_tensors = init_decoder_tensors(decoder_inputs, decoder_outputs)
with MeasureTime(measurements_decoder, "step"):
run_trt_engine(decoder_context, decoder_iter, decoder_tensors)
if first_iter:
mel_outputs = torch.unsqueeze(decoder_outputs[7], 2)
gate_outputs = torch.unsqueeze(decoder_outputs[8], 2)
alignments = torch.unsqueeze(decoder_outputs[4], 2)
measurements['tacotron2_decoder_time'] = measurements_decoder['step']
first_iter = False
else:
mel_outputs = torch.cat((mel_outputs, torch.unsqueeze(decoder_outputs[7], 2)), 2)
gate_outputs = torch.cat((gate_outputs, torch.unsqueeze(decoder_outputs[8], 2)), 2)
alignments = torch.cat((alignments, torch.unsqueeze(decoder_outputs[4], 2)), 2)
measurements['tacotron2_decoder_time'] += measurements_decoder['step']
dec = torch.le(torch.sigmoid(decoder_outputs[8]), gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if torch.sum(not_finished) == 0:
print("Stopping after",mel_outputs.size(2),"decoder steps")
break
if mel_outputs.size(2) == max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_inputs, decoder_outputs = swap_inputs_outputs(decoder_inputs, decoder_outputs)
mel_outputs = mel_outputs.clone().detach()
mel_outputs_postnet = torch.zeros_like(mel_outputs, device=device, dtype=dtype)
postnet_tensors = {
"inputs" :
{'mel_outputs': mel_outputs},
"outputs" :
{'mel_outputs_postnet': mel_outputs_postnet}
}
print("Running Tacotron2 Postnet")
with MeasureTime(measurements, "tacotron2_postnet_time"):
run_trt_engine(postnet_context, postnet, postnet_tensors)
print("Tacotron2 Postnet done")
return mel_outputs_postnet, mel_lengths
def infer_waveglow_trt(waveglow, waveglow_context, mel, measurements, fp16):
mel_size = mel.size(2)
batch_size = mel.size(0)
    stride = 256
    n_group = 8
    # The generated audio has mel_size*stride samples; the noise input z holds the same
    # number of values folded into n_group channel groups.
    z_size = mel_size*stride
    z_size = z_size//n_group
z = torch.randn(batch_size, n_group, z_size).cuda()
audios = torch.zeros(batch_size, mel_size*stride).cuda()
mel = mel.unsqueeze(3)
z = z.unsqueeze(3)
if fp16:
z = z.half()
mel = mel.half()
audios = audios.half()
waveglow_tensors = {
"inputs" : {'mel': mel, 'z': z},
"outputs" : {'audio': audios}
}
print("Running WaveGlow with TensorRT")
with MeasureTime(measurements, "waveglow_time"):
run_trt_engine(waveglow_context, waveglow, waveglow_tensors)
return audios
def infer_waveglow_onnx(waveglow_path, mel, measurements, fp16):
import onnx
import onnxruntime
sess = onnxruntime.InferenceSession(waveglow_path)
device=mel.device
mel_size = mel.size(2)
batch_size = mel.size(0)
stride = 256
n_group = 8
z_size = mel_size*stride
z_size = z_size//n_group
z = torch.randn(batch_size, n_group, z_size).cuda()
mel = mel.unsqueeze(3)
z = z.unsqueeze(3)
if fp16:
z = z.half()
mel = mel.half()
mel = mel.cpu().numpy().copy()
z = z.cpu().numpy().copy()
print("Running WaveGlow with ONNX Runtime")
with MeasureTime(measurements, "waveglow_time"):
result = sess.run(["audio"], {
'mel': mel,
'z': z
})
audios = torch.tensor(result[0], device=device)
return audios
def main():
parser = argparse.ArgumentParser(
description='TensorRT Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
# initialize CUDA state
torch.cuda.init()
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
encoder = load_engine(args.encoder, TRT_LOGGER)
postnet = load_engine(args.postnet, TRT_LOGGER)
if args.waveglow_ckpt != "":
# setup denoiser using WaveGlow PyTorch checkpoint
waveglow_ckpt = load_and_setup_model('WaveGlow', parser, args.waveglow_ckpt,
True, forward_is_infer=True)
denoiser = Denoiser(waveglow_ckpt).cuda()
# after initialization, we don't need WaveGlow PyTorch checkpoint
# anymore - deleting
del waveglow_ckpt
torch.cuda.empty_cache()
# create TRT contexts for each engine
encoder_context = encoder.create_execution_context()
decoder_context = None
if not args.decoder_onnxruntime:
decoder_iter = load_engine(args.decoder, TRT_LOGGER)
decoder_context = decoder_iter.create_execution_context()
else:
decoder_iter = args.decoder
postnet_context = postnet.create_execution_context()
waveglow_context = None
if not args.waveglow_onnxruntime:
waveglow = load_engine(args.waveglow, TRT_LOGGER)
waveglow_context = waveglow.create_execution_context()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT,
path.join(args.output, args.log_file)),
StdOutBackend(Verbosity.VERBOSE)])
texts = []
    try:
        with open(args.input, 'r') as f:
            texts = f.readlines()
    except OSError:
        print("Could not read file")
        sys.exit(1)
measurements = {}
sequences, sequence_lengths = prepare_input_sequence(texts)
sequences = sequences.to(torch.int32)
sequence_lengths = sequence_lengths.to(torch.int32)
with MeasureTime(measurements, "latency"):
mel, mel_lengths = infer_tacotron2_trt(encoder, decoder_iter, postnet,
encoder_context, decoder_context, postnet_context,
sequences, sequence_lengths, measurements, args.fp16, args.loop)
audios = infer_waveglow_onnx(args.waveglow, mel, measurements, args.fp16) if args.waveglow_onnxruntime else \
infer_waveglow_trt(waveglow, waveglow_context, mel, measurements, args.fp16)
with encoder_context, postnet_context:
pass
if decoder_context is not None:
with decoder_context: pass
if waveglow_context is not None:
with waveglow_context: pass
audios = audios.float()
if args.waveglow_ckpt != "":
with MeasureTime(measurements, "denoiser"):
audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)
for i, audio in enumerate(audios):
audio = audio[:mel_lengths[i]*args.stft_hop_length]
audio = audio/torch.max(torch.abs(audio))
audio_path = path.join(args.output, f"audio_{i}_trt.wav")
write(audio_path, args.sampling_rate, audio.cpu().numpy())
DLLogger.log(step=0, data={"tacotron2_encoder_latency": measurements['tacotron2_encoder_time']})
DLLogger.log(step=0, data={"tacotron2_decoder_latency": measurements['tacotron2_decoder_time']})
DLLogger.log(step=0, data={"tacotron2_postnet_latency": measurements['tacotron2_postnet_time']})
DLLogger.log(step=0, data={"waveglow_latency": measurements['waveglow_time']})
DLLogger.log(step=0, data={"latency": measurements['latency']})
if args.waveglow_ckpt != "":
DLLogger.log(step=0, data={"denoiser": measurements['denoiser']})
DLLogger.flush()
prec = "fp16" if args.fp16 else "fp32"
latency = measurements['latency']
throughput = audios.size(1)/latency
log_data = f"1,{sequence_lengths[0].item()},{prec},{latency},{throughput},{mel_lengths[0].item()}\n"
log_file = path.join(args.output, f"log_bs1_{prec}.log")
with open(log_file, 'a') as f:
f.write(log_data)
if __name__ == "__main__":
main()
| TensorRT-master | demo/Tacotron2/tensorrt/inference_trt.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import argparse
import os
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parents[1]))
from common.utils import ParseFromConfigFile
from inference import load_and_setup_model
def convert_convinv_1d_to_2d(convinv):
"""
Takes an invertible 1x1 1-d convolution and returns a 2-d convolution that does
the inverse
"""
conv2d = torch.nn.Conv2d(convinv.W_inverse.size(1),
convinv.W_inverse.size(0),
1, bias=False)
conv2d.weight.data[:,:,:,0] = convinv.W_inverse.data
return conv2d
def convert_conv_1d_to_2d(conv1d):
conv2d = torch.nn.Conv2d(conv1d.weight.size(1),
conv1d.weight.size(0),
(conv1d.weight.size(2), 1),
stride=(conv1d.stride[0], 1),
dilation=(conv1d.dilation[0], 1),
padding=(conv1d.padding[0], 0))
conv2d.weight.data[:,:,:,0] = conv1d.weight.data
conv2d.bias.data = conv1d.bias.data
return conv2d
def convert_WN_1d_to_2d_(WN):
"""
Modifies the WaveNet like affine coupling layer in-place to use 2-d convolutions
"""
WN.start = convert_conv_1d_to_2d(WN.start)
WN.end = convert_conv_1d_to_2d(WN.end)
for i in range(len(WN.in_layers)):
WN.in_layers[i] = convert_conv_1d_to_2d(WN.in_layers[i])
for i in range(len(WN.res_skip_layers)):
WN.res_skip_layers[i] = convert_conv_1d_to_2d(WN.res_skip_layers[i])
    for i in range(len(WN.cond_layers)):
        WN.cond_layers[i] = convert_conv_1d_to_2d(WN.cond_layers[i])
def convert_1d_to_2d_(glow):
"""
Caffe2 and TensorRT don't seem to support 1-d convolutions or properly
convert ONNX exports with 1d convolutions to 2d convolutions yet, so we
do the conversion to 2-d convolutions before ONNX export
"""
# Convert upsample to 2d
upsample = torch.nn.ConvTranspose2d(glow.upsample.weight.size(0),
glow.upsample.weight.size(1),
(glow.upsample.weight.size(2), 1),
stride=(glow.upsample.stride[0], 1))
upsample.weight.data[:,:,:,0] = glow.upsample.weight.data
upsample.bias.data = glow.upsample.bias.data
glow.upsample = upsample.cuda()
# Convert WN to 2d
for WN in glow.WN:
convert_WN_1d_to_2d_(WN)
# Convert invertible conv to 2d
for i in range(len(glow.convinv)):
glow.convinv[i] = convert_convinv_1d_to_2d(glow.convinv[i])
glow.cuda()
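# A minimal sanity-check sketch (illustrative only, not part of the original export path):
# a converted 2-d convolution applied to an input with a trailing singleton dimension should
# reproduce the original 1-d convolution.
def _example_check_conv_1d_to_2d():
    conv1d = torch.nn.Conv1d(2, 4, 3, padding=1)
    conv2d = convert_conv_1d_to_2d(conv1d)
    x = torch.randn(1, 2, 10)                      # (batch, channels, length)
    with torch.no_grad():
        y1 = conv1d(x)
        y2 = conv2d(x.unsqueeze(3)).squeeze(3)     # add/remove the dummy width dimension
    assert torch.allclose(y1, y2, atol=1e-6)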
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--waveglow', type=str, required=True,
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory or file name for the exported WaveGlow ONNX model')
parser.add_argument('--fp16', action='store_true',
help='inference with AMP')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
parser.add_argument('--config-file', action=ParseFromConfigFile,
type=str, help='Path to configuration file')
return parser
def export_onnx(parser, args):
waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
fp16_run=args.fp16, cpu_run=False,
forward_is_infer=False)
# 80 mel channels, 620 mel spectrograms ~ 7 seconds of speech
mel = torch.randn(1, 80, 620).cuda()
stride = 256 # value from waveglow upsample
n_group = 8
z_size2 = (mel.size(2)*stride)//n_group
z = torch.randn(1, n_group, z_size2, 1).cuda()
if args.fp16:
mel = mel.half()
z = z.half()
with torch.no_grad():
# run inference to force calculation of inverses
waveglow.infer(mel, sigma=args.sigma_infer)
convert_1d_to_2d_(waveglow)
mel = mel.unsqueeze(3)
# export to ONNX
if args.fp16:
waveglow = waveglow.half()
waveglow.forward = waveglow.infer_onnx
opset_version = 11
if os.path.isdir(args.output):
output_path = os.path.join(args.output, "waveglow.onnx")
else:
output_path = args.output
torch.onnx.export(waveglow, (mel, z), output_path,
opset_version=opset_version,
do_constant_folding=True,
input_names=["mel", "z"],
output_names=["audio"],
dynamic_axes={"mel": {0: "batch_size", 2: "mel_seq"},
"z": {0: "batch_size", 2: "z_seq"},
"audio": {0: "batch_size", 1: "audio_seq"}})
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
export_onnx(parser, args)
if __name__ == '__main__':
main()
| TensorRT-master | demo/Tacotron2/tensorrt/convert_waveglow2onnx.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import onnx
import sys
import os
import numpy as np
import argparse
def insert_decoder_loop(decoder_iter_onnx_path, output_dir, decoder_out_name, fp16):
float_prec = np.float16 if fp16 else np.float32
# Modify loop body so that it has 2+N inputs: (iteration_num, condition, loop carried dependencies...)
# and 1+N+K outputs: (condition, loop carried dependencies..., scan_outputs...)
# In this case, the loop carried dependencies include the following IN ORDER
# - decoder_output/decoder_input
# - attention_hidden
# - attention_cell
# - decoder_hidden
# - decoder_cell
# - attention_weights
# - attention_weights_cum
# - attention_context
# - not_finished (bool tensor, initialized to all True)
# - mel_lengths
# The following are NOT loop carried dependencies (they remain constant through the loop), and must be moved to be inputs outside of the loop body
# - memory
# - processed_memory
# - mask
# The scan outputs are
# - mel_outputs (which scans across decoder_output)
# - gate_outputs (scans across gate_prediction)
# - alignments (scans across attention_weights)
loop_body = gs.import_onnx(onnx.load(decoder_iter_onnx_path))
loop_tensors = loop_body.tensors()
iteration_num = gs.Variable("iteration_num", dtype=np.int64, shape=())
cond_in = gs.Variable("cond_in", dtype=bool, shape=())
cond_out = gs.Variable("cond_out", dtype=bool, shape=())
not_finished_in = gs.Variable("not_finished_in", shape=('batch_size', 1), dtype=bool)
not_finished_out = gs.Variable("not_finished_out", shape=('batch_size', 1), dtype=bool)
mel_lengths_in = gs.Variable("mel_lengths_in", shape=('batch_size', 1), dtype=np.int32)
mel_lengths_out = gs.Variable("mel_lengths_out", shape=('batch_size', 1), dtype=np.int32)
# Set loop body inputs in the correct order
loop_body.inputs = [iteration_num, cond_in, loop_tensors["decoder_input"], loop_tensors["attention_hidden"], loop_tensors["attention_cell"], loop_tensors["decoder_hidden"], loop_tensors["decoder_cell"], loop_tensors["attention_weights"], loop_tensors["attention_weights_cum"], loop_tensors["attention_context"], not_finished_in, mel_lengths_in]
# Set loop body outputs in the correct order
loop_body.outputs = [cond_out, loop_tensors["decoder_output"], loop_tensors["out_attention_hidden"], loop_tensors["out_attention_cell"], loop_tensors["out_decoder_hidden"], loop_tensors["out_decoder_cell"], loop_tensors["out_attention_weights"], loop_tensors["out_attention_weights_cum"], loop_tensors["out_attention_context"], not_finished_out, mel_lengths_out, loop_tensors["decoder_output"], loop_tensors["gate_prediction"], loop_tensors["out_attention_weights"]]
# The loop stop condition is given by the following lines in PyTorch
# dec = torch.le(torch.sigmoid(decoder_outputs[8]), gate_threshold).to(torch.int32).squeeze(1)
# not_finished = not_finished*dec
# if torch.sum(not_finished) == 0:
# break
# To compute cond_out, we can essentially follow the same steps. Using Less instead of Greater+Not for now
gate_threshold = gs.Constant("gate_threshold", np.array([0.5], dtype=float_prec))
gate_sigmoid = gs.Variable("gate_sigmoid", dtype=float_prec, shape=())
sigmoid = loop_body.nodes.append(gs.Node(op="Sigmoid", inputs=[loop_tensors["gate_prediction"]], outputs=[gate_sigmoid]))
leq_output = gs.Variable("leq_output", dtype=bool)
leq = loop_body.nodes.append(gs.Node(op="Less", inputs=[gate_sigmoid, gate_threshold], outputs=[leq_output]))
loop_body.nodes.append(gs.Node(op="And", inputs=[not_finished_in, leq_output], outputs=[not_finished_out]))
cast_output = gs.Variable("cast_output", dtype=np.int32)
loop_body.nodes.append(gs.Node(op="Cast", inputs=[not_finished_out], outputs=[cast_output], attrs={"to": 6})) # int32
reduce_output = gs.Variable("reduce_output", dtype=np.int32)
loop_body.nodes.append( gs.Node(op="ReduceSum", inputs=[cast_output], outputs=[reduce_output], attrs={"axes": [0], "keepdims": 0}))
unsqueezed_cond_out = gs.Variable("unsqueezed_cond_out", dtype=bool)
loop_body.nodes.append(gs.Node(op="Equal", inputs=[reduce_output, gs.Constant("zero", np.array(0, dtype=np.int32))], outputs=[unsqueezed_cond_out]))
squeezed_cond_out = gs.Variable("squeezed_cond_out", dtype=bool)
loop_body.nodes.append(gs.Node(op="Squeeze", inputs=[unsqueezed_cond_out], outputs=[squeezed_cond_out], attrs={"axes": [0]}))
loop_body.nodes.append(gs.Node(op="Not", inputs=[squeezed_cond_out], outputs=[cond_out]))
# Compute mel_lengths
# from PyTorch: mel_lengths += not_finished
loop_body.nodes.append(gs.Node(op="Add", inputs=[mel_lengths_in, cast_output], outputs=[mel_lengths_out]))
memory = gs.Variable("memory", dtype=float_prec, shape=('batch_size', 'seq_len', 512))
processed_memory = gs.Variable("processed_memory", dtype=float_prec, shape=('batch_size', 'seq_len', 128))
mask = gs.Variable("mask", dtype=bool, shape=('batch_size', 'seq_len'))
loop_body.toposort()
onnx.save(gs.export_onnx(loop_body), os.path.join(output_dir, "loop_body_{prec}.onnx".format(prec="fp16" if float_prec == np.float16 else "fp32")))
# Create outer graph
# Inputs to outer graph are the following (suffixed with _0 to signify initial states)
# - decoder_input_0
# - attention_hidden_0
# - attention_cell_0
# - decoder_hidden_0
# - decoder_cell_0
# - attention_weights_0
# - attention_weights_cum_0
# - attention_context_0
# - memory
# - processed_memory
# - mask
# Outputs are the following
# - mel_outputs
# - mel_lengths
    # Note: alignments and gate_outputs are scan outputs, but they do not appear to be used later in the PyTorch implementation. For now, we keep them as intermediate tensors that are not exposed as graph outputs
graph = gs.Graph()
decoder_input_0 = gs.Variable("decoder_input_0", dtype=float_prec, shape=('batch_size', 80))
attention_hidden_0 = gs.Variable("attention_hidden_0", dtype=float_prec, shape=('batch_size', 1024))
attention_cell_0 = gs.Variable("attention_cell_0", dtype=float_prec, shape=('batch_size', 1024))
decoder_hidden_0 = gs.Variable("decoder_hidden_0", dtype=float_prec, shape=('batch_size', 1024))
decoder_cell_0 = gs.Variable("decoder_cell_0", dtype=float_prec, shape=('batch_size', 1024))
attention_weights_0 = gs.Variable("attention_weights_0", dtype=float_prec, shape=('batch_size', 'seq_len'))
attention_weights_cum_0 = gs.Variable("attention_weights_cum_0", dtype=float_prec, shape=('batch_size', 'seq_len'))
attention_context_0 = gs.Variable("attention_context_0", dtype=float_prec, shape=('batch_size', 512))
not_finished_0 = gs.Variable("not_finished_0", dtype=bool)
mel_lengths_0 = gs.Variable("mel_lengths_0", dtype=np.int32)
# For not_finished, we need to generate a tensor of shape (batch_size) that is all 1s
# We can use the ONNX ConstantOfShape op to do this
not_finished_shape = gs.Variable("not_finished_shape", dtype=np.int64)
reduced = gs.Variable("reduced", dtype=float_prec)
graph.nodes.append(gs.Node(op="ReduceSum", inputs=[decoder_input_0], outputs=[reduced], attrs={"axes":[1], "keepdims": 1}))
graph.nodes.append(gs.Node(op="Shape", inputs=[reduced], outputs=[not_finished_shape]))
before_cast = gs.Variable("before_cast", dtype=np.int32)
graph.nodes.append(gs.Node(op="ConstantOfShape", inputs=[not_finished_shape], outputs=[before_cast], attrs={"value":gs.Constant("one", np.array([1], dtype=np.int32))}))
graph.nodes.append(gs.Node(op="Cast", inputs=[before_cast], outputs=[not_finished_0], attrs={"to": 9}))
# Same thing for mel_lengths, but we need all 0s
graph.nodes.append(gs.Node(op="ConstantOfShape", inputs=[not_finished_shape], outputs=[mel_lengths_0], attrs={"value":gs.Constant("zero", np.array([0], dtype=np.int32))}))
    # Loop carried dependencies at the end of the loop
decoder_input_t = gs.Variable("decoder_input_t", dtype=float_prec, shape=('batch_size', 80))
attention_hidden_t = gs.Variable("attention_hidden_t", dtype=float_prec, shape=('batch_size', 1024))
attention_cell_t = gs.Variable("attention_cell_t", dtype=float_prec, shape=('batch_size', 1024))
decoder_hidden_t = gs.Variable("decoder_hidden_t", dtype=float_prec, shape=('batch_size', 1024))
decoder_cell_t = gs.Variable("decoder_cell_t", dtype=float_prec, shape=('batch_size', 1024))
attention_weights_t = gs.Variable("attention_weights_t", dtype=float_prec, shape=('batch_size', 'seq_len'))
attention_weights_cum_t = gs.Variable("attention_weights_cum_t", dtype=float_prec, shape=('batch_size', 'seq_len'))
attention_context_t = gs.Variable("attention_context_t", dtype=float_prec, shape=('batch_size', 512))
not_finished_t = gs.Variable("not_finished_t", dtype=bool)
mel_lengths_t = gs.Variable("mel_lengths_t", dtype=np.int32, shape=('batch_size', 1))
# Scan outputs
mel_outputs_raw = gs.Variable("mel_outputs_raw", dtype=float_prec, shape=(-1, 'batch_size', 80))
gate_outputs = gs.Variable("gate_outputs", dtype=float_prec, shape=(-1, 'batch_size', 1))
alignments = gs.Variable("alignments", dtype=float_prec, shape=(-1, 1, 'seq_len'))
mel_outputs = gs.Variable("mel_outputs", dtype=float_prec, shape=('batch_size', 80, -1))
graph.inputs = [decoder_input_0, attention_hidden_0, attention_cell_0, decoder_hidden_0, decoder_cell_0, attention_weights_0, attention_weights_cum_0, attention_context_0, memory, processed_memory, mask]
graph.outputs = [mel_outputs, mel_lengths_t]
trip_count = gs.Constant("trip_count", np.array(0, dtype=np.int64)) # In ONNX, this is an optional parameter, but I don't think ONNX-GS supports optional inputs. To fix this, after we export the ONNX ModelProto from GS, we replace this input with ""
initial_cond = gs.Constant("initial_cond", np.array(True, dtype=bool))
loop_inputs = [trip_count, initial_cond, decoder_input_0, attention_hidden_0, attention_cell_0, decoder_hidden_0, decoder_cell_0, attention_weights_0, attention_weights_cum_0, attention_context_0, not_finished_0, mel_lengths_0]
loop_outputs = [decoder_input_t, attention_hidden_t, attention_cell_t, decoder_hidden_t, decoder_cell_t, attention_weights_t, attention_weights_cum_t, attention_context_t, not_finished_t, mel_lengths_t, mel_outputs_raw, gate_outputs, alignments]
decoder_loop = gs.Node(op="Loop", name="decoder_loop", inputs=loop_inputs, outputs=loop_outputs, attrs={"body": loop_body})
graph.nodes.append(decoder_loop)
graph.nodes.append(gs.Node(op="Transpose", inputs=[mel_outputs_raw], outputs=[mel_outputs], attrs={"perm": [1, 2, 0]})) # Output needs to have loop dimension as inner-most dim
graph.toposort()
exported_graph = gs.export_onnx(graph)
[x for x in exported_graph.graph.node if x.name == "decoder_loop"][0].input[0] = "" # Remove trip count input
onnx.save(exported_graph, os.path.join(output_dir, decoder_out_name))
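# A minimal validation sketch (illustrative only; the path argument is whatever
# insert_decoder_loop just wrote): load the exported model, run the ONNX checker, and list
# the graph inputs/outputs to confirm the expected decoder-with-loop interface.
def _example_check_exported_decoder(decoder_onnx_path):
    from onnx import checker
    model = onnx.load(decoder_onnx_path)
    checker.check_model(model)
    print("inputs: ", [i.name for i in model.graph.input])
    print("outputs:", [o.name for o in model.graph.output])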
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str,
help='path to original decoder_iter ONNX model')
parser.add_argument('-o', '--output_dir', type=str, default='.', help='Output directory')
parser.add_argument('--decoder_out', type=str, help='Filename of the exported decoder with outer loop')
parser.add_argument('--fp16', action='store_true')
args = parser.parse_args()
    if args.decoder_out is None:
args.decoder_out = "decoder_with_outer_loop_{}.onnx".format("fp16" if args.fp16 else "fp32")
    insert_decoder_loop(args.model_path, args.output_dir, args.decoder_out, args.fp16)
 | TensorRT-master | demo/Tacotron2/tensorrt/generate_decoder.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from torch import nn
from torch.nn import functional as F
import argparse
import tensorrt
import sys
import os
from pathlib import Path
sys.path.append(str(Path(__file__).parents[1]))
import models
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model, prepare_input_sequence
from common.utils import to_gpu, get_mask_from_lengths
torch.backends.cudnn.enabled = True
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str, required=True,
help='Full path to the Tacotron2 model checkpoint file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory for the exported Tacotron2 ONNX models')
parser.add_argument('-e', '--encoder', type=str, required=False, default="encoder.onnx",
help='Filename for exported encoder ONNX model')
parser.add_argument('-d', '--decoder', type=str, required=False, default="decoder_iter.onnx",
help='Filename for exported decoder ONNX model')
parser.add_argument('-p', '--postnet', type=str, required=False, default="postnet.onnx",
help='Filename for exported postnet ONNX model')
parser.add_argument('--fp16', action='store_true',
help='Export with half precision to ONNX')
    parser.add_argument('--loop', dest='loop', action='store_true',
                        help='Include the outer decoder loop in the exported decoder ONNX model. Only supported on TensorRT 8.0 or later, where it is the default.')
    parser.add_argument('--no-loop', dest='loop', action='store_false',
                        help='Exclude the outer decoder loop from the decoder ONNX model. Required for TensorRT 7.2 or earlier, where it is the default.')
parser.set_defaults(loop=int(tensorrt.__version__[0]) >= 8)
return parser
def encoder_infer(self, x, input_lengths):
device = x.device
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x.to(device))), 0.5, False)
x = x.transpose(1, 2)
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
lens = input_lengths*2
return outputs, lens
class Encoder(torch.nn.Module):
def __init__(self, tacotron2):
super(Encoder, self).__init__()
self.tacotron2 = tacotron2
self.tacotron2.encoder.lstm.flatten_parameters()
self.infer = encoder_infer
def forward(self, sequence, sequence_lengths):
embedded_inputs = self.tacotron2.embedding(sequence).transpose(1, 2)
memory, lens = self.infer(self.tacotron2.encoder, embedded_inputs, sequence_lengths)
processed_memory = self.tacotron2.decoder.attention_layer.memory_layer(memory)
return memory, processed_memory, lens
class Postnet(torch.nn.Module):
def __init__(self, tacotron2):
super(Postnet, self).__init__()
self.tacotron2 = tacotron2
def forward(self, mel_outputs):
mel_outputs_postnet = self.tacotron2.postnet(mel_outputs)
return mel_outputs + mel_outputs_postnet
def lstmcell2lstm_params(lstm_mod, lstmcell_mod):
lstm_mod.weight_ih_l0 = torch.nn.Parameter(lstmcell_mod.weight_ih)
lstm_mod.weight_hh_l0 = torch.nn.Parameter(lstmcell_mod.weight_hh)
lstm_mod.bias_ih_l0 = torch.nn.Parameter(lstmcell_mod.bias_ih)
lstm_mod.bias_hh_l0 = torch.nn.Parameter(lstmcell_mod.bias_hh)
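# A minimal sanity-check sketch (illustrative only, not part of the original export path):
# a single-layer nn.LSTM populated via lstmcell2lstm_params should reproduce one step of the
# original nn.LSTMCell.
def _example_check_lstmcell2lstm():
    cell = nn.LSTMCell(8, 16)
    lstm = nn.LSTM(8, 16, 1)
    lstmcell2lstm_params(lstm, cell)
    x = torch.randn(2, 8)                          # (batch, input_size)
    h0 = torch.zeros(2, 16)
    c0 = torch.zeros(2, 16)
    with torch.no_grad():
        h1, c1 = cell(x, (h0, c0))
        _, (h2, c2) = lstm(x.unsqueeze(0), (h0.unsqueeze(0), c0.unsqueeze(0)))
    assert torch.allclose(h1, h2.squeeze(0), atol=1e-6)
    assert torch.allclose(c1, c2.squeeze(0), atol=1e-6)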
def prenet_infer(self, x):
x1 = x[:]
for linear in self.layers:
x1 = F.relu(linear(x1))
x0 = x1[0].unsqueeze(0)
mask = torch.le(torch.rand(256, device='cuda').to(x.dtype), 0.5).to(x.dtype)
mask = mask.expand(x1.size(0), x1.size(1))
x1 = x1*mask*2.0
return x1
class DecoderIter(torch.nn.Module):
def __init__(self, tacotron2):
super(DecoderIter, self).__init__()
self.tacotron2 = tacotron2
dec = tacotron2.decoder
self.p_attention_dropout = dec.p_attention_dropout
self.p_decoder_dropout = dec.p_decoder_dropout
self.prenet = dec.prenet
self.prenet.infer = prenet_infer
self.attention_rnn = nn.LSTM(dec.prenet_dim + dec.encoder_embedding_dim,
dec.attention_rnn_dim, 1)
lstmcell2lstm_params(self.attention_rnn, dec.attention_rnn)
self.attention_rnn.flatten_parameters()
self.attention_layer = dec.attention_layer
self.decoder_rnn = nn.LSTM(dec.attention_rnn_dim + dec.encoder_embedding_dim,
dec.decoder_rnn_dim, 1)
lstmcell2lstm_params(self.decoder_rnn, dec.decoder_rnn)
self.decoder_rnn.flatten_parameters()
self.linear_projection = dec.linear_projection
self.gate_layer = dec.gate_layer
def decode(self, decoder_input, in_attention_hidden, in_attention_cell,
in_decoder_hidden, in_decoder_cell, in_attention_weights,
in_attention_weights_cum, in_attention_context, memory,
processed_memory, mask):
cell_input = torch.cat((decoder_input, in_attention_context), -1)
_, (out_attention_hidden, out_attention_cell) = self.attention_rnn(
cell_input.unsqueeze(0), (in_attention_hidden.unsqueeze(0),
in_attention_cell.unsqueeze(0)))
out_attention_hidden = out_attention_hidden.squeeze(0)
out_attention_cell = out_attention_cell.squeeze(0)
out_attention_hidden = F.dropout(
out_attention_hidden, self.p_attention_dropout, False)
attention_weights_cat = torch.cat(
(in_attention_weights.unsqueeze(1),
in_attention_weights_cum.unsqueeze(1)), dim=1)
out_attention_context, out_attention_weights = self.attention_layer(
out_attention_hidden, memory, processed_memory,
attention_weights_cat, mask)
out_attention_weights_cum = in_attention_weights_cum + out_attention_weights
decoder_input_tmp = torch.cat(
(out_attention_hidden, out_attention_context), -1)
_, (out_decoder_hidden, out_decoder_cell) = self.decoder_rnn(
decoder_input_tmp.unsqueeze(0), (in_decoder_hidden.unsqueeze(0),
in_decoder_cell.unsqueeze(0)))
out_decoder_hidden = out_decoder_hidden.squeeze(0)
out_decoder_cell = out_decoder_cell.squeeze(0)
out_decoder_hidden = F.dropout(
out_decoder_hidden, self.p_decoder_dropout, False)
decoder_hidden_attention_context = torch.cat(
(out_decoder_hidden, out_attention_context), 1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return (decoder_output, gate_prediction, out_attention_hidden,
out_attention_cell, out_decoder_hidden, out_decoder_cell,
out_attention_weights, out_attention_weights_cum, out_attention_context)
# @torch.jit.script
def forward(self,
decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask):
decoder_input1 = self.prenet.infer(self.prenet, decoder_input)
outputs = self.decode(decoder_input1,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
return outputs
def test_inference(encoder, decoder_iter, postnet):
encoder.eval()
decoder_iter.eval()
postnet.eval()
sys.path.append('./tensorrt')
from inference_trt import init_decoder_inputs
texts = ["Hello World, good day."]
sequences, sequence_lengths = prepare_input_sequence(texts)
measurements = {}
print("Running Tacotron2 Encoder")
with torch.no_grad():
memory, processed_memory, lens = encoder(sequences, sequence_lengths)
print("Running Tacotron2 Decoder")
device = memory.device
dtype = memory.dtype
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device = device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device = device)
mel_outputs, gate_outputs, alignments = (torch.zeros(1), torch.zeros(1), torch.zeros(1))
gate_threshold = 0.6
max_decoder_steps = 1000
first_iter = True
(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory,
mask) = init_decoder_inputs(memory, processed_memory, sequence_lengths)
while True:
with torch.no_grad():
(mel_output, gate_output,
attention_hidden, attention_cell,
decoder_hidden, decoder_cell,
attention_weights, attention_weights_cum,
attention_context) = decoder_iter(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory, mask)
if first_iter:
mel_outputs = torch.unsqueeze(mel_output, 2)
gate_outputs = torch.unsqueeze(gate_output, 2)
alignments = torch.unsqueeze(attention_weights, 2)
first_iter = False
else:
mel_outputs = torch.cat((mel_outputs, torch.unsqueeze(mel_output, 2)), 2)
gate_outputs = torch.cat((gate_outputs, torch.unsqueeze(gate_output, 2)), 2)
alignments = torch.cat((alignments, torch.unsqueeze(attention_weights, 2)), 2)
dec = torch.le(torch.sigmoid(gate_output), gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if torch.sum(not_finished) == 0:
print("Stopping after ",mel_outputs.size(2)," decoder steps")
break
if mel_outputs.size(2) == max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
print("Running Tacotron2 PostNet")
with torch.no_grad():
mel_outputs_postnet = postnet(mel_outputs)
return mel_outputs_postnet
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 export to TRT')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
args.encoder = os.path.join(args.output, args.encoder)
args.decoder = os.path.join(args.output, args.decoder)
args.postnet = os.path.join(args.output, args.postnet)
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
fp16_run=args.fp16, cpu_run=False)
opset_version = 10
sequences = torch.randint(low=0, high=148, size=(1,50),
dtype=torch.long).cuda()
sequence_lengths = torch.IntTensor([sequences.size(1)])
dummy_input = (sequences, sequence_lengths)
encoder = Encoder(tacotron2)
encoder.eval()
with torch.no_grad():
encoder(*dummy_input)
torch.onnx.export(encoder, dummy_input, args.encoder,
opset_version=opset_version,
do_constant_folding=True,
input_names=["sequences", "sequence_lengths"],
output_names=["memory", "processed_memory", "lens"],
dynamic_axes={"sequences": {0: "batch_size", 1: "text_seq"},
"sequence_lengths": {0: "batch_size"},
"memory": {0: "batch_size", 1: "mem_seq"},
"processed_memory": {0: "batch_size", 1: "mem_seq"},
"lens": {0: "batch_size"}
})
decoder_iter = DecoderIter(tacotron2)
memory = torch.randn((1,sequence_lengths[0],512)).cuda() #encoder_outputs
if args.fp16:
memory = memory.half()
memory_lengths = sequence_lengths.cuda()
# initialize decoder states for dummy_input
decoder_input = tacotron2.decoder.get_go_frame(memory)
mask = get_mask_from_lengths(memory_lengths)
(attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory) = tacotron2.decoder.initialize_decoder_states(memory)
dummy_input = (decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
decoder_iter = DecoderIter(tacotron2)
decoder_iter.eval()
with torch.no_grad():
decoder_iter(*dummy_input)
torch.onnx.export(decoder_iter, dummy_input, args.decoder,
opset_version=opset_version,
do_constant_folding=True,
input_names=["decoder_input",
"attention_hidden",
"attention_cell",
"decoder_hidden",
"decoder_cell",
"attention_weights",
"attention_weights_cum",
"attention_context",
"memory",
"processed_memory",
"mask"],
output_names=["decoder_output",
"gate_prediction",
"out_attention_hidden",
"out_attention_cell",
"out_decoder_hidden",
"out_decoder_cell",
"out_attention_weights",
"out_attention_weights_cum",
"out_attention_context"],
dynamic_axes={"attention_weights" : {0: "batch_size", 1: "seq_len"},
"attention_weights_cum" : {0: "batch_size", 1: "seq_len"},
"memory" : {0: "batch_size", 1: "seq_len"},
"processed_memory" : {0: "batch_size", 1: "seq_len"},
"mask" : {0: "batch_size", 1: "seq_len"},
"out_attention_weights" : {0: "batch_size", 1: "seq_len"},
"out_attention_weights_cum" : {0: "batch_size", 1: "seq_len"}
})
if args.loop:
from generate_decoder import insert_decoder_loop
decoder_dir = os.path.dirname(os.path.abspath(args.decoder))
insert_decoder_loop(args.decoder, decoder_dir, os.path.basename(args.decoder).replace("_iter", ""), args.fp16)
postnet = Postnet(tacotron2)
dummy_input = torch.randn((1,80,620)).cuda()
if args.fp16:
dummy_input = dummy_input.half()
torch.onnx.export(postnet, dummy_input, args.postnet,
opset_version=opset_version,
do_constant_folding=True,
input_names=["mel_outputs"],
output_names=["mel_outputs_postnet"],
dynamic_axes={"mel_outputs": {0: "batch_size", 2: "mel_seq"},
"mel_outputs_postnet": {0: "batch_size", 2: "mel_seq"}})
if __name__ == '__main__':
main()
| TensorRT-master | demo/Tacotron2/tensorrt/convert_tacotron22onnx.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import onnx
import pycuda.autoinit
import pycuda.driver as cuda
import sys
import tensorrt as trt
from os.path import join
from trt_utils import build_engine, parse_dynamic_size
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-o', '--output', required=True,
help='output folder to save audio (file per phrase)')
parser.add_argument('--encoder', type=str, default="",
help='full path to the Encoder ONNX')
parser.add_argument('--decoder', type=str, default="",
help='full path to the Decoder or DecoderIter ONNX.')
parser.add_argument('--postnet', type=str, default="",
help='full path to the Postnet ONNX')
parser.add_argument('--waveglow', type=str, default="",
help='full path to the WaveGlow ONNX')
parser.add_argument('--encoder_out', type=str,
help='Filename of the exported encoder engine')
parser.add_argument('--decoder_out', type=str,
help='Filename of the exported decoder engine')
parser.add_argument('--postnet_out', type=str,
help='Filename of the exported postnet engine')
parser.add_argument('--waveglow_out', type=str,
help='Filename of the exported waveglow engine')
parser.add_argument('--fp16', action='store_true',
help='inference with FP16')
parser.add_argument('-bs', '--batch-size', type=str, default="1",
help='One or three comma separated integers specifying the batch size. Specify "min,opt,max" for dynamic shape')
parser.add_argument('--mel-size', type=str, default="32,768,1664",
help='One or three comma separated integers specifying the mels size for waveglow.')
parser.add_argument('--z-size', type=str, default="1024,24576,53248",
help='One or three comma separated integers specifying the z size for waveglow.')
    parser.add_argument('--loop', dest='loop', action='store_true',
                        help='Build the decoder engine from the ONNX model that includes the outer decoder loop. Only supported on TensorRT 8.0 or later, where it is the default.')
    parser.add_argument('--no-loop', dest='loop', action='store_false',
                        help='Build the per-iteration decoder engine without the outer loop. Required for TensorRT 7.2 or earlier, where it is the default.')
    parser.set_defaults(loop=int(trt.__version__.split('.')[0]) >= 8)
return parser
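# Hedged example invocation (the ONNX file names below are illustrative assumptions,
# typically produced by the export script in this demo; they are not defined here):
#   python3 tensorrt/convert_onnx2trt.py -o output/ \
#       --encoder output/encoder.onnx --decoder output/decoder_iter.onnx \
#       --postnet output/postnet.onnx --waveglow output/waveglow.onnx \
#       -bs 1 --fp16
# Per the help text above, "-bs 1,4,8" would instead request min/opt/max batch
# sizes for a dynamic-shape optimization profile.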
def main():
parser = argparse.ArgumentParser(
description='Export from ONNX to TensorRT for Tacotron 2 and WaveGlow')
parser = parse_args(parser)
args = parser.parse_args()
precision = "fp16" if args.fp16 else "fp32"
encoder_path = join(args.output, args.encoder_out if args.encoder_out else f"encoder_{precision}.engine")
    decoder_path = join(args.output, args.decoder_out if args.decoder_out else (f"decoder_with_outer_loop_{precision}.engine" if args.loop else f"decoder_iter_{precision}.engine"))
postnet_path = join(args.output, args.postnet_out if args.postnet_out else f"postnet_{precision}.engine")
waveglow_path = join(args.output, args.waveglow_out if args.waveglow_out else f"waveglow_{precision}.engine")
bs_min, bs_opt, bs_max = parse_dynamic_size(args.batch_size)
mel_min, mel_opt, mel_max = parse_dynamic_size(args.mel_size)
z_min, z_opt, z_max = parse_dynamic_size(args.z_size)
# Encoder
shapes=[{"name": "sequences", "min": (bs_min,4), "opt": (bs_opt,128), "max": (bs_max,256)},
{"name": "sequence_lengths", "min": (bs_min,), "opt": (bs_opt,), "max": (bs_max,)}]
if args.encoder != "":
print("Building Encoder ...")
encoder_engine = build_engine(args.encoder, shapes=shapes, fp16=args.fp16)
if encoder_engine is not None:
with open(encoder_path, 'wb') as f:
f.write(encoder_engine.serialize())
else:
print("Failed to build engine from", args.encoder)
sys.exit(1)
if args.loop:
# Decoder
shapes=[{"name": "decoder_input_0", "min": (bs_min,80), "opt": (bs_opt,80), "max": (bs_max,80)},
{"name": "attention_hidden_0", "min": (bs_min,1024), "opt": (bs_opt,1024), "max": (bs_max,1024)},
{"name": "attention_cell_0", "min": (bs_min,1024), "opt": (bs_opt,1024), "max": (bs_max,1024)},
{"name": "decoder_hidden_0", "min": (bs_min,1024), "opt": (bs_opt,1024), "max": (bs_max,1024)},
{"name": "decoder_cell_0", "min": (bs_min,1024), "opt": (bs_opt,1024), "max": (bs_max,1024)},
{"name": "attention_weights_0", "min": (bs_min,4), "opt": (bs_opt,128), "max": (bs_max,256)},
{"name": "attention_weights_cum_0", "min": (bs_min,4), "opt": (bs_opt,128), "max": (bs_max,256)},
{"name": "attention_context_0", "min": (bs_min,512), "opt": (bs_opt,512), "max": (bs_max,512)},
{"name": "memory", "min": (bs_min,4,512), "opt": (bs_opt,128,512), "max": (bs_max,256,512)},
{"name": "processed_memory", "min": (bs_min,4,128), "opt": (bs_opt,128,128), "max": (bs_max,256,128)},
{"name": "mask", "min": (bs_min,4), "opt": (bs_opt,128), "max": (bs_max,256)}]
if args.decoder != "":
print("Building Decoder with loop...")
decoder_engine = build_engine(args.decoder, shapes=shapes, fp16=args.fp16)
if decoder_engine is not None:
with open(decoder_path, 'wb') as f:
f.write(decoder_engine.serialize())
else:
print("Failed to build engine from", args.decoder)
sys.exit(1)
else:
# DecoderIter
shapes=[{"name": "decoder_input", "min": (bs_min,80), "opt": (bs_opt,80), "max": (bs_max,80)},
{"name": "attention_hidden", "min": (bs_min,1024), "opt": (bs_opt,1024), "max": (bs_max,1024)},
{"name": "attention_cell", "min": (bs_min,1024), "opt": (bs_opt,1024), "max": (bs_max,1024)},
{"name": "decoder_hidden", "min": (bs_min,1024), "opt": (bs_opt,1024), "max": (bs_max,1024)},
{"name": "decoder_cell", "min": (bs_min,1024), "opt": (bs_opt,1024), "max": (bs_max,1024)},
{"name": "attention_weights", "min": (bs_min,4), "opt": (bs_opt,128), "max": (bs_max,256)},
{"name": "attention_weights_cum", "min": (bs_min,4), "opt": (bs_opt,128), "max": (bs_max,256)},
{"name": "attention_context", "min": (bs_min,512), "opt": (bs_opt,512), "max": (bs_max,512)},
{"name": "memory", "min": (bs_min,4,512), "opt": (bs_opt,128,512), "max": (bs_max,256,512)},
{"name": "processed_memory", "min": (bs_min,4,128), "opt": (bs_opt,128,128), "max": (bs_max,256,128)},
{"name": "mask", "min": (bs_min,4), "opt": (bs_opt,128), "max": (bs_max,256)}]
if args.decoder != "":
print("Building Decoder ...")
decoder_iter_engine = build_engine(args.decoder, shapes=shapes, fp16=args.fp16)
if decoder_iter_engine is not None:
with open(decoder_path, 'wb') as f:
f.write(decoder_iter_engine.serialize())
else:
print("Failed to build engine from", args.decoder)
sys.exit(1)
# Postnet
shapes=[{"name": "mel_outputs", "min": (bs_min,80,32), "opt": (bs_opt,80,768), "max": (bs_max,80,1664)}]
if args.postnet != "":
print("Building Postnet ...")
postnet_engine = build_engine(args.postnet, shapes=shapes, fp16=args.fp16)
if postnet_engine is not None:
with open(postnet_path, 'wb') as f:
f.write(postnet_engine.serialize())
else:
print("Failed to build engine from", args.postnet)
sys.exit(1)
# WaveGlow
shapes=[{"name": "mel", "min": (bs_min,80,mel_min,1), "opt": (bs_opt,80,mel_opt,1), "max": (bs_max,80,mel_max,1)},
{"name": "z", "min": (bs_min,8,z_min,1), "opt": (bs_opt,8,z_opt,1), "max": (bs_max,8,z_max,1)}]
if args.waveglow != "":
print("Building WaveGlow ...")
waveglow_engine = build_engine(args.waveglow, shapes=shapes, fp16=args.fp16)
if waveglow_engine is not None:
with open(waveglow_path, 'wb') as f:
f.write(waveglow_engine.serialize())
else:
print("Failed to build engine from", args.waveglow)
sys.exit(1)
if __name__ == '__main__':
main()
| TensorRT-master | demo/Tacotron2/tensorrt/convert_onnx2trt.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import numpy as np
import torch
import torch.utils.data
import common.layers as layers
from common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
from tacotron2.text import text_to_sequence
class TextMelLoader(torch.utils.data.Dataset):
"""
    1) loads audio, text pairs
    2) normalizes text and converts it to sequences of symbol IDs
    3) computes mel-spectrograms from audio files.
"""
def __init__(self, dataset_path, audiopaths_and_text, args):
self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)
self.text_cleaners = args.text_cleaners
self.max_wav_value = args.max_wav_value
self.sampling_rate = args.sampling_rate
self.load_mel_from_disk = args.load_mel_from_disk
self.stft = layers.TacotronSTFT(
args.filter_length, args.hop_length, args.win_length,
args.n_mel_channels, args.sampling_rate, args.mel_fmin,
args.mel_fmax)
random.seed(1234)
random.shuffle(self.audiopaths_and_text)
def get_mel_text_pair(self, audiopath_and_text):
# separate filename and text
audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
len_text = len(text)
text = self.get_text(text)
mel = self.get_mel(audiopath)
return (text, mel, len_text)
def get_mel(self, filename):
if not self.load_mel_from_disk:
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
                raise ValueError("{} {} SR doesn't match target {} SR".format(
                    filename, sampling_rate, self.stft.sampling_rate))
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
else:
melspec = torch.load(filename)
assert melspec.size(0) == self.stft.n_mel_channels, (
'Mel dimension mismatch: given {}, expected {}'.format(
melspec.size(0), self.stft.n_mel_channels))
return melspec
def get_text(self, text):
text_norm = torch.IntTensor(text_to_sequence(text, self.text_cleaners))
return text_norm
def __getitem__(self, index):
return self.get_mel_text_pair(self.audiopaths_and_text[index])
def __len__(self):
return len(self.audiopaths_and_text)
class TextMelCollate():
""" Zero-pads model inputs and targets based on number of frames per setep
"""
def __init__(self, n_frames_per_step):
self.n_frames_per_step = n_frames_per_step
def __call__(self, batch):
"""Collate's training batch from normalized text and mel-spectrogram
PARAMS
------
batch: [text_normalized, mel_normalized]
"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
if max_target_len % self.n_frames_per_step != 0:
max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
assert max_target_len % self.n_frames_per_step == 0
# include mel padded and gate padded
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
gate_padded[i, mel.size(1)-1:] = 1
output_lengths[i] = mel.size(1)
# count number of items - characters in text
len_x = [x[2] for x in batch]
len_x = torch.Tensor(len_x)
return text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths, len_x
def batch_to_gpu(batch):
text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths, len_x = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
x = (text_padded, input_lengths, mel_padded, max_len, output_lengths)
y = (mel_padded, gate_padded)
len_x = torch.sum(output_lengths)
return (x, y, len_x)
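# --- Hedged usage sketch (not part of the original file) ---
# TextMelLoader and TextMelCollate are intended to be combined with a standard
# torch DataLoader. The file-list path, args namespace, and batch size below are
# illustrative assumptions only.
#
#   train_set = TextMelLoader(args.dataset_path, args.training_files, args)
#   collate_fn = TextMelCollate(args.n_frames_per_step)
#   train_loader = torch.utils.data.DataLoader(
#       train_set, batch_size=args.batch_size, shuffle=True,
#       collate_fn=collate_fn, drop_last=True)
#   for batch in train_loader:
#       x, y, num_frames = batch_to_gpu(batch)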
| TensorRT-master | demo/Tacotron2/tacotron2/data_function.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import sqrt
import torch
from torch import nn
from torch.nn import functional as F
import sys
from os.path import abspath, dirname
# enabling modules discovery from global entrypoint
sys.path.append(abspath(dirname(__file__)+'/../'))
from common.layers import ConvNorm, LinearNorm
from common.utils import to_gpu, get_mask_from_lengths
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size,
attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(2, attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding, bias=False, stride=1,
dilation=1)
self.location_dense = LinearNorm(attention_n_filters, attention_dim,
bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim,
attention_dim, attention_location_n_filters,
attention_location_kernel_size):
super(Attention, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(torch.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(2)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
alignment = alignment.masked_fill(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
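# Descriptive shape note (added for readability; inferred from the docstrings
# above and the dynamic-shape profiles used for the TensorRT export in this demo):
#   attention_hidden_state: (B, attention_rnn_dim)
#   memory:                 (B, T_in, encoder_embedding_dim)
#   processed_memory:       (B, T_in, attention_dim)
#   attention_weights_cat:  (B, 2, T_in)
#   mask:                   (B, T_in)
# The forward pass returns attention_context (B, encoder_embedding_dim) together
# with attention_weights (B, T_in).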
class Prenet(nn.Module):
def __init__(self, in_dim, sizes):
super(Prenet, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
            # Dropout is intentionally kept active at inference time (training=True),
            # as in the original Tacotron 2 prenet.
            x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
return x
class Postnet(nn.Module):
"""Postnet
    - Five 1-d convolutions with 512 channels and kernel size 5
"""
def __init__(self, n_mel_channels, postnet_embedding_dim,
postnet_kernel_size, postnet_n_convolutions):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(n_mel_channels, postnet_embedding_dim,
kernel_size=postnet_kernel_size, stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(postnet_embedding_dim))
)
for i in range(1, postnet_n_convolutions - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(postnet_embedding_dim,
postnet_embedding_dim,
kernel_size=postnet_kernel_size, stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(postnet_embedding_dim))
)
self.convolutions.append(
nn.Sequential(
ConvNorm(postnet_embedding_dim, n_mel_channels,
kernel_size=postnet_kernel_size, stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='linear'),
nn.BatchNorm1d(n_mel_channels))
)
self.n_convs = len(self.convolutions)
def forward(self, x):
i = 0
for conv in self.convolutions:
if i < self.n_convs - 1:
x = F.dropout(torch.tanh(conv(x)), 0.5, training=self.training)
else:
x = F.dropout(conv(x), 0.5, training=self.training)
i += 1
return x
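# Descriptive note (added): the postnet output above is a residual correction;
# Tacotron2.forward/infer below add it to the decoder mel outputs to produce
# mel_outputs_postnet.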
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, encoder_n_convolutions,
encoder_embedding_dim, encoder_kernel_size):
super(Encoder, self).__init__()
convolutions = []
for _ in range(encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(encoder_embedding_dim,
encoder_embedding_dim,
kernel_size=encoder_kernel_size, stride=1,
padding=int((encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
nn.BatchNorm1d(encoder_embedding_dim))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(encoder_embedding_dim,
int(encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
@torch.jit.ignore
def forward(self, x, input_lengths):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
        # pytorch tensors are not reversible, hence the conversion
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
@torch.jit.export
def infer(self, x, input_lengths):
device = x.device
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x.to(device))), 0.5, self.training)
x = x.transpose(1, 2)
input_lengths = input_lengths.cpu()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
class Decoder(nn.Module):
def __init__(self, n_mel_channels, n_frames_per_step,
encoder_embedding_dim, attention_dim,
attention_location_n_filters,
attention_location_kernel_size,
attention_rnn_dim, decoder_rnn_dim,
prenet_dim, max_decoder_steps, gate_threshold,
p_attention_dropout, p_decoder_dropout,
early_stopping):
super(Decoder, self).__init__()
self.n_mel_channels = n_mel_channels
self.n_frames_per_step = n_frames_per_step
self.encoder_embedding_dim = encoder_embedding_dim
self.attention_rnn_dim = attention_rnn_dim
self.decoder_rnn_dim = decoder_rnn_dim
self.prenet_dim = prenet_dim
self.max_decoder_steps = max_decoder_steps
self.gate_threshold = gate_threshold
self.p_attention_dropout = p_attention_dropout
self.p_decoder_dropout = p_decoder_dropout
self.early_stopping = early_stopping
self.prenet = Prenet(
n_mel_channels * n_frames_per_step,
[prenet_dim, prenet_dim])
self.attention_rnn = nn.LSTMCell(
prenet_dim + encoder_embedding_dim,
attention_rnn_dim)
self.attention_layer = Attention(
attention_rnn_dim, encoder_embedding_dim,
attention_dim, attention_location_n_filters,
attention_location_kernel_size)
self.decoder_rnn = nn.LSTMCell(
attention_rnn_dim + encoder_embedding_dim,
decoder_rnn_dim, 1)
self.linear_projection = LinearNorm(
decoder_rnn_dim + encoder_embedding_dim,
n_mel_channels * n_frames_per_step)
self.gate_layer = LinearNorm(
decoder_rnn_dim + encoder_embedding_dim, 1,
bias=True, w_init_gain='sigmoid')
def get_go_frame(self, memory):
""" Gets all zeros frames to use as first decoder input
PARAMS
------
memory: decoder outputs
RETURNS
-------
decoder_input: all zeros frames
"""
B = memory.size(0)
dtype = memory.dtype
device = memory.device
decoder_input = torch.zeros(
B, self.n_mel_channels*self.n_frames_per_step,
dtype=dtype, device=device)
return decoder_input
def initialize_decoder_states(self, memory):
""" Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
and stores processed memory
PARAMS
------
memory: Encoder outputs
mask: Mask for padded data if training, expects None for inference
"""
B = memory.size(0)
MAX_TIME = memory.size(1)
dtype = memory.dtype
device = memory.device
attention_hidden = torch.zeros(
B, self.attention_rnn_dim, dtype=dtype, device=device)
attention_cell = torch.zeros(
B, self.attention_rnn_dim, dtype=dtype, device=device)
decoder_hidden = torch.zeros(
B, self.decoder_rnn_dim, dtype=dtype, device=device)
decoder_cell = torch.zeros(
B, self.decoder_rnn_dim, dtype=dtype, device=device)
attention_weights = torch.zeros(
B, MAX_TIME, dtype=dtype, device=device)
attention_weights_cum = torch.zeros(
B, MAX_TIME, dtype=dtype, device=device)
attention_context = torch.zeros(
B, self.encoder_embedding_dim, dtype=dtype, device=device)
processed_memory = self.attention_layer.memory_layer(memory)
return (attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, processed_memory)
def parse_decoder_inputs(self, decoder_inputs):
""" Prepares decoder inputs, i.e. mel outputs
PARAMS
------
decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs
RETURNS
-------
inputs: processed decoder inputs
"""
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2)
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
""" Prepares decoder outputs for output
PARAMS
------
mel_outputs:
gate_outputs: gate output energies
alignments:
RETURNS
-------
mel_outputs:
        gate_outputs: gate output energies
alignments:
"""
# (T_out, B) -> (B, T_out)
alignments = alignments.transpose(0, 1).contiguous()
# (T_out, B) -> (B, T_out)
gate_outputs = gate_outputs.transpose(0, 1).contiguous()
# (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
mel_outputs = mel_outputs.transpose(0, 1).contiguous()
# decouple frames per step
shape = (mel_outputs.shape[0], -1, self.n_mel_channels)
mel_outputs = mel_outputs.view(*shape)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
return mel_outputs, gate_outputs, alignments
def decode(self, decoder_input, attention_hidden, attention_cell,
decoder_hidden, decoder_cell, attention_weights,
attention_weights_cum, attention_context, memory,
processed_memory, mask):
""" Decoder step using stored states, attention and memory
PARAMS
------
decoder_input: previous mel output
RETURNS
-------
mel_output:
gate_output: gate output energies
attention_weights:
"""
cell_input = torch.cat((decoder_input, attention_context), -1)
attention_hidden, attention_cell = self.attention_rnn(
cell_input, (attention_hidden, attention_cell))
attention_hidden = F.dropout(
attention_hidden, self.p_attention_dropout, self.training)
attention_weights_cat = torch.cat(
(attention_weights.unsqueeze(1),
attention_weights_cum.unsqueeze(1)), dim=1)
attention_context, attention_weights = self.attention_layer(
attention_hidden, memory, processed_memory,
attention_weights_cat, mask)
attention_weights_cum += attention_weights
decoder_input = torch.cat(
(attention_hidden, attention_context), -1)
decoder_hidden, decoder_cell = self.decoder_rnn(
decoder_input, (decoder_hidden, decoder_cell))
decoder_hidden = F.dropout(
decoder_hidden, self.p_decoder_dropout, self.training)
decoder_hidden_attention_context = torch.cat(
(decoder_hidden, attention_context), dim=1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return (decoder_output, gate_prediction, attention_hidden,
attention_cell, decoder_hidden, decoder_cell, attention_weights,
attention_weights_cum, attention_context)
@torch.jit.ignore
def forward(self, memory, decoder_inputs, memory_lengths):
""" Decoder forward pass for training
PARAMS
------
memory: Encoder outputs
decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs
memory_lengths: Encoder output lengths for attention masking.
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory).unsqueeze(0)
decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
decoder_inputs = self.prenet(decoder_inputs)
mask = get_mask_from_lengths(memory_lengths)
(attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory) = self.initialize_decoder_states(memory)
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_inputs.size(0) - 1:
decoder_input = decoder_inputs[len(mel_outputs)]
(mel_output,
gate_output,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context) = self.decode(decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output.squeeze()]
alignments += [attention_weights]
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
torch.stack(mel_outputs),
torch.stack(gate_outputs),
torch.stack(alignments))
return mel_outputs, gate_outputs, alignments
@torch.jit.export
def infer(self, memory, memory_lengths):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
mask = get_mask_from_lengths(memory_lengths)
(attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory) = self.initialize_decoder_states(memory)
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device=memory.device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device=memory.device)
mel_outputs, gate_outputs, alignments = (
torch.zeros(1), torch.zeros(1), torch.zeros(1))
first_iter = True
while True:
decoder_input = self.prenet(decoder_input)
(mel_output,
gate_output,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context) = self.decode(decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
if first_iter:
mel_outputs = mel_output.unsqueeze(0)
gate_outputs = gate_output
alignments = attention_weights
first_iter = False
else:
mel_outputs = torch.cat(
(mel_outputs, mel_output.unsqueeze(0)), dim=0)
gate_outputs = torch.cat((gate_outputs, gate_output), dim=0)
alignments = torch.cat((alignments, attention_weights), dim=0)
dec = torch.le(torch.sigmoid(gate_output),
self.gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if self.early_stopping and torch.sum(not_finished) == 0:
break
if len(mel_outputs) == self.max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments, mel_lengths
class Tacotron2(nn.Module):
def __init__(self, mask_padding, n_mel_channels,
n_symbols, symbols_embedding_dim, encoder_kernel_size,
encoder_n_convolutions, encoder_embedding_dim,
attention_rnn_dim, attention_dim, attention_location_n_filters,
attention_location_kernel_size, n_frames_per_step,
decoder_rnn_dim, prenet_dim, max_decoder_steps, gate_threshold,
p_attention_dropout, p_decoder_dropout,
postnet_embedding_dim, postnet_kernel_size,
postnet_n_convolutions, decoder_no_early_stopping):
super(Tacotron2, self).__init__()
self.mask_padding = mask_padding
self.n_mel_channels = n_mel_channels
self.n_frames_per_step = n_frames_per_step
self.embedding = nn.Embedding(n_symbols, symbols_embedding_dim)
std = sqrt(2.0 / (n_symbols + symbols_embedding_dim))
val = sqrt(3.0) * std # uniform bounds for std
self.embedding.weight.data.uniform_(-val, val)
self.encoder = Encoder(encoder_n_convolutions,
encoder_embedding_dim,
encoder_kernel_size)
self.decoder = Decoder(n_mel_channels, n_frames_per_step,
encoder_embedding_dim, attention_dim,
attention_location_n_filters,
attention_location_kernel_size,
attention_rnn_dim, decoder_rnn_dim,
prenet_dim, max_decoder_steps,
gate_threshold, p_attention_dropout,
p_decoder_dropout,
not decoder_no_early_stopping)
self.postnet = Postnet(n_mel_channels, postnet_embedding_dim,
postnet_kernel_size,
postnet_n_convolutions)
def parse_batch(self, batch):
text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
return (
(text_padded, input_lengths, mel_padded, max_len, output_lengths),
(mel_padded, gate_padded))
def parse_output(self, outputs, output_lengths):
# type: (List[Tensor], Tensor) -> List[Tensor]
if self.mask_padding and output_lengths is not None:
mask = get_mask_from_lengths(output_lengths)
mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
outputs[0].masked_fill_(mask, 0.0)
outputs[1].masked_fill_(mask, 0.0)
outputs[2].masked_fill_(mask[:, 0, :], 1e3) # gate energies
return outputs
def forward(self, inputs):
inputs, input_lengths, targets, max_len, output_lengths = inputs
input_lengths, output_lengths = input_lengths.data, output_lengths.data
embedded_inputs = self.embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder(embedded_inputs, input_lengths)
mel_outputs, gate_outputs, alignments = self.decoder(
encoder_outputs, targets, memory_lengths=input_lengths)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
output_lengths)
def infer(self, inputs, input_lengths):
embedded_inputs = self.embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder.infer(embedded_inputs, input_lengths)
mel_outputs, gate_outputs, alignments, mel_lengths = self.decoder.infer(
encoder_outputs, input_lengths)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
BS = mel_outputs_postnet.size(0)
alignments = alignments.unfold(1, BS, BS).transpose(0,2)
return mel_outputs_postnet, mel_lengths, alignments
| TensorRT-master | demo/Tacotron2/tacotron2/model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from torch import nn
class Tacotron2Loss(nn.Module):
def __init__(self):
super(Tacotron2Loss, self).__init__()
def forward(self, model_output, targets):
mel_target, gate_target = targets[0], targets[1]
mel_target.requires_grad = False
gate_target.requires_grad = False
gate_target = gate_target.view(-1, 1)
mel_out, mel_out_postnet, gate_out, _ = model_output
gate_out = gate_out.view(-1, 1)
mel_loss = nn.MSELoss()(mel_out, mel_target) + \
nn.MSELoss()(mel_out_postnet, mel_target)
gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)
return mel_loss + gate_loss
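# --- Hedged usage sketch (not part of the original file) ---
# model_output is the 4-tuple returned by Tacotron2.forward and targets is the
# (mel_padded, gate_padded) pair produced by the data pipeline; the variable
# names below are illustrative.
#
#   criterion = Tacotron2Loss()
#   y_pred = model(x)            # (mel_out, mel_out_postnet, gate_out, alignments)
#   loss = criterion(y_pred, y)  # y == (mel_padded, gate_padded)
#   loss.backward()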
| TensorRT-master | demo/Tacotron2/tacotron2/loss_function.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from tacotron2.text import symbols
def parse_tacotron2_args(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help)
# misc parameters
parser.add_argument('--mask-padding', default=False, type=bool,
help='Use mask padding')
parser.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
# symbols parameters
    # Use a separate name for the argument group so we do not shadow the
    # imported symbols list.
    len_symbols = len(symbols)
    symbols_group = parser.add_argument_group('symbols parameters')
    symbols_group.add_argument('--n-symbols', default=len_symbols, type=int,
                               help='Number of symbols in dictionary')
    symbols_group.add_argument('--symbols-embedding-dim', default=512, type=int,
                               help='Input embedding dimension')
# encoder parameters
encoder = parser.add_argument_group('encoder parameters')
encoder.add_argument('--encoder-kernel-size', default=5, type=int,
help='Encoder kernel size')
encoder.add_argument('--encoder-n-convolutions', default=3, type=int,
help='Number of encoder convolutions')
encoder.add_argument('--encoder-embedding-dim', default=512, type=int,
help='Encoder embedding dimension')
# decoder parameters
decoder = parser.add_argument_group('decoder parameters')
decoder.add_argument('--n-frames-per-step', default=1,
type=int,
help='Number of frames processed per step') # currently only 1 is supported
decoder.add_argument('--decoder-rnn-dim', default=1024, type=int,
help='Number of units in decoder LSTM')
decoder.add_argument('--prenet-dim', default=256, type=int,
help='Number of ReLU units in prenet layers')
decoder.add_argument('--max-decoder-steps', default=2000, type=int,
help='Maximum number of output mel spectrograms')
decoder.add_argument('--gate-threshold', default=0.5, type=float,
help='Probability threshold for stop token')
decoder.add_argument('--p-attention-dropout', default=0.1, type=float,
help='Dropout probability for attention LSTM')
decoder.add_argument('--p-decoder-dropout', default=0.1, type=float,
help='Dropout probability for decoder LSTM')
    decoder.add_argument('--decoder-no-early-stopping', action='store_true',
                         help='Do not stop decoding early once all samples are finished')
# attention parameters
attention = parser.add_argument_group('attention parameters')
attention.add_argument('--attention-rnn-dim', default=1024, type=int,
help='Number of units in attention LSTM')
attention.add_argument('--attention-dim', default=128, type=int,
help='Dimension of attention hidden representation')
# location layer parameters
location = parser.add_argument_group('location parameters')
location.add_argument(
'--attention-location-n-filters', default=32, type=int,
help='Number of filters for location-sensitive attention')
location.add_argument(
'--attention-location-kernel-size', default=31, type=int,
help='Kernel size for location-sensitive attention')
# Mel-post processing network parameters
postnet = parser.add_argument_group('postnet parameters')
postnet.add_argument('--postnet-embedding-dim', default=512, type=int,
help='Postnet embedding dimension')
postnet.add_argument('--postnet-kernel-size', default=5, type=int,
help='Postnet kernel size')
postnet.add_argument('--postnet-n-convolutions', default=5, type=int,
help='Number of postnet convolutions')
return parser
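# --- Hedged usage sketch (not part of the original file) ---
# parse_tacotron2_args extends an existing parser, so a typical caller first
# builds a parent parser and then layers the model arguments on top:
#
#   parent = argparse.ArgumentParser(add_help=False)
#   parser = parse_tacotron2_args(parent, add_help=True)
#   args = parser.parse_args()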
| TensorRT-master | demo/Tacotron2/tacotron2/arg_parser.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
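# --- Hedged usage sketch (not part of the original file) ---
# The dictionary path below is an illustrative assumption; any file in CMUdict
# format will do.
#
#   cmudict = CMUDict('data/cmudict_dictionary')
#   print(len(cmudict))             # number of entries kept
#   print(cmudict.lookup('hello'))  # list of ARPAbet pronunciations, or None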
| TensorRT-master | demo/Tacotron2/tacotron2/text/cmudict.py |
""" from https://github.com/keithito/tacotron """
import re
from tacotron2.text import cleaners
from tacotron2.text.symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
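# Hedged usage sketch (not part of the original file). The cleaner name comes from
# cleaners.py and the ARPAbet fragment follows the docstring example above.
#
#   seq = text_to_sequence("Turn left on {HH AW1 S S T AH0 N} Street.", ["english_cleaners"])
#   assert sequence_to_text(seq).startswith("turn left on")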
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
    return s in _symbol_to_id and s != '_' and s != '~'
| TensorRT-master | demo/Tacotron2/tacotron2/text/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
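# Hedged examples (untested sketches based on tracing the regex passes above):
#   normalize_numbers("$3.50")   -> "three dollars, fifty cents"
#   normalize_numbers("in 2008") -> "in two thousand eight"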
| TensorRT-master | demo/Tacotron2/tacotron2/text/numbers.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text
that has been run through Unidecode. For other data, you can modify _letters.
See TRAINING_DATA.md for details.
'''
from tacotron2.text import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
| TensorRT-master | demo/Tacotron2/tacotron2/text/symbols.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecode(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
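# Hedged example (untested sketch of the full English pipeline above):
#   english_cleaners("Dr. Smith paid $3 in 2008.")
#   -> "doctor smith paid three dollars in two thousand eight."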
| TensorRT-master | demo/Tacotron2/tacotron2/text/cleaners.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import onnx_graphsurgeon
from setuptools import setup, find_packages
def no_publish():
blacklist = ["register"]
for cmd in blacklist:
if cmd in sys.argv:
raise RuntimeError('Command "{}" blacklisted'.format(cmd))
REQUIRED_PACKAGES = [
"numpy",
"onnx",
]
def main():
no_publish()
setup(
name="onnx_graphsurgeon",
version=onnx_graphsurgeon.__version__,
description="ONNX GraphSurgeon",
long_description=open("README.md", "r", encoding="utf-8").read(),
license="Apache 2.0",
url="https://github.com/nvidia/tensorrt/tools/onnx-graphsurgeon",
author="NVIDIA",
author_email="[email protected]",
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
zip_safe=True,
)
if __name__ == "__main__":
main()
| TensorRT-master | tools/onnx-graphsurgeon/setup.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.logger.logger import G_LOGGER
from onnx_graphsurgeon.ir.tensor import Tensor, Constant, Variable
from onnx_graphsurgeon.ir.graph import Graph
from onnx_graphsurgeon.ir.node import Node
from onnx_graphsurgeon.importers.onnx_importer import OnnxImporter
G_LOGGER.severity = G_LOGGER.ULTRA_VERBOSE
from collections import OrderedDict
import onnx.numpy_helper
from typing import List
import numpy as np
import onnx
import os
TEST_ROOT = os.path.realpath(os.path.dirname(__file__))
class Model(object):
def __init__(
self,
path: str,
inputs: List[Tensor],
outputs: List[Tensor],
nodes: List[Node],
opset: int = None,
):
self.path = path
self.inputs = inputs
self.outputs = outputs
self.nodes = nodes
self.opset = opset
def load(self):
return onnx.load(self.path)
def assert_equal(self, graph: Graph):
assert graph.inputs == self.inputs
G_LOGGER.debug("Graph inputs matched")
# Break down fields to make debugging failures easier.
for actual, expected in zip(graph.nodes, self.nodes):
def check_tensor_io(actensor, extensor):
def check_list(aclist, exlist):
G_LOGGER.debug("Actual node list: {:}\n\nExpected node list: {:}".format(aclist, exlist))
assert len(aclist) == len(exlist)
for acnode, exnode in zip(aclist, exlist):
assert acnode == exnode
G_LOGGER.debug("Checking tensor: {:} inputs".format(actensor.name))
check_list(actensor.inputs, extensor.inputs)
G_LOGGER.debug("Checking tensor: {:} outputs".format(actensor.name))
check_list(actensor.outputs, extensor.outputs)
G_LOGGER.debug("Actual Node: {:}\n\nExpected Node: {:}".format(actual, expected))
assert actual.op == expected.op
assert actual.inputs == expected.inputs
# Check I/O of input tensors
for acinp, exinp in zip(actual.inputs, expected.inputs):
check_tensor_io(acinp, exinp)
assert actual.outputs == expected.outputs
# Check I/O of output tensors
for acout, exout in zip(actual.outputs, expected.outputs):
check_tensor_io(acout, exout)
assert actual.name == expected.name
assert len(actual.attrs) == len(expected.attrs)
for (ackey, acval), (exkey, exval) in zip(actual.attrs.items(), expected.attrs.items()):
assert ackey == exkey
assert acval == exval
assert actual == expected
G_LOGGER.debug("Graph nodes matched")
assert graph.outputs == self.outputs
G_LOGGER.debug("Graph outputs matched")
def __str__(self):
return os.path.basename(self.path)
def identity_model():
path = os.path.join(TEST_ROOT, "models", "identity.onnx")
model = onnx.load(path)
x = Variable(name="x", dtype=np.float32, shape=(1, 1, 2, 2))
y = Variable(name="y", dtype=np.float32, shape=(1, 1, 2, 2))
node = Node(op="Identity", inputs=[x], outputs=[y])
return Model(path, inputs=[x], outputs=[y], nodes=[node], opset=OnnxImporter.get_opset(model))
def dim_param_model():
path = os.path.join(TEST_ROOT, "models", "dim_param.onnx")
model = onnx.load(path)
x = Variable(name="Input:0", dtype=np.float32, shape=("dim0", 16, 128))
y = Variable(name="Output:0", dtype=np.float32, shape=("dim0", 16, 128))
node = Node(op="Identity", inputs=[x], outputs=[y])
return Model(path, inputs=[x], outputs=[y], nodes=[node], opset=OnnxImporter.get_opset(model))
def lstm_model():
path = os.path.join(TEST_ROOT, "models", "lstm.onnx")
model = onnx.load(path)
onnx_graph = model.graph
def load_initializer(index: int) -> np.ndarray:
return onnx.numpy_helper.to_array(onnx_graph.initializer[index])
# Optional inputs are represented by empty tensors
X = Variable(name="X", dtype=np.float32, shape=(4, 3, 6))
W = Constant(name="W", values=load_initializer(0))
R = Constant(name="R", values=load_initializer(1))
B = Constant(name="B", values=load_initializer(2))
initial_c = Constant(name="initial_c", values=load_initializer(3))
Y = Variable(name="Y", dtype=np.float32, shape=(4, 1, 3, 5))
Y_h = Variable(name="Y_h", dtype=np.float32, shape=(1, 3, 5))
Y_c = Variable(name="Y_c", dtype=np.float32, shape=(1, 3, 5))
attrs = OrderedDict()
attrs["direction"] = "forward"
attrs["hidden_size"] = 5
node = Node(
op="LSTM",
attrs=attrs,
inputs=[X, W, R, B, Variable.empty(), Variable.empty(), initial_c],
outputs=[Y, Y_h, Y_c],
)
# Initializers will not be included in the graph inputs.
return Model(
path,
inputs=[X],
outputs=[Y, Y_h, Y_c],
nodes=[node],
opset=OnnxImporter.get_opset(model),
)
def scan_model():
path = os.path.join(TEST_ROOT, "models", "scan.onnx")
model = onnx.load(path)
# Body graph
sum_in = Variable(name="sum_in", dtype=np.float32, shape=(2,))
next = Variable(name="next", dtype=np.float32, shape=(2,))
sum_out = Variable(name="sum_out", dtype=np.float32, shape=(2,))
scan_out = Variable(name="scan_out", dtype=np.float32, shape=(2,))
body_nodes = [
Node(op="Add", inputs=[sum_in, next], outputs=[sum_out]),
Node(op="Identity", inputs=[sum_out], outputs=[scan_out]),
]
body_graph = Graph(nodes=body_nodes, inputs=[sum_in, next], outputs=[sum_out, scan_out], name="scan_body")
# Outer graph
inputs = [
Variable(name="initial", dtype=np.float32, shape=(2,)),
Variable(name="x", dtype=np.float32, shape=(3, 2)),
]
outputs = [
Variable(name="y", dtype=np.float32, shape=(2,)),
Variable(name="z", dtype=np.float32, shape=(3, 2)),
]
attrs = OrderedDict()
attrs["body"] = body_graph
attrs["num_scan_inputs"] = 1
scan_node = Node(op="Scan", inputs=inputs, outputs=outputs, attrs=attrs)
return Model(
path,
inputs=inputs,
outputs=outputs,
nodes=[scan_node],
opset=OnnxImporter.get_opset(model),
)
def initializer_is_output_model():
path = os.path.join(TEST_ROOT, "models", "initializer_is_output.onnx")
model = onnx.load(path)
X = Constant(name="X", values=np.ones((64, 64), dtype=np.float32))
return Model(path, inputs=[], outputs=[X], nodes=[], opset=OnnxImporter.get_opset(model))
# Node includes a subgraph whose I/O names are the same as that of the node.
def nested_dup_names():
path = os.path.join(TEST_ROOT, "models", "nested_dup_names.onnx")
model = onnx.load(path)
# Inner
subgraph_inputs = [Variable("X", shape=(2, 2), dtype=np.float32)]
subgraph_outputs = [Variable("Y", shape=(2, 2), dtype=np.float32)]
subgraph_node = Node(op="Identity", inputs=subgraph_inputs, outputs=subgraph_outputs)
subgraph = Graph(nodes=[subgraph_node], inputs=subgraph_inputs, outputs=subgraph_outputs)
# Outer - problem happens if outer node has same I/O names as subgraph
inputs = [Variable("X", shape=(2, 2), dtype=np.float32)]
outputs = [Variable("Y", shape=(2, 2), dtype=np.float32)]
node = Node(op="Nested", inputs=inputs, outputs=outputs, attrs={"body": subgraph})
return Model(
path,
inputs=inputs,
outputs=outputs,
nodes=[node],
opset=OnnxImporter.get_opset(model),
)
def ext_weights():
path = os.path.join(TEST_ROOT, "models", "ext_weights.onnx")
model = onnx.load(path)
inputs = [Variable("input", shape=(1, 3), dtype=np.float32)]
outputs = [Variable("output", shape=(1, 3), dtype=np.float32)]
a = Constant("a", values=np.ones((1, 3), dtype=np.float32))
b = Constant("b", values=np.ones((1, 3), dtype=np.float32))
d = Constant("d", values=np.ones((1, 3), dtype=np.float32))
c = Variable("c")
e = Variable("e")
nodes = [
Node(op="Add", inputs=[a, b], outputs=[c]),
Node(op="Add", inputs=[c, d], outputs=[e]),
Node(op="Add", inputs=[inputs[0], e], outputs=outputs),
]
return Model(
path,
inputs=inputs,
outputs=outputs,
nodes=nodes,
opset=OnnxImporter.get_opset(model),
)
def const_foldable():
path = os.path.join(TEST_ROOT, "models", "const_foldable.onnx")
return Model(path, inputs=None, outputs=None, nodes=None, opset=None) # Only used for path.
def shape_cast_elision():
path = os.path.join(TEST_ROOT, "models", "shape_cast_elision.onnx")
return Model(path, inputs=None, outputs=None, nodes=None, opset=None) # Only used for path.
| TensorRT-master | tools/onnx-graphsurgeon/tests/onnx_models.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from onnx_graphsurgeon.ir.node import Node
from onnx_graphsurgeon.ir.tensor import Constant, Variable
from onnx_graphsurgeon.logger.logger import G_LOGGER
G_LOGGER.severity = G_LOGGER.ULTRA_VERBOSE
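# TensorBaseTests is a mixin: concrete subclasses (TestVariable/TestConstant below) are
# expected to provide self.tensor, self.input_node, and self.output_node in setup_method.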
class TensorBaseTests(object):
def test_can_convert_in_place_to_constant(self):
tensor = self.tensor.to_constant(values=np.ones((1, 3, 5, 5), dtype=np.float64))
assert tensor is self.tensor
assert isinstance(tensor, Constant)
assert isinstance(self.input_node.outputs[0], Constant)
assert isinstance(self.output_node.inputs[0], Constant)
assert tensor.shape == (1, 3, 5, 5)
assert tensor.dtype == np.float64
assert np.all(self.input_node.outputs[0].values == tensor.values)
assert np.all(self.output_node.inputs[0].values == tensor.values)
def test_can_convert_in_place_to_variable(self):
tensor = self.tensor.to_variable(dtype=np.float32, shape=(1, 3, 224, 224))
assert tensor is self.tensor
assert isinstance(tensor, Variable)
assert isinstance(self.input_node.outputs[0], Variable)
assert tensor.dtype == np.float32
assert tensor.shape == (1, 3, 224, 224)
assert self.input_node.outputs[0].dtype == tensor.dtype
assert self.input_node.outputs[0].shape == tensor.shape
def test_equals(self):
assert self.tensor == self.tensor
def test_set_inputs_updates_old_inputs(self):
dummy = Node(op="dummy")
self.tensor.inputs = [dummy]
assert len(self.input_node.outputs) == 0
assert dummy.outputs[0] == self.tensor
def test_set_outputs_updates_old_outputs(self):
dummy = Node(op="dummy")
self.tensor.outputs = [dummy]
assert len(self.output_node.inputs) == 0
assert dummy.inputs[0] == self.tensor
def test_can_copy_inputs_from_other_node(self):
tensor = Variable(name="other_test_tensor")
tensor.inputs = self.tensor.inputs
assert tensor.inputs == self.tensor.inputs
# Contents should be the same, but it should not just be a reference to the existing SynchronizedList
assert tensor.inputs is not self.tensor.inputs
def test_can_copy_outputs_from_other_node(self):
tensor = Variable(name="other_test_tensor")
tensor.outputs = self.tensor.outputs
assert tensor.outputs == self.tensor.outputs
assert tensor.outputs is not self.tensor.outputs
def test_i(self):
x = Variable(name="x")
y = Variable(name="y")
node = Node(op="Add", name="Input", inputs=[x], outputs=[y])
assert y.i() == x
def test_i_multiple_inputs(self):
x = Variable(name="x")
x2 = Variable(name="x2")
y = Variable(name="y")
node = Node(op="Add", name="Input", inputs=[x, x2], outputs=[y])
assert y.i() == x
assert y.i(1) == x2
def test_o(self):
x = Variable(name="x")
y = Variable(name="y")
node = Node(op="Add", name="Input", inputs=[x], outputs=[y])
assert x.o() == y
def test_o_multiple_outputs(self):
x = Variable(name="x")
y = Variable(name="y")
y2 = Variable(name="y2")
node = Node(op="Add", name="Input", inputs=[x], outputs=[y])
node2 = Node(op="Add", name="Input", inputs=[x], outputs=[y2])
assert x.o() == y
assert x.o(1) == y2
class TestVariable(TensorBaseTests):
def setup_method(self):
self.tensor = Variable(name="test_tensor", dtype=np.float32, shape=(1, 3, 224, 224))
self.input_node = Node(op="Add", outputs=[self.tensor])
self.output_node = Node(op="Add", inputs=[self.tensor])
def test_equals_name_mismatch(self):
tensor = Variable(name="test_tensor0", dtype=np.float32, shape=(1, 3, 224, 224))
assert not self.tensor == tensor
class TestConstant(TensorBaseTests):
def setup_method(self):
self.tensor = Constant(name="test_tensor", values=np.ones((1, 3, 5, 5), dtype=np.float64))
self.input_node = Node(
op="Add", outputs=[self.tensor]
) # Doesn't make sense for Constants, but needed to make base tests happy.
self.output_node = Node(op="Add", inputs=[self.tensor])
def test_can_get_shape(self):
assert self.tensor.shape == (1, 3, 5, 5)
def test_can_get_dtype(self):
assert self.tensor.dtype == np.float64
class TestNode(object):
def setup_method(self):
self.input_tensor = Variable(name="x")
self.output_tensor = Variable(name="y")
self.node = Node(op="Add", name="Test", inputs=[self.input_tensor], outputs=[self.output_tensor])
def test_equals(self):
assert self.node == self.node
def test_equals_name_mismatch(self):
node = Node(op="Add", name="OtherTest")
assert not self.node == node
def test_equals_op_mismatch(self):
node = Node(op="Subtract", name="Test")
assert not self.node == node
def test_equals_num_inputs_mismatch(self):
node = Node(op="Subtract", name="Test")
assert not self.node == node
def test_equals_inputs_mismatch(self):
tensor = Variable(name="other_tensor")
assert not self.input_tensor == tensor
node = Node(op="Add", name="Test", inputs=[tensor])
assert not self.node == node
def test_set_inputs_updates_old_inputs(self):
dummy = Variable(name="dummy")
self.node.inputs = [dummy]
assert len(self.input_tensor.outputs) == 0
assert dummy.outputs[0] == self.node
def test_set_outputs_updates_old_outputs(self):
dummy = Variable(name="dummy")
self.node.outputs = [dummy]
assert len(self.output_tensor.inputs) == 0
assert dummy.inputs[0] == self.node
def test_can_copy_inputs_from_other_node(self):
node = Node(op="Subtract")
node.inputs = self.node.inputs
assert node.inputs == self.node.inputs
# Contents should be the same, but it should not just be a reference to the existing SynchronizedList
assert node.inputs is not self.node.inputs
def test_can_copy_outputs_from_other_node(self):
node = Node(op="Subtract")
node.outputs = self.node.outputs
assert node.outputs == self.node.outputs
assert node.outputs is not self.node.outputs
def test_i(self):
intermediate_tensor = Variable(name="intermediate")
input_node = Node(op="Add", name="Input", inputs=[self.input_tensor], outputs=[intermediate_tensor])
output_node = Node(op="Add", name="Out", inputs=[intermediate_tensor], outputs=[self.output_tensor])
assert output_node.i() == input_node
def test_i_multiple_inputs(self):
intermediate_tensor = Variable(name="intermediate")
intermediate_tensor2 = Variable(name="intermediate2")
input_node = Node(op="Add", name="Input", inputs=[self.input_tensor], outputs=[intermediate_tensor])
input_node2 = Node(op="Add", name="Input2", inputs=[self.input_tensor], outputs=[intermediate_tensor2])
output_node = Node(
op="Add", name="Out", inputs=[intermediate_tensor, intermediate_tensor2], outputs=[self.output_tensor]
)
assert output_node.i() == input_node
assert output_node.i(1) == input_node2
def test_o(self):
intermediate_tensor = Variable(name="intermediate")
input_node = Node(op="Add", name="Input", inputs=[self.input_tensor], outputs=[intermediate_tensor])
output_node = Node(op="Add", name="Out", inputs=[intermediate_tensor], outputs=[self.output_tensor])
assert input_node.o() == output_node
def test_o_multiple_outputs(self):
intermediate_tensor = Variable(name="intermediate")
intermediate_tensor2 = Variable(name="intermediate2")
input_node = Node(op="Add", name="Input", inputs=[self.input_tensor], outputs=[intermediate_tensor])
output_node = Node(op="Add", name="Out", inputs=[intermediate_tensor], outputs=[self.output_tensor])
output_node2 = Node(op="Add", name="Input2", inputs=[intermediate_tensor], outputs=[intermediate_tensor2])
assert input_node.o() == output_node
assert input_node.o(1) == output_node2
class TestNodeIO(object):
def setup_method(self, field_names):
self.tensors = [
Variable(name="test_tensor_{:}".format(i), dtype=np.float32, shape=(1, 3, 224, 224)) for i in range(10)
]
self.node = Node(op="Dummy")
def get_lists(self, field_names):
return getattr(self.node, field_names[0]), field_names[1]
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_append(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist.append(self.tensors[0])
assert nlist[0] == self.tensors[0]
assert getattr(self.tensors[0], tensor_field)[0] == self.node
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_extend(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist.extend(self.tensors)
for tensor in self.tensors:
assert tensor in nlist
assert getattr(tensor, tensor_field)[0] == self.node
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_insert(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist.append(self.tensors[1])
nlist.insert(0, self.tensors[0])
assert nlist[0] == self.tensors[0]
assert getattr(self.tensors[0], tensor_field)[0] == self.node
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_remove(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist.append(self.tensors[0])
nlist.remove(self.tensors[0])
assert len(nlist) == 0
assert len(getattr(self.tensors[0], tensor_field)) == 0
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_pop(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist.append(self.tensors[0])
tensor = nlist.pop()
assert len(nlist) == 0
assert len(getattr(tensor, tensor_field)) == 0
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_pop_index(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist.extend(self.tensors)
tensor = nlist.pop(1)
assert self.tensors[1] not in nlist
assert len(getattr(tensor, tensor_field)) == 0
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_del_index(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist.extend(self.tensors)
tensor = nlist[1]
del nlist[1]
assert self.tensors[1] not in nlist
assert len(getattr(tensor, tensor_field)) == 0
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_clear(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist.extend(self.tensors)
nlist.clear()
assert len(nlist) == 0
assert all([len(getattr(tensor, tensor_field)) == 0 for tensor in self.tensors])
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_add(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist = nlist + self.tensors
for tensor in self.tensors:
assert tensor in nlist
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_iadd(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist += self.tensors
for tensor in self.tensors:
assert tensor in nlist
assert getattr(tensor, tensor_field)[0] == self.node
@pytest.mark.parametrize("field_names", [("inputs", "outputs"), ("outputs", "inputs")])
def test_setitem(self, field_names):
nlist, tensor_field = self.get_lists(field_names)
nlist.append(self.tensors[0])
new_tensor = Variable("new_tensor")
nlist[0] = new_tensor
assert nlist[0] == new_tensor
assert len(getattr(self.tensors[0], tensor_field)) == 0
assert getattr(new_tensor, tensor_field)[0] == self.node
| TensorRT-master | tools/onnx-graphsurgeon/tests/test_ir.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.util import misc
def test_combine_dicts_second_overwrites_first():
x = {"a": 1}
y = {"a": 2}
z = misc.combine_dicts(x, y)
assert z["a"] == 2
| TensorRT-master | tools/onnx-graphsurgeon/tests/test_util.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import numpy as np
import onnx
import onnx.numpy_helper
import pytest
from onnx_graphsurgeon.exporters.onnx_exporter import OnnxExporter
from onnx_graphsurgeon.importers.onnx_importer import OnnxImporter
from onnx_graphsurgeon.ir.node import Node
from onnx_graphsurgeon.ir.tensor import Constant, LazyValues, Tensor, Variable
from onnx_models import (
dim_param_model,
ext_weights,
identity_model,
initializer_is_output_model,
lstm_model,
nested_dup_names,
scan_model,
)
class TestOnnxExporter(object):
def test_export_constant_tensor_lazy_values_to_tensor_proto(self):
name = "constant_tensor"
shape = (3, 3, 3)
dtype = np.float32
onnx_tensor = onnx.numpy_helper.from_array(np.ones(shape=shape, dtype=dtype))
tensor = Constant(name=name, values=LazyValues(onnx_tensor))
# Exporter should *not* load LazyValues into a numpy array.
onnx_tensor = OnnxExporter.export_tensor_proto(tensor)
assert isinstance(tensor._values, LazyValues)
def test_export_constant_tensor_to_tensor_proto(self):
name = "constant_tensor"
shape = (3, 224, 224)
values = np.random.random_sample(size=shape).astype(np.float32)
tensor = Constant(name=name, values=values)
onnx_tensor = OnnxExporter.export_tensor_proto(tensor)
assert onnx_tensor.name == name
assert np.all(onnx.numpy_helper.to_array(onnx_tensor) == values)
assert onnx_tensor.data_type == onnx.TensorProto.FLOAT
assert tuple(onnx_tensor.dims) == shape
def test_export_constant_tensor_to_value_info_proto(self):
name = "constant_tensor"
shape = (3, 224, 224)
values = np.random.random_sample(size=shape).astype(np.float32)
tensor = Constant(name=name, values=values)
onnx_tensor = OnnxExporter.export_value_info_proto(tensor, do_type_check=True)
assert onnx_tensor.name == name
assert onnx_tensor.type.tensor_type.elem_type == onnx.TensorProto.FLOAT
onnx_shape = []
for dim in onnx_tensor.type.tensor_type.shape.dim:
onnx_shape.append(dim.dim_value)
assert tuple(onnx_shape) == shape
def test_export_variable_tensor(self):
name = "variable_tensor"
shape = (3, 224, 224)
dtype = np.float32
tensor = Variable(dtype=dtype, shape=shape, name=name)
onnx_tensor = OnnxExporter.export_value_info_proto(tensor, do_type_check=True)
assert onnx_tensor.name == name
assert onnx_tensor.type.tensor_type.elem_type == onnx.TensorProto.FLOAT
onnx_shape = []
for dim in onnx_tensor.type.tensor_type.shape.dim:
onnx_shape.append(dim.dim_value)
assert tuple(onnx_shape) == shape
def test_export_variable_tensor_empty_dim_param(self):
shape = ("", 224, 224)
tensor = Variable(dtype=np.float32, shape=shape, name="variable_tensor")
onnx_tensor = OnnxExporter.export_value_info_proto(tensor, do_type_check=True)
onnx_shape = []
for dim in onnx_tensor.type.tensor_type.shape.dim:
onnx_shape.append(dim.dim_value if dim.HasField("dim_value") else dim.dim_param)
assert tuple(onnx_shape) == shape
# When a tensor shape is unknown, we should leave the shape field empty.
def test_export_variable_tensor_empty_shape(self):
shape = None
tensor = Variable(dtype=np.float32, shape=shape, name="variable_tensor")
onnx_tensor = OnnxExporter.export_value_info_proto(tensor, do_type_check=True)
assert not onnx_tensor.type.tensor_type.HasField("shape")
# When a dimension is unknown (None), neither dim_value nor dim_param should be set.
def test_export_variable_tensor_scalar_shape(self):
shape = [None]
tensor = Variable(dtype=np.float32, shape=shape, name="variable_tensor")
onnx_tensor = OnnxExporter.export_value_info_proto(tensor, do_type_check=True)
assert not onnx_tensor.type.tensor_type.shape.dim[0].HasField("dim_param")
assert not onnx_tensor.type.tensor_type.shape.dim[0].HasField("dim_value")
# TODO: Test subgraph export.
def test_export_node(self):
name = "TestNode"
op = "Test"
inputs = [Variable(name="input")]
outputs = [Variable(name="output")]
attrs = OrderedDict()
attrs["float_attr"] = 4.0
attrs["int_attr"] = 10
attrs["str_attr"] = "constant"
attrs["tensor_attr"] = Constant("testTensor", np.ones(shape=(1, 2, 3, 4), dtype=np.float32))
attrs["floats_attr"] = [1.0, 2.0, 3.0, 4.0]
attrs["ints_attr"] = [4, 3, 2, 1]
attrs["strings_attr"] = ["constant", "and", "variable"]
node = Node(op=op, name=name, inputs=inputs, outputs=outputs, attrs=attrs)
onnx_node = OnnxExporter.export_node(node, do_type_check=True)
assert onnx_node.name == name
assert onnx_node.op_type == op
assert onnx_node.input == ["input"]
assert onnx_node.output == ["output"]
for onnx_attr, (name, attr) in zip(onnx_node.attribute, attrs.items()):
assert onnx_attr.name == name
if isinstance(attr, float):
assert onnx_attr.f == attr
elif isinstance(attr, int):
assert onnx_attr.i == attr
elif isinstance(attr, str):
assert onnx_attr.s.decode() == attr
elif isinstance(attr, Tensor):
assert onnx_attr.t.SerializeToString() == OnnxExporter.export_tensor_proto(attr).SerializeToString()
elif isinstance(attr, list):
if isinstance(attr[0], float):
assert onnx_attr.floats == attr
elif isinstance(attr[0], int):
assert onnx_attr.ints == attr
elif isinstance(attr[0], str):
assert [s.decode() for s in onnx_attr.strings] == attr
else:
raise AssertionError(
"Unrecognized list attribute: ({:}: {:}) of type: {:}".format(name, attr, type(attr))
)
else:
raise AssertionError("Unrecognized attribute: ({:}: {:}) of type: {:}".format(name, attr, type(attr)))
# See test_importers for import correctness checks
# This function first imports an ONNX graph, and then re-exports it with no changes.
# The exported ONNX graph should exactly match the original.
@pytest.mark.parametrize(
"model",
[
identity_model(),
lstm_model(),
scan_model(),
dim_param_model(),
initializer_is_output_model(),
nested_dup_names(),
ext_weights(),
],
ids=lambda model: str(model),
)
def test_export_graph(self, model):
onnx_graph = model.load().graph
graph = OnnxImporter.import_graph(onnx_graph)
exported_onnx_graph = OnnxExporter.export_graph(graph)
reimported_graph = OnnxImporter.import_graph(exported_onnx_graph)
assert graph == reimported_graph
assert graph.opset == reimported_graph.opset
# ONNX exports the initializers in this model differently after importing - ONNX GS can't do much about this.
if model.path != lstm_model().path:
assert onnx_graph == exported_onnx_graph
| TensorRT-master | tools/onnx-graphsurgeon/tests/test_exporters.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import numpy as np
import onnx
import onnx.numpy_helper
import onnx.shape_inference
import pytest
from onnx_graphsurgeon.importers.onnx_importer import OnnxImporter
from onnx_graphsurgeon.ir.tensor import Constant, Variable
from onnx_graphsurgeon.logger.logger import G_LOGGER
from onnx_models import (
dim_param_model,
ext_weights,
identity_model,
initializer_is_output_model,
lstm_model,
nested_dup_names,
scan_model,
)
G_LOGGER.severity = G_LOGGER.ULTRA_VERBOSE
class TestOnnxImporter(object):
def test_import_variable_tensor(self):
name = "test0"
shape = (1, 2, 3, 4)
onnx_tensor = onnx.helper.make_tensor_value_info(name, onnx.TensorProto.FLOAT, shape)
tensor = OnnxImporter.import_tensor(onnx_tensor)
assert type(tensor) == Variable
assert tensor.name == name
assert tensor.dtype == np.float32
assert tuple(tensor.shape) == shape
def test_import_constant_tensor(self):
shape = (3, 3, 3)
dtype = np.float32
onnx_tensor = onnx.numpy_helper.from_array(np.ones(shape=shape, dtype=dtype))
tensor = OnnxImporter.import_tensor(onnx_tensor)
assert type(tensor) == Constant
assert tensor.dtype == dtype
assert tuple(tensor.shape) == shape
def test_import_tensor_unknown_metadata(self):
name = "test0"
onnx_tensor = onnx.helper.make_empty_tensor_value_info(name)
tensor = OnnxImporter.import_tensor(onnx_tensor)
assert type(tensor) == Variable
assert tensor.name == name
# An empty string in `dim_param` should be treated like a dynamic dimension
def test_import_empty_dim_param_tensor(self):
shape = (1, 2, "non-empty", "")
onnx_tensor = onnx.helper.make_tensor_value_info("test0", onnx.TensorProto.FLOAT, shape)
tensor = OnnxImporter.import_tensor(onnx_tensor)
assert type(tensor) == Variable
assert tuple(tensor.shape) == shape
# Sometimes, tensor shape is not known, in which case we shouldn't import it
def test_import_unknown_shape_tensor(self):
shape = None
onnx_tensor = onnx.helper.make_tensor_value_info("test0", onnx.TensorProto.FLOAT, shape)
tensor = OnnxImporter.import_tensor(onnx_tensor)
assert type(tensor) == Variable
assert tensor.shape is None
# Scalars can be represented in ONNX with a dim that includes neither a dim_param nor dim_value
def test_import_empty_dim_tensor(self):
shape = (None,)
onnx_tensor = onnx.helper.make_tensor_value_info("test0", onnx.TensorProto.FLOAT, shape)
onnx_tensor.type.tensor_type.shape.dim[0].ClearField("dim_value")
onnx_tensor.type.tensor_type.shape.dim[0].ClearField("dim_param")
tensor = OnnxImporter.import_tensor(onnx_tensor)
assert type(tensor) == Variable
assert tuple(tensor.shape) == shape
# TODO: Test all attribute types - missing graph
def test_import_node(self):
op = "Test"
inputs = ["x"]
outputs = ["y"]
float_attr = 4.0
int_attr = 10
str_attr = "constant"
tensor_vals = np.ones(shape=(1, 2, 3, 4), dtype=np.float32)
tensor_attr = onnx.numpy_helper.from_array(tensor_vals)
floats_attr = [1.0, 2.0, 3.0, 4.0]
ints_attr = [4, 3, 2, 1]
strings_attr = ["constant", "and", "variable"]
onnx_node = onnx.helper.make_node(
op,
inputs,
outputs,
float_attr=float_attr,
int_attr=int_attr,
str_attr=str_attr,
tensor_attr=tensor_attr,
floats_attr=floats_attr,
ints_attr=ints_attr,
strings_attr=strings_attr,
)
node = OnnxImporter.import_node(onnx_node, OrderedDict(), OrderedDict())
assert node.op == op
assert node.attrs["float_attr"] == float_attr
assert node.attrs["int_attr"] == int_attr
assert node.attrs["str_attr"] == str_attr
# Tensor should turn into a Constant
assert np.all(node.attrs["tensor_attr"].values == tensor_vals)
assert node.attrs["floats_attr"] == floats_attr
assert node.attrs["ints_attr"] == ints_attr
assert node.attrs["strings_attr"] == strings_attr
@pytest.mark.parametrize(
"model",
[
identity_model(),
lstm_model(),
scan_model(),
dim_param_model(),
initializer_is_output_model(),
nested_dup_names(),
ext_weights(),
],
ids=lambda model: str(model),
)
def test_import_graph(self, model):
graph = OnnxImporter.import_graph(model.load().graph)
model.assert_equal(graph)
def test_import_graph_value_info(self):
model = onnx.shape_inference.infer_shapes(identity_model().load())
graph = OnnxImporter.import_graph(model.graph)
tensors = graph.tensors()
assert all(
[type(tensor) == Variable and tensor.dtype is not None and tensor.shape for tensor in tensors.values()]
)
def test_import_graph_tensor_map_preserved(self):
model = identity_model()
tensor_map = OrderedDict()
graph = OnnxImporter.import_graph(model.load().graph, tensor_map=tensor_map)
assert len(tensor_map) == 0
model.assert_equal(graph)
def test_import_graph_with_initializer(self):
model = lstm_model()
graph = OnnxImporter.import_graph(model.load().graph)
model.assert_equal(graph)
def test_import_graph_with_dim_param(self):
model = dim_param_model()
graph = OnnxImporter.import_graph(model.load().graph)
model.assert_equal(graph)
| TensorRT-master | tools/onnx-graphsurgeon/tests/test_importers.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.importers.onnx_importer import OnnxImporter
import onnx_graphsurgeon as gs
from onnx_models import identity_model
import tempfile
import onnx
class TestApi(object):
def setup_method(self):
self.imported_graph = OnnxImporter.import_graph(identity_model().load().graph)
def test_import(self):
graph = gs.import_onnx(onnx.load(identity_model().path))
assert graph == self.imported_graph
def test_export(self):
with tempfile.NamedTemporaryFile() as f:
onnx_model = gs.export_onnx(self.imported_graph)
assert onnx_model
assert OnnxImporter.import_graph(onnx_model.graph) == self.imported_graph
| TensorRT-master | tools/onnx-graphsurgeon/tests/test_api.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.logger.logger import G_LOGGER
import onnx_graphsurgeon as gs
import subprocess as sp
import numpy as np
import onnxruntime
import tempfile
import pytest
import onnx
import sys
import os
ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir))
EXAMPLES_ROOT = os.path.join(ROOT_DIR, "examples")
class Artifact(object):
def __init__(self, name, infer=True):
self.name = name
self.infer = infer
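# Each entry pairs an example directory under examples/ with the artifacts its README
# commands are expected to produce; artifacts with infer=True are also run through onnxruntime.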
EXAMPLES = [
("01_creating_a_model", [Artifact("test_globallppool.onnx")]),
("02_creating_a_model_with_initializer", [Artifact("test_conv.onnx")]),
("03_isolating_a_subgraph", [Artifact("model.onnx"), Artifact("subgraph.onnx")]),
("04_modifying_a_model", [Artifact("model.onnx"), Artifact("modified.onnx")]),
("05_folding_constants", [Artifact("model.onnx"), Artifact("folded.onnx")]),
("06_removing_nodes", [Artifact("model.onnx", infer=False), Artifact("removed.onnx")]),
("07_creating_a_model_with_the_layer_api", [Artifact("model.onnx")]),
("08_replacing_a_subgraph", [Artifact("model.onnx"), Artifact("replaced.onnx")]),
("09_shape_operations_with_the_layer_api", [Artifact("model.onnx")]),
]
# Extract any ``` blocks from the README
def load_commands_from_readme(readme):
def ignore_command(cmd):
return "pip" in cmd
commands = []
with open(readme, "r") as f:
in_command_block = False
for line in f.readlines():
if not in_command_block and "```bash" in line:
in_command_block = True
elif in_command_block:
if "```" in line:
in_command_block = False
elif not ignore_command(line):
commands.append(line.strip())
return commands
def infer_model(path):
model = onnx.load(path)
graph = gs.import_onnx(model)
feed_dict = {}
for tensor in graph.inputs:
shape = tuple(dim if dim > 0 else 1 for dim in tensor.shape)
feed_dict[tensor.name] = np.random.random_sample(size=shape).astype(tensor.dtype)
output_names = [out.name for out in graph.outputs]
sess = onnxruntime.InferenceSession(model.SerializeToString())
outputs = sess.run(output_names, feed_dict)
G_LOGGER.info("Inference outputs: {:}".format(outputs))
return outputs
@pytest.mark.parametrize("example_dir,artifacts", EXAMPLES)
def test_examples(example_dir, artifacts):
example_dir = os.path.join(EXAMPLES_ROOT, example_dir)
readme = os.path.join(example_dir, "README.md")
commands = load_commands_from_readme(readme)
for command in commands:
G_LOGGER.info(command)
assert sp.run(["bash", "-c", command], cwd=example_dir, env={"PYTHONPATH": ROOT_DIR}).returncode == 0
for artifact in artifacts:
artifact_path = os.path.join(example_dir, artifact.name)
assert os.path.exists(artifact_path)
if artifact.infer:
assert infer_model(artifact_path)
os.remove(artifact_path)
| TensorRT-master | tools/onnx-graphsurgeon/tests/test_examples.py |
| TensorRT-master | tools/onnx-graphsurgeon/tests/ir/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy as np
import onnx_graphsurgeon as gs
import pytest
from onnx_graphsurgeon.ir.graph import Graph
from onnx_graphsurgeon.ir.node import Node
from onnx_graphsurgeon.ir.tensor import Constant, LazyValues, Variable
from onnx_graphsurgeon.logger.logger import G_LOGGER
from onnx_graphsurgeon.util.exception import OnnxGraphSurgeonException
from onnx_graphsurgeon.util.misc import SynchronizedList
from onnx_models import const_foldable, shape_cast_elision
G_LOGGER.severity = G_LOGGER.ULTRA_VERBOSE
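# The helpers below use Graph.register() to attach small layer-builder methods to Graph,
# so tests can write e.g. graph.add(a, b) or graph.shape(x) instead of assembling Nodes by hand.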
@Graph.register()
def shape(self, inp):
return self.layer(op="Shape", inputs=[inp], outputs=["shape_out"])[0]
@Graph.register()
def constant(self, values):
return self.layer(op="Constant", inputs=[], outputs=["constant_out"], attrs={"value": Constant("values", values)})[
0
]
@Graph.register()
def identity(self, inp):
out = self.layer(op="Identity", inputs=[inp], outputs=["identity_out"])[0]
out.dtype = inp.dtype
return out
@Graph.register()
def add(self, a, b, name=None):
outputs = [Variable(name=name)] if name else ["add_out"]
out = self.layer(op="Add", inputs=[a, b], outputs=outputs)[0]
out.dtype = a.dtype or b.dtype
return out
# A fake op that can be used to ensure things work even when there is an invalid
# node present in the model.
@Graph.register()
def fake(self, inp, name=None):
outputs = [Variable(name=name)] if name else ["fake_out"]
out = self.layer(op="Fake", inputs=[inp], outputs=outputs)[0]
out.dtype = inp.dtype
return out
@gs.Graph.register()
def gather(self, data, indices):
return self.layer(op="Gather", inputs=[data, indices], outputs=["gather_out"])[0]
@gs.Graph.register()
def slice(self, data, starts=None, ends=None, axes=None, steps=None):
inputs = []
for inp in [data, starts, ends, axes, steps]:
if inp is None:
break
inputs.append(inp)
return self.layer(op="Slice", inputs=inputs, outputs=["slice_out"])[0]
@gs.Graph.register()
def nested(self, inp, graph):
return self.layer(op="Nested", inputs=[inp], outputs=["nested_out"], attrs={"body": graph})[0]
@gs.Graph.register()
def if_op(self, cond, then_graph, else_graph):
return self.layer(
op="If", inputs=[cond], outputs=["if_out"], attrs={"then_branch": then_graph, "else_branch": else_graph}
)[0]
# Generates a graph where an outer node has no outputs except
# within the subgraph. ONNX-GS should recognize that the node
# is being used, and should not remove it during cleanup().
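# Structure built below:
#   input -> Identity -> id_out            (id_out is consumed only inside the subgraph)
#   input -> Nested(body=subgraph) -> nested_out
#   subgraph: id_out -> Identity -> subgraph_id_out -> Identity -> subgraph_out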
def make_nested_graph():
inp = Variable("input")
id_out = Variable("id_out")
identity = Node(op="Identity", inputs=[inp], outputs=[id_out])
# Subgraph outputs come from the parent node, but nodes in the subgraph
# can also use tensors produced in the outer graph.
subgraph_inputs = [Variable("subgraph_inp")]
subgraph_id_out = Variable("subgraph_id_out")
subgraph_outputs = [Variable("subgraph_out")]
subgraph_identity0 = Node(op="Identity", inputs=[id_out], outputs=[subgraph_id_out])
subgraph_identity1 = Node(op="Identity", inputs=[subgraph_id_out], outputs=subgraph_outputs)
subgraph = Graph(nodes=[subgraph_identity0, subgraph_identity1], inputs=subgraph_inputs, outputs=subgraph_outputs)
nested_out = Variable("nested_out")
nested_node = Node(op="Nested", attrs={"body": subgraph}, inputs=[inp], outputs=[nested_out])
return Graph(nodes=[identity, nested_node], inputs=[inp], outputs=[nested_out])
@pytest.fixture
def nested_graph():
yield make_nested_graph()
class TestBasic(object):
def test_generate_name(self):
graph = Graph()
names = set()
num_names = 100
# This function should not return the same name more than once
for idx in range(num_names):
names.add(graph._generate_name("name"))
assert len(names) == 100
def test_equal(self, nested_graph):
assert nested_graph == nested_graph
def test_equal_inputs_unequal(self):
g0 = make_nested_graph()
g1 = make_nested_graph()
g0.inputs.append(Variable("test"))
assert not (g0 == g1)
def test_equal_outputs_unequal(self):
g0 = make_nested_graph()
g1 = make_nested_graph()
g0.outputs.append(Variable("test"))
assert not (g0 == g1)
def test_equal_nested_unequal(self):
g0 = make_nested_graph()
g1 = make_nested_graph()
# Changing the nested subgraph should make the graphs unequal
g0.nodes[1].inputs[0].name = "subgraph_inp_modified"
assert not (g0 == g1)
class TestRegister(object):
def test_register(self):
@Graph.register()
def fake_add(self, a, b):
return self.layer(op="Add", inputs=[a, b], outputs=["add_out"])
graph = Graph()
[output] = graph.fake_add("a", "b")
assert "add_out" in output.name
assert len(graph.nodes) == 1
assert graph.nodes[-1].op == "Add"
def test_register_opset(self):
@Graph.register(opsets=[11])
def fake_add(self, a, b):
return self.layer(op="Add", inputs=[a, b], outputs=["add_out"])
@Graph.register(opsets=[10])
def fake_add(self, a, b):
return self.layer(op="Add-10", inputs=[a, b], outputs=["add_out"])
graph = Graph()
[output] = graph.fake_add("a", "b")
assert "add_out" in output.name
assert len(graph.nodes) == 1
assert graph.nodes[-1].op == "Add"
graph_opset10 = Graph(opset=10)
[output] = graph_opset10.fake_add("a", "b")
assert "add_out" in output.name
assert len(graph_opset10.nodes) == 1
assert graph_opset10.nodes[-1].op == "Add-10"
class TestLayer(object):
def test_layer_with_attrs(self):
graph = Graph()
outputs = graph.layer(op="Add", name="node", attrs={"fake_attr": 0})
assert len(graph.nodes) == 1
assert graph.nodes[-1].op == "Add"
assert graph.nodes[-1].name == "node"
assert graph.nodes[-1].attrs["fake_attr"] == 0
def test_layer_with_tensors(self):
x0 = Variable("x0")
x1 = Variable("x1")
y0 = Variable("y0")
y1 = Variable("y1")
graph = Graph()
outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])
assert outputs == [y0, y1]
assert len(graph.nodes) == 1
assert graph.nodes[-1].inputs == [x0, x1]
assert graph.nodes[-1].outputs == outputs
def test_layer_with_strings(self):
x0 = "x0"
x1 = "x1"
y0 = "y0"
y1 = "y1"
graph = Graph()
outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])
assert len(graph.nodes) == 1
assert all(prefix in tensor.name for prefix, tensor in zip([x0, x1], graph.nodes[-1].inputs))
assert all(prefix in tensor.name for prefix, tensor in zip([y0, y1], graph.nodes[-1].outputs))
assert graph.nodes[-1].outputs == outputs
def test_layer_with_arrays(self):
x0 = np.array([1])
x1 = np.array([1])
y0 = "y0"
y1 = "y1"
graph = Graph()
outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])
assert all(prefix in tensor.name for prefix, tensor in zip([y0, y1], graph.nodes[-1].outputs))
assert len(graph.nodes) == 1
assert graph.nodes[-1].inputs[0].values == x0
assert graph.nodes[-1].inputs[1].values == x1
assert graph.nodes[-1].outputs == outputs
def test_layer_with_iterables(self):
x0 = [1]
x1 = (1,)
y0 = "y0"
y1 = "y1"
graph = Graph()
outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])
assert all(prefix in tensor.name for prefix, tensor in zip([y0, y1], graph.nodes[-1].outputs))
assert len(graph.nodes) == 1
assert graph.nodes[-1].inputs[0].values == x0
assert graph.nodes[-1].inputs[1].values == x1
assert graph.nodes[-1].outputs == outputs
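# Linear graph used by the tensor-map tests below:
#   x -> Test0 -> intermediate0 -> Test1 -> intermediate1 -> Test2 -> intermediate2 -> Test3 -> y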
def tensors_linear_graph():
inputs = [Variable(name="x")]
intermediate0 = Variable(name="intermediate0")
intermediate1 = Variable(name="intermediate1")
intermediate2 = Variable(name="intermediate2")
outputs = [Variable(name="y")]
tensors = inputs + [intermediate0, intermediate1, intermediate2] + outputs
tensors = {tensor.name: tensor for tensor in tensors}
# Nodes are already in topo order here; ordering is not significant for these tests.
nodes = [
Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate0]),
Node(op="Add", name="Test1", inputs=[intermediate0], outputs=[intermediate1]),
Node(op="Add", name="Test2", inputs=[intermediate1], outputs=[intermediate2]),
Node(op="Add", name="Test3", inputs=[intermediate2], outputs=outputs),
]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs), nodes, tensors
class TestTensors(object):
# Calling `tensors()` should not modify tensors in the graph.
def test_tensors_does_not_modify_tensors(self):
graph, _, _ = tensors_linear_graph()
graph_tensors = graph.tensors()
# Generate a new graph to compare against
_, _, tensors = tensors_linear_graph()
assert set(tensors.keys()) == set(graph_tensors.keys())
for name, tensor in tensors.items():
graph_tensor = graph_tensors[name]
assert tensor == graph_tensor
assert tensor.inputs == graph_tensor.inputs
assert tensor.outputs == graph_tensor.outputs
# Check that tensors includes tensors not attached to nodes
def test_tensors_includes_non_node_tensors(self):
X = Constant("X", values=np.ones(shape=(64, 64), dtype=np.float32))
graph = Graph(inputs=[], outputs=[X])
tensor_map = graph.tensors()
assert "X" in tensor_map
assert tensor_map["X"] == X
def test_tensors_check_duplicates(self):
inputs = [Variable(name="x")]
outputs = [Variable(name="x")] # Distinct tensors with the same name
nodes = [
Node(op="Add", name="Test", inputs=inputs, outputs=outputs),
]
graph = Graph(nodes=nodes, inputs=inputs, outputs=outputs)
with pytest.raises(OnnxGraphSurgeonException):
graph.tensors(check_duplicates=True)
def test_tensors_with_duplicates_check_disabled(self):
inputs = [Variable(name="x")]
outputs = [Variable(name="x")] # Distinct tensors with the same name
nodes = [
Node(op="Add", name="Test", inputs=inputs, outputs=outputs),
]
graph = Graph(nodes=nodes, inputs=inputs, outputs=outputs)
# This should *not* throw
graph.tensors(check_duplicates=False)
def toposort_linear_graph():
inputs = [Variable(name="x")]
intermediate0 = Variable(name="intermediate0")
intermediate1 = Variable(name="intermediate1")
intermediate2 = Variable(name="intermediate2")
outputs = [Variable(name="y")]
# Nodes are NOT in topo order.
nodes = [
Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate0]),
Node(op="Add", name="Test2", inputs=[intermediate1], outputs=[intermediate2]),
Node(op="Add", name="Test3", inputs=[intermediate2], outputs=outputs),
Node(op="Add", name="Test1", inputs=[intermediate0], outputs=[intermediate1]),
]
expected_node_order = [nodes[0], nodes[3], nodes[1], nodes[2]]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
# Graph structure:
# x
# |
# Test0 -> out0 (graph output)
# |
# out0
# |
# Test1 -> out1 (graph output)
# |
# out1
# |
# Test2 -> out2 (graph_output)
def toposort_multi_tier_output_graph():
inputs = [Variable(name="x")]
outputs = [Variable(name="out0"), Variable(name="out1"), Variable(name="out2")]
out0, out1, out2 = outputs
nodes = [
Node(op="Add", name="Test2", inputs=[out1], outputs=[out2]),
Node(op="Add", name="Test0", inputs=inputs, outputs=[out0]),
Node(op="Add", name="Test1", inputs=[out0], outputs=[out1]),
]
expected_node_order = [nodes[1], nodes[2], nodes[0]]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
# Graph structure:
# x2 x1
# | |
# Test0
# |
# int0 x0
# | /
# Test1
# |
# int1 x3
# | /
# Test2 -> out (graph_output)
def toposort_multi_tier_input_graph():
inputs = [Variable(name="x0"), Variable(name="x1"), Variable(name="x2"), Variable(name="x3")]
int0, int1 = [Variable(name="intermediate0"), Variable(name="intermediate1")]
outputs = [Variable(name="out")]
x0, x1, x2, x3 = inputs
nodes = [
Node(op="Add", name="Test2", inputs=[int1, x3], outputs=outputs),
Node(op="Add", name="Test0", inputs=[x2, x1], outputs=[int0]),
Node(op="Add", name="Test1", inputs=[int0, x0], outputs=[int1]),
]
expected_node_order = [nodes[1], nodes[2], nodes[0]]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
TOPOSORT_TEST_CASES = [
toposort_linear_graph,
toposort_multi_tier_output_graph,
toposort_multi_tier_input_graph,
]
class TestToposort(object):
@pytest.mark.parametrize("toposort_test_case", TOPOSORT_TEST_CASES)
def test_topologically_sort(self, toposort_test_case):
graph, expected_node_order = toposort_test_case()
assert graph.nodes != expected_node_order
graph.toposort()
assert graph.nodes == expected_node_order
@pytest.mark.parametrize("toposort_test_case", TOPOSORT_TEST_CASES)
def test_toposort_nested(self, toposort_test_case):
subgraph, expected_node_order = toposort_test_case()
assert subgraph.nodes != expected_node_order
# Wrap the graph within a subgraph
inp = Variable("input")
id_out = Variable("id_out")
identity = Node(op="Identity", inputs=[inp], outputs=[id_out])
# Make the subgraph take an input from the outer graph node
# If toposort tries to take the node id, it'll fault.
subgraph.nodes[0].inputs.append(id_out)
out = Variable("output")
nested = Node(op="Nested", inputs=[id_out], outputs=[out], attrs={"subgraph": subgraph})
graph = Graph(nodes=[identity, nested], inputs=[inp], outputs=[out])
graph.toposort(recurse_subgraphs=True)
assert subgraph.nodes == expected_node_order
def build_basic_graph():
inputs = [Variable(name="x")]
outputs = [Variable(name="y")]
nodes = [
Node(op="Add", name="Test", inputs=inputs, outputs=outputs),
]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs)
def build_two_layer_graph():
inputs = [Variable(name="x")]
intermediate_tensor = Variable(name="intermediate")
outputs = [Variable(name="y")]
nodes = [
Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate_tensor]),
Node(op="Add", name="Test1", inputs=[intermediate_tensor], outputs=outputs),
]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs)
def build_two_layer_graph_multiple_io():
inputs = [Variable(name="x0"), Variable(name="x1")]
intermediate_tensor = Variable(name="intermediate")
outputs = [Variable(name="y0"), Variable(name="y1")]
nodes = [
Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate_tensor]),
Node(op="Add", name="Test1", inputs=[intermediate_tensor], outputs=outputs),
]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs)
CLEANUP_TEST_CASES = [
build_basic_graph(),
build_two_layer_graph(),
build_two_layer_graph_multiple_io(),
]
class TestCleanup(object):
@pytest.mark.parametrize("graph", CLEANUP_TEST_CASES)
def test_get_used_node_ids(self, graph):
graph_used_nodes = copy.copy(graph.nodes)
graph_used_tensors = copy.copy(list(graph.tensors().values()))
unused_tensor = Variable(name="Unused")
unused_node = Node(op="Unused", inputs=[graph.inputs[0]], outputs=[unused_tensor])
graph.nodes.append(unused_node)
with graph.node_ids():
used_node_ids, used_tensors = graph._get_used_node_ids()
assert len(used_node_ids) == len(graph.nodes) - 1
assert all([node.id in used_node_ids for node in graph_used_nodes])
assert unused_node.id not in used_node_ids
assert unused_tensor not in used_tensors
assert all([used_tensor in used_tensors for used_tensor in graph_used_tensors])
def test_multi_tier(self):
graph, _ = toposort_multi_tier_output_graph()
tensor = graph.outputs.pop()
unused_node = tensor.inputs[0]
graph.cleanup() # Should remove just the Test2 node as out1 is still an output.
assert unused_node not in graph.nodes
assert len(graph.nodes) == 2
assert len(graph.outputs) == 2
tensor_map = graph.tensors()
assert tensor.name not in tensor_map
def test_remove_unused_node_outputs(self):
graph, _ = toposort_linear_graph()
graph.toposort()
graph_output = graph.outputs[0]
dummy = Variable("dummy")
# Add unused tensor to a node in the middle of the graph.
# Since it does not contribute to graph outputs, it should be removed.
graph.nodes[1].outputs.append(dummy)
graph.cleanup(remove_unused_node_outputs=True)
assert dummy not in graph.nodes[1].outputs
assert graph.outputs[0] == graph_output # Graph outputs will never be removed
def test_graph_input_producers(self):
graph, _ = toposort_linear_graph()
tensor_map = graph.tensors()
assert "x" in tensor_map
graph.inputs = [tensor_map["intermediate0"]]
graph.cleanup()
cleaned_tensor_map = graph.tensors()
assert "x" not in cleaned_tensor_map
@pytest.mark.parametrize("remove_unused_graph_inputs", [True, False])
def test_independent_path(self, remove_unused_graph_inputs):
graph, _ = toposort_linear_graph()
# Build out a path totally unrelated to rest of the graph
indep0 = Variable(name="indep0")
indep1 = Variable(name="indep1")
node = Node(op="IndepTest", inputs=[indep0], outputs=[indep1])
graph.nodes.append(node)
graph.inputs.append(indep0)
graph.cleanup(remove_unused_graph_inputs=remove_unused_graph_inputs)
assert indep0 not in graph.inputs or not remove_unused_graph_inputs
assert node not in graph.nodes or not remove_unused_graph_inputs
tensor_map = graph.tensors()
assert indep0.name not in tensor_map or not remove_unused_graph_inputs
assert indep1.name not in tensor_map or not remove_unused_graph_inputs
def test_nested_graph(self, nested_graph):
nested_node = nested_graph.nodes[1]
nested_inp = nested_node.inputs[0]
nested_out = nested_node.outputs[0]
subgraph = nested_node.attrs["body"]
assert "id_out" in nested_graph.tensors()
nested_graph.cleanup(recurse_subgraphs=True)
# Clean up should not remove a tensor whose only output node is a subgraph.
assert "id_out" in nested_graph.tensors()
# Clean up should not modify the nested nodes inputs or outputs
assert nested_node.inputs == [nested_inp]
assert nested_node.outputs == [nested_out]
# Next we'll clean up the subgraph by recursing from the top-level
assert subgraph.nodes
subgraph.outputs.clear()
nested_graph.cleanup(recurse_subgraphs=True)
assert not subgraph.nodes
def test_node_used_only_in_nested_graph(self):
X = Variable("X", dtype=np.float32, shape=(1,))
Y = Variable("Y", dtype=np.float32, shape=(1,))
graph = Graph(inputs=[X, Y])
X_p = graph.identity(X) # X_p is only used by the subgraph, not in the outer graph.
subgraph_inp = Variable("subgraph_input", dtype=np.float32, shape=(1,))
subgraph = Graph(inputs=[subgraph_inp])
subgraph.outputs = [subgraph.add(subgraph_inp, X_p)]
graph.outputs = [graph.nested(Y, subgraph)]
graph.cleanup(remove_unused_graph_inputs=True)
assert graph.nodes[0].op == "Identity"
assert graph.nodes[0].inputs == [X]
def test_input_is_output(self):
graph = Graph()
A = Variable("A", dtype=np.float32, shape=(1, 1))
B = Variable("B", dtype=np.float32, shape=(1, 1))
C = graph.add(A, B)
graph.inputs = [A, B]
graph.outputs = [C, B, A] # Out of order w/ respect to Add node inputs
# Graph should remain unchanged after cleanup, including I/O tensors.
graph.cleanup()
assert graph.inputs == [A, B]
assert graph.outputs == [C, B, A]
assert len(graph.nodes) == 1
assert graph.nodes[0].inputs == [A, B]
assert graph.nodes[0].outputs == [C]
class TestCopy(object):
def test_basic(self):
graph = Graph(
nodes=[Node(op="Test")],
inputs=[Variable("test")],
outputs=[Variable("test")],
name="test-name",
doc_string="test-docstring",
import_domains=["fake-import-domain"],
opset=-1,
)
new_graph = graph.copy()
assert new_graph == graph
assert new_graph.nodes == graph.nodes
assert new_graph.inputs == graph.inputs
assert new_graph.outputs == graph.outputs
assert new_graph.name == graph.name
assert new_graph.doc_string == graph.doc_string
assert new_graph.import_domains == graph.import_domains
assert new_graph.opset == graph.opset
def test_copy(self):
def make_graph():
graph, _ = toposort_multi_tier_output_graph()
graph.outputs.pop()
# Deep copy should work with empty tensors
graph.nodes[0].inputs.append(Variable.empty())
graph.nodes[0].outputs.append(Variable.empty())
return graph
graph = make_graph()
new_graph = graph.copy()
assert graph == new_graph
# Running cleanup on the first graph should not affect the copy
graph.cleanup()
assert graph != new_graph
assert new_graph == make_graph()
def test_copy_with_subgraph(self, nested_graph):
new_graph = nested_graph.copy()
assert new_graph == nested_graph
new_subgraph = new_graph.nodes[1].attrs["body"]
id_out = new_subgraph.nodes[0].inputs[0]
assert id_out.name == "id_out"
assert len(id_out.inputs) == 1
assert id_out.inputs[0].op == "Identity"
assert id_out.inputs[0].inputs[0].name == "input"
new_subgraph.nodes[0].outputs.clear()
new_subgraph.nodes[1].inputs.clear()
subgraph = nested_graph.nodes[1].attrs["body"]
assert subgraph.nodes[0].outputs
assert subgraph.nodes[1].inputs
new_graph.outputs.clear()
new_graph.cleanup()
assert nested_graph.outputs
assert len(nested_graph.nodes) == 2
assert len(subgraph.nodes) == 2
# If the subgraph has a tensor with the same name as the outer graph,
# the subgraph copy should include a copy of the subgraph tensor, not the outer
# graph tensor.
def test_copy_with_subgraph_dup_tensors(self):
inp = Variable("input", dtype=np.float32, shape=(4, 5))
graph = Graph(inputs=[inp])
# We'll use shape to distinguish inner/outer tensor
subgraph_inp = Variable("input", dtype=np.float32, shape=(1, 2))
subgraph = Graph(inputs=[subgraph_inp])
graph.outputs = [graph.nested(inp, subgraph)]
graph_copy = graph.copy()
assert graph_copy.nodes[0].attrs["body"].inputs[0].shape == (1, 2)
def test_copy_with_subgraph_dup_const_tensors(self):
inp = Constant("input", values=np.ones(dtype=np.float32, shape=(4, 5)))
graph = Graph()
# We'll use shape to distinguish inner/outer tensor
subgraph_inp = Constant("input", values=np.ones(dtype=np.float32, shape=(1, 2)))
subgraph = Graph()
subgraph.outputs = [subgraph.identity(subgraph_inp)]
graph.outputs = [graph.nested(inp, subgraph)]
graph_copy = graph.copy()
assert graph_copy.nodes[0].attrs["body"].nodes[0].inputs[0].shape == (1, 2)
@pytest.fixture
def simple_foldable():
# Graph:
# c = (a + b)
# output = input + c
# Should fold to:
# output = input + c
weights = np.ones(shape=(1, 3), dtype=np.float32)
graph = Graph()
inp = Variable("input", shape=(1, 3), dtype=np.float32)
c = graph.add(weights, weights, name="c")
out = graph.add(inp, c)
graph.inputs = [inp]
graph.outputs = [out]
yield graph
@pytest.fixture
def one_hop_foldable():
# Graph:
# c = (a + b)
# e = (c + d)
# output = input + e
# Should fold to:
# output = input + e
weights = np.ones(shape=(1, 3), dtype=np.float32)
graph = Graph()
inp = Variable("input", shape=(1, 3), dtype=np.float32)
c = graph.add(weights, weights, name="c")
e = graph.add(c, weights, name="e")
out = graph.add(inp, e)
graph.inputs = [inp]
graph.outputs = [out]
yield graph
@pytest.fixture
def foldable_with_invalid_node():
# Graph
# c = (a + b)
# e = fake(d)
# f = (e + c)
# out = inp + f
#
# c should be folded even though e is the output of an
# invalid node.
weights = np.ones(shape=(1, 3), dtype=np.float32)
graph = Graph()
inp = Variable("input", shape=(1, 3), dtype=np.float32)
c = graph.add(weights, weights, name="c")
e = graph.fake(weights, name="e")
f = graph.add(e, c, name="f")
out = graph.add(inp, f, name="output")
graph.inputs = [inp]
graph.outputs = [out]
yield graph
class TestFoldConstants(object):
@pytest.mark.parametrize("partitioning", [None, "basic", "recursive"])
def test_basic(self, simple_foldable, partitioning):
inp = simple_foldable.inputs[0]
simple_foldable.fold_constants(partitioning=partitioning).cleanup(remove_unused_graph_inputs=True)
# Extra node should be removed
assert len(simple_foldable.nodes) == 1
assert simple_foldable.nodes[0].inputs[0] == inp
assert simple_foldable.nodes[0].inputs[1].name == "c"
# Value should be computed correctly
assert np.all(simple_foldable.nodes[0].inputs[1].values == np.ones(shape=(1, 3), dtype=np.float32) * 2)
def test_one_hop(self, one_hop_foldable):
inp = one_hop_foldable.inputs[0]
one_hop_foldable.fold_constants().cleanup()
# Extra nodes should be removed
assert len(one_hop_foldable.nodes) == 1
assert one_hop_foldable.nodes[0].inputs[0] == inp
assert one_hop_foldable.nodes[0].inputs[1].name == "e"
# Value should be computed correctly
assert np.all(one_hop_foldable.nodes[0].inputs[1].values == np.ones(shape=(1, 3), dtype=np.float32) * 3)
def test_with_invalid_nodes(self, foldable_with_invalid_node):
foldable_with_invalid_node.fold_constants(partitioning="recursive").cleanup()
tensor_map = foldable_with_invalid_node.tensors()
assert len(foldable_with_invalid_node.nodes) == 3
assert foldable_with_invalid_node.nodes[0].op == "Fake"
assert foldable_with_invalid_node.nodes[1].op == "Add"
assert foldable_with_invalid_node.nodes[2].op == "Add"
assert np.all(tensor_map["c"].values == (np.ones(shape=(1, 3), dtype=np.float32) * 2))
def test_with_invalid_nodes_no_recursive(self, foldable_with_invalid_node):
# No folding should take place without recursive partitioning
original = foldable_with_invalid_node.copy()
assert foldable_with_invalid_node.fold_constants() == original
def test_no_foldable_constants(self):
inp0 = Variable("input0", shape=(1, 3), dtype=np.float32)
inp1 = Variable("input1", shape=(1, 3), dtype=np.float32)
out = Variable("output", shape=(1, 3), dtype=np.float32)
nodes = [Node("Add", inputs=[inp0, inp1], outputs=[out])]
graph = Graph(nodes=nodes, inputs=[inp0, inp1], outputs=[out])
graph.fold_constants().cleanup()
assert len(graph.nodes) == 1
assert graph.nodes[0].inputs == [inp0, inp1]
def test_const_node(self):
graph = Graph()
values = np.ones((1, 3, 3), dtype=np.int64)
graph.outputs = [graph.constant(values=values)]
assert isinstance(graph.outputs[0], Variable)
graph.fold_constants().cleanup()
assert isinstance(graph.outputs[0], Constant)
assert np.all(graph.outputs[0].values == values)
assert not graph.nodes
def test_shape_of_constant_tensor(self):
graph = Graph()
values = np.ones((1, 3, 3), dtype=np.int64)
const = Constant("const", values=values)
graph.outputs = [graph.shape(const)]
graph.fold_constants().cleanup()
assert not graph.nodes
assert isinstance(graph.outputs[0], Constant)
assert np.all(graph.outputs[0].values == (1, 3, 3))
def test_shape_of_constant_node(self):
graph = Graph()
values = np.ones((1, 3, 3), dtype=np.int64)
const = graph.constant(values=values)
graph.outputs = [graph.shape(const)]
graph.fold_constants().cleanup()
assert not graph.nodes
assert isinstance(graph.outputs[0], Constant)
assert np.all(graph.outputs[0].values == (1, 3, 3))
# Cannot fold shape nodes if they have dynamically shaped inputs.
def test_shape_of_variable_tensor_dynamic_shape(self):
var = Variable("var", dtype=np.float32, shape=("", -1, 0, 4))
graph = Graph(inputs=[var])
graph.outputs = [graph.shape(var)]
graph.fold_constants().cleanup()
assert len(graph.nodes) == 1
assert graph.nodes[0].op == "Shape"
assert isinstance(graph.outputs[0], Variable)
def test_shape_of_variable_tensor_static_shape(self):
var = Variable("var", dtype=np.float32, shape=(1, 3, 4))
graph = Graph(inputs=[var])
graph.inputs = [var]
graph.outputs = [graph.shape(var)]
graph.fold_constants().cleanup()
assert not graph.nodes
assert isinstance(graph.outputs[0], Constant)
assert np.all(graph.outputs[0].values == (1, 3, 4))
def test_shape_of_variable_tensor_multiple_shapes(self):
graph = Graph()
var = Variable("var", dtype=np.float32, shape=(1, 3, 4))
var2 = Variable("var2", dtype=np.float32, shape=tuple()) # Scalar
graph.inputs = [var, var2]
graph.outputs = [graph.shape(var), graph.identity(var), graph.shape(var2)]
graph.fold_constants().cleanup()
assert len(graph.nodes) == 1
assert graph.nodes[0].op == "Identity"
assert isinstance(graph.outputs[0], Constant)
assert np.all(graph.outputs[0].values == (1, 3, 4))
assert isinstance(graph.outputs[2], Constant)
assert np.all(graph.outputs[2].values == tuple())
def test_shape_of_variable_tensor_static_shape_no_fold(self):
graph = Graph()
var = Variable("var", dtype=np.float32, shape=(1, 3, 4))
graph.inputs = [var]
graph.outputs = [graph.shape(var)]
graph.fold_constants(fold_shapes=False).cleanup()
assert len(graph.nodes) == 1
assert graph.nodes[0].op == "Shape"
assert isinstance(graph.outputs[0], Variable)
# Constant folding should not cause constant tensors in the model to be loaded.
def test_no_load_constants(self):
graph = gs.import_onnx(const_foldable().load())
new_graph = graph.fold_constants()
def check_no_const_loaded(graph):
num_lazy_constants = 0
for tensor in graph.tensors().values():
if isinstance(tensor, Constant) and isinstance(tensor._values, LazyValues):
num_lazy_constants += 1
assert num_lazy_constants == 3 # Graph starts with 3 constants - none should be loaded.
check_no_const_loaded(graph)
check_no_const_loaded(new_graph)
@pytest.mark.parametrize(
"shape, indices",
[
(("batch", 3, "height", "width"), 1), # Scalar indices case
(None, 1), # Shape not inferred case
(("batch", 3, "height", "width"), [1]),
(("batch", 3, "height", 224), [1, 3]),
(("batch", 3, 224, 224), [1, 2, 3]),
],
)
def test_shape_gather(self, shape, indices):
indices = np.array(indices)
inp = Variable("input", dtype=np.float32, shape=shape)
graph = Graph(inputs=[inp])
inp_shape = graph.shape(inp)
shape_part = graph.gather(inp_shape, indices=indices)
graph.outputs = [
graph.add(shape_part, shape_part),
graph.gather(inp_shape, indices=[0]),
graph.gather(inp_shape, indices=np.array(0)),
]
graph.fold_constants()
if shape is not None:
assert isinstance(graph.outputs[0], Constant)
expected_shape = np.array(shape)[indices].astype(np.int64) * 2
assert np.all(graph.outputs[0].values == expected_shape)
else:
assert isinstance(graph.outputs[0], Variable)
assert isinstance(graph.outputs[1], Variable)
assert isinstance(graph.outputs[2], Variable)
@pytest.mark.parametrize(
"shape, starts, ends, axes, steps, expected",
[
(("batch", 3, "height", "width"), 1, 2, 0, 1, [3]), # Scalar starts/ends case
(("batch", 3, "height", "width"), [1], [2], [0], [1], [3]),
(("batch", 3, 5, "width"), [1], [-1], [0], [1], [3, 5]), # Negative ends case
(("batch", 3, 5, 7), [1], [2000], [0], [1], [3, 5, 7]), # Past end, ends case
(("batch", 3, 5, 7), [-2], [4], [0], [1], [5, 7]), # Negative starts case
(("batch", 3, 5, 7), [-2], [4], [1], [1], None), # Non-zero axes case
(("batch", 3, 5, "width"), [-2], [4], [1], [1], None), # Dynamic case
(("batch", 3, 5, 7), [1], [4], [0], [2], [3, 7]), # Non-one steps case
(("batch", 3, 5, 7), [4], [0], [0], [-1], [7, 5, 3]), # Negative steps case
],
)
def test_shape_slice(self, shape, starts, ends, axes, steps, expected):
inp = Variable("input", dtype=np.float32, shape=shape)
graph = Graph(inputs=[inp])
inp_shape = graph.shape(inp)
graph.outputs = [
graph.slice(inp_shape, np.array(starts), np.array(ends), axes=np.array(axes), steps=np.array(steps))
]
graph.fold_constants()
if expected:
assert isinstance(graph.outputs[0], Constant)
assert np.all(graph.outputs[0].values == expected)
else:
assert isinstance(graph.outputs[0], Variable)
# In the single input case, we should derive starts/ends/axes/steps from the attributes.
def test_shape_slice_single_input(self):
inp = Variable("input", dtype=np.int64, shape=(5, 6, 3, 2))
graph = Graph(inputs=[inp])
inp_shape = graph.shape(inp)
graph.outputs = [graph.slice(inp_shape)]
slice_node = graph.outputs[0].inputs[0]
slice_node.attrs = {
"axes": [0],
"starts": [1],
"ends": [3],
"steps": [2],
}
graph.fold_constants()
assert isinstance(graph.outputs[0], Constant)
assert np.all(graph.outputs[0].values == inp.shape[1:3:2])
def test_with_nested_graph(self):
cond = gs.Variable("cond", dtype=np.bool_, shape=(1,))
X = gs.Variable("X", dtype=np.float32, shape=(1,))
Y = gs.Constant("Y", values=np.ones((1,), dtype=np.float32))
graph = Graph(inputs=[X, cond])
then_graph = Graph(name="Then")
then_graph.outputs = [then_graph.add(Y, Y)]
else_graph = Graph(name="Else")
else_graph.outputs = [else_graph.add(X, else_graph.add(Y, Y))]
graph.outputs = [graph.if_op(cond, then_graph, else_graph)]
graph.fold_constants()
graph.cleanup()
assert len(then_graph.nodes) == 0
assert np.all(then_graph.outputs[0].values == (Y.values * 2))
assert len(else_graph.nodes) == 1
assert isinstance(else_graph.nodes[0].inputs[1], Constant)
assert np.all(else_graph.nodes[0].inputs[1].values == (Y.values * 2))
def test_const_inp_but_non_foldable_nested_graph(self):
cond = gs.Constant("cond", values=np.array(True))
X = gs.Variable("X", dtype=np.float32, shape=(1,))
graph = Graph(inputs=[X])
then_graph = Graph(name="Then")
then_graph.outputs = [then_graph.add(X, X)]
else_graph = Graph(name="Else")
else_graph.outputs = [else_graph.add(X, else_graph.add(X, X))]
# Even though if_op looks foldable because it has all constant inputs,
# it's not, since its subgraphs depend on variables in the outer scope.
graph.outputs = [graph.if_op(cond, then_graph, else_graph)]
# This should not raise because the `If` node should be excluded from
# constant folding.
graph.fold_constants(error_ok=False).cleanup()
assert graph.nodes[0].op == "If"
assert len(then_graph.nodes) == 1
assert len(else_graph.nodes) == 2
def test_cast_elision(self):
graph = gs.import_onnx(shape_cast_elision().load())
new_graph = graph.fold_constants()
no_casts = True
for node in new_graph.nodes:
no_casts &= node.op != "Cast"
assert no_casts
class TestIO(object):
def test_io_cannot_be_sync_list_on_init(self):
inp = Variable("input0", shape=(1, 3), dtype=np.float32)
out = Variable("input1", shape=(1, 3), dtype=np.float32)
node = Node("Add", inputs=[inp], outputs=[out])
assert isinstance(node.inputs, SynchronizedList)
assert isinstance(node.outputs, SynchronizedList)
graph = Graph(nodes=[node], inputs=node.inputs, outputs=node.outputs)
assert not isinstance(graph.inputs, SynchronizedList)
assert not isinstance(graph.outputs, SynchronizedList)
def test_io_cannot_be_sync_list_on_assign(self):
inp = Variable("input0", shape=(1, 3), dtype=np.float32)
out = Variable("input1", shape=(1, 3), dtype=np.float32)
node = Node("Add", inputs=[inp], outputs=[out])
assert isinstance(node.inputs, SynchronizedList)
assert isinstance(node.outputs, SynchronizedList)
graph = Graph(nodes=[node], inputs=[], outputs=[])
graph.inputs = node.inputs
graph.outputs = node.outputs
assert not isinstance(graph.inputs, SynchronizedList)
assert not isinstance(graph.outputs, SynchronizedList)
| TensorRT-master | tools/onnx-graphsurgeon/tests/ir/test_graph.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
ROOT_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir)
sys.path.insert(0, ROOT_DIR)
import onnx_graphsurgeon as gs
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
]
# Want to be able to generate docs with no dependencies installed
autodoc_mock_imports = ["onnx", "numpy", "onnxruntime"]
autodoc_default_options = {
"members": True,
"show-inheritance": True,
"special-members": "__call__, __getitem__, __bool__",
}
autodoc_member_order = "bysource"
autodoc_inherit_docstrings = True
autosummary_generate = True
source_suffix = [".rst"]
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "ONNX GraphSurgeon"
copyright = "2020, NVIDIA"
author = "NVIDIA"
version = gs.__version__
# The full version, including alpha/beta/rc tags.
release = version
# Style
pygments_style = "colorful"
html_theme = "sphinx_rtd_theme"
# Use the TRT theme and NVIDIA logo
html_static_path = ["_static"]
html_logo = "_static/img/nvlogo_white.png"
# Hide source link
html_show_sourcelink = False
# Output file base name for HTML help builder.
htmlhelp_basename = "OnnxGraphSurgeonDoc"
# Template files to extend default Sphinx templates.
# See https://www.sphinx-doc.org/en/master/templating.html for details.
templates_path = ["_templates"]
# For constructor arguments to show up in Sphinx generated doc
autoclass_content = "both"
# Unlimited depth sidebar.
html_theme_options = {"navigation_depth": -1}
html_sidebars = {"**": ["globaltoc.html", "relations.html", "sourcelink.html", "searchbox.html"]}
# Allows us to override the default page width in the Sphinx theme.
def setup(app):
app.add_css_file("style.css")
| TensorRT-master | tools/onnx-graphsurgeon/docs/conf.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
X = gs.Variable(name="X", dtype=np.float32, shape=(1, 3, 5, 5))
Y = gs.Variable(name="Y", dtype=np.float32, shape=(1, 3, 1, 1))
node = gs.Node(op="GlobalLpPool", attrs={"p": 2}, inputs=[X], outputs=[Y])
graph = gs.Graph(nodes=[node], inputs=[X], outputs=[Y])
onnx.save(gs.export_onnx(graph), "test_globallppool.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/01_creating_a_model/example.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
print("Graph.layer Help:\n{}".format(gs.Graph.layer.__doc__))
# We can use `Graph.register()` to add a function to the Graph class. Later, we can invoke the function
# directly on instances of the graph, e.g., `graph.add(...)`
@gs.Graph.register()
def add(self, a, b):
# The Graph.layer function creates a node, adds inputs and outputs to it, and finally adds it to the graph.
# It returns the output tensors of the node to make it easy to chain.
# The function will append an index to any strings provided for inputs/outputs prior
# to using them to construct tensors. This will ensure that multiple calls to the layer() function
# will generate distinct tensors. However, this does NOT guarantee that there will be no overlap with
# other tensors in the graph. Hence, you should choose the prefixes to minimize the possibility of
# collisions.
return self.layer(op="Add", inputs=[a, b], outputs=["add_out_gs"])
@gs.Graph.register()
def mul(self, a, b):
return self.layer(op="Mul", inputs=[a, b], outputs=["mul_out_gs"])
@gs.Graph.register()
def gemm(self, a, b, trans_a=False, trans_b=False):
attrs = {"transA": int(trans_a), "transB": int(trans_b)}
return self.layer(op="Gemm", inputs=[a, b], outputs=["gemm_out_gs"], attrs=attrs)
# You can also specify a set of opsets when registering a function.
# By default, the function is registered for all opsets lower than Graph.DEFAULT_OPSET
@gs.Graph.register(opsets=[11])
def relu(self, a):
return self.layer(op="Relu", inputs=[a], outputs=["act_out_gs"])
# Note that the same function can be defined in different ways for different opsets.
# It will only be called if the Graph's opset matches one of the opsets for which the function is registered.
# Hence, for the opset 11 graph used in this example, the following function will never be used.
@gs.Graph.register(opsets=[1])
def relu(self, a):
raise NotImplementedError("This function has not been implemented!")
##########################################################################################################
# The functions registered above greatly simplify the process of building the graph itself.
graph = gs.Graph(opset=11)
# Generates a graph which computes:
# output = ReLU((A * X^T) + B) (.) C + D
X = gs.Variable(name="X", shape=(64, 64), dtype=np.float32)
graph.inputs = [X]
# axt = (A * X^T)
# Note that we can use NumPy arrays directly (e.g. Tensor A),
# instead of Constants. These will automatically be converted to Constants.
A = np.ones(shape=(64, 64), dtype=np.float32)
axt = graph.gemm(A, X, trans_b=True)
# dense = ReLU(axt + B)
B = np.ones((64, 64), dtype=np.float32) * 0.5
dense = graph.relu(*graph.add(*axt, B))
# output = dense (.) C + D
# If a Tensor instance is provided (e.g. Tensor C), it will not be modified at all.
# If you prefer to set the exact names of tensors in the graph, you should
# construct tensors manually instead of passing strings or NumPy arrays.
C = gs.Constant(name="C", values=np.ones(shape=(64, 64), dtype=np.float32))
D = np.ones(shape=(64, 64), dtype=np.float32)
graph.outputs = graph.add(*graph.mul(*dense, C), D)
# Finally, we need to set the output datatype to make this a valid ONNX model.
# In our case, all the data types are float32.
for out in graph.outputs:
out.dtype = np.float32
onnx.save(gs.export_onnx(graph), "model.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/07_creating_a_model_with_the_layer_api/generate.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
# Register functions to make graph generation easier
@gs.Graph.register()
def min(self, *args):
return self.layer(op="Min", inputs=args, outputs=["min_out"])[0]
@gs.Graph.register()
def max(self, *args):
return self.layer(op="Max", inputs=args, outputs=["max_out"])[0]
@gs.Graph.register()
def identity(self, inp):
return self.layer(op="Identity", inputs=[inp], outputs=["identity_out"])[0]
# Generate the graph
graph = gs.Graph()
graph.inputs = [gs.Variable("input", shape=(4, 4), dtype=np.float32)]
# Clip values to [0, 6]
MIN_VAL = np.array(0, np.float32)
MAX_VAL = np.array(6, np.float32)
# Add identity nodes to make the graph structure a bit more interesting
inp = graph.identity(graph.inputs[0])
max_out = graph.max(graph.min(inp, MAX_VAL), MIN_VAL)
graph.outputs = [
graph.identity(max_out),
]
# Graph outputs must include dtype information
graph.outputs[0].to_variable(dtype=np.float32, shape=(4, 4))
onnx.save(gs.export_onnx(graph), "model.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/08_replacing_a_subgraph/generate.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import onnx
# Here we'll register a function to do all the subgraph-replacement heavy-lifting.
# NOTE: Since registered functions are entirely reusable, it may be a good idea to
# refactor them into a separate module so you can use them across all your models.
@gs.Graph.register()
def replace_with_clip(self, inputs, outputs):
# Disconnect output nodes of all input tensors
for inp in inputs:
inp.outputs.clear()
# Disconnect input nodes of all output tensors
for out in outputs:
out.inputs.clear()
# Insert the new node.
return self.layer(op="Clip", inputs=inputs, outputs=outputs)
# Now we'll do the actual replacement
graph = gs.import_onnx(onnx.load("model.onnx"))
tmap = graph.tensors()
# You can figure out the input and output tensors using Netron. In our case:
# Inputs: [inp, MIN_VAL, MAX_VAL]
# Outputs: [max_out]
inputs = [tmap["identity_out_0"], tmap["onnx_graphsurgeon_constant_5"], tmap["onnx_graphsurgeon_constant_2"]]
outputs = [tmap["max_out_6"]]
graph.replace_with_clip(inputs, outputs)
# Remove the now-dangling subgraph.
graph.cleanup().toposort()
# That's it!
onnx.save(gs.export_onnx(graph), "replaced.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/08_replacing_a_subgraph/replace.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
# Computes Y = x0 + (a * x1 + b)
shape = (1, 3, 224, 224)
# Inputs
x0 = gs.Variable(name="x0", dtype=np.float32, shape=shape)
x1 = gs.Variable(name="x1", dtype=np.float32, shape=shape)
# Intermediate tensors
a = gs.Constant("a", values=np.ones(shape=shape, dtype=np.float32))
b = gs.Constant("b", values=np.ones(shape=shape, dtype=np.float32))
mul_out = gs.Variable(name="mul_out")
add_out = gs.Variable(name="add_out")
# Outputs
Y = gs.Variable(name="Y", dtype=np.float32, shape=shape)
nodes = [
# mul_out = a * x1
gs.Node(op="Mul", inputs=[a, x1], outputs=[mul_out]),
# add_out = mul_out + b
gs.Node(op="Add", inputs=[mul_out, b], outputs=[add_out]),
# Y = x0 + add_out
gs.Node(op="Add", inputs=[x0, add_out], outputs=[Y]),
]
graph = gs.Graph(nodes=nodes, inputs=[x0, x1], outputs=[Y])
onnx.save(gs.export_onnx(graph), "model.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/04_modifying_a_model/generate.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
graph = gs.import_onnx(onnx.load("model.onnx"))
# 1. Remove the `b` input of the add node
first_add = [node for node in graph.nodes if node.op == "Add"][0]
first_add.inputs = [inp for inp in first_add.inputs if inp.name != "b"]
# 2. Change the Add to a LeakyRelu
first_add.op = "LeakyRelu"
first_add.attrs["alpha"] = 0.02
# 3. Add an identity after the add node
identity_out = gs.Variable("identity_out", dtype=np.float32)
identity = gs.Node(op="Identity", inputs=first_add.outputs, outputs=[identity_out])
graph.nodes.append(identity)
# 4. Modify the graph output to be the identity output
graph.outputs = [identity_out]
# 5. Remove unused nodes/tensors, and topologically sort the graph
# ONNX requires nodes to be topologically sorted to be considered valid.
# Therefore, you should only need to sort the graph when you have added new nodes out-of-order.
# In this case, the identity node is already in the correct spot (it is the last node,
# and was appended to the end of the list), but to be on the safe side, we can sort anyway.
graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), "modified.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/04_modifying_a_model/modify.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
# Inputs
x = gs.Variable(name="x", dtype=np.float32, shape=(1, 3, 224, 224))
# Intermediate tensors
i0 = gs.Variable(name="i0")
i1 = gs.Variable(name="i1")
# Outputs
y = gs.Variable(name="y", dtype=np.float32)
nodes = [
gs.Node(op="Identity", inputs=[x], outputs=[i0]),
gs.Node(op="FakeNodeToRemove", inputs=[i0], outputs=[i1]),
gs.Node(op="Identity", inputs=[i1], outputs=[y]),
]
graph = gs.Graph(nodes=nodes, inputs=[x], outputs=[y])
onnx.save(gs.export_onnx(graph), "model.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/06_removing_nodes/generate.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import onnx
graph = gs.import_onnx(onnx.load("model.onnx"))
fake_node = [node for node in graph.nodes if node.op == "FakeNodeToRemove"][0]
# Get the input node of the fake node
# Node provides i() and o() functions that can optionally be provided an index (default is 0)
# These serve as convenience functions for the alternative, which would be to fetch the input/output
# tensor first, then fetch the input/output node of the tensor.
# For example, node.i() is equivalent to node.inputs[0].inputs[0]
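# Likewise, node.o() is equivalent to node.outputs[0].outputs[0], i.e. the first consumer node of the
# first output tensor.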
inp_node = fake_node.i()
# Reconnect the input node to the output tensors of the fake node, so that the first identity
# node in the example graph now skips over the fake node.
inp_node.outputs = fake_node.outputs
fake_node.outputs.clear()
# Remove the fake node from the graph completely
graph.cleanup()
onnx.save(gs.export_onnx(graph), "removed.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/06_removing_nodes/remove.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
# Register operators we'll need.
# NOTE: Since all the ops used here only have a single output, we return the
# first output directly instead of returning the list of outputs.
@gs.Graph.register()
def shape(self, a):
return self.layer(op="Shape", inputs=[a], outputs=["shape_out_gs"])[0]
@gs.Graph.register()
def reduce_prod(self, a, axes, keepdims=True):
return self.layer(
op="ReduceProd", inputs=[a], attrs={"axes": axes, "keepdims": int(keepdims)}, outputs=["reduce_prod_out_gs"]
)[0]
@gs.Graph.register()
def reshape(self, data, shape):
return self.layer(op="Reshape", inputs=[data, shape], outputs=["reshape_out_gs"])[0]
@gs.Graph.register()
def gather(self, data, indices):
return self.layer(op="Gather", inputs=[data, indices], outputs=["gather_out_gs"])[0]
@gs.Graph.register()
def concat(self, inputs, axis=0):
return self.layer(op="Concat", inputs=inputs, attrs={"axis": axis}, outputs=["concat_out_gs"])[0]
# Create the graph.
graph = gs.Graph()
# First we set up the inputs, using gs.Tensor.DYNAMIC to specify dynamic dimensions.
graph.inputs = [gs.Variable(name="data", dtype=np.float32, shape=(gs.Tensor.DYNAMIC, 3, gs.Tensor.DYNAMIC, 5))]
input_shape = graph.shape(graph.inputs[0])
# Part 1 - Flattening the input by computing its volume and reshaping.
volume = graph.reduce_prod(input_shape, axes=[0])
flattened = graph.reshape(graph.inputs[0], volume)
# Part 2 - Collapsing some, but not all, dimensions. In this case, we will flatten the last 2 dimensions.
# To do so, we'll gather the last 2 dimensions, compute their volume with reduce_prod, and concatenate the
# result with the first 2 dimensions.
# NOTE: The code here is *not* specific to images, but we use NCHW notation to make it more readable.
NC = graph.gather(input_shape, indices=[0, 1])
HW = graph.gather(input_shape, indices=[2, 3])
new_shape = graph.concat([NC, graph.reduce_prod(HW, axes=[0])])
partially_flattened = graph.reshape(graph.inputs[0], new_shape)
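# For illustration (input shape assumed): given an input of shape (2, 3, 4, 5), "flattened" would have
# shape (120,) and "partially_flattened" would have shape (2, 3, 20).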
# Finally, set up the outputs and export.
flattened.name = "flattened" # Rename output tensor to make it easy to find.
flattened.dtype = np.float32 # NOTE: We must include dtype information for graph outputs
partially_flattened.name = "partially_flattened"
partially_flattened.dtype = np.float32
graph.outputs = [flattened, partially_flattened]
onnx.save(gs.export_onnx(graph), "model.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/09_shape_operations_with_the_layer_api/generate.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
# Computes outputs = input + ((a + b) + d)
shape = (1, 3)
# Inputs
input = gs.Variable("input", shape=shape, dtype=np.float32)
# Intermediate tensors
a = gs.Constant("a", values=np.ones(shape=shape, dtype=np.float32))
b = gs.Constant("b", values=np.ones(shape=shape, dtype=np.float32))
c = gs.Variable("c")
d = gs.Constant("d", values=np.ones(shape=shape, dtype=np.float32))
e = gs.Variable("e")
# Outputs
output = gs.Variable("output", shape=shape, dtype=np.float32)
nodes = [
# c = (a + b)
gs.Node("Add", inputs=[a, b], outputs=[c]),
# e = (c + d)
gs.Node("Add", inputs=[c, d], outputs=[e]),
# output = input + e
gs.Node("Add", inputs=[input, e], outputs=[output]),
]
graph = gs.Graph(nodes=nodes, inputs=[input], outputs=[output])
onnx.save(gs.export_onnx(graph), "model.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/05_folding_constants/generate.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import onnx
print("Graph.fold_constants Help:\n{}".format(gs.Graph.fold_constants.__doc__))
graph = gs.import_onnx(onnx.load("model.onnx"))
# Fold constants in the graph using ONNX Runtime. This will replace
# expressions that can be evaluated prior to runtime with constant tensors.
# The `fold_constants()` function will not, however, remove the nodes that
# it replaced - it simply changes the inputs of subsequent nodes.
# To remove these unused nodes, we can follow up `fold_constants()` with `cleanup()`
graph.fold_constants().cleanup()
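# For the model produced by the accompanying generate.py (values assumed from that script), the
# subgraph computing (a + b) + d folds into a single constant of threes, leaving only the final
# "output = input + e" Add node in the folded graph.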
onnx.save(gs.export_onnx(graph), "folded.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/05_folding_constants/fold.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
# Computes Y = x0 + (a * x1 + b)
shape = (1, 3, 224, 224)
# Inputs
x0 = gs.Variable(name="x0", dtype=np.float32, shape=shape)
x1 = gs.Variable(name="x1", dtype=np.float32, shape=shape)
# Intermediate tensors
a = gs.Constant("a", values=np.ones(shape=shape, dtype=np.float32))
b = gs.Constant("b", values=np.ones(shape=shape, dtype=np.float32))
mul_out = gs.Variable(name="mul_out")
add_out = gs.Variable(name="add_out")
# Outputs
Y = gs.Variable(name="Y", dtype=np.float32, shape=shape)
nodes = [
# mul_out = a * x1
gs.Node(op="Mul", inputs=[a, x1], outputs=[mul_out]),
# add_out = mul_out + b
gs.Node(op="Add", inputs=[mul_out, b], outputs=[add_out]),
# Y = x0 + add_out
gs.Node(op="Add", inputs=[x0, add_out], outputs=[Y]),
]
graph = gs.Graph(nodes=nodes, inputs=[x0, x1], outputs=[Y])
onnx.save(gs.export_onnx(graph), "model.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/03_isolating_a_subgraph/generate.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
# Though omitted in this example, in some cases, it may be useful to embed
# shape information in the graph. We can use ONNX shape inference to do this:
#
# from onnx import shape_inference
# model = shape_inference.infer_shapes(onnx.load("model.onnx"))
#
# IMPORTANT: In some cases, ONNX shape inference may not correctly infer shapes,
# which will result in an invalid subgraph. To avoid this, you can instead modify
# the tensors to include the shape information yourself.
model = onnx.load("model.onnx")
graph = gs.import_onnx(model)
# Since we already know the names of the tensors we're interested in, we can
# grab them directly from the tensor map.
#
# NOTE: If you do not know the tensor names you want, you can view the graph in
# Netron to determine them, or use ONNX GraphSurgeon in an interactive shell
# to print the graph.
tensors = graph.tensors()
# If you want to embed shape information, but cannot use ONNX shape inference,
# you can manually modify the tensors at this point:
#
# graph.inputs = [tensors["x1"].to_variable(dtype=np.float32, shape=(1, 3, 224, 224))]
# graph.outputs = [tensors["add_out"].to_variable(dtype=np.float32, shape=(1, 3, 224, 224))]
#
# IMPORTANT: You must include type information for input and output tensors if it is not already
# present in the graph.
#
# NOTE: ONNX GraphSurgeon will also accept dynamic shapes - simply set the corresponding
# dimension(s) to `gs.Tensor.DYNAMIC`, e.g. `shape=(gs.Tensor.DYNAMIC, 3, 224, 224)`
graph.inputs = [tensors["x1"].to_variable(dtype=np.float32)]
graph.outputs = [tensors["add_out"].to_variable(dtype=np.float32)]
# Notice that we do not need to manually modify the rest of the graph. ONNX GraphSurgeon will
# take care of removing any unnecessary nodes or tensors, so that we are left with only the subgraph.
graph.cleanup()
onnx.save(gs.export_onnx(graph), "subgraph.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/03_isolating_a_subgraph/isolate.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import numpy as np
import onnx
X = gs.Variable(name="X", dtype=np.float32, shape=(1, 3, 224, 224))
# Since W is a Constant, it will automatically be exported as an initializer
W = gs.Constant(name="W", values=np.ones(shape=(5, 3, 3, 3), dtype=np.float32))
Y = gs.Variable(name="Y", dtype=np.float32, shape=(1, 5, 222, 222))
node = gs.Node(op="Conv", inputs=[X, W], outputs=[Y])
# Note that initializers do not necessarily have to be graph inputs
graph = gs.Graph(nodes=[node], inputs=[X], outputs=[Y])
onnx.save(gs.export_onnx(graph), "test_conv.onnx")
| TensorRT-master | tools/onnx-graphsurgeon/examples/02_creating_a_model_with_initializer/example.py |
from onnx_graphsurgeon.exporters.onnx_exporter import export_onnx
from onnx_graphsurgeon.importers.onnx_importer import import_onnx
from onnx_graphsurgeon.ir.graph import Graph
from onnx_graphsurgeon.ir.node import Node
from onnx_graphsurgeon.ir.tensor import Constant, Tensor, Variable
from onnx_graphsurgeon.util.exception import OnnxGraphSurgeonException
__version__ = "0.3.13"
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/__init__.py |
from onnx_graphsurgeon.logger.logger import G_LOGGER
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/logger/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.util.exception import OnnxGraphSurgeonException
import inspect
import enum
import time
import sys
import os
# Context manager to apply indentation to messages
class LoggerIndent(object):
def __init__(self, logger, indent):
self.logger = logger
self.old_indent = self.logger.logging_indent
self.indent = indent
def __enter__(self):
self.logger.logging_indent = self.indent
return self
def __exit__(self, exc_type, exc_value, traceback):
self.logger.logging_indent = self.old_indent
# Context manager to suppress messages
class LoggerSuppress(object):
def __init__(self, logger, severity):
self.logger = logger
self.old_severity = self.logger.severity
self.severity = severity
def __enter__(self):
self.logger.severity = self.severity
return self
def __exit__(self, exc_type, exc_value, traceback):
self.logger.severity = self.old_severity
class LogMode(enum.IntEnum):
EACH = 0 # Log the message each time
ONCE = 1 # Log the message only once. The same message will not be logged again.
class Logger(object):
ULTRA_VERBOSE = -10
VERBOSE = 0
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
SEVERITY_LETTER_MAPPING = {
ULTRA_VERBOSE: "[UV]",
VERBOSE: "[V]",
DEBUG: "[D]",
INFO: "[I]",
WARNING: "[W]",
ERROR: "[E]",
CRITICAL: "[C]",
}
SEVERITY_COLOR_MAPPING = {
ULTRA_VERBOSE: "cyan",
VERBOSE: "dark_gray",
DEBUG: "light_gray",
INFO: "light_green",
WARNING: "light_yellow",
ERROR: "red_1",
CRITICAL: "red_1",
}
def __init__(self, severity=INFO, colors=True, letter=True, timestamp=False, line_info=False):
"""
Logger.
Args:
severity (Logger.Severity): Messages below this severity are ignored.
colors (bool): Whether to use colored output.
letter (bool): Whether to prepend each logging message with a letter indicating its severity. Defaults to True.
timestamp (bool): Whether to include a timestamp in the logging output. Defaults to False.
line_info (bool): Whether to include file and line number information in the logging output. Defaults to False.
"""
self._severity = severity
self.logging_indent = 0
self.root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
self.once_logged = set()
self.colors = colors
self.letter = letter
self.timestamp = timestamp
self.line_info = line_info
self.logger_callbacks = []
@property
def severity(self):
return self._severity
@severity.setter
def severity(self, value):
self._severity = value
for callback in self.logger_callbacks:
callback(self._severity)
def register_callback(self, callback):
"""
Registers a callback with the logger, which will be invoked when the logging severity is modified.
The callback is guaranteed to be called at least once in the register_callback function.
Args:
callback (Callable(Logger.Severity)): A callback that accepts the current logger severity.
"""
callback(self._severity)
self.logger_callbacks.append(callback)
def indent(self, level=1):
"""
Returns a context manager that indents all strings logged by the specified amount.
"""
return LoggerIndent(self, level + self.logging_indent)
def suppress(self, severity=CRITICAL):
"""
Returns a context manager that temporarily changes the severity of the logger for its duration.
Args:
severity (Logger.Severity): The severity to set the logger to. Defaults to Logger.CRITICAL, which will suppress all messages.
"""
return LoggerSuppress(self, severity)
# If once is True, the logger will only log this message a single time. Useful in loops.
# message may be a callable which returns a message. This way, the message is only ever generated if it actually needs to be logged.
def log(self, message, severity, mode=LogMode.EACH, stack_depth=2):
def process_message(message, stack_depth):
def get_prefix():
def get_line_info():
module = inspect.getmodule(sys._getframe(stack_depth + 3))
# Handle logging from the top-level of a module.
if not module:
module = inspect.getmodule(sys._getframe(stack_depth + 2))
filename = module.__file__
filename = os.path.relpath(filename, self.root_dir)
# If the file is not located under the root directory, use its basename instead.
if os.pardir in filename:
filename = os.path.basename(filename)
return "[{:}:{:}] ".format(filename, sys._getframe(stack_depth).f_lineno)
prefix = ""
if self.letter:
prefix += Logger.SEVERITY_LETTER_MAPPING[severity] + " "
if self.timestamp:
prefix += "({:}) ".format(time.strftime("%X"))
if self.line_info:
prefix += get_line_info()
return prefix
def apply_indentation(message):
message_lines = str(message).splitlines()
return "\n".join(["\t" * self.logging_indent + line for line in message_lines])
def apply_color(message):
if self.colors:
try:
import colored
color = Logger.SEVERITY_COLOR_MAPPING[severity]
return colored.stylize(message, [colored.fg(color)])
except (ImportError, ModuleNotFoundError):
self.colors = False
self.warning(
"colored module is not installed, will not use colors when logging. To enable colors, please install the colored module: python3 -m pip install colored"
)
self.colors = True
return message
prefix = get_prefix()
message = apply_indentation(message)
return apply_color("{:}{:}".format(prefix, message))
def should_log(message):
should = severity >= self._severity
if mode == LogMode.ONCE:
message_hash = hash(message)
should &= message_hash not in self.once_logged
self.once_logged.add(message_hash)
return should
if not should_log(message):
return
if callable(message):
message = message()
message = str(message)
print(process_message(message, stack_depth=stack_depth))
def ultra_verbose(self, message, mode=LogMode.EACH):
self.log(message, Logger.ULTRA_VERBOSE, mode=mode, stack_depth=3)
def verbose(self, message, mode=LogMode.EACH):
self.log(message, Logger.VERBOSE, mode=mode, stack_depth=3)
def debug(self, message, mode=LogMode.EACH):
self.log(message, Logger.DEBUG, mode=mode, stack_depth=3)
def info(self, message, mode=LogMode.EACH):
self.log(message, Logger.INFO, mode=mode, stack_depth=3)
def warning(self, message, mode=LogMode.EACH):
self.log(message, Logger.WARNING, mode=mode, stack_depth=3)
def error(self, message, mode=LogMode.EACH):
self.log(message, Logger.ERROR, mode=mode, stack_depth=3)
# Like error, but immediately exits.
def critical(self, message):
self.log(message, Logger.CRITICAL, stack_depth=3)
raise OnnxGraphSurgeonException(message) from None # Erase exception chain
global G_LOGGER
G_LOGGER = Logger()
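# Illustrative usage of the global logger (not executed as part of this module):
#
#   G_LOGGER.severity = Logger.VERBOSE
#   with G_LOGGER.indent():
#       G_LOGGER.verbose("This message is indented by one level")
#   with G_LOGGER.suppress(Logger.ERROR):
#       G_LOGGER.warning("This message is not printed, since WARNING is below ERROR")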
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/logger/logger.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import List, Sequence
# default_value exists to solve issues that might result from Python's normal default argument behavior.
# Specifically, consider the following class:
#
# class MyClass(object):
# def __init__(self, value=[]):
# self.value = value
#
# This leads to unwanted behavior when the default value is used:
#
# >>> x = MyClass()
# >>> x.value.append("SHOULD NOT BE IN Y")
# >>> y = MyClass()
# >>> y.value
# ['SHOULD NOT BE IN Y']
#
# If we rewrite the class using default_value:
#
# class MyClass(object):
# def __init__(self, value=None):
# self.value = default_value(value, [])
#
# Then we get the desired behavior:
#
# >>> x = MyClass()
# >>> x.value.append("SHOULD NOT BE IN Y")
# >>> y = MyClass()
# >>> y.value
# []
def default_value(value, default):
return value if value is not None else default
def combine_dicts(dict0, dict1):
"""
Combine two dictionaries. Values in the second will overwrite values in the first.
"""
combined = OrderedDict()
combined.update(dict0)
combined.update(dict1)
return combined
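# For example (illustrative): combine_dicts({"a": 0, "b": 1}, {"b": 2, "c": 3}) returns an OrderedDict
# containing {"a": 0, "b": 2, "c": 3}.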
def is_dynamic_dimension(dim):
return not isinstance(dim, int) or dim < 0
def is_dynamic_shape(shape):
return any(is_dynamic_dimension(dim) for dim in shape)
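# For example (illustrative): is_dynamic_shape((1, 3, 224, 224)) is False, while both
# is_dynamic_shape(("batch", 3, 224, 224)) and is_dynamic_shape((-1, 3, 224, 224)) are True.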
# Special type of list that synchronizes contents with another list.
# Concrete example: Assume some node, n, contains an input tensor, t. If we remove t from n.inputs,
# we also need to remove n from t.outputs. To avoid having to do this manually, we use SynchronizedList,
# which takes an attribute name as a parameter, and then synchronizes to that attribute of each of its elements.
# So, in the example above, we can make n.inputs a synchronized list whose field_name is set to "outputs".
# See test_ir.TestNodeIO for functional tests
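# For example (illustrative, using the IR classes defined elsewhere in this package):
#
# >>> t = Variable("t")
# >>> n = Node("Relu", inputs=[t])
# >>> n in t.outputs
# True
# >>> n.inputs.remove(t)
# >>> t.outputs
# []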
class SynchronizedList(list):
def __init__(self, parent_obj, field_name, initial):
self.parent_obj = parent_obj
self.field_name = field_name
self.extend(initial)
def _add_to_elem(self, elem):
# Explicitly avoid SynchronizedList overrides to prevent infinite recursion
list.append(getattr(elem, self.field_name), self.parent_obj)
def _remove_from_elem(self, elem):
# Explicitly avoid SynchronizedList overrides to prevent infinite recursion
list.remove(getattr(elem, self.field_name), self.parent_obj)
def __delitem__(self, index):
self._remove_from_elem(self[index])
super().__delitem__(index)
def __setitem__(self, index, elem):
self._remove_from_elem(self[index])
super().__setitem__(index, elem)
self._add_to_elem(elem)
def append(self, x):
super().append(x)
self._add_to_elem(x)
def extend(self, iterable: Sequence[object]):
super().extend(iterable)
for elem in iterable:
self._add_to_elem(elem)
def insert(self, i, x):
super().insert(i, x)
self._add_to_elem(x)
def remove(self, x):
super().remove(x)
self._remove_from_elem(x)
def pop(self, i=-1):
elem = super().pop(i)
self._remove_from_elem(elem)
return elem
def clear(self):
for elem in self:
self._remove_from_elem(elem)
super().clear()
def __add__(self, other_list: List[object]):
return list(self) + list(other_list)
def __iadd__(self, other_list: List[object]):
self.extend(other_list)
return self
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/util/misc.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class OnnxGraphSurgeonException(Exception):
"""
An exception raised by ONNX-GraphSurgeon.
"""
pass
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/util/exception.py |
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/util/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.ir.graph import Graph
class BaseImporter(object):
@staticmethod
def import_graph(graph) -> Graph:
"""
Import a graph from some source graph.
Args:
graph (object): The source graph to import. For example, this might be an onnx.GraphProto.
Returns:
Graph: The equivalent onnx-graphsurgeon graph.
"""
raise NotImplementedError("BaseImporter is an abstract class")
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/base_importer.py |
from onnx_graphsurgeon.importers.base_importer import BaseImporter
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from collections import OrderedDict
from typing import List, Union
import numpy as np
import onnx
import onnx.numpy_helper
from onnx_graphsurgeon.importers.base_importer import BaseImporter
from onnx_graphsurgeon.ir.graph import Graph
from onnx_graphsurgeon.ir.node import Node
from onnx_graphsurgeon.ir.tensor import Constant, LazyValues, Tensor, Variable
from onnx_graphsurgeon.logger.logger import G_LOGGER
from onnx_graphsurgeon.util import misc
# Maps values from the AttributeType enum to their string representations, e.g., {1: "FLOAT"}
ATTR_TYPE_MAPPING = dict(zip(onnx.AttributeProto.AttributeType.values(), onnx.AttributeProto.AttributeType.keys()))
# Maps an ONNX attribute to the corresponding Python property
ONNX_PYTHON_ATTR_MAPPING = {
"FLOAT": "f",
"INT": "i",
"STRING": "s",
"TENSOR": "t",
"GRAPH": "g",
"FLOATS": "floats",
"INTS": "ints",
"STRINGS": "strings",
}
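# For example (illustrative): an attribute whose type is INT stores its value in the `i` field of the
# AttributeProto, so it is read via getattr(attr, "i").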
def get_onnx_tensor_shape(onnx_tensor: Union[onnx.ValueInfoProto, onnx.TensorProto]) -> List[int]:
shape = None
if isinstance(onnx_tensor, onnx.TensorProto):
shape = onnx_tensor.dims
else:
if onnx_tensor.type.tensor_type.HasField("shape"):
shape = []
for dim in onnx_tensor.type.tensor_type.shape.dim:
if dim.HasField("dim_param"):
shape.append(dim.dim_param)
elif dim.HasField("dim_value"):
shape.append(dim.dim_value)
else:
shape.append(None)
return shape
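# For illustration (tensor assumed): a ValueInfoProto whose dimensions are ("batch", 3, 224, 224) would
# be imported with shape ["batch", 3, 224, 224]; named dynamic dimensions are preserved as strings.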
def get_onnx_tensor_dtype(onnx_tensor: Union[onnx.ValueInfoProto, onnx.TensorProto]) -> np.dtype:
if isinstance(onnx_tensor, onnx.TensorProto):
onnx_type = onnx_tensor.data_type
else:
onnx_type = onnx_tensor.type.tensor_type.elem_type
if onnx_type in onnx.mapping.TENSOR_TYPE_TO_NP_TYPE:
return onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[onnx_type]
return None
class OnnxImporter(BaseImporter):
@staticmethod
def get_opset(model: onnx.ModelProto):
try:
for importer in OnnxImporter.get_import_domains(model):
if importer.domain == "" or importer.domain == "ai.onnx":
return importer.version
G_LOGGER.warning("Model does not contain ONNX domain opset information! Using default opset.")
return None
except:
G_LOGGER.warning("Model does not contain opset information! Using default opset.")
return None
@staticmethod
def get_import_domains(model: onnx.ModelProto):
return model.opset_import
@staticmethod
def import_tensor(onnx_tensor: Union[onnx.ValueInfoProto, onnx.TensorProto]) -> Tensor:
if isinstance(onnx_tensor, onnx.TensorProto):
data_location = int(onnx_tensor.data_location) if onnx_tensor.HasField("data_location") else None
return Constant(name=onnx_tensor.name, values=LazyValues(onnx_tensor), data_location=data_location)
else:
return Variable(
name=onnx_tensor.name,
dtype=get_onnx_tensor_dtype(onnx_tensor),
shape=get_onnx_tensor_shape(onnx_tensor),
)
@staticmethod
def import_node(
onnx_node: onnx.NodeProto,
tensor_map: "OrderedDict[str, Tensor]",
subgraph_tensor_map: "OrderedDict[str, Tensor]",
) -> Node:
def attrs_to_dict(attrs):
attr_dict = OrderedDict()
for attr in attrs:
def process_attr(attr_str: str):
processed = getattr(attr, ONNX_PYTHON_ATTR_MAPPING[attr_str])
if attr_str == "STRING":
processed = processed.decode()
elif attr_str == "TENSOR":
processed = OnnxImporter.import_tensor(processed)
elif attr_str == "GRAPH":
processed = OnnxImporter.import_graph(
processed, misc.combine_dicts(tensor_map, subgraph_tensor_map)
)
elif attr_str == "FLOATS" or attr_str == "INTS":
processed = list(processed)
elif attr_str == "STRINGS":
processed = [p.decode() for p in processed]
return processed
if attr.type in ATTR_TYPE_MAPPING:
attr_str = ATTR_TYPE_MAPPING[attr.type]
if attr_str in ONNX_PYTHON_ATTR_MAPPING:
attr_dict[attr.name] = process_attr(attr_str)
else:
G_LOGGER.warning(
"Attribute of type {:} is currently unsupported. Skipping attribute.".format(attr_str)
)
else:
G_LOGGER.warning(
"Attribute type: {:} was not recognized. Was the graph generated with a newer IR version than the installed `onnx` package? Skipping attribute.".format(
attr.type
)
)
return attr_dict
# Optional inputs/outputs are represented by empty tensors. All other tensors should already have been populated during shape inference.
def get_tensor(name: str, check_outer_graph=True):
# Prioritize the subgraph even if check_outer_graph is set
if name in subgraph_tensor_map:
return subgraph_tensor_map[name]
if check_outer_graph and name in tensor_map:
return tensor_map[name]
if not name:
# Empty tensors are not tracked by the graph, as these represent optional inputs/outputs that have been omitted.
G_LOGGER.verbose("Generating empty tensor")
return Variable.empty()
G_LOGGER.verbose(
"Tensor: {:} was not generated during shape inference, or shape inference was not run on this model. Creating a new Tensor.".format(
name
)
)
subgraph_tensor_map[name] = Variable(name)
return subgraph_tensor_map[name]
# Retrieve Tensors for node inputs/outputs. Only empty tensors should need to be newly added.
def retrieve_node_inputs() -> List[Tensor]:
inputs = [] # List[Tensor]
for input_name in onnx_node.input:
inputs.append(get_tensor(input_name))
return inputs
def retrieve_node_outputs() -> List[Tensor]:
outputs = [] # List[Tensor]
for output_name in onnx_node.output:
# Node outputs cannot come from the outer graph, they must be created within the inner graph.
outputs.append(get_tensor(output_name, check_outer_graph=False))
return outputs
return Node(
op=onnx_node.op_type,
name=onnx_node.name,
attrs=attrs_to_dict(onnx_node.attribute),
inputs=retrieve_node_inputs(),
outputs=retrieve_node_outputs(),
)
@staticmethod
def import_graph(
onnx_graph: onnx.GraphProto,
tensor_map: "OrderedDict[str, Tensor]" = None,
opset=None,
import_domains: onnx.OperatorSetIdProto = None,
) -> Graph:
"""
Imports a Graph from an ONNX Graph.
Args:
onnx_graph (onnx.GraphProto): The ONNX graph to import.
tensor_map (OrderedDict[str, Tensor]): A mapping of tensor names to Tensors. This is generally only useful for subgraph import.
opset (int): The ONNX opset to use for this graph.
"""
tensor_map = copy.copy(misc.default_value(tensor_map, OrderedDict())) # Outer graph tensors, read-only
subgraph_tensor_map = OrderedDict() # Tensors in this subgraph
# Retrieves a Tensor from subgraph_tensor_map or the outer graph (tensor_map) if present, otherwise imports the tensor
# If overwrite=True, this function will overwrite previously imported tensors
# if the new tensor has more information available.
def get_tensor(
onnx_tensor: Union[onnx.ValueInfoProto, onnx.TensorProto], overwrite=False, check_outer_graph=True
) -> Tensor:
# Prioritize the subgraph even if check_outer_graph is set
if onnx_tensor.name in subgraph_tensor_map:
if overwrite:
tensor = OnnxImporter.import_tensor(onnx_tensor)
if isinstance(subgraph_tensor_map[onnx_tensor.name], Variable):
subgraph_tensor_map[onnx_tensor.name].dtype = (
subgraph_tensor_map[onnx_tensor.name].dtype or tensor.dtype
)
subgraph_tensor_map[onnx_tensor.name].shape = (
subgraph_tensor_map[onnx_tensor.name].shape or tensor.shape
)
return subgraph_tensor_map[onnx_tensor.name]
if check_outer_graph and onnx_tensor.name in tensor_map:
return tensor_map[onnx_tensor.name]
subgraph_tensor_map[onnx_tensor.name] = OnnxImporter.import_tensor(onnx_tensor)
return subgraph_tensor_map[onnx_tensor.name]
# Import initializers contents into Constants.
G_LOGGER.verbose("Importing initializers")
for initializer in onnx_graph.initializer:
get_tensor(initializer)
# Import all tensors whose shapes are known. Tensors may be repeated, and some of these
# duplicates may not include shape/dtype information, so overwrite is set to True
# so that we can capture all the information available about the tensor
G_LOGGER.verbose("Importing tensors with known shapes")
for tensor in onnx_graph.value_info:
get_tensor(tensor, overwrite=True)
# Import graph inputs and outputs. Initializers are not considered to be inputs.
# Graph inputs and outputs can never come from the outer graph!
initializer_names = set([tensor.name for tensor in onnx_graph.initializer])
G_LOGGER.verbose("Importing graph inputs")
graph_inputs = [] # List[Tensor]
for inp in onnx_graph.input:
if inp.name not in initializer_names:
tensor = get_tensor(inp, check_outer_graph=False)
graph_inputs.append(tensor)
G_LOGGER.verbose("Importing graph outputs")
graph_outputs = [] # List[Tensor]
for out in onnx_graph.output:
tensor = get_tensor(out, check_outer_graph=False)
graph_outputs.append(tensor)
G_LOGGER.verbose("Importing nodes")
nodes = [] # List[Node]
for onnx_node in onnx_graph.node:
node = OnnxImporter.import_node(onnx_node, tensor_map, subgraph_tensor_map)
nodes.append(node)
return Graph(
nodes=nodes,
inputs=graph_inputs,
outputs=graph_outputs,
name=onnx_graph.name,
doc_string=onnx_graph.doc_string,
opset=opset,
import_domains=import_domains,
)
def import_onnx(onnx_model: "onnx.ModelProto") -> Graph:
"""
Import an onnx-graphsurgeon Graph from the provided ONNX model.
Args:
onnx_model (onnx.ModelProto): The ONNX model.
Returns:
Graph: A corresponding onnx-graphsurgeon Graph.
"""
return OnnxImporter.import_graph(
onnx_model.graph,
opset=OnnxImporter.get_opset(onnx_model),
import_domains=OnnxImporter.get_import_domains(onnx_model),
)
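# Usage sketch (added for illustration, not part of the upstream file): importing an
# ONNX model into an onnx-graphsurgeon Graph. "model.onnx" is a placeholder path and
# `gs` refers to the top-level onnx_graphsurgeon package.
#
#     import onnx
#     import onnx_graphsurgeon as gs
#
#     graph = gs.import_onnx(onnx.load("model.onnx"))
#     print(graph)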
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/onnx_importer.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from collections import OrderedDict, defaultdict
from typing import Sequence
import numpy as np
from onnx_graphsurgeon.ir.node import Node
from onnx_graphsurgeon.ir.tensor import Constant, Tensor, Variable
from onnx_graphsurgeon.logger import G_LOGGER
from onnx_graphsurgeon.util import misc
class NodeIDAdder(object):
def __init__(self, graph):
self.graph = graph
def __enter__(self):
# To get unique ids for each node, add an `id` attribute. This will be removed before the function returns.
# Using the index in the node list allows the same object to count as different nodes.
for index, node in enumerate(self.graph.nodes):
node.id = index
def __exit__(self, exc_type, exc_value, traceback):
for node in self.graph.nodes:
del node.id
class Graph(object):
"""
Represents a graph containing nodes and tensors.
"""
DEFAULT_OPSET = 11
OPSET_FUNC_MAP = defaultdict(dict) # Ops registered for specific opsets.
GLOBAL_FUNC_MAP = dict() # Ops registered for ALL opsets.
@staticmethod
def register(opsets=None):
"""
Registers a function with the Graph class for the specified group of opsets.
After registering the function, it can be accessed like a normal member function.
For example:
::
@Graph.register()
def add(self, a, b):
return self.layer(op="Add", inputs=[a, b], outputs=["add_out_gs"])
graph.add(a, b)
Args:
opsets (Sequence[int]):
A group of opsets for which to register the function. Multiple functions with the same
name may be registered simultaneously if they are registered for different opsets.
Registering a function with a duplicate name for the same opsets will overwrite any
function previously registered for those opsets. By default, the function is
registered for all opsets.
"""
def register_func(func):
if hasattr(Graph, func.__name__):
G_LOGGER.warning(
"Registered function: {:} is hidden by a Graph attribute or function with the same name. "
"This function will never be called!".format(func.__name__)
)
# Default behavior is to register functions for all opsets.
if opsets is None:
Graph.GLOBAL_FUNC_MAP[func.__name__] = func
else:
for opset in opsets:
Graph.OPSET_FUNC_MAP[opset][func.__name__] = func
return func
return register_func
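# Usage sketch (illustrative, not part of the upstream file): registering a helper for
# specific opsets only. The helper name "clip" and the tensors passed to it are
# assumptions made for the example; Clip takes min/max as inputs starting with opset 11.
#
#     @Graph.register(opsets=[11])
#     def clip(self, inp, clip_min, clip_max):
#         return self.layer(op="Clip", inputs=[inp, clip_min, clip_max], outputs=["clip_out"])
#
#     # On a graph whose opset is 11, the helper becomes available as graph.clip(...).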
def __init__(
self,
nodes: Sequence[Node] = None,
inputs: Sequence[Tensor] = None,
outputs: Sequence[Tensor] = None,
name=None,
doc_string=None,
opset=None,
import_domains=None,
):
"""
Args:
nodes (Sequence[Node]): A list of the nodes in this graph.
inputs (Sequence[Tensor]): A list of graph input Tensors.
outputs (Sequence[Tensor]): A list of graph output Tensors.
name (str): The name of the graph. Defaults to "onnx_graphsurgeon_graph".
doc_string (str): A doc_string for the graph. Defaults to "".
opset (int): The ONNX opset to use when exporting this graph.
"""
self.nodes = misc.default_value(nodes, [])
self.inputs = list(misc.default_value(inputs, []))
self.outputs = list(misc.default_value(outputs, []))
self.name = misc.default_value(name, "onnx_graphsurgeon_graph")
self.__name__ = self.name
self.doc_string = misc.default_value(doc_string, "")
self.opset = misc.default_value(opset, Graph.DEFAULT_OPSET)
self.import_domains = import_domains
# Printing graphs can be very expensive
G_LOGGER.ultra_verbose(lambda: "Created Graph: {:}".format(self))
# For layer() function
self.name_idx = 0
def __getattr__(self, name):
try:
return super().__getattribute__(name)
except AttributeError as err:
# Opset specific ops always take priority over global ops.
if self.opset in Graph.OPSET_FUNC_MAP and name in Graph.OPSET_FUNC_MAP[self.opset]:
return lambda *args, **kwargs: Graph.OPSET_FUNC_MAP[self.opset][name](self, *args, **kwargs)
if name in Graph.GLOBAL_FUNC_MAP:
return lambda *args, **kwargs: Graph.GLOBAL_FUNC_MAP[name](self, *args, **kwargs)
G_LOGGER.error("No function: {:} registered for opset: {:}".format(name, self.opset))
raise err
def __setattr__(self, name, value):
# We don't want graph inputs/outputs to be SynchronizedLists
if name in ["inputs", "outputs"]:
value = list(value)
return super().__setattr__(name, value)
def __eq__(self, other: "Graph"):
nodes_match = len(self.nodes) == len(other.nodes) and all(
[node == other_node for node, other_node in zip(self.nodes, other.nodes)]
)
inputs_match = len(self.inputs) == len(other.inputs) and all(
[inp == other_inp for inp, other_inp in zip(self.inputs, other.inputs)]
)
outputs_match = len(self.outputs) == len(other.outputs) and all(
[out == other_out for out, other_out in zip(self.outputs, other.outputs)]
)
return nodes_match and inputs_match and outputs_match
def node_ids(self):
"""
Returns a context manager that supplies unique integer IDs for Nodes in the Graph.
For example:
::
with graph.node_ids():
assert graph.nodes[0].id != graph.nodes[1].id
Returns:
NodeIDAdder: A context manager that supplies unique integer IDs for Nodes.
"""
return NodeIDAdder(self)
def _get_node_id(self, node):
try:
return node.id
except AttributeError:
G_LOGGER.critical(
"Encountered a node not in the graph:\n{:}.\n\n"
"To fix this, please append the node to this graph's `nodes` attribute.".format(node)
)
# A tensor is local if it is produced in this graph, or is explicitly a graph input.
def _local_tensors(self):
local_tensors = {t.name: t for node in self.nodes for t in node.outputs if not t.is_empty()}
local_tensors.update({t.name: t for t in self.inputs})
local_tensors.update({t.name: t for t in self.tensors().values() if isinstance(t, Constant)})
return local_tensors
# Returns tensors used by this graph which are not present in the graph.
# These may come from an outer graph for example.
def _foreign_tensors(self):
local_tensors = self._local_tensors()
foreign_tensors = {}
def is_foreign_tensor(tensor):
return tensor.name not in local_tensors
for node in self.nodes:
foreign_tensors.update({t.name: t for t in node.inputs if is_foreign_tensor(t)})
for attr in node.attrs.values():
if isinstance(attr, Graph):
subgraph_foreign_tensors = attr._foreign_tensors()
# Some of the foreign tensors from a subgraph may come from this graph.
subgraph_foreign_tensors = {
t.name: t for t in subgraph_foreign_tensors.values() if is_foreign_tensor(t)
}
foreign_tensors.update(subgraph_foreign_tensors)
return foreign_tensors
def _get_used_node_ids(self):
local_tensors = self._local_tensors()
# We only want to consider tensors that are local to this graph, because we can't
# remove external tensors (e.g. from outer graphs) anyway.
class IgnoreDupAndForeign(object):
def __init__(self, initial_tensors=None):
tensors = misc.default_value(initial_tensors, [])
self.seen_tensors = set([tensor.name for tensor in tensors])
def __call__(self, tensor):
# Returns True if a tensor should be included,
# False if it should be filtered out.
if tensor.is_empty():
return True
elif tensor.name not in local_tensors:
return False
elif tensor.name not in self.seen_tensors:
self.seen_tensors.add(tensor.name)
return True
return False
# Traverse backwards from outputs to find all used nodes.
ignore_tensors = IgnoreDupAndForeign()
used_tensors = list(filter(ignore_tensors, self.outputs))
used_node_ids = set()
index = 0
while index < len(used_tensors):
used_tensor = used_tensors[index]
index += 1
for node in used_tensor.inputs:
# Must cast to list here, otherwise node_used_tensors will be SynchronizedList!
node_used_tensors = list(node.inputs)
# If a node includes a subgraph, get any tensors that it uses from the outer graph.
for attr in node.attrs.values():
if isinstance(attr, Graph):
node_used_tensors += list(attr._foreign_tensors().values())
used_node_ids.add(self._get_node_id(node))
used_tensors.extend(filter(ignore_tensors, node_used_tensors))
return used_node_ids, used_tensors
def cleanup(self, remove_unused_node_outputs=False, recurse_subgraphs=True, remove_unused_graph_inputs=False):
"""
Removes unused nodes and tensors from the graph.
A node or tensor is considered unused if it does not contribute to any of the graph outputs.
Additionally, any producer nodes of graph input tensors, as well as consumer nodes of graph output
tensors that are not in the graph, are removed from the graph.
*Note: This function will never modify graph output tensors.*
Args:
remove_unused_node_outputs (bool): Whether to remove unused output tensors of nodes. This will never remove
empty-tensor (i.e. optional, but omitted) outputs. Defaults to False.
recurse_subgraphs (bool):
Whether to recursively cleanup subgraphs.
Defaults to True.
remove_unused_graph_inputs (bool):
Whether to remove unused graph inputs.
Defaults to False.
Returns:
self
"""
def cleanup_subgraphs():
for node in self.nodes:
for attr in node.attrs.values():
if isinstance(attr, Graph):
attr.cleanup(
remove_unused_node_outputs=remove_unused_node_outputs,
remove_unused_graph_inputs=remove_unused_graph_inputs,
)
if recurse_subgraphs:
cleanup_subgraphs()
G_LOGGER.verbose("Cleaning up {:}".format(self.name))
with self.node_ids():
# Graph input producers must be removed first so used_node_ids is correct.
for inp in self.inputs:
inp.inputs.clear()
used_node_ids, used_tensors = self._get_used_node_ids()
inputs = []
for inp in self.inputs:
if inp in used_tensors or not remove_unused_graph_inputs:
inputs.append(inp)
else:
G_LOGGER.ultra_verbose("Removing unused input: {:}".format(inp))
self.inputs = inputs
nodes = []
for node in self.nodes:
if self._get_node_id(node) in used_node_ids:
nodes.append(node)
else:
node.inputs.clear()
node.outputs.clear()
G_LOGGER.ultra_verbose("Removing unused node: {:}".format(node))
# Remove any hanging tensors - tensors without outputs
if remove_unused_node_outputs:
graph_output_names = set([tensor.name for tensor in self.outputs])
for node in nodes:
def is_hanging_tensor(tensor):
return (
not tensor.is_empty() and len(tensor.outputs) == 0 and tensor.name not in graph_output_names
)
to_remove = [out for out in node.outputs if is_hanging_tensor(out)]
for out in to_remove:
if out in node.outputs:
node.outputs.remove(out)
self.nodes = nodes
return self
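# Usage sketch (illustrative, not part of the upstream file): after rewiring or removing
# nodes, cleanup() drops anything that no longer contributes to the graph outputs.
# `graph` and `intermediate_tensor` are assumed to exist already.
#
#     graph.outputs = [intermediate_tensor]
#     graph.cleanup(remove_unused_graph_inputs=True)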
def toposort(self, recurse_subgraphs=True):
"""
Topologically sort the graph in place.
Args:
recurse_subgraphs (bool):
Whether to recursively topologically sort subgraphs.
Defaults to True.
Returns:
self
"""
if recurse_subgraphs:
for node in self.nodes:
for attr in node.attrs.values():
if isinstance(attr, Graph):
attr.toposort()
G_LOGGER.debug("Topologically sorting {:}".format(self.name))
# Keeps track of a node and its level in the graph hierarchy.
# 0 corresponds to an input node, N corresponds to a node with N layers of inputs.
class HierarchyDescriptor(object):
def __init__(self, node=None, level=None):
self.node = node
self.level = level
def __lt__(self, other):
return self.level < other.level
hierarchy_levels = {} # Dict[int, HierarchyDescriptor]
local_tensors = self._local_tensors()
def get_hierarchy_level(node):
# Return all local nodes that contribute to this node.
def get_input_nodes(node):
inputs = {}
for tensor in node.inputs:
if tensor.name in local_tensors:
for inp_node in tensor.inputs:
inputs[self._get_node_id(inp_node)] = inp_node
return inputs.values()
if self._get_node_id(node) in hierarchy_levels:
return hierarchy_levels[self._get_node_id(node)].level
# The level of a node is the level of its highest input + 1.
try:
max_input_level = max([get_hierarchy_level(input_node) for input_node in get_input_nodes(node)] + [-1])
except RecursionError:
G_LOGGER.critical("Cycle detected in graph! Are there tensors with duplicate names in the graph?")
return max_input_level + 1
with self.node_ids():
for node in self.nodes:
hierarchy_levels[self._get_node_id(node)] = HierarchyDescriptor(node, level=get_hierarchy_level(node))
self.nodes = [hd.node for hd in sorted(hierarchy_levels.values())]
return self
def tensors(self, check_duplicates=False):
"""
Creates a tensor map of all the tensors used by this graph by walking over all nodes. Empty tensors are omitted from this map.
Tensors are guaranteed to be in order of the nodes in the graph. Hence, if the graph is topologically sorted, the tensor map will be too.
Args:
check_duplicates (bool): Whether to fail if multiple tensors with the same name are encountered.
Raises:
OnnxGraphSurgeonException: If check_duplicates is True and multiple distinct tensors in the graph share the same name.
Returns:
OrderedDict[str, Tensor]: A mapping of tensor names to tensors.
"""
tensor_map = OrderedDict()
def add_to_tensor_map(tensor):
if not tensor.is_empty():
if tensor.name in tensor_map and not (tensor_map[tensor.name] is tensor):
msg = "Found distinct tensors that share the same name:\n[id: {:}] {:}\n[id: {:}] {:}\n".format(
id(tensor_map[tensor.name]),
tensor_map[tensor.name],
id(tensor),
tensor,
)
msg += (
"Note: Producer node(s) of first tensor:\n{:}\nProducer node(s) of second tensor:\n{:}".format(
tensor_map[tensor.name].inputs,
tensor.inputs,
)
)
if check_duplicates:
G_LOGGER.critical(msg)
G_LOGGER.warning(msg)
tensor_map[tensor.name] = tensor
# I/O tensors may not be attached to nodes.
for io_tensor in self.inputs:
add_to_tensor_map(io_tensor)
for node in self.nodes:
for tensor in node.inputs + node.outputs:
add_to_tensor_map(tensor)
for io_tensor in self.outputs:
add_to_tensor_map(io_tensor)
return tensor_map
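# Usage sketch (illustrative, not part of the upstream file): tensors() returns a
# name-to-Tensor mapping, which is convenient for looking up a tensor to modify.
# The tensor name below is a placeholder.
#
#     tmap = graph.tensors()
#     t = tmap["my_tensor_name"]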
def fold_constants(self, fold_shapes=True, recurse_subgraphs=True, partitioning=None, error_ok=True):
"""
Folds constants in-place in the graph. The graph must be topologically sorted prior to
calling this function (see `toposort()`).
This function will not remove constants after folding them. In order to get rid of
these hanging nodes, you can run the `cleanup()` function.
*Note: Due to how this function is implemented, the graph must be exportable to ONNX,
and evaluable in ONNX-Runtime. Additionally, ONNX-Runtime must be installed.*
Args:
fold_shapes (bool):
Whether to fold `Shape` nodes in the graph.
This requires shapes to be inferred in the graph, and can only fold
static shapes.
Defaults to True.
recurse_subgraphs (bool):
Whether to recursively fold constants in subgraphs.
Defaults to True.
partitioning (Union[str, None]):
Whether/How to partition the graph so that errors in folding one
part of a model do not affect other parts. Available modes are:
- None: Do not partition the graph. If inference fails, no constants are folded.
- "basic": Partition the graph. If inference fails in one partition, other partitions will
remain unaffected.
- "recursive": Parition the graph recursively. If inference fails in a partition, the partition
will be further paritioned.
Defaults to None.
error_ok (bool):
Whether inference errors should be suppressed.
When this is disabled, any errors encountered during inference will be re-raised.
Defaults to True.
Returns:
self
"""
import onnxruntime as rt
from onnx_graphsurgeon.exporters.onnx_exporter import export_onnx
PARTITIONING_MODES = [None, "basic", "recursive"]
if partitioning not in PARTITIONING_MODES:
G_LOGGER.critical("Argument for parameter 'partitioning' must be one of: {:}".format(PARTITIONING_MODES))
# First perform shape tensor cast elision on the graph prior to other constant folding
# Search for Cast(s) (from int -> float) -> intermediate operator (with float constants) -> Cast(s) (back to int)
# This pattern is problematic for TensorRT since these operations may be performed on Shape Tensors, which
# are not allowed to be floating point type. Attempt to fold the pattern here
VALID_CAST_ELISION_OPS = ["Add", "Sub", "Mul", "Div", "Max", "Min", "Equal", "Greater", "Less", "Concat"]
def run_cast_elision(node):
import onnx
if node.op not in VALID_CAST_ELISION_OPS:
return
# Get list of input nodes
inp_casts = [
inp_node
for inp_tensor in node.inputs
for inp_node in inp_tensor.inputs
if inp_node.op == "Cast" and inp_node.attrs["to"] == 1
]
# No cast nodes found, return early
if not inp_casts:
return
# Ensure that all input cast nodes are casting from the same type
final_type = None
for inp in inp_casts:
curr_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[inp.inputs[0].dtype]
final_type = final_type or curr_type
if final_type != curr_type:
return
# Check validity and get list of output nodes
out_casts = []
for out_tensor in node.outputs:
for out_node in out_tensor.outputs:
if out_node.op != "Cast" or out_node.attrs["to"] not in [6, 7]:
# Can exit early if any of the output nodes are not valid casts
return
out_casts.append(out_node)
# Check that all final cast types are the same.
curr_type = out_node.attrs["to"]
if final_type != curr_type:
return
# If all checks passed - update constant values.
for inp in node.inputs:
if isinstance(inp, Constant):
inp.values = inp.values.astype(onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[final_type])
# "Remove" casts nodes by changing I/O node operators to Identity. Update corresponding tensor dtypes as well
def replace_with_identity(cast_node, change_dtype):
cast_node.op = "Identity"
cast_node.attrs = {}
getattr(cast_node, change_dtype)[0].dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[final_type]
G_LOGGER.debug("Cast node {:} elided".format(cast_node.name))
for inp in inp_casts:
replace_with_identity(inp, change_dtype="outputs")
for out in out_casts:
replace_with_identity(out, change_dtype="inputs")
# Perform shape tensor cast elision:
if fold_shapes:
G_LOGGER.debug("Performing shape tensor cast elision in {:}".format(self.name))
try:
for node in self.nodes:
run_cast_elision(node)
except Exception as err:
if not error_ok:
raise err
G_LOGGER.warning("'{:}' routine failed with: {:}".format("Shape tensor cast elision", err))
G_LOGGER.debug("Folding constants in {:}".format(self.name))
graph_clone = self.copy()
clone_tensors = graph_clone.tensors()
# We find graph constants in two passes:
# Pass 1 finds all Constant tensors in the graph, then walks over their outputs.
# Pass 2 searches for Shape nodes that have variable inputs (i.e. not marked const in pass 1)
# and turns them into Constants iff the input has a statically known shape.
def update_foldable_outputs(graph_constants):
def is_foldable(node):
def all_tensors_const(tensors):
return all([t.name in graph_constants for t in tensors])
if not all_tensors_const(node.inputs):
return False
all_subgraph_foreign_tensors_const = True
for attr in node.attrs.values():
if isinstance(attr, Graph):
foreign_tensors = attr._foreign_tensors().values()
all_subgraph_foreign_tensors_const &= all_tensors_const(foreign_tensors)
return all_subgraph_foreign_tensors_const
# Walks along the outputs of graph_constants to see if they can also be computed statically.
# Since the graph is topologically sorted, this should find all constant nodes in the graph.
for node in graph_clone.nodes:
if is_foldable(node):
graph_constants.update({out.name: out for out in node.outputs})
return graph_constants
# Pass 1: Non-shape Constant Folding
graph_constants = {name: tensor for name, tensor in clone_tensors.items() if isinstance(tensor, Constant)}
# Replaces outputs of Constant nodes with constant tensors
for tensor in clone_tensors.values():
if len(tensor.inputs) == 1:
node = tensor.inputs[0]
if node.op == "Constant":
graph_constants[tensor.name] = tensor.to_constant(
node.attrs["value"]._values
) # Using ._values avoids copying
graph_constants[tensor.name].inputs.clear()
graph_constants = update_foldable_outputs(graph_constants)
# Pass 2: Shape Folding
def get_producer(tensor, op):
"""
Get the producer of the specified tensor iff it matches op
"""
if len(tensor.inputs) != 1:
return None
node = tensor.inputs[0]
if node.op != op:
return None
return node
def get_input(node, index=0):
"""
Get the input tensor of a node iff the input tensor is not already marked a graph constant.
"""
if node is None:
return None
inp = node.inputs[index]
# If the input was already found to be a constant, it will be folded anyway.
if inp.name in graph_constants:
return None
return inp
def get_scalar_value(tensor):
"""
Gets the scalar value of a tensor with a single item
"""
if not tensor.shape:
return tensor.values
else:
return list(tensor.values)[0]
def fold_shape(tensor):
inp = get_input(get_producer(tensor, "Shape"))
if inp is None:
return None
if inp.shape is None or misc.is_dynamic_shape(inp.shape):
return None
return np.array(inp.shape, dtype=np.int64)
def fold_shape_gather(tensor):
gather = get_producer(tensor, "Gather")
if gather is None:
return None
data = gather.inputs[0]
indices_tensor = gather.inputs[1]
inp = get_input(get_producer(data, "Shape"))
if inp is None or inp.shape is None:
return None
if not isinstance(indices_tensor, Constant):
return None
indices = indices_tensor.values
if not indices.shape: # Scalar-case
shape = inp.shape[int(indices)]
if misc.is_dynamic_dimension(shape):
return None
else:
shape = [inp.shape[index] for index in indices]
if misc.is_dynamic_shape(shape):
return None
return np.array(shape, dtype=np.int64)
def fold_shape_slice(tensor):
slice = get_producer(tensor, "Slice")
if slice is None:
return None
data = slice.inputs[0]
if len(slice.inputs) >= 3:
starts, ends = slice.inputs[1:3]
if any(not isinstance(t, Constant) for t in [starts, ends]):
return None
starts, ends = get_scalar_value(starts), get_scalar_value(ends)
elif "starts" in slice.attrs and "ends" in slice.attrs:
starts, ends = slice.attrs["starts"][0], slice.attrs["ends"][0]
else:
return None
inp = get_input(get_producer(data, "Shape"))
if inp is None or inp.shape is None:
return None
# For shape tensors, we can only slice on the 0th dimension.
if len(slice.inputs) > 3:
axes = slice.inputs[3]
if not isinstance(axes, Constant):
return None
if get_scalar_value(axes) != 0:
return None
elif "axes" in slice.attrs:
if slice.attrs["axes"][0] != 0:
return None
steps = 1
if len(slice.inputs) > 4:
steps = slice.inputs[4]
if not isinstance(steps, Constant):
return None
steps = get_scalar_value(steps)
elif "steps" in slice.attrs:
steps = slice.attrs["steps"][0]
shape = inp.shape[starts:ends:steps]
if misc.is_dynamic_shape(shape):
return None
return np.array(shape, dtype=np.int64)
if fold_shapes:
# NOTE: The order of shape folding passes is important to maximize how much we fold (phase-ordering problem).
SHAPE_FOLD_FUNCS = [fold_shape_gather, fold_shape_slice, fold_shape]
for shape_fold_func in SHAPE_FOLD_FUNCS:
try:
for tensor in clone_tensors.values():
shape_of = shape_fold_func(tensor)
if shape_of is not None:
G_LOGGER.ultra_verbose("Folding shape tensor: {:} to: {:}".format(tensor.name, shape_of))
graph_constants[tensor.name] = tensor.to_constant(shape_of)
graph_constants[tensor.name].inputs.clear()
except Exception as err:
if not error_ok:
raise err
G_LOGGER.warning("'{:}' routine failed with:\n{:}".format(shape_fold_func.__name__, err))
else:
graph_constants = update_foldable_outputs(graph_constants)
def partition_and_infer(subgraph):
def get_out_node_ids():
# Gets the final output nodes - producer nodes of graph output tensors without other outputs.
with subgraph.node_ids():
out_node_ids = set()
for out in subgraph.outputs:
if not out.outputs and not isinstance(out, Constant):
for n_inp in out.inputs:
out_node_ids.add(n_inp.id)
return out_node_ids
# Compute each output node in a separate subgraph.
out_node_ids = get_out_node_ids()
constant_values = {}
for index in out_node_ids: # Have to use index since 'node' is not in part
part = subgraph.copy()
out_node = part.nodes[index]
part.outputs = out_node.outputs
part.name = "Folding: {:}".format([out.name for out in part.outputs])
part.cleanup(remove_unused_graph_inputs=True)
names = [out.name for out in part.outputs]
try:
# Determining types is not trivial, and ONNX-RT does its own type inference.
sess = rt.InferenceSession(export_onnx(part, do_type_check=False).SerializeToString())
values = sess.run(names, {})
except Exception as err:
G_LOGGER.warning("Inference failed for subgraph: {:}. Note: Error was:\n{:}".format(part.name, err))
if partitioning == "recursive":
G_LOGGER.verbose("Attempting to recursively partition subgraph")
# Partition failed, peel off last node.
# We only need to remove one node, so avoid doing an expensive call to cleanup()
part.outputs = out_node.inputs
del part.nodes[part.nodes.index(out_node)]
out_node.outputs.clear()
out_node.inputs.clear()
else:
G_LOGGER.info("You may see better results if you set partitioning='recursive'")
if not error_ok:
raise err
constant_values.update(partition_and_infer(part))
else:
constant_values.update({name: val for name, val in zip(names, values)})
return constant_values
# Next, evaluate the foldable variables with ONNX-Runtime
# Only evaluate foldable values that have non-foldable outputs or are graph outputs.
# Otherwise, if all the outputs are foldable, then we can just evaluate the outputs directly.
def should_eval_foldable(tensor):
non_const = not isinstance(tensor, Constant)
is_graph_output = not tensor.outputs
has_non_foldable_outputs = any(out.name not in graph_constants for out in tensor.outputs)
return non_const and (is_graph_output or has_non_foldable_outputs)
graph_clone.outputs = [t for t in graph_constants.values() if should_eval_foldable(t)]
G_LOGGER.debug("Folding tensors: {:}".format(graph_clone.outputs))
graph_clone.cleanup(remove_unused_graph_inputs=True)
# Using ._values avoids a deep copy of the values.
constant_values = {
name: tensor._values for name, tensor in graph_constants.items() if isinstance(tensor, Constant)
}
if graph_clone.outputs:
if partitioning:
constant_values.update(partition_and_infer(graph_clone))
else:
names = [t.name for t in graph_clone.outputs]
try:
sess = rt.InferenceSession(export_onnx(graph_clone, do_type_check=False).SerializeToString())
values = sess.run(names, {})
constant_values.update({name: val for name, val in zip(names, values)})
except Exception as err:
G_LOGGER.warning(
"Inference failed. You may want to try enabling partitioning to see better results. "
"Note: Error was:\n{:}".format(err)
)
G_LOGGER.verbose("Note: Graph was:\n{:}".format(graph_clone))
if not error_ok:
raise
elif not constant_values:
G_LOGGER.info(
"Could not find any nodes in this graph ({:}) that can be folded. "
"This could mean that constant folding has already been run on this graph. "
"Skipping.".format(self.name)
)
# Finally, replace the Variables in the original graph with constants.
if constant_values:
graph_tensors = self.tensors()
for name, values in constant_values.items():
tensor = graph_tensors[name]
if not isinstance(tensor, Constant):
tensor.to_constant(values)
tensor.inputs.clear() # Constants do not need inputs
# Folding subgraphs after the outer graph can lead to better folding.
def fold_subgraphs():
for node in self.nodes:
for attr in node.attrs.values():
if isinstance(attr, Graph):
attr.fold_constants(fold_shapes=fold_shapes, partitioning=partitioning)
if recurse_subgraphs:
fold_subgraphs()
return self
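# Usage sketch (illustrative, not part of the upstream file): as the docstring notes, the
# graph should be topologically sorted before folding, cleanup() removes the hanging
# nodes afterwards, and onnxruntime must be installed. `graph` is an assumed Graph.
#
#     graph.toposort()
#     graph.fold_constants().cleanup()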
def _generate_name(self, prefix):
name = "{}_{}".format(prefix, self.name_idx)
self.name_idx += 1
return name
def layer(self, inputs=[], outputs=[], *args, **kwargs):
"""
Creates a node, adds it to this graph, and optionally creates its input and output tensors.
The input and output lists can include various different types:
- ``Tensor``: Any Tensors provided will be used as-is in the inputs/outputs of the node created.
- ``str``:
If a string is provided, this function will generate a new tensor using
the string to generate a name. It will append an index to the end of the provided string
to attempt to avoid duplicate tensor names, but since this doesn't guarantee that the name will
be unique, you should try to ensure that the string provided is as unique as possible.
To avoid problems with duplicate names, you can generate names yourself and provide ``Tensor`` s.
- ``numpy.ndarray``:
If a NumPy array is provided, this function will generate a Constant tensor
using the name prefix: "onnx_graphsurgeon_constant"
- ``Union[List[Number], Tuple[Number]]``:
If a list or tuple of numbers (int or float) is provided, this function will
generate a Constant tensor using the name prefix: "onnx_graphsurgeon_lst_constant".
The values of the tensor will be a 1D array containing the specified values.
The datatype will be either `np.float32` or `np.int64`.
Args:
inputs (List[Union[Tensor, str, numpy.ndarray]]): The list of inputs
outputs (List[Union[Tensor, str, numpy.ndarray]]): The list of outputs
args/kwargs: These are passed directly to the constructor of Node
Returns:
List[Tensor]: The output tensors of the node
"""
def process_io(io):
new_io = []
for elem in io:
if isinstance(elem, Tensor):
new_io.append(elem)
elif isinstance(elem, str):
tensor = Variable(name=self._generate_name(elem))
new_io.append(tensor)
elif isinstance(elem, np.ndarray):
new_io.append(Constant(name=self._generate_name("onnx_graphsurgeon_constant"), values=elem))
elif isinstance(elem, list) or isinstance(elem, tuple):
dtype = np.float32 if any([isinstance(x, float) for x in elem]) else np.int64
arr = np.array(elem, dtype=dtype)
new_io.append(Constant(name=self._generate_name("onnx_graphsurgeon_lst_constant"), values=arr))
else:
G_LOGGER.critical(
"Unrecognized type passed to Graph.layer: {:}.\n"
"\tHint: Did you forget to unpack a list with `*`?\n"
"\tPlease use Tensors, strings, or NumPy arrays.".format(elem)
)
return new_io
inputs = process_io(inputs)
outputs = process_io(outputs)
if "name" not in kwargs:
kwargs["name"] = self._generate_name("onnx_graphsurgeon_node")
node = Node(*args, **kwargs, inputs=inputs, outputs=outputs)
self.nodes.append(node)
return node.outputs
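# Usage sketch (illustrative, not part of the upstream file): layer() accepts Tensors,
# strings, NumPy arrays, or lists of numbers, as documented above. `x` is an assumed
# existing Tensor and "add_out" is a placeholder name prefix.
#
#     import numpy as np
#
#     [add_out] = graph.layer(op="Add",
#                             inputs=[x, np.ones((1, 3), dtype=np.float32)],
#                             outputs=["add_out"])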
def copy(self, tensor_map: "OrderedDict[str, Tensor]" = None):
"""
Copy the graph.
This makes copies of all nodes and tensors in the graph, but will not
do a deep-copy of weights or attributes (with the exception of ``Graph``
attributes, which will be copied using their ``copy`` method).
Args:
tensor_map (OrderedDict[str, Tensor]):
A mapping of tensor names to tensors from the outer graph.
This should be ``None`` if this is the outer-most graph.
Returns:
Graph: A copy of the graph.
"""
# First, reconstruct each tensor in the graph, but with no inputs or outputs
tensor_map = copy.copy(misc.default_value(tensor_map, {}))
local_tensor_copies = {}
# When we're cloning a subgraph by itself, we need to use `tensors()` to get all
# required tensors - even those produced by outer graphs.
local_tensor_copies.update({n: t.copy() for n, t in self.tensors().items()})
# However, we should prioritize copies already made by the outer graph.
local_tensor_copies.update(tensor_map)
# And locally produced tensors should take precedence over everything else.
local_tensor_copies.update({n: t.copy() for n, t in self._local_tensors().items()})
def get_tensor(name):
if not name:
return Variable.empty()
return local_tensor_copies[name]
# Next, copy nodes, and update inputs/outputs
new_nodes = []
for node in self.nodes:
new_node = node.copy(
inputs=[get_tensor(inp.name) for inp in node.inputs],
outputs=[get_tensor(out.name) for out in node.outputs],
tensor_map=local_tensor_copies,
)
new_nodes.append(new_node)
new_graph_inputs = [get_tensor(inp.name) for inp in self.inputs]
new_graph_outputs = [get_tensor(out.name) for out in self.outputs]
return Graph(
nodes=new_nodes,
inputs=new_graph_inputs,
outputs=new_graph_outputs,
name=copy.copy(self.name),
doc_string=copy.copy(self.doc_string),
opset=copy.copy(self.opset),
import_domains=self.import_domains,
)
def __str__(self):
nodes_str = "\n".join([str(node) for node in self.nodes])
return "Graph {:} (Opset: {:})\nInputs: {:}\nNodes:\n{:}\nOutputs: {:}".format(
self.name, self.opset, self.inputs, nodes_str, self.outputs
)
def __repr__(self):
return self.__str__()
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/graph.py |
TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/__init__.py |
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.logger.logger import G_LOGGER
from onnx_graphsurgeon.util import misc
from typing import Set, Sequence, Union
import numpy as np
class Tensor(object):
"""Abstract base class for tensors in a graph"""
DYNAMIC = -1
def __init__(self):
"""
**This class is abstract and cannot be constructed directly.**
"""
raise NotImplementedError("Tensor is an abstract class")
def __setattr__(self, name, value):
if name in ["inputs", "outputs"]:
try:
getattr(self, name).clear()
getattr(self, name).extend(value)
except AttributeError:
super().__setattr__(name, value)
else:
super().__setattr__(name, value)
def is_empty(self):
"""
Returns whether this tensor is considered empty in the graph.
*Note: 'Empty' here refers to the name of the tensor, which is omitted for
optional tensors, NOT the shape of the tensor*
Returns:
bool: Whether the tensor is empty, meaning that it is used for an omitted optional input or output.
"""
return self.name == ""
def to_constant(self, values: np.ndarray, data_location: int = None):
"""
Modifies this tensor in-place to convert it to a Constant. This means that all consumers/producers of the tensor will see the update.
Args:
values (np.ndarray): The values in this tensor
data_location (int):
An enum value indicating the location where the tensor data is stored.
Generally, this will come from onnx.TensorProto.DataLocation.
Returns:
self
"""
self.__class__ = Constant
self._values = values
self.data_location = data_location
return self
def to_variable(self, dtype: np.dtype = None, shape: Sequence[Union[int, str]] = []):
"""
Modifies this tensor in-place to convert it to a Variable. This means that all consumers/producers of the tensor will see the update.
Args:
dtype (np.dtype): The data type of the tensor.
shape (Sequence[int]): The shape of the tensor.
Returns:
self
"""
self.__class__ = Variable
self.dtype = dtype
self.shape = shape
return self
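# Usage sketch (illustrative, not part of the upstream file): converting a tensor in
# place so that every producer/consumer sees the change. `t` is an assumed Variable
# that already belongs to a graph.
#
#     import numpy as np
#
#     t.to_constant(np.zeros((1, 4), dtype=np.float32))  # t is now a Constant everywhere it is used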
def i(self, tensor_idx=0, producer_idx=0):
"""
Convenience function to get an input tensor of one of this tensor's input nodes.
Note that the parameters are swapped compared to the o() function; this is because tensors are likely to have only a single producer
For example:
::
assert tensor.i() == tensor.inputs[0].inputs[0]
assert tensor.i(1, 2) == tensor.inputs[2].inputs[1]
Args:
tensor_idx (int): The index of the input tensor of the input node. Defaults to 0.
producer_idx (int): The index of the producer node of the input tensor, if the tensor has multiple producers. Defaults to 0.
Returns:
Tensor: The specified producer (input) tensor.
"""
return self.inputs[producer_idx].inputs[tensor_idx]
def o(self, consumer_idx=0, tensor_idx=0):
"""
Convenience function to get an output tensor of one of this tensor's output nodes.
For example:
::
assert tensor.o() == tensor.outputs[0].outputs[0]
assert tensor.o(2, 1) == tensor.outputs[2].outputs[1]
Args:
consumer_idx (int): The index of the consumer node of this tensor. Defaults to 0.
tensor_idx (int): The index of the output tensor of the node, if the node has multiple outputs. Defaults to 0.
Returns:
Tensor: The specified consumer (output) tensor
"""
return self.outputs[consumer_idx].outputs[tensor_idx]
def __str__(self):
return "{:} ({:}): (shape={:}, dtype={:})".format(type(self).__name__, self.name, self.shape, self.dtype)
def __repr__(self): # Hack to make logging output pretty.
return self.__str__()
def __eq__(self, other):
"""
Perform a check to see if two tensors are equal.
Tensors are considered equal if they share the same name. A Graph must not include Tensors with duplicate names.
"""
return self.name == other.name
class Variable(Tensor):
@staticmethod
def empty():
return Variable(name="")
def __init__(self, name: str, dtype: np.dtype = None, shape: Sequence[Union[int, str]] = None):
"""
Represents a Tensor whose value is not known until inference-time.
Args:
name (str): The name of the tensor.
dtype (numpy.dtype): The data type of the tensor.
shape (Sequence[Union[int, str]]): The shape of the tensor. This may contain strings if the model uses dimension parameters.
"""
self.name = name
self.inputs = misc.SynchronizedList(self, field_name="outputs", initial=[])
self.outputs = misc.SynchronizedList(self, field_name="inputs", initial=[])
self.dtype = dtype
self.shape = misc.default_value(shape, None)
def to_constant(self, values: np.ndarray):
del self.dtype
del self.shape
return super().to_constant(values)
def copy(self):
"""
Makes a shallow copy of this tensor, omitting input and output information.
Note: Generally, you should only ever make a copy of a Graph.
"""
return Variable(self.name, self.dtype, self.shape)
class LazyValues(object):
"""
A special object that represents constant tensor values that should be lazily loaded.
"""
def __init__(self, tensor):
"""
Args:
tensor (onnx.TensorProto): The ONNX tensor that this instance should lazily load.
"""
from onnx_graphsurgeon.importers.onnx_importer import get_onnx_tensor_shape, get_onnx_tensor_dtype
self.tensor = tensor
self.shape = get_onnx_tensor_shape(self.tensor)
self.dtype = get_onnx_tensor_dtype(self.tensor)
def load(self):
"""
Load a numpy array from the underlying tensor values.
Returns:
np.array: A numpy array containing the values of the tensor.
"""
import onnx
import onnx.numpy_helper
return np.array(onnx.numpy_helper.to_array(self.tensor))
def __str__(self):
return "LazyValues (shape={:}, dtype={:})".format(self.shape, self.dtype)
def __repr__(self): # Hack to make logging output pretty.
return self.__str__()
class Constant(Tensor):
def __init__(self, name: str, values: Union[np.ndarray, LazyValues], data_location: int = None):
"""
Represents a Tensor whose value is known.
Args:
name (str): The name of the tensor.
values (numpy.ndarray): The values in this tensor, in the form of a NumPy array.
data_location (int):
An enum value indicating the location where the tensor data is stored.
Generally, this will come from onnx.TensorProto.DataLocation.
"""
self.name = name
self.inputs = misc.SynchronizedList(self, field_name="outputs", initial=[])
self.outputs = misc.SynchronizedList(self, field_name="inputs", initial=[])
if not isinstance(values, np.ndarray) and not isinstance(values, LazyValues):
G_LOGGER.critical(
"Provided `values` argument is not a NumPy array or a LazyValues instance. "
"Please provide a NumPy array or LazyValues instance to construct a Constant. "
"Note: Provided `values` parameter was: {:}".format(values)
)
self._values = values
self.data_location = data_location
def to_variable(self, dtype: np.dtype = None, shape: Sequence[Union[int, str]] = []):
del self._values
return super().to_variable(dtype, shape)
def copy(self):
"""
Makes a shallow copy of this tensor, omitting input and output information.
Note: Generally, you should only ever make a copy of a Graph.
"""
return Constant(self.name, self._values)
@property
def values(self):
# Load values when they are first accessed
if isinstance(self._values, LazyValues):
self._values = self._values.load()
return self._values
@values.setter
def values(self, values: Union[np.ndarray, LazyValues]):
self._values = values
@property
def shape(self):
return self._values.shape
@property
def dtype(self):
return self._values.dtype.type
def __repr__(self): # Hack to make logging output pretty.
ret = self.__str__()
ret += "\n{:}".format(self._values)
return ret
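# Usage sketch (illustrative, not part of the upstream file): constructing tensors
# directly. A Variable describes a tensor whose value is only known at inference time,
# while a Constant wraps known values. The names and shapes below are placeholders.
#
#     import numpy as np
#
#     x = Variable(name="x", dtype=np.float32, shape=(1, 3, 224, 224))
#     w = Constant(name="w", values=np.ones((3, 3), dtype=np.float32))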
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/tensor.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.logger.logger import G_LOGGER
from onnx_graphsurgeon.ir.tensor import Tensor
from onnx_graphsurgeon.util import misc
from collections import OrderedDict
from typing import List, Dict
class Node(object):
def __init__(
self,
op: str,
name: str = None,
attrs: Dict[str, object] = None,
inputs: List["Tensor"] = None,
outputs: List["Tensor"] = None,
):
"""
A node represents an operation in a graph, and consumes zero or more Tensors, and produces zero or more Tensors.
Args:
op (str): The operation this node performs.
name (str): The name of this node.
attrs (Dict[str, object]): A dictionary that maps attribute names to their values.
inputs (List[Tensor]): A list of zero or more input Tensors.
outputs (List[Tensor]): A list of zero or more output Tensors.
"""
self.op = op
self.name = misc.default_value(name, "")
self.attrs = misc.default_value(attrs, OrderedDict())
self.inputs = misc.SynchronizedList(self, field_name="outputs", initial=misc.default_value(inputs, []))
self.outputs = misc.SynchronizedList(self, field_name="inputs", initial=misc.default_value(outputs, []))
def i(self, tensor_idx=0, producer_idx=0):
"""
Convenience function to get a producer node of one of this node's input tensors.
Note that the parameters are swapped compared to the o() function; this is because tensors are likely to have only a single producer
For example:
::
assert node.i() == node.inputs[0].inputs[0]
assert node.i(1, 2) == node.inputs[1].inputs[2]
Args:
tensor_idx (int): The index of the input tensor of this node. Defaults to 0.
producer_idx (int): The index of the producer of the input tensor, if the tensor has multiple producers. Defaults to 0.
Returns:
Node: The specified producer (input) node.
"""
return self.inputs[tensor_idx].inputs[producer_idx]
def o(self, consumer_idx=0, tensor_idx=0):
"""
Convenience function to get a consumer node of one of this node's output tensors.
For example:
::
assert node.o() == node.outputs[0].outputs[0]
assert node.o(2, 1) == node.outputs[1].outputs[2]
Args:
consumer_idx (int): The index of the consumer of the output tensor. Defaults to 0.
tensor_idx (int): The index of the output tensor of this node, if the node has multiple outputs. Defaults to 0.
Returns:
Node: The specified consumer (output) node
"""
return self.outputs[tensor_idx].outputs[consumer_idx]
def __setattr__(self, name, value):
if name in ["inputs", "outputs"]:
try:
getattr(self, name).clear()
getattr(self, name).extend(value)
except AttributeError:
super().__setattr__(name, value)
else:
super().__setattr__(name, value)
def copy(self, inputs: List["Tensor"] = None, outputs: List["Tensor"] = None, tensor_map=None):
"""
Makes a shallow copy of this node, overriding input and output information.
Note: Generally, you should only ever make a copy of a Graph.
"""
from onnx_graphsurgeon.ir.graph import Graph
new_attrs = OrderedDict()
for name, attr in self.attrs.items():
if isinstance(attr, Graph):
new_attrs[name] = attr.copy(tensor_map)
else:
new_attrs[name] = attr
return Node(self.op, self.name, new_attrs, inputs=inputs, outputs=outputs)
def __str__(self):
ret = "{:} ({:})".format(self.name, self.op)
def add_io(name, io):
nonlocal ret
ret += "\n\t{:}: [".format(name)
for elem in io:
ret += "\n\t\t{:}".format(elem)
ret += "\n\t]"
add_io("Inputs", self.inputs)
add_io("Outputs", self.outputs)
if self.attrs:
ret += "\nAttributes: {:}".format(self.attrs)
return ret
def __repr__(self):
return self.__str__()
def __eq__(self, other):
"""
Check whether two nodes are equal by comparing name, attributes, op, inputs, and outputs.
"""
G_LOGGER.verbose("Comparing node: {:} with {:}".format(self.name, other.name))
attrs_match = self.name == other.name and self.op == other.op and self.attrs == other.attrs
inputs_match = len(self.inputs) == len(other.inputs) and all(
[inp == other_inp for inp, other_inp in zip(self.inputs, other.inputs)]
)
outputs_match = len(self.outputs) == len(other.outputs) and all(
[out == other_out for out, other_out in zip(self.outputs, other.outputs)]
)
return attrs_match and inputs_match and outputs_match
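# Usage sketch (illustrative, not part of the upstream file): wiring a Node by hand.
# Because inputs/outputs are SynchronizedLists, assigning them also updates the
# producer/consumer lists of the tensors involved. Tensor names are placeholders.
#
#     from onnx_graphsurgeon.ir.tensor import Variable
#
#     x = Variable("x")
#     y = Variable("y")
#     identity = Node(op="Identity", inputs=[x], outputs=[y])
#     assert x.outputs[0] is identity and y.inputs[0] is identity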
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/node.py |
from onnx_graphsurgeon.exporters.base_exporter import BaseExporter
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.ir.graph import Graph
class BaseExporter(object):
@staticmethod
def export_graph(graph: Graph):
"""
Export a graph to some destination graph.
Args:
graph (Graph): The source graph to export.
Returns:
object: The exported graph. For example, this might be an onnx.GraphProto
"""
raise NotImplementedError("BaseExporter is an abstract class")
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/base_exporter.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import onnx
import onnx.numpy_helper
from onnx_graphsurgeon.exporters.base_exporter import BaseExporter
from onnx_graphsurgeon.ir.graph import Graph
from onnx_graphsurgeon.ir.node import Node
from onnx_graphsurgeon.ir.tensor import Constant, LazyValues, Tensor, Variable
from onnx_graphsurgeon.logger.logger import G_LOGGER
def dtype_to_onnx(dtype: np.dtype) -> int:
return onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
class OnnxExporter(BaseExporter):
@staticmethod
def export_tensor_proto(tensor: Constant) -> onnx.TensorProto:
# Do *not* load LazyValues into an intermediate numpy array - instead, use
# the original onnx.TensorProto directly.
if isinstance(tensor._values, LazyValues):
onnx_tensor = tensor._values.tensor
else:
onnx_tensor = onnx.numpy_helper.from_array(tensor.values)
if tensor.data_location is not None:
onnx_tensor.data_location = tensor.data_location
onnx_tensor.name = tensor.name
return onnx_tensor
@staticmethod
def export_value_info_proto(tensor: Variable, do_type_check: bool) -> onnx.ValueInfoProto:
if do_type_check and tensor.dtype is None:
G_LOGGER.critical(
"Graph input and output tensors must include dtype information. Please set the dtype attribute for: {:}".format(
tensor
)
)
if tensor.dtype is not None:
onnx_tensor = onnx.helper.make_tensor_value_info(tensor.name, dtype_to_onnx(tensor.dtype), tensor.shape)
else:
onnx_tensor = onnx.helper.make_empty_tensor_value_info(tensor.name)
return onnx_tensor
@staticmethod
def export_node(node: Node, do_type_check: bool) -> onnx.NodeProto:
# Cannot pass in attrs directly as make_node will change the order
onnx_node = onnx.helper.make_node(
node.op, inputs=[t.name for t in node.inputs], outputs=[t.name for t in node.outputs], name=node.name
)
# Convert Tensors and Graphs to TensorProtos and GraphProtos respectively
for key, val in node.attrs.items():
if isinstance(val, Tensor):
val = OnnxExporter.export_tensor_proto(val)
elif isinstance(val, Graph):
val = OnnxExporter.export_graph(val, do_type_check)
onnx_node.attribute.extend([onnx.helper.make_attribute(key, val)])
return onnx_node
@staticmethod
def export_graph(graph: Graph, do_type_check=True) -> onnx.GraphProto:
"""
Export an onnx-graphsurgeon Graph to an ONNX GraphProto.
Args:
graph (Graph): The graph to export.
do_type_check (bool): Whether to check that input and output tensors have data types defined, and fail if not.
"""
nodes = [OnnxExporter.export_node(node, do_type_check) for node in graph.nodes]
inputs = [OnnxExporter.export_value_info_proto(inp, do_type_check) for inp in graph.inputs]
outputs = [OnnxExporter.export_value_info_proto(out, do_type_check) for out in graph.outputs]
tensor_map = graph.tensors()
initializer = [
OnnxExporter.export_tensor_proto(tensor) for tensor in tensor_map.values() if isinstance(tensor, Constant)
]
# Remove inputs and outputs to export ValueInfoProtos
for tensor in graph.inputs + graph.outputs:
if tensor.name in tensor_map:
del tensor_map[tensor.name]
# Omit tensors from value_info if we don't know their shape/dtype
def has_value_info(tensor):
return isinstance(tensor, Variable) and (tensor.dtype is not None or tensor.shape is not None)
value_info = [
OnnxExporter.export_value_info_proto(tensor, do_type_check)
for tensor in tensor_map.values()
if has_value_info(tensor)
]
return onnx.helper.make_graph(
nodes=nodes,
name=graph.name,
inputs=inputs,
outputs=outputs,
initializer=initializer,
doc_string=graph.doc_string,
value_info=value_info,
)
def export_onnx(graph: Graph, do_type_check=True, **kwargs) -> "onnx.ModelProto":
"""
Exports an onnx-graphsurgeon Graph to an ONNX model.
Args:
graph (Graph): The graph to export
do_type_check (bool): Whether to check that input and output tensors have data types defined, and fail if not.
kwargs: Additional arguments to onnx.helper.make_model
Returns:
onnx.ModelProto: A corresponding ONNX model.
"""
onnx_graph = OnnxExporter.export_graph(graph, do_type_check=do_type_check)
if "opset_imports" not in kwargs:
if graph.import_domains is None:
kwargs["opset_imports"] = [onnx.helper.make_opsetid("", graph.opset)]
else:
kwargs["opset_imports"] = graph.import_domains
return onnx.helper.make_model(onnx_graph, **kwargs)
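# Usage sketch (illustrative, not part of the upstream file): the usual round trip is
# import -> modify -> export, saving the result with onnx.save(). `graph` is an assumed
# onnx-graphsurgeon Graph and "modified.onnx" is a placeholder path.
#
#     import onnx
#     import onnx_graphsurgeon as gs
#
#     onnx.save(gs.export_onnx(graph), "modified.onnx")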
| TensorRT-master | tools/onnx-graphsurgeon/onnx_graphsurgeon/exporters/onnx_exporter.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple setup script"""
import os
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
abspath = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(abspath, "requirements.txt")) as f:
requirements = f.read().splitlines()
license_header = """#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
# Generate version file
with open(os.path.join(abspath, "VERSION")) as f:
version = f.read().strip()
with open(os.path.join(abspath, "pytorch_quantization/version.py"), "w") as f:
f.write(license_header)
f.write(F"__version__ = \"{version}\"")
setup(
name="pytorch_quantization",
version=version,
description="NVIDIA Pytorch quantization toolkit",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
setup_requires=["pytest-runner"],
tests_require=["pytest"],
install_requires=requirements,
ext_modules=[
CUDAExtension(
name="pytorch_quantization.cuda_ext",
sources=[os.path.join(abspath, "src/tensor_quant.cpp"),
os.path.join(abspath, "src/tensor_quant_gpu.cu")])
],
cmdclass={
"build_ext": BuildExtension
},
zip_safe=False,
long_description=open("README.md", "r", encoding="utf-8").read(),
url="https://github.com/nvidia/tensorrt/tools/pytorch-quantization",
author="NVIDIA",
author_email="[email protected]",
license="Apache 2.0",
)
| TensorRT-master | tools/pytorch-quantization/setup.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of QuantPooling module.
Most tests check the functionality of all the combinations in Quant Pooling against the corresponding functionalities
in tensor_quant.
"""
import pytest
import numpy as np
import torch
import torch.nn.functional as F
from pytorch_quantization import tensor_quant
from pytorch_quantization.nn.modules import quant_pooling
# make everything run on the GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
np.random.seed(1234)
torch.manual_seed(1234)
# pylint:disable=missing-docstring, no-self-use
class TestQuantMaxPool1d():
def test_raise(self):
with pytest.raises(ValueError) as excinfo:
quant_pooling_object = quant_pooling.QuantMaxPool1d(kernel_size=3, stride=1,
quant_desc_input=
tensor_quant.QuantDescriptor(fake_quant=False))
assert "Only fake quantization is supported" in str(excinfo.value)
# Quantizing activations
def test_input_fake_quant(self):
quant_pooling_object = quant_pooling.QuantMaxPool1d(kernel_size=3, stride=1)
test_input = torch.randn(1, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.max_pool1d(quant_input, 3, 1, 0, 1, False, False)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantMaxPool2d():
def test_raise(self):
with pytest.raises(ValueError) as excinfo:
quant_pooling_object = quant_pooling.QuantMaxPool2d(kernel_size=3, stride=1,
quant_desc_input=
tensor_quant.QuantDescriptor(fake_quant=False))
assert "Only fake quantization is supported" in str(excinfo.value)
# Quantizing activations
def test_input_fake_quant(self):
quant_pooling_object = quant_pooling.QuantMaxPool2d(kernel_size=3, stride=1)
test_input = torch.randn(1, 5, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.max_pool2d(quant_input, 3, 1, 0, 1, False, False)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_input_variable_bits(self):
# Repeat checking the output for variable number of bits to QuantDescriptor
for bits in [2, 4, 6]:
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=bits)
quant_pooling.QuantMaxPool2d.set_default_quant_desc_input(quant_desc_input)
quant_pooling_object = quant_pooling.QuantMaxPool2d(kernel_size=3, stride=1)
test_input = torch.randn(1, 5, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)), bits)
out1 = F.max_pool2d(quant_input, 3, 1, 0, 1, False, False)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_input_fake_quant_disable(self):
quant_pooling_object = quant_pooling.QuantMaxPool2d(kernel_size=3, stride=1)
test_input = torch.randn(1, 5, 5, 5, dtype=torch.double)
quant_pooling_object.input_quantizer.disable()
out1 = F.max_pool2d(test_input, 3, 1, 0, 1, False, False)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantMaxPool3d():
def test_raise(self):
with pytest.raises(ValueError) as excinfo:
quant_pooling_object = quant_pooling.QuantMaxPool3d(kernel_size=3, stride=1,
quant_desc_input=
tensor_quant.QuantDescriptor(fake_quant=False))
assert "Only fake quantization is supported" in str(excinfo.value)
# Quantizing activations
def test_input_fake_quant(self):
quant_pooling_object = quant_pooling.QuantMaxPool3d(kernel_size=3, stride=1)
test_input = torch.randn(5, 5, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.max_pool3d(quant_input, 3, 1, 0, 1, False, False)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantAvgPool1d():
def test_raise(self):
with pytest.raises(ValueError) as excinfo:
quant_pooling_object = quant_pooling.QuantAvgPool1d(kernel_size=3, stride=1,
quant_desc_input=
tensor_quant.QuantDescriptor(fake_quant=False))
assert "Only fake quantization is supported" in str(excinfo.value)
# Quantizing activations
def test_input_fake_quant(self):
quant_pooling_object = quant_pooling.QuantAvgPool1d(kernel_size=3, stride=1)
test_input = torch.randn(1, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.avg_pool1d(quant_input, 3, 1, 0, False, True)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantAvgPool2d():
def test_raise(self):
with pytest.raises(ValueError) as excinfo:
quant_pooling_object = quant_pooling.QuantAvgPool2d(kernel_size=3, stride=1,
quant_desc_input=
tensor_quant.QuantDescriptor(fake_quant=False))
assert "Only fake quantization is supported" in str(excinfo.value)
# Quantizing activations
def test_input_fake_quant(self):
quant_pooling_object = quant_pooling.QuantAvgPool2d(kernel_size=3, stride=1)
test_input = torch.randn(1, 5, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.avg_pool2d(quant_input, 3, 1, 0, False, True, None)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_input_variable_bits(self):
# Repeat checking the output for variable number of bits to QuantDescriptor
for bits in [2, 4, 6]:
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=bits)
quant_pooling.QuantAvgPool2d.set_default_quant_desc_input(quant_desc_input)
quant_pooling_object = quant_pooling.QuantAvgPool2d(kernel_size=3, stride=1)
test_input = torch.randn(1, 5, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)), bits)
out1 = F.avg_pool2d(quant_input, 3, 1, 0, False, True, None)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_input_fake_quant_disable(self):
quant_pooling_object = quant_pooling.QuantAvgPool2d(kernel_size=3, stride=1)
test_input = torch.randn(1, 5, 5, 5, dtype=torch.double)
quant_pooling_object.input_quantizer.disable()
out1 = F.avg_pool2d(test_input, 3, 1, 0, False, True, None)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantAvgPool3d():
def test_raise(self):
with pytest.raises(ValueError) as excinfo:
quant_pooling_object = quant_pooling.QuantAvgPool3d(kernel_size=3, stride=1,
quant_desc_input=
tensor_quant.QuantDescriptor(fake_quant=False))
assert "Only fake quantization is supported" in str(excinfo.value)
# Quantizing activations
def test_input_fake_quant(self):
quant_pooling_object = quant_pooling.QuantAvgPool3d(kernel_size=3, stride=1)
test_input = torch.randn(5, 5, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.avg_pool3d(quant_input, 3, 1, 0, False, True, None)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantAdaptiveAvgPool1d():
def test_raise(self):
with pytest.raises(ValueError) as excinfo:
quant_pooling_object = quant_pooling.QuantAdaptiveAvgPool1d(output_size=3,
quant_desc_input=
tensor_quant.QuantDescriptor(fake_quant=False))
assert "Only fake quantization is supported" in str(excinfo.value)
# Quantizing activations
def test_input_fake_quant(self):
quant_pooling_object = quant_pooling.QuantAdaptiveAvgPool1d(output_size=3)
test_input = torch.randn(1, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.adaptive_avg_pool1d(quant_input, 3)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantAdaptiveAvgPool2d():
def test_raise(self):
with pytest.raises(ValueError) as excinfo:
quant_pooling_object = quant_pooling.QuantAdaptiveAvgPool2d(output_size=3,
quant_desc_input=
tensor_quant.QuantDescriptor(fake_quant=False))
assert "Only fake quantization is supported" in str(excinfo.value)
# Quantizing activations
def test_input_fake_quant(self):
quant_pooling_object = quant_pooling.QuantAdaptiveAvgPool2d(output_size=3)
test_input = torch.randn(1, 5, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.adaptive_avg_pool2d(quant_input, 3)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_input_variable_bits(self):
# Repeat checking the output for variable number of bits to QuantDescriptor
for bits in [2, 4, 6]:
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=bits)
quant_pooling.QuantAdaptiveAvgPool2d.set_default_quant_desc_input(quant_desc_input)
quant_pooling_object = quant_pooling.QuantAdaptiveAvgPool2d(output_size=3)
test_input = torch.randn(1, 5, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)), bits)
out1 = F.adaptive_avg_pool2d(quant_input, 3)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
def test_input_fake_quant_disable(self):
quant_pooling_object = quant_pooling.QuantAdaptiveAvgPool2d(output_size=3)
test_input = torch.randn(1, 5, 5, 5, dtype=torch.double)
quant_pooling_object.input_quantizer.disable()
out1 = F.adaptive_avg_pool2d(test_input, 3)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
class TestQuantAdaptiveAvgPool3d():
def test_raise(self):
with pytest.raises(ValueError) as excinfo:
quant_pooling_object = quant_pooling.QuantAdaptiveAvgPool3d(output_size=3,
quant_desc_input=
tensor_quant.QuantDescriptor(fake_quant=False))
assert "Only fake quantization is supported" in str(excinfo.value)
# Quantizing activations
def test_input_fake_quant(self):
quant_pooling_object = quant_pooling.QuantAdaptiveAvgPool3d(output_size=3)
test_input = torch.randn(5, 5, 5, 5, dtype=torch.double)
quant_input = tensor_quant.fake_tensor_quant(test_input, torch.max(torch.abs(test_input)))
out1 = F.adaptive_avg_pool3d(quant_input, 3)
out2 = quant_pooling_object(test_input)
np.testing.assert_array_equal(out1.detach().cpu().numpy(), out2.detach().cpu().numpy())
| TensorRT-master | tools/pytorch-quantization/tests/quant_pooling_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests of the classification flow"""
import os
import subprocess
import sys
from os import path
import glob
import pytest
# pylint:disable=missing-docstring, no-self-use
class TestClassificationFlow():
def test_resnet18(self, request, pytestconfig):
dir_path = os.path.dirname(os.path.realpath(__file__))
dataset_dir = pytestconfig.getoption('--data-dir')
# skip if the data dir flag was not set
if not dataset_dir:
pytest.skip("Prepare required dataset and use --data-dir option to enable")
# Verify data dir exists
if not path.exists(dataset_dir):
print("Dataset path %s doesn't exist"%(dataset_dir), file=sys.stderr)
assert path.exists(dataset_dir)
# Append required paths to PYTHONPATH
test_env = os.environ.copy()
if 'PYTHONPATH' not in test_env:
test_env['PYTHONPATH'] = ""
# Add project root and torchvision to the path (assuming running in nvcr.io/nvidia/pytorch:20.08-py3)
test_env['PYTHONPATH'] += ":/opt/pytorch/vision/references/classification/:%s/../"%(dir_path)
# Add requirement egg files manually to path since we're spawning a new process (downloaded by setuptools)
for egg in glob.glob(dir_path + "/../.eggs/*.egg"):
test_env['PYTHONPATH'] += ":%s"%(egg)
# Run in a subprocess to avoid contaminating the module state for other test cases
ret = subprocess.run(
[
'python3', dir_path + '/../examples/torchvision/classification_flow.py',
'--data-dir', dataset_dir,
'--model', 'resnet18', '--pretrained',
'-t', '0.5',
'--num-finetune-epochs', '1',
'--evaluate-onnx',
],
env=test_env,
check=False, stdout=subprocess.PIPE)
# If the test failed dump the output to stderr for better logging
if ret.returncode != 0:
print(ret.stdout, file=sys.stderr)
assert ret.returncode == 0
| TensorRT-master | tools/pytorch-quantization/tests/classification_flow_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""local configuration for pytests"""
import pytest
def pytest_addoption(parser):
parser.addoption('--data-dir', type=str, dest="data_dir",
default='', help="set dataset dir for tests")
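# Usage sketch (added comment): dataset-dependent tests are skipped unless the option is passed
# on the command line, e.g.
#     pytest tests/classification_flow_test.py --data-dir /path/to/dataset
# The dataset path above is only an example.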
| TensorRT-master | tools/pytorch-quantization/tests/conftest.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests of calibrators"""
import pytest
import numpy as np
import torch
from pytorch_quantization import utils as quant_utils
from pytorch_quantization import calib
from pytorch_quantization import nn as quant_nn
import tests.utils as test_utils
from examples.torchvision.models.classification import *
from tests.fixtures import verbose
from tests.fixtures.models import QuantLeNet
np.random.seed(12345)
torch.manual_seed(12345)
# pylint:disable=missing-docstring, no-self-use
class TestExampleModels():
def test_resnet50(self):
model = resnet50(pretrained=True, quantize=True)
model.eval()
model.cuda()
quant_nn.TensorQuantizer.use_fb_fake_quant = True
dummy_input = torch.randn(1, 3, 224, 224, device='cuda')
torch.onnx.export(model,
dummy_input,
"/tmp/resnet50.onnx",
verbose=False,
opset_version=13,
enable_onnx_checker=False,
do_constant_folding=True)
quant_nn.TensorQuantizer.use_fb_fake_quant = False
| TensorRT-master | tools/pytorch-quantization/tests/model_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""test the license of source files."""
import pytest
from pathlib import Path
from filecmp import cmp
# pylint:disable=missing-docstring, no-self-use
class TestLicense():
def test_license(self):
root = Path(__file__).parent.parent.absolute()
root_len = len(str(root))
# Collect files ending with relevant extensions
file_list = []
file_types = ['*.py', '*.cpp', '*.cu', '*.h', '*.hpp', '*.c', '*.sh']
for ft in file_types:
file_list += list(root.rglob(ft))
# Trim files from build folders
build_folders = ['build', 'dist', '.eggs', '.vscode']
build_files = []
for src_file in file_list:
local_path = str(src_file.parents[0])[root_len : ]
for folder in build_folders:
if folder in local_path:
build_files.append(src_file)
for bf in build_files:
file_list.remove(bf)
print (f"Found {len(file_list)} source files")
cpp_header = (root / 'tests' / 'license_test_header_cpp.txt').open().readlines()
py_header = (root / 'tests' / 'license_test_header_py.txt').open().readlines()
sh_header = (root / 'tests' / 'license_test_header_sh.txt').open().readlines()
invalid_files = []
for f in file_list:
with open(f) as src_file:
src_lines = src_file.readlines()
if f.suffix == '.py':
header = py_header
elif f.suffix == '.sh':
header = sh_header
else:
header = cpp_header
num_lines = len(header)
if len(src_lines) < num_lines:
invalid_files.append(f)
continue
for i in range(num_lines):
if src_lines[i] != header[i]:
invalid_files.append(f)
break
if len(invalid_files) > 0:
for f in invalid_files:
print(f"The file {f} has an invalid header!")
raise AssertionError("%d files have invalid headers!" % (len(invalid_files)))
| TensorRT-master | tools/pytorch-quantization/tests/license_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of QuantRNN module.
"""
import pytest
import torch
from torch import nn
import numpy as np
from pytorch_quantization.nn.modules import quant_rnn
from pytorch_quantization import tensor_quant
from tests.fixtures import verbose
from . import utils
# make everything run on the GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# change default type to double if utils.compare flags a small error, may just be floating point rounding error
# torch.set_default_tensor_type('torch.cuda.DoubleTensor')
np.random.seed(1234)
torch.manual_seed(1234)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1234)
# pylint: disable=no-self-use, missing-docstring, redefined-builtin, bad-continuation
# global state for saving/loading test vectors
SAVE_VECTORS = 0
VECTOR_FILE = 'tests/quant_rnn_test_vectors.pt'
if SAVE_VECTORS:
TEST_VECTORS = dict()
else:
TEST_VECTORS = torch.load(VECTOR_FILE)
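# Workflow note (added comment): with SAVE_VECTORS = 1, TestEpilogue at the end of this file
# writes TEST_VECTORS to VECTOR_FILE and then raises deliberately as a reminder; set
# SAVE_VECTORS back to 0 so later runs load the saved vectors from disk instead of regenerating them.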
class TestQuantLSTMCell():
"""
tests for quant_rnn.QuantLSTMCell
default parameters in QuantLSTMCell:
bias=True,
num_bits_weight=8, quant_mode_weight='per_channel',
num_bits_input=8, quant_mode_input='per_tensor'
Tests of real quantization mode (nonfake) are disabled as it is not fully supported yet.
"""
def test_basic_forward(self, verbose):
"""Do a forward pass on the cell module and see if anything catches fire."""
batch = 7
input_size = 11
hidden_size = 9
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=8)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=8, axis=(1,))
quant_rnn_object = quant_rnn.QuantLSTMCell(input_size, hidden_size, bias=False,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
quant_rnn_object._input_quantizer.disable()
quant_rnn_object._weight_quantizer.disable()
input = torch.randn(batch, input_size)
hidden = torch.randn(batch, hidden_size)
cell = torch.randn(batch, hidden_size)
quant_rnn_object(input, hx=(hidden, cell))
def test_no_quant_input_hidden(self, verbose):
"""QuantLSTM with quantization disabled vs. pytorch LSTM for input and hidden inputs."""
batch = 17
input_size = 13
hidden_size = 7
quant_rnn_object = quant_rnn.QuantLSTMCell(input_size, hidden_size, bias=False)
quant_rnn_object._input_quantizer.disable()
quant_rnn_object._weight_quantizer.disable()
ref_rnn_object = nn.LSTMCell(input_size, hidden_size, bias=False)
# copy weights from one rnn to the other
ref_rnn_object.load_state_dict(quant_rnn_object.state_dict())
input = torch.randn(batch, input_size)
hidden = torch.randn(batch, hidden_size)
cell = torch.randn(batch, hidden_size)
quant_hout, quant_cout = quant_rnn_object(input, hx=(hidden, cell))
ref_hout, ref_cout = ref_rnn_object(input, hx=(hidden, cell))
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
def test_no_quant_input_hidden_bias(self, verbose):
"""QuantLSTMCell with quantization disabled vs. pytorch LSTMCell for input, hidden inputs and bias."""
batch = 19
input_size = 11
hidden_size = 3
quant_rnn_object = quant_rnn.QuantLSTMCell(input_size, hidden_size, bias=True)
quant_rnn_object._input_quantizer.disable()
quant_rnn_object._weight_quantizer.disable()
ref_rnn_object = nn.LSTMCell(input_size, hidden_size, bias=True)
# copy weights from one rnn to the other
ref_rnn_object.load_state_dict(quant_rnn_object.state_dict())
input = torch.randn(batch, input_size)
hidden = torch.randn(batch, hidden_size)
cell = torch.randn(batch, hidden_size)
quant_hout, quant_cout = quant_rnn_object(input, hx=(hidden, cell))
ref_hout, ref_cout = ref_rnn_object(input, hx=(hidden, cell))
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
def test_against_unquantized(self, verbose):
"""Quantization should introduce bounded error utils.compare to pytorch implementation."""
batch = 9
input_size = 13
hidden_size = 7
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=16)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=16, axis=(1,))
quant_rnn_object = quant_rnn.QuantLSTMCell(input_size, hidden_size, bias=False,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
ref_rnn_object = nn.LSTMCell(input_size, hidden_size, bias=False)
# copy weights from one rnn to the other
ref_rnn_object.load_state_dict(quant_rnn_object.state_dict())
input = torch.randn(batch, input_size)
hidden = torch.randn(batch, hidden_size)
cell = torch.randn(batch, hidden_size)
quant_hout, quant_cout = quant_rnn_object(input, hx=(hidden, cell))
ref_hout, ref_cout = ref_rnn_object(input, hx=(hidden, cell))
# The difference between reference and quantized should be bounded in a range
# Small values which become 0 after quantization lead to large relative errors. rtol and atol could be
# much smaller without those values
utils.compare(quant_hout, ref_hout, rtol=1e-4, atol=1e-4)
utils.compare(quant_cout, ref_cout, rtol=1e-4, atol=1e-4)
# check that quantization introduces some error
utils.assert_min_mse(quant_hout, ref_hout, tol=1e-20)
utils.assert_min_mse(quant_cout, ref_cout, tol=1e-20)
def test_quant_input_hidden(self, verbose):
"""QuantLSTMCell vs. manual input quantization + pytorchLSTMCell."""
batch = 15
input_size = 121
hidden_size = 51
num_bits = 4
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=num_bits)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=num_bits)
quant_rnn_object = quant_rnn.QuantLSTMCell(input_size, hidden_size, bias=False,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
ref_rnn_object = nn.LSTMCell(input_size, hidden_size, bias=False)
input = torch.randn(batch, input_size)
hidden = torch.randn(batch, hidden_size)
cell = torch.randn(batch, hidden_size)
quant_hout, quant_cout = quant_rnn_object(input, hx=(hidden, cell))
quant_input, quant_hidden = utils.quantize_by_range_fused((input, hidden), num_bits)
utils.copy_state_and_quantize_fused(ref_rnn_object, quant_rnn_object, num_bits)
ref_hout, ref_cout = ref_rnn_object(quant_input, hx=(quant_hidden, cell))
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
def test_quant_input_hidden_bias(self, verbose):
"""QuantLSTMCell vs. manual input quantization + pytorchLSTMCell
bias should not be quantized
"""
batch = 9
input_size = 23
hidden_size = 31
num_bits = 7
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=num_bits)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=num_bits)
quant_rnn_object = quant_rnn.QuantLSTMCell(input_size, hidden_size, bias=True,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
ref_rnn_object = nn.LSTMCell(input_size, hidden_size, bias=True)
input = torch.randn(batch, input_size)
hidden = torch.randn(batch, hidden_size)
cell = torch.randn(batch, hidden_size)
quant_hout, quant_cout = quant_rnn_object(input, hx=(hidden, cell))
quant_input, quant_hidden = utils.quantize_by_range_fused((input, hidden), num_bits)
utils.copy_state_and_quantize_fused(ref_rnn_object, quant_rnn_object, num_bits)
ref_hout, ref_cout = ref_rnn_object(quant_input, hx=(quant_hidden, cell))
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
def test_quant_different_prec(self, verbose):
"""QuantLSTMCell vs. manual input quantization + pytorch LSTMCell
different input and weight precisions
"""
batch = 27
input_size = 11
hidden_size = 10
num_bits_weight = 4
num_bits_input = 8
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=num_bits_input)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=num_bits_weight)
quant_rnn_object = quant_rnn.QuantLSTMCell(input_size, hidden_size, bias=False,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
ref_rnn_object = nn.LSTMCell(input_size, hidden_size, bias=False)
input = torch.randn(batch, input_size)
hidden = torch.randn(batch, hidden_size)
cell = torch.randn(batch, hidden_size)
quant_hout, quant_cout = quant_rnn_object(input, hx=(hidden, cell))
quant_input, quant_hidden = utils.quantize_by_range_fused((input, hidden), num_bits_input)
utils.copy_state_and_quantize_fused(ref_rnn_object, quant_rnn_object, num_bits_weight)
ref_hout, ref_cout = ref_rnn_object(quant_input, hx=(quant_hidden, cell))
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
class TestQuantLSTM():
"""
tests for quant_rnn.QuantLSTM
default parameters in QuantLSTM:
bias=True,
        quant_weight=True, bits_weight=8, fake_quant=True, quant_mode_weight='channel',
quant_input=True, bits_acts=8, quant_mode_input='tensor'
Tests of real quantization mode (nonfake) are disabled as it is not fully supported yet.
"""
def test_basic_forward(self, verbose):
"""Do a forward pass on the layer module and see if anything catches fire."""
batch = 5
input_size = 13
hidden_size = 31
seq_len = 1
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=8)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=8, axis=(1,))
quant_rnn_object = quant_rnn.QuantLSTM(input_size, hidden_size,
num_layers=1, bias=False, batch_first=False, dropout=0, bidirectional=False,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
input = torch.randn(seq_len, batch, input_size)
hidden = torch.randn(seq_len, batch, hidden_size)
cell = torch.randn(seq_len, batch, hidden_size)
quant_rnn_object(input, hx=(hidden, cell))
def test_no_quant(self, verbose):
"""QuantLSTM with quantization disabled vs. pytorch LSTM."""
batch = 11
input_size = 14
hidden_size = 22
seq_len = 1
quant_rnn_object = quant_rnn.QuantLSTM(input_size, hidden_size,
num_layers=1, bias=False, batch_first=False, dropout=0, bidirectional=False)
quant_rnn_object._input_quantizers[0].disable()
quant_rnn_object._weight_quantizers[0].disable()
ref_rnn_object = nn.LSTM(input_size, hidden_size,
num_layers=1, bias=False, batch_first=False, dropout=0, bidirectional=False)
# copy weights from one rnn to the other
ref_rnn_object.load_state_dict(quant_rnn_object.state_dict())
input = torch.randn(seq_len, batch, input_size)
hidden = torch.randn(seq_len, batch, hidden_size)
cell = torch.randn(seq_len, batch, hidden_size)
quant_out, (quant_hout, quant_cout) = quant_rnn_object(input)
ref_out, (ref_hout, ref_cout) = ref_rnn_object(input)
utils.compare(quant_out, ref_out)
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
def test_no_quant_input_hidden(self, verbose):
"""QuantLSTM with quantization disabled vs. pytorch LSTM for input and hidden inputs."""
batch = 13
input_size = 19
hidden_size = 20
seq_len = 1
quant_rnn_object = quant_rnn.QuantLSTM(input_size, hidden_size,
num_layers=1, bias=False, batch_first=False, dropout=0, bidirectional=False)
quant_rnn_object._input_quantizers[0].disable()
quant_rnn_object._weight_quantizers[0].disable()
ref_rnn_object = nn.LSTM(input_size, hidden_size,
num_layers=1, bias=False, batch_first=False, dropout=0, bidirectional=False)
# copy weights from one rnn to the other
ref_rnn_object.load_state_dict(quant_rnn_object.state_dict())
input = torch.randn(seq_len, batch, input_size)
hidden = torch.randn(seq_len, batch, hidden_size)
cell = torch.randn(seq_len, batch, hidden_size)
quant_out, (quant_hout, quant_cout) = quant_rnn_object(input, hx=(hidden, cell))
ref_out, (ref_hout, ref_cout) = ref_rnn_object(input, hx=(hidden, cell))
utils.compare(quant_out, ref_out)
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
def test_no_quant_all_modes(self, verbose):
"""QuantLSTM with quantization disabled vs. pytorch LSTM for all modes."""
def testcase(input_size, hidden_size, seq_len, batch, num_layers, bias, batch_first, dropout, bidirectional):
quant_rnn_object = quant_rnn.QuantLSTM(input_size, hidden_size,
num_layers=num_layers, bias=bias, batch_first=batch_first, dropout=dropout,
bidirectional=bidirectional)
num_quantizers = num_layers * 2 if bidirectional else num_layers
for i in range(num_quantizers):
quant_rnn_object._input_quantizers[i].disable()
quant_rnn_object._weight_quantizers[i].disable()
ref_rnn_object = nn.LSTM(input_size, hidden_size,
num_layers=num_layers, bias=bias, batch_first=batch_first, dropout=dropout,
bidirectional=bidirectional)
# copy state from one rnn to the other
ref_rnn_object.load_state_dict(quant_rnn_object.state_dict())
input = torch.randn(seq_len, batch, input_size)
num_directions = 2 if bidirectional else 1
hidden = torch.randn(num_layers*num_directions, batch, hidden_size)
cell = torch.randn(num_layers*num_directions, batch, hidden_size)
quant_out, (quant_hout, quant_cout) = quant_rnn_object(input, hx=(hidden, cell))
ref_out, (ref_hout, ref_cout) = ref_rnn_object(input, hx=(hidden, cell))
utils.compare(quant_out, ref_out)
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
        # test various permutations of the following parameters:
        # input_size, hidden_size, seq_len, batch, num_layers, bias, batch_first, dropout, bidirectional
testcase(32, 27, 1, 1, 1, False, False, 0, False)
testcase(19, 63, 1, 1, 2, False, False, 0, False)
testcase(11, 41, 1, 1, 1, True, False, 0, False)
testcase(33, 31, 1, 1, 1, False, True, 0, False)
# testcase(32, 32, 1, 1, 2, False, False, 0.5, False) #TODO(pjudd) this fails look into dropout seeding
testcase(73, 13, 1, 1, 1, False, False, 0, True)
def test_against_unquantized(self, verbose):
"""Quantization should introduce bounded error utils.compare to pytorch implementation."""
batch = 21
input_size = 33
hidden_size = 25
seq_len = 1
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=16)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=16, axis=(1,))
quant_rnn_object = quant_rnn.QuantLSTM(input_size, hidden_size,
num_layers=1, bias=False, batch_first=False, dropout=0, bidirectional=False,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
ref_rnn_object = nn.LSTM(input_size, hidden_size,
num_layers=1, bias=False, batch_first=False, dropout=0, bidirectional=False)
# copy weights from one rnn to the other
ref_rnn_object.load_state_dict(quant_rnn_object.state_dict())
input = torch.randn(seq_len, batch, input_size)
hidden = torch.randn(seq_len, batch, hidden_size)
cell = torch.randn(seq_len, batch, hidden_size)
quant_out, (quant_hout, quant_cout) = quant_rnn_object(input, hx=(hidden, cell))
ref_out, (ref_hout, ref_cout) = ref_rnn_object(input, hx=(hidden, cell))
# The difference between reference and quantized should be bounded in a range
# Small values which become 0 after quantization lead to large relative errors. rtol and atol could be
# much smaller without those values
utils.compare(quant_out, ref_out, rtol=1e-4, atol=1e-4)
utils.compare(quant_hout, ref_hout, rtol=1e-4, atol=1e-4)
utils.compare(quant_cout, ref_cout, rtol=1e-4, atol=1e-4)
# check that quantization introduces some error
utils.assert_min_mse(quant_out, ref_out, tol=1e-20)
utils.assert_min_mse(quant_hout, ref_hout, tol=1e-20)
utils.assert_min_mse(quant_cout, ref_cout, tol=1e-20)
def test_quant_input_hidden(self, verbose):
"""QuantLSTM vs. manual input quantization + pytorchLSTM."""
batch = 13
input_size = 17
hidden_size = 7
seq_len = 1
num_bits = 6
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=num_bits)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=num_bits)
quant_rnn_object = quant_rnn.QuantLSTM(input_size, hidden_size, num_layers=1, bias=False,
batch_first=False, dropout=0, bidirectional=False,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
ref_rnn_object = nn.LSTM(input_size, hidden_size, num_layers=1, bias=False,
batch_first=False, dropout=0, bidirectional=False)
input = torch.randn(seq_len, batch, input_size)
hidden = torch.randn(seq_len, batch, hidden_size)
cell = torch.randn(seq_len, batch, hidden_size)
quant_input, quant_hidden = utils.quantize_by_range_fused((input, hidden), num_bits)
utils.copy_state_and_quantize_fused(ref_rnn_object, quant_rnn_object, num_bits)
quant_out, (quant_hout, quant_cout) = quant_rnn_object(input, hx=(hidden, cell))
ref_out, (ref_hout, ref_cout) = ref_rnn_object(quant_input, hx=(quant_hidden, cell))
utils.compare(quant_out, ref_out)
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
def test_quant_input_hidden_bias(self, verbose):
"""QuantLSTM vs. manual input quantization + pytorchLSTM."""
batch = 17
input_size = 13
hidden_size = 7
seq_len = 1
num_bits = 5
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=num_bits)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=num_bits)
quant_rnn_object = quant_rnn.QuantLSTM(input_size, hidden_size, num_layers=1, bias=True,
batch_first=False, dropout=0, bidirectional=False,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
ref_rnn_object = nn.LSTM(input_size, hidden_size, num_layers=1, bias=True,
batch_first=False, dropout=0, bidirectional=False)
input = torch.randn(seq_len, batch, input_size)
hidden = torch.randn(seq_len, batch, hidden_size)
cell = torch.randn(seq_len, batch, hidden_size)
quant_input, quant_hidden = utils.quantize_by_range_fused((input, hidden), num_bits)
utils.copy_state_and_quantize_fused(ref_rnn_object, quant_rnn_object, num_bits)
quant_out, (quant_hout, quant_cout) = quant_rnn_object(input, hx=(hidden, cell))
ref_out, (ref_hout, ref_cout) = ref_rnn_object(quant_input, hx=(quant_hidden, cell))
utils.compare(quant_out, ref_out)
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
def test_quant_different_prec(self, verbose):
"""QuantLSTM vs. manual input quantization + pytorchLSTM."""
batch = 22
input_size = 23
hidden_size = 24
seq_len = 1
num_bits_weight = 4
num_bits_input = 8
quant_desc_input = tensor_quant.QuantDescriptor(num_bits=num_bits_input)
quant_desc_weight = tensor_quant.QuantDescriptor(num_bits=num_bits_weight)
quant_rnn_object = quant_rnn.QuantLSTM(input_size, hidden_size, num_layers=1, bias=False,
batch_first=False, dropout=0, bidirectional=False,
quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)
ref_rnn_object = nn.LSTM(input_size, hidden_size, num_layers=1, bias=False,
batch_first=False, dropout=0, bidirectional=False)
input = torch.randn(seq_len, batch, input_size)
hidden = torch.randn(seq_len, batch, hidden_size)
cell = torch.randn(seq_len, batch, hidden_size)
quant_input, quant_hidden = utils.quantize_by_range_fused((input, hidden), num_bits_input)
utils.copy_state_and_quantize_fused(ref_rnn_object, quant_rnn_object, num_bits_weight)
quant_out, (quant_hout, quant_cout) = quant_rnn_object(input, hx=(hidden, cell))
ref_out, (ref_hout, ref_cout) = ref_rnn_object(quant_input, hx=(quant_hidden, cell))
utils.compare(quant_out, ref_out)
utils.compare(quant_hout, ref_hout)
utils.compare(quant_cout, ref_cout)
class TestEpilogue():
"""Run after all tests to save globals."""
def test_save_vectors(self, verbose):
"""Save test vectors to file."""
if SAVE_VECTORS:
torch.save(TEST_VECTORS, VECTOR_FILE)
raise Exception('Saved test vectors to {}, for testing set SAVE_VECTORS = 0'.format(VECTOR_FILE))
| TensorRT-master | tools/pytorch-quantization/tests/quant_rnn_test.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests of supportive functions"""
import pytest
import numpy as np
import torch
import pytorch_quantization.nn.functional as QF
np.random.seed(1234)
torch.manual_seed(1234)
# pylint:disable=missing-docstring, no-self-use
torch.set_default_tensor_type('torch.cuda.FloatTensor')
class TestClip():
def test_simple_run(self):
x_np = np.random.rand(1023).astype(np.float32)
x_torch = torch.Tensor(x_np)
clip_x_np = np.clip(x_np, 0.3, 0.7)
clip_x_torch = QF.clip(x_torch, torch.tensor(0.3), torch.tensor(0.7))
np.testing.assert_array_equal(clip_x_torch.cpu().numpy(), clip_x_np)
def test_raise(self):
x = torch.randn(3, 7, requires_grad=True)
min_value = torch.Tensor(3, 7)
max_value = torch.Tensor(3, 7)
min_value.requires_grad = True
max_value.requires_grad = True
clip_x = QF.clip(x, min_value, max_value)
labels = torch.randint(6, (3,)).type(torch.LongTensor).cuda()
criterion = torch.nn.CrossEntropyLoss()
loss = criterion(clip_x, labels)
with pytest.raises(ValueError, match="can only be scalar"):
loss.backward()
def test_broadcast(self):
"""Test broadcast behavior by randomly picked shuffling of np.random.rand"""
x_np = np.random.rand(1023, 4, 5, 6).astype(np.float32) - 0.5
x_torch = torch.Tensor(x_np)
min_value = np.random.rand(1, 4, 1, 1).astype(np.float32) * 0.1 - 0.2
max_value = np.random.rand(1, 4, 1, 1).astype(np.float32) * 10 + 0.5
clip_x_np = np.clip(x_np, min_value, max_value)
clip_x_torch = QF.clip(x_torch, torch.tensor(min_value), torch.tensor(max_value))
np.testing.assert_array_equal(clip_x_torch.cpu().numpy(), clip_x_np)
def test_backward(self):
x = torch.randn(3, 1025, requires_grad=True)
x.retain_grad()
min_value = torch.tensor(0.3)
max_value = torch.tensor(0.7)
min_value.requires_grad = True
max_value.requires_grad = True
min_value.retain_grad()
max_value.retain_grad()
clip_x = QF.clip(x, min_value, max_value)
clip_x.retain_grad()
labels = torch.randint(6, (3,)).type(torch.LongTensor).cuda()
criterion = torch.nn.CrossEntropyLoss()
loss = criterion(clip_x, labels)
loss.backward()
np.testing.assert_array_almost_equal(
clip_x.grad[x < min_value].sum().cpu().numpy(), min_value.grad.cpu().numpy(), decimal=6)
np.testing.assert_array_almost_equal(
clip_x.grad[x > max_value].sum().cpu().numpy(), max_value.grad.cpu().numpy(), decimal=6)
assert x.grad.cpu()[x < min_value].sum() == 0
assert x.grad.cpu()[x > max_value].sum() == 0
assert torch.equal(clip_x.grad[(x > min_value) & (x < max_value)], x.grad[(x > min_value) & (x < max_value)])
| TensorRT-master | tools/pytorch-quantization/tests/functional_test.py |