relative_path | section | filename | text
---|---|---|---|
PyTorch/Segmentation/nnUNet/data_preprocessing | data_preprocessing | preprocessor | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import math
import os
import pickle
import monai.transforms as transforms
import nibabel
import numpy as np
from joblib import Parallel, delayed
from skimage.transform import resize
from utils.utils import get_task_code, make_empty_dir
from data_preprocessing.configs import ct_max, ct_mean, ct_min, ct_std, patch_size, spacings, task
class Preprocessor:
def __init__(self, args):
self.args = args
self.target_spacing = None
self.task = args.task
self.task_code = get_task_code(args)
self.verbose = args.verbose
self.patch_size = patch_size[self.task_code]
self.training = args.exec_mode == "training"
self.data_path = os.path.join(args.data, task[args.task])
metadata_path = os.path.join(self.data_path, "dataset.json")
self.metadata = json.load(open(metadata_path, "r"))
self.modality = self.metadata["modality"]["0"]
self.results = os.path.join(args.results, self.task_code)
self.ct_min, self.ct_max, self.ct_mean, self.ct_std = (0,) * 4
if not self.training:
self.results = os.path.join(self.results, self.args.exec_mode)
self.crop_foreg = transforms.CropForegroundd(keys=["image", "label"], source_key="image")
nonzero = self.modality != "CT" # normalize only non-zero region for MRI
self.normalize_intensity = transforms.NormalizeIntensity(nonzero=nonzero, channel_wise=True)
if self.args.exec_mode == "val":
dataset_json = json.load(open(metadata_path, "r"))
dataset_json["val"] = dataset_json["training"]
with open(metadata_path, "w") as outfile:
json.dump(dataset_json, outfile)
def run(self):
make_empty_dir(self.results)
print(f"Preprocessing {self.data_path}")
try:
self.target_spacing = spacings[self.task_code]
except KeyError:
self.collect_spacings()
if self.verbose:
print(f"Target spacing {self.target_spacing}")
if self.modality == "CT":
try:
self.ct_min = ct_min[self.task]
self.ct_max = ct_max[self.task]
self.ct_mean = ct_mean[self.task]
self.ct_std = ct_std[self.task]
except KeyError:
self.collect_intensities()
_mean = round(self.ct_mean, 2)
_std = round(self.ct_std, 2)
if self.verbose:
print(f"[CT] min: {self.ct_min}, max: {self.ct_max}, mean: {_mean}, std: {_std}")
self.run_parallel(self.preprocess_pair, self.args.exec_mode)
pickle.dump(
{
"patch_size": self.patch_size,
"spacings": self.target_spacing,
"n_class": len(self.metadata["labels"]),
"in_channels": len(self.metadata["modality"]) + int(self.args.ohe),
},
open(os.path.join(self.results, "config.pkl"), "wb"),
)
def preprocess_pair(self, pair):
fname = os.path.basename(pair["image"] if isinstance(pair, dict) else pair)
image, label, image_spacings = self.load_pair(pair)
# Crop foreground and store original shapes.
orig_shape = image.shape[1:]
bbox = transforms.utils.generate_spatial_bounding_box(image)
image = transforms.SpatialCrop(roi_start=bbox[0], roi_end=bbox[1])(image)
image_metadata = np.vstack([bbox, orig_shape, image.shape[1:]])
if label is not None:
label = transforms.SpatialCrop(roi_start=bbox[0], roi_end=bbox[1])(label)
self.save_npy(label, fname, "_orig_lbl.npy")
if self.args.dim == 3:
image, label = self.resample(image, label, image_spacings)
if self.modality == "CT":
image = np.clip(image, self.ct_min, self.ct_max)
image = self.normalize(image)
if self.training:
image, label = self.standardize(image, label)
if self.args.ohe:
mask = np.ones(image.shape[1:], dtype=np.float32)
for i in range(image.shape[0]):
zeros = np.where(image[i] <= 0)
mask[zeros] *= 0.0
image = self.normalize_intensity(image).astype(np.float32)
mask = np.expand_dims(mask, 0)
image = np.concatenate([image, mask])
self.save(image, label, fname, image_metadata)
def resample(self, image, label, image_spacings):
if self.target_spacing != image_spacings:
image, label = self.resample_pair(image, label, image_spacings)
return image, label
def standardize(self, image, label):
pad_shape = self.calculate_pad_shape(image)
image_shape = image.shape[1:]
if pad_shape != image_shape:
paddings = [(pad_sh - image_sh) / 2 for (pad_sh, image_sh) in zip(pad_shape, image_shape)]
image = self.pad(image, paddings)
label = self.pad(label, paddings)
if self.args.dim == 2: # Center cropping 2D images.
_, _, height, width = image.shape
start_h = (height - self.patch_size[0]) // 2
start_w = (width - self.patch_size[1]) // 2
image = image[:, :, start_h : start_h + self.patch_size[0], start_w : start_w + self.patch_size[1]]
label = label[:, :, start_h : start_h + self.patch_size[0], start_w : start_w + self.patch_size[1]]
return image, label
def normalize(self, image):
if self.modality == "CT":
return (image - self.ct_mean) / self.ct_std
return self.normalize_intensity(image)
def save(self, image, label, fname, image_metadata):
mean, std = np.round(np.mean(image, (1, 2, 3)), 2), np.round(np.std(image, (1, 2, 3)), 2)
if self.verbose:
print(f"Saving {fname} shape {image.shape} mean {mean} std {std}")
self.save_npy(image, fname, "_x.npy")
if label is not None:
self.save_npy(label, fname, "_y.npy")
if image_metadata is not None:
self.save_npy(image_metadata, fname, "_meta.npy")
def load_pair(self, pair):
image = self.load_nifty(pair["image"] if isinstance(pair, dict) else pair)
image_spacing = self.load_spacing(image)
image = image.get_fdata().astype(np.float32)
image = self.standardize_layout(image)
if self.training:
label = self.load_nifty(pair["label"]).get_fdata().astype(np.uint8)
label = self.standardize_layout(label)
else:
label = None
return image, label, image_spacing
def resample_pair(self, image, label, spacing):
shape = self.calculate_new_shape(spacing, image.shape[1:])
if self.check_anisotrophy(spacing):
image = self.resample_anisotrophic_image(image, shape)
if label is not None:
label = self.resample_anisotrophic_label(label, shape)
else:
image = self.resample_regular_image(image, shape)
if label is not None:
label = self.resample_regular_label(label, shape)
image = image.astype(np.float32)
if label is not None:
label = label.astype(np.uint8)
return image, label
def calculate_pad_shape(self, image):
min_shape = self.patch_size[:]
image_shape = image.shape[1:]
if len(min_shape) == 2: # In 2D case we don't want to pad depth axis.
min_shape.insert(0, image_shape[0])
pad_shape = [max(mshape, ishape) for mshape, ishape in zip(min_shape, image_shape)]
return pad_shape
def get_intensities(self, pair):
image = self.load_nifty(pair["image"]).get_fdata().astype(np.float32)
label = self.load_nifty(pair["label"]).get_fdata().astype(np.uint8)
foreground_idx = np.where(label > 0)
intensities = image[foreground_idx].tolist()
return intensities
def collect_intensities(self):
intensities = self.run_parallel(self.get_intensities, "training")
intensities = list(itertools.chain(*intensities))
self.ct_min, self.ct_max = np.percentile(intensities, [0.5, 99.5])
self.ct_mean, self.ct_std = np.mean(intensities), np.std(intensities)
def get_spacing(self, pair):
image = nibabel.load(os.path.join(self.data_path, pair["image"]))
spacing = self.load_spacing(image)
return spacing
def collect_spacings(self):
spacing = self.run_parallel(self.get_spacing, "training")
spacing = np.array(spacing)
target_spacing = np.median(spacing, axis=0)
if max(target_spacing) / min(target_spacing) >= 3:
lowres_axis = np.argmin(target_spacing)
target_spacing[lowres_axis] = np.percentile(spacing[:, lowres_axis], 10)
self.target_spacing = list(target_spacing)
def check_anisotrophy(self, spacing):
def check(spacing):
return np.max(spacing) / np.min(spacing) >= 3
return check(spacing) or check(self.target_spacing)
def calculate_new_shape(self, spacing, shape):
spacing_ratio = np.array(spacing) / np.array(self.target_spacing)
new_shape = (spacing_ratio * np.array(shape)).astype(int).tolist()
return new_shape
def save_npy(self, image, fname, suffix):
np.save(os.path.join(self.results, fname.replace(".nii.gz", suffix)), image, allow_pickle=False)
def run_parallel(self, func, exec_mode):
return Parallel(n_jobs=self.args.n_jobs)(delayed(func)(pair) for pair in self.metadata[exec_mode])
def load_nifty(self, fname):
return nibabel.load(os.path.join(self.data_path, fname))
@staticmethod
def load_spacing(image):
return image.header["pixdim"][1:4].tolist()[::-1]
@staticmethod
def pad(image, padding):
pad_d, pad_w, pad_h = padding
return np.pad(
image,
(
(0, 0),
(math.floor(pad_d), math.ceil(pad_d)),
(math.floor(pad_w), math.ceil(pad_w)),
(math.floor(pad_h), math.ceil(pad_h)),
),
)
@staticmethod
def standardize_layout(data):
if len(data.shape) == 3:
data = np.expand_dims(data, 3)
return np.transpose(data, (3, 2, 1, 0))
@staticmethod
def resize_fn(image, shape, order, mode):
return resize(image, shape, order=order, mode=mode, cval=0, clip=True, anti_aliasing=False)
def resample_anisotrophic_image(self, image, shape):
resized_channels = []
for image_c in image:
resized = [self.resize_fn(i, shape[1:], 3, "edge") for i in image_c]
resized = np.stack(resized, axis=0)
resized = self.resize_fn(resized, shape, 0, "constant")
resized_channels.append(resized)
resized = np.stack(resized_channels, axis=0)
return resized
def resample_regular_image(self, image, shape):
resized_channels = []
for image_c in image:
resized_channels.append(self.resize_fn(image_c, shape, 3, "edge"))
resized = np.stack(resized_channels, axis=0)
return resized
def resample_anisotrophic_label(self, label, shape):
depth = label.shape[1]
reshaped = np.zeros(shape, dtype=np.uint8)
shape_2d = shape[1:]
reshaped_2d = np.zeros((depth, *shape_2d), dtype=np.uint8)
n_class = np.max(label)
for class_ in range(1, n_class + 1):
for depth_ in range(depth):
mask = label[0, depth_] == class_
resized_2d = self.resize_fn(mask.astype(float), shape_2d, 1, "edge")
reshaped_2d[depth_][resized_2d >= 0.5] = class_
for class_ in range(1, n_class + 1):
mask = reshaped_2d == class_
resized = self.resize_fn(mask.astype(float), shape, 0, "constant")
reshaped[resized >= 0.5] = class_
reshaped = np.expand_dims(reshaped, 0)
return reshaped
def resample_regular_label(self, label, shape):
reshaped = np.zeros(shape, dtype=np.uint8)
n_class = np.max(label)
for class_ in range(1, n_class + 1):
mask = label[0] == class_
resized = self.resize_fn(mask.astype(float), shape, 1, "edge")
reshaped[resized >= 0.5] = class_
reshaped = np.expand_dims(reshaped, 0)
return reshaped
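if __name__ == "__main__":
    # Minimal usage sketch with hypothetical arguments: the real entry point builds
    # `args` with argparse, and this assumes the decathlon task data (here task "01")
    # has already been downloaded under the `data` directory. Paths are placeholders.
    from types import SimpleNamespace

    example_args = SimpleNamespace(
        task="01", dim=3, exec_mode="training", data="/data", results="/results",
        ohe=False, verbose=True, n_jobs=8,
    )
    Preprocessor(example_args).run()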
|
PyTorch/Recommendation/NCF/qa | qa | inference_table | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tabulate
archs = ["a100", "v100"]
precs = ["full", "half"]
for arch in archs:
for prec in precs:
filename = f"inference/{arch}_{prec}.log"
with open(filename) as opened:
line = opened.readlines()[-1]
log = json.loads(line[len("DLLL "):])['data']
print(log)
batch_sizes = [1024, 4096, 16384, 65536, 262144, 1048576]
t_avg = "batch_{}_mean_throughput"
l_mean = "batch_{}_mean_latency"
l_90 = "batch_{}_p90_latency"
l_95 = "batch_{}_p95_latency"
l_99 = "batch_{}_p99_latency"
headers = ["Batch size", "Throughput Avg", "Latency Avg", "Latency 90%", "Latency 95%", "Latency 99%"]
table = []
for bsize in batch_sizes:
table.append([bsize,
"{:3.3f}".format(log[t_avg.format(bsize)]),
"{:.6f}".format(log[l_mean.format(bsize)]),
"{:.6f}".format(log[l_90.format(bsize)]),
"{:.6f}".format(log[l_95.format(bsize)]),
"{:.6f}".format(log[l_99.format(bsize)])])
print(filename)
print(tabulate.tabulate(table, headers, tablefmt='pipe'))
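# Note: each inference log is expected to end with a DLLogger summary line of the
# form `DLLL {"data": {"batch_<N>_mean_throughput": ..., "batch_<N>_p99_latency": ...}}`
# (keys shown are illustrative); the slice earlier in the loop strips the "DLLL "
# prefix before parsing the JSON payload.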
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf | conf | converter_config | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defaults:
- deployment: convert
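# Note: `???` marks a mandatory value (OmegaConf's missing-value marker); supply it
# at launch time, for example with a command-line override such as
# `checkpoint=/path/to/checkpoint/` (hypothetical path).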
checkpoint: ??? |
PaddlePaddle/LanguageModeling/BERT/data/squad | squad | squad_download | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
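# Usage: run this script from the directory where the SQuAD data should be stored
# (for example, data/squad/). It downloads SQuAD v1.1 and v2.0 plus the official
# evaluation scripts into ./v1.1 and ./v2.0 and verifies their md5 checksums.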
echo "Downloading dataset for squad..."
# Download SQuAD
v1="v1.1"
mkdir $v1
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json -O $v1/train-v1.1.json
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json -O $v1/dev-v1.1.json
wget https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/ -O $v1/evaluate-v1.1.py
EXP_TRAIN_v1='981b29407e0affa3b1b156f72073b945 -'
EXP_DEV_v1='3e85deb501d4e538b6bc56f786231552 -'
EXP_EVAL_v1='afb04912d18ff20696f7f88eed49bea9 -'
CALC_TRAIN_v1=`cat ${v1}/train-v1.1.json |md5sum`
CALC_DEV_v1=`cat ${v1}/dev-v1.1.json |md5sum`
CALC_EVAL_v1=`cat ${v1}/evaluate-v1.1.py |md5sum`
v2="v2.0"
mkdir $v2
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json -O $v2/train-v2.0.json
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json -O $v2/dev-v2.0.json
wget https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/ -O $v2/evaluate-v2.0.py
EXP_TRAIN_v2='62108c273c268d70893182d5cf8df740 -'
EXP_DEV_v2='246adae8b7002f8679c027697b0b7cf8 -'
EXP_EVAL_v2='ff23213bed5516ea4a6d9edb6cd7d627 -'
CALC_TRAIN_v2=`cat ${v2}/train-v2.0.json |md5sum`
CALC_DEV_v2=`cat ${v2}/dev-v2.0.json |md5sum`
CALC_EVAL_v2=`cat ${v2}/evaluate-v2.0.py |md5sum`
echo "Squad data download done!"
echo "Verifying Dataset...."
if [ "$EXP_TRAIN_v1" != "$CALC_TRAIN_v1" ]; then
echo "train-v1.1.json is corrupted! md5sum doesn't match"
fi
if [ "$EXP_DEV_v1" != "$CALC_DEV_v1" ]; then
echo "dev-v1.1.json is corrupted! md5sum doesn't match"
fi
if [ "$EXP_EVAL_v1" != "$CALC_EVAL_v1" ]; then
echo "evaluate-v1.1.py is corrupted! md5sum doesn't match"
fi
if [ "$EXP_TRAIN_v2" != "$CALC_TRAIN_v2" ]; then
echo "train-v2.0.json is corrupted! md5sum doesn't match"
fi
if [ "$EXP_DEV_v2" != "$CALC_DEV_v2" ]; then
echo "dev-v2.0.json is corrupted! md5sum doesn't match"
fi
if [ "$EXP_EVAL_v2" != "$CALC_EVAL_v2" ]; then
echo "evaluate-v2.0.py is corrupted! md5sum doesn't match"
fi
echo "Complete!"
|
PyTorch/SpeechRecognition/wav2vec2/common/fairseq | fairseq | dist | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class ModuleProxyWrapper(torch.nn.Module):
"""
Wrap a DistributedDataParallel module and forward requests for missing
attributes to the module wrapped by DDP (the twice-wrapped module).
Also forward calls to :func:`state_dict` and :func:`load_state_dict`.
Usage::
module.xyz = "hello world"
wrapped_module = DistributedDataParallel(module, **ddp_args)
wrapped_module = ModuleProxyWrapper(wrapped_module)
assert wrapped_module.xyz == "hello world"
assert wrapped_module.state_dict().keys() == module.state_dict().keys()
Args:
module (nn.Module): module to wrap
"""
def __init__(self, module: torch.nn.Module):
super().__init__()
assert hasattr(module, "module"), \
"ModuleProxyWrapper expects input to wrap another module"
self.module = module
def __getattr__(self, name):
"""Forward missing attributes to twice-wrapped module."""
try:
# defer to nn.Module's logic
return super().__getattr__(name)
except AttributeError:
try:
# forward to the once-wrapped module
return getattr(self.module, name)
except AttributeError:
# forward to the twice-wrapped module
return getattr(self.module.module, name)
def state_dict(self, *args, **kwargs):
"""Forward to the twice-wrapped module."""
return self.module.module.state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
"""Forward to the twice-wrapped module."""
return self.module.module.load_state_dict(*args, **kwargs)
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
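if __name__ == "__main__":
    # Minimal self-contained sketch of the attribute forwarding. A stand-in wrapper
    # exposing a `.module` attribute is used instead of a real DistributedDataParallel,
    # which would require an initialized process group.
    class _FakeDDP(torch.nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module

        def forward(self, *args, **kwargs):
            return self.module(*args, **kwargs)

    inner = torch.nn.Linear(4, 2)
    inner.xyz = "hello world"
    proxy = ModuleProxyWrapper(_FakeDDP(inner))
    assert proxy.xyz == "hello world"                              # forwarded attribute
    assert proxy.state_dict().keys() == inner.state_dict().keys()  # twice-wrapped state dict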
|
PyTorch/SpeechRecognition/Jasper/triton/scripts | scripts | preprocess_triton_librispeech | #!/usr/bin/env bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Constructs JSON manifests for inference-subset of Librispeech corpus.
python ./utils/convert_librispeech.py \
--input_dir /datasets/LibriSpeech/test-clean \
--dest_dir /datasets/LibriSpeech/test-clean-wav \
--output_json /datasets/LibriSpeech/librispeech-test-clean-wav.json
|
PyTorch/Detection/Efficientdet/effdet/layers | layers | create_act | """ Activation Factory
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .activations import *
from .activations_jit import *
from .activations_me import *
from .config import is_exportable, is_scriptable, is_no_jit
_ACT_FN_DEFAULT = dict(
swish=swish,
mish=mish,
relu=F.relu,
relu6=F.relu6,
leaky_relu=F.leaky_relu,
elu=F.elu,
prelu=F.prelu,
celu=F.celu,
selu=F.selu,
gelu=F.gelu,
sigmoid=sigmoid,
tanh=tanh,
hard_sigmoid=hard_sigmoid,
hard_swish=hard_swish,
hard_mish=hard_mish,
)
_ACT_FN_JIT = dict(
swish=swish_jit,
mish=mish_jit,
hard_sigmoid=hard_sigmoid_jit,
hard_swish=hard_swish_jit,
hard_mish=hard_mish_jit
)
_ACT_FN_ME = dict(
swish=swish_me,
mish=mish_me,
hard_sigmoid=hard_sigmoid_me,
hard_swish=hard_swish_me,
hard_mish=hard_mish_me,
)
_ACT_LAYER_DEFAULT = dict(
swish=Swish,
mish=Mish,
relu=nn.ReLU,
relu6=nn.ReLU6,
elu=nn.ELU,
prelu=nn.PReLU,
celu=nn.CELU,
selu=nn.SELU,
gelu=nn.GELU,
sigmoid=Sigmoid,
tanh=Tanh,
hard_sigmoid=HardSigmoid,
hard_swish=HardSwish,
hard_mish=HardMish,
)
_ACT_LAYER_JIT = dict(
swish=SwishJit,
mish=MishJit,
hard_sigmoid=HardSigmoidJit,
hard_swish=HardSwishJit,
hard_mish=HardMishJit
)
_ACT_LAYER_ME = dict(
swish=SwishMe,
mish=MishMe,
hard_sigmoid=HardSigmoidMe,
hard_swish=HardSwishMe,
hard_mish=HardMishMe,
)
def get_act_fn(name='relu'):
""" Activation Function Factory
Fetching activation fns by name with this function allows export or torch script friendly
functions to be returned dynamically based on current config.
"""
if not name:
return None
if not (is_no_jit() or is_exportable() or is_scriptable()):
# If not exporting or scripting the model, first look for a memory-efficient version with
# custom autograd, then fallback
if name in _ACT_FN_ME:
return _ACT_FN_ME[name]
if not is_no_jit():
if name in _ACT_FN_JIT:
return _ACT_FN_JIT[name]
return _ACT_FN_DEFAULT[name]
def get_act_layer(name='relu'):
""" Activation Layer Factory
Fetching activation layers by name with this function allows export or torch script friendly
functions to be returned dynamically based on current config.
"""
if not name:
return None
if not (is_no_jit() or is_exportable() or is_scriptable()):
if name in _ACT_LAYER_ME:
return _ACT_LAYER_ME[name]
if not is_no_jit():
if name in _ACT_LAYER_JIT:
return _ACT_LAYER_JIT[name]
return _ACT_LAYER_DEFAULT[name]
def create_act_layer(name, inplace=False, **kwargs):
act_layer = get_act_layer(name)
if act_layer is not None:
return act_layer(inplace=inplace, **kwargs)
else:
return None |
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/layers | layers | linear | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import Dict
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from se3_transformer.model.fiber import Fiber
class LinearSE3(nn.Module):
"""
Graph Linear SE(3)-equivariant layer, equivalent to a 1x1 convolution.
Maps a fiber to a fiber with the same degrees (channels may be different).
No interaction between degrees, but interaction between channels.
type-0 features (C_0 channels) ────> Linear(bias=False) ────> type-0 features (C'_0 channels)
type-1 features (C_1 channels) ────> Linear(bias=False) ────> type-1 features (C'_1 channels)
:
type-k features (C_k channels) ────> Linear(bias=False) ────> type-k features (C'_k channels)
"""
def __init__(self, fiber_in: Fiber, fiber_out: Fiber):
super().__init__()
self.weights = nn.ParameterDict({
str(degree_out): nn.Parameter(
torch.randn(channels_out, fiber_in[degree_out]) / np.sqrt(fiber_in[degree_out]))
for degree_out, channels_out in fiber_out
})
def forward(self, features: Dict[str, Tensor], *args, **kwargs) -> Dict[str, Tensor]:
return {
degree: weight @ features[degree]
for degree, weight in self.weights.items()
}
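# Shape note for LinearSE3.forward: self.weights[degree] has shape [C'_d, C_d], so
# features[degree] is expected to be [..., C_d, 2 * degree + 1] (as implied by the
# matmul); the product mixes channels within each degree and leaves degrees untouched.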
|
PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/unidecoder | unidecoder | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import warnings
from .homoglyphs import homoglyphs
from .replacements import replacements
_replacements = {uni: asc for uni, asc in replacements}
_homoglyphs = {g: asc for asc, glyphs in homoglyphs.items() for g in glyphs}
def unidecoder(s, homoglyphs=False):
"""Transliterate unicode
Args:
s (str): unicode string
homoglyphs (bool): prioritize translating to homoglyphs
"""
warned = False # Once per utterance
ret = ''
for u in s:
if ord(u) < 127:
a = u
elif homoglyphs:
a = _homoglyphs.get(u, _replacements.get(u, None))
else:
a = _replacements.get(u, _homoglyphs.get(u, None))
if a is None:
if not warned:
warnings.warn(f'Unexpected character {u}: '
'please revise your text cleaning rules.',
stacklevel=10**6)
warned = True
else:
ret += a
return ret
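if __name__ == "__main__":
    # Minimal usage sketch: ASCII characters pass through unchanged, other characters
    # are mapped via the replacement/homoglyph tables, and unmapped characters are
    # dropped with a single warning per call.
    assert unidecoder("hello world") == "hello world"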
|
TensorFlow/LanguageModeling/BERT/scripts/docker | docker | launch | #!/bin/bash
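# Usage: scripts/docker/launch.sh [command]
# Runs the `bert` Docker image (assumed to be built beforehand) with all GPUs visible
# by default, mounting the current directory at /workspace/bert and ./results at
# /results; with no arguments it drops into an interactive bash shell.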
CMD=${@:-/bin/bash}
NV_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:-"all"}
docker run --gpus $NV_VISIBLE_DEVICES --rm -it \
--net=host \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
-e NVIDIA_VISIBLE_DEVICES=$NV_VISIBLE_DEVICES \
-v $PWD:/workspace/bert \
-v $PWD/results:/results \
bert $CMD
|
PyTorch/Forecasting/TFT/triton/deployment_toolkit | deployment_toolkit | __init__ | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. |
TensorFlow/Recommendation | Recommendation | README | # Recommender Systems
Recommender systems are a type of information filtering system that seeks to predict the
"rating" or "preference" a user would give to an item. (Source:
[Wikipedia](https://en.wikipedia.org/wiki/Recommender_system))
In an era where users have to navigate through an exponentially growing number of goods and services, recommender systems have become key to driving user engagement, teaching online services how to personalize experiences for users. They are ubiquitous and indispensable in commercial online platforms.
In this guide, you’ll find answers to how recommender systems work, how you might use them in your business, and more. Whether you’re an experienced machine learning engineer considering implementation, a developer wanting to learn more, or a product manager looking to explore what’s possible with recommender systems, this guide is for you.
Here is a look at what we will cover:
- Challenges and opportunities in recommender systems
- How do DL-based recommender systems work?
- Use cases and applications
## Challenges and opportunities in recommender systems
With the rapid growth in scale of industry datasets, deep learning (DL) recommender models have started to gain advantages over traditional methods by capitalizing on large amounts of training data. However, there are multiple challenges when it comes to performance of large-scale recommender systems solutions:
- Huge datasets: Commercial recommenders are trained on huge datasets, often several terabytes in scale.
- Complex data preprocessing and feature engineering pipelines: Datasets need to be preprocessed and transformed into a form suitable for use with DL models and frameworks. In addition, feature engineering creates an extensive set of new features from existing ones, requiring multiple iterations to arrive at an optimal solution.
- Input bottleneck: Data loading, if not well optimized, can be the slowest part of the training process, leading to under-utilization of high-throughput computing devices such as GPUs.
- Extensive repeated experimentation: The whole data engineering, training, and evaluation process is generally repeated many times, requiring significant time and computational resources.
To meet the computational demands for large-scale DL recommender systems training and inference, recommender-on-GPU solutions aim to provide fast feature engineering and high training throughput (to enable both fast experimentation and production retraining), as well as low latency, high-throughput inference.
Current DL–based models for recommender systems include the [Wide and
Deep](https://arxiv.org/abs/1606.07792) model, Deep Learning Recommendation Model
([DLRM](https://github.com/facebookresearch/dlrm)), neural collaborative filtering
([NCF](https://arxiv.org/abs/1708.05031)), Variational Autoencoder
([VAE](https://arxiv.org/abs/1802.05814)) for Collaborative Filtering, and
[BERT4Rec](https://arxiv.org/pdf/1904.06690.pdf), among others.
## How do DL-based recommender systems work?
In [NVIDIA Deep Learning Examples](https://github.com/NVIDIA/DeepLearningExamples), we introduce several popular state-of-the-art DL-based recommender models in Tensorflow and PyTorch.
As an example, let’s start by discussing our reference implementation of DLRM. With DLRM, we systematically tackle the challenges mentioned above by designing a complete DLRM pipeline, from data preparation to training to production inference. We provide ready-to-go Docker images for training and inference, data downloading and preprocessing tools, and Jupyter demo notebooks to get you started quickly. Also, trained models can be prepared for production inference in one simple step with our exporter tool.
For more details on the model architectures, example code, and how to set up the end-to-end data processing, training, and inference pipeline on GPU, please refer to the [DLRM developer blog](https://developer.nvidia.com/blog/optimizing-dlrm-on-nvidia-gpus/) and the [NVIDIA GPU-accelerated DL model portfolio](https://github.com/NVIDIA/DeepLearningExamples) under /PyTorch/Recommendation/DLRM.
In addition, DLRM forms part of NVIDIA [Merlin](https://developer.nvidia.com/nvidia-merlin), a framework for building high-performance, DL–based recommender systems.
## Use cases and applications
### E-Commerce & Retail: Personalized Merchandising
Imagine a user has already purchased a scarf. Why not offer a hat that matches this scarf, so that the look is complete? This feature is often implemented by means of AI-based algorithms as “Complete the look” or “You might also like” sections in e-commerce platforms such as Amazon, Walmart, Target, and many others.
On average, an intelligent recommender system delivers a [22.66% lift in conversion rates](https://brandcdn.exacttarget.com/sites/exacttarget/files/deliverables/etmc-predictiveintelligencebenchmarkreport.pdf) for web products.
### Media & Entertainment: Personalized Content
AI-based recommender engines can analyze individual purchase behavior and detect patterns that help provide a given user with the content suggestions most likely to match his or her interests. This is what Google and Facebook actively apply when recommending ads, and what Netflix does behind the scenes when recommending movies and TV shows.
### Personalized Banking
As a mass-market product consumed digitally by millions, banking is a prime candidate for recommendations. Knowing a customer’s detailed financial situation and their past preferences, coupled with data on thousands of similar users, is quite powerful.
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/optimizer | optimizer | Adamax | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: torch.optim.Adamax
lr: 0.002
betas: [0.9, 0.999]
eps: 1e-8
weight_decay: 0.0
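# A minimal sketch of how this optimizer config is typically consumed (assuming a
# Hydra-based call site; `cfg.trainer.optimizer` and `model` are hypothetical names):
#   optimizer = hydra.utils.instantiate(cfg.trainer.optimizer, model.parameters())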
|
TensorFlow/Segmentation/UNet_3D_Medical/model | model | losses | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Different losses for UNet3D """
import tensorflow as tf
def make_loss(params, y_true, y_pred):
""" Factory method for loss functions
:param params: Dict with additional parameters
:param y_true: Ground truth labels
:param y_pred: Predicted labels
:return: Loss
"""
if params.loss == 'dice':
return _dice(y_true, y_pred)
if params.loss == 'ce':
return _ce(y_true, y_pred)
if params.loss == 'dice+ce':
return tf.add(_ce(y_true, y_pred), _dice(y_true, y_pred), name="total_loss_ref")
raise ValueError('Unknown loss: {}'.format(params.loss))
def _ce(y_true, y_pred):
""" Crossentropy
:param y_true: Ground truth labels
:param y_pred: Predicted labels
:return: loss
"""
return tf.reduce_sum(
tf.reduce_mean(tf.keras.backend.binary_crossentropy(tf.cast(y_true, tf.float32), y_pred), axis=[0, 1, 2, 3]),
name='crossentropy_loss_ref')
def _dice(y_true, y_pred):
""" Training dice
:param y_true: Ground truth labels
:param y_pred: Predicted labels
:return: loss
"""
return tf.reduce_sum(dice_loss(predictions=y_pred, targets=y_true), name='dice_loss_ref')
def eval_dice(y_true, y_pred):
""" Evaluation dice
:param y_true: Ground truth labels
:param y_pred: Predicted labels
:return: loss
"""
return 1 - dice_loss(predictions=y_pred, targets=y_true)
def dice_loss(predictions,
targets,
squared_pred=False,
smooth=1e-5,
top_smooth=0.0):
""" Dice
:param predictions: Predicted labels
:param targets: Ground truth labels
:param squared_pred: Whether to square predictions and targets in the denominator
:param smooth: Smooth term for denominator
:param top_smooth: Smooth term for numerator
:return: loss
"""
is_channels_first = False
n_len = len(predictions.get_shape())
reduce_axis = list(range(2, n_len)) if is_channels_first else list(range(1, n_len - 1))
intersection = tf.reduce_sum(targets * predictions, axis=reduce_axis)
if squared_pred:
targets = tf.square(targets)
predictions = tf.square(predictions)
y_true_o = tf.reduce_sum(targets, axis=reduce_axis)
y_pred_o = tf.reduce_sum(predictions, axis=reduce_axis)
denominator = y_true_o + y_pred_o
dice = (2.0 * intersection + top_smooth) / (denominator + smooth)
return 1 - tf.reduce_mean(dice, axis=0)
def total_dice(predictions,
targets,
smooth=1e-5,
top_smooth=0.0):
""" Total Dice
:param predictions: Predicted labels
:param targets: Ground truth labels
:param smooth: Smooth term for denominator
:param top_smooth: Smooth term for numerator
:return: loss
"""
n_len = len(predictions.get_shape())
reduce_axis = list(range(1, n_len-1))
targets = tf.reduce_sum(targets, axis=-1)
predictions = tf.reduce_sum(predictions, axis=-1)
intersection = tf.reduce_sum(targets * predictions, axis=reduce_axis)
y_true_o = tf.reduce_sum(targets, axis=reduce_axis)
y_pred_o = tf.reduce_sum(predictions, axis=reduce_axis)
denominator = y_true_o + y_pred_o
return tf.reduce_mean((2.0 * intersection + top_smooth) / (denominator + smooth))
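if __name__ == "__main__":
    # Minimal shape sketch on synthetic data: inputs are channels-last volumes,
    # e.g. [batch, depth, height, width, n_classes]. dice_loss returns one value
    # per class, while total_dice returns a single scalar.
    example_preds = tf.random.uniform([2, 8, 8, 8, 3])
    example_labels = tf.cast(example_preds > 0.5, tf.float32)
    print(dice_loss(predictions=example_preds, targets=example_labels).shape)  # (3,)
    print(total_dice(example_preds, example_labels).shape)                     # ()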
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/dataset | dataset | dataset_parser | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader and processing.
Defines input_fn of Mask-RCNN for TF Estimator. The input_fn includes training
data for category classification, bounding box regression, and number of
positive examples to normalize the loss during training.
"""
import tensorflow as tf
from mrcnn_tf2.model import anchors
from mrcnn_tf2.object_detection import tf_example_decoder
from mrcnn_tf2.ops import preprocess_ops
from mrcnn_tf2.utils import coco_utils
MAX_NUM_INSTANCES = 100
MAX_NUM_VERTICES_PER_INSTANCE = 1500
MAX_NUM_POLYGON_LIST_LEN = 2 * MAX_NUM_VERTICES_PER_INSTANCE * MAX_NUM_INSTANCES
POLYGON_PAD_VALUE = coco_utils.POLYGON_PAD_VALUE
__all__ = [
# dataset parser
"dataset_parser",
# common functions
"preprocess_image",
"process_groundtruth_is_crowd",
"process_source_id",
# eval
"prepare_labels_for_eval",
# training
"augment_image",
"process_boxes_classes_indices_for_training",
"process_gt_masks_for_training",
"process_labels_for_training",
"process_targets_for_training"
]
def dataset_parser(value, mode, params, use_instance_mask, seed=None, regenerate_source_id=False):
"""Parse data to a fixed dimension input image and learning targets.
Args:
value: A dictionary that contains an image and groundtruth annotations.
Returns:
features: a dictionary that contains the image and auxiliary
information. The following describes {key: value} pairs in the
dictionary.
image: Image tensor that is preprocessed to have normalized value and
fixed dimension [image_size, image_size, 3]
image_info: image information that includes the original height and
width, the scale of the processed image to the original image, and
the scaled height and width.
source_ids: Source image id. Default value -1 if the source id is
empty in the groundtruth annotation.
labels: a dictionary that contains auxiliary information plus (optional)
labels. The following describes {key: value} pairs in the dictionary.
`labels` is only for training.
score_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of objectiveness score at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
gt_boxes: Groundtruth bounding box annotations. The box is represented
in [y1, x1, y2, x2] format. The tensor is padded with -1 to the
fixed dimension [MAX_NUM_INSTANCES, 4].
gt_classes: Groundtruth classes annotations. The tensor is padded
with -1 to the fixed dimension [MAX_NUM_INSTANCES].
cropped_gt_masks: groundtruth masks cropped by the bounding box and
resized to a fixed size determined by params.gt_mask_size
regenerate_source_id: `bool`, if True TFExampleParser will use hashed
value of `image/encoded` for `image/source_id`.
"""
if mode not in ['train', 'eval']:
raise ValueError("Unknown execution mode received: %s" % mode)
def create_example_decoder():
return tf_example_decoder.TfExampleDecoder(
use_instance_mask=use_instance_mask,
regenerate_source_id=regenerate_source_id
)
example_decoder = create_example_decoder()
with tf.xla.experimental.jit_scope(compile_ops=True):
with tf.name_scope('parser'):
data = example_decoder.decode(value)
data['groundtruth_is_crowd'] = process_groundtruth_is_crowd(data)
image = tf.image.convert_image_dtype(data['image'], dtype=tf.float32)
source_id = process_source_id(data['source_id'])
if mode == 'eval':
features = {
'source_ids': source_id,
}
features["images"], features["image_info"], _, _ = preprocess_image(
image,
boxes=None,
instance_masks=None,
image_size=params.image_size,
max_level=params.max_level,
augment_input_data=False,
seed=seed
)
return features, {}
elif mode == 'train':
features = {
'source_ids': source_id
}
boxes, classes, indices, instance_masks = process_boxes_classes_indices_for_training(
data,
skip_crowd_during_training=params.skip_crowd_during_training,
use_category=params.use_category,
use_instance_mask=use_instance_mask
)
image, image_info, boxes, instance_masks = preprocess_image(
image,
boxes=boxes,
instance_masks=instance_masks,
image_size=params.image_size,
max_level=params.max_level,
augment_input_data=params.augment_input_data,
seed=seed
)
features.update({
'images': image,
'image_info': image_info,
})
padded_image_size = image.get_shape().as_list()[:2]
if use_instance_mask:
features['cropped_gt_masks'] = process_gt_masks_for_training(
instance_masks,
boxes,
gt_mask_size=params.gt_mask_size,
padded_image_size=padded_image_size,
max_num_instances=MAX_NUM_INSTANCES
)
with tf.xla.experimental.jit_scope(compile_ops=False):
(score_targets, box_targets), input_anchor = process_targets_for_training(
padded_image_size=padded_image_size,
boxes=boxes,
classes=classes,
params=params
)
features['gt_boxes'], features['gt_classes'], additional_labels = process_labels_for_training(
image_info, boxes, classes, score_targets, box_targets,
max_num_instances=MAX_NUM_INSTANCES,
min_level=params.min_level,
max_level=params.max_level
)
features.update(additional_labels)
# Features
# {
# 'source_ids': <tf.Tensor 'parser/StringToNumber:0' shape=() dtype=float32>,
# 'images': <tf.Tensor 'parser/pad_to_bounding_box/Squeeze:0' shape=(1024, 1024, 3) dtype=float32>,
# 'image_info': <tf.Tensor 'parser/stack_1:0' shape=(5,) dtype=float32>,
# 'cropped_gt_masks': <tf.Tensor 'parser/Reshape_4:0' shape=(100, 116, 116) dtype=float32>,
# 'gt_boxes': <tf.Tensor 'parser/Reshape_20:0' shape=(100, 4) dtype=float32>,
# 'gt_classes': <tf.Tensor 'parser/Reshape_22:0' shape=(100, 1) dtype=float32>,
# 'score_targets_2': <tf.Tensor 'parser/Reshape_9:0' shape=(256, 256, 3) dtype=int32>,
# 'box_targets_2': <tf.Tensor 'parser/Reshape_14:0' shape=(256, 256, 12) dtype=float32>,
# 'score_targets_3': <tf.Tensor 'parser/Reshape_10:0' shape=(128, 128, 3) dtype=int32>,
# 'box_targets_3': <tf.Tensor 'parser/Reshape_15:0' shape=(128, 128, 12) dtype=float32>,
# 'score_targets_4': <tf.Tensor 'parser/Reshape_11:0' shape=(64, 64, 3) dtype=int32>,
# 'box_targets_4': <tf.Tensor 'parser/Reshape_16:0' shape=(64, 64, 12) dtype=float32>,
# 'score_targets_5': <tf.Tensor 'parser/Reshape_12:0' shape=(32, 32, 3) dtype=int32>,
# 'box_targets_5': <tf.Tensor 'parser/Reshape_17:0' shape=(32, 32, 12) dtype=float32>,
# 'score_targets_6': <tf.Tensor 'parser/Reshape_13:0' shape=(16, 16, 3) dtype=int32>,
# 'box_targets_6': <tf.Tensor 'parser/Reshape_18:0' shape=(16, 16, 12) dtype=float32>,
# }
# Due to the way Keras losses work, we pass all the targets as features;
# it is impossible to access labels in the custom losses that we are using.
# Labels
# {
# }
return features, {}
def preprocess_image(image, boxes, instance_masks, image_size, max_level, augment_input_data=False, seed=None):
image = preprocess_ops.normalize_image(image)
if augment_input_data:
image, boxes, instance_masks = augment_image(image=image, boxes=boxes, instance_masks=instance_masks, seed=seed)
# Scaling and padding.
image, image_info, boxes, instance_masks = preprocess_ops.resize_and_pad(
image=image,
target_size=image_size,
stride=2 ** max_level,
boxes=boxes,
masks=instance_masks
)
return image, image_info, boxes, instance_masks
def process_groundtruth_is_crowd(data):
return tf.cond(
pred=tf.greater(tf.size(input=data['groundtruth_is_crowd']), 0),
true_fn=lambda: data['groundtruth_is_crowd'],
false_fn=lambda: tf.zeros_like(data['groundtruth_classes'], dtype=tf.bool)
)
def process_source_id(source_id):
"""Processes source_id to the right format."""
if source_id.dtype == tf.string:
source_id = tf.cast(tf.strings.to_number(source_id), tf.int64)
with tf.control_dependencies([source_id]):
source_id = tf.cond(
pred=tf.equal(tf.size(input=source_id), 0),
true_fn=lambda: tf.cast(tf.constant(-1), tf.int64),
false_fn=lambda: tf.identity(source_id)
)
return source_id
# eval
def prepare_labels_for_eval(
data,
target_num_instances=MAX_NUM_INSTANCES,
target_polygon_list_len=MAX_NUM_POLYGON_LIST_LEN,
use_instance_mask=False
):
"""Create labels dict for infeed from data of tf.Example."""
image = data['image']
height, width = tf.shape(input=image)[:2]
boxes = data['groundtruth_boxes']
classes = tf.cast(data['groundtruth_classes'], dtype=tf.float32)
num_labels = tf.shape(input=classes)[0]
boxes = preprocess_ops.pad_to_fixed_size(boxes, -1, [target_num_instances, 4])
classes = preprocess_ops.pad_to_fixed_size(classes, -1, [target_num_instances, 1])
is_crowd = tf.cast(data['groundtruth_is_crowd'], dtype=tf.float32)
is_crowd = preprocess_ops.pad_to_fixed_size(is_crowd, 0, [target_num_instances, 1])
labels = dict()
labels['width'] = width
labels['height'] = height
labels['groundtruth_boxes'] = boxes
labels['groundtruth_classes'] = classes
labels['num_groundtruth_labels'] = num_labels
labels['groundtruth_is_crowd'] = is_crowd
if use_instance_mask:
data['groundtruth_polygons'] = preprocess_ops.pad_to_fixed_size(
data=data['groundtruth_polygons'],
pad_value=POLYGON_PAD_VALUE,
output_shape=[target_polygon_list_len, 1]
)
if 'groundtruth_area' in data:
labels['groundtruth_area'] = preprocess_ops.pad_to_fixed_size(
data=data['groundtruth_area'],
pad_value=0,
output_shape=[target_num_instances, 1]
)
return labels
# training
def augment_image(image, boxes, instance_masks, seed):
flipped_results = preprocess_ops.random_horizontal_flip(
image,
boxes=boxes,
masks=instance_masks,
seed=seed
)
if instance_masks is not None:
image, boxes, instance_masks = flipped_results
else:
image, boxes = flipped_results
# image = tf.image.random_brightness(image, max_delta=0.1, seed=seed)
# image = tf.image.random_contrast(image, lower=0.9, upper=1.1, seed=seed)
# image = tf.image.random_saturation(image, lower=0.9, upper=1.1, seed=seed)
# image = tf.image.random_jpeg_quality(image, min_jpeg_quality=80, max_jpeg_quality=100, seed=seed)
return image, boxes, instance_masks
def process_boxes_classes_indices_for_training(data, skip_crowd_during_training, use_category, use_instance_mask):
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
indices = None
instance_masks = None
if not use_category:
classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)
if skip_crowd_during_training:
indices = tf.where(tf.logical_not(data['groundtruth_is_crowd']))
classes = tf.gather_nd(classes, indices)
boxes = tf.gather_nd(boxes, indices)
if use_instance_mask:
instance_masks = tf.gather_nd(data['groundtruth_instance_masks'], indices)
return boxes, classes, indices, instance_masks
def process_gt_masks_for_training(instance_masks, boxes, gt_mask_size, padded_image_size, max_num_instances):
cropped_gt_masks = preprocess_ops.crop_gt_masks(
instance_masks=instance_masks,
boxes=boxes,
gt_mask_size=gt_mask_size,
image_size=padded_image_size
)
# cropped_gt_masks = tf.reshape(cropped_gt_masks, [max_num_instances, -1])
cropped_gt_masks = preprocess_ops.pad_to_fixed_size(
data=cropped_gt_masks,
pad_value=-1,
output_shape=[max_num_instances, (gt_mask_size + 4) ** 2]
)
return tf.reshape(cropped_gt_masks, [max_num_instances, gt_mask_size + 4, gt_mask_size + 4])
def process_labels_for_training(
image_info, boxes, classes,
score_targets, box_targets,
max_num_instances, min_level, max_level
):
labels = {}
# Pad groundtruth data.
# boxes *= image_info[2]
boxes = preprocess_ops.pad_to_fixed_size(boxes, -1, [max_num_instances, 4])
classes = preprocess_ops.pad_to_fixed_size(classes, -1, [max_num_instances, 1])
for level in range(min_level, max_level + 1):
labels['score_targets_%d' % level] = score_targets[level]
labels['box_targets_%d' % level] = box_targets[level]
return boxes, classes, labels
def process_targets_for_training(padded_image_size, boxes, classes, params):
input_anchors = anchors.Anchors(
params.min_level,
params.max_level,
params.num_scales,
params.aspect_ratios,
params.anchor_scale,
padded_image_size
)
anchor_labeler = anchors.AnchorLabeler(
input_anchors,
params.num_classes,
params.rpn_positive_overlap,
params.rpn_negative_overlap,
params.rpn_batch_size_per_im,
params.rpn_fg_fraction
)
return anchor_labeler.label_anchors(boxes, classes), input_anchors
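if __name__ == "__main__":
    # Small sanity sketch for the source-id handling (run eagerly): string ids are
    # converted to int64, and an empty id falls back to -1, as described in the
    # dataset_parser docstring above.
    print(process_source_id(tf.constant("42")))                 # 42, dtype=int64
    print(process_source_id(tf.constant([], dtype=tf.string)))  # -1, dtype=int64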
|
PyTorch/LanguageModeling/BART/scripts | scripts | get_pretraining_data | #!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
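# Usage: bash scripts/get_pretraining_data.sh [data_folder]
# Downloads the Wikipedia, Common Crawl, and OpenWebText pretraining corpora into
# $data_folder (default: /workspace/bart/data/) using the download_* utilities
# available in the training container.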
data_folder=${1:-"/workspace/bart/data/"}
mkdir -p $data_folder
# Get wikipedia data
download_wikipedia --outdir $data_folder/wiki
# Get Common Crawl data
download_common_crawl \
--outdir $data_folder/common_crawl \
--warc-files-start-date 2016-09-01 \
--warc-files-end-date 2019-02-28 \
--start-date 2016-09-01 \
--end-date 2019-02-28
# Get OpenWebText data
download_open_webtext --outdir $data_folder/openwebtext
|
PyTorch/Classification/GPUNet/triton | triton | README | # Deploying the GPUNet model on Triton Inference Server
This folder contains instructions for deployment to run inference
on Triton Inference Server, as well as detailed performance analysis.
The purpose of this document is to help you achieve
the best inference performance.
## Table of contents
- [Solution overview](#solution-overview)
- [Introduction](#introduction)
- [Deployment process](#deployment-process)
- [Setup](#setup)
- [Quick Start Guide](#quick-start-guide)
- [Release notes](#release-notes)
- [Changelog](#changelog)
- [Known issues](#known-issues)
## Solution overview
### Introduction
The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server)
provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs.
The server provides an inference service via an HTTP or gRPC endpoint,
allowing remote clients to request inferencing for any number of GPU
or CPU models being managed by the server.
This README provides step-by-step deployment instructions for models generated
during training (as described in the [model README](../readme.md)).
Additionally, this README provides the corresponding deployment scripts that
ensure optimal GPU utilization during inferencing on the Triton Inference Server.
### Deployment process
The deployment process consists of two steps:
1. Conversion.
The purpose of conversion is to find the best performing model
format supported by the Triton Inference Server.
Triton Inference Server uses a number of runtime backends such as
[TensorRT](https://developer.nvidia.com/tensorrt),
[LibTorch](https://github.com/triton-inference-server/pytorch_backend) and
[ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend)
to support various model types. Refer to the
[Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton)
for a list of available backends.
2. Configuration.
Model configuration on the Triton Inference Server, which generates
necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md).
After deployment, the Triton Inference Server is used to evaluate the converted model with performance tests,
which produce latency and throughput results for offline (static batching)
and online (dynamic batching) scenarios.
All steps are executed by the provided runner script. Refer to the [Quick Start Guide](#quick-start-guide).
## Setup
Ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PyTorch NGC container 21.12](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch)
* [Triton Inference Server NGC container 21.12](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver)
* [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html)
* [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU
## Quick Start Guide
Deployment is supported for the following architectures. For the deployment steps, refer to the appropriate readme file:
* [0.65ms (GPUNet-0)](./065ms/README.md)
* [0.85ms (GPUNet-1)](./085ms/README.md)
* [1.75ms (GPUNet-2)](./175ms/README.md)
* [0.5ms-D (GPUNet-P0)](./05ms-D/README.md)
* [0.8ms-D (GPUNet-P1)](./08ms-D/README.md)
* [1.25ms-D (GPUNet-D1)](./125ms-D/README.md)
* [2.25ms-D (GPUNet-D2)](./225ms-D/README.md)
## Release Notes
We’re constantly refining and improving our performance on AI
and HPC workloads with frequent updates
to our software stack. For our latest performance data, refer
to these pages for
[AI](https://developer.nvidia.com/deep-learning-performance-training-inference)
and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.
### Changelog
May 2022
- Initial release
### Known issues
- There are no known issues with this model.
|
PyTorch/SpeechRecognition/Jasper/triton/model_repo_configs/fp16/decoder-ts-script | decoder-ts-script | config | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
name: "decoder-ts-script"
platform: "pytorch_libtorch"
default_model_filename: "model.pt"
max_batch_size: 64
input [
{
name: "input__0"
data_type: TYPE_FP16
dims: [ -1, 29 ]
}
]
output [
{
name: "output__0"
data_type: TYPE_INT32
dims: [-1]
}
]
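# Note (illustrative interpretation, not part of the original config): because
# max_batch_size is 64, Triton adds the batch dimension implicitly, so a request
# carries "input__0" of shape [batch, time, 29] in FP16 and returns "output__0"
# of shape [batch, time] as INT32 token ids.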
|
PyTorch/SpeechRecognition/QuartzNet/common | common | helpers | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
from collections import OrderedDict
import torch
import torch.distributed as dist
from .metrics import word_error_rate
def print_once(msg):
if not dist.is_initialized() or dist.get_rank() == 0:
print(msg)
def add_ctc_blank(symbols):
return symbols + ['<BLANK>']
def ctc_decoder_predictions_tensor(tensor, labels):
"""
Takes output of greedy ctc decoder and performs ctc decoding algorithm to
remove duplicates and special symbol. Returns prediction
Args:
tensor: model output tensor
label: A list of labels
Returns:
prediction
"""
blank_id = len(labels) - 1
hypotheses = []
labels_map = {i: labels[i] for i in range(len(labels))}
prediction_cpu_tensor = tensor.long().cpu()
# iterate over batch
for ind in range(prediction_cpu_tensor.shape[0]):
prediction = prediction_cpu_tensor[ind].numpy().tolist()
# CTC decoding procedure
decoded_prediction = []
previous = len(labels) - 1 # id of a blank symbol
for p in prediction:
if (p != previous or previous == blank_id) and p != blank_id:
decoded_prediction.append(p)
previous = p
hypothesis = ''.join([labels_map[c] for c in decoded_prediction])
hypotheses.append(hypothesis)
return hypotheses
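# Illustrative example (shown as a comment only): decoding a toy batch of greedy
# argmax ids, assuming the blank symbol is the last label as produced by
# add_ctc_blank().
# >>> labels = add_ctc_blank(['a', 'b', 'c'])
# >>> ids = torch.tensor([[0, 0, 3, 1, 3, 2]])
# >>> ctc_decoder_predictions_tensor(ids, labels)
# ['abc']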
def greedy_wer(preds, tgt, tgt_lens, labels):
"""
Takes output of greedy ctc decoder and performs ctc decoding algorithm to
remove duplicates and special symbol. Prints wer and prediction examples to screen
Args:
tensors: A list of 3 tensors (predictions, targets, target_lengths)
labels: A list of labels
Returns:
word error rate
"""
with torch.no_grad():
references = gather_transcripts([tgt], [tgt_lens], labels)
hypotheses = ctc_decoder_predictions_tensor(preds, labels)
wer, _, _ = word_error_rate(hypotheses, references)
return wer, hypotheses[0], references[0]
def gather_losses(losses_list):
return [torch.mean(torch.stack(losses_list))]
def gather_predictions(predictions_list, labels):
results = []
for prediction in predictions_list:
results += ctc_decoder_predictions_tensor(prediction, labels=labels)
return results
def gather_transcripts(transcript_list, transcript_len_list, labels):
results = []
labels_map = {i: labels[i] for i in range(len(labels))}
# iterate over workers
for txt, lens in zip(transcript_list, transcript_len_list):
for t, l in zip(txt.long().cpu(), lens.long().cpu()):
t = list(t.numpy())
results.append(''.join([labels_map[c] for c in t[:l]]))
return results
def process_evaluation_batch(tensors, global_vars, labels):
"""
    Processes the results of an evaluation iteration and saves them in global_vars
Args:
tensors: dictionary with results of an evaluation iteration, e.g. loss, predictions, transcript, and output
global_vars: dictionary where processes results of iteration are saved
labels: A list of labels
"""
for kv, v in tensors.items():
if kv.startswith('loss'):
global_vars['EvalLoss'] += gather_losses(v)
elif kv.startswith('predictions'):
global_vars['preds'] += gather_predictions(v, labels)
elif kv.startswith('transcript_length'):
transcript_len_list = v
elif kv.startswith('transcript'):
transcript_list = v
elif kv.startswith('output'):
global_vars['logits'] += v
global_vars['txts'] += gather_transcripts(
transcript_list, transcript_len_list, labels)
def process_evaluation_epoch(aggregates, tag=None):
"""
    Processes results from each worker at the end of evaluation and combines them into the final result
Args:
aggregates: dictionary containing information of entire evaluation
Return:
wer: final word error rate
loss: final loss
"""
if 'losses' in aggregates:
eloss = torch.mean(torch.stack(aggregates['losses'])).item()
else:
eloss = None
hypotheses = aggregates['preds']
references = aggregates['txts']
wer, scores, num_words = word_error_rate(hypotheses, references)
multi_gpu = dist.is_initialized()
if multi_gpu:
if eloss is not None:
eloss /= dist.get_world_size()
eloss_tensor = torch.tensor(eloss).cuda()
dist.all_reduce(eloss_tensor)
eloss = eloss_tensor.item()
scores_tensor = torch.tensor(scores).cuda()
dist.all_reduce(scores_tensor)
scores = scores_tensor.item()
num_words_tensor = torch.tensor(num_words).cuda()
dist.all_reduce(num_words_tensor)
num_words = num_words_tensor.item()
wer = scores * 1.0 / num_words
return wer, eloss
def num_weights(module):
return sum(p.numel() for p in module.parameters() if p.requires_grad)
class Checkpointer(object):
def __init__(self, save_dir, model_name, keep_milestones=[100, 200, 300]):
self.save_dir = save_dir
self.keep_milestones = keep_milestones
self.model_name = model_name
tracked = [
            (int(re.search(r'epoch(\d+)_', f).group(1)), f)
for f in glob.glob(f'{save_dir}/{self.model_name}_epoch*_checkpoint.pt')]
tracked = sorted(tracked, key=lambda t: t[0])
self.tracked = OrderedDict(tracked)
def save(self, model, ema_model, optimizer, scaler, epoch, step, best_wer,
is_best=False):
"""Saves model checkpoint for inference/resuming training.
Args:
model: the model, optionally wrapped by DistributedDataParallel
ema_model: model with averaged weights, can be None
optimizer: optimizer
epoch (int): epoch during which the model is saved
step (int): number of steps since beginning of training
best_wer (float): lowest recorded WER on the dev set
is_best (bool, optional): set name of checkpoint to 'best'
and overwrite the previous one
"""
rank = 0
if dist.is_initialized():
dist.barrier()
rank = dist.get_rank()
if rank != 0:
return
# Checkpoint already saved
if not is_best and epoch in self.tracked:
return
unwrap_ddp = lambda model: getattr(model, 'module', model)
state = {
'epoch': epoch,
'step': step,
'best_wer': best_wer,
'state_dict': unwrap_ddp(model).state_dict(),
'ema_state_dict': unwrap_ddp(ema_model).state_dict() if ema_model is not None else None,
'optimizer': optimizer.state_dict(),
'scaler': scaler.state_dict(),
}
if is_best:
fpath = os.path.join(
self.save_dir, f"{self.model_name}_best_checkpoint.pt")
else:
fpath = os.path.join(
self.save_dir, f"{self.model_name}_epoch{epoch}_checkpoint.pt")
print_once(f"Saving {fpath}...")
torch.save(state, fpath)
if not is_best:
# Remove old checkpoints; keep milestones and the last two
self.tracked[epoch] = fpath
for epoch in set(list(self.tracked)[:-2]) - set(self.keep_milestones):
try:
os.remove(self.tracked[epoch])
except:
pass
del self.tracked[epoch]
    def last_checkpoint(self):
        tracked = list(self.tracked.values())
        # Return the most recent checkpoint that loads correctly; fall back to
        # the previous one if the newest file appears corrupted.
        if len(tracked) >= 1:
            try:
                torch.load(tracked[-1], map_location='cpu')
                return tracked[-1]
            except:
                print_once(f'Last checkpoint {tracked[-1]} appears corrupted.')
                if len(tracked) >= 2:
                    return tracked[-2]
        return None
def load(self, fpath, model, ema_model, optimizer, scaler, meta):
print_once(f'Loading model from {fpath}')
checkpoint = torch.load(fpath, map_location="cpu")
unwrap_ddp = lambda model: getattr(model, 'module', model)
state_dict = checkpoint['state_dict']
unwrap_ddp(model).load_state_dict(state_dict, strict=True)
if ema_model is not None:
if checkpoint.get('ema_state_dict') is not None:
key = 'ema_state_dict'
else:
key = 'state_dict'
print_once('WARNING: EMA weights not found in the checkpoint.')
print_once('WARNING: Initializing EMA model with regular params.')
state_dict = checkpoint[key]
unwrap_ddp(ema_model).load_state_dict(state_dict, strict=True)
optimizer.load_state_dict(checkpoint['optimizer'])
scaler.load_state_dict(checkpoint['scaler'])
meta['start_epoch'] = checkpoint.get('epoch')
meta['best_wer'] = checkpoint.get('best_wer', meta['best_wer'])
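# Illustrative usage sketch (paths and the model/optimizer objects are placeholders):
#   checkpointer = Checkpointer(save_dir='/results', model_name='QuartzNet')
#   checkpointer.save(model, ema_model, optimizer, scaler, epoch=10, step=1000, best_wer=12.3)
#   resume_path = checkpointer.last_checkpoint()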
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ModulationRemovalPlugin | taco2ModulationRemovalPlugin | CMakeLists | #
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
file(GLOB SRCS *.cpp *.cu)
set(PLUGIN_SOURCES ${PLUGIN_SOURCES} ${SRCS})
set(PLUGIN_SOURCES ${PLUGIN_SOURCES} PARENT_SCOPE)
|
PyTorch/LanguageModeling/BART/scripts/docker | docker | build | #!/bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
docker build --network=host . --rm -t bart_pyt |
CUDA-Optimized/FastSpeech/fastspeech/utils | utils | logging | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
def tprint(msg):
print('[{}] {}'.format(time.strftime('%Y%m%d %H:%M:%S'), msg)) |
TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library | library | onnx | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.optimizer
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
# pytype: enable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple([_get_dim(d) for d in shape.dim])
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
# TODO: probably modification of onnx model ios causes error on optimize
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path]) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
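# Illustrative usage sketch (the model path is a placeholder):
#   loader = OnnxLoader()
#   model = loader.load("/models/model.onnx")
#   with OnnxRunner().init_inference(model) as session:
#       feed = {name: np.zeros([d or 1 for d in spec.shape], dtype=spec.dtype)
#               for name, spec in model.inputs.items()}
#       outputs = session(feed)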
|
PyTorch/Translation/Transformer/examples/translation | translation | prepare-wmt14en2fr | #!/bin/bash
# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh
echo 'Cloning Moses github repository (for tokenization scripts)...'
git clone https://github.com/moses-smt/mosesdecoder.git
echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
git clone https://github.com/rsennrich/subword-nmt.git
SCRIPTS=mosesdecoder/scripts
TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
CLEAN=$SCRIPTS/training/clean-corpus-n.perl
NORM_PUNC=$SCRIPTS/tokenizer/normalize-punctuation.perl
REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl
BPEROOT=subword-nmt
BPE_TOKENS=40000
URLS=(
"http://statmt.org/wmt13/training-parallel-europarl-v7.tgz"
"http://statmt.org/wmt13/training-parallel-commoncrawl.tgz"
"http://statmt.org/wmt13/training-parallel-un.tgz"
"http://statmt.org/wmt14/training-parallel-nc-v9.tgz"
"http://statmt.org/wmt10/training-giga-fren.tar"
"http://statmt.org/wmt14/test-full.tgz"
)
FILES=(
"training-parallel-europarl-v7.tgz"
"training-parallel-commoncrawl.tgz"
"training-parallel-un.tgz"
"training-parallel-nc-v9.tgz"
"training-giga-fren.tar"
"test-full.tgz"
)
CORPORA=(
"training/europarl-v7.fr-en"
"commoncrawl.fr-en"
"un/undoc.2000.fr-en"
"training/news-commentary-v9.fr-en"
"giga-fren.release2.fixed"
)
if [ ! -d "$SCRIPTS" ]; then
echo "Please set SCRIPTS variable correctly to point to Moses scripts."
exit
fi
src=en
tgt=fr
lang=en-fr
prep=wmt14_en_fr
tmp=$prep/tmp
orig=orig
mkdir -p $orig $tmp $prep
cd $orig
for ((i=0;i<${#URLS[@]};++i)); do
file=${FILES[i]}
if [ -f $file ]; then
echo "$file already exists, skipping download"
else
url=${URLS[i]}
wget "$url"
if [ -f $file ]; then
echo "$url successfully downloaded."
else
echo "$url not successfully downloaded."
exit -1
fi
if [ ${file: -4} == ".tgz" ]; then
tar zxvf $file
elif [ ${file: -4} == ".tar" ]; then
tar xvf $file
fi
fi
done
gunzip giga-fren.release2.fixed.*.gz
cd ..
echo "pre-processing train data..."
for l in $src $tgt; do
rm $tmp/train.tags.$lang.tok.$l
for f in "${CORPORA[@]}"; do
cat $orig/$f.$l | \
perl $NORM_PUNC $l | \
perl $REM_NON_PRINT_CHAR | \
perl $TOKENIZER -threads 8 -a -l $l >> $tmp/train.tags.$lang.tok.$l
done
done
echo "pre-processing test data..."
for l in $src $tgt; do
if [ "$l" == "$src" ]; then
t="src"
else
t="ref"
fi
grep '<seg id' $orig/test-full/newstest2014-fren-$t.$l.sgm | \
sed -e 's/<seg id="[0-9]*">\s*//g' | \
sed -e 's/\s*<\/seg>\s*//g' | \
sed -e "s/\’/\'/g" | \
perl $TOKENIZER -threads 8 -a -l $l > $tmp/test.$l
echo ""
done
echo "splitting train and valid..."
for l in $src $tgt; do
awk '{if (NR%1333 == 0) print $0; }' $tmp/train.tags.$lang.tok.$l > $tmp/valid.$l
awk '{if (NR%1333 != 0) print $0; }' $tmp/train.tags.$lang.tok.$l > $tmp/train.$l
done
TRAIN=$tmp/train.fr-en
BPE_CODE=$prep/code
rm -f $TRAIN
for l in $src $tgt; do
cat $tmp/train.$l >> $TRAIN
done
echo "learn_bpe.py on ${TRAIN}..."
python $BPEROOT/learn_bpe.py -s $BPE_TOKENS < $TRAIN > $BPE_CODE
for L in $src $tgt; do
for f in train.$L valid.$L test.$L; do
echo "apply_bpe.py to ${f}..."
python $BPEROOT/apply_bpe.py -c $BPE_CODE < $tmp/$f > $tmp/bpe.$f
done
done
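# At this point $tmp holds tokenized, BPE-encoded train/valid/test files for both
# languages; the clean step below drops sentence pairs with a length ratio above 1.5
# or with more than 250 tokens per side.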
perl $CLEAN -ratio 1.5 $tmp/bpe.train $src $tgt $prep/train 1 250
perl $CLEAN -ratio 1.5 $tmp/bpe.valid $src $tgt $prep/valid 1 250
for L in $src $tgt; do
cp $tmp/bpe.test.$L $prep/test.$L
done
|
TensorFlow2/Recommendation/SIM | SIM | .gitignore | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.ipynb_checkpoints/
.idea/
__pycache__
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf | tf | deploy_dense | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import argparse
import logging
import os
import pathlib
import shutil
import subprocess
import tempfile
import textwrap
from typing import List
import numpy as np
import tensorflow as tf
from nn.dense_model import DenseModel
from . import constants as c
LOGGER = logging.getLogger(__name__)
_dense_model_config_template = r"""name: "{model_name}"
{backend_type}: "{backend_runtime}"
max_batch_size: 0
input [
{{
name: "{input1}"
data_type: TYPE_FP32
dims: [-1, {input1_dim}]
}},
{{
name: "{input2}"
data_type: TYPE_FP32
dims: [-1, {input2_dim}]
}}
]
output [
{{
name: "{output1}"
data_type: TYPE_FP32
dims: [-1,1]
}}
]
version_policy: {{
specific:{{versions: 1}}
}},
instance_group [
{{
count: {engine_count_per_device}
kind : KIND_GPU
gpus: [0]
}}
]
"""
def _execute_cmd(cmd: List, verbose: bool = False):
"""Execute command as subprocess.
Args:
cmd: A command definition
verbose: Stream command output
Raises:
OSError when command execution failed
"""
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8"
)
if verbose:
LOGGER.info("Command output:")
stream_output = ""
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
stream_output += output
if verbose:
print(textwrap.indent(output.rstrip(), " ")) # noqa: T201
result = process.poll()
if result != 0:
raise OSError(
f"Processes exited with error code:{result}. Command to reproduce error:\n{' '.join(cmd)}"
)
def _savedmodel2onnx(source_model_path, dst_model_path, opset=11, verbose=False):
convert_cmd = [
"python",
"-m",
"tf2onnx.convert",
"--saved-model",
source_model_path.as_posix(),
"--output",
dst_model_path.as_posix(),
"--opset",
str(opset),
"--verbose",
]
_execute_cmd(convert_cmd, verbose=verbose)
def _onnx2trt(
model,
source_model_path,
dst_model_path,
precision,
optimal_batch_size,
max_batch_size,
verbose=False,
):
min_batch = np.array([model.num_numerical_features, sum(model.embedding_dim)])
optimal_batch = min_batch * optimal_batch_size
max_batch = min_batch * max_batch_size
print(
f"min batch {min_batch}, optimal_batch: {optimal_batch}, max_batch: {max_batch}"
)
convert_cmd = [
"trtexec",
f"--onnx={source_model_path.as_posix()}",
"--buildOnly",
f"--saveEngine={dst_model_path.as_posix()}",
f"--minShapes=args_0:1x{min_batch[0]},args_1:1x{min_batch[1]}",
f"--optShapes=args_0:{optimal_batch_size}x{min_batch[0]},args_1:{optimal_batch_size}x{min_batch[1]}",
f"--maxShapes=args_0:{max_batch_size}x{min_batch[0]},args_1:{max_batch_size}x{min_batch[1]}"
]
if precision == "fp16":
convert_cmd += ["--fp16"]
_execute_cmd(convert_cmd, verbose=True)
def _convert2onnx(source_model_path, workdir, verbose=False):
model_path = workdir / "model.onnx"
_savedmodel2onnx(
source_model_path=source_model_path,
dst_model_path=model_path,
verbose=verbose,
)
return model_path
def _convert2trt(
model,
source_model_path,
precision,
workdir,
optimal_batch_size,
max_batch_size,
verbose=False,
):
onnx_model_path = _convert2onnx(
source_model_path=source_model_path,
workdir=workdir,
verbose=verbose,
)
trt_model_path = workdir / "model.plan"
_onnx2trt(
model=model,
source_model_path=onnx_model_path,
dst_model_path=trt_model_path,
precision=precision,
verbose=verbose,
optimal_batch_size=optimal_batch_size,
max_batch_size=max_batch_size,
)
return trt_model_path
def deploy_dense(
src,
dst,
model_name,
model_format,
model_precision,
max_batch_size,
engine_count_per_device,
trt_optimal_batch_size,
version="1",
):
print("deploy dense dst: ", dst)
os.makedirs(dst, exist_ok=True)
dense_model = DenseModel.from_config(os.path.join(src, "config.json"))
if model_precision == "fp16" and model_format == 'tf-savedmodel':
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)
    # Currently, there is no support for deploying the custom CUDA kernels.
    # Use the pure TensorFlow implementation on the inference side instead.
if dense_model.interaction == 'dot_custom_cuda':
dense_model.interaction = 'dot_tensorflow'
dense_model._create_interaction_op()
dense_model.load_weights(os.path.join(src, "dense"))
dense_model.transpose = False
dense_model.force_initialization(training=False, flattened_input=False)
tempdir_path = '/tmp/deploy_recsys'
shutil.rmtree(tempdir_path, ignore_errors=True)
os.makedirs(tempdir_path, exist_ok=True)
tempdir = pathlib.Path(tempdir_path)
model_path = tempdir / "model.savedmodel"
dense_model.save_model(model_path.as_posix(), save_input_signature=False)
model_store = pathlib.Path(dst) / str(version)
model_store.mkdir(parents=True, exist_ok=True)
if model_format == "tf-savedmodel":
backend_type = "platform"
backend_runtime = "tensorflow_savedmodel"
shutil.copytree(model_path, model_store / "model.savedmodel")
elif model_format == "onnx":
backend_type = "backend"
backend_runtime = "onnxruntime"
model_path = _convert2onnx(model_path, workdir=tempdir)
shutil.copy(model_path, model_store / "model.onnx")
elif model_format == "trt":
backend_type = "backend"
backend_runtime = "tensorrt"
model_path = _convert2trt(
dense_model,
model_path,
precision=model_precision,
workdir=tempdir,
optimal_batch_size=trt_optimal_batch_size,
max_batch_size=max_batch_size,
)
shutil.copy(model_path, model_store / "model.plan")
else:
raise ValueError(f"Unsupported format: {model_format}")
shutil.rmtree(tempdir_path)
with open(os.path.join(dst, "config.pbtxt"), "w") as f:
s = _dense_model_config_template.format(
backend_type=backend_type,
backend_runtime=backend_runtime,
model_name=model_name,
input1=c.dense_input1_name,
input1_dim=sum(dense_model.embedding_dim),
input2=c.dense_numerical_features_name,
input2_dim=dense_model.num_numerical_features,
output1=c.dense_output_name,
max_batch_size=max_batch_size,
engine_count_per_device=engine_count_per_device,
)
f.write(s)
return dense_model.num_numerical_features
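# Illustrative call (paths and values are placeholders):
#   deploy_dense(src="/checkpoint", dst="/model_repository/dlrm_dense",
#                model_name="dlrm_dense", model_format="tf-savedmodel",
#                model_precision="fp16", max_batch_size=65536,
#                engine_count_per_device=1, trt_optimal_batch_size=2048)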
if __name__ == "__main__":
main()
|
TensorFlow2/Recommendation/WideAndDeep/data | data | feature_spec | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import os
from typing import Dict, List
from data.outbrain.defaults import TRAIN_MAPPING, TEST_MAPPING, ONEHOT_CHANNEL, MULTIHOT_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, MAP_FEATURE_CHANNEL, PARQUET_TYPE
TYPE_SELECTOR = "type"
FEATURES_SELECTOR = "features"
FILES_SELECTOR = "files"
DTYPE_SELECTOR = "dtype"
CARDINALITY_SELECTOR = "cardinality"
MAX_HOTNESS_SELECTOR = "max_hotness"
class FeatureSpec:
def __init__(self, feature_spec=None, source_spec=None, channel_spec=None, metadata=None, base_directory=None):
self.feature_spec: Dict = feature_spec if feature_spec is not None else {}
self.source_spec: Dict = source_spec if source_spec is not None else {}
self.channel_spec: Dict = channel_spec if channel_spec is not None else {}
self.metadata: Dict = metadata if metadata is not None else {}
self.base_directory: str = base_directory
@classmethod
def from_yaml(cls, path):
with open(path, 'r') as feature_spec_file:
base_directory = os.path.dirname(path)
feature_spec = yaml.safe_load(feature_spec_file)
return cls.from_dict(feature_spec, base_directory=base_directory)
@classmethod
def from_dict(cls, source_dict, base_directory):
return cls(base_directory=base_directory, **source_dict)
def to_dict(self) -> Dict:
attributes_to_dump = ['feature_spec', 'source_spec', 'channel_spec', 'metadata']
return {attr: self.__dict__[attr] for attr in attributes_to_dump}
def to_string(self):
return yaml.dump(self.to_dict())
def to_yaml(self, output_path=None):
if not output_path:
output_path = self.base_directory + '/feature_spec.yaml'
with open(output_path, 'w') as output_file:
print(yaml.dump(self.to_dict()), file=output_file)
def _check_one_label_feature(self):
assert len(self.get_names_by_channel(LABEL_CHANNEL)) == 1
def _check_all_required_channels_present(self):
# check that channels are the ones expected
present_channels = list(self.channel_spec.keys())
required_channels = [ONEHOT_CHANNEL, MULTIHOT_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, MAP_FEATURE_CHANNEL]
assert sorted(present_channels) == sorted(required_channels)
def _check_all_used_features_are_defined(self):
# check that all features used in channel spec are defined in feature_spec
for channel_features in self.channel_spec.values():
for feature in channel_features:
assert feature in self.feature_spec
def _check_categoricals_have_cardinality(self):
all_categoricals = self.get_names_by_channel(ONEHOT_CHANNEL) + self.get_names_by_channel(MULTIHOT_CHANNEL)
for feature_name in all_categoricals:
feature_dict = self.feature_spec[feature_name]
assert CARDINALITY_SELECTOR in feature_dict
assert isinstance(feature_dict[CARDINALITY_SELECTOR], int)
def _check_required_mappings_present(self):
# check that mappings are the ones expected
mapping_name_list = list(self.source_spec.keys())
assert sorted(mapping_name_list) == sorted([TEST_MAPPING, TRAIN_MAPPING])
def _check_all_chunks_are_parquet(self):
for mapping_name in [TRAIN_MAPPING, TEST_MAPPING]:
mapping = self.source_spec[mapping_name]
for chunk in mapping:
assert chunk[TYPE_SELECTOR] == PARQUET_TYPE
def _check_only_one_chunk_per_mapping(self):
for mapping_name in [TRAIN_MAPPING, TEST_MAPPING]:
mapping = self.source_spec[mapping_name]
assert len(mapping) == 1
def _check_all_features_have_source_where_necessary(self, is_map_channel_active):
for channel_name, channel_features in self.channel_spec.items():
if channel_name != MAP_FEATURE_CHANNEL:
for mapping_name in [TRAIN_MAPPING, TEST_MAPPING]:
# This uses the fact that we require that mappings only have one chunk here
features_in_mapping = set(self.source_spec[mapping_name][0][FEATURES_SELECTOR])
for feature in channel_features:
assert feature in features_in_mapping
else:
map_channel_features = self.get_names_by_channel(MAP_FEATURE_CHANNEL)
if len(map_channel_features) == 1:
# This uses the fact that we require that mappings only have one chunk here
map_feature_name = map_channel_features[0]
test_mapping_features = set(self.source_spec[TEST_MAPPING][0][FEATURES_SELECTOR])
assert map_feature_name in test_mapping_features
def _check_map_feature_selected_if_enabled(self, is_map_feature_required):
map_channel_features = self.get_names_by_channel(MAP_FEATURE_CHANNEL)
assert len(map_channel_features) <= 1
if is_map_feature_required:
assert len(map_channel_features) == 1
def _check_dtype_correct_if_specified(self):
# make sure that if dtype is specified, it is convertible to float32 for numerical and convertible to int64 for categorical
# these are the requirements specified by tf.feature_column.categorical_column_with_identity and tf.feature_column.numeric_column
categorical_features = self.get_names_by_channel(ONEHOT_CHANNEL) + self.get_names_by_channel(MULTIHOT_CHANNEL)
categorical_allowed_types = {"int64", "int32"}
for feature in categorical_features:
feature_dict = self.feature_spec[feature]
if DTYPE_SELECTOR in feature_dict:
assert feature_dict[DTYPE_SELECTOR] in categorical_allowed_types
numerical_features = self.get_names_by_channel(NUMERICAL_CHANNEL)
numerical_allowed_types = {"float32", "float64"}
for feature in numerical_features:
feature_dict = self.feature_spec[feature]
if DTYPE_SELECTOR in feature_dict:
assert feature_dict[DTYPE_SELECTOR] in numerical_allowed_types
def _check_multihots_have_hotness_specified(self):
multihot_features = self.get_names_by_channel(MULTIHOT_CHANNEL)
for feature_name in multihot_features:
feature_dict = self.feature_spec[feature_name]
assert MAX_HOTNESS_SELECTOR in feature_dict
assert isinstance(feature_dict[MAX_HOTNESS_SELECTOR], int)
def _check_enough_files_for_ranks(self, world_size):
if world_size is not None:
for mapping in self.source_spec.values():
only_chunk = mapping[0]
files_number = len(only_chunk[FILES_SELECTOR])
assert files_number >= world_size, "NVTabular dataloader requires parquet to have at least as many partitions as there are workers"
def check_feature_spec(self, require_map_channel, world_size=None):
self._check_required_mappings_present()
self._check_all_required_channels_present()
self._check_one_label_feature()
self._check_map_feature_selected_if_enabled(require_map_channel)
self._check_all_used_features_are_defined()
self._check_categoricals_have_cardinality()
self._check_all_chunks_are_parquet()
self._check_only_one_chunk_per_mapping()
self._check_all_features_have_source_where_necessary(require_map_channel)
self._check_dtype_correct_if_specified()
self._check_multihots_have_hotness_specified()
self._check_enough_files_for_ranks(world_size)
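    # Illustrative usage sketch (the path and world size are placeholders):
    #   fspec = FeatureSpec.from_yaml("/outbrain/data/feature_spec.yaml")
    #   fspec.check_feature_spec(require_map_channel=False, world_size=8)
    #   train_paths = fspec.get_paths_by_mapping(TRAIN_MAPPING)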
def get_paths_by_mapping(self, mapping: str):
paths_from_fspec = []
chunk_list = self.source_spec[mapping]
for chunk in chunk_list:
paths_from_fspec.extend(chunk[FILES_SELECTOR])
paths = [os.path.join(self.base_directory, p) for p in paths_from_fspec]
return paths
def get_names_by_channel(self, channel_name) -> List[str]:
return self.channel_spec[channel_name]
def get_multihot_hotnesses(self, multihot_features: List[str]) -> Dict[str, int]:
return {feature_name:self.feature_spec[feature_name][MAX_HOTNESS_SELECTOR] for feature_name in multihot_features}
def get_cardinalities(self, features: List[str]) -> Dict[str, int]:
cardinalities = {feature_name: self.feature_spec[feature_name][CARDINALITY_SELECTOR]
for feature_name in features}
return cardinalities |
PyTorch/Classification/ConvNets/efficientnet/inference/FP32 | FP32 | DGXA100_efficientnet-b0_FP32 |
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b0 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 1 --workspace ${1:-./} --raport-file raport_1.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b0 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 2 --workspace ${1:-./} --raport-file raport_2.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b0 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 4 --workspace ${1:-./} --raport-file raport_4.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b0 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 8 --workspace ${1:-./} --raport-file raport_8.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b0 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 16 --workspace ${1:-./} --raport-file raport_16.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b0 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 32 --workspace ${1:-./} --raport-file raport_32.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b0 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 64 --workspace ${1:-./} --raport-file raport_64.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b0 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 128 --workspace ${1:-./} --raport-file raport_128.json
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/optimizer | optimizer | TorchAdam | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: torch.optim.Adam
lr: 0.001
betas: [0.9, 0.999]
eps: 1e-8
weight_decay: 0.0
amsgrad: False
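# Illustrative of how this config is typically consumed via Hydra (the exact config
# path below is an assumption):
#   from hydra.utils import instantiate
#   optimizer = instantiate(config.trainer.optimizer, params=model.parameters())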
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp | trtis_cpp | export_weights | #!/bin/bash
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
NVIDIA_VISIBLE_DEVICES="${NVIDIA_VISIBLE_DEVICES:-0}"
DOCKER_FILE="$(realpath Dockerfile.export_weights)"
IMAGE_NAME="trt-tacotron2-waveglow.weight_export"
CONTAINER_NAME="trt-tacotron2-waveglow.weight_export.container"
die() {
echo "ERROR: ${@}" 1>&2
exit 1
}
die_and_remove_image() {
#docker rmi "${IMAGE_NAME}"
die "${@}"
}
if [[ "${#}" != 3 ]]; then
echo "Invalid arguments: ${@}"
echo "USAGE:"
echo " ${0} <tacotron2 checkpoint> <waveglow checkpoint> <output directory>"
exit 1
fi
TACOTRON2_PT="${1}"
WAVEGLOW_PT="${2}"
MODEL_DIR="$(realpath ${3})"
TACOTRON2_DIR="$(dirname $(realpath ${TACOTRON2_PT}))"
TACOTRON2_NAME="$(basename ${TACOTRON2_PT})"
WAVEGLOW_DIR="$(dirname $(realpath ${WAVEGLOW_PT}))"
WAVEGLOW_NAME="$(basename ${WAVEGLOW_PT})"
DLE_DIR="../"
# remove docker container if it exists
docker rm "${CONTAINER_NAME}" &> /dev/null
pushd "${DLE_DIR}"
docker build . -f "${DOCKER_FILE}" -t "${IMAGE_NAME}" || die "Failed to build container"
# export tacotron2
nvidia-docker run \
--rm \
-e "NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES}" \
--name "${CONTAINER_NAME}" \
-v "${TACOTRON2_DIR}:/checkpoints" \
-v "${MODEL_DIR}:/models" \
"${IMAGE_NAME}" "./scripts/tacotron2_to_json.py \"/checkpoints/${TACOTRON2_NAME}\" /models/tacotron2.json" || \
die_and_remove_image "Failed to export tacotron2."
# export waveglow
nvidia-docker run \
--rm \
-e "NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES}" \
--name "${CONTAINER_NAME}" \
-v "${WAVEGLOW_DIR}:/checkpoints" \
-v "${MODEL_DIR}:/models" \
"${IMAGE_NAME}" \
"./scripts/waveglow_to_onnx.py -W \"${DLE_DIR}\" -w \"/checkpoints/${WAVEGLOW_NAME}\" -o /models/waveglow.onnx" || \
die_and_remove_image "Failed to export waveglow."
# export denoiser
nvidia-docker run \
--rm \
-e "NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES}" \
--name "${CONTAINER_NAME}" \
-v "${WAVEGLOW_DIR}:/checkpoints" \
-v "${MODEL_DIR}:/models" \
"${IMAGE_NAME}" \
"./scripts/denoiser_to_json.py \"${DLE_DIR}\" \"/checkpoints/${WAVEGLOW_NAME}\" /models/denoiser.json" || \
die_and_remove_image "Failed to export the denoiser."
docker rmi "${IMAGE_NAME}"
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco14_sync | # SSD with Mobilenet v1 PPN feature extractor.
# Trained on COCO, initialized from Imagenet classification checkpoint
# Achieves 19.7 mAP on COCO14 minival dataset.
# This config is TPU compatible.
model {
ssd {
inplace_batchnorm_update: true
freeze_batchnorm: false
num_classes: 90
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
use_matmul_gather: true
}
}
similarity_calculator {
iou_similarity {
}
}
encode_background_as_zeros: true
anchor_generator {
ssd_anchor_generator {
num_layers: 6
min_scale: 0.15
max_scale: 0.95
aspect_ratios: 1.0
aspect_ratios: 2.0
aspect_ratios: 0.5
aspect_ratios: 3.0
aspect_ratios: 0.3333
reduce_boxes_in_lowest_layer: false
}
}
image_resizer {
fixed_shape_resizer {
height: 300
width: 300
}
}
box_predictor {
weight_shared_convolutional_box_predictor {
depth: 512
class_prediction_bias_init: -4.6
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true
center: true
train: true
decay: 0.97
epsilon: 0.001
}
}
num_layers_before_predictor: 1
kernel_size: 1
share_prediction_tower: true
}
}
feature_extractor {
type: 'ssd_mobilenet_v1_ppn'
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true
center: true
decay: 0.97
epsilon: 0.001
}
}
override_base_feature_extractor_hyperparams: true
}
loss {
classification_loss {
weighted_sigmoid_focal {
alpha: 0.75
gamma: 2.0
}
}
localization_loss {
weighted_smooth_l1 {
}
}
classification_weight: 1.0
localization_weight: 1.5
}
normalize_loss_by_num_matches: true
normalize_loc_loss_by_codesize: true
post_processing {
batch_non_max_suppression {
score_threshold: 1e-8
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
}
}
train_config: {
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
batch_size: 512
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 50000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
ssd_random_crop {
}
}
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: 0.7
total_steps: 50000
warmup_learning_rate: 0.1333
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-00000-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
num_examples: 8000
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-00000-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
PyTorch/Translation/Transformer/fairseq/modules/strided_batched_gemm | strided_batched_gemm | strided_batched_gemm | // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <torch/torch.h>
#include <vector>
at::Tensor strided_batched_gemm_cuda(
float beta,
at::Tensor in_result,
float alpha,
at::Tensor batch1,
at::Tensor batch2);
// C++ interface
#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
at::Tensor strided_batched_gemm(
float beta,
at::Tensor in_result,
float alpha,
at::Tensor batch1,
at::Tensor batch2) {
//CHECK_INPUT(in_result);
//CHECK_INPUT(batch1);
//CHECK_INPUT(batch2);
AT_ASSERTM(in_result.dim() == 3, "expected 3D tensor");
AT_ASSERTM(batch1.dim() == 3, "expected 3D tensor");
AT_ASSERTM(batch2.dim() == 3, "expected 3D tensor");
AT_ASSERTM(in_result.size(0) == batch1.size(0), "equal number of batches expected");
AT_ASSERTM(in_result.size(0) == batch2.size(0), "equal number of batches expected");
AT_ASSERTM(in_result.size(1) == batch1.size(1), "wrong matrix size");
AT_ASSERTM(in_result.size(2) == batch2.size(2), "wrong matrix size");
AT_ASSERTM(batch1.size(2) == batch2.size(1), "wrong matrix size");
AT_ASSERTM(batch1.dtype() == at::ScalarType::Half, "Only HALF is supported");
AT_ASSERTM(batch2.dtype() == at::ScalarType::Half, "Only HALF is supported");
AT_ASSERTM(in_result.dtype() == at::ScalarType::Half, "Only HALF is supported");
return strided_batched_gemm_cuda(beta, in_result, alpha, batch1, batch2);
}
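// Illustrative Python-side usage once the extension is built (the module import name
// is assumed to match the binding below):
//   out = strided_batched_gemm.strided_batched_gemm(beta, result, alpha, batch1, batch2)
// which computes result = beta * result + alpha * bmm(batch1, batch2) for half-precision
// CUDA tensors of shapes (B, M, N), (B, M, K) and (B, K, N).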
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("strided_batched_gemm", &strided_batched_gemm, "Special strided batched gemm.");
}
|
TensorFlow/Detection/SSD/models/research/slim/deployment | deployment | model_deploy_test | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_deploy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from deployment import model_deploy
slim = tf.contrib.slim
class DeploymentConfigTest(tf.test.TestCase):
def testDefaults(self):
deploy_config = model_deploy.DeploymentConfig()
self.assertEqual(slim.get_variables(), [])
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testCPUonly(self):
deploy_config = model_deploy.DeploymentConfig(clone_on_cpu=True)
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'CPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testMultiGPU(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1), 'GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testPS(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
with tf.device(deploy_config.variables_device()):
a = tf.Variable(0)
b = tf.Variable(0)
c = tf.no_op()
d = slim.variable('a', [],
caching_device=deploy_config.caching_device())
self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(a.device, a.value().device)
self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(b.device, b.value().device)
self.assertDeviceEqual(c.device, '')
self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(d.value().device, '')
def testMultiGPUPS(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=2, num_ps_tasks=1)
self.assertEqual(deploy_config.caching_device()(tf.no_op()), '')
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1),
'/job:worker/device:GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testReplicasPS(self):
deploy_config = model_deploy.DeploymentConfig(num_replicas=2,
num_ps_tasks=2)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testReplicasMultiGPUPS(self):
deploy_config = model_deploy.DeploymentConfig(num_replicas=2,
num_clones=2,
num_ps_tasks=2)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1),
'/job:worker/device:GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testVariablesPS(self):
deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)
with tf.device(deploy_config.variables_device()):
a = tf.Variable(0)
b = tf.Variable(0)
c = tf.no_op()
d = slim.variable('a', [],
caching_device=deploy_config.caching_device())
self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(a.device, a.value().device)
self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
self.assertDeviceEqual(b.device, b.value().device)
self.assertDeviceEqual(c.device, '')
self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(d.value().device, '')
def LogisticClassifier(inputs, labels, scope=None, reuse=None):
with tf.variable_scope(scope, 'LogisticClassifier', [inputs, labels],
reuse=reuse):
predictions = slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid,
scope='fully_connected')
slim.losses.log_loss(predictions, labels)
return predictions
def BatchNormClassifier(inputs, labels, scope=None, reuse=None):
with tf.variable_scope(scope, 'BatchNormClassifier', [inputs, labels],
reuse=reuse):
inputs = slim.batch_norm(inputs, decay=0.1, fused=True)
predictions = slim.fully_connected(inputs, 1,
activation_fn=tf.sigmoid,
scope='fully_connected')
slim.losses.log_loss(predictions, labels)
return predictions
class CreatecloneTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCreateLogisticClassifier(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = LogisticClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
clone = clones[0]
self.assertEqual(len(slim.get_variables()), 2)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(clone.outputs.op.name,
'LogisticClassifier/fully_connected/Sigmoid')
self.assertEqual(clone.scope, '')
self.assertDeviceEqual(clone.device, 'GPU:0')
self.assertEqual(len(slim.losses.get_losses()), 1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(update_ops, [])
def testCreateSingleclone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
clone = clones[0]
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(clone.outputs.op.name,
'BatchNormClassifier/fully_connected/Sigmoid')
self.assertEqual(clone.scope, '')
self.assertDeviceEqual(clone.device, 'GPU:0')
self.assertEqual(len(slim.losses.get_losses()), 1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
def testCreateMulticlone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(len(clones), num_clones)
for i, clone in enumerate(clones):
self.assertEqual(
clone.outputs.op.name,
'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
self.assertEqual(len(update_ops), 2)
self.assertEqual(clone.scope, 'clone_%d/' % i)
self.assertDeviceEqual(clone.device, 'GPU:%d' % i)
def testCreateOnecloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1,
num_ps_tasks=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(clones), 1)
clone = clones[0]
self.assertEqual(clone.outputs.op.name,
'BatchNormClassifier/fully_connected/Sigmoid')
self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
self.assertEqual(clone.scope, '')
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
self.assertDeviceEqual(v.device, v.value().device)
def testCreateMulticloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=2,
num_ps_tasks=2)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
for i, v in enumerate(slim.get_variables()):
t = i % 2
self.assertDeviceEqual(v.device, '/job:ps/task:%d/device:CPU:0' % t)
self.assertDeviceEqual(v.device, v.value().device)
self.assertEqual(len(clones), 2)
for i, clone in enumerate(clones):
self.assertEqual(
clone.outputs.op.name,
'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
self.assertEqual(clone.scope, 'clone_%d/' % i)
self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:%d' % i)
class OptimizeclonesTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCreateLogisticClassifier(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = LogisticClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 2)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(update_ops, [])
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, 'GPU:0')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateSingleclone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, 'GPU:0')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateMulticlone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), num_clones * 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateMulticloneCPU(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones,
clone_on_cpu=True)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), num_clones * 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateOnecloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1,
num_ps_tasks=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '/job:worker/device:GPU:0')
self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
class DeployTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _addBesselsCorrection(self, sample_size, expected_var):
correction_factor = sample_size / (sample_size - 1)
expected_var *= correction_factor
return expected_var
def testLocalTrainOp(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=2,
clone_on_cpu=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
self.assertEqual(slim.get_variables(), [])
model = model_deploy.deploy(deploy_config, model_fn, model_args,
optimizer=optimizer)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 4)
self.assertEqual(len(model.clones), 2)
self.assertEqual(model.total_loss.op.name, 'total_loss')
self.assertEqual(model.summary_op.op.name, 'summary_op/summary_op')
self.assertEqual(model.train_op.op.name, 'train_op')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
moving_mean = tf.contrib.framework.get_variables_by_name(
'moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables_by_name(
'moving_variance')[0]
initial_loss = sess.run(model.total_loss)
initial_mean, initial_variance = sess.run([moving_mean,
moving_variance])
self.assertAllClose(initial_mean, [0.0, 0.0, 0.0, 0.0])
self.assertAllClose(initial_variance, [1.0, 1.0, 1.0, 1.0])
for _ in range(10):
sess.run(model.train_op)
final_loss = sess.run(model.total_loss)
self.assertLess(final_loss, initial_loss / 5.0)
final_mean, final_variance = sess.run([moving_mean,
moving_variance])
expected_mean = np.array([0.125, 0.25, 0.375, 0.25])
expected_var = np.array([0.109375, 0.1875, 0.234375, 0.1875])
expected_var = self._addBesselsCorrection(16, expected_var)
self.assertAllClose(final_mean, expected_mean)
self.assertAllClose(final_variance, expected_var)
def testNoSummariesOnGPU(self):
with tf.Graph().as_default():
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
# clone function creates a fully_connected layer with a regularizer loss.
def ModelFn():
inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
reg = tf.contrib.layers.l2_regularizer(0.001)
tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)
model = model_deploy.deploy(
deploy_config, ModelFn,
optimizer=tf.train.GradientDescentOptimizer(1.0))
# The model summary op should have a few summary inputs and all of them
# should be on the CPU.
self.assertTrue(model.summary_op.op.inputs)
for inp in model.summary_op.op.inputs:
self.assertEqual('/device:CPU:0', inp.device)
def testNoSummariesOnGPUForEvals(self):
with tf.Graph().as_default():
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
# clone function creates a fully_connected layer with a regularizer loss.
def ModelFn():
inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
reg = tf.contrib.layers.l2_regularizer(0.001)
tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)
# No optimizer here, it's an eval.
model = model_deploy.deploy(deploy_config, ModelFn)
# The model summary op should have a few summary inputs and all of them
# should be on the CPU.
self.assertTrue(model.summary_op.op.inputs)
for inp in model.summary_op.op.inputs:
self.assertEqual('/device:CPU:0', inp.device)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_inception_v3_feature_extractor | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for InceptionV3 features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import inception_v3
slim = tf.contrib.slim
class SSDInceptionV3FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using InceptionV3 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""InceptionV3 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
Raises:
ValueError: If `override_base_feature_extractor_hyperparams` is False.
"""
super(SSDInceptionV3FeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
if not self._override_base_feature_extractor_hyperparams:
      raise ValueError('SSD Inception V3 feature extractor always uses '
'scope returned by `conv_hyperparams_fn` for both the '
'base feature extractor and the additional layers '
'added since there is no arg_scope defined for the base '
'feature extractor.')
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
feature_map_layout = {
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope('InceptionV3', reuse=self._reuse_weights) as scope:
_, image_features = inception_v3.inception_v3_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Mixed_7c',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return feature_maps.values()
|
PyTorch/Translation/Transformer/fairseq/optim/lr_scheduler | lr_scheduler | reduce_lr_on_plateau | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.optim.lr_scheduler
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('reduce_lr_on_plateau')
class ReduceLROnPlateau(FairseqLRScheduler):
"""Decay the LR by a factor every time the validation loss plateaus."""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if len(args.lr) > 1:
raise ValueError(
'Cannot use a fixed learning rate schedule with reduce_lr_on_plateau.'
' Consider --lr-scheduler=fixed instead.'
)
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer.optimizer, patience=0, factor=args.lr_shrink)
def state_dict(self):
"""Return the LR scheduler state dict."""
return {
'best': self.lr_scheduler.best,
'last_epoch': self.lr_scheduler.last_epoch,
}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
self.lr_scheduler.best = state_dict['best']
if 'last_epoch' in state_dict:
self.lr_scheduler.last_epoch = state_dict['last_epoch']
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
if val_loss is not None:
self.lr_scheduler.step(val_loss, epoch)
else:
self.lr_scheduler.last_epoch = epoch
return self.optimizer.get_lr()
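# Usage sketch (an illustrative assumption, not part of this module): training code obtains
# the scheduler through the registry and steps it with the validation loss after each epoch:
#
#   scheduler = ReduceLROnPlateau(args, fairseq_optimizer)  # args.lr must hold a single learning rate
#   for epoch in range(num_epochs):
#       ...  # train one epoch
#       scheduler.step(epoch, val_loss=validate(model))     # shrinks the LR by args.lr_shrink on plateau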
|
PyTorch/Classification | Classification | README | # Image Classification
Image classification is the task of categorizing an image into one of several predefined classes, often also giving a probability of the input belonging to a certain class. This task is crucial in understanding and analyzing images, and it comes quite effortlessly to human beings with our complex visual systems. Most powerful image classification models today are built using some form of Convolution Neural Networks (CNNs), which are also the backbone of many other tasks in Computer Vision.

[Source](https://github.com/NVlabs/stylegan)
In this overview, we will cover
- Types of image Classification
- How does it work?
- How is the performance evaluated?
- Use cases and applications
- Where to get started
---
## Types of image Classification
Image Classification can be broadly divided into either Binary or Multi-class problems depending on the number of categories. Binary image classification problems entail predicting one of two classes. An example of this would be to predict whether an image is that of a dog or not. A subtly different problem is that of single-class (one-vs-all) classification, where the goal is to recognize data from one class and reject all others. This is beneficial when there is an overabundance of data from one of the classes, also called a class imbalance.

In Multi-class classification problems, models categorize instances into one of three or more categories. Multi-class models often also return confidence scores (or probabilities) of an image belonging to each of the possible classes. This should not be confused with multi-label classification, where a model assigns multiple labels to an instance.
---
## How does it work?
In recent years, Convolutional Neural Networks (CNNs) have led the way to massive breakthroughs in Computer Vision. Most state-of-the-art Image Classification models today employ CNNs in some form. Convolutional Layers are the building blocks of CNNs, and similar to Neural Networks they are composed of neurons that learn parameters like weights and biases. Most CNNs are composed of many Convolutional layers that work like feature extractors, and coupled with Fully Connected (FC) layers they learn to identify patterns in images to return confidence scores in different categories.
But what makes Convolutional Networks special? Well, CNNs are built with the assumption that input is in the form of images, and exploiting this fact they can be vastly more efficient than a standard Neural Network for a given level of performance.
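To make the convolution-plus-classifier idea concrete, below is a minimal sketch of a tiny CNN classifier in PyTorch. The layer sizes, the 32x32 input resolution, and the 10-class output are illustrative assumptions, not taken from any model in this collection.

```python
import torch
import torch.nn as nn

class TinyCNN(nn.Module):
    """Deliberately small CNN: convolutional layers extract features, an FC layer scores classes."""

    def __init__(self, num_classes=10):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),  # 32x32 -> 16x16
            nn.Conv2d(16, 32, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),  # 16x16 -> 8x8
        )
        self.classifier = nn.Linear(32 * 8 * 8, num_classes)

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, 1)    # flatten feature maps for the FC layer
        return self.classifier(x)  # raw, unnormalized class scores (logits)

logits = TinyCNN()(torch.randn(4, 3, 32, 32))  # a batch of four 32x32 RGB images
probs = logits.softmax(dim=1)                  # per-class confidence scores
```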

Network depth (number of layers) and the number of learnable parameters have been found to be crucially important for performance. Top models can typically have over a hundred layers and hundreds of millions of parameters. Much of the recent research in visual recognition has focused on “network engineering”, i.e. designing better architectures, even employing Machine Learning algorithms to search for them, as in the case of Neural Architecture Search.
---
## How is the performance evaluated?
Image Classification performance is often reported as Top-1 or Top-5 scores. In top-1 score, classification is considered correct if the top predicted class (with the highest predicted probability) matches the true class for a given instance. In top-5, we check if one of the top 5 predictions matches the true class. The score is just the number of correct predictions divided by the total number of instances evaluated.
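As a concrete illustration of these metrics, the sketch below computes top-1 and top-5 scores from a matrix of predicted class probabilities; the shapes and the random data are assumptions made only for the example.

```python
import numpy as np

def top_k_accuracy(probs: np.ndarray, labels: np.ndarray, k: int = 1) -> float:
    """probs: [num_samples, num_classes] predicted probabilities; labels: [num_samples] true class ids."""
    top_k = np.argsort(probs, axis=1)[:, -k:]           # indices of the k most probable classes
    correct = np.any(top_k == labels[:, None], axis=1)  # is the true class among the top k?
    return float(correct.mean())

probs = np.random.rand(1000, 10)
probs /= probs.sum(axis=1, keepdims=True)    # normalize rows into probabilities
labels = np.random.randint(0, 10, size=1000)
print("top-1:", top_k_accuracy(probs, labels, k=1))
print("top-5:", top_k_accuracy(probs, labels, k=5))
```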
---
## Use cases and applications
### Categorizing Images in Large Visual Databases
Businesses with visual databases may accumulate large numbers of images with missing tags or metadata. Unless there is an effective way to organize such images, they may not be of much use at all; on the contrary, they may hog precious storage space. Automated image classification algorithms can sort such untagged images into predefined categories, allowing businesses to avoid expensive manual labor.
A related task is that of Image Organization in smart devices like mobile phones. With Image Classification techniques, images and videos can be organized for improved accessibility.
### Visual Search
Visual Search or Image-based search has risen to popularity over the recent years. Many prominent search engines already provide this feature where users can search for visual content similar to a provided image. This has many applications in the e-commerce and retail industry where users can take a snap and upload an image of a product they are interested in purchasing. This makes the shopping experience much more efficient for customers, and can increase sales for businesses.
### Healthcare
Medical Imaging is about creating visual images of internal body parts for clinical purposes. This includes health monitoring, medical diagnosis, treatment, and keeping organized records. Image Classification algorithms can play a crucial role in Medical Imaging by assisting medical professionals in detecting the presence of illness and bringing consistency to clinical diagnosis.
---
## Where to get started?
In this Collection, you will find state-of-the-art implementations of Image Classification models and their containers. A good place to get started with Image Classification is with the [ResNet-50](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnet50v1.5) model.
ResNets (Residual Networks) are very popular Convolutional Neural Network architectures built with blocks utilizing skip connections to jump over some layers. As the name suggests, ResNet-50 is a variant that is 50 layers deep! But why do we need these “skip” connections? As it turns out, building better CNN architectures is not as simple as stacking more and more layers. In practice, if we just keep adding depth to a CNN, at some point the performance stagnates or may even start getting worse. Very deep networks are notoriously difficult to train because of the vanishing gradient problem. In simpler terms, as the depth increases, repeated multiplications during back-propagation may end up making the gradient vanishingly small, which can prevent the weights from changing. In ResNets, the skip connections are meant to act like a “gradient superhighway”, allowing the gradient to flow unrestrained and thus alleviating the vanishing gradient problem. ResNets were very influential in the development of subsequent Convolutional Network architectures, and there is much more to them than the brief summary above! |
PyTorch/Classification/ConvNets/triton/deployment_toolkit/library | library | tensorrt | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from pathlib import Path
from typing import Dict, NamedTuple, Optional, Union
import numpy as np
# pytype: disable=import-error
try:
import pycuda.autoinit
import pycuda.driver as cuda
except (ImportError, Exception) as e:
logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}")
# pytype: enable=import-error
import tensorrt as trt # pytype: disable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
"""
documentation:
https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html
https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section
"""
class TensorRTLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
model_path = Path(model_path)
LOGGER.debug(f"Loading TensorRT engine from {model_path}")
with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(fh.read())
if engine is None:
raise RuntimeError(f"Could not load ICudaEngine from {model_path}")
inputs = {}
outputs = {}
for binding_idx in range(engine.num_bindings):
name = engine.get_binding_name(binding_idx)
is_input = engine.binding_is_input(binding_idx)
dtype = engine.get_binding_dtype(binding_idx)
shape = engine.get_binding_shape(binding_idx)
if is_input:
inputs[name] = TensorSpec(name, dtype, shape)
else:
outputs[name] = TensorSpec(name, dtype, shape)
return Model(engine, None, inputs, outputs)
class TensorRTSaver(BaseSaver):
def __init__(self):
pass
def save(self, model: Model, model_path: Union[str, Path]) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving TensorRT engine to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
engine: "trt.ICudaEngine" = model.handle
with model_path.open("wb") as fh:
fh.write(engine.serialize())
class TRTBuffers(NamedTuple):
x_host: Optional[Dict[str, object]]
x_dev: Dict[str, object]
y_pred_host: Dict[str, object]
y_pred_dev: Dict[str, object]
class TensorRTRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return TensorRTRunnerSession(model=model)
class TensorRTRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, trt.ICudaEngine)
self._model = model
self._has_dynamic_shapes = None
self._context = None
self._engine: trt.ICudaEngine = self._model.handle
self._cuda_context = pycuda.autoinit.context
self._input_names = None
self._output_names = None
self._buffers = None
def __enter__(self):
self._context = self._engine.create_execution_context()
self._context.__enter__()
self._input_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx)
]
self._output_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx)
]
# all_binding_shapes_specified is True for models without dynamic shapes
# so initially this variable is False for models with dynamic shapes
self._has_dynamic_shapes = not self._context.all_binding_shapes_specified
return self
def __exit__(self, exc_type, exc_value, traceback):
self._context.__exit__(exc_type, exc_value, traceback)
self._input_names = None
self._output_names = None
# TODO: are cuda buffers dealloc automatically?
self._buffers = None
def __call__(self, x):
buffers = self._prepare_buffers_if_needed(x)
bindings = self._update_bindings(buffers)
for name in self._input_names:
cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name])
self._cuda_context.push()
self._context.execute_v2(bindings=bindings)
self._cuda_context.pop()
for name in self._output_names:
cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name])
return buffers.y_pred_host
def _update_bindings(self, buffers: TRTBuffers):
bindings = [None] * self._engine.num_bindings
for name in buffers.y_pred_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.y_pred_dev[name]
for name in buffers.x_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.x_dev[name]
return bindings
def _set_dynamic_input_shapes(self, x_host):
def _is_shape_dynamic(input_shape):
return any([dim is None or dim == -1 for dim in input_shape])
for name in self._input_names:
bindings_idx = self._engine[name]
data_shape = x_host[name].shape # pytype: disable=attribute-error
if self._engine.is_shape_binding(bindings_idx):
input_shape = self._context.get_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_shape_input(bindings_idx, data_shape)
else:
input_shape = self._engine.get_binding_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_binding_shape(bindings_idx, data_shape)
assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified
def _prepare_buffers_if_needed(self, x_host: Dict[str, object]):
# pytype: disable=attribute-error
new_batch_size = list(x_host.values())[0].shape[0]
current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0
# pytype: enable=attribute-error
if self._has_dynamic_shapes or new_batch_size != current_batch_size:
# TODO: are CUDA buffers dealloc automatically?
self._set_dynamic_input_shapes(x_host)
y_pred_host = {}
for name in self._output_names:
shape = self._context.get_binding_shape(self._engine[name])
y_pred_host[name] = np.zeros(shape, dtype=trt.nptype(self._model.outputs[name].dtype))
y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()}
x_dev = {
name: cuda.mem_alloc(host_input.nbytes)
for name, host_input in x_host.items()
if name in self._input_names # pytype: disable=attribute-error
}
self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev)
return self._buffers._replace(x_host=x_host)
if "pycuda.driver" in sys.modules:
loaders.register_extension(Format.TRT.value, TensorRTLoader)
runners.register_extension(Format.TRT.value, TensorRTRunner)
savers.register_extension(Format.TRT.value, TensorRTSaver)
else:
LOGGER.warning("Do not register TensorRT extension due problems with importing pycuda.driver package.")
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/samplers | samplers | distributed | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset : offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
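# Usage sketch (hypothetical variable names, shown only for illustration):
#
#   sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=True)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler)
#   for epoch in range(num_epochs):
#       sampler.set_epoch(epoch)  # re-seed the shuffle so each epoch uses a different ordering
#       for batch in loader:
#           ...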
|
PaddlePaddle/LanguageModeling/BERT | BERT | create_pretraining_data | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm examples for BERT."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import collections
import h5py
import numpy as np
from tqdm import tqdm
from tokenizer import BertTokenizer, convert_to_unicode
class TrainingInstance:
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions,
masked_lm_labels, is_random_next):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def write_instance_to_example_file(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_file):
"""Create example files from `TrainingInstance`s."""
total_written = 0
features = collections.OrderedDict()
num_instances = len(instances)
features["input_ids"] = np.zeros(
[num_instances, max_seq_length], dtype="int32")
features["input_mask"] = np.zeros(
[num_instances, max_seq_length], dtype="int32")
features["segment_ids"] = np.zeros(
[num_instances, max_seq_length], dtype="int32")
features["masked_lm_positions"] = np.zeros(
[num_instances, max_predictions_per_seq], dtype="int32")
features["masked_lm_ids"] = np.zeros(
[num_instances, max_predictions_per_seq], dtype="int32")
features["next_sentence_labels"] = np.zeros(num_instances, dtype="int32")
for inst_index, instance in enumerate(tqdm(instances)):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(
instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features["input_ids"][inst_index] = input_ids
features["input_mask"][inst_index] = input_mask
features["segment_ids"][inst_index] = segment_ids
features["masked_lm_positions"][inst_index] = masked_lm_positions
features["masked_lm_ids"][inst_index] = masked_lm_ids
features["next_sentence_labels"][inst_index] = next_sentence_label
total_written += 1
logging.info("saving data")
f = h5py.File(output_file, 'w')
f.create_dataset(
"input_ids",
data=features["input_ids"],
dtype='i4',
compression='gzip')
f.create_dataset(
"input_mask",
data=features["input_mask"],
dtype='i1',
compression='gzip')
f.create_dataset(
"segment_ids",
data=features["segment_ids"],
dtype='i1',
compression='gzip')
f.create_dataset(
"masked_lm_positions",
data=features["masked_lm_positions"],
dtype='i4',
compression='gzip')
f.create_dataset(
"masked_lm_ids",
data=features["masked_lm_ids"],
dtype='i4',
compression='gzip')
f.create_dataset(
"next_sentence_labels",
data=features["next_sentence_labels"],
dtype='i1',
compression='gzip')
f.flush()
f.close()
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
logging.info(f"creating instance from {input_file}")
with open(input_file, "r", encoding="UTF-8") as reader:
while True:
line = convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
# vocab_words = list(tokenizer.vocab.keys())
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length,
short_seq_prob, masked_lm_prob, max_predictions_per_seq,
vocab_words, rng))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(
0, len(all_documents) - 1)
if random_document_index != document_index:
break
                    # If the randomly picked document is the same as the current document
if random_document_index == document_index:
is_random_next = False
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq,
vocab_words, rng)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) -
1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_file",
default=None,
type=str,
required=True,
help="The input train corpus. can be directory with .txt files or a path to a single file"
)
parser.add_argument(
"--output_file",
default=None,
type=str,
required=True,
help="The output file where created hdf5 formatted data will be written."
)
parser.add_argument(
"--vocab_file",
default=None,
type=str,
required=False,
help="The vocabulary the BERT model will train on. "
"Use bert_model argument would ignore this. "
"The bert_model argument is recommended.")
parser.add_argument(
"--do_lower_case",
action='store_true',
default=True,
help="Whether to lower case the input text. True for uncased models, False for cased models. "
"Use bert_model argument would ignore this. The bert_model argument is recommended."
)
## Other parameters
#int
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument(
"--dupe_factor",
default=10,
type=int,
help="Number of times to duplicate the input data (with different masks)."
)
parser.add_argument(
"--max_predictions_per_seq",
default=20,
type=int,
help="Maximum number of masked LM predictions per sequence.")
# floats
parser.add_argument(
"--masked_lm_prob",
default=0.15,
type=float,
help="Masked LM probability.")
parser.add_argument(
"--short_seq_prob",
default=0.1,
type=float,
help="Probability to create a sequence shorter than maximum sequence length"
)
parser.add_argument(
'--random_seed',
type=int,
default=12345,
help="random seed for initialization")
args = parser.parse_args()
print(args)
tokenizer = BertTokenizer(
args.vocab_file, do_lower_case=args.do_lower_case, max_len=512)
input_files = []
if os.path.isfile(args.input_file):
input_files.append(args.input_file)
elif os.path.isdir(args.input_file):
input_files = [
os.path.join(args.input_file, f)
for f in os.listdir(args.input_file)
if (os.path.isfile(os.path.join(args.input_file, f)) and
f.endswith('.txt'))
]
else:
raise ValueError(f"{args.input_file} is not a valid path")
rng = random.Random(args.random_seed)
instances = create_training_instances(
input_files, tokenizer, args.max_seq_length, args.dupe_factor,
args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq,
rng)
output_file = args.output_file
write_instance_to_example_file(instances, tokenizer, args.max_seq_length,
args.max_predictions_per_seq, output_file)
if __name__ == "__main__":
main()
|
TensorFlow2/Classification/ConvNets/utils | utils | callbacks | # Lint as: python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common modules for callbacks."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import os
import horovod.tensorflow as hvd
import tensorflow as tf
import tensorflow_addons as tfa
import time
from typing import Any, List, MutableMapping, Text
from tensorflow import keras
from utils import optimizer_factory
def get_callbacks(model_checkpoint: bool = True,
include_tensorboard: bool = True,
time_history: bool = True,
track_lr: bool = True,
write_model_weights: bool = True,
initial_step: int = 0,
batch_size: int = 0,
log_steps: int = 100,
model_dir: str = None,
save_checkpoint_freq: int = 0,
ema_decay=0,
intratrain_eval_using_ema=False,
logger = None) -> List[tf.keras.callbacks.Callback]:
"""Get all callbacks."""
model_dir = model_dir or ''
callbacks = []
if model_checkpoint and hvd.rank() == 0:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
# if ema_decay > 0:
# # save average weights in the ckpt
# ckpt_callback = AverageModelCheckpoint(update_weights=False,
# filepath=ckpt_full_path,
# verbose=1,
# save_weights_only=True,
# save_freq=save_checkpoint_freq)
# else:
ckpt_callback = tf.keras.callbacks.ModelCheckpoint(ckpt_full_path,
save_weights_only=True,
verbose=1,
save_freq=save_checkpoint_freq)
callbacks.append(ckpt_callback)
if ema_decay > 0:
# swaps model weights with the average weights during on-the-fly evaluation.
    # once evaluation is completed, the original weights are restored and training is resumed.
callbacks.append(MovingAverageCallback(intratrain_eval_using_ema=intratrain_eval_using_ema))
if time_history and logger is not None and hvd.rank() == 0:
callbacks.append(
TimeHistory(
batch_size,
log_steps,
logdir=model_dir if include_tensorboard else None,
logger=logger))
# Adding hvd.rank() == 0 to the following if condition halts multi-GPU training at the onset!
if include_tensorboard:
callbacks.append(
CustomTensorBoard(
log_dir=model_dir,
track_lr=track_lr,
initial_step=initial_step,
write_images=write_model_weights))
# ProgbarLogger is in charge of printing metrics at the end of each epoch.
  # By default, ProgBar callback is inserted at the beginning of the callback list by Keras.
# The downside is that if the callbacks invoked after ProgBar want to add a new metric, they won't be
# reflected in the printed metrics because ProgBar is already called. To override this behavior,
# we append this callback explicitly here at the end. If this line is commented, the learning rate,
# which is added to the metrics by CustomTensorboard, won't be printed.
callbacks.append(tf.keras.callbacks.ProgbarLogger())
return callbacks
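# Usage sketch (variable names such as `global_batch_size`, `steps_per_epoch` and `dllogger_instance`
# are hypothetical; shown only to illustrate how a training driver might consume this helper):
#
#   callbacks = get_callbacks(model_checkpoint=True,
#                             batch_size=global_batch_size,
#                             log_steps=100,
#                             model_dir=model_dir,
#                             save_checkpoint_freq=steps_per_epoch,
#                             ema_decay=ema_decay,
#                             logger=dllogger_instance)
#   model.fit(train_dataset, epochs=num_epochs, callbacks=callbacks)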
def get_scalar_from_tensor(t: tf.Tensor) -> int:
"""Utility function to convert a Tensor to a scalar."""
t = tf.keras.backend.get_value(t)
if callable(t):
return t()
else:
return t
class CustomTensorBoard(tf.keras.callbacks.TensorBoard):
"""A customized TensorBoard callback that tracks additional datapoints.
Metrics tracked:
- Global learning rate
Attributes:
log_dir: the path of the directory where to save the log files to be parsed
by TensorBoard.
track_lr: `bool`, whether or not to track the global learning rate.
initial_step: the initial step, used for preemption recovery.
**kwargs: Additional arguments for backwards compatibility. Possible key is
`period`.
"""
# TODO(b/146499062): track params, flops, log lr, l2 loss,
# classification loss
def __init__(self,
log_dir: str,
track_lr: bool = False,
initial_step: int = 0,
**kwargs):
super(CustomTensorBoard, self).__init__(log_dir=log_dir, **kwargs)
self.step = initial_step
self._track_lr = track_lr
def on_batch_begin(self,
epoch: int,
logs: MutableMapping[str, Any] = None) -> None:
self.step += 1
if logs is None:
logs = {}
logs.update(self._calculate_metrics())
super(CustomTensorBoard, self).on_batch_begin(epoch, logs)
def on_epoch_begin(self,
epoch: int,
logs: MutableMapping[str, Any] = None) -> None:
if logs is None:
logs = {}
metrics = self._calculate_metrics()
logs.update(metrics)
super(CustomTensorBoard, self).on_epoch_begin(epoch, logs)
def on_epoch_end(self,
epoch: int,
logs: MutableMapping[str, Any] = None) -> None:
if logs is None:
logs = {}
metrics = self._calculate_metrics()
logs.update(metrics)
super(CustomTensorBoard, self).on_epoch_end(epoch, logs)
def _calculate_metrics(self) -> MutableMapping[str, Any]:
logs = {}
# TODO(b/149030439): disable LR reporting.
if self._track_lr:
logs['learning_rate'] = self._calculate_lr()
return logs
def _calculate_lr(self) -> int:
"""Calculates the learning rate given the current step."""
return get_scalar_from_tensor(
self._get_base_optimizer()._decayed_lr(var_dtype=tf.float32)) # pylint:disable=protected-access
def _get_base_optimizer(self) -> tf.keras.optimizers.Optimizer:
"""Get the base optimizer used by the current model."""
optimizer = self.model.optimizer
# The optimizer might be wrapped by another class, so unwrap it
while hasattr(optimizer, '_optimizer'):
optimizer = optimizer._optimizer # pylint:disable=protected-access
return optimizer
class MovingAverageCallback(tf.keras.callbacks.Callback):
"""A Callback to be used with a `MovingAverage` optimizer.
Applies moving average weights to the model during validation time to test
and predict on the averaged weights rather than the current model weights.
Once training is complete, the model weights will be overwritten with the
averaged weights (by default).
Attributes:
overwrite_weights_on_train_end: Whether to overwrite the current model
weights with the averaged weights from the moving average optimizer.
**kwargs: Any additional callback arguments.
"""
def __init__(self,
intratrain_eval_using_ema: bool = False,
overwrite_weights_on_train_end: bool = False,
**kwargs):
super(MovingAverageCallback, self).__init__(**kwargs)
self.intratrain_eval_using_ema = intratrain_eval_using_ema
self.overwrite_weights_on_train_end = overwrite_weights_on_train_end
self.ema_opt = None
def set_model(self, model: tf.keras.Model):
super(MovingAverageCallback, self).set_model(model)
self.ema_opt = optimizer_factory.fetch_optimizer(model, optimizer_factory.MovingAverage)
self.ema_opt.shadow_copy(model.weights)
def on_test_begin(self, logs: MutableMapping[Text, Any] = None):
if self.intratrain_eval_using_ema:
self.ema_opt.swap_weights()
def on_test_end(self, logs: MutableMapping[Text, Any] = None):
if self.intratrain_eval_using_ema:
self.ema_opt.swap_weights()
def on_train_end(self, logs: MutableMapping[Text, Any] = None):
if self.overwrite_weights_on_train_end:
self.ema_opt.assign_average_vars(self.model.variables)
class AverageModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
"""Saves and, optionally, assigns the averaged weights.
  Adapted from tfa.callbacks.AverageModelCheckpoint.
  NOTE 1: the original class has a type-check decorator that prevents passing a non-string `save_freq` (fix: decorator removed).
  NOTE 2: the original class may not correctly handle wrapped (nested) optimizer objects (fix: use `fetch_optimizer`).
Attributes:
update_weights: If True, assign the moving average weights
to the model, and save them. If False, keep the old
non-averaged weights, but the saved model uses the
average weights.
See `tf.keras.callbacks.ModelCheckpoint` for the other args.
"""
def __init__(
self,
update_weights: bool,
filepath: str,
monitor: str = 'val_loss',
verbose: int = 0,
save_best_only: bool = False,
save_weights_only: bool = False,
mode: str = 'auto',
save_freq: str = 'epoch',
**kwargs):
super().__init__(
filepath,
monitor,
verbose,
save_best_only,
save_weights_only,
mode,
save_freq,
**kwargs)
self.update_weights = update_weights
self.ema_opt = None
def set_model(self, model):
self.ema_opt = optimizer_factory.fetch_optimizer(model, optimizer_factory.MovingAverage)
return super().set_model(model)
def _save_model(self, epoch, logs):
assert isinstance(self.ema_opt, optimizer_factory.MovingAverage)
if self.update_weights:
self.ema_opt.assign_average_vars(self.model.variables)
return super()._save_model(epoch, logs)
else:
# Note: `model.get_weights()` gives us the weights (non-ref)
# whereas `model.variables` returns references to the variables.
non_avg_weights = self.model.get_weights()
self.ema_opt.assign_average_vars(self.model.variables)
# result is currently None, since `super._save_model` doesn't
# return anything, but this may change in the future.
result = super()._save_model(epoch, logs)
self.model.set_weights(non_avg_weights)
return result
class BatchTimestamp(object):
"""A structure to store batch time stamp."""
def __init__(self, batch_index, timestamp):
self.batch_index = batch_index
self.timestamp = timestamp
def __repr__(self):
return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format(
self.batch_index, self.timestamp)
class TimeHistory(tf.keras.callbacks.Callback):
"""Callback for Keras models."""
def __init__(self, batch_size, log_steps, logger, logdir=None):
"""Callback for logging performance.
Args:
      batch_size: Total batch size.
      log_steps: Interval of steps between logging of batch level stats.
      logger: Logger object used to report performance (must expose a `log` method).
      logdir: Optional directory to write TensorBoard summaries.
"""
# TODO(wcromar): remove this parameter and rely on `logs` parameter of
# on_train_batch_end()
self.batch_size = batch_size
super(TimeHistory, self).__init__()
self.log_steps = log_steps
self.last_log_step = 0
self.steps_before_epoch = 0
self.steps_in_epoch = 0
self.start_time = None
self.logger = logger
self.step_per_epoch = 0
if logdir:
self.summary_writer = tf.summary.create_file_writer(logdir)
else:
self.summary_writer = None
# Logs start of step 1 then end of each step based on log_steps interval.
self.timestamp_log = []
# Records the time each epoch takes to run from start to finish of epoch.
self.epoch_runtime_log = []
self.throughput = []
@property
def global_steps(self):
"""The current 1-indexed global step."""
return self.steps_before_epoch + self.steps_in_epoch
@property
def average_steps_per_second(self):
"""The average training steps per second across all epochs."""
return (self.global_steps - self.step_per_epoch) / sum(self.epoch_runtime_log[1:])
@property
def average_examples_per_second(self):
"""The average number of training examples per second across all epochs."""
# return self.average_steps_per_second * self.batch_size
if not self.throughput:
return 0
if len(self.throughput) == 1:
return self.throughput[0] # this throughput is inaccurate because the first step is warmup
ind = max(int(0.1*len(self.throughput)), 1) # ensures exclusion of the first step (warmup step)
return sum(self.throughput[ind:])/(len(self.throughput[ind:])) # removed +1 from denominator
def on_train_end(self, logs=None):
self.train_finish_time = time.time()
if self.summary_writer:
self.summary_writer.flush()
def on_epoch_begin(self, epoch, logs=None):
self.epoch_start = time.time()
def on_batch_begin(self, batch, logs=None):
# tf.print('+++++++++++',self.model.optimizer.iterations,batch)
if not self.start_time:
self.start_time = time.time()
# Record the timestamp of the first global step
if not self.timestamp_log:
self.timestamp_log.append(BatchTimestamp(self.global_steps,
self.start_time))
def on_batch_end(self, batch, logs=None):
"""Records elapse time of the batch and calculates examples per second."""
self.steps_in_epoch = batch + 1
steps_since_last_log = self.global_steps - self.last_log_step
if steps_since_last_log >= self.log_steps:
now = time.time()
elapsed_time = now - self.start_time
steps_per_second = steps_since_last_log / elapsed_time
examples_per_second = steps_per_second * self.batch_size
self.timestamp_log.append(BatchTimestamp(self.global_steps, now))
elapsed_time_str='{:.2f} seconds'.format(elapsed_time)
self.logger.log(step='PARAMETER', data={'TimeHistory': elapsed_time_str, 'examples/second': examples_per_second, 'steps': (self.last_log_step, self.global_steps)})
if self.summary_writer:
with self.summary_writer.as_default():
tf.summary.scalar('global_step/sec', steps_per_second,
self.global_steps)
tf.summary.scalar('examples/sec', examples_per_second,
self.global_steps)
# tf.summary.scalar('grad global norm',
# self.model.gradients_gnorm,
# self.global_steps)
self.last_log_step = self.global_steps
self.start_time = None
self.throughput.append(examples_per_second)
def on_epoch_end(self, epoch, logs=None):
if epoch == 0:
self.step_per_epoch = self.steps_in_epoch
epoch_run_time = time.time() - self.epoch_start
self.epoch_runtime_log.append(epoch_run_time)
self.steps_before_epoch += self.steps_in_epoch
self.steps_in_epoch = 0
class EvalTimeHistory(tf.keras.callbacks.Callback):
"""Callback for Keras models."""
def __init__(self, batch_size, logger, logdir=None):
"""Callback for logging performance.
Args:
      batch_size: Total batch size.
      logger: Logger object used to report performance (must expose a `log` method).
      logdir: Optional directory to write TensorBoard summaries.
"""
# TODO(wcromar): remove this parameter and rely on `logs` parameter of
# on_train_batch_end()
self.batch_size = batch_size
self.global_steps = 0
self.batch_time = []
self.eval_time = 0
super(EvalTimeHistory, self).__init__()
self.logger = logger
@property
def average_steps_per_second(self):
"""The average training steps per second across all epochs."""
return (self.global_steps - 1) / self.eval_time
@property
def average_examples_per_second(self):
"""The average number of training examples per second across all epochs."""
return self.average_steps_per_second * self.batch_size
def on_test_batch_end(self, batch, logs=None):
self.global_steps += 1
self.batch_time.append(time.time() - self.test_begin)
def on_test_batch_begin(self, epoch, logs=None):
self.test_begin = time.time()
def on_test_end(self, epoch, logs=None):
self.eval_time = sum(self.batch_time) - self.batch_time[0]
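# --- Illustrative usage sketch (not part of the original file) ---------------
# A minimal example of how these callbacks could be combined in `model.fit`.
# The `model`, `train_dataset`, the dllogger-style `perf_logger` object and the
# directory names below are assumptions made purely for illustration; the
# MovingAverageCallback additionally assumes the model was compiled with a
# MovingAverage-wrapped optimizer.
#
# time_history = TimeHistory(batch_size=256, log_steps=100, logger=perf_logger,
#                            logdir='/tmp/tb_logs')
# tensorboard = CustomTensorBoard(log_dir='/tmp/tb_logs', track_lr=True)
# ema_eval = MovingAverageCallback(intratrain_eval_using_ema=True)
# model.fit(train_dataset, epochs=10,
#           callbacks=[time_history, tensorboard, ema_eval])
# print('average examples/sec:', time_history.average_examples_per_second)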
|
PyTorch/Segmentation/nnUNet/triton | triton | run_inference_on_triton | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model deployed on Triton, you can use the `run_inference_on_triton.py` script.
It sends requests with data obtained from the provided data loader and dumps the received responses into npz files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Currently, the client communicates with the Triton server asynchronously using the gRPC protocol.
Example call:
```shell script
python ./triton/run_inference_on_triton.py \
--server-url localhost:8001 \
--model-name ResNet50 \
--model-version 1 \
--dump-labels \
--output-dir /results/dump_triton
```
"""
import argparse
import functools
import logging
import queue
import threading
import time
from pathlib import Path
from typing import Optional
from tqdm import tqdm
# pytype: disable=import-error
try:
from tritonclient import utils as client_utils # noqa: F401
from tritonclient.grpc import (
InferenceServerClient,
InferInput,
InferRequestedOutput,
)
except ImportError:
    import tritongrpcclient as grpc_client  # noqa: F401
    # assumption: the older Triton client packaging exposes triton_to_np_dtype here
    import tritonclientutils as client_utils  # noqa: F401
from tritongrpcclient import (
InferenceServerClient,
InferInput,
InferRequestedOutput,
)
# pytype: enable=import-error
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file
from .deployment_toolkit.dump import NpzWriter
LOGGER = logging.getLogger("run_inference_on_triton")
class AsyncGRPCTritonRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
DEFAULT_MAX_UNRESP_REQS = 128
DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
resp_wait_s: Optional[float] = None,
max_unresponded_reqs: Optional[int] = None,
):
self._server_url = server_url
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s
self._max_unresp_reqs = self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_reqs is None else max_unresponded_reqs
self._results = queue.Queue()
self._processed_all = False
self._errors = []
self._num_waiting_for = 0
self._sync = threading.Condition()
self._req_thread = threading.Thread(target=self.req_loop, daemon=True)
def __iter__(self):
self._req_thread.start()
timeout_s = 0.050 # check flags processed_all and error flags every 50ms
while True:
try:
ids, x, y_pred, y_real = self._results.get(timeout=timeout_s)
yield ids, x, y_pred, y_real
except queue.Empty:
shall_stop = self._processed_all or self._errors
if shall_stop:
break
LOGGER.debug("Waiting for request thread to stop")
self._req_thread.join()
if self._errors:
error_msg = "\n".join(map(str, self._errors))
raise RuntimeError(error_msg)
def _on_result(self, ids, x, y_real, output_names, result, error):
with self._sync:
if error:
self._errors.append(error)
else:
y_pred = {name: result.as_numpy(name) for name in output_names}
self._results.put((ids, x, y_pred, y_real))
self._num_waiting_for -= 1
self._sync.notify_all()
def req_loop(self):
client = InferenceServerClient(self._server_url, verbose=self._verbose)
self._errors = self._verify_triton_state(client)
if self._errors:
return
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
outputs_req = [InferRequestedOutput(name) for name in outputs]
self._num_waiting_for = 0
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
infer_input = InferInput(name, data.shape, inputs[name].datatype)
target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
with self._sync:
def _check_can_send():
return self._num_waiting_for < self._max_unresp_reqs
can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t)
if not can_send:
error_msg = f"Runner could not send new requests for {self._response_wait_t}s"
self._errors.append(error_msg)
break
callback = functools.partial(AsyncGRPCTritonRunner._on_result, self, ids, x, y_real, output_names)
client.async_infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
callback=callback,
)
self._num_waiting_for += 1
# wait till receive all requested data
with self._sync:
def _all_processed():
LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs")
return self._num_waiting_for == 0
self._processed_all = self._sync.wait_for(_all_processed, self.DEFAULT_MAX_FINISH_WAIT_S)
if not self._processed_all:
error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server"
self._errors.append(error_msg)
LOGGER.debug("Finished request thread")
def _verify_triton_state(self, triton_client):
errors = []
if not triton_client.is_server_live():
errors.append(f"Triton server {self._server_url} is not live")
elif not triton_client.is_server_ready():
errors.append(f"Triton server {self._server_url} is not ready")
elif not triton_client.is_model_ready(self._model_name, self._model_version):
errors.append(f"Model {self._model_name}:{self._model_version} is not ready")
return errors
def _parse_args():
parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False)
parser.add_argument(
"--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)"
)
parser.add_argument("--model-name", help="The name of the model used for inference.", required=True)
parser.add_argument("--model-version", help="The version of the model used for inference.", required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved")
parser.add_argument("--response-wait-time", required=False, help="Maximal time to wait for response", default=120)
parser.add_argument(
"--max-unresponded-requests", required=False, help="Maximal number of unresponded requests", default=128
)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args()
return args
def main():
args = _parse_args()
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
log_level = logging.INFO if not args.verbose else logging.DEBUG
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
runner = AsyncGRPCTritonRunner(
args.server_url,
args.model_name,
args.model_version,
dataloader=dataloader_fn(),
verbose=False,
resp_wait_s=args.response_wait_time,
max_unresponded_reqs=args.max_unresponded_requests,
)
with NpzWriter(output_dir=args.output_dir) as writer:
start = time.time()
for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
data = _verify_and_format_dump(args, ids, x, y_pred, y_real)
writer.write(**data)
stop = time.time()
LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
if __name__ == "__main__":
main()
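# --- Illustrative dataloader sketch (not part of the original script) --------
# A minimal example of a module that could be passed via `--dataloader`.
# The tensor names ("INPUT__0", "OUTPUT__0") and shapes are assumptions for
# illustration only; the contract used by AsyncGRPCTritonRunner is that the
# returned callable yields (ids, {input_name: ndarray}, {output_name: ndarray})
# batches whose input names match the deployed model's metadata.
#
# # dataloader.py
# import numpy as np
#
# def get_dataloader_fn(batch_size: int = 8, num_batches: int = 4):
#     def _dataloader():
#         for i in range(num_batches):
#             ids = list(range(i * batch_size, (i + 1) * batch_size))
#             x = {"INPUT__0": np.random.rand(batch_size, 3, 224, 224).astype(np.float32)}
#             y_real = {"OUTPUT__0": np.zeros((batch_size,), dtype=np.int64)}
#             yield ids, x, y_real
#     return _dataloader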
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | context_manager | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python context management helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class IdentityContextManager(object):
"""Returns an identity context manager that does nothing.
This is helpful in setting up conditional `with` statement as below:
with slim.arg_scope(x) if use_slim_scope else IdentityContextManager():
do_stuff()
"""
def __enter__(self):
return None
def __exit__(self, exec_type, exec_value, traceback):
del exec_type
del exec_value
del traceback
return False
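# --- Illustrative sketch (not part of the original file) ---------------------
# Shows the conditional `with` pattern from the class docstring as runnable
# code, with a standard-library context manager standing in for
# `slim.arg_scope`. The helper name below is introduced only for illustration.
def _example_conditional_scope(use_scope=False):
  import contextlib
  import io
  buf = io.StringIO()
  # When `use_scope` is False the print output is not redirected, because
  # IdentityContextManager does nothing on enter/exit.
  with contextlib.redirect_stdout(buf) if use_scope else IdentityContextManager():
    print('doing stuff')
  return buf.getvalue()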
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt | tft_pyt | log_helper | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import itertools
import atexit
import dllogger
from dllogger import Backend, JSONStreamBackend, StdOutBackend
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
class TensorBoardBackend(Backend):
def __init__(self, verbosity, log_dir):
super().__init__(verbosity=verbosity)
self.summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, 'TB_summary'),
flush_secs=120,
max_queue=200
)
self.hp_cache = None
atexit.register(self.summary_writer.close)
@property
def log_level(self):
return self._log_level
def metadata(self, timestamp, elapsedtime, metric, metadata):
pass
def log(self, timestamp, elapsedtime, step, data):
if step == 'HPARAMS':
parameters = {k: v for k, v in data.items() if not isinstance(v, (list, tuple))}
#Unpack list and tuples
for d in [{k+f'_{i}':v for i,v in enumerate(l)} for k,l in data.items() if isinstance(l, (list, tuple))]:
parameters.update(d)
#Remove custom classes
            parameters = {k: v for k, v in parameters.items() if isinstance(v, (int, float, str, bool))}
parameters.update({k:'None' for k, v in data.items() if v is None})
self.hp_cache = parameters
if step == ():
if self.hp_cache is None:
print('Warning: Cannot save HParameters. Please log HParameters with step=\'HPARAMS\'', file=sys.stderr)
return
self.summary_writer.add_hparams(self.hp_cache, data)
if not isinstance(step, int):
return
for k, v in data.items():
self.summary_writer.add_scalar(k, v, step)
def flush(self):
pass
def setup_logger(args):
os.makedirs(args.results, exist_ok=True)
log_path = os.path.join(args.results, args.log_file)
if os.path.exists(log_path):
for i in itertools.count():
s_fname = args.log_file.split('.')
            fname = '.'.join(s_fname[:-1]) + f'_{i}.' + s_fname[-1] if len(s_fname) > 1 else args.log_file + f'.{i}'
log_path = os.path.join(args.results, fname)
if not os.path.exists(log_path):
break
def metric_format(metric, metadata, value):
return "{}: {}".format(metric, f'{value:.5f}' if isinstance(value, float) else value)
def step_format(step):
if step == ():
return "Finished |"
elif isinstance(step, int):
return "Step {0: <5} |".format(step)
return "Step {} |".format(step)
if not dist.is_initialized() or not args.distributed_world_size > 1 or args.distributed_rank == 0:
dllogger.init(backends=[JSONStreamBackend(verbosity=1, filename=log_path),
TensorBoardBackend(verbosity=1, log_dir=args.results),
StdOutBackend(verbosity=2,
step_format=step_format,
prefix_format=lambda x: "")#,
#metric_format=metric_format)
])
else:
dllogger.init(backends=[])
dllogger.log(step='PARAMETER', data=vars(args), verbosity=0)
container_setup_info = {**get_framework_env_vars(), **get_system_info()}
dllogger.log(step='ENVIRONMENT', data=container_setup_info, verbosity=0)
dllogger.metadata('loss', {'GOAL': 'MINIMIZE', 'STAGE': 'TRAIN', 'format': ':5f'})
dllogger.metadata('P10', {'GOAL': 'MINIMIZE', 'STAGE': 'TRAIN', 'format': ':5f'})
dllogger.metadata('P50', {'GOAL': 'MINIMIZE', 'STAGE': 'TRAIN', 'format': ':5f'})
dllogger.metadata('P90', {'GOAL': 'MINIMIZE', 'STAGE': 'TRAIN', 'format': ':5f'})
dllogger.metadata('items/s', {'GOAL': 'MAXIMIZE', 'STAGE': 'TRAIN', 'format': ':1f'})
dllogger.metadata('val_loss', {'GOAL': 'MINIMIZE', 'STAGE': 'VAL', 'format':':5f'})
dllogger.metadata('val_P10', {'GOAL': 'MINIMIZE', 'STAGE': 'VAL', 'format': ':5f'})
dllogger.metadata('val_P50', {'GOAL': 'MINIMIZE', 'STAGE': 'VAL', 'format': ':5f'})
dllogger.metadata('val_P90', {'GOAL': 'MINIMIZE', 'STAGE': 'VAL', 'format': ':5f'})
dllogger.metadata('val_items/s', {'GOAL': 'MAXIMIZE', 'STAGE': 'VAL', 'format': ':1f'})
dllogger.metadata('test_P10', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
dllogger.metadata('test_P50', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
dllogger.metadata('test_P90', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
dllogger.metadata('throughput', {'GOAL': 'MAXIMIZE', 'STAGE': 'TEST', 'format': ':1f'})
    dllogger.metadata('latency_p90', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
    dllogger.metadata('latency_p95', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
    dllogger.metadata('latency_p99', {'GOAL': 'MINIMIZE', 'STAGE': 'TEST', 'format': ':5f'})
def get_framework_env_vars():
return {
'NVIDIA_PYTORCH_VERSION': os.environ.get('NVIDIA_PYTORCH_VERSION'),
'PYTORCH_VERSION': os.environ.get('PYTORCH_VERSION'),
'CUBLAS_VERSION': os.environ.get('CUBLAS_VERSION'),
'NCCL_VERSION': os.environ.get('NCCL_VERSION'),
'CUDA_DRIVER_VERSION': os.environ.get('CUDA_DRIVER_VERSION'),
'CUDNN_VERSION': os.environ.get('CUDNN_VERSION'),
'CUDA_VERSION': os.environ.get('CUDA_VERSION'),
'NVIDIA_PIPELINE_ID': os.environ.get('NVIDIA_PIPELINE_ID'),
'NVIDIA_BUILD_ID': os.environ.get('NVIDIA_BUILD_ID'),
'NVIDIA_TF32_OVERRIDE': os.environ.get('NVIDIA_TF32_OVERRIDE'),
}
def get_system_info():
system_info = subprocess.run('nvidia-smi --query-gpu=gpu_name,memory.total,enforced.power.limit --format=csv'.split(), capture_output=True).stdout
system_info = [i.decode('utf-8') for i in system_info.split(b'\n')]
system_info = [x for x in system_info if x]
return {'system_info': system_info}
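# --- Illustrative usage sketch (not part of the original file) ---------------
# Minimal, hedged example of driving `setup_logger` outside the full TFT
# training script. The Namespace below only carries the attributes this module
# reads (results, log_file, distributed_world_size, distributed_rank); the real
# script passes its complete argparse namespace.
#
# from argparse import Namespace
#
# args = Namespace(results='/tmp/tft_results', log_file='train_log.json',
#                  distributed_world_size=1, distributed_rank=0)
# setup_logger(args)
# dllogger.log(step=1, data={'loss': 0.123, 'items/s': 1000.0}, verbosity=1)
# dllogger.flush()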
|
PyTorch/Translation/GNMT/seq2seq/inference | inference | beam_search | # Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from seq2seq.data.config import BOS
from seq2seq.data.config import EOS
class SequenceGenerator:
"""
Generator for the autoregressive inference with beam search decoding.
"""
def __init__(self, model, beam_size=5, max_seq_len=100,
len_norm_factor=0.6, len_norm_const=5,
cov_penalty_factor=0.1):
"""
Constructor for the SequenceGenerator.
Beam search decoding supports coverage penalty and length
normalization. For details, refer to Section 7 of the GNMT paper
(https://arxiv.org/pdf/1609.08144.pdf).
:param model: model which implements generate method
:param beam_size: decoder beam size
:param max_seq_len: maximum decoder sequence length
:param len_norm_factor: length normalization factor
:param len_norm_const: length normalization constant
:param cov_penalty_factor: coverage penalty factor
"""
self.model = model
self.beam_size = beam_size
self.max_seq_len = max_seq_len
self.len_norm_factor = len_norm_factor
self.len_norm_const = len_norm_const
self.cov_penalty_factor = cov_penalty_factor
self.batch_first = self.model.batch_first
def greedy_search(self, batch_size, initial_input, initial_context=None):
"""
Greedy decoder.
:param batch_size: decoder batch size
:param initial_input: initial input, usually tensor of BOS tokens
:param initial_context: initial context, usually [encoder_context,
src_seq_lengths, None]
returns: (translation, lengths, counter)
translation: (batch_size, max_seq_len) - indices of target tokens
lengths: (batch_size) - lengths of generated translations
counter: number of iterations of the decoding loop
"""
device = initial_input.device
max_seq_len = self.max_seq_len
translation = torch.zeros(batch_size, max_seq_len, dtype=torch.int64,
device=device)
lengths = torch.ones(batch_size, dtype=torch.int64,
device=device)
active = torch.arange(0, batch_size, dtype=torch.int64,
device=device)
base_mask = torch.arange(0, batch_size, dtype=torch.int64,
device=device)
translation[:, 0] = BOS
words, context = initial_input, initial_context
if self.batch_first:
word_view = (-1, 1)
ctx_batch_dim = 0
else:
word_view = (1, -1)
ctx_batch_dim = 1
counter = 0
for idx in range(1, max_seq_len):
if not len(active):
break
counter += 1
words = words.view(word_view)
output = self.model.generate(words, context, 1)
words, logprobs, attn, context = output
words = words.view(-1)
translation[active, idx] = words
lengths[active] += 1
terminating = (words == EOS)
if terminating.any():
not_terminating = ~terminating
mask = base_mask[:len(active)]
mask = mask.masked_select(not_terminating)
active = active.masked_select(not_terminating)
words = words[mask]
context[0] = context[0].index_select(ctx_batch_dim, mask)
context[1] = context[1].index_select(0, mask)
context[2] = context[2].index_select(1, mask)
return translation, lengths, counter
def beam_search(self, batch_size, initial_input, initial_context=None):
"""
Beam search decoder.
:param batch_size: decoder batch size
:param initial_input: initial input, usually tensor of BOS tokens
:param initial_context: initial context, usually [encoder_context,
src_seq_lengths, None]
returns: (translation, lengths, counter)
translation: (batch_size, max_seq_len) - indices of target tokens
lengths: (batch_size) - lengths of generated translations
counter: number of iterations of the decoding loop
"""
device = initial_input.device
beam_size = self.beam_size
norm_const = self.len_norm_const
norm_factor = self.len_norm_factor
max_seq_len = self.max_seq_len
cov_penalty_factor = self.cov_penalty_factor
translation = torch.zeros(batch_size * beam_size, max_seq_len,
dtype=torch.int64, device=device)
lengths = torch.ones(batch_size * beam_size,
dtype=torch.int64, device=device)
scores = torch.zeros(batch_size * beam_size,
dtype=torch.float32, device=device)
active = torch.arange(0, batch_size * beam_size,
dtype=torch.int64, device=device)
base_mask = torch.arange(0, batch_size * beam_size,
dtype=torch.int64, device=device)
global_offset = torch.arange(0, batch_size * beam_size, beam_size,
device=device, dtype=torch.int64)
eos_beam_fill = torch.tensor([0] + (beam_size - 1) * [float('-inf')],
dtype=torch.float32, device=device)
translation[:, 0] = BOS
words, context = initial_input, initial_context
if self.batch_first:
word_view = (-1, 1)
ctx_batch_dim = 0
attn_query_dim = 1
else:
word_view = (1, -1)
ctx_batch_dim = 1
attn_query_dim = 0
# replicate context
if self.batch_first:
# context[0] (encoder state): (batch, seq, feature)
_, seq, feature = context[0].shape
context[0].unsqueeze_(1)
context[0] = context[0].expand(-1, beam_size, -1, -1)
context[0] = context[0].contiguous().view(batch_size * beam_size,
seq, feature)
# context[0]: (batch * beam, seq, feature)
else:
# context[0] (encoder state): (seq, batch, feature)
seq, _, feature = context[0].shape
context[0].unsqueeze_(2)
context[0] = context[0].expand(-1, -1, beam_size, -1)
context[0] = context[0].contiguous().view(seq, batch_size *
beam_size, feature)
# context[0]: (seq, batch * beam, feature)
# context[1] (encoder seq length): (batch)
context[1].unsqueeze_(1)
context[1] = context[1].expand(-1, beam_size)
context[1] = context[1].contiguous().view(batch_size * beam_size)
# context[1]: (batch * beam)
accu_attn_scores = torch.zeros(batch_size * beam_size, seq,
dtype=torch.float32, device=device)
counter = 0
for idx in range(1, self.max_seq_len):
if not len(active):
break
counter += 1
eos_mask = (words == EOS)
eos_mask = eos_mask.view(-1, beam_size)
terminating, _ = eos_mask.min(dim=1)
lengths[active[~eos_mask.view(-1)]] += 1
output = self.model.generate(words, context, beam_size)
words, logprobs, attn, context = output
attn = attn.float().squeeze(attn_query_dim)
attn = attn.masked_fill(eos_mask.view(-1).unsqueeze(1), 0)
accu_attn_scores[active] += attn
# words: (batch, beam, k)
words = words.view(-1, beam_size, beam_size)
words = words.masked_fill(eos_mask.unsqueeze(2), EOS)
# logprobs: (batch, beam, k)
logprobs = logprobs.float().view(-1, beam_size, beam_size)
if eos_mask.any():
logprobs[eos_mask] = eos_beam_fill
active_scores = scores[active].view(-1, beam_size)
# new_scores: (batch, beam, k)
new_scores = active_scores.unsqueeze(2) + logprobs
if idx == 1:
new_scores[:, 1:, :].fill_(float('-inf'))
new_scores = new_scores.view(-1, beam_size * beam_size)
# index: (batch, beam)
_, index = new_scores.topk(beam_size, dim=1)
source_beam = index // beam_size
new_scores = new_scores.view(-1, beam_size * beam_size)
best_scores = torch.gather(new_scores, 1, index)
scores[active] = best_scores.view(-1)
words = words.view(-1, beam_size * beam_size)
words = torch.gather(words, 1, index)
# words: (1, batch * beam)
words = words.view(word_view)
offset = global_offset[:source_beam.shape[0]]
source_beam += offset.unsqueeze(1)
translation[active, :] = translation[active[source_beam.view(-1)], :]
translation[active, idx] = words.view(-1)
lengths[active] = lengths[active[source_beam.view(-1)]]
context[2] = context[2].index_select(1, source_beam.view(-1))
if terminating.any():
not_terminating = ~terminating
not_terminating = not_terminating.unsqueeze(1)
not_terminating = not_terminating.expand(-1, beam_size).contiguous()
normalization_mask = active.view(-1, beam_size)[terminating]
# length normalization
norm = lengths[normalization_mask].float()
norm = (norm_const + norm) / (norm_const + 1.0)
norm = norm ** norm_factor
scores[normalization_mask] /= norm
# coverage penalty
penalty = accu_attn_scores[normalization_mask]
penalty = penalty.clamp(0, 1)
penalty = penalty.log()
penalty[penalty == float('-inf')] = 0
penalty = penalty.sum(dim=-1)
scores[normalization_mask] += cov_penalty_factor * penalty
mask = base_mask[:len(active)]
mask = mask.masked_select(not_terminating.view(-1))
words = words.index_select(ctx_batch_dim, mask)
context[0] = context[0].index_select(ctx_batch_dim, mask)
context[1] = context[1].index_select(0, mask)
context[2] = context[2].index_select(1, mask)
active = active.masked_select(not_terminating.view(-1))
scores = scores.view(batch_size, beam_size)
_, idx = scores.max(dim=1)
translation = translation[idx + global_offset, :]
lengths = lengths[idx + global_offset]
return translation, lengths, counter
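# --- Illustrative sketch (not part of the original file) ---------------------
# Shows, with made-up numbers, how the length normalization and coverage
# penalty applied in `beam_search` (GNMT paper, Section 7) rescale a finished
# hypothesis score. The constants mirror the SequenceGenerator defaults; the
# attention values and raw score are arbitrary examples.
def _example_length_and_coverage_penalty():
    len_norm_const, len_norm_factor, cov_penalty_factor = 5.0, 0.6, 0.1
    length = torch.tensor([12.0])       # number of generated tokens
    raw_score = torch.tensor([-7.5])    # sum of token log-probabilities
    norm = ((len_norm_const + length) / (len_norm_const + 1.0)) ** len_norm_factor
    # accumulated attention mass over 4 source positions for this hypothesis
    accu_attn = torch.tensor([[0.9, 1.3, 0.2, 0.6]])
    penalty = accu_attn.clamp(0, 1).log()
    penalty[penalty == float('-inf')] = 0
    penalty = penalty.sum(dim=-1)
    return raw_score / norm + cov_penalty_factor * penalty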
|
PyTorch/Detection/Efficientdet/scripts/D0 | D0 | validation_AMP_A100-80G | #!/bin/bash
rm -rf *.json
python validate.py '/workspace/object_detection/datasets/coco/' --model efficientdet_d0 -b ${BATCH_SIZE:-8} --torchscript --use-ema --amp --checkpoint ${CKPT_PATH:-/checkpoints/Effdet_B0_test.pth}
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling/roi_heads/box_head | box_head | loss | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler
)
from maskrcnn_benchmark.modeling.utils import cat
class FastRCNNLossComputation(object):
"""
Computes the loss for Faster R-CNN.
Also supports FPN
"""
def __init__(self, proposal_matcher, fg_bg_sampler, box_coder):
"""
Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder)
"""
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Fast RCNN only need "labels" field for selecting the targets
target = target.copy_with_fields("labels")
# get the targets corresponding GT for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, proposals, targets):
labels = []
regression_targets = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_targets.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_inds] = 0
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[ignore_inds] = -1 # -1 is ignored by sampler
# compute regression targets
regression_targets_per_image = self.box_coder.encode(
matched_targets.bbox, proposals_per_image.bbox
)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
return labels, regression_targets
def subsample(self, proposals, targets):
"""
This method performs the positive/negative sampling, and return
the sampled proposals.
Note: this function keeps a state.
Arguments:
proposals (list[BoxList])
targets (list[BoxList])
"""
labels, regression_targets = self.prepare_targets(proposals, targets)
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
proposals = list(proposals)
# add corresponding label and regression_targets information to the bounding boxes
for labels_per_image, regression_targets_per_image, proposals_per_image in zip(
labels, regression_targets, proposals
):
proposals_per_image.add_field("labels", labels_per_image)
proposals_per_image.add_field(
"regression_targets", regression_targets_per_image
)
        # distribute the sampled proposals, which were obtained on all feature maps
        # concatenated via the fg_bg_sampler, back into individual feature map levels
for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
zip(sampled_pos_inds, sampled_neg_inds)
):
img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
proposals_per_image = proposals[img_idx][img_sampled_inds]
proposals[img_idx] = proposals_per_image
self._proposals = proposals
return proposals
def __call__(self, class_logits, box_regression):
"""
Computes the loss for Faster R-CNN.
This requires that the subsample method has been called beforehand.
Arguments:
class_logits (list[Tensor])
box_regression (list[Tensor])
Returns:
classification_loss (Tensor)
box_loss (Tensor)
"""
class_logits = cat(class_logits, dim=0)
box_regression = cat(box_regression, dim=0)
device = class_logits.device
if not hasattr(self, "_proposals"):
raise RuntimeError("subsample needs to be called before")
proposals = self._proposals
labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
regression_targets = cat(
[proposal.get_field("regression_targets") for proposal in proposals], dim=0
)
classification_loss = F.cross_entropy(class_logits, labels)
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, to be used with
# advanced indexing
sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
labels_pos = labels[sampled_pos_inds_subset]
map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3], device=device)
box_loss = smooth_l1_loss(
box_regression[sampled_pos_inds_subset[:, None], map_inds],
regression_targets[sampled_pos_inds_subset],
size_average=False,
beta=1,
)
box_loss = box_loss / labels.numel()
return classification_loss, box_loss
def make_roi_box_loss_evaluator(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
fg_bg_sampler = BalancedPositiveNegativeSampler(
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
)
loss_evaluator = FastRCNNLossComputation(matcher, fg_bg_sampler, box_coder)
return loss_evaluator
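# --- Illustrative call order (not part of the original file) -----------------
# Sketch of how the ROI box head is expected to drive this loss computation;
# `cfg`, `features`, `proposals`, `targets`, `feature_extractor` and
# `predictor` come from the surrounding detection pipeline and are assumptions
# here.
#
# loss_evaluator = make_roi_box_loss_evaluator(cfg)
# # 1) subsample() caches the sampled proposals on the evaluator, so it must run first
# proposals = loss_evaluator.subsample(proposals, targets)
# # 2) run the box feature extractor / predictor on the sampled proposals
# x = feature_extractor(features, proposals)
# class_logits, box_regression = predictor(x)
# # 3) compute the two losses against the cached proposals
# loss_classifier, loss_box_reg = loss_evaluator([class_logits], [box_regression])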
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | pix2pix | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Implementation of the Image-to-Image Translation model.
This network represents a port of the following work:
Image-to-Image Translation with Conditional Adversarial Networks
Phillip Isola, Jun-Yan Zhu, Tinghui Zhou and Alexei A. Efros
Arxiv, 2017
https://phillipi.github.io/pix2pix/
A reference implementation written in Lua can be found at:
https://github.com/phillipi/pix2pix/blob/master/models.lua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import tensorflow as tf
layers = tf.contrib.layers
def pix2pix_arg_scope():
"""Returns a default argument scope for isola_net.
Returns:
An arg scope.
"""
# These parameters come from the online port, which don't necessarily match
# those in the paper.
# TODO(nsilberman): confirm these values with Philip.
instance_norm_params = {
'center': True,
'scale': True,
'epsilon': 0.00001,
}
with tf.contrib.framework.arg_scope(
[layers.conv2d, layers.conv2d_transpose],
normalizer_fn=layers.instance_norm,
normalizer_params=instance_norm_params,
weights_initializer=tf.random_normal_initializer(0, 0.02)) as sc:
return sc
def upsample(net, num_outputs, kernel_size, method='nn_upsample_conv'):
"""Upsamples the given inputs.
Args:
net: A `Tensor` of size [batch_size, height, width, filters].
num_outputs: The number of output filters.
kernel_size: A list of 2 scalars or a 1x2 `Tensor` indicating the scale,
relative to the inputs, of the output dimensions. For example, if kernel
size is [2, 3], then the output height and width will be twice and three
times the input size.
method: The upsampling method.
Returns:
    A `Tensor` which was upsampled using the specified method.
Raises:
ValueError: if `method` is not recognized.
"""
net_shape = tf.shape(net)
height = net_shape[1]
width = net_shape[2]
if method == 'nn_upsample_conv':
net = tf.image.resize_nearest_neighbor(
net, [kernel_size[0] * height, kernel_size[1] * width])
net = layers.conv2d(net, num_outputs, [4, 4], activation_fn=None)
elif method == 'conv2d_transpose':
net = layers.conv2d_transpose(
net, num_outputs, [4, 4], stride=kernel_size, activation_fn=None)
else:
raise ValueError('Unknown method: [%s]' % method)
return net
class Block(
collections.namedtuple('Block', ['num_filters', 'decoder_keep_prob'])):
"""Represents a single block of encoder and decoder processing.
The Image-to-Image translation paper works a bit differently than the original
U-Net model. In particular, each block represents a single operation in the
encoder which is concatenated with the corresponding decoder representation.
A dropout layer follows the concatenation and convolution of the concatenated
features.
"""
pass
def _default_generator_blocks():
"""Returns the default generator block definitions.
Returns:
A list of generator blocks.
"""
return [
Block(64, 0.5),
Block(128, 0.5),
Block(256, 0.5),
Block(512, 0),
Block(512, 0),
Block(512, 0),
Block(512, 0),
]
def pix2pix_generator(net,
num_outputs,
blocks=None,
upsample_method='nn_upsample_conv',
is_training=False): # pylint: disable=unused-argument
"""Defines the network architecture.
Args:
net: A `Tensor` of size [batch, height, width, channels]. Note that the
generator currently requires square inputs (e.g. height=width).
num_outputs: The number of (per-pixel) outputs.
blocks: A list of generator blocks or `None` to use the default generator
definition.
upsample_method: The method of upsampling images, one of 'nn_upsample_conv'
or 'conv2d_transpose'
is_training: Whether or not we're in training or testing mode.
Returns:
A `Tensor` representing the model output and a dictionary of model end
points.
Raises:
ValueError: if the input heights do not match their widths.
"""
end_points = {}
blocks = blocks or _default_generator_blocks()
input_size = net.get_shape().as_list()
input_size[3] = num_outputs
upsample_fn = functools.partial(upsample, method=upsample_method)
encoder_activations = []
###########
# Encoder #
###########
with tf.variable_scope('encoder'):
with tf.contrib.framework.arg_scope(
[layers.conv2d],
kernel_size=[4, 4],
stride=2,
activation_fn=tf.nn.leaky_relu):
for block_id, block in enumerate(blocks):
# No normalizer for the first encoder layers as per 'Image-to-Image',
# Section 5.1.1
if block_id == 0:
# First layer doesn't use normalizer_fn
net = layers.conv2d(net, block.num_filters, normalizer_fn=None)
elif block_id < len(blocks) - 1:
net = layers.conv2d(net, block.num_filters)
else:
# Last layer doesn't use activation_fn nor normalizer_fn
net = layers.conv2d(
net, block.num_filters, activation_fn=None, normalizer_fn=None)
encoder_activations.append(net)
end_points['encoder%d' % block_id] = net
###########
# Decoder #
###########
reversed_blocks = list(blocks)
reversed_blocks.reverse()
with tf.variable_scope('decoder'):
# Dropout is used at both train and test time as per 'Image-to-Image',
# Section 2.1 (last paragraph).
with tf.contrib.framework.arg_scope([layers.dropout], is_training=True):
for block_id, block in enumerate(reversed_blocks):
if block_id > 0:
net = tf.concat([net, encoder_activations[-block_id - 1]], axis=3)
# The Relu comes BEFORE the upsample op:
net = tf.nn.relu(net)
net = upsample_fn(net, block.num_filters, [2, 2])
if block.decoder_keep_prob > 0:
net = layers.dropout(net, keep_prob=block.decoder_keep_prob)
end_points['decoder%d' % block_id] = net
with tf.variable_scope('output'):
# Explicitly set the normalizer_fn to None to override any default value
# that may come from an arg_scope, such as pix2pix_arg_scope.
logits = layers.conv2d(
net, num_outputs, [4, 4], activation_fn=None, normalizer_fn=None)
logits = tf.reshape(logits, input_size)
end_points['logits'] = logits
end_points['predictions'] = tf.tanh(logits)
return logits, end_points
def pix2pix_discriminator(net, num_filters, padding=2, pad_mode='REFLECT',
activation_fn=tf.nn.leaky_relu, is_training=False):
"""Creates the Image2Image Translation Discriminator.
Args:
net: A `Tensor` of size [batch_size, height, width, channels] representing
the input.
num_filters: A list of the filters in the discriminator. The length of the
list determines the number of layers in the discriminator.
padding: Amount of reflection padding applied before each convolution.
pad_mode: mode for tf.pad, one of "CONSTANT", "REFLECT", or "SYMMETRIC".
activation_fn: activation fn for layers.conv2d.
is_training: Whether or not the model is training or testing.
Returns:
A logits `Tensor` of size [batch_size, N, N, 1] where N is the number of
'patches' we're attempting to discriminate and a dictionary of model end
points.
"""
del is_training
end_points = {}
num_layers = len(num_filters)
def padded(net, scope):
if padding:
with tf.variable_scope(scope):
spatial_pad = tf.constant(
[[0, 0], [padding, padding], [padding, padding], [0, 0]],
dtype=tf.int32)
return tf.pad(net, spatial_pad, pad_mode)
else:
return net
with tf.contrib.framework.arg_scope(
[layers.conv2d],
kernel_size=[4, 4],
stride=2,
padding='valid',
activation_fn=activation_fn):
# No normalization on the input layer.
net = layers.conv2d(
padded(net, 'conv0'), num_filters[0], normalizer_fn=None, scope='conv0')
end_points['conv0'] = net
for i in range(1, num_layers - 1):
net = layers.conv2d(
padded(net, 'conv%d' % i), num_filters[i], scope='conv%d' % i)
end_points['conv%d' % i] = net
# Stride 1 on the last layer.
net = layers.conv2d(
padded(net, 'conv%d' % (num_layers - 1)),
num_filters[-1],
stride=1,
scope='conv%d' % (num_layers - 1))
end_points['conv%d' % (num_layers - 1)] = net
# 1-dim logits, stride 1, no activation, no normalization.
logits = layers.conv2d(
padded(net, 'conv%d' % num_layers),
1,
stride=1,
activation_fn=None,
normalizer_fn=None,
scope='conv%d' % num_layers)
end_points['logits'] = logits
end_points['predictions'] = tf.sigmoid(logits)
return logits, end_points
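# --- Illustrative usage sketch (not part of the original file) ---------------
# Minimal example of wiring the generator and discriminator together. The
# 256x256 RGB placeholder and the discriminator filter list are assumptions
# made only for illustration.
#
# images = tf.placeholder(tf.float32, [1, 256, 256, 3])
# with tf.contrib.framework.arg_scope(pix2pix_arg_scope()):
#   generated, _ = pix2pix_generator(images, num_outputs=3)
#   logits, _ = pix2pix_discriminator(
#       tf.concat([images, generated], axis=3), num_filters=[64, 128, 256, 512])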
|
PyTorch/SpeechRecognition/QuartzNet/common | common | tb_dllogger | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import glob
import os
import re
from pathlib import Path
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
import dllogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
tb_loggers = {}
class TBLogger:
"""
    dummies: stretch the screen with empty plots so the legend always
    fits for the other plots
"""
def __init__(self, enabled, log_dir, name, interval=1, dummies=True):
self.enabled = enabled
self.interval = interval
self.cache = {}
if self.enabled:
self.summary_writer = SummaryWriter(
log_dir=os.path.join(log_dir, name),
flush_secs=120, max_queue=200)
atexit.register(self.summary_writer.close)
if dummies:
for key in ('aaa', 'zzz'):
self.summary_writer.add_scalar(key, 0.0, 1)
def log(self, step, data):
for k, v in data.items():
self.log_value(step, k, v.item() if type(v) is torch.Tensor else v)
def log_value(self, step, key, val, stat='mean'):
if self.enabled:
if key not in self.cache:
self.cache[key] = []
self.cache[key].append(val)
if len(self.cache[key]) == self.interval:
agg_val = getattr(np, stat)(self.cache[key])
self.summary_writer.add_scalar(key, agg_val, step)
del self.cache[key]
def log_grads(self, step, model):
if self.enabled:
norms = [p.grad.norm().item() for p in model.parameters()
if p.grad is not None]
for stat in ('max', 'min', 'mean'):
self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms),
stat=stat)
def unique_log_fpath(fpath):
"""Have a unique log filename for every separate run"""
    log_num = max([0] + [int(re.search(r"\.(\d+)", Path(f).suffix).group(1))
for f in glob.glob(f"{fpath}.*")])
return f"{fpath}.{log_num + 1}"
def stdout_step_format(step):
if isinstance(step, str):
return step
fields = []
if len(step) > 0:
fields.append("epoch {:>4}".format(step[0]))
if len(step) > 1:
fields.append("iter {:>4}".format(step[1]))
if len(step) > 2:
fields[-1] += "/{}".format(step[2])
return " | ".join(fields)
def stdout_metric_format(metric, metadata, value):
name = metadata.get("name", metric + " : ")
unit = metadata.get("unit", None)
format = f'{{{metadata.get("format", "")}}}'
fields = [name, format.format(value) if value is not None else value, unit]
fields = [f for f in fields if f is not None]
return "| " + " ".join(fields)
def init_log(args):
enabled = (args.local_rank == 0)
if enabled:
fpath = args.log_file or os.path.join(args.output_dir, 'nvlog.json')
backends = [
JSONStreamBackend(Verbosity.DEFAULT, fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(fpath)),
StdOutBackend(Verbosity.VERBOSE, step_format=stdout_step_format,
metric_format=stdout_metric_format)
]
else:
backends = []
dllogger.init(backends=backends)
dllogger.metadata("train_lrate", {"name": "lrate", "unit": None, "format": ":>3.2e"})
for id_, pref in [('train', ''), ('train_avg', 'avg train '),
('dev', ' avg dev '), ('dev_ema', ' EMA dev ')]:
dllogger.metadata(f"{id_}_loss",
{"name": f"{pref}loss", "unit": None, "format": ":>7.2f"})
dllogger.metadata(f"{id_}_wer",
{"name": f"{pref}wer", "unit": "%", "format": ":>6.2f"})
dllogger.metadata(f"{id_}_throughput",
{"name": f"{pref}utts/s", "unit": "samples/s", "format": ":>5.0f"})
dllogger.metadata(f"{id_}_took",
{"name": "took", "unit": "s", "format": ":>5.2f"})
tb_subsets = ['train', 'dev', 'dev_ema'] if args.ema else ['train', 'dev']
global tb_loggers
tb_loggers = {s: TBLogger(enabled, args.output_dir, name=s)
for s in tb_subsets}
log_parameters(vars(args), tb_subset='train')
def log(step, tb_total_steps=None, subset='train', data={}):
if tb_total_steps is not None:
tb_loggers[subset].log(tb_total_steps, data)
if subset != '':
data = {f'{subset}_{key}': val for key, val in data.items()}
dllogger.log(step, data=data)
def log_grads_tb(tb_total_steps, grads, tb_subset='train'):
tb_loggers[tb_subset].log_grads(tb_total_steps, grads)
def log_parameters(data, verbosity=0, tb_subset=None):
for k, v in data.items():
dllogger.log(step="PARAMETER", data={k: v}, verbosity=verbosity)
if tb_subset is not None and tb_loggers[tb_subset].enabled:
tb_data = {k: v for k, v in data.items()
if type(v) in (str, bool, int, float)}
tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {})
def flush_log():
dllogger.flush()
for tbl in tb_loggers.values():
if tbl.enabled:
tbl.summary_writer.flush()
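# --- Illustrative usage sketch (not part of the original file) ---------------
# Hedged example of standalone usage. Only the attributes this module reads
# (local_rank, log_file, output_dir, ema) are set; the real training script
# passes its full argparse namespace.
#
# from argparse import Namespace
#
# args = Namespace(local_rank=0, log_file=None,
#                  output_dir='/tmp/quartznet_logs', ema=False)
# init_log(args)
# log(step=(1, 100, 1000), tb_total_steps=100, subset='train',
#     data={'loss': 12.3, 'throughput': 4500.0})
# flush_log()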
|
PyTorch/LanguageModeling/BART/scripts | scripts | run_inference_benchmark | #!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
PRED_BS=${1:-128}
EVAL_BEAMS=${2:-6}
MAX_SOURCE_LEN=${3:-1024}
MAX_TARGET_LEN=${4:-60}
INIT_CKPT=${5:-"facebook/bart-large"}
DATA_DIR=${6:-data/xsum}
CONFIG_PATH=${7:-"configs/config_xsum.json"}
printf -v TAG "bart_pyt_inference_benchmark"
DATESTAMP=`date +'%y%m%d%H%M%S'`
RESULTS_DIR=${RESULTS_DIR:-results/${TAG}_${DATESTAMP}}
mkdir -p $RESULTS_DIR
echo "Inference for Batch size $PRED_BS Eval Beams $EVAL_BEAMS Source Length $MAX_SOURCE_LEN Target Length $MAX_TARGET_LEN
Model at $INIT_CKPT Data at $DATA_DIR and Config at $CONFIG_PATH $DATA_DIR $CONFIG_PATH" |& tee ${RESULTS_DIR}/inference_benchmark.log
echo "NUM_GPU Precision Throughput" |& tee ${RESULTS_DIR}/inference_benchmark.log
for NUM_GPU in 1 4 8; do
for precision in fp16 fp32; do
if [ "$precision" = "fp16" ] ; then
echo "fp16 activated!"
USE_FP16="--fp16"
else
echo "fp32/tf32 activated!"
USE_FP16=""
fi
python -m torch.distributed.launch --nproc_per_node=$NUM_GPU run_eval.py \
--task summarization \
--bs ${PRED_BS} --max_source_length=${MAX_SOURCE_LEN} --max_target_length=${MAX_TARGET_LEN} \
--eval_max_gen_length=${MAX_TARGET_LEN} --eval_beams=${EVAL_BEAMS} ${USE_FP16} \
${INIT_CKPT} ${CONFIG_PATH} ${DATA_DIR} ${RESULTS_DIR} |& tee -a ${RESULTS_DIR}/log_${NUM_GPU}_${precision}.log
perf=`cat ${RESULTS_DIR}/log_${NUM_GPU}_${precision}.log | grep -F 'INFO:tensorflow:Throughput Average (sentences/sec) =' | tail -1 | awk -F'= ' '{print $2}'`
echo "$NUM_GPU $precision $perf" |& tee ${RESULTS_DIR}/inference_benchmark.log
done
done
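# Example invocation (not part of the original script; paths are placeholders):
#   bash scripts/run_inference_benchmark.sh 64 6 1024 60 facebook/bart-large data/xsum configs/config_xsum.json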
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2DenoiseTransformPlugin | taco2DenoiseTransformPlugin | taco2DenoiseTransformLayerPluginCreator | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "taco2DenoiseTransformLayerPluginCreator.h"
#include "taco2DenoiseTransformLayerPlugin.h"
#include <stdexcept>
#include <vector>
using namespace nvinfer1;
namespace nvinfer1
{
namespace plugin
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const char* const FILTERLENGTH_STR = "FilterLength";
constexpr const char* const INPUTLENGTH_STR = "InputLength";
constexpr const char* const WEIGHTS_STR = "Weights";
} // namespace
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
PluginFieldCollection* Taco2DenoiseTransformLayerPluginCreator::getFields()
{
static PluginFieldCollection* pluginPtr = nullptr;
static const std::vector<PluginField> fields{
{FILTERLENGTH_STR, nullptr, PluginFieldType::kINT32, 0},
{INPUTLENGTH_STR, nullptr, PluginFieldType::kINT32, 0},
{WEIGHTS_STR, nullptr, PluginFieldType::kFLOAT32, 0},
};
if (!pluginPtr)
{
pluginPtr
= static_cast<PluginFieldCollection*>(malloc(sizeof(*pluginPtr) + fields.size() * sizeof(PluginField)));
pluginPtr->nbFields = static_cast<int>(fields.size());
pluginPtr->fields = fields.data();
}
return pluginPtr;
}
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
Taco2DenoiseTransformLayerPluginCreator::Taco2DenoiseTransformLayerPluginCreator()
: mNamespace()
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
const char* Taco2DenoiseTransformLayerPluginCreator::getPluginName() const
{
return Taco2DenoiseTransformLayerPlugin::getName();
}
const char* Taco2DenoiseTransformLayerPluginCreator::getPluginVersion() const
{
return Taco2DenoiseTransformLayerPlugin::getVersion();
}
const PluginFieldCollection* Taco2DenoiseTransformLayerPluginCreator::getFieldNames()
{
return getFields();
}
IPluginV2* Taco2DenoiseTransformLayerPluginCreator::createPlugin(
const char* const /*layerName*/, const PluginFieldCollection* fc)
{
int filterLength = 0;
int inputLength = 0;
Weights weights{DataType::kFLOAT, nullptr, 0};
for (int i = 0; i < fc->nbFields; ++i)
{
const std::string name(fc->fields[i].name);
if (name == FILTERLENGTH_STR)
{
filterLength = static_cast<const int32_t*>(fc->fields[i].data)[0];
}
else if (name == INPUTLENGTH_STR)
{
inputLength = static_cast<const int32_t*>(fc->fields[i].data)[0];
}
else if (name == WEIGHTS_STR)
{
weights.values = fc->fields[i].data;
weights.count = fc->fields[i].length;
}
else
{
throw std::runtime_error("Unknown plugin field: '" + name + "'");
}
}
return new Taco2DenoiseTransformLayerPlugin(weights, filterLength, inputLength);
}
IPluginV2* Taco2DenoiseTransformLayerPluginCreator::deserializePlugin(
const char* const /* layerName */, const void* const serialData, size_t const serialLength)
{
return new Taco2DenoiseTransformLayerPlugin(
Taco2DenoiseTransformLayerPlugin::deserialize(serialData, serialLength));
}
void Taco2DenoiseTransformLayerPluginCreator::setPluginNamespace(const char* pluginNamespace)
{
mNamespace = pluginNamespace;
}
const char* Taco2DenoiseTransformLayerPluginCreator::getPluginNamespace() const
{
return mNamespace.c_str();
}
} // namespace plugin
} // namespace nvinfer1
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/model_analyzer | model_analyzer | exceptions | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ModelAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils | utils | env | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
from maskrcnn_benchmark.utils.imports import import_file
def setup_environment():
"""Perform environment setup work. The default setup is a no-op, but this
function allows the user to specify a Python source file that performs
custom setup work that may be necessary to their computing environment.
"""
custom_module_path = os.environ.get("TORCH_DETECTRON_ENV_MODULE")
if custom_module_path:
setup_custom_environment(custom_module_path)
else:
# The default setup is a no-op
pass
def setup_custom_environment(custom_module_path):
"""Load custom environment setup from a Python source file and run the setup
function.
"""
module = import_file("maskrcnn_benchmark.utils.env.custom_module", custom_module_path)
assert hasattr(module, "setup_environment") and callable(
module.setup_environment
), (
"Custom environment module defined in {} does not have the "
"required callable attribute 'setup_environment'."
).format(
custom_module_path
)
module.setup_environment()
# Force environment setup when this module is imported
setup_environment()
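# Illustrative example (not shipped with the repo): a module pointed to by
# TORCH_DETECTRON_ENV_MODULE only needs to expose a callable named
# `setup_environment`, e.g.
#
#   # my_env_setup.py (hypothetical)
#   import os
#   def setup_environment():
#       os.environ.setdefault("OMP_NUM_THREADS", "1")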
|
PyTorch/SpeechRecognition/QuartzNet/common/dali | dali | data_loader | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import os
import torch
import torch.distributed as dist
from .iterator import DaliIterator, SyntheticDataIterator
from .pipeline import make_dali_asr_pipeline
from common.helpers import print_once
def _parse_json(json_path: str, start_label=0, predicate=lambda json: True):
"""
Parses json file to the format required by DALI.
Args:
json_path: path to json file
start_label: the label, starting from which DALI will assign
consecutive int numbers to every transcript
predicate: function, that accepts a sample descriptor
(i.e. json dictionary) as an argument. If the predicate for a given
sample returns True, it will be included in the dataset.
Returns:
output_files: dict that maps file name to label assigned by DALI
transcripts: dict that maps label assigned by DALI to the transcript
"""
with open(json_path) as f:
librispeech_json = json.load(f)
output_files = {}
transcripts = {}
curr_label = start_label
for original_sample in librispeech_json:
if not predicate(original_sample):
continue
transcripts[curr_label] = original_sample['transcript']
output_files[original_sample['files'][-1]['fname']] = curr_label
curr_label += 1
return output_files, transcripts
def _dict_to_file(dict: dict, filename: str):
with open(filename, "w") as f:
for key, value in dict.items():
f.write("{} {}\n".format(key, value))
class DaliDataLoader:
"""
DataLoader is the main entry point to the data preprocessing pipeline.
    To use, create an object and then just iterate over it.
    DataLoader will do the rest for you.
    Example:
        data_layer = DaliDataLoader(gpu_id, dataset_path, config_data,
                                    config_features, json_names, symbols,
                                    batch_size, pipeline_type)
        for data in data_layer:
            print(data)  # Here's your preprocessed data
Args:
device_type: Which device to use for preprocessing. Choose: "cpu", "gpu"
pipeline_type: Choose: "train", "val", "synth"
"""
def __init__(self, gpu_id, dataset_path: str, config_data: dict,
config_features: dict, json_names: list, symbols: list,
batch_size: int, pipeline_type: str,
grad_accumulation_steps: int = 1,
synth_iters_per_epoch: int = 544, device_type: str = "gpu"):
self.batch_size = batch_size
self.grad_accumulation_steps = grad_accumulation_steps
self.drop_last = (pipeline_type == 'train')
self.device_type = device_type
pipeline_type = self._parse_pipeline_type(pipeline_type)
if pipeline_type == "synth":
self._dali_data_iterator = self._init_synth_iterator(
self.batch_size,
config_features['nfilt'],
iters_per_epoch=synth_iters_per_epoch,
ngpus=torch.distributed.get_world_size())
else:
self._dali_data_iterator = self._init_iterator(
gpu_id=gpu_id,
dataset_path=dataset_path,
config_data=config_data,
config_features=config_features,
json_names=json_names,
symbols=symbols,
train_pipeline=pipeline_type == "train")
def _init_iterator(self, gpu_id, dataset_path, config_data,
config_features, json_names: list, symbols: list,
train_pipeline: bool):
"""Returns an iterator over data preprocessed with Dali."""
def hash_list_of_strings(li):
return str(abs(hash(''.join(li))))
output_files, transcripts = {}, {}
max_duration = config_data['max_duration']
for jname in json_names:
of, tr = _parse_json(
jname if jname[0] == '/' else os.path.join(dataset_path, jname),
len(output_files),
predicate=lambda json: json['original_duration'] <= max_duration)
output_files.update(of)
transcripts.update(tr)
file_list_path = os.path.join(
"/tmp", "asr_dali.file_list." + hash_list_of_strings(json_names))
_dict_to_file(output_files, file_list_path)
self.dataset_size = len(output_files)
print_once('Dataset read by DALI. '
f'Number of samples: {self.dataset_size}')
pipeline = make_dali_asr_pipeline(
config_data=config_data,
config_features=config_features,
device_id=gpu_id,
file_root=dataset_path,
file_list=file_list_path,
device_type=self.device_type,
batch_size=self.batch_size,
train_pipeline=train_pipeline)
return DaliIterator([pipeline], transcripts=transcripts,
symbols=symbols, batch_size=self.batch_size,
reader_name="file_reader",
train_iterator=train_pipeline)
def _init_synth_iterator(self, batch_size, nfeatures, iters_per_epoch,
ngpus):
self.dataset_size = ngpus * iters_per_epoch * batch_size
return SyntheticDataIterator(batch_size, nfeatures, regenerate=True)
@staticmethod
def _parse_pipeline_type(pipeline_type):
pipe = pipeline_type.lower()
assert pipe in ("train", "val", "synth"), \
'Invalid pipeline type (choices: "train", "val", "synth").'
return pipe
def _shard_size(self):
"""
Total number of samples handled by a single GPU in a single epoch.
"""
world_size = dist.get_world_size() if dist.is_initialized() else 1
if self.drop_last:
divisor = world_size * self.batch_size * self.grad_accumulation_steps
return self.dataset_size // divisor * divisor // world_size
else:
return int(math.ceil(self.dataset_size / world_size))
def __len__(self):
"""
Number of batches handled by each GPU.
"""
if self.drop_last:
assert self._shard_size() % self.batch_size == 0, \
f'{self._shard_size()} {self.batch_size}'
return int(math.ceil(self._shard_size() / self.batch_size))
def data_iterator(self):
return self._dali_data_iterator
def __iter__(self):
return self._dali_data_iterator
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_resnet_v1_fpn_feature_extractor_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 FPN feature extractors."""
import tensorflow as tf
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase
class SSDResnet50V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet50v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False):
min_depth = 32
is_training = True
return ssd_resnet_v1_fpn_feature_extractor.SSDResnet50V1FpnFeatureExtractor(
is_training, depth_multiplier, min_depth, pad_to_multiple,
self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding)
def _resnet_scope_name(self):
return 'resnet_v1_50'
class SSDResnet101V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet101v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False):
min_depth = 32
is_training = True
return (
ssd_resnet_v1_fpn_feature_extractor.SSDResnet101V1FpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _resnet_scope_name(self):
return 'resnet_v1_101'
class SSDResnet152V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet152v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False):
min_depth = 32
is_training = True
return (
ssd_resnet_v1_fpn_feature_extractor.SSDResnet152V1FpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _resnet_scope_name(self):
return 'resnet_v1_152'
if __name__ == '__main__':
tf.test.main()
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src | src | CMakeLists | ##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# include headers in current directory
include_directories("${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}")
# sub-pieces
add_subdirectory("trt")
add_subdirectory("bin")
# build trtis
if (DEFINED BUILD_TRTIS AND NOT BUILD_TRTIS EQUAL 0)
message("Building TRTIS backend")
add_subdirectory("trtis")
endif()
# build tests
add_subdirectory("test")
|
TensorFlow2/Recommendation/DLRM_and_DCNv2 | DLRM_and_DCNv2 | dcnv2 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
from absl import app, flags
def define_dcnv2_specific_flags():
flags.DEFINE_integer("batch_size", default=64 * 1024, help="Batch size used for training")
flags.DEFINE_integer("valid_batch_size", default=64 * 1024, help="Batch size used for validation")
flags.DEFINE_list("top_mlp_dims", [1024, 1024, 512, 256, 1], "Linear layer sizes for the top MLP")
flags.DEFINE_list("bottom_mlp_dims", [512, 256, 128], "Linear layer sizes for the bottom MLP")
flags.DEFINE_string("embedding_dim", default='128', help='Number of columns in the embedding tables')
flags.DEFINE_enum("optimizer", default="adam", enum_values=['sgd', 'adam'],
help='The optimization algorithm to be used.')
flags.DEFINE_enum("interaction", default="cross", enum_values=["dot_custom_cuda", "dot_tensorflow", "cross"],
help="Feature interaction implementation to use")
flags.DEFINE_float("learning_rate", default=0.0001, help="Learning rate")
flags.DEFINE_float("beta1", default=0.9, help="Beta1 for the Adam optimizer")
flags.DEFINE_float("beta2", default=0.999, help="Bea2 for the Adam optimizer")
flags.DEFINE_integer("warmup_steps", default=100,
help='Number of steps over which to linearly increase the LR at the beginning')
flags.DEFINE_integer("decay_start_step", default=48000, help='Optimization step at which to start the poly LR decay')
flags.DEFINE_integer("decay_steps", default=24000, help='Number of steps over which to decay from base LR to 0')
flags.DEFINE_integer("num_cross_layers", default=3, help='Number of cross layers for DCNv2')
flags.DEFINE_integer("cross_layer_projection_dim", default=512, help='Projection dimension used in the cross layers')
define_dcnv2_specific_flags()
import main
def _main(argv):
main.main()
if __name__ == '__main__':
app.run(_main)
|
PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_inference_runner | triton_inference_runner | __init__ | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import TritonInferenceRunner # noqa: F401
|
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit | deployment_toolkit | dump | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Dict, Iterable
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
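# Illustrative example: padding data of shape (2, 3) to target shape (2, 5)
# leaves the batch axis (index 0) untouched and fills the two extra columns
# of the last axis with np.nan.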
def pad_except_batch_axis(data: np.ndarray, target_shape_with_batch_axis: Iterable[int]):
assert all(
[current_size <= target_size for target_size, current_size in zip(target_shape_with_batch_axis, data.shape)]
), "target_shape should have equal or greater all dimensions comparing to data.shape"
padding = [(0, 0)] + [ # (0, 0) - do not pad on batch_axis (with index 0)
(0, target_size - current_size)
for target_size, current_size in zip(target_shape_with_batch_axis[1:], data.shape[1:])
]
return np.pad(data, padding, "constant", constant_values=np.nan)
class NpzWriter:
"""
Dumps dicts of numpy arrays into npz files
It can/shall be used as context manager:
```
    with NpzWriter('mydir') as writer:
        writer.write(outputs={'classes': np.zeros(8), 'probs': np.zeros((8, 4))},
                     labels={'classes': np.zeros(8)},
                     inputs={'input': np.zeros((8, 240, 240, 3))})
```
## Variable size data
    Only a variable size of the last axis is handled. Data is padded with the np.nan value.
    Note that each generated file may have a different size along the dynamic axis.
"""
def __init__(self, output_dir, compress=False):
self._output_dir = Path(output_dir)
self._items_cache: Dict[str, Dict[str, np.ndarray]] = {}
self._items_counters: Dict[str, int] = {}
self._flush_threshold_b = FLUSH_THRESHOLD_B
self._compress = compress
@property
def cache_size(self):
return {name: sum([a.nbytes for a in data.values()]) for name, data in self._items_cache.items()}
def _append_to_cache(self, prefix, data):
if data is None:
return
if not isinstance(data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
cached_data = self._items_cache.get(prefix, {})
for name, value in data.items():
assert isinstance(
value, (list, np.ndarray)
), f"Values shall be lists or np.ndarrays; current type {type(value)}"
if not isinstance(value, np.ndarray):
value = np.array(value)
assert value.dtype.kind in ["S", "U"] or not np.any(
np.isnan(value)
), f"Values with np.nan is not supported; {name}={value}"
cached_value = cached_data.get(name, None)
if cached_value is not None:
target_shape = np.max([cached_value.shape, value.shape], axis=0)
cached_value = pad_except_batch_axis(cached_value, target_shape)
value = pad_except_batch_axis(value, target_shape)
value = np.concatenate((cached_value, value))
cached_data[name] = value
self._items_cache[prefix] = cached_data
def write(self, **kwargs):
"""
Writes named list of dictionaries of np.ndarrays.
        The keyword names become the prefixes of the npz files in which those dictionaries are stored.
ex. writer.write(inputs={'input': np.zeros((2, 10))},
outputs={'classes': np.zeros((2,)), 'probabilities': np.zeros((2, 32))},
labels={'classes': np.zeros((2,))})
Args:
**kwargs: named list of dictionaries of np.ndarrays to store
"""
for prefix, data in kwargs.items():
self._append_to_cache(prefix, data)
biggest_item_size = max(self.cache_size.values())
if biggest_item_size > self._flush_threshold_b:
self.flush()
def flush(self):
for prefix, data in self._items_cache.items():
self._dump(prefix, data)
self._items_cache = {}
def _dump(self, prefix, data):
idx = self._items_counters.setdefault(prefix, 0)
filename = f"{prefix}-{idx:012d}.npz"
output_path = self._output_dir / filename
if self._compress:
np.savez_compressed(output_path, **data)
else:
np.savez(output_path, **data)
nitems = len(list(data.values())[0])
msg_for_labels = (
"If these are correct shapes - consider moving loading of them into metrics.py."
if prefix == "labels"
else ""
)
shapes = {name: value.shape if isinstance(value, np.ndarray) else (len(value),) for name, value in data.items()}
assert all(len(v) == nitems for v in data.values()), (
f'All items in "{prefix}" shall have same size on 0 axis equal to batch size. {msg_for_labels}'
f'{", ".join(f"{name}: {shape}" for name, shape in shapes.items())}'
)
self._items_counters[prefix] += nitems
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
|
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads | heads | box_head_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.heads.box_head."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import box_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
class MaskRCNNBoxHeadTest(test_case.TestCase):
def _build_arg_scope_with_hyperparams(self,
op_type=hyperparams_pb2.Hyperparams.FC):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.build(hyperparams, is_training=True)
def test_prediction_size(self):
box_prediction_head = box_head.MaskRCNNBoxHead(
is_training=False,
num_classes=20,
fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(),
use_dropout=True,
dropout_keep_prob=0.5,
box_code_size=4,
share_box_across_classes=False)
roi_pooled_features = tf.random_uniform(
[64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
prediction = box_prediction_head.predict(
features=roi_pooled_features, num_predictions_per_location=1)
self.assertAllEqual([64, 1, 20, 4], prediction.get_shape().as_list())
class ConvolutionalBoxPredictorTest(test_case.TestCase):
def _build_arg_scope_with_hyperparams(
self, op_type=hyperparams_pb2.Hyperparams.CONV):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.build(hyperparams, is_training=True)
def test_prediction_size(self):
box_prediction_head = box_head.ConvolutionalBoxHead(
is_training=True,
box_code_size=4,
kernel_size=3)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
box_encodings = box_prediction_head.predict(
features=image_feature,
num_predictions_per_location=1)
self.assertAllEqual([64, 323, 1, 4], box_encodings.get_shape().as_list())
class WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase):
def _build_arg_scope_with_hyperparams(
self, op_type=hyperparams_pb2.Hyperparams.CONV):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.build(hyperparams, is_training=True)
def test_prediction_size(self):
box_prediction_head = box_head.WeightSharedConvolutionalBoxHead(
box_code_size=4)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
box_encodings = box_prediction_head.predict(
features=image_feature,
num_predictions_per_location=1)
self.assertAllEqual([64, 323, 4], box_encodings.get_shape().as_list())
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/LanguageModeling/BERT/official/utils/flags | flags | README | # Adding Abseil (absl) flags quickstart
## Defining a flag
absl flag definitions are similar to argparse, although they are defined on a global namespace.
For instance defining a string flag looks like:
```$xslt
from absl import flags
flags.DEFINE_string(
name="my_flag",
default="a_sensible_default",
help="Here is what this flag does."
)
```
All three arguments are required, but default may be `None`. A common optional argument is
short_name for defining abbreviations. Certain `DEFINE_*` methods will have other required arguments.
For instance `DEFINE_enum` requires the `enum_values` argument to be specified.
## Key Flags
absl has the concept of a key flag. Any flag defined in `__main__` is considered a key flag by
default. Key flags are displayed in `--help`, others only appear in `--helpfull`. In order to
handle key flags that are defined outside the module in question, absl provides the
`flags.adopt_module_key_flags()` method. This adds the key flags of a different module to one's own
key flags. For example:
```$xslt
File: flag_source.py
---------------------------------------
from absl import flags
flags.DEFINE_string(name="my_flag", default="abc", help="a flag.")
```
```$xslt
File: my_module.py
---------------------------------------
from absl import app as absl_app
from absl import flags
import flag_source
flags.adopt_module_key_flags(flag_source)
def main(_):
pass
absl_app.run(main, [__file__, "-h"])
```
when `my_module.py` is run it will show the help text for `my_flag`. Because not all flags defined
in a file are equally important, `official/utils/flags/core.py` (generally imported as flags_core)
provides an abstraction for handling key flag declaration in an easy way through the
`register_key_flags_in_core()` function, which allows a module to make a single
`adopt_key_flags(flags_core)` call when using the util flag declaration functions.
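A minimal sketch of how that wiring might look (the import path and helper usage below are illustrative; see `core.py` for the exact API):
```$xslt
File: my_module.py
---------------------------------------
from absl import app as absl_app
from absl import flags
from official.utils.flags import core as flags_core
# Flags declared through the flags_core helpers live in flags_core, so a single
# adoption call surfaces them as key flags in this module's --help output.
flags.adopt_module_key_flags(flags_core)
def main(_):
    pass
absl_app.run(main)
```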
## Validators
Often the constraints on a flag are complicated. absl provides the validator decorator to allow
one to mark a function as a flag validation function. Suppose we want users to provide a flag
which is a palindrome.
```$xslt
from absl import flags
flags.DEFINE_string(name="pal_flag", short_name="pf", default="", help="Give me a palindrome")
@flags.validator("pal_flag")
def _check_pal(provided_pal_flag):
return provided_pal_flag == provided_pal_flag[::-1]
```
A validator passes when it returns True (truthy); anything else
(False, None, or a raised exception) counts as a failure.
## Testing
To test using absl, simply declare flags in the `setUpClass` method of TensorFlow's TestCase.
```$xslt
import unittest
from absl import flags
import tensorflow as tf
from official.utils.flags import core as flags_core
def define_flags():
flags.DEFINE_string(name="test_flag", default="abc", help="an example flag")
class BaseTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(BaseTester, cls).setUpClass()
define_flags()
def test_trivial(self):
    flags_core.parse_flags([__file__, "--test_flag", "def"])
    self.assertEqual(flags.FLAGS.test_flag, "def")
```
|
PyTorch/Translation/Transformer | Transformer | setup | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Extension
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension
import sys
if sys.version_info < (3,):
sys.exit('Sorry, Python3 is required for fairseq.')
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
with open('requirements.txt') as f:
reqs = f.read()
extra_compile_args = {'cxx' : ['-O2']}
extra_compile_args['nvcc'] = ['-O3',
'-I./cutlass/',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_70,code=compute_70',
'-gencode', 'arch=compute_80,code=sm_80',
'-gencode', 'arch=compute_80,code=compute_80',
]
strided_batched_gemm = CUDAExtension(
name='strided_batched_gemm',
sources=['fairseq/modules/strided_batched_gemm/strided_batched_gemm.cpp', 'fairseq/modules/strided_batched_gemm/strided_batched_gemm_cuda.cu'],
extra_compile_args=extra_compile_args
)
batch_utils = CppExtension(
name='fairseq.data.batch_C',
sources=['fairseq/data/csrc/make_batches.cpp'],
extra_compile_args={
'cxx': ['-O2',],
}
)
setup(
name='fairseq',
version='0.5.0',
description='Facebook AI Research Sequence-to-Sequence Toolkit',
long_description=readme,
license=license,
install_requires=reqs.strip().split('\n'),
packages=find_packages(),
ext_modules=[strided_batched_gemm, batch_utils],
cmdclass={
'build_ext': BuildExtension.with_options(use_ninja=False)
},
test_suite='tests',
)
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | core | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from enum import Enum
from typing import Any, Dict, List
import yaml
class CustomDumper(yaml.Dumper):
"""
    Custom YAML dumper to avoid creating aliases
"""
def ignore_aliases(self, data: Dict) -> bool:
return True
class Paths:
"""
Paths mapping inside Triton Container
"""
MODEL_REPOSITORY_PATH = "/mnt/triton-models"
LIBRARIES_PATH = "/mnt/libs"
class Framework(Enum):
"""
Supported frameworks
"""
TensorFlow1 = "TensorFlow1"
TensorFlow2 = "TensorFlow2"
PyTorch = "PyTorch"
class Command:
"""Represents wrapper of raw string command"""
def __init__(self, data: str):
"""
Store command data
Args:
data: string with bash commands to execute
"""
self._data = data
def __str__(self) -> str:
"""
String object representation
Returns:
String
"""
return self._data
class DataObject(object):
"""
Data object representation handling recursive transformation from object to dict
"""
READ_ONLY = set()
def to_dict(self) -> Dict:
"""
Represent object as dictionary
Returns:
Dict
"""
data = dict()
filtered_data = {key: value for key, value in self.__dict__.items() if key not in self.READ_ONLY}
for key, value in filtered_data.items():
data[key] = self._convert_value(value)
return data
def _convert_value(self, value: Any) -> Any:
"""
Convert value based on its type
Args:
value: variable to convert
Returns:
Converted object
"""
if isinstance(value, DataObject):
value = value.to_dict()
elif isinstance(value, dict):
value = self._from_dict(value)
elif isinstance(value, list):
value = self._from_list(value)
elif isinstance(value, Enum):
value = value.value
elif isinstance(value, pathlib.Path):
value = value.as_posix()
return value
def _from_dict(self, values: Dict) -> Any:
"""
Convert dictionary values
Args:
values: dictionary with values
Returns:
Any
"""
data = dict()
for key, value in values.items():
data[key] = self._convert_value(value)
return data
def _from_list(self, values: List) -> Any:
"""
Convert list of values
Args:
values: list with values
Returns:
Any
"""
items = list()
for value in values:
item = self._convert_value(value)
items.append(item)
return items
AVAILABLE_FRAMEWORKS = [f.value for f in Framework]
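# Illustrative usage sketch (not part of the runner itself): a subclass only
# needs to store attributes; to_dict() recursively converts nested DataObjects,
# enums and pathlib.Path objects into plain, YAML-friendly values, e.g.
#
#   class ExampleConfig(DataObject):
#       def __init__(self):
#           self.framework = Framework.PyTorch
#           self.model_path = pathlib.Path(Paths.MODEL_REPOSITORY_PATH)
#
#   yaml.dump(ExampleConfig().to_dict(), Dumper=CustomDumper)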
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | box_coder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.box_coder."""
import tensorflow as tf
from object_detection.core import box_coder
from object_detection.core import box_list
class MockBoxCoder(box_coder.BoxCoder):
"""Test BoxCoder that encodes/decodes using the multiply-by-two function."""
def code_size(self):
return 4
def _encode(self, boxes, anchors):
return 2.0 * boxes.get()
def _decode(self, rel_codes, anchors):
return box_list.BoxList(rel_codes / 2.0)
class BoxCoderTest(tf.test.TestCase):
def test_batch_decode(self):
mock_anchor_corners = tf.constant(
[[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
mock_anchors = box_list.BoxList(mock_anchor_corners)
mock_box_coder = MockBoxCoder()
expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]],
[[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]
encoded_boxes_list = [mock_box_coder.encode(
box_list.BoxList(tf.constant(boxes)), mock_anchors)
for boxes in expected_boxes]
encoded_boxes = tf.stack(encoded_boxes_list)
decoded_boxes = box_coder.batch_decode(
encoded_boxes, mock_box_coder, mock_anchors)
with self.test_session() as sess:
decoded_boxes_result = sess.run(decoded_boxes)
self.assertAllClose(expected_boxes, decoded_boxes_result)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/LanguageModeling/BERT/data | data | PubMedDownloader | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bz2
import glob
import gzip
import os
import urllib.request
import shutil
import sys
class PubMedDownloader:
def __init__(self, subset, save_path):
self.subset = subset
# Modifying self.save_path in two steps to handle creation of subdirectories
self.save_path = save_path + '/pubmed' + '/'
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.save_path = self.save_path + '/' + subset
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.download_urls = {
'baseline' : 'ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/',
'daily_update' : 'ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/',
'fulltext' : 'ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/',
'open_access' : 'ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/'
}
def download(self):
print('subset:', self.subset)
url = self.download_urls[self.subset]
self.download_files(url)
self.extract_files()
def download_files(self, url):
url = self.download_urls[self.subset]
output = os.popen('curl ' + url).read()
if self.subset == 'fulltext' or self.subset == 'open_access':
line_split = 'comm_use' if self.subset == 'fulltext' else 'non_comm_use'
for line in output.splitlines():
if line[-10:] == 'xml.tar.gz' and \
line.split(' ')[-1].split('.')[0] == line_split:
file = os.path.join(self.save_path, line.split(' ')[-1])
if not os.path.isfile(file):
print('Downloading', file)
response = urllib.request.urlopen(url + line.split(' ')[-1])
with open(file, "wb") as handle:
handle.write(response.read())
elif self.subset == 'baseline' or self.subset == 'daily_update':
for line in output.splitlines():
if line[-3:] == '.gz':
file = os.path.join(self.save_path, line.split(' ')[-1])
if not os.path.isfile(file):
print('Downloading', file)
response = urllib.request.urlopen(url + line.split(' ')[-1])
with open(file, "wb") as handle:
handle.write(response.read())
else:
assert False, 'Invalid PubMed dataset/subset specified.'
def extract_files(self):
files = glob.glob(self.save_path + '/*.xml.gz')
for file in files:
print('file:', file)
input = gzip.GzipFile(file, mode='rb')
s = input.read()
input.close()
out = open(file[:-3], mode='wb')
out.write(s)
out.close()
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer | maintainer | maintainer | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from typing import Any, Dict, List, Optional, Union
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .container import Container
class Maintainer(abc.ABC):
@abc.abstractmethod
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> Container:
"""
Return triton container
Args:
command: Triton Server command that has to be executed
image: Container image
            devices: List of device ids which have to be available in the container
volumes: Volumes mapping
environment: Environment variables set in container
            log_file: File path where server logs have to be saved
Returns:
Container object
"""
pass
@abc.abstractmethod
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
pass
|
Tools/PyTorch/TimeSeriesPredictionPlatform/data | data | script_download_data | # Copyright 2021-2022 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Downloads and preprocesses the csv files for the supported datasets. For new
datasets, the download_and_unzip(.) helper can be reused to pull csv files from
an online repository, but subsequent dataset-specific processing may be
required.
Usage:
python3 script_download_data --dataset {DATASET} --output_dir {DIR}
Command line args:
DATASET: Name of dataset to download {e.g. electricity}
  DIR: Path to main dataset directory
"""
from __future__ import absolute_import, division, print_function
import argparse
from cmath import nan
import gc
import os
import sys
import warnings
import numpy as np
import pandas as pd
import pyunpack
import wget
import pickle
from datetime import date, timedelta, datetime
from scipy.spatial import distance_matrix
import dgl
import torch
warnings.filterwarnings("ignore")
# General functions for data downloading & aggregation.
def download_from_url(url, output_path):
"""Downloads a file froma url."""
print("Pulling data from {} to {}".format(url, output_path))
wget.download(url, output_path)
print("done")
def unzip(zip_path, output_file, data_folder):
"""Unzips files and checks successful completion."""
print("Unzipping file: {}".format(zip_path))
pyunpack.Archive(zip_path).extractall(data_folder)
# Checks if unzip was successful
if not os.path.exists(output_file):
raise ValueError(
"Error in unzipping process! {} not found.".format(output_file)
)
def download_and_unzip(url, zip_path, csv_path, data_folder):
"""Downloads and unzips an online csv file.
Args:
url: Web address
zip_path: Path to download zip file
csv_path: Expected path to csv file
data_folder: Folder in which data is stored.
"""
download_from_url(url, zip_path)
unzip(zip_path, csv_path, data_folder)
print("Done.")
# Dataset specific download routines.
def download_electricity(data_folder):
"""Downloads electricity dataset from UCI repository."""
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00321/LD2011_2014.txt.zip"
csv_path = os.path.join(data_folder, "LD2011_2014.txt")
zip_path = csv_path + ".zip"
download_and_unzip(url, zip_path, csv_path, data_folder)
print("Aggregating to hourly data")
df = pd.read_csv(csv_path, index_col=0, sep=";", decimal=",")
df.index = pd.to_datetime(df.index)
df.sort_index(inplace=True)
# Used to determine the start and end dates of a series
output = df.resample("1h").mean().replace(0.0, np.nan)
earliest_time = output.index.min()
# Filter to match range used by other academic papers
output = output[(output.index >= '2014-01-01') & (output.index < '2014-09-08')]
df_list = []
for label in output:
srs = output[label]
if srs.isna().all():
continue
start_date = min(srs.fillna(method="ffill").dropna().index)
end_date = max(srs.fillna(method="bfill").dropna().index)
srs = output[label].fillna(0.0)
tmp = pd.DataFrame({"power_usage": srs})
date = tmp.index
tmp["t"] = (date - earliest_time).seconds / 60 / 60 + (
date - earliest_time
).days * 24
tmp["days_from_start"] = (date - earliest_time).days
tmp["categorical_id"] = label
tmp["date"] = date
tmp["id"] = label
tmp["hour"] = date.hour
tmp["day"] = date.day
tmp["day_of_week"] = date.dayofweek
tmp["month"] = date.month
tmp["power_usage_weight"] = ((date >= start_date) & (date <= end_date))
df_list.append(tmp)
output = pd.concat(df_list, axis=0, join="outer").reset_index(drop=True)
output["categorical_id"] = output["id"].copy()
output["hours_from_start"] = output["t"]
output["categorical_day_of_week"] = output["day_of_week"].copy()
output["categorical_hour"] = output["hour"].copy()
output["power_usage_weight"] = output["power_usage_weight"].apply(lambda b: 1 if b else 0)
output.to_csv(data_folder + "/electricity.csv")
print("Done.")
def download_traffic(data_folder):
"""Downloads traffic dataset from UCI repository."""
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00204/PEMS-SF.zip"
csv_path = os.path.join(data_folder, "PEMS_train")
zip_path = os.path.join(data_folder, "PEMS-SF.zip")
download_and_unzip(url, zip_path, csv_path, data_folder)
print("Aggregating to hourly data")
def process_list(s, variable_type=int, delimiter=None):
"""Parses a line in the PEMS format to a list."""
if delimiter is None:
parsed_list = [
variable_type(i)
for i in s.replace("[", "").replace("]", "").split()
]
else:
parsed_list = [
variable_type(i)
for i in s.replace("[", "").replace("]", "").split(delimiter)
]
return parsed_list
def read_single_list(filename):
"""Returns single list from a file in the PEMS-custom format."""
with open(os.path.join(data_folder, filename), "r") as dat:
parsed_list_from_file = process_list(dat.readlines()[0])
return parsed_list_from_file
def read_matrix(filename):
"""Returns a matrix from a file in the PEMS-custom format."""
array_list = []
with open(os.path.join(data_folder, filename), "r") as dat:
lines = dat.readlines()
for i, line in enumerate(lines):
array = [
process_list(row_split, variable_type=float, delimiter=None)
for row_split in process_list(
line, variable_type=str, delimiter=";"
)
]
array_list.append(array)
return array_list
shuffle_order = np.array(read_single_list("randperm")) - 1 # index from 0
train_dayofweek = read_single_list("PEMS_trainlabels")
train_tensor = read_matrix("PEMS_train")
test_dayofweek = read_single_list("PEMS_testlabels")
test_tensor = read_matrix("PEMS_test")
    # Invert the shuffle order permutation
print("Shuffling")
inverse_mapping = {
new_location: previous_location
for previous_location, new_location in enumerate(shuffle_order)
}
reverse_shuffle_order = np.array(
[
inverse_mapping[new_location]
for new_location, _ in enumerate(shuffle_order)
]
)
    # Group and reorder based on permutation matrix
    print("Reordering")
day_of_week = np.array(train_dayofweek + test_dayofweek)
combined_tensor = np.array(train_tensor + test_tensor)
day_of_week = day_of_week[reverse_shuffle_order]
combined_tensor = combined_tensor[reverse_shuffle_order]
# Put everything back into a dataframe
print("Parsing as dataframe")
labels = ["traj_{}".format(i) for i in read_single_list("stations_list")]
hourly_list = []
for day, day_matrix in enumerate(combined_tensor):
# Hourly data
hourly = pd.DataFrame(day_matrix.T, columns=labels)
hourly["hour_on_day"] = [
int(i / 6) for i in hourly.index
] # sampled at 10 min intervals
if hourly["hour_on_day"].max() > 23 or hourly["hour_on_day"].min() < 0:
raise ValueError(
"Invalid hour! {}-{}".format(
hourly["hour_on_day"].min(), hourly["hour_on_day"].max()
)
)
hourly = hourly.groupby("hour_on_day", as_index=True).mean()[labels]
hourly["sensor_day"] = day
hourly["time_on_day"] = hourly.index
hourly["day_of_week"] = day_of_week[day]
hourly_list.append(hourly)
hourly_frame = pd.concat(hourly_list, axis=0, ignore_index=True, sort=False)
    # Flatten such that each entity uses one row in dataframe
store_columns = [c for c in hourly_frame.columns if "traj" in c]
other_columns = [c for c in hourly_frame.columns if "traj" not in c]
flat_df = pd.DataFrame(
columns=["values", "prev_values", "next_values"]
+ other_columns
+ ["id"]
)
def format_index_string(x):
"""Returns formatted string for key."""
if x < 10:
return "00" + str(x)
elif x < 100:
return "0" + str(x)
elif x < 1000:
return str(x)
raise ValueError("Invalid value of x {}".format(x))
for store in store_columns:
sliced = hourly_frame[[store] + other_columns].copy()
sliced.columns = ["values"] + other_columns
sliced["id"] = int(store.replace("traj_", ""))
# Sort by Sensor-date-time
key = (
sliced["id"].apply(str)
+ sliced["sensor_day"].apply(lambda x: "_" + format_index_string(x))
+ sliced["time_on_day"].apply(
lambda x: "_" + format_index_string(x)
)
)
sliced = sliced.set_index(key).sort_index()
sliced["values"] = sliced["values"].fillna(method="ffill")
sliced["prev_values"] = sliced["values"].shift(1)
sliced["next_values"] = sliced["values"].shift(-1)
flat_df = flat_df.append(sliced.dropna(), ignore_index=True, sort=False)
# Filter to match range used by other academic papers
index = flat_df["sensor_day"]
flat_df = flat_df[index < 173].copy()
    # Creating columns for categorical inputs
flat_df["categorical_id"] = flat_df["id"].copy()
flat_df["hours_from_start"] = (
flat_df["time_on_day"] + flat_df["sensor_day"] * 24.0
)
flat_df["categorical_day_of_week"] = flat_df["day_of_week"].copy()
flat_df["categorical_time_on_day"] = flat_df["time_on_day"].copy()
flat_df.to_csv(data_folder + "/traffic.csv")
def construct_graph(nodes_loc, k=0.8):
"""
    Constructs a graph based on the physical locations of nodes.
    nodes_loc: 2D array num_nodes x dim
    k: similarity threshold; edges with Gaussian-kernel weight below k are dropped
"""
dist_mx = distance_matrix(nodes_loc, nodes_loc)
std = dist_mx.std()
adj_mx = np.exp(-np.square(dist_mx / std))
adj_mx[adj_mx < k] = 0
np.fill_diagonal(adj_mx, 0)
edges = np.nonzero(adj_mx)
graph = dgl.graph(edges, num_nodes=nodes_loc.shape[0])
return graph
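# Illustrative usage (hypothetical coordinates): for 10 sensors placed in 2D,
#   g = construct_graph(np.random.rand(10, 2), k=0.8)
# connects every pair of sensors whose Gaussian-kernel similarity
# exp(-(d/std)^2) is at least k.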
def main(args):
"""Runs main download routine.
    Args:
      args: Parsed command-line arguments containing `dataset` (name of the
        dataset to download) and `output_dir` (folder path for storing data)
"""
print("#### Running download script ###")
download_function = DOWNLOAD_FUNCTIONS[args.dataset]
print("Getting {} data...".format(args.dataset))
subdir = os.path.join(args.output_dir, args.dataset)
print(subdir)
if os.path.exists(subdir):
print(f"Warning: Path {subdir} exists. Overwritting files!", file=sys.stderr)
os.makedirs(subdir, exist_ok=True)
download_function(subdir)
print("Download completed.")
DOWNLOAD_FUNCTIONS = {
"electricity": download_electricity,
"traffic": download_traffic,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Data download configs")
parser.add_argument(
"--dataset",
metavar="DATASET",
type=str,
choices=DOWNLOAD_FUNCTIONS.keys(),
required=True,
help="Dataset name"
)
parser.add_argument(
"--output_dir",
metavar="DIR",
type=str,
default=".",
help="Path to folder for data download",
)
args = parser.parse_args()
main(args)
|
TensorFlow/Segmentation/UNet_Medical/utils/hooks | hooks | profiler_hook | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import tensorflow as tf
import horovod.tensorflow as hvd
from dllogger.autologging import log_hardware
from dllogger.logger import LOGGER
import dllogger.logger as dllg
from dllogger import tags
class ProfilerHook(tf.train.SessionRunHook):
def __init__(self, out_dir, global_batch_size, log_every=10, warmup_steps=20):
LOGGER.set_model_name('UNet_TF')
LOGGER.set_backends([
dllg.JsonBackend(log_file=os.path.join(out_dir, 'dlloger_out.json'),
logging_scope=dllg.Scope.TRAIN_ITER, iteration_interval=1),
dllg.StdOutBackend(log_file=None,
logging_scope=dllg.Scope.TRAIN_ITER, iteration_interval=log_every)
])
self._perf = dllg.AverageMeter()
LOGGER.register_metric('loss', meter=dllg.AverageMeter(), metric_scope=dllg.Scope.TRAIN_ITER)
LOGGER.register_metric('dice_loss', meter=dllg.AverageMeter(), metric_scope=dllg.Scope.TRAIN_ITER)
LOGGER.register_metric('total_loss', meter=dllg.AverageMeter(), metric_scope=dllg.Scope.TRAIN_ITER)
self._warmup_steps = warmup_steps
self._global_batch_size = global_batch_size
self._current_step = 0
def before_run(self, run_context):
LOGGER.iteration_start()
run_args = tf.train.SessionRunArgs(
fetches=[
'UNet/cross_loss_ref:0',
'UNet/dice_loss_ref:0',
'UNet/total_loss_ref:0']
)
self._t0 = time.time()
return run_args
def after_run(self,
run_context,
run_values):
cross_loss, dice_loss, total_loss = run_values.results
batch_time = time.time() - self._t0
ips = self._global_batch_size / batch_time
ips *= hvd.size()
if self._current_step >= self._warmup_steps:
LOGGER.log("iteration", int(self._current_step))
LOGGER.log("loss", float(cross_loss))
LOGGER.log("dice_loss", float(dice_loss))
LOGGER.log("total_loss", float(total_loss))
self._perf.record(ips)
LOGGER.iteration_stop()
self._current_step += 1
def begin(self):
log_hardware(LOGGER)
LOGGER.log(tags.RUN_INIT)
def end(self, session):
LOGGER.log(tags.RUN_FINAL)
LOGGER.finish()
LOGGER.log("average_images_per_second", self._perf.get_value())
|
PyTorch/Classification/ConvNets/image_classification/models | models | model | from dataclasses import dataclass, asdict, replace
from .common import (
SequentialSqueezeAndExcitationTRT,
SequentialSqueezeAndExcitation,
SqueezeAndExcitation,
SqueezeAndExcitationTRT,
)
from typing import Optional, Callable
import os
import torch
import argparse
from functools import partial
@dataclass
class ModelArch:
pass
@dataclass
class ModelParams:
def parser(self, name):
return argparse.ArgumentParser(
description=f"{name} arguments", add_help=False, usage=""
)
@dataclass
class OptimizerParams:
pass
@dataclass
class Model:
constructor: Callable
arch: ModelArch
params: Optional[ModelParams]
optimizer_params: Optional[OptimizerParams] = None
checkpoint_url: Optional[str] = None
def torchhub_docstring(name: str):
return f"""Constructs a {name} model.
    For detailed information on model input and output, training recipes, inference and performance
visit: github.com/NVIDIA/DeepLearningExamples and/or ngc.nvidia.com
Args:
pretrained (bool, True): If True, returns a model pretrained on IMAGENET dataset.
"""
class EntryPoint:
@staticmethod
def create(name: str, model: Model):
ep = EntryPoint(name, model)
ep.__doc__ = torchhub_docstring(name)
return ep
def __init__(self, name: str, model: Model):
self.name = name
self.model = model
def __call__(
self,
pretrained=True,
pretrained_from_file=None,
state_dict_key_map_fn=None,
**kwargs,
):
assert not (pretrained and (pretrained_from_file is not None))
params = replace(self.model.params, **kwargs)
model = self.model.constructor(arch=self.model.arch, **asdict(params))
state_dict = None
if pretrained:
assert self.model.checkpoint_url is not None
state_dict = torch.hub.load_state_dict_from_url(
self.model.checkpoint_url,
map_location=torch.device("cpu"),
progress=True,
)
if pretrained_from_file is not None:
if os.path.isfile(pretrained_from_file):
print(
"=> loading pretrained weights from '{}'".format(
pretrained_from_file
)
)
state_dict = torch.load(
pretrained_from_file, map_location=torch.device("cpu")
)
else:
print(
"=> no pretrained weights found at '{}'".format(
pretrained_from_file
)
)
if state_dict is not None:
state_dict = {
k[len("module.") :] if k.startswith("module.") else k: v
for k, v in state_dict.items()
}
def reshape(t, conv):
if conv:
if len(t.shape) == 4:
return t
else:
return t.view(t.shape[0], -1, 1, 1)
else:
if len(t.shape) == 4:
return t.view(t.shape[0], t.shape[1])
else:
return t
if state_dict_key_map_fn is not None:
state_dict = {
state_dict_key_map_fn(k): v for k, v in state_dict.items()
}
if pretrained and hasattr(model, "ngc_checkpoint_remap"):
remap_fn = model.ngc_checkpoint_remap(url=self.model.checkpoint_url)
state_dict = {remap_fn(k): v for k, v in state_dict.items()}
def _se_layer_uses_conv(m):
return any(
map(
partial(isinstance, m),
[
SqueezeAndExcitationTRT,
SequentialSqueezeAndExcitationTRT,
],
)
)
state_dict = {
k: reshape(
v,
conv=_se_layer_uses_conv(
dict(model.named_modules())[".".join(k.split(".")[:-2])]
),
)
if is_se_weight(k, v)
else v
for k, v in state_dict.items()
}
model.load_state_dict(state_dict)
return model
def parser(self):
if self.model.params is None:
return None
parser = self.model.params.parser(self.name)
parser.add_argument(
"--pretrained-from-file",
default=None,
type=str,
metavar="PATH",
help="load weights from local file",
)
if self.model.checkpoint_url is not None:
parser.add_argument(
"--pretrained",
default=False,
action="store_true",
help="load pretrained weights from NGC",
)
return parser
def is_se_weight(key, value):
return key.endswith("squeeze.weight") or key.endswith("expand.weight")
def create_entrypoint(m: Model):
def _ep(**kwargs):
params = replace(m.params, **kwargs)
return m.constructor(arch=m.arch, **asdict(params))
return _ep
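# Illustrative sketch only -- `resnet50_model` stands in for a concrete `Model` instance that
# would normally be defined elsewhere in this package:
# >>> entrypoint = EntryPoint.create("resnet50", resnet50_model)
# >>> net = entrypoint(pretrained=True)    # loads weights from model.checkpoint_url
# >>> parser = entrypoint.parser()         # argparse parser exposing --pretrained-from-file etc.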
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment | deployment | evaluate_latency | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import pathlib
import base64
import tensorflow as tf
import numpy as np
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
import dataloading.feature_spec
from dataloading.dataloader import create_input_pipelines, get_dataset_metadata
from deployment.hps import constants
from deployment.hps.triton_ensemble_wrapper import NumpyToHpsInputConverter
from deployment.deployment_toolkit.core import EvaluationMode, MeasurementMode, OfflineMode
from deployment.deployment_toolkit.triton_performance_runner import TritonPerformanceRunner
LOGGER = logging.getLogger("run_performance_on_triton")
def b64_tensor(x):
return {'b64': base64.b64encode(x.flatten()).decode("utf-8")}
def create_input_data(sparse_backend, *args, **kwargs):
if sparse_backend == 'hps':
return create_input_data_hps(*args, **kwargs)
elif sparse_backend == 'tf-savedmodel':
return create_input_data_tf(*args, **kwargs)
else:
raise ValueError(f'Unknown sparse backend: {sparse_backend}')
def create_input_data_tf(batch_sizes, dataset_path, dataset_type, feature_spec,
total_benchmark_samples, fused_embedding):
fspec = dataloading.feature_spec.FeatureSpec.from_yaml(
os.path.join(dataset_path, feature_spec)
)
num_tables = len(fspec.get_categorical_sizes())
table_ids = list(range(num_tables))
filename = f"/tmp/triton_input_data_batch.json"
print("generating input data: ", filename)
_, dataloader = create_input_pipelines(dataset_type=dataset_type, dataset_path=dataset_path, train_batch_size=1,
test_batch_size=1, table_ids=table_ids, feature_spec=feature_spec,
rank=0, world_size=1)
generated = 0
samples = []
for sample in dataloader.op():
features, labels = sample
numerical_features, cat_features = features
cat_features = tf.concat(cat_features, axis=1).numpy().astype(np.int32)
numerical_features = numerical_features.numpy().astype(np.float32)
sample = {
"categorical_features": b64_tensor(cat_features),
"numerical_features": b64_tensor(numerical_features),
}
samples.append(sample)
generated += 1
if generated >= total_benchmark_samples:
break
with open(filename, "w") as f:
json.dump(obj={"data": samples}, fp=f, indent=4)
shapes = [
f"categorical_features:{cat_features.shape[1]}",
f"numerical_features:{numerical_features.shape[1]}",
]
input_data = {}
for batch_size in batch_sizes:
input_data[batch_size] = (filename, shapes)
return input_data
def create_input_data_hps(batch_sizes, dataset_path, dataset_type, feature_spec,
total_benchmark_samples, fused_embedding):
input_data = {}
for batch_size in batch_sizes:
filename = f"/tmp/triton_input_data_batch{batch_size}.json"
print("generating input data: ", filename)
shapes = create_input_data_hps_batch(batch_size=batch_size, dst_path=filename, dataset_path=dataset_path,
dataset_type=dataset_type, feature_spec=feature_spec,
total_benchmark_samples=total_benchmark_samples,
fused_embedding=fused_embedding)
input_data[batch_size] = (filename, shapes)
return input_data
def create_input_data_hps_batch(batch_size, dst_path, dataset_path, dataset_type, feature_spec,
total_benchmark_samples, fused_embedding):
fspec = dataloading.feature_spec.FeatureSpec.from_yaml(
os.path.join(dataset_path, feature_spec)
)
num_tables = len(fspec.get_categorical_sizes())
table_ids = list(range(num_tables))
converter = NumpyToHpsInputConverter(categorical_sizes=fspec.get_categorical_sizes(),
fused_embedding=fused_embedding)
_, dataloader = create_input_pipelines(dataset_type=dataset_type, dataset_path=dataset_path,
train_batch_size=batch_size, test_batch_size=batch_size,
table_ids=table_ids, feature_spec=feature_spec, rank=0, world_size=1)
generated = 0
batches = []
for batch in dataloader.op():
features, labels = batch
numerical_features, cat_features = features
key_tensor, nkey_tensor, numerical_features = converter(
numerical_features, cat_features
)
batch = {
constants.key_global_prefix: b64_tensor(key_tensor),
constants.numkey_global_prefix: b64_tensor(nkey_tensor),
constants.ens_numerical_features_name: b64_tensor(numerical_features)
}
batches.append(batch)
generated += batch_size
if generated >= total_benchmark_samples:
break
with open(dst_path, "w") as f:
json.dump(obj={"data": batches}, fp=f, indent=4)
shapes = [
f"{constants.key_global_prefix}:{key_tensor.shape[1]}",
f"{constants.numkey_global_prefix}:{nkey_tensor.shape[1]}",
f"{constants.ens_numerical_features_name}:{numerical_features.shape[1]}",
]
return shapes
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-name",
type=str,
required=True,
help="Name of the model to test",
)
parser.add_argument(
"--result-path",
type=pathlib.Path,
required=True,
help="Path where results files is stored.",
)
parser.add_argument(
"--server-url",
type=str,
default="http://127.0.0.1:8000",
help="Url to Triton server",
)
parser.add_argument(
"--model-version",
type=str,
default=1,
help="Version of model",
)
parser.add_argument(
"--sparse-format",
type=str,
help="Target format of dense model part in ensemble.",
choices=["tf-savedmodel", "hps"],
required=True,
default="tf-savedmodel",
)
parser.add_argument(
"--fused-embedding",
action="store_true",
help="Use the fused embedding API for HPS",
)
parser.add_argument(
"--batch-sizes",
type=int,
default=[256, 512, 1024, 2048, 4096, 8192, 16384, 32768],
help="List of batch sizes to test.",
nargs="*",
)
parser.add_argument(
"--concurrency",
type=int,
default=[1],
help="List of concurrency modes.",
nargs="*",
)
parser.add_argument(
"--measurement-mode",
choices=[item.value for item in MeasurementMode],
default=MeasurementMode.COUNT_WINDOWS.value,
type=str,
help="Select measurement mode "
"'time_windows' stabilize performance on measurement window. "
"'count_windows' stabilize performance on number of samples.",
)
parser.add_argument(
"--measurement-interval",
help="Time window perf_analyzer will wait to stabilize the measurement",
default=1000,
type=int,
)
parser.add_argument(
"--measurement-request-count",
help="Number of samples on which perf_analyzer will stabilize the measurement",
default=20,
type=int,
)
parser.add_argument(
"--evaluation-mode",
choices=[item.value for item in EvaluationMode],
default=EvaluationMode.OFFLINE.value,
type=str,
help="Select evaluation mode "
"'offline' run offline analysis and use GPU memory to pass tensors. "
"'online' run online analysis and use HTTP protocol.",
)
parser.add_argument(
"--offline-mode",
choices=[item.value for item in OfflineMode],
default=OfflineMode.SYSTEM.value,
type=str,
help="Select offline mode "
"'system' pass tensors through CPU RAM memory. "
"'cuda' pass tensors through GPU RAM memory.",
)
parser.add_argument(
"--output-shared-memory-size",
default=524288,
type=int,
help="Size of memory buffer allocated for output with dynamic shapes in bytes. "
"Has to be equal to maximal size of output tensor.",
)
parser.add_argument(
"--warmup",
help="Enable model warmup before performance test",
action="store_true",
default=False,
)
parser.add_argument(
"--timeout",
help="Timeout for performance analysis",
type=int,
default=None,
required=False,
)
parser.add_argument(
"-v",
"--verbose",
help="Verbose logs",
action="store_true",
default=False,
)
# dataset and dataloading settings
parser.add_argument(
"--dataset_path", default=None, required=True, help="Path to dataset directory"
)
parser.add_argument(
"--feature_spec",
default="feature_spec.yaml",
help="Name of the feature spec file in the dataset directory",
)
parser.add_argument(
"--dataset_type",
default="tf_raw",
choices=["tf_raw", "synthetic", "split_tfrecords"],
help="The type of the dataset to use",
)
parser.add_argument(
"--num-benchmark-samples",
default=2**18,
type=int,
help="The type of the dataset to use",
)
args = parser.parse_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
input_data = create_input_data(sparse_backend=args.sparse_format,
batch_sizes=args.batch_sizes, dataset_path=args.dataset_path,
dataset_type=args.dataset_type, feature_spec=args.feature_spec,
total_benchmark_samples=args.num_benchmark_samples,
fused_embedding=args.fused_embedding)
runner = TritonPerformanceRunner(
server_url=args.server_url,
model_name=args.model_name,
input_data=input_data,
batch_sizes=args.batch_sizes,
measurement_mode=MeasurementMode(args.measurement_mode),
measurement_interval=args.measurement_interval,
measurement_request_count=args.measurement_request_count,
concurrency=args.concurrency,
evaluation_mode=EvaluationMode(args.evaluation_mode),
offline_mode=OfflineMode(args.offline_mode),
output_shared_memory_size=args.output_shared_memory_size,
result_path=args.result_path,
warmup=args.warmup,
timeout=args.timeout,
verbose=args.verbose,
flattened_input=args.sparse_format == 'hps'
)
runner.run()
for _, (filename, _) in input_data.items():
if os.path.exists(filename):
os.remove(filename)
if __name__ == "__main__":
main()
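# Example (hypothetical) invocation against a running Triton server:
#   python -m deployment.evaluate_latency \
#       --model-name dlrm_ensemble --result-path /results/latency \
#       --sparse-format tf-savedmodel --dataset_path /data/criteo \
#       --batch-sizes 2048 8192 --concurrency 1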
|
PyTorch/Forecasting/TFT/triton/runner/maintainer | maintainer | maintainer | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from typing import Any, Dict, List, Optional, Union
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .container import Container
class Maintainer(abc.ABC):
@abc.abstractmethod
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> Container:
"""
Return triton container
Args:
command: Triton Server command that has to be executed
image: Container image
            devices: List of device ids which have to be available in the container
volumes: Volumes mapping
environment: Environment variables set in container
            log_file: File path where server logs have to be saved
Returns:
Container object
"""
pass
@abc.abstractmethod
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
pass
|
PyTorch/Recommendation/NCF | NCF | requirements | pandas>=0.24.2
tqdm
pyyaml
git+https://github.com/NVIDIA/dllogger#egg=dllogger
|
TensorFlow/Detection/SSD/models/research/object_detection | object_detection | model_hparams | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameters for the object detection model in TF.learn.
This file consolidates and documents the hyperparameters used by the model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def create_hparams(hparams_overrides=None):
"""Returns hyperparameters, including any flag value overrides.
Args:
hparams_overrides: Optional hparams overrides, represented as a
string containing comma-separated hparam_name=value pairs.
Returns:
The hyperparameters as a tf.HParams object.
"""
hparams = tf.contrib.training.HParams(
# Whether a fine tuning checkpoint (provided in the pipeline config)
# should be loaded for training.
load_pretrained=True)
# Override any of the preceding hyperparameter values.
if hparams_overrides:
hparams = hparams.parse(hparams_overrides)
return hparams
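# Example usage sketch:
# >>> hparams = create_hparams()                         # defaults: load_pretrained=True
# >>> hparams = create_hparams('load_pretrained=false')  # override via comma-separated pairs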
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | postNetInstance | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "postNetInstance.h"
#include "cudaUtils.h"
#include "trtUtils.h"
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
PostNetInstance::PostNetInstance(TRTPtr<ICudaEngine> engine) :
TimedObject("PostNetInstance::infer()"),
EngineDriver(std::move(engine)),
mBinding(),
mContext(getEngine().createExecutionContext())
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void PostNetInstance::infer(
cudaStream_t stream, const int batchSize, const void* const inputDevice, void* const outputDevice)
{
startTiming();
const ICudaEngine& engine = mContext->getEngine();
mBinding.setBinding(engine, INPUT_NAME, inputDevice);
mBinding.setBinding(engine, OUTPUT_NAME, outputDevice);
if (!mContext->enqueue(batchSize, mBinding.getBindings(), stream, nullptr))
{
throw std::runtime_error("Failed to run post net.");
}
CudaUtils::sync(stream);
stopTiming();
}
int PostNetInstance::getMelChunkSize() const
{
return TRTUtils::getBindingDimension(getEngine(), INPUT_NAME, 2);
}
int PostNetInstance::getNumMelChannels() const
{
return TRTUtils::getBindingDimension(getEngine(), INPUT_NAME, 1);
}
int PostNetInstance::getOutputSize() const
{
return TRTUtils::getBindingSize(getEngine(), OUTPUT_NAME);
}
} // namespace tts
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading | dataloading | prepare_synthetic_dataset | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import os
import tqdm
from absl import app, flags
from .defaults import DTYPE_SELECTOR, TRAIN_MAPPING, TEST_MAPPING
from .synthetic_dataset import SyntheticDataset
from .feature_spec import FeatureSpec
FLAGS = flags.FLAGS
flags.DEFINE_integer("synthetic_dataset_num_entries",
default=int(32768 * 1024), # 1024 batches for single-GPU training by default
help="Number of samples per epoch for the synthetic dataset."
"This is rounded down to a multiple of batch size")
flags.DEFINE_integer("synthetic_dataset_batch_size",
default=int(32768), help="Batch size - number of unique records")
flags.DEFINE_integer("num_numerical_features", default=13,
help="Number of numerical features in the dataset. Defaults to 13 for the Criteo Terabyte Dataset")
flags.DEFINE_list("synthetic_dataset_table_sizes", default=','.join(26 * [str(10 ** 5)]),
help="Cardinality of each categorical feature")
flags.DEFINE_string("feature_spec", default=None,
help="Feature specification file describing the desired dataset."
"Only feature_spec and channel_spec sections are required and used."
"Overrides num_numerical_features and synthetic_dataset_table_sizes")
flags.DEFINE_string("synthetic_dataset_dir", default="/tmp/dlrm_synthetic_data",
help="Destination of the saved synthetic dataset")
flags.DEFINE_integer("seed", default=12345, help="Set a seed for generating synthetic data")
def write_dataset_to_disk(dataset_train, dataset_test, feature_spec: FeatureSpec) -> None:
feature_spec.check_feature_spec() # We rely on the feature spec being properly formatted
categorical_features_list = feature_spec.get_categorical_feature_names()
categorical_features_types = [feature_spec.feature_spec[feature_name][DTYPE_SELECTOR]
for feature_name in categorical_features_list]
number_of_numerical_features = feature_spec.get_number_of_numerical_features()
number_of_categorical_features = len(categorical_features_list)
for mapping_name, dataset in zip((TRAIN_MAPPING, TEST_MAPPING),
(dataset_train, dataset_test)):
file_streams = []
label_path, numerical_path, categorical_paths = feature_spec.get_mapping_paths(mapping_name)
try:
os.makedirs(os.path.dirname(numerical_path), exist_ok=True)
numerical_f = open(numerical_path, "wb+")
file_streams.append(numerical_f)
os.makedirs(os.path.dirname(label_path), exist_ok=True)
label_f = open(label_path, 'wb+')
file_streams.append(label_f)
categorical_fs = []
for feature_name in categorical_features_list:
local_path = categorical_paths[feature_name]
os.makedirs(os.path.dirname(local_path), exist_ok=True)
fs = open(local_path, 'wb+')
categorical_fs.append(fs)
file_streams.append(fs)
pipe = iter(dataset.op())
for _ in tqdm.tqdm(
range(len(dataset)), desc=mapping_name + " dataset saving"):
(numerical, categorical), label = pipe.get_next()
categoricals = tf.split(categorical, number_of_categorical_features, axis=1)
assert (numerical.shape[-1] == number_of_numerical_features)
assert (len(categoricals) == number_of_categorical_features)
numerical_f.write(numerical.numpy().astype('float16').tobytes()) # numerical is always float16
label_f.write(label.numpy().astype('bool').tobytes()) # label is always boolean
for cat_type, cat_tensor, cat_file in zip(categorical_features_types, categoricals, categorical_fs):
cat_file.write(cat_tensor.numpy().astype(cat_type).tobytes())
finally:
for stream in file_streams:
stream.close()
feature_spec.to_yaml()
def main(argv):
tf.random.set_seed(FLAGS.seed)
number_of_entries = FLAGS.synthetic_dataset_num_entries
batch_size = FLAGS.synthetic_dataset_batch_size
number_of_batches = number_of_entries // batch_size
if FLAGS.feature_spec is not None:
fspec = FeatureSpec.from_yaml(FLAGS.feature_spec)
else:
cardinalities = [int(s) for s in FLAGS.synthetic_dataset_table_sizes]
fspec = FeatureSpec.get_default_feature_spec(number_of_numerical_features=FLAGS.num_numerical_features,
categorical_feature_cardinalities=cardinalities)
fspec.base_directory = FLAGS.synthetic_dataset_dir
fspec.check_feature_spec()
number_of_numerical_features = fspec.get_number_of_numerical_features()
categorical_feature_sizes = fspec.get_categorical_sizes()
train_dataset = SyntheticDataset(batch_size=batch_size, num_numerical_features=number_of_numerical_features,
categorical_feature_cardinalities=categorical_feature_sizes,
num_batches=number_of_batches)
test_dataset = SyntheticDataset(batch_size=batch_size, num_numerical_features=number_of_numerical_features,
categorical_feature_cardinalities=categorical_feature_sizes,
num_batches=number_of_batches)
write_dataset_to_disk(
dataset_train=train_dataset,
dataset_test=test_dataset,
feature_spec=fspec
)
if __name__ == '__main__':
app.run(main)
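# Example (hypothetical) invocation; run as a module because of the relative imports above:
#   python -m dataloading.prepare_synthetic_dataset \
#       --synthetic_dataset_dir /data/dlrm_synthetic \
#       --synthetic_dataset_num_entries 4194304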
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/solver | solver | build | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import torch
from apex.optimizers import FusedSGD
from .lr_scheduler import WarmupMultiStepLR
def make_optimizer(cfg, model):
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if "bias" in key:
lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
optimizer = FusedSGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)
return optimizer
def make_lr_scheduler(cfg, optimizer):
return WarmupMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
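# Typical usage sketch (cfg and model are assumed to exist in the training script):
# >>> optimizer = make_optimizer(cfg, model)
# >>> scheduler = make_lr_scheduler(cfg, optimizer)
# >>> scheduler.step()   # advance the warmup/multi-step schedule, typically once per iteration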
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | faster_rcnn_inception_resnet_v2_atrous_coco | # Faster R-CNN with Inception Resnet v2, Atrous version;
# Configured for MSCOCO Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
model {
faster_rcnn {
num_classes: 90
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_inception_resnet_v2'
first_stage_features_stride: 8
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 8
width_stride: 8
}
}
first_stage_atrous_rate: 2
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 17
maxpool_kernel_size: 1
maxpool_stride: 1
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
}
}
train_config: {
batch_size: 1
optimizer {
momentum_optimizer: {
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 0.0003
schedule {
step: 900000
learning_rate: .00003
}
schedule {
step: 1200000
learning_rate: .000003
}
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
from_detection_checkpoint: true
# Note: The below line limits the training process to 200K steps, which we
  # empirically found to be sufficient for training. This
# effectively bypasses the learning rate schedule (the learning rate will
# never decay). Remove the below line to train indefinitely.
num_steps: 200000
data_augmentation_options {
random_horizontal_flip {
}
}
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
}
eval_config: {
num_examples: 8000
# Note: The below line limits the evaluation process to 10 evaluations.
# Remove the below line to evaluate indefinitely.
max_evals: 10
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
PaddlePaddle/LanguageModeling/BERT | BERT | program | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import logging
import shutil
import paddle
import paddle.distributed.fleet as fleet
from modeling import BertForPretraining, BertConfig
from loss import BertPretrainingCriterion
from utils.save_load import save_model
from utils.utility import get_trainer_id
from lr_scheduler import build_lr_scheduler
from optimizer import build_optimizer
import dllogger
def create_pretraining_data_holder():
input_ids = paddle.static.data(
name="input_ids", shape=[-1, -1], dtype="int64")
token_type_ids = paddle.static.data(
name="token_type_ids", shape=[-1, -1], dtype="int64")
attention_mask = paddle.static.data(
name="attention_mask", shape=[-1, 1, 1, -1], dtype="int64")
next_sentence_labels = paddle.static.data(
name="next_sentence_labels", shape=[-1, 1], dtype="int64")
masked_lm_labels = paddle.static.data(
name="masked_lm_labels", shape=[-1, -1], dtype="int64")
return [
input_ids, token_type_ids, attention_mask, next_sentence_labels,
masked_lm_labels
]
def create_strategy(args, use_distributed_fused_lamb=False):
"""
Create paddle.static.BuildStrategy and paddle.static.ExecutionStrategy with arguments.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
use_distributed_fused_lamb(bool, optional): Whether to use distributed fused lamb.
Returns:
        build_strategy(paddle.static.BuildStrategy): An instance of BuildStrategy.
        exec_strategy(paddle.static.ExecutionStrategy): An instance of ExecutionStrategy.
"""
build_strategy = paddle.static.BuildStrategy()
exec_strategy = paddle.static.ExecutionStrategy()
build_strategy.enable_addto = True
if args.amp:
build_strategy.fuse_gemm_epilogue = True
build_strategy.fuse_dot_product_attention = args.fuse_mha
if use_distributed_fused_lamb:
build_strategy.fuse_all_reduce_ops = False
build_strategy.reduce_strategy = paddle.static.BuildStrategy.ReduceStrategy._NoReduce
else:
build_strategy.fuse_all_reduce_ops = True
build_strategy.reduce_strategy = paddle.static.BuildStrategy.ReduceStrategy.AllReduce
exec_strategy.num_threads = 1
exec_strategy.num_iteration_per_drop_scope = 10000
return build_strategy, exec_strategy
def dist_optimizer(args, optimizer):
"""
Create a distributed optimizer based on a given optimizer.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
optimizer(paddle.optimizer): A normal optimizer.
Returns:
optimizer(fleet.distributed_optimizer): A distributed optimizer.
"""
use_distributed_fused_lamb = True if args.optimizer == 'DistributedFusedLamb' else False
build_strategy, exec_strategy = create_strategy(args,
use_distributed_fused_lamb)
dist_strategy = fleet.DistributedStrategy()
if use_distributed_fused_lamb:
dist_strategy.gradient_scale_configs = {'scale_strategy': 'sum'}
dist_strategy.execution_strategy = exec_strategy
dist_strategy.build_strategy = build_strategy
if use_distributed_fused_lamb:
dist_strategy.fuse_all_reduce_ops = False
else:
dist_strategy.fuse_all_reduce_ops = True
dist_strategy.fuse_grad_size_in_MB = 0
if args.amp:
dist_strategy.amp = True
custom_white_list = ['softmax', 'layer_norm', 'gelu']
custom_black_list = ['lookup_table',
'lookup_table_v2'] if args.use_pure_fp16 else None
dist_strategy.amp_configs = {
'custom_white_list': custom_white_list,
'custom_black_list': custom_black_list,
'init_loss_scaling': args.scale_loss,
'use_dynamic_loss_scaling': True,
'incr_every_n_steps': 2000,
'decr_every_n_nan_or_inf': 1,
'incr_ratio': 2.0,
'decr_ratio': 0.5,
'use_pure_fp16': args.use_pure_fp16,
'use_fp16_guard': args.use_pure_fp16
}
if not use_distributed_fused_lamb and args.gradient_merge_steps > 1:
dist_strategy.gradient_merge = True
dist_strategy.gradient_merge_configs = {
'k_steps': args.gradient_merge_steps
}
optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
return optimizer
def build(args, main_prog, startup_prog, is_train=True):
"""
    Build an executable paddle.static.Program via the following steps:
1. Create feeds.
2. Create model.
3. Create loss.
4. Create optimizer if is_train==True.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
        main_prog(paddle.static.Program): The main program.
        startup_prog(paddle.static.Program): The startup program.
        is_train(bool, optional): Whether the main program created is for training. Default: True.
Returns:
model(paddle.nn.Layer): An instance of BERT Model defined in modeling.py.
lr_scheduler(paddle.optimizer.lr.LRScheduler): A learning rate scheduler.
optimizer(Optimizer): An optimizer with distributed/AMP strategy.
loss(variable): The output variable of loss function.
        feeds(dict): A dict mapping variables' names to their values.
"""
with paddle.static.program_guard(main_prog, startup_prog):
with paddle.utils.unique_name.guard():
feeds = create_pretraining_data_holder()
[
input_ids, token_type_ids, attention_mask,
next_sentence_labels, masked_lm_labels
] = feeds
bert_config = BertConfig.from_json_file(args.config_file)
if bert_config.vocab_size % 8 != 0:
bert_config.vocab_size += 8 - (bert_config.vocab_size % 8)
bert_config.fuse_mha = args.fuse_mha
model = BertForPretraining(bert_config)
criterion = BertPretrainingCriterion(bert_config.vocab_size)
prediction_scores, seq_relationship_score = model(
input_ids=input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
masked_lm_labels=masked_lm_labels)
loss = criterion(prediction_scores, seq_relationship_score,
masked_lm_labels, next_sentence_labels)
lr_scheduler = None
optimizer = None
if is_train:
lr_scheduler = build_lr_scheduler(args)
optimizer = build_optimizer(args, lr_scheduler)
optimizer = dist_optimizer(args, optimizer)
optimizer.minimize(loss)
return model, lr_scheduler, optimizer, loss, feeds
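# A minimal usage sketch (assumes `args` has been parsed and fleet.init() has already been called):
# >>> main_prog = paddle.static.Program()
# >>> startup_prog = paddle.static.Program()
# >>> model, lr_scheduler, optimizer, loss, feeds = build(args, main_prog, startup_prog, is_train=True)
# >>> exe = paddle.static.Executor(paddle.CUDAPlace(0))
# >>> exe.run(startup_prog)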
def run(exe,
program,
args,
lr_scheduler,
loss,
train_dataloader,
progress=None):
"""
Execute program.
Args:
        exe(paddle.static.Executor): An executor to run the program.
program(paddle.static.Program): The program to be executed.
args(Namespace): Arguments obtained from ArgumentParser.
lr_scheduler(paddle.optimizer.lr.LRScheduler): A learning rate scheduler.
Default: None.
loss(variable): The output variable of loss function.
progress(dict, optional): A dict to record the training progress of checkpoint.
Returns:
global_step(int): Final step id of this run.
loss_return(float): Final loss of this run.
train_time_raw(float): Time to train of this run.
"""
trainer_id = get_trainer_id()
batch_size_per_gpu = args.batch_size
log_steps = args.log_freq
save_steps = args.num_steps_per_checkpoint
gradient_merge_steps = args.gradient_merge_steps
most_recent_ckpts_paths = []
last_step = args.last_step_of_checkpoint
train_iter = 0
epoch = 0
train_time_raw = 0
if progress is None:
progress = dict()
else:
epoch = progress.get('epoch', 0)
global_step = 0 + last_step
logging.info(f"Training will start at the {last_step+1}th step")
max_steps = args.max_steps
steps_this_run = max_steps
if args.steps_this_run is not None:
if args.steps_this_run + last_step > max_steps:
logging.info(
f"Only {max_steps - last_step} steps will be performed in this run due to the limit of --max-steps."
)
else:
steps_this_run = args.steps_this_run
max_steps = steps_this_run + last_step
logging.warning(
f"{steps_this_run} steps will be performed in this run.")
if args.benchmark:
max_steps = args.benchmark_warmup_steps + args.benchmark_steps + last_step
total_samples = 0
raw_train_start = time.time()
step_start = time.time()
avg_loss = 0
while True:
for batch in train_dataloader:
train_iter += 1
loss_return = exe.run(program, feed=batch, fetch_list=[loss])
total_samples += batch_size_per_gpu
avg_loss += loss_return[0].item()
lr = lr_scheduler.get_lr()
if train_iter % (log_steps * gradient_merge_steps) == 0:
step_cost = time.time() - step_start
dllogger_it_data = {
'loss': avg_loss / gradient_merge_steps,
'learning_rate': lr,
'step_cost': step_cost,
'step_samples': total_samples,
'seqs_per_sec': total_samples / step_cost,
}
dllogger.log((epoch, global_step + 1), data=dllogger_it_data)
total_samples = 0
step_start = time.time()
if train_iter % gradient_merge_steps == 0:
global_step += 1
lr_scheduler.step()
avg_loss = 0
if args.benchmark and train_iter == (args.benchmark_warmup_steps *
gradient_merge_steps):
raw_train_start = time.time()
if train_iter % (save_steps * gradient_merge_steps
) == 0 or global_step >= max_steps:
train_time_raw = time.time() - raw_train_start
if trainer_id == 0:
model_path = os.path.join(
args.output_dir, args.bert_model, "phase1"
if args.phase1 else "phase2", f"{global_step}")
progress = {
'epoch': epoch,
'global_step': global_step,
'phase': 1 if args.phase1 else 2,
}
save_model(program, model_path, args.model_prefix,
progress)
most_recent_ckpts_paths.append(model_path)
if len(most_recent_ckpts_paths) > 3:
ckpt_to_be_removed = most_recent_ckpts_paths.pop(0)
shutil.rmtree(ckpt_to_be_removed)
if global_step >= max_steps:
actual_steps_this_run = global_step - last_step
return global_step, actual_steps_this_run, loss_return[0].item(), train_time_raw
epoch += 1
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs/caffe2 | caffe2 | e2e_faster_rcnn_R_50_FPN_1x_caffe2 | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "catalog://Caffe2Detectron/COCO/35857345/e2e_faster_rcnn_R-50-FPN_1x"
BACKBONE:
CONV_BODY: "R-50-FPN"
OUT_CHANNELS: 256
RPN:
USE_FPN: True
ANCHOR_STRIDE: (4, 8, 16, 32, 64)
PRE_NMS_TOP_N_TRAIN: 2000
PRE_NMS_TOP_N_TEST: 1000
POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TEST: 1000
ROI_HEADS:
USE_FPN: True
ROI_BOX_HEAD:
POOLER_RESOLUTION: 7
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
POOLER_SAMPLING_RATIO: 2
FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"
PREDICTOR: "FPNPredictor"
DATASETS:
TEST: ("coco_2014_minival",)
DATALOADER:
SIZE_DIVISIBILITY: 32
|
PyTorch/Detection/SSD/examples | examples | SSD300_FP32_1GPU | # This script launches SSD300 training in FP32 on 1 GPUs using 32 batch size
# Usage ./SSD300_FP32_1GPU.sh <path to this repository> <path to dataset> <additional flags>
python $1/main.py --backbone resnet50 --bs 32 --warmup 300 --no-amp --data-layout channels_first --data $2 ${@:3}
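# Example (the trailing flags are illustrative and are forwarded to main.py unchanged):
#   ./examples/SSD300_FP32_1GPU.sh . /coco --epochs 65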
|
TensorFlow2/Classification/ConvNets/dataloader | dataloader | Dali | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow as tf
import horovod.tensorflow.keras as hvd
from nvidia import dali
import nvidia.dali.plugin.tf as dali_tf
import numpy as np
class DaliPipeline(dali.pipeline.Pipeline):
def __init__(
self,
tfrec_filenames,
tfrec_idx_filenames,
height,
width,
batch_size,
num_threads,
device_id,
shard_id,
num_gpus,
num_classes,
deterministic=False,
dali_cpu=True,
training=True
):
kwargs = dict()
if deterministic:
kwargs['seed'] = 7 * (1 + hvd.rank())
super(DaliPipeline, self).__init__(batch_size, num_threads, device_id, **kwargs)
self.training = training
self.input = dali.ops.TFRecordReader(
path=tfrec_filenames,
index_path=tfrec_idx_filenames,
random_shuffle=True,
shard_id=shard_id,
num_shards=num_gpus,
initial_fill=10000,
features={
'image/encoded': dali.tfrecord.FixedLenFeature((), dali.tfrecord.string, ""),
'image/class/label': dali.tfrecord.FixedLenFeature([1], dali.tfrecord.int64, -1),
'image/class/text': dali.tfrecord.FixedLenFeature([], dali.tfrecord.string, ''),
'image/object/bbox/xmin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/xmax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0)
}
)
if self.training:
self.decode = dali.ops.ImageDecoderRandomCrop(
device="cpu" if dali_cpu else "mixed",
output_type=dali.types.RGB,
random_aspect_ratio=[0.75, 1.33],
random_area=[0.05, 1.0],
num_attempts=100
)
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_x=width, resize_y=height)
else:
self.decode = dali.ops.ImageDecoder(
device="cpu",
output_type=dali.types.RGB
)
            # Make sure that every image is at least 224x224 for CropMirrorNormalize
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_x=width, resize_y=height)
self.normalize = dali.ops.CropMirrorNormalize(
device="gpu",
output_dtype=dali.types.FLOAT,
image_type=dali.types.RGB,
output_layout=dali.types.NHWC,
mirror=1 if self.training else 0
)
self.one_hot = dali.ops.OneHot(num_classes=num_classes)
self.shapes = dali.ops.Shapes(type=dali.types.INT32)
self.crop = dali.ops.Crop(device="gpu")
self.cast_float = dali.ops.Cast(dtype=dali.types.FLOAT)
self.extract_h = dali.ops.Slice(normalized_anchor=False, normalized_shape=False, axes=[0])
self.extract_w = dali.ops.Slice(normalized_anchor=False, normalized_shape=False, axes=[0])
def define_graph(self):
# Read images and labels
inputs = self.input(name="Reader")
images = inputs["image/encoded"]
labels = inputs["image/class/label"]
labels -= 1
labels = self.one_hot(labels).gpu()
# Decode and augmentation
images = self.decode(images)
if not self.training:
shapes = self.shapes(images)
h = self.extract_h(shapes, dali.types.Constant(np.array([0], dtype=np.float32)), dali.types.Constant(np.array([1], dtype=np.float32)))
w = self.extract_w(shapes, dali.types.Constant(np.array([1], dtype=np.float32)), dali.types.Constant(np.array([1], dtype=np.float32)))
CROP_PADDING = 32
CROP_H = h * h / (h + CROP_PADDING)
CROP_W = w * w / (w + CROP_PADDING)
CROP_H = self.cast_float(CROP_H)
CROP_W = self.cast_float(CROP_W)
images = images.gpu()
images = self.crop(images, crop_h = CROP_H, crop_w = CROP_W)
images = self.resize(images)
images = self.normalize(images)
return (images, labels)
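# Illustrative construction sketch (file lists, shapes and ranks are placeholders):
# >>> pipe = DaliPipeline(tfrec_filenames=train_files, tfrec_idx_filenames=train_idx_files,
# ...                     height=224, width=224, batch_size=128, num_threads=4,
# ...                     device_id=hvd.local_rank(), shard_id=hvd.rank(), num_gpus=hvd.size(),
# ...                     num_classes=1000, training=True)
# >>> dataset = dali_tf.DALIDataset(pipeline=pipe, batch_size=128, device_id=hvd.local_rank(),
# ...                               output_shapes=((128, 224, 224, 3), (128, 1000)),
# ...                               output_dtypes=(tf.float32, tf.float32))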
|
TensorFlow/Detection/SSD/models/research/slim/datasets | datasets | download_and_convert_mnist | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts MNIST data to TFRecords of TF-Example protos.
This module downloads the MNIST data, uncompresses it, reads the files
that make up the MNIST data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import numpy as np
from six.moves import urllib
import tensorflow as tf
from datasets import dataset_utils
# The URLs where the MNIST data can be downloaded.
_DATA_URL = 'http://yann.lecun.com/exdb/mnist/'
_TRAIN_DATA_FILENAME = 'train-images-idx3-ubyte.gz'
_TRAIN_LABELS_FILENAME = 'train-labels-idx1-ubyte.gz'
_TEST_DATA_FILENAME = 't10k-images-idx3-ubyte.gz'
_TEST_LABELS_FILENAME = 't10k-labels-idx1-ubyte.gz'
_IMAGE_SIZE = 28
_NUM_CHANNELS = 1
# The names of the classes.
_CLASS_NAMES = [
'zero',
'one',
'two',
'three',
'four',
'five',
    'six',
'seven',
'eight',
'nine',
]
def _extract_images(filename, num_images):
"""Extract the images into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
"""
print('Extracting images from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(
_IMAGE_SIZE * _IMAGE_SIZE * num_images * _NUM_CHANNELS)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
return data
def _extract_labels(filename, num_labels):
"""Extract the labels into a vector of int64 label IDs.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
A numpy array of shape [number_of_labels]
"""
print('Extracting labels from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
def _add_to_tfrecord(data_filename, labels_filename, num_images,
tfrecord_writer):
"""Loads data from the binary MNIST files and writes files to a TFRecord.
Args:
data_filename: The filename of the MNIST images.
labels_filename: The filename of the MNIST labels.
num_images: The number of images in the dataset.
tfrecord_writer: The TFRecord writer to use for writing.
"""
images = _extract_images(data_filename, num_images)
labels = _extract_labels(labels_filename, num_images)
shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=shape)
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for j in range(num_images):
sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, num_images))
sys.stdout.flush()
png_string = sess.run(encoded_png, feed_dict={image: images[j]})
example = dataset_utils.image_to_tfexample(
png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The directory where the temporary files are stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/mnist_%s.tfrecord' % (dataset_dir, split_name)
def _download_dataset(dataset_dir):
"""Downloads MNIST locally.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
print('Downloading file %s...' % filename)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %.1f%%' % (
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(_DATA_URL + filename,
filepath,
_progress)
print()
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
training_filename = _get_output_filename(dataset_dir, 'train')
testing_filename = _get_output_filename(dataset_dir, 'test')
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
_download_dataset(dataset_dir)
# First, process the training data:
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TRAIN_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TRAIN_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 60000, tfrecord_writer)
# Next, process the testing data:
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TEST_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TEST_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 10000, tfrecord_writer)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the MNIST dataset!')
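# Example usage sketch:
# >>> from datasets import download_and_convert_mnist
# >>> download_and_convert_mnist.run('/tmp/mnist')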
|
TensorFlow/Detection/SSD/models/research/slim/preprocessing | preprocessing | preprocessing_factory | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from preprocessing import cifarnet_preprocessing
from preprocessing import inception_preprocessing
from preprocessing import lenet_preprocessing
from preprocessing import vgg_preprocessing
slim = tf.contrib.slim
def get_preprocessing(name, is_training=False):
"""Returns preprocessing_fn(image, height, width, **kwargs).
Args:
name: The name of the preprocessing function.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
preprocessing_fn: A function that preprocessing a single image (pre-batch).
It has the following signature:
image = preprocessing_fn(image, output_height, output_width, ...).
Raises:
ValueError: If Preprocessing `name` is not recognized.
"""
preprocessing_fn_map = {
'cifarnet': cifarnet_preprocessing,
'inception': inception_preprocessing,
'inception_v1': inception_preprocessing,
'inception_v2': inception_preprocessing,
'inception_v3': inception_preprocessing,
'inception_v4': inception_preprocessing,
'inception_resnet_v2': inception_preprocessing,
'lenet': lenet_preprocessing,
'mobilenet_v1': inception_preprocessing,
'mobilenet_v2': inception_preprocessing,
'mobilenet_v2_035': inception_preprocessing,
'mobilenet_v2_140': inception_preprocessing,
'nasnet_mobile': inception_preprocessing,
'nasnet_large': inception_preprocessing,
'pnasnet_mobile': inception_preprocessing,
'pnasnet_large': inception_preprocessing,
'resnet_v1_50': vgg_preprocessing,
'resnet_v1_101': vgg_preprocessing,
'resnet_v1_152': vgg_preprocessing,
'resnet_v1_200': vgg_preprocessing,
'resnet_v2_50': vgg_preprocessing,
'resnet_v2_101': vgg_preprocessing,
'resnet_v2_152': vgg_preprocessing,
'resnet_v2_200': vgg_preprocessing,
'vgg': vgg_preprocessing,
'vgg_a': vgg_preprocessing,
'vgg_16': vgg_preprocessing,
'vgg_19': vgg_preprocessing,
}
if name not in preprocessing_fn_map:
raise ValueError('Preprocessing name [%s] was not recognized' % name)
def preprocessing_fn(image, output_height, output_width, **kwargs):
return preprocessing_fn_map[name].preprocess_image(
image, output_height, output_width, is_training=is_training, **kwargs)
return preprocessing_fn
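# Example usage sketch (the image tensor and sizes are illustrative):
# >>> preprocess = get_preprocessing('inception_v3', is_training=True)
# >>> image = preprocess(raw_image, output_height=299, output_width=299)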
|
PyTorch/DrugDiscovery/MoFlow/moflow/runtime | runtime | arguments | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from moflow.config import CONFIGS
from moflow.runtime.logger import LOGGING_LEVELS
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--data_dir', type=str, default='/data', help='Location for the dataset.')
PARSER.add_argument('--config_name', type=str, default='zinc250k', choices=list(CONFIGS),
help='The config to choose. This parameter allows one to switch between different datasets '
'and their dedicated configurations of the neural network. By default, a pre-defined "zinc250k" config is used.')
PARSER.add_argument('--results_dir', type=str, default='/results', help='Directory where checkpoints are stored.')
PARSER.add_argument('--predictions_path', type=str, default='/results/predictions.smi',
help='Path to store generated molecules. If an empty string is provided, predictions will not be '
'saved (useful for benchmarking and debugging).')
PARSER.add_argument('--log_path', type=str, default=None,
help='Path for DLLogger log. This file will contain information about the speed and '
'accuracy of the model during training and inference. Note that if the file '
'already exists, new logs will be added at the end.')
PARSER.add_argument('--log_interval', type=int, default=20, help='Frequency for writing logs, expressed in steps.')
PARSER.add_argument('--warmup_steps', type=int, default=20,
help='Number of warmup steps. This value is used for benchmarking and for CUDA graph capture.')
PARSER.add_argument('--steps', type=int, default=-1,
help='Number of steps used for training/inference. This parameter allows finishing '
'training earlier than the specified number of epochs. If used with inference, '
'it allows generating more molecules (by default only a single batch of molecules is generated).')
PARSER.add_argument('--save_epochs', type=int, default=5,
help='Frequency for saving checkpoints, expressed in epochs. If -1 is provided, checkpoints will not be saved.')
PARSER.add_argument('--eval_epochs', type=int, default=5,
help='Evaluation frequency, expressed in epochs. If -1 is provided, an evaluation will not be performed.')
PARSER.add_argument('--learning_rate', type=float, default=0.0005, help='Base learning rate.')
PARSER.add_argument('--beta1', type=float, default=0.9, help='beta1 parameter for the optimizer.')
PARSER.add_argument('--beta2', type=float, default=0.99, help='beta2 parameter for the optimizer.')
PARSER.add_argument('--clip', type=float, default=1, help='Gradient clipping norm.')
PARSER.add_argument('--epochs', type=int, default=300,
help='Number of training epochs. Note that you can finish training mid-epoch by using "--steps" flag.')
PARSER.add_argument('--batch_size', type=int, default=512, help='Batch size per GPU.')
PARSER.add_argument('--num_workers', type=int, default=4, help='Number of workers in the data loader.')
PARSER.add_argument('--seed', type=int, default=1, help='Random seed used to initialize the distributed loaders.')
PARSER.add_argument('--local_rank', default=os.environ.get('LOCAL_RANK', 0), type=int,
help='rank of the GPU, used to launch distributed training. This argument is specified '
'automatically by `torchrun` and does not have to be provided by the user.')
PARSER.add_argument('--temperature', type=float, default=0.3, help='Temperature used for sampling.')
PARSER.add_argument('--val_batch_size', type=int, default=100, help='Number of molecules to generate during validation step.')
PARSER.add_argument('--allow_untrained', action='store_true',
help='Allow sampling molecules from an untrained network. Useful for performance benchmarking or debugging purposes.')
PARSER.add_argument('--correct_validity', action='store_true', help='Apply validity correction after the generation of the molecules.')
PARSER.add_argument('--amp', action='store_true', help='Use Automatic Mixed Precision.')
PARSER.add_argument('--cuda_graph', action='store_true', help='Capture GPU kernels with CUDA graphs. This option can speed up training.')
PARSER.add_argument('--jit', action='store_true', help='Compile the model with `torch.jit.script`. Can be used to speed up training or inference.')
PARSER.add_argument('--verbosity', type=int, default=1, choices=list(LOGGING_LEVELS),
help='Verbosity level. Specify the following values: 0, 1, 2, 3, where 0 means minimal '
'verbosity (errors only) and 3 means maximal verbosity (debugging).')
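# Usage sketch (an illustrative addition, not part of the original runtime scripts):
# the training and inference entry points are expected to import PARSER and parse it.
if __name__ == '__main__':
    # Harmless when this module is executed directly; a no-op on import.
    example_args = PARSER.parse_args()
    print(example_args)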
|
TensorFlow/Classification/ConvNets/resnext101-32x4d/training | training | DGX2_RNxt101-32x4d_FP32_90E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 16 python3 main.py --arch=resnext101-32x4d \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \
--batch_size=64 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=6.103515625e-05 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
|
PyTorch/Classification/GPUNet/triton/deployment_toolkit/library | library | pyt | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import typing
from collections import Counter
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
import torch # pytype: disable=import-error
import yaml
from model_navigator.model import ModelSignatureConfig
from model_navigator.tensor import TensorSpec
from model_navigator.utils.config import YamlConfigFile
from ..core import (
GET_MODEL_FN_NAME,
BaseLoader,
BaseRunner,
BaseRunnerSession,
BaseSaver,
ExportFormat,
Format,
Model,
ModelInputType,
Precision,
TimeMeasurement,
TorchJit,
load_from_file,
)
from ..extensions import loaders, runners, savers
from .utils import get_dynamic_axes, get_shapes_with_dynamic_axes
LOGGER = logging.getLogger(__name__)
def get_sample_input(dataloader, device):
for batch in dataloader:
_, x, _ = batch
break
if isinstance(x, dict):
sample_input = list(x.values())
elif isinstance(x, list):
sample_input = x
else:
raise TypeError("The first element (x) of batch returned by dataloader must be a list or a dict")
for idx, s in enumerate(sample_input):
sample_input[idx] = torch.from_numpy(s).to(device)
return tuple(sample_input)
def get_model_device(torch_model):
if next(torch_model.parameters()).is_cuda:
return "cuda"
else:
return "cpu"
def infer_model_precision(model):
counter = Counter()
for param in model.parameters():
counter[param.dtype] += 1
if counter[torch.float16] > 0:
return Precision.FP16
else:
return Precision.FP32
def _get_tensor_dtypes(dataloader, precision):
def _get_dtypes(t):
def _get_dtype(v):
dtype = str(v.dtype)
if dtype == "float64":
dtype = "float32"
if precision == Precision.FP16 and dtype == "float32":
dtype = "float16"
return np.dtype(dtype)
return {k: _get_dtype(v) for k, v in t.items()}
batch = next(dataloader)
_, x, y = batch
input_dtypes = _get_dtypes(x)
output_dtypes = _get_dtypes(y)
return input_dtypes, output_dtypes
### TODO assumption: floating-point inputs have the same precision as the model
def _get_model_signature(
inputs_names: typing.List[str],
outputs_names: typing.List[str],
precision,
dataloader_fn,
batch_size_dim: typing.Optional[int] = None,
):
dataloader = dataloader_fn()
input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision)
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim)
inputs = {
name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in inputs_names
}
outputs = {
name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name]))
for name in outputs_names
}
return ModelSignatureConfig(inputs, outputs)
class PyTorchModelLoader(BaseLoader):
required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME
def __init__(self, **kwargs):
self._model_args = kwargs
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME)
model, io_names_dict = get_model(**self._model_args)
dataloader_fn = kwargs.get("dataloader_fn", None)
output_type = kwargs.get("output_type", None)
torch_jit = kwargs.get("torch_jit", None)
precision = infer_model_precision(model)
batch_axis = getattr(model, "batch_axis", 0)  # by default models support batching; batch_axis=0
model_signature = _get_model_signature(
inputs_names=io_names_dict["inputs"],
outputs_names=io_names_dict["outputs"],
precision=precision,
dataloader_fn=dataloader_fn,
batch_size_dim=batch_axis,
)
model = Model(handle=model, precision=precision, inputs=model_signature.inputs, outputs=model_signature.outputs)
if output_type == ExportFormat.TORCHSCRIPT.value:
if torch_jit == TorchJit.TRACE.value:
return self._trace(model, dataloader_fn)
elif torch_jit == TorchJit.SCRIPT.value:
return self._script(model)
raise ValueError(f"Not supported PyTorch Jit operation type: {torch_jit}")
elif output_type == ExportFormat.ONNX.value:
return model
else:
raise ValueError(f"Not supported PyTorch format: {output_type}")
def _trace(self, model: Model, dataloader_fn) -> Model:
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
traced_model = torch.jit.trace_module(model.handle, {"forward": dummy_input})
return Model(traced_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs)
def _script(self, model: Model) -> Model:
scripted_model = torch.jit.script(model.handle)
return Model(scripted_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs)
class TorchScriptLoader(BaseLoader):
def __init__(self, tensor_names_path: str = None, **kwargs):
self._model_args = kwargs
self._io_spec = None
if tensor_names_path is not None:
with Path(tensor_names_path).open("r") as fh:
tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader)
self._io_spec = ModelSignatureConfig(tensor_infos["inputs"], tensor_infos["outputs"])
def load(self, model_path: Union[str, Path], **_) -> Model:
if not isinstance(model_path, Path):
model_path = Path(model_path)
model = torch.jit.load(model_path.as_posix())
precision = infer_model_precision(model)
io_spec = self._io_spec
if not io_spec:
yaml_path = model_path.parent / f"{model_path.name}.yaml"
if not yaml_path.is_file():
raise ValueError(
f"If `--tensor-names-path is not provided, "
f"TorchScript model loader expects file {yaml_path} with tensor information."
)
with yaml_path.open("r") as fh:
tensor_info = yaml.load(fh, Loader=yaml.SafeLoader)
io_spec = ModelSignatureConfig(tensor_info["inputs"], tensor_info["outputs"])
return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class PYT2ONNXSaver(BaseSaver):
def __init__(self, onnx_opset: int = None):
self._onnx_opset = onnx_opset
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted."
dynamic_axes = get_dynamic_axes(dataloader_fn(), batch_size_dim=0)
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
with torch.no_grad():
torch.onnx.export(
model.handle,
dummy_input,
model_path,
do_constant_folding=True,
input_names=list(model.inputs),
output_names=list(model.outputs),
dynamic_axes=dynamic_axes,
opset_version=self._onnx_opset,
enable_onnx_checker=True,
)
class TorchScriptSaver(BaseSaver):
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
if not isinstance(model_path, Path):
model_path = Path(model_path)
if isinstance(model.handle, torch.jit.ScriptModule):
torch.jit.save(model.handle, model_path.as_posix())
else:
raise RuntimeError("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.")
signature_config = ModelSignatureConfig(inputs=model.inputs, outputs=model.outputs)
annotation_path = model_path.parent / f"{model_path.name}.yaml"
with YamlConfigFile(annotation_path) as config_file:
config_file.save_config(signature_config)
class PyTorchRunner(BaseRunner):
def __init__(self):
pass
def init_inference(
self,
model: Model,
):
return PyTorchRunnerSession(model=model)
class PyTorchRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted."
self._model = model
self._output_names = None
def __enter__(self):
self._output_names = list(self._model.outputs)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._output_names = None
self._model = None
def __call__(self, x: Dict[str, object]):
with torch.no_grad():
feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()]
with TimeMeasurement(self):
y_pred = self._model.handle(*feed_list)
if isinstance(y_pred, torch.Tensor):
y_pred = (y_pred,)
y_pred = [t.cpu().numpy() for t in y_pred]
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(ModelInputType.PYT.value, PyTorchModelLoader)
loaders.register_extension(ExportFormat.TORCHSCRIPT.value, TorchScriptLoader)
loaders.register_extension(Format.TORCHSCRIPT.value, TorchScriptLoader)
savers.register_extension(ExportFormat.TORCHSCRIPT.value, TorchScriptSaver)
savers.register_extension(f"{ModelInputType.PYT.value}--{ExportFormat.ONNX.value}", PYT2ONNXSaver)
runners.register_extension(Format.TORCHSCRIPT.value, PyTorchRunner)
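# Illustrative usage sketch (an addition for clarity, not part of the original toolkit).
# The file layout and tensor names are assumptions made only for this example:
# TorchScriptLoader expects a `<model>.yaml` signature file next to the TorchScript model,
# and PyTorchRunnerSession moves the provided numpy arrays to the GPU before inference.
def _example_torchscript_inference(model_path: str, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
    model = TorchScriptLoader().load(model_path)
    runner = PyTorchRunner()
    with runner.init_inference(model=model) as session:
        return session(batch)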
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B4/training/TF32 | TF32 | train_benchmark_8xA100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v1/b4_cfg.py \
--mode train_and_eval \
--use_xla \
--model_dir ./output \
--data_dir /data \
--log_steps 100 \
--max_epochs 2 \
--save_checkpoint_freq 5 \
--train_batch_size 80 \
--eval_batch_size 80 \
--train_img_size 380 \
--eval_img_size 380 \
--augmenter_name autoaugment \
--mixup_alpha 0.2 \
--lr_decay cosine \
--memory_limit 81000 \
--defer_img_mixing \
--moving_average_decay 0.9999 \
--lr_init 0.005 |
TensorFlow/LanguageModeling/BERT/triton/scripts | scripts | launch_server | NV_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:-"all"}
DETACHED=${DETACHED:-"-d"}
# Start TRITON server in DETACHED state
docker run --gpus $NV_VISIBLE_DEVICES --rm $DETACHED \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
-p8000:8000 \
-p8001:8001 \
-p8002:8002 \
--name triton_server_cont \
-e NVIDIA_VISIBLE_DEVICES=$NV_VISIBLE_DEVICES \
-v $PWD/results/triton_models:/models \
nvcr.io/nvidia/tritonserver:20.09-py3 tritonserver --model-store=/models --strict-model-config=false --log-verbose=1
|
Tools/DGLPyTorch/SyntheticGraphGeneration/configurations | configurations | ieee | {
"nodes": [
{
"name": "user",
"count": 17090,
"features": [],
"features_path": null
},
{
"name": "product",
"count": 197,
"features": [],
"features_path": null
}
],
"edges": [
{
"name": "user-product",
"count": 52008,
"src_node_type": "user",
"dst_node_type": "product",
"directed": false,
"features": [
{
"name": "TransactionDT",
"dtype": "int64",
"feature_type": "continuous"
},
{
"name": "TransactionAmt",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C1",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C2",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C3",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C4",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C5",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C6",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C7",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C8",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C9",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C10",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C11",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C12",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "C14",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V279",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V280",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V284",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V285",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V286",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V287",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V290",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V291",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V292",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V293",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V294",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V295",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V297",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V298",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V299",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V302",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V303",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V304",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V305",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V306",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V307",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V308",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V309",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V310",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V311",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V312",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V316",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V317",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V318",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V319",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V320",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "V321",
"dtype": "float64",
"feature_type": "continuous"
},
{
"name": "isFraud",
"dtype": "int64",
"feature_type": "categorical"
}
],
"features_path": "user-product.parquet",
"structure_path": "user-product_edge_list.parquet",
"[gen]tabular_generators": [
{
"type": "kde",
"features_list": [
"TransactionDT",
"TransactionAmt",
"C1",
"C2",
"C3",
"C4",
"C5",
"C6",
"C7",
"C8",
"C9",
"C10",
"C11",
"C12",
"C14",
"V279",
"V280",
"V284",
"V285",
"V286",
"V287",
"V290",
"V291",
"V292",
"V293",
"V294",
"V295",
"V297",
"V298",
"V299",
"V302",
"V303",
"V304",
"V305",
"V306",
"V307",
"V308",
"V309",
"V310",
"V311",
"V312",
"V316",
"V317",
"V318",
"V319",
"V320",
"V321",
"isFraud"
],
"data_source": {
"type": "cfg",
"path": "/workspace/data/ieee-preprocessed",
"name": "user-product"
},
"params": {}
}
],
"[gen]structure_generator": {
"type": "RMAT",
"data_source": {
"type": "cfg",
"path": "/workspace/data/ieee-preprocessed",
"name": "user-product"
},
"params": {}
}
}
]
} |
PyTorch/Segmentation/MaskRCNN/pytorch/tools/cityscapes | cityscapes | instances2dict_with_polygons | #!/usr/bin/python
#
# Convert instances from png files to a dictionary
# This file is created according to https://github.com/facebookresearch/Detectron/issues/111
from __future__ import print_function, absolute_import, division
import os, sys
sys.path.append( os.path.normpath( os.path.join( os.path.dirname( __file__ ) , '..' , 'helpers' ) ) )
from csHelpers import *
# Cityscapes imports
from cityscapesscripts.evaluation.instance import *
from cityscapesscripts.helpers.csHelpers import *
import cv2
from maskrcnn_benchmark.utils import cv2_util
def instances2dict_with_polygons(imageFileList, verbose=False):
imgCount = 0
instanceDict = {}
if not isinstance(imageFileList, list):
imageFileList = [imageFileList]
if verbose:
print("Processing {} images...".format(len(imageFileList)))
for imageFileName in imageFileList:
# Load image
img = Image.open(imageFileName)
# Image as numpy array
imgNp = np.array(img)
# Initialize label categories
instances = {}
for label in labels:
instances[label.name] = []
# Loop through all instance ids in instance image
for instanceId in np.unique(imgNp):
if instanceId < 1000:
continue
instanceObj = Instance(imgNp, instanceId)
instanceObj_dict = instanceObj.toDict()
#instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
if id2label[instanceObj.labelID].hasInstances:
mask = (imgNp == instanceId).astype(np.uint8)
contour, hier = cv2_util.findContours(
mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
polygons = [c.reshape(-1).tolist() for c in contour]
instanceObj_dict['contours'] = polygons
instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)
imgKey = os.path.abspath(imageFileName)
instanceDict[imgKey] = instances
imgCount += 1
if verbose:
print("\rImages Processed: {}".format(imgCount), end=' ')
sys.stdout.flush()
if verbose:
print("")
return instanceDict
def main(argv):
fileList = []
if (len(argv) > 2):
for arg in argv:
if ("png" in arg):
fileList.append(arg)
instances2dict_with_polygons(fileList, True)
if __name__ == "__main__":
main(sys.argv[1:])
|
TensorFlow/Classification/ConvNets/triton/deployment_toolkit | deployment_toolkit | report | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
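# Usage sketch (an illustrative addition; the result dictionaries below are made up):
if __name__ == "__main__":
    example_results = [
        {"batch_size": 2, "throughput_infer_per_sec": 840.0},
        {"batch_size": 1, "throughput_infer_per_sec": 595.0},
    ]
    example_results = sort_results(results=example_results)  # natural sort on all values
    show_results(results=format_data(data=example_results))  # tabulate with capitalized headers
    save_results(filename="example_results.csv", data=example_results, formatted=True)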
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton | triton | README | # Deploying the TFT model on Triton Inference Server
This folder contains instructions for deploying the model to run inference
on Triton Inference Server, as well as a detailed performance analysis.
The purpose of this document is to help you achieve
the best inference performance.
## Table of contents
- [Solution overview](#solution-overview)
- [Introduction](#introduction)
- [Deployment process](#deployment-process)
- [Setup](#setup)
- [Quick Start Guide](#quick-start-guide)
- [Deployment on Production](#deployment-on-production)
- [Performance](#performance)
- [Offline scenario](#offline-scenario)
- [Offline: NVIDIA A30, NVIDIA TensorRT with FP16, Dataset: electricity](#offline-nvidia-a30-nvidia-tensorrt-with-fp16-dataset-electricity)
- [Offline: NVIDIA A30, NVIDIA TensorRT with FP16, Dataset: traffic](#offline-nvidia-a30-nvidia-tensorrt-with-fp16-dataset-traffic)
- [Offline: NVIDIA A30, PyTorch with FP16, Dataset: electricity](#offline-nvidia-a30-pytorch-with-fp16-dataset-electricity)
- [Offline: NVIDIA A30, PyTorch with FP16, Dataset: traffic](#offline-nvidia-a30-pytorch-with-fp16-dataset-traffic)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16, Dataset: electricity](#offline-nvidia-dgx-1-1x-v100-32gb-nvidia-tensorrt-with-fp16-dataset-electricity)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16, Dataset: traffic](#offline-nvidia-dgx-1-1x-v100-32gb-nvidia-tensorrt-with-fp16-dataset-traffic)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16, Dataset: electricity](#offline-nvidia-dgx-1-1x-v100-32gb-pytorch-with-fp16-dataset-electricity)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16, Dataset: traffic](#offline-nvidia-dgx-1-1x-v100-32gb-pytorch-with-fp16-dataset-traffic)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16, Dataset: electricity](#offline-nvidia-dgx-a100-1x-a100-80gb-nvidia-tensorrt-with-fp16-dataset-electricity)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16, Dataset: traffic](#offline-nvidia-dgx-a100-1x-a100-80gb-nvidia-tensorrt-with-fp16-dataset-traffic)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16, Dataset: electricity](#offline-nvidia-dgx-a100-1x-a100-80gb-pytorch-with-fp16-dataset-electricity)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16, Dataset: traffic](#offline-nvidia-dgx-a100-1x-a100-80gb-pytorch-with-fp16-dataset-traffic)
- [Offline: NVIDIA T4, NVIDIA TensorRT with FP16, Dataset: electricity](#offline-nvidia-t4-nvidia-tensorrt-with-fp16-dataset-electricity)
- [Offline: NVIDIA T4, NVIDIA TensorRT with FP16, Dataset: traffic](#offline-nvidia-t4-nvidia-tensorrt-with-fp16-dataset-traffic)
- [Offline: NVIDIA T4, PyTorch with FP16, Dataset: electricity](#offline-nvidia-t4-pytorch-with-fp16-dataset-electricity)
- [Offline: NVIDIA T4, PyTorch with FP16, Dataset: traffic](#offline-nvidia-t4-pytorch-with-fp16-dataset-traffic)
- [Online scenario](#online-scenario)
- [Online: NVIDIA A30, NVIDIA TensorRT with FP16, Dataset: electricity](#online-nvidia-a30-nvidia-tensorrt-with-fp16-dataset-electricity)
- [Online: NVIDIA A30, NVIDIA TensorRT with FP16, Dataset: traffic](#online-nvidia-a30-nvidia-tensorrt-with-fp16-dataset-traffic)
- [Online: NVIDIA A30, PyTorch with FP16, Dataset: electricity](#online-nvidia-a30-pytorch-with-fp16-dataset-electricity)
- [Online: NVIDIA A30, PyTorch with FP16, Dataset: traffic](#online-nvidia-a30-pytorch-with-fp16-dataset-traffic)
- [Online: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16, Dataset: electricity](#online-nvidia-dgx-1-1x-v100-32gb-nvidia-tensorrt-with-fp16-dataset-electricity)
- [Online: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16, Dataset: traffic](#online-nvidia-dgx-1-1x-v100-32gb-nvidia-tensorrt-with-fp16-dataset-traffic)
- [Online: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16, Dataset: electricity](#online-nvidia-dgx-1-1x-v100-32gb-pytorch-with-fp16-dataset-electricity)
- [Online: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16, Dataset: traffic](#online-nvidia-dgx-1-1x-v100-32gb-pytorch-with-fp16-dataset-traffic)
- [Online: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16, Dataset: electricity](#online-nvidia-dgx-a100-1x-a100-80gb-nvidia-tensorrt-with-fp16-dataset-electricity)
- [Online: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16, Dataset: traffic](#online-nvidia-dgx-a100-1x-a100-80gb-nvidia-tensorrt-with-fp16-dataset-traffic)
- [Online: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16, Dataset: electricity](#online-nvidia-dgx-a100-1x-a100-80gb-pytorch-with-fp16-dataset-electricity)
- [Online: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16, Dataset: traffic](#online-nvidia-dgx-a100-1x-a100-80gb-pytorch-with-fp16-dataset-traffic)
- [Online: NVIDIA T4, NVIDIA TensorRT with FP16, Dataset: electricity](#online-nvidia-t4-nvidia-tensorrt-with-fp16-dataset-electricity)
- [Online: NVIDIA T4, NVIDIA TensorRT with FP16, Dataset: traffic](#online-nvidia-t4-nvidia-tensorrt-with-fp16-dataset-traffic)
- [Online: NVIDIA T4, PyTorch with FP16, Dataset: electricity](#online-nvidia-t4-pytorch-with-fp16-dataset-electricity)
- [Online: NVIDIA T4, PyTorch with FP16, Dataset: traffic](#online-nvidia-t4-pytorch-with-fp16-dataset-traffic)
- [Advanced](#advanced)
- [Step by step deployment process](#step-by-step-deployment-process)
- [Latency explanation](#latency-explanation)
- [Release notes](#release-notes)
- [Changelog](#changelog)
- [Known issues](#known-issues)
## Solution overview
### Introduction
The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server)
provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs.
The server provides an inference service via an HTTP or gRPC endpoint,
allowing remote clients to request inferencing for any number of GPU
or CPU models being managed by the server.
This README provides step-by-step deployment instructions for models generated
during training (as described in the [model README](../readme.md)).
Additionally, this README provides the corresponding deployment scripts that
ensure optimal GPU utilization during inferencing on Triton Inference Server.
### Deployment process
The deployment process consists of two steps:
1. Conversion.
The purpose of conversion is to find the best performing model
format supported by Triton Inference Server.
Triton Inference Server uses a number of runtime backends such as
[TensorRT](https://developer.nvidia.com/tensorrt),
[LibTorch](https://github.com/triton-inference-server/pytorch_backend) and
[ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend)
to support various model types. Refer to the
[Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton)
for a list of available backends.
2. Configuration.
Model configuration on Triton Inference Server, which generates
necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md).
After deployment, Triton Inference Server is used to evaluate the converted model in two steps:
1. Accuracy tests.
Produce results which are tested against given accuracy thresholds.
2. Performance tests.
Produce latency and throughput results for offline (static batching)
and online (dynamic batching) scenarios.
All steps are executed by the provided runner script. Refer to the [Quick Start Guide](#quick-start-guide) for details.
## Setup
Ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PyTorch NGC container 21.12](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch)
* [Triton Inference Server NGC container 21.12](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver)
* [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html)
* [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU
## Quick Start Guide
Running the following scripts will build and launch the container with all required dependencies for native PyTorch as well as Triton Inference Server. This is necessary for running inference and can also be used for data download, processing, and training of the model.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/Forecasting/TFT
```
2. Prepare dataset.
Please follow the data download instructions in the [Main QSG](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Forecasting/TFT#quick-start-guide)
3. Build and run a container that extends NGC PyTorch with the Triton client libraries and necessary dependencies.
```
./triton/scripts/docker/build.sh
./triton/scripts/docker/interactive.sh /path/to/your/data/
```
4. Execute the runner script (note that the run scripts are prepared per NVIDIA GPU).
```
NVIDIA A30: ./triton/runner/start_NVIDIA-A30.sh
NVIDIA DGX-1 (1x V100 32GB): ./triton/runner/start_NVIDIA-DGX-1-\(1x-V100-32GB\).sh
NVIDIA DGX A100 (1x A100 80GB): ./triton/runner/start_NVIDIA-DGX-A100-\(1x-A100-80GB\).sh
NVIDIA T4: ./triton/runner/start_NVIDIA-T4.sh
```
## Deployment on Production
In order to achieve the best performance results in production, use the [Triton Model Navigator](https://github.com/triton-inference-server/model_navigator).
The Triton Model Navigator is a tool that automates the process of deploying a model on
the NVIDIA [Triton Inference Server](https://github.com/triton-inference-server).
The tool optimizes models by converting them to the available formats and applying additional Triton backend optimizations.
It then uses the [Triton Model Analyzer](https://github.com/triton-inference-server/model_analyzer) to find the Triton model configuration
that best matches the provided constraints and optimizes performance.
1. Export Model
Export the model from the Python source to the desired format (for example, SavedModel or TorchScript).
<details>
<summary>Export Model Command</summary>
```shell
if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
\
--checkpoint ${CHECKPOINT_DIR}/ \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--dataset ${DATASETS_DIR}/${DATASET} \
--batch-size 1
```
</details>
2. Use the Model Navigator to find the best model configuration.
<details>
<summary>Model Navigator Command</summary>
```shell
model-navigator run --model-name TFT --model-path ${SHARED_DIR}/exported_model.onnx
```
</details>
Read more about Triton Model Navigator usage in the [documentation](https://github.com/triton-inference-server/model_navigator).
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect
the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to
[NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Offline scenario
The offline scenario assumes the client and server are located on the same host. The tests use the following setup:
- tensors are passed through shared memory between the client and server; the Perf Analyzer flag `shared-memory=system` is used
- a single request is sent from the client to the server with a static batch size
#### Offline: NVIDIA A30, NVIDIA TensorRT with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA A30 |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_a30_experiment_17_triton_performance_offline_17/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_a30_experiment_17_triton_performance_offline_17/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_a30_experiment_17_triton_performance_offline_17/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 595.0 | 0.0 | 0.2 | 0.1 | 0.1 | 1.3 | 0.0 | 0.0 | 1.7 | 1.7 | 1.8 | 1.8 | 1.7 |
| 2 | 1 | 804.6 | 0.0 | 0.1 | 0.0 | 0.1 | 2.1 | 0.1 | 0.0 | 2.5 | 2.6 | 2.6 | 2.6 | 2.5 |
| 4 | 1 | 1500.0 | 0.0 | 0.2 | 0.1 | 0.1 | 2.2 | 0.1 | 0.0 | 2.7 | 2.7 | 2.7 | 2.8 | 2.7 |
| 8 | 1 | 2696.0 | 0.1 | 0.2 | 0.1 | 0.1 | 2.5 | 0.0 | 0.0 | 2.9 | 3.0 | 3.1 | 3.3 | 3.0 |
| 16 | 1 | 4704.0 | 0.1 | 0.2 | 0.1 | 0.1 | 2.9 | 0.0 | 0.0 | 3.4 | 3.5 | 3.6 | 3.8 | 3.4 |
| 32 | 1 | 8576.0 | 0.1 | 0.2 | 0.0 | 0.1 | 3.2 | 0.1 | 0.0 | 3.7 | 3.9 | 3.9 | 4.0 | 3.7 |
| 64 | 1 | 14101.3 | 0.1 | 0.2 | 0.0 | 0.1 | 4.0 | 0.0 | 0.0 | 4.5 | 4.6 | 4.7 | 5.2 | 4.5 |
| 128 | 1 | 19227.2 | 0.1 | 0.2 | 0.1 | 0.1 | 6.1 | 0.0 | 0.0 | 6.5 | 6.7 | 8.0 | 8.3 | 6.6 |
| 256 | 1 | 24401.3 | 0.1 | 0.3 | 0.1 | 0.2 | 9.8 | 0.0 | 0.0 | 10.4 | 10.5 | 11.4 | 11.6 | 10.5 |
| 512 | 1 | 27235.7 | 0.1 | 0.4 | 0.1 | 1.0 | 17.1 | 0.1 | 0.0 | 18.8 | 18.8 | 18.8 | 18.8 | 18.8 |
| 1024 | 1 | 28782.6 | 0.1 | 0.4 | 0.1 | 1.9 | 32.9 | 0.2 | 0.0 | 35.5 | 35.6 | 35.6 | 35.7 | 35.5 |
</details>
#### Offline: NVIDIA A30, NVIDIA TensorRT with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA A30 |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_a30_experiment_18_triton_performance_offline_18/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_a30_experiment_18_triton_performance_offline_18/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_a30_experiment_18_triton_performance_offline_18/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 605.4 | 0.0 | 0.2 | 0.0 | 0.1 | 1.3 | 0.0 | 0.0 | 1.6 | 1.7 | 1.7 | 1.7 | 1.6 |
| 2 | 1 | 840.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.1 | 0.0 | 0.0 | 2.4 | 2.4 | 2.4 | 2.5 | 2.4 |
| 4 | 1 | 1638.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.2 | 0.0 | 0.0 | 2.4 | 2.5 | 2.5 | 2.6 | 2.4 |
| 8 | 1 | 2876.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.5 | 0.0 | 0.0 | 2.8 | 2.9 | 2.9 | 2.9 | 2.8 |
| 16 | 1 | 5168.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.8 | 0.0 | 0.0 | 3.1 | 3.3 | 3.3 | 3.4 | 3.1 |
| 32 | 1 | 8576.0 | 0.0 | 0.1 | 0.0 | 0.1 | 3.3 | 0.0 | 0.0 | 3.7 | 3.9 | 4.0 | 4.1 | 3.7 |
| 64 | 1 | 14592.0 | 0.0 | 0.1 | 0.0 | 0.1 | 4.0 | 0.0 | 0.0 | 4.3 | 4.5 | 4.5 | 4.7 | 4.4 |
| 128 | 1 | 19520.0 | 0.0 | 0.1 | 0.0 | 0.1 | 6.2 | 0.0 | 0.0 | 6.5 | 6.6 | 7.9 | 8.3 | 6.5 |
| 256 | 1 | 24832.0 | 0.0 | 0.2 | 0.0 | 0.2 | 9.8 | 0.0 | 0.0 | 10.2 | 10.4 | 10.9 | 11.1 | 10.3 |
| 512 | 1 | 27235.7 | 0.1 | 0.4 | 0.1 | 1.1 | 17.0 | 0.1 | 0.0 | 18.8 | 18.8 | 18.8 | 18.9 | 18.8 |
| 1024 | 1 | 28725.7 | 0.1 | 0.4 | 0.1 | 2.0 | 32.9 | 0.2 | 0.0 | 35.6 | 35.7 | 35.7 | 35.8 | 35.6 |
</details>
#### Offline: NVIDIA A30, PyTorch with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA A30 |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_a30_experiment_27_triton_performance_offline_27/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_a30_experiment_27_triton_performance_offline_27/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_a30_experiment_27_triton_performance_offline_27/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 126.5 | 0.1 | 0.4 | 0.1 | 0.1 | 7.2 | 0.0 | 0.0 | 7.8 | 8.0 | 8.8 | 9.5 | 7.9 |
| 2 | 1 | 234.8 | 0.1 | 0.4 | 0.1 | 0.1 | 7.8 | 0.0 | 0.0 | 8.3 | 9.9 | 10.1 | 10.3 | 8.5 |
| 4 | 1 | 431.1 | 0.1 | 0.4 | 0.1 | 0.1 | 8.5 | 0.0 | 0.0 | 8.6 | 10.3 | 10.4 | 10.5 | 9.2 |
| 8 | 1 | 860.8 | 0.1 | 0.4 | 0.1 | 0.2 | 8.5 | 0.0 | 0.0 | 8.9 | 10.5 | 10.7 | 10.8 | 9.3 |
| 16 | 1 | 1747.2 | 0.1 | 0.5 | 0.1 | 0.2 | 8.3 | 0.0 | 0.0 | 8.8 | 10.5 | 10.6 | 10.7 | 9.1 |
| 32 | 1 | 3205.8 | 0.1 | 0.4 | 0.1 | 0.2 | 9.1 | 0.0 | 0.0 | 9.8 | 11.2 | 11.3 | 11.4 | 10.0 |
| 64 | 1 | 6249.6 | 0.1 | 0.4 | 0.1 | 0.3 | 8.9 | 0.4 | 0.0 | 9.7 | 11.5 | 11.5 | 11.6 | 10.2 |
| 128 | 1 | 9216.0 | 0.1 | 0.3 | 0.1 | 0.5 | 8.9 | 3.9 | 0.0 | 13.9 | 14.1 | 14.2 | 14.4 | 13.9 |
| 256 | 1 | 11369.7 | 0.1 | 0.3 | 0.1 | 0.9 | 5.3 | 15.8 | 0.0 | 22.5 | 22.7 | 22.7 | 23.0 | 22.5 |
| 512 | 1 | 12383.8 | 0.1 | 0.3 | 0.1 | 1.6 | 5.4 | 33.8 | 0.0 | 41.3 | 41.5 | 41.6 | 41.7 | 41.3 |
| 1024 | 1 | 12849.9 | 0.1 | 0.4 | 0.1 | 3.2 | 5.6 | 70.2 | 0.0 | 79.6 | 80.0 | 80.1 | 80.3 | 79.6 |
</details>
#### Offline: NVIDIA A30, PyTorch with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA A30 |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_a30_experiment_28_triton_performance_offline_28/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_a30_experiment_28_triton_performance_offline_28/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_a30_experiment_28_triton_performance_offline_28/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 189.0 | 0.1 | 0.3 | 0.0 | 0.1 | 4.8 | 0.0 | 0.0 | 4.6 | 7.4 | 7.4 | 8.5 | 5.3 |
| 2 | 1 | 252.9 | 0.1 | 0.4 | 0.1 | 0.1 | 7.2 | 0.0 | 0.0 | 7.9 | 8.0 | 8.0 | 8.1 | 7.9 |
| 4 | 1 | 500.0 | 0.1 | 0.4 | 0.1 | 0.1 | 7.3 | 0.0 | 0.0 | 8.0 | 8.0 | 8.0 | 9.2 | 8.0 |
| 8 | 1 | 998.0 | 0.1 | 0.3 | 0.1 | 0.1 | 7.4 | 0.0 | 0.0 | 8.0 | 8.0 | 8.1 | 8.2 | 8.0 |
| 16 | 1 | 1996.0 | 0.1 | 0.3 | 0.1 | 0.1 | 7.4 | 0.0 | 0.0 | 8.0 | 8.1 | 8.1 | 9.1 | 8.0 |
| 32 | 1 | 3750.4 | 0.1 | 0.4 | 0.1 | 0.1 | 7.8 | 0.0 | 0.0 | 8.5 | 8.6 | 8.7 | 10.3 | 8.5 |
| 64 | 1 | 7179.4 | 0.1 | 0.4 | 0.1 | 0.2 | 7.7 | 0.4 | 0.0 | 8.9 | 9.0 | 9.1 | 9.4 | 8.9 |
| 128 | 1 | 9946.0 | 0.1 | 0.3 | 0.1 | 0.3 | 7.3 | 4.8 | 0.0 | 12.8 | 13.3 | 13.6 | 13.7 | 12.8 |
| 256 | 1 | 11821.5 | 0.0 | 0.2 | 0.0 | 0.6 | 5.0 | 15.8 | 0.0 | 21.6 | 21.8 | 21.8 | 21.8 | 21.6 |
| 512 | 1 | 12825.0 | 0.0 | 0.2 | 0.0 | 0.8 | 5.0 | 33.8 | 0.0 | 40.0 | 40.3 | 40.5 | 40.6 | 39.8 |
| 1024 | 1 | 13284.7 | 0.0 | 0.2 | 0.0 | 1.8 | 5.3 | 69.7 | 0.0 | 77.3 | 77.7 | 77.8 | 77.9 | 77.1 |
</details>
#### Offline: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_17_triton_performance_offline_17/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_17_triton_performance_offline_17/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_17_triton_performance_offline_17/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 416.5 | 0.1 | 0.2 | 0.1 | 0.1 | 1.8 | 0.0 | 0.0 | 2.4 | 2.5 | 2.5 | 2.6 | 2.4 |
| 2 | 1 | 770.6 | 0.1 | 0.3 | 0.1 | 0.2 | 1.9 | 0.0 | 0.0 | 2.6 | 2.6 | 2.7 | 2.7 | 2.6 |
| 4 | 1 | 1427.3 | 0.1 | 0.2 | 0.1 | 0.2 | 2.2 | 0.0 | 0.0 | 2.8 | 2.9 | 2.9 | 3.0 | 2.8 |
| 8 | 1 | 2604.0 | 0.1 | 0.3 | 0.1 | 0.2 | 2.4 | 0.0 | 0.0 | 3.1 | 3.2 | 3.2 | 3.3 | 3.1 |
| 16 | 1 | 4480.0 | 0.1 | 0.3 | 0.1 | 0.2 | 2.9 | 0.0 | 0.0 | 3.6 | 3.7 | 3.7 | 3.8 | 3.6 |
| 32 | 1 | 7274.7 | 0.1 | 0.2 | 0.1 | 0.2 | 3.9 | 0.0 | 0.0 | 4.4 | 4.5 | 4.5 | 4.6 | 4.4 |
| 64 | 1 | 10922.7 | 0.1 | 0.2 | 0.1 | 0.2 | 5.3 | 0.0 | 0.0 | 5.8 | 6.0 | 6.0 | 6.1 | 5.8 |
| 128 | 1 | 13744.5 | 0.1 | 0.2 | 0.1 | 0.2 | 8.7 | 0.0 | 0.0 | 9.3 | 9.4 | 9.4 | 9.6 | 9.3 |
| 256 | 1 | 17341.8 | 0.1 | 0.2 | 0.1 | 0.3 | 14.0 | 0.0 | 0.0 | 14.7 | 14.9 | 14.9 | 15.1 | 14.7 |
| 512 | 1 | 20439.0 | 0.1 | 0.2 | 0.1 | 0.5 | 24.1 | 0.0 | 0.0 | 25.0 | 25.1 | 25.2 | 25.6 | 25.0 |
| 1024 | 1 | 23410.2 | 0.1 | 0.3 | 0.1 | 0.7 | 42.5 | 0.0 | 0.0 | 43.6 | 43.8 | 43.9 | 44.6 | 43.7 |
</details>
#### Offline: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_18_triton_performance_offline_18/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_18_triton_performance_offline_18/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_18_triton_performance_offline_18/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 406.0 | 0.1 | 0.2 | 0.1 | 0.2 | 1.8 | 0.0 | 0.0 | 2.4 | 2.5 | 2.5 | 2.6 | 2.5 |
| 2 | 1 | 775.0 | 0.1 | 0.2 | 0.1 | 0.2 | 2.0 | 0.0 | 0.0 | 2.6 | 2.7 | 2.7 | 2.8 | 2.6 |
| 4 | 1 | 1431.3 | 0.1 | 0.2 | 0.1 | 0.2 | 2.2 | 0.0 | 0.0 | 2.8 | 3.0 | 3.0 | 3.2 | 2.8 |
| 8 | 1 | 2644.0 | 0.1 | 0.2 | 0.1 | 0.1 | 2.5 | 0.0 | 0.0 | 3.0 | 3.1 | 3.1 | 3.1 | 3.0 |
| 16 | 1 | 4824.0 | 0.1 | 0.2 | 0.1 | 0.2 | 2.7 | 0.0 | 0.0 | 3.3 | 3.4 | 3.4 | 3.5 | 3.3 |
| 32 | 1 | 7637.3 | 0.1 | 0.2 | 0.1 | 0.2 | 3.6 | 0.0 | 0.0 | 4.2 | 4.3 | 4.3 | 4.4 | 4.2 |
| 64 | 1 | 10919.0 | 0.1 | 0.3 | 0.1 | 0.2 | 5.2 | 0.0 | 0.0 | 5.8 | 5.9 | 6.0 | 6.0 | 5.8 |
| 128 | 1 | 13488.5 | 0.1 | 0.2 | 0.1 | 0.2 | 8.8 | 0.0 | 0.0 | 9.4 | 9.7 | 9.8 | 10.0 | 9.5 |
| 256 | 1 | 17216.0 | 0.1 | 0.2 | 0.1 | 0.3 | 14.2 | 0.0 | 0.0 | 14.8 | 15.0 | 15.1 | 15.2 | 14.8 |
| 512 | 1 | 20596.6 | 0.1 | 0.3 | 0.1 | 0.5 | 23.9 | 0.0 | 0.0 | 24.8 | 25.0 | 25.1 | 25.3 | 24.8 |
| 1024 | 1 | 23456.8 | 0.1 | 0.2 | 0.1 | 0.7 | 42.6 | 0.0 | 0.0 | 43.7 | 44.3 | 44.4 | 44.9 | 43.6 |
</details>
#### Offline: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_27_triton_performance_offline_27/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_27_triton_performance_offline_27/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_27_triton_performance_offline_27/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 134.2 | 0.1 | 0.3 | 0.1 | 0.1 | 6.9 | 0.0 | 0.0 | 8.1 | 8.3 | 8.4 | 9.1 | 7.4 |
| 2 | 1 | 271.5 | 0.0 | 0.2 | 0.1 | 0.1 | 6.9 | 0.0 | 0.0 | 7.2 | 8.2 | 8.3 | 8.3 | 7.3 |
| 4 | 1 | 524.9 | 0.1 | 0.3 | 0.1 | 0.1 | 7.1 | 0.0 | 0.0 | 8.3 | 8.5 | 8.9 | 9.6 | 7.6 |
| 8 | 1 | 1044.0 | 0.1 | 0.3 | 0.1 | 0.1 | 7.1 | 0.0 | 0.0 | 8.4 | 8.5 | 8.6 | 9.5 | 7.6 |
| 16 | 1 | 2119.5 | 0.1 | 0.3 | 0.1 | 0.1 | 7.0 | 0.0 | 0.0 | 8.2 | 8.4 | 8.5 | 8.8 | 7.5 |
| 32 | 1 | 3775.2 | 0.1 | 0.3 | 0.1 | 0.1 | 7.9 | 0.0 | 0.0 | 9.2 | 9.4 | 9.4 | 9.5 | 8.4 |
| 64 | 1 | 6424.3 | 0.1 | 0.3 | 0.1 | 0.1 | 7.9 | 1.5 | 0.0 | 9.9 | 10.1 | 10.1 | 10.6 | 9.9 |
| 128 | 1 | 8528.0 | 0.1 | 0.2 | 0.1 | 0.2 | 8.0 | 6.4 | 0.0 | 15.1 | 15.2 | 15.3 | 15.4 | 15.0 |
| 256 | 1 | 10644.4 | 0.1 | 0.3 | 0.1 | 0.3 | 8.0 | 15.3 | 0.0 | 24.1 | 24.3 | 24.3 | 24.7 | 24.0 |
| 512 | 1 | 12213.7 | 0.1 | 0.3 | 0.1 | 0.5 | 7.3 | 33.8 | 0.0 | 41.9 | 42.1 | 42.1 | 42.2 | 41.9 |
| 1024 | 1 | 13153.4 | 0.1 | 0.3 | 0.1 | 0.8 | 6.6 | 69.9 | 0.0 | 77.7 | 77.8 | 77.9 | 78.1 | 77.7 |
</details>
#### Offline: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_28_triton_performance_offline_28/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_28_triton_performance_offline_28/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_28_triton_performance_offline_28/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 158.0 | 0.1 | 0.2 | 0.1 | 0.1 | 5.9 | 0.0 | 0.0 | 6.4 | 6.5 | 6.6 | 6.7 | 6.3 |
| 2 | 1 | 312.5 | 0.1 | 0.3 | 0.1 | 0.1 | 5.9 | 0.0 | 0.0 | 6.5 | 6.6 | 6.6 | 6.8 | 6.4 |
| 4 | 1 | 608.0 | 0.1 | 0.3 | 0.1 | 0.1 | 6.0 | 0.0 | 0.0 | 6.6 | 6.8 | 6.8 | 7.0 | 6.6 |
| 8 | 1 | 1208.0 | 0.1 | 0.2 | 0.1 | 0.1 | 6.1 | 0.0 | 0.0 | 6.7 | 6.8 | 6.9 | 6.9 | 6.6 |
| 16 | 1 | 2456.0 | 0.1 | 0.3 | 0.1 | 0.1 | 5.9 | 0.0 | 0.0 | 6.5 | 6.6 | 6.7 | 7.3 | 6.5 |
| 32 | 1 | 4352.0 | 0.1 | 0.3 | 0.1 | 0.1 | 6.8 | 0.0 | 0.0 | 7.3 | 7.4 | 7.5 | 8.1 | 7.3 |
| 64 | 1 | 6366.9 | 0.1 | 0.3 | 0.1 | 0.1 | 7.2 | 2.3 | 0.0 | 10.0 | 10.1 | 10.1 | 10.2 | 10.0 |
| 128 | 1 | 8544.0 | 0.1 | 0.3 | 0.1 | 0.2 | 7.3 | 7.0 | 0.0 | 14.9 | 15.1 | 15.1 | 15.3 | 15.0 |
| 256 | 1 | 10687.1 | 0.1 | 0.3 | 0.1 | 0.3 | 7.3 | 15.9 | 0.0 | 23.9 | 24.0 | 24.0 | 24.1 | 23.9 |
| 512 | 1 | 12189.3 | 0.1 | 0.3 | 0.1 | 0.5 | 7.2 | 33.9 | 0.0 | 42.0 | 42.1 | 42.1 | 42.2 | 42.0 |
| 1024 | 1 | 13153.1 | 0.1 | 0.3 | 0.1 | 0.8 | 7.0 | 69.5 | 0.0 | 77.8 | 77.9 | 77.9 | 78.1 | 77.8 |
</details>
#### Offline: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_17_triton_performance_offline_17/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_17_triton_performance_offline_17/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_17_triton_performance_offline_17/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 663.0 | 0.0 | 0.1 | 0.0 | 0.1 | 1.3 | 0.0 | 0.0 | 1.4 | 1.6 | 1.6 | 4.7 | 1.5 |
| 2 | 1 | 879.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.1 | 0.0 | 0.0 | 2.3 | 2.4 | 2.4 | 2.4 | 2.3 |
| 4 | 1 | 1638.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.2 | 0.0 | 0.0 | 2.4 | 2.5 | 2.5 | 2.5 | 2.4 |
| 8 | 1 | 3080.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.4 | 0.0 | 0.0 | 2.6 | 2.6 | 2.7 | 2.7 | 2.6 |
| 16 | 1 | 5808.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.5 | 0.0 | 0.0 | 2.7 | 2.8 | 2.8 | 2.9 | 2.8 |
| 32 | 1 | 10688.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.7 | 0.0 | 0.0 | 3.0 | 3.1 | 3.1 | 3.1 | 3.0 |
| 64 | 1 | 17664.0 | 0.0 | 0.1 | 0.0 | 0.1 | 3.4 | 0.0 | 0.0 | 3.6 | 3.8 | 3.9 | 3.9 | 3.6 |
| 128 | 1 | 24362.7 | 0.0 | 0.1 | 0.0 | 0.2 | 4.9 | 0.0 | 0.0 | 5.2 | 5.5 | 5.5 | 5.6 | 5.2 |
| 256 | 1 | 35136.0 | 0.0 | 0.1 | 0.0 | 0.2 | 6.9 | 0.0 | 0.0 | 7.3 | 7.5 | 7.5 | 7.7 | 7.3 |
| 512 | 1 | 49493.3 | 0.0 | 0.1 | 0.0 | 0.2 | 9.9 | 0.0 | 0.0 | 10.2 | 10.4 | 10.5 | 12.9 | 10.3 |
| 1024 | 1 | 54061.8 | 0.0 | 0.1 | 0.0 | 0.5 | 18.2 | 0.1 | 0.0 | 18.8 | 18.9 | 19.0 | 22.3 | 18.9 |
</details>
#### Offline: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_18_triton_performance_offline_18/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_18_triton_performance_offline_18/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_18_triton_performance_offline_18/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 716.0 | 0.0 | 0.1 | 0.0 | 0.1 | 1.2 | 0.0 | 0.0 | 1.4 | 1.4 | 1.4 | 2.1 | 1.4 |
| 2 | 1 | 878.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.1 | 0.0 | 0.0 | 2.3 | 2.4 | 2.4 | 2.4 | 2.3 |
| 4 | 1 | 1653.2 | 0.0 | 0.1 | 0.0 | 0.1 | 2.2 | 0.0 | 0.0 | 2.4 | 2.5 | 2.5 | 2.5 | 2.4 |
| 8 | 1 | 3192.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.3 | 0.0 | 0.0 | 2.5 | 2.5 | 2.6 | 2.6 | 2.5 |
| 16 | 1 | 5920.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.5 | 0.0 | 0.0 | 2.7 | 2.8 | 2.8 | 2.8 | 2.7 |
| 32 | 1 | 10624.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.8 | 0.0 | 0.0 | 3.0 | 3.1 | 3.1 | 3.1 | 3.0 |
| 64 | 1 | 18358.8 | 0.0 | 0.1 | 0.0 | 0.1 | 3.2 | 0.0 | 0.0 | 3.5 | 3.5 | 3.6 | 3.6 | 3.5 |
| 128 | 1 | 24738.4 | 0.0 | 0.1 | 0.0 | 0.2 | 4.8 | 0.0 | 0.0 | 5.2 | 5.3 | 5.3 | 5.4 | 5.2 |
| 256 | 1 | 35776.0 | 0.0 | 0.1 | 0.0 | 0.2 | 6.8 | 0.0 | 0.0 | 7.1 | 7.3 | 7.4 | 7.5 | 7.1 |
| 512 | 1 | 49834.7 | 0.0 | 0.1 | 0.0 | 0.2 | 9.9 | 0.0 | 0.0 | 10.2 | 10.3 | 10.3 | 11.3 | 10.3 |
| 1024 | 1 | 53350.4 | 0.0 | 0.1 | 0.0 | 0.4 | 18.6 | 0.0 | 0.0 | 19.1 | 19.2 | 19.3 | 22.4 | 19.2 |
</details>
#### Offline: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_27_triton_performance_offline_27/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_27_triton_performance_offline_27/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_27_triton_performance_offline_27/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 205.0 | 0.0 | 0.1 | 0.0 | 0.1 | 4.6 | 0.0 | 0.0 | 4.8 | 4.9 | 4.9 | 5.3 | 4.9 |
| 2 | 1 | 396.0 | 0.0 | 0.1 | 0.0 | 0.1 | 4.8 | 0.0 | 0.0 | 5.0 | 5.2 | 5.4 | 5.5 | 5.0 |
| 4 | 1 | 788.0 | 0.0 | 0.1 | 0.0 | 0.1 | 4.8 | 0.0 | 0.0 | 5.0 | 5.1 | 5.3 | 5.5 | 5.1 |
| 8 | 1 | 1544.0 | 0.0 | 0.1 | 0.0 | 0.1 | 4.9 | 0.0 | 0.0 | 5.1 | 5.4 | 5.5 | 5.6 | 5.2 |
| 16 | 1 | 3081.6 | 0.0 | 0.1 | 0.0 | 0.1 | 4.9 | 0.0 | 0.0 | 5.1 | 5.4 | 5.5 | 5.6 | 5.2 |
| 32 | 1 | 5802.7 | 0.0 | 0.1 | 0.0 | 0.1 | 5.2 | 0.0 | 0.0 | 5.5 | 5.5 | 5.8 | 5.9 | 5.5 |
| 64 | 1 | 10624.0 | 0.0 | 0.1 | 0.0 | 0.1 | 5.3 | 0.5 | 0.0 | 6.0 | 6.1 | 6.2 | 6.4 | 6.0 |
| 128 | 1 | 15203.4 | 0.0 | 0.1 | 0.0 | 0.2 | 5.3 | 2.8 | 0.0 | 8.4 | 8.6 | 8.7 | 8.9 | 8.4 |
| 256 | 1 | 19821.7 | 0.0 | 0.1 | 0.0 | 0.3 | 5.3 | 7.2 | 0.0 | 13.0 | 13.1 | 13.3 | 13.4 | 12.9 |
| 512 | 1 | 23123.4 | 0.0 | 0.1 | 0.0 | 0.4 | 5.3 | 16.2 | 0.0 | 22.2 | 22.3 | 22.4 | 22.4 | 22.1 |
| 1024 | 1 | 25159.9 | 0.0 | 0.1 | 0.0 | 0.9 | 5.7 | 33.9 | 0.0 | 40.7 | 40.8 | 40.9 | 40.9 | 40.6 |
</details>
#### Offline: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_28_triton_performance_offline_28/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_28_triton_performance_offline_28/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_28_triton_performance_offline_28/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 200.3 | 0.0 | 0.1 | 0.0 | 0.1 | 4.7 | 0.0 | 0.0 | 5.0 | 5.1 | 5.3 | 5.4 | 5.0 |
| 2 | 1 | 393.3 | 0.0 | 0.1 | 0.0 | 0.1 | 4.8 | 0.0 | 0.0 | 5.1 | 5.1 | 5.4 | 5.5 | 5.1 |
| 4 | 1 | 774.7 | 0.0 | 0.1 | 0.0 | 0.1 | 4.9 | 0.0 | 0.0 | 5.1 | 5.2 | 5.5 | 5.8 | 5.2 |
| 8 | 1 | 1525.3 | 0.0 | 0.1 | 0.0 | 0.1 | 5.0 | 0.0 | 0.0 | 5.2 | 5.5 | 5.6 | 5.7 | 5.2 |
| 16 | 1 | 3028.3 | 0.0 | 0.1 | 0.0 | 0.1 | 5.0 | 0.0 | 0.0 | 5.2 | 5.6 | 5.7 | 5.7 | 5.3 |
| 32 | 1 | 5696.0 | 0.0 | 0.1 | 0.0 | 0.1 | 5.3 | 0.0 | 0.0 | 5.6 | 5.7 | 5.9 | 6.0 | 5.6 |
| 64 | 1 | 10645.3 | 0.0 | 0.1 | 0.0 | 0.1 | 5.4 | 0.3 | 0.0 | 6.0 | 6.2 | 6.2 | 6.3 | 6.0 |
| 128 | 1 | 15229.0 | 0.0 | 0.2 | 0.0 | 0.2 | 5.4 | 2.6 | 0.0 | 8.4 | 8.6 | 8.7 | 8.8 | 8.4 |
| 256 | 1 | 19965.1 | 0.0 | 0.1 | 0.0 | 0.3 | 5.4 | 7.0 | 0.0 | 12.8 | 13.2 | 13.3 | 13.3 | 12.8 |
| 512 | 1 | 23319.3 | 0.0 | 0.1 | 0.0 | 0.5 | 5.4 | 15.9 | 0.0 | 21.9 | 22.1 | 22.2 | 22.2 | 21.9 |
| 1024 | 1 | 25452.5 | 0.0 | 0.1 | 0.0 | 0.9 | 5.8 | 33.3 | 0.0 | 40.2 | 40.4 | 40.5 | 40.6 | 40.2 |
</details>
#### Offline: NVIDIA T4, NVIDIA TensorRT with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA T4 |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_t4_experiment_17_triton_performance_offline_17/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_t4_experiment_17_triton_performance_offline_17/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_t4_experiment_17_triton_performance_offline_17/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 415.0 | 0.1 | 0.4 | 0.1 | 0.2 | 1.6 | 0.0 | 0.0 | 2.4 | 2.5 | 2.5 | 2.5 | 2.4 |
| 2 | 1 | 781.6 | 0.1 | 0.4 | 0.1 | 0.2 | 1.7 | 0.0 | 0.0 | 2.5 | 2.6 | 2.6 | 2.6 | 2.5 |
| 4 | 1 | 1617.2 | 0.1 | 0.3 | 0.1 | 0.2 | 1.8 | 0.0 | 0.0 | 2.5 | 2.5 | 2.5 | 2.6 | 2.5 |
| 8 | 1 | 2998.5 | 0.1 | 0.3 | 0.1 | 0.2 | 2.0 | 0.0 | 0.0 | 2.7 | 2.7 | 2.7 | 2.7 | 2.6 |
| 16 | 1 | 4504.0 | 0.1 | 0.5 | 0.1 | 0.2 | 2.7 | 0.0 | 0.0 | 3.5 | 3.6 | 3.6 | 3.6 | 3.5 |
| 32 | 1 | 6483.2 | 0.1 | 0.5 | 0.1 | 0.2 | 4.0 | 0.0 | 0.0 | 4.9 | 5.0 | 5.0 | 5.0 | 4.9 |
| 64 | 1 | 9197.7 | 0.1 | 0.5 | 0.0 | 0.2 | 6.1 | 0.0 | 0.0 | 6.9 | 7.0 | 7.0 | 7.0 | 6.9 |
| 128 | 1 | 11136.0 | 0.0 | 0.3 | 0.1 | 0.2 | 10.8 | 0.0 | 0.0 | 11.5 | 11.6 | 11.6 | 11.6 | 11.5 |
| 256 | 1 | 12682.5 | 0.1 | 0.5 | 0.1 | 0.2 | 19.2 | 0.0 | 0.0 | 20.1 | 20.2 | 20.3 | 20.3 | 20.1 |
| 512 | 1 | 12628.1 | 0.1 | 0.5 | 0.1 | 0.4 | 39.5 | 0.0 | 0.0 | 40.5 | 40.7 | 40.7 | 40.8 | 40.5 |
| 1024 | 1 | 13054.4 | 0.1 | 0.5 | 0.1 | 0.6 | 77.1 | 0.0 | 0.0 | 78.4 | 78.9 | 79.0 | 79.2 | 78.4 |
</details>
#### Offline: NVIDIA T4, NVIDIA TensorRT with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA T4 |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_t4_experiment_18_triton_performance_offline_18/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_t4_experiment_18_triton_performance_offline_18/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_t4_experiment_18_triton_performance_offline_18/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 455.5 | 0.1 | 0.3 | 0.0 | 0.1 | 1.6 | 0.0 | 0.0 | 2.2 | 2.3 | 2.3 | 2.3 | 2.2 |
| 2 | 1 | 872.0 | 0.1 | 0.3 | 0.1 | 0.1 | 1.7 | 0.0 | 0.0 | 2.3 | 2.4 | 2.4 | 2.4 | 2.3 |
| 4 | 1 | 1622.0 | 0.1 | 0.2 | 0.1 | 0.1 | 1.9 | 0.0 | 0.0 | 2.5 | 2.5 | 2.5 | 2.6 | 2.4 |
| 8 | 1 | 2882.6 | 0.1 | 0.4 | 0.1 | 0.1 | 2.0 | 0.0 | 0.0 | 2.8 | 2.9 | 2.9 | 2.9 | 2.8 |
| 16 | 1 | 4488.0 | 0.1 | 0.5 | 0.1 | 0.1 | 2.8 | 0.0 | 0.0 | 3.6 | 3.6 | 3.6 | 3.6 | 3.5 |
| 32 | 1 | 6592.0 | 0.1 | 0.5 | 0.1 | 0.1 | 4.1 | 0.0 | 0.0 | 4.8 | 4.9 | 4.9 | 4.9 | 4.8 |
| 64 | 1 | 9341.7 | 0.1 | 0.4 | 0.1 | 0.1 | 6.1 | 0.0 | 0.0 | 6.8 | 6.9 | 6.9 | 7.0 | 6.8 |
| 128 | 1 | 10899.5 | 0.1 | 0.5 | 0.1 | 0.1 | 10.9 | 0.0 | 0.0 | 11.7 | 11.8 | 11.8 | 11.8 | 11.7 |
| 256 | 1 | 12681.3 | 0.1 | 0.4 | 0.1 | 0.2 | 19.3 | 0.0 | 0.0 | 20.1 | 20.3 | 20.3 | 20.4 | 20.1 |
| 512 | 1 | 12651.9 | 0.1 | 0.5 | 0.1 | 0.3 | 39.5 | 0.0 | 0.0 | 40.4 | 40.6 | 40.7 | 40.8 | 40.4 |
| 1024 | 1 | 13003.2 | 0.1 | 0.4 | 0.1 | 0.6 | 77.3 | 0.0 | 0.0 | 78.6 | 79.0 | 79.2 | 79.3 | 78.6 |
</details>
#### Offline: NVIDIA T4, PyTorch with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA T4 |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_t4_experiment_27_triton_performance_offline_27/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_t4_experiment_27_triton_performance_offline_27/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_t4_experiment_27_triton_performance_offline_27/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 127.8 | 0.1 | 0.6 | 0.2 | 0.1 | 6.8 | 0.0 | 0.0 | 7.7 | 8.6 | 8.9 | 9.4 | 7.8 |
| 2 | 1 | 251.0 | 0.1 | 0.6 | 0.1 | 0.1 | 6.9 | 0.0 | 0.0 | 7.8 | 8.8 | 9.2 | 9.6 | 7.9 |
| 4 | 1 | 498.9 | 0.1 | 0.6 | 0.2 | 0.1 | 7.0 | 0.0 | 0.0 | 8.0 | 8.5 | 9.1 | 9.3 | 8.0 |
| 8 | 1 | 975.8 | 0.1 | 0.6 | 0.2 | 0.1 | 7.1 | 0.0 | 0.0 | 8.1 | 8.7 | 8.8 | 9.4 | 8.2 |
| 16 | 1 | 1913.6 | 0.1 | 0.6 | 0.2 | 0.2 | 7.2 | 0.1 | 0.0 | 8.3 | 8.8 | 8.9 | 9.2 | 8.3 |
| 32 | 1 | 2820.9 | 0.1 | 0.6 | 0.1 | 0.2 | 7.5 | 2.8 | 0.0 | 11.3 | 11.6 | 11.6 | 11.8 | 11.3 |
| 64 | 1 | 3366.1 | 0.1 | 0.6 | 0.1 | 0.2 | 8.1 | 9.9 | 0.0 | 18.9 | 19.3 | 19.4 | 19.7 | 19.0 |
| 128 | 1 | 3786.8 | 0.1 | 0.6 | 0.1 | 0.1 | 4.5 | 28.4 | 0.0 | 33.8 | 34.1 | 34.1 | 34.3 | 33.8 |
| 256 | 1 | 3948.1 | 0.1 | 0.6 | 0.1 | 0.2 | 4.4 | 59.4 | 0.0 | 64.7 | 65.5 | 65.8 | 66.0 | 64.7 |
| 512 | 1 | 4079.3 | 0.1 | 0.6 | 0.1 | 0.4 | 4.5 | 119.7 | 0.0 | 125.2 | 127.1 | 127.6 | 128.3 | 125.3 |
| 1024 | 1 | 4095.5 | 0.1 | 0.6 | 0.1 | 0.8 | 4.5 | 243.8 | 0.0 | 250.0 | 251.7 | 252.0 | 252.6 | 249.9 |
</details>
#### Offline: NVIDIA T4, PyTorch with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA T4 |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_t4_experiment_28_triton_performance_offline_28/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_t4_experiment_28_triton_performance_offline_28/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_t4_experiment_28_triton_performance_offline_28/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 136.0 | 0.1 | 0.5 | 0.1 | 0.1 | 6.6 | 0.0 | 0.0 | 7.3 | 7.9 | 8.1 | 8.5 | 7.3 |
| 2 | 1 | 242.8 | 0.1 | 0.6 | 0.1 | 0.1 | 7.2 | 0.0 | 0.0 | 8.1 | 8.7 | 9.0 | 9.4 | 8.2 |
| 4 | 1 | 479.9 | 0.1 | 0.6 | 0.2 | 0.1 | 7.3 | 0.0 | 0.0 | 8.2 | 8.9 | 9.2 | 9.6 | 8.3 |
| 8 | 1 | 943.8 | 0.1 | 0.6 | 0.2 | 0.2 | 7.4 | 0.0 | 0.0 | 8.4 | 9.1 | 9.2 | 9.5 | 8.4 |
| 16 | 1 | 2239.4 | 0.1 | 0.5 | 0.1 | 0.1 | 4.2 | 2.1 | 0.0 | 7.1 | 7.2 | 7.2 | 7.3 | 7.1 |
| 32 | 1 | 2975.5 | 0.1 | 0.5 | 0.1 | 0.1 | 4.5 | 5.5 | 0.0 | 10.7 | 10.9 | 10.9 | 10.9 | 10.7 |
| 64 | 1 | 3436.1 | 0.1 | 0.5 | 0.1 | 0.1 | 5.7 | 12.0 | 0.0 | 18.6 | 19.1 | 19.3 | 19.5 | 18.6 |
| 128 | 1 | 3786.8 | 0.1 | 0.5 | 0.1 | 0.2 | 5.7 | 27.1 | 0.0 | 33.7 | 34.0 | 34.1 | 34.2 | 33.7 |
| 256 | 1 | 3963.6 | 0.1 | 0.6 | 0.1 | 0.3 | 7.0 | 56.4 | 0.0 | 64.5 | 65.2 | 65.4 | 65.8 | 64.5 |
| 512 | 1 | 4103.6 | 0.1 | 0.6 | 0.1 | 0.4 | 6.1 | 117.4 | 0.0 | 124.6 | 126.3 | 126.6 | 127.1 | 124.7 |
| 1024 | 1 | 4120.2 | 0.1 | 0.4 | 0.1 | 1.0 | 7.1 | 239.7 | 0.0 | 248.3 | 250.3 | 250.9 | 251.8 | 248.3 |
</details>
### Online scenario
The online scenario assumes the client and server are located on different hosts. The tests use the following setup:
- tensors are passed from the client to the server over HTTP
- concurrent requests are sent from the client to the server, and the final batch is created on the server side
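For illustration, the sketch below shows one way such a concurrent request pattern can be driven from Python with the Triton HTTP client. The model name, input tensor name, and input shape used here are placeholders, not the deployed TFT model's real signature, and the numbers in the tables below come from the repository's own measurement tooling rather than from this snippet.

```python
import numpy as np
import tritonclient.http as httpclient

# Placeholder names/shapes -- the deployed TFT model exposes its own
# (multi-input) signature; this only illustrates the online request pattern.
MODEL_NAME = "tft_model"
INPUT_NAME = "INPUT__0"
BATCH_SIZE = 16
CONCURRENCY = 8

# `concurrency` sizes the client-side connection pool so that several
# async_infer() calls can be in flight at the same time.
client = httpclient.InferenceServerClient(url="localhost:8000", concurrency=CONCURRENCY)

# Build one request payload (random data with a placeholder shape).
batch = np.random.rand(BATCH_SIZE, 168, 1).astype(np.float32)
inp = httpclient.InferInput(INPUT_NAME, batch.shape, "FP32")
inp.set_data_from_numpy(batch)

# Keep CONCURRENCY requests outstanding; the server can then assemble the
# final batch from the concurrently arriving requests, which is what the
# online scenario measures.
pending = [client.async_infer(MODEL_NAME, inputs=[inp]) for _ in range(CONCURRENCY)]
results = [req.get_result() for req in pending]
print(f"completed {len(results)} concurrent requests")
```

In the result tables that follow, the client batch size is fixed at 16 and the number of concurrently outstanding requests is swept, which is the knob the sketch above exposes as `CONCURRENCY`.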
#### Online: NVIDIA A30, NVIDIA TensorRT with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA A30 |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_a30_experiment_17_triton_performance_online_17/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 15360.0 | 0.1 | 0.3 | 3.6 | 0.1 | 4.0 | 0.0 | 0.0 | 8.2 | 8.3 | 8.4 | 8.7 | 8.2 |
| 16 | 16 | 15696.0 | 0.1 | 0.5 | 8.5 | 0.2 | 6.9 | 0.1 | 0.0 | 16.4 | 20.2 | 20.4 | 22.2 | 16.2 |
| 16 | 24 | 17072.0 | 0.1 | 0.8 | 10.8 | 0.2 | 10.2 | 0.1 | 0.0 | 22.3 | 30.5 | 31.9 | 33.4 | 22.2 |
| 16 | 32 | 16640.0 | 0.1 | 1.0 | 14.5 | 0.3 | 14.4 | 0.1 | 0.0 | 32.0 | 36.1 | 36.6 | 39.2 | 30.3 |
| 16 | 40 | 19120.0 | 0.1 | 1.6 | 13.8 | 0.3 | 17.2 | 0.1 | 0.0 | 34.9 | 43.8 | 46.3 | 48.5 | 33.1 |
| 16 | 48 | 15984.0 | 0.1 | 1.7 | 16.1 | 0.4 | 27.9 | 0.1 | 0.0 | 49.2 | 52.5 | 53.0 | 53.5 | 46.2 |
| 16 | 56 | 16528.0 | 0.1 | 1.9 | 21.7 | 0.4 | 26.3 | 0.0 | 0.0 | 52.6 | 56.2 | 56.4 | 57.0 | 50.4 |
| 16 | 64 | 16256.0 | 0.1 | 2.2 | 30.6 | 0.3 | 27.0 | 0.0 | 0.0 | 63.8 | 66.2 | 66.5 | 66.9 | 60.3 |
| 16 | 72 | 17696.0 | 0.1 | 2.5 | 34.4 | 0.4 | 25.8 | 0.0 | 0.0 | 65.5 | 68.9 | 69.6 | 70.3 | 63.3 |
| 16 | 80 | 16976.0 | 0.1 | 2.1 | 38.8 | 0.4 | 32.0 | 0.1 | 0.0 | 78.7 | 82.1 | 82.6 | 82.9 | 73.4 |
| 16 | 88 | 20464.0 | 0.1 | 2.7 | 32.0 | 0.6 | 30.5 | 0.0 | 0.0 | 62.7 | 79.0 | 80.0 | 80.8 | 66.0 |
| 16 | 96 | 20064.0 | 0.1 | 2.9 | 39.5 | 0.6 | 31.3 | 0.1 | 0.0 | 75.6 | 79.8 | 80.6 | 81.0 | 74.3 |
| 16 | 104 | 20768.0 | 0.1 | 3.9 | 38.1 | 0.7 | 34.1 | 0.1 | 0.0 | 79.3 | 82.7 | 83.3 | 83.7 | 77.0 |
| 16 | 112 | 22032.0 | 0.1 | 3.5 | 43.1 | 0.7 | 33.1 | 0.1 | 0.0 | 83.0 | 84.1 | 84.3 | 84.5 | 80.5 |
| 16 | 120 | 21584.0 | 0.1 | 3.4 | 49.9 | 0.8 | 33.0 | 0.1 | 0.0 | 92.2 | 93.1 | 93.2 | 94.2 | 87.3 |
| 16 | 128 | 23280.0 | 0.1 | 2.4 | 41.9 | 0.7 | 37.3 | 0.1 | 0.0 | 84.4 | 94.2 | 103.3 | 104.8 | 82.5 |
| 16 | 136 | 23232.0 | 0.1 | 3.6 | 52.6 | 0.7 | 32.7 | 0.1 | 0.0 | 92.4 | 93.4 | 93.7 | 94.4 | 89.7 |
| 16 | 144 | 24224.0 | 0.1 | 3.7 | 50.7 | 0.8 | 34.6 | 0.1 | 0.0 | 92.8 | 95.0 | 96.1 | 102.7 | 90.0 |
| 16 | 152 | 23232.0 | 0.1 | 2.7 | 64.5 | 0.7 | 33.4 | 0.1 | 0.0 | 102.5 | 112.5 | 117.3 | 123.3 | 101.6 |
| 16 | 160 | 21040.0 | 0.1 | 4.6 | 72.2 | 0.8 | 38.0 | 0.1 | 0.0 | 127.8 | 130.2 | 130.8 | 150.9 | 115.8 |
| 16 | 168 | 23848.2 | 0.1 | 4.5 | 66.3 | 0.9 | 35.8 | 0.1 | 0.0 | 109.8 | 111.1 | 111.3 | 111.7 | 107.7 |
| 16 | 176 | 23280.0 | 0.1 | 4.8 | 60.5 | 0.8 | 40.5 | 0.1 | 0.0 | 109.4 | 117.4 | 130.9 | 133.3 | 106.8 |
| 16 | 184 | 21594.4 | 0.3 | 2.8 | 87.2 | 0.9 | 36.6 | 0.1 | 0.0 | 130.0 | 145.0 | 145.2 | 146.6 | 127.8 |
| 16 | 192 | 20816.0 | 0.3 | 3.5 | 99.0 | 0.9 | 36.5 | 0.1 | 0.0 | 145.1 | 147.1 | 148.0 | 165.5 | 140.3 |
| 16 | 200 | 20224.0 | 0.3 | 3.5 | 104.1 | 0.8 | 37.4 | 0.1 | 0.0 | 145.7 | 147.6 | 148.1 | 165.8 | 146.1 |
| 16 | 208 | 21744.0 | 0.2 | 3.9 | 98.5 | 1.0 | 39.0 | 0.2 | 0.0 | 145.8 | 150.7 | 166.3 | 168.3 | 142.8 |
| 16 | 216 | 20112.0 | 0.4 | 2.7 | 117.8 | 0.8 | 34.0 | 0.2 | 0.0 | 156.1 | 157.2 | 157.4 | 157.8 | 156.0 |
| 16 | 224 | 23504.0 | 0.4 | 5.2 | 99.3 | 0.9 | 39.3 | 0.2 | 0.0 | 147.0 | 151.3 | 167.6 | 168.0 | 145.3 |
| 16 | 232 | 24352.0 | 0.5 | 3.6 | 93.6 | 1.0 | 41.3 | 0.2 | 0.0 | 144.9 | 148.2 | 167.3 | 169.5 | 140.2 |
| 16 | 240 | 25760.0 | 0.4 | 2.8 | 89.5 | 0.9 | 45.9 | 0.1 | 0.0 | 140.8 | 159.9 | 171.6 | 181.1 | 139.7 |
| 16 | 248 | 23872.0 | 0.5 | 2.5 | 114.7 | 1.0 | 34.7 | 0.1 | 0.0 | 156.6 | 158.2 | 158.8 | 164.2 | 153.4 |
| 16 | 256 | 24960.0 | 0.5 | 3.4 | 105.6 | 1.1 | 40.0 | 0.1 | 0.0 | 152.3 | 173.8 | 182.2 | 188.4 | 150.8 |
</details>
#### Online: NVIDIA A30, NVIDIA TensorRT with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA A30 |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_a30_experiment_18_triton_performance_online_18/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 15104.0 | 0.1 | 0.5 | 3.6 | 0.1 | 4.0 | 0.1 | 0.0 | 8.4 | 8.4 | 8.5 | 8.5 | 8.4 |
| 16 | 16 | 15328.0 | 0.1 | 0.7 | 8.5 | 0.2 | 7.1 | 0.1 | 0.0 | 16.8 | 20.8 | 21.1 | 23.1 | 16.6 |
| 16 | 24 | 17072.0 | 0.1 | 1.2 | 10.4 | 0.3 | 10.2 | 0.1 | 0.0 | 23.6 | 30.2 | 30.6 | 32.2 | 22.3 |
| 16 | 32 | 16176.0 | 0.1 | 1.8 | 14.0 | 0.3 | 14.4 | 0.1 | 0.0 | 33.5 | 35.9 | 36.0 | 36.5 | 30.6 |
| 16 | 40 | 18288.0 | 0.1 | 1.7 | 17.3 | 0.3 | 14.5 | 0.1 | 0.0 | 35.8 | 39.6 | 39.9 | 41.3 | 34.0 |
| 16 | 48 | 17136.0 | 0.1 | 2.0 | 18.0 | 0.4 | 22.8 | 0.1 | 0.0 | 45.6 | 51.5 | 52.5 | 53.9 | 43.4 |
| 16 | 56 | 16992.0 | 0.1 | 2.9 | 22.3 | 0.5 | 26.1 | 0.1 | 0.0 | 55.4 | 56.8 | 57.2 | 57.5 | 51.9 |
| 16 | 64 | 17552.0 | 0.1 | 2.8 | 25.2 | 0.5 | 26.7 | 0.1 | 0.0 | 56.2 | 65.9 | 66.3 | 66.6 | 55.4 |
| 16 | 72 | 19552.0 | 0.1 | 3.3 | 28.8 | 0.6 | 25.4 | 0.1 | 0.0 | 65.2 | 66.6 | 67.0 | 69.4 | 58.3 |
| 16 | 80 | 21072.0 | 0.1 | 3.2 | 26.2 | 0.7 | 29.3 | 0.2 | 0.0 | 62.3 | 65.4 | 66.0 | 66.3 | 59.7 |
| 16 | 88 | 19392.0 | 0.1 | 2.3 | 36.0 | 0.8 | 30.6 | 0.1 | 0.0 | 68.1 | 82.9 | 83.7 | 84.1 | 69.9 |
| 16 | 96 | 19168.0 | 0.1 | 3.5 | 38.0 | 0.7 | 33.9 | 0.2 | 0.0 | 79.2 | 80.2 | 80.6 | 83.3 | 76.3 |
| 16 | 104 | 17920.0 | 0.1 | 3.1 | 51.8 | 0.8 | 32.2 | 0.2 | 0.0 | 92.5 | 93.4 | 93.8 | 94.3 | 88.2 |
| 16 | 112 | 21296.0 | 0.1 | 3.8 | 39.7 | 1.0 | 34.7 | 0.2 | 0.0 | 83.4 | 84.3 | 84.8 | 104.0 | 79.4 |
| 16 | 120 | 22032.0 | 0.1 | 3.1 | 45.0 | 0.8 | 33.0 | 0.2 | 0.0 | 82.9 | 93.0 | 93.5 | 94.7 | 82.2 |
| 16 | 128 | 21882.1 | 0.1 | 3.1 | 53.6 | 0.9 | 32.5 | 0.2 | 0.0 | 93.0 | 93.6 | 93.8 | 94.4 | 90.4 |
| 16 | 136 | 25552.0 | 0.1 | 3.8 | 41.3 | 1.0 | 37.3 | 0.2 | 0.0 | 83.9 | 93.7 | 105.3 | 108.0 | 83.7 |
| 16 | 144 | 21904.0 | 0.1 | 5.5 | 60.9 | 0.8 | 33.6 | 0.2 | 0.0 | 103.9 | 113.3 | 113.4 | 132.9 | 101.1 |
| 16 | 152 | 21456.0 | 0.1 | 3.6 | 66.5 | 0.8 | 35.6 | 0.2 | 0.0 | 109.4 | 110.0 | 110.2 | 110.5 | 106.8 |
| 16 | 160 | 23040.0 | 0.2 | 3.3 | 59.4 | 0.9 | 40.4 | 0.2 | 0.0 | 109.7 | 129.7 | 130.1 | 130.9 | 104.3 |
| 16 | 168 | 19600.0 | 0.2 | 0.9 | 88.8 | 0.8 | 34.2 | 0.1 | 0.0 | 128.7 | 131.4 | 144.9 | 145.6 | 125.0 |
| 16 | 176 | 20880.0 | 0.2 | 4.6 | 84.9 | 0.9 | 34.9 | 0.1 | 0.0 | 129.2 | 130.0 | 130.6 | 133.1 | 125.6 |
| 16 | 184 | 22409.6 | 0.2 | 6.5 | 78.3 | 1.1 | 40.1 | 0.1 | 0.0 | 129.6 | 146.7 | 147.9 | 149.9 | 126.2 |
| 16 | 192 | 19456.0 | 0.2 | 3.9 | 101.8 | 0.9 | 35.5 | 0.2 | 0.0 | 145.9 | 147.1 | 147.3 | 147.7 | 142.4 |
| 16 | 200 | 20155.8 | 0.2 | 3.7 | 105.2 | 1.0 | 35.6 | 0.1 | 0.0 | 146.6 | 147.3 | 147.7 | 148.3 | 145.9 |
| 16 | 208 | 21040.0 | 0.3 | 3.8 | 100.1 | 0.8 | 40.2 | 0.1 | 0.0 | 145.7 | 165.6 | 166.2 | 172.1 | 145.4 |
| 16 | 216 | 20784.0 | 0.4 | 2.7 | 117.4 | 0.8 | 34.0 | 0.1 | 0.0 | 155.5 | 156.4 | 156.6 | 156.9 | 155.3 |
| 16 | 224 | 23344.0 | 0.5 | 3.6 | 99.0 | 0.8 | 41.6 | 0.1 | 0.0 | 149.9 | 157.3 | 173.8 | 190.6 | 145.7 |
| 16 | 232 | 21760.0 | 0.4 | 3.2 | 117.4 | 0.9 | 34.2 | 0.2 | 0.0 | 156.7 | 157.3 | 157.5 | 158.1 | 156.3 |
| 16 | 240 | 20784.0 | 0.2 | 4.4 | 126.7 | 1.0 | 34.1 | 0.1 | 0.0 | 166.6 | 169.1 | 169.5 | 169.8 | 166.6 |
| 16 | 248 | 26352.0 | 0.3 | 3.7 | 107.7 | 1.1 | 32.3 | 0.1 | 0.0 | 146.9 | 149.2 | 163.2 | 169.4 | 145.3 |
| 16 | 256 | 23408.0 | 0.4 | 4.9 | 116.1 | 1.1 | 42.3 | 0.1 | 0.0 | 163.0 | 197.6 | 201.1 | 204.3 | 164.9 |
</details>
#### Online: NVIDIA A30, PyTorch with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA A30 |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_a30_experiment_27_triton_performance_online_27/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 5528.0 | 0.1 | 0.8 | 8.1 | 0.5 | 13.1 | 0.3 | 0.0 | 26.2 | 28.1 | 28.7 | 30.3 | 22.8 |
| 16 | 16 | 9120.0 | 0.1 | 0.6 | 10.3 | 0.7 | 10.5 | 5.3 | 0.0 | 30.8 | 33.5 | 34.7 | 35.8 | 27.5 |
| 16 | 24 | 10384.0 | 0.1 | 0.8 | 14.0 | 1.1 | 10.6 | 9.3 | 0.0 | 39.3 | 42.4 | 43.1 | 46.0 | 35.8 |
| 16 | 32 | 11076.9 | 0.1 | 1.2 | 18.8 | 1.4 | 10.2 | 13.2 | 0.0 | 48.5 | 51.1 | 51.5 | 54.6 | 44.9 |
| 16 | 40 | 11328.0 | 0.1 | 2.0 | 21.6 | 2.3 | 10.7 | 18.4 | 0.0 | 58.8 | 62.0 | 63.2 | 67.5 | 55.1 |
| 16 | 48 | 11296.0 | 0.1 | 3.2 | 25.3 | 5.1 | 9.3 | 22.1 | 0.0 | 67.7 | 73.3 | 76.0 | 79.1 | 65.1 |
| 16 | 56 | 11440.0 | 0.1 | 3.3 | 29.6 | 5.0 | 9.9 | 26.1 | 0.0 | 77.3 | 82.5 | 83.9 | 92.3 | 74.0 |
| 16 | 64 | 11600.0 | 0.1 | 2.9 | 35.5 | 7.6 | 9.3 | 29.0 | 0.0 | 88.5 | 95.2 | 98.9 | 113.5 | 84.4 |
| 16 | 72 | 11316.7 | 0.1 | 4.3 | 38.1 | 16.0 | 7.7 | 29.3 | 0.0 | 99.4 | 103.1 | 123.0 | 125.8 | 95.5 |
| 16 | 80 | 11664.0 | 0.1 | 4.0 | 46.0 | 18.0 | 7.5 | 28.0 | 0.0 | 108.4 | 112.7 | 116.1 | 126.0 | 103.7 |
| 16 | 88 | 11472.0 | 0.1 | 3.0 | 47.8 | 19.8 | 8.2 | 34.4 | 0.0 | 119.7 | 128.6 | 131.9 | 135.5 | 113.3 |
| 16 | 96 | 11760.0 | 0.1 | 4.4 | 53.1 | 22.1 | 7.3 | 36.1 | 0.0 | 128.7 | 131.5 | 132.1 | 133.3 | 123.1 |
| 16 | 104 | 11840.0 | 0.1 | 5.4 | 59.4 | 5.7 | 9.8 | 51.0 | 0.0 | 132.7 | 138.7 | 138.9 | 175.8 | 131.5 |
| 16 | 112 | 11728.0 | 0.1 | 4.2 | 59.1 | 16.9 | 8.8 | 51.3 | 0.0 | 146.7 | 162.7 | 164.0 | 168.4 | 140.3 |
| 16 | 120 | 11796.2 | 0.1 | 5.3 | 54.2 | 20.6 | 7.6 | 61.4 | 0.0 | 155.3 | 164.2 | 172.6 | 173.1 | 149.2 |
| 16 | 128 | 12272.0 | 0.1 | 6.3 | 64.6 | 16.7 | 7.6 | 61.5 | 0.0 | 165.7 | 175.9 | 194.4 | 197.7 | 156.8 |
| 16 | 136 | 11680.0 | 0.1 | 6.0 | 74.7 | 33.5 | 6.6 | 48.7 | 0.0 | 178.5 | 183.0 | 183.9 | 186.4 | 169.5 |
| 16 | 144 | 11408.0 | 0.1 | 5.5 | 76.6 | 33.3 | 7.1 | 55.4 | 0.0 | 190.7 | 198.8 | 203.2 | 204.6 | 178.0 |
| 16 | 152 | 11456.0 | 0.1 | 4.7 | 87.4 | 28.8 | 7.2 | 60.8 | 0.0 | 193.9 | 199.5 | 200.2 | 201.1 | 189.0 |
| 16 | 160 | 11444.6 | 0.2 | 4.7 | 94.3 | 24.3 | 7.0 | 67.1 | 0.0 | 198.0 | 199.4 | 199.5 | 199.6 | 197.5 |
| 16 | 168 | 11040.0 | 0.1 | 7.5 | 89.1 | 35.2 | 6.8 | 70.2 | 0.0 | 214.2 | 220.1 | 222.9 | 225.2 | 208.9 |
| 16 | 176 | 11536.0 | 0.2 | 4.7 | 97.1 | 39.1 | 7.0 | 67.9 | 0.0 | 221.9 | 239.7 | 242.6 | 255.8 | 216.0 |
| 16 | 184 | 11136.0 | 0.1 | 6.5 | 101.3 | 41.8 | 7.1 | 67.2 | 0.0 | 231.3 | 236.7 | 240.0 | 240.4 | 224.1 |
| 16 | 192 | 11376.0 | 0.2 | 6.4 | 106.9 | 47.0 | 7.6 | 68.9 | 0.0 | 245.5 | 252.9 | 256.1 | 265.9 | 237.1 |
| 16 | 200 | 11840.0 | 0.3 | 5.0 | 110.3 | 46.4 | 7.0 | 72.7 | 0.0 | 255.0 | 262.0 | 267.0 | 267.9 | 241.8 |
| 16 | 208 | 11680.0 | 0.2 | 5.3 | 122.0 | 37.8 | 7.6 | 78.0 | 0.0 | 252.1 | 254.0 | 309.6 | 311.0 | 250.9 |
| 16 | 216 | 11280.0 | 0.2 | 6.0 | 151.5 | 41.8 | 6.9 | 59.4 | 0.0 | 270.5 | 279.9 | 283.2 | 283.9 | 265.8 |
| 16 | 224 | 11152.0 | 0.4 | 5.9 | 127.1 | 51.8 | 7.0 | 79.1 | 0.0 | 280.9 | 283.7 | 284.6 | 285.1 | 271.3 |
| 16 | 232 | 10848.0 | 0.2 | 5.0 | 158.1 | 41.7 | 7.8 | 72.7 | 0.0 | 287.4 | 306.0 | 315.8 | 316.9 | 285.5 |
| 16 | 240 | 11088.0 | 0.2 | 10.1 | 166.0 | 34.4 | 7.2 | 78.0 | 0.0 | 296.1 | 318.6 | 348.7 | 354.4 | 295.8 |
| 16 | 248 | 10485.5 | 0.3 | 5.8 | 174.3 | 40.1 | 7.2 | 75.4 | 0.0 | 307.6 | 316.7 | 322.0 | 323.7 | 303.2 |
| 16 | 256 | 11168.0 | 0.4 | 4.5 | 178.3 | 45.8 | 7.1 | 77.2 | 0.0 | 320.5 | 341.6 | 342.6 | 348.6 | 313.2 |
</details>
#### Online: NVIDIA A30, PyTorch with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA A30 |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_a30_experiment_28_triton_performance_online_28/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 6544.0 | 0.1 | 0.5 | 7.0 | 0.4 | 8.8 | 2.6 | 0.0 | 22.1 | 23.9 | 24.5 | 25.8 | 19.3 |
| 16 | 16 | 9456.0 | 0.1 | 0.6 | 9.7 | 0.8 | 8.7 | 6.9 | 0.0 | 30.5 | 32.8 | 33.4 | 34.2 | 26.6 |
| 16 | 24 | 10704.0 | 0.1 | 0.8 | 13.8 | 0.9 | 8.5 | 11.3 | 0.0 | 39.0 | 41.9 | 42.2 | 42.7 | 35.4 |
| 16 | 32 | 11472.0 | 0.1 | 0.9 | 18.3 | 1.3 | 8.4 | 15.0 | 0.0 | 48.1 | 50.2 | 51.1 | 51.9 | 44.0 |
| 16 | 40 | 11568.0 | 0.1 | 1.3 | 21.8 | 1.5 | 8.6 | 20.1 | 0.0 | 57.7 | 60.4 | 60.8 | 62.3 | 53.4 |
| 16 | 48 | 12000.0 | 0.1 | 2.8 | 24.6 | 1.3 | 8.7 | 25.6 | 0.0 | 66.3 | 68.3 | 68.6 | 69.3 | 63.1 |
| 16 | 56 | 12048.0 | 0.1 | 3.1 | 20.9 | 1.6 | 8.3 | 37.6 | 0.0 | 75.2 | 77.2 | 77.9 | 78.8 | 71.5 |
| 16 | 64 | 11824.0 | 0.1 | 2.8 | 29.1 | 1.8 | 8.5 | 38.8 | 0.0 | 85.2 | 87.8 | 88.4 | 89.3 | 81.0 |
| 16 | 72 | 11888.0 | 0.1 | 2.2 | 36.1 | 2.0 | 8.8 | 40.8 | 0.0 | 93.9 | 96.0 | 96.5 | 101.8 | 90.0 |
| 16 | 80 | 11712.0 | 0.1 | 3.7 | 44.4 | 10.6 | 8.1 | 36.3 | 0.0 | 107.1 | 119.0 | 121.6 | 128.2 | 103.3 |
| 16 | 88 | 12240.0 | 0.1 | 4.5 | 44.7 | 5.7 | 7.9 | 48.6 | 0.0 | 115.8 | 119.8 | 130.2 | 153.3 | 111.5 |
| 16 | 96 | 11888.0 | 0.1 | 3.0 | 48.8 | 10.6 | 7.8 | 50.0 | 0.0 | 127.1 | 135.0 | 152.9 | 179.4 | 120.3 |
| 16 | 104 | 12096.0 | 0.1 | 3.4 | 59.4 | 10.2 | 7.4 | 48.6 | 0.0 | 134.8 | 139.1 | 146.7 | 158.2 | 129.1 |
| 16 | 112 | 11408.0 | 0.1 | 5.3 | 57.8 | 27.2 | 5.8 | 46.0 | 0.0 | 146.4 | 147.8 | 149.7 | 155.4 | 142.2 |
| 16 | 120 | 11812.2 | 0.1 | 6.7 | 63.8 | 14.0 | 6.8 | 57.3 | 0.0 | 153.3 | 157.9 | 160.4 | 161.9 | 148.7 |
| 16 | 128 | 11632.0 | 0.1 | 4.9 | 69.6 | 15.9 | 7.3 | 59.2 | 0.0 | 163.6 | 177.1 | 180.0 | 205.3 | 157.0 |
| 16 | 136 | 11620.4 | 0.1 | 3.5 | 76.0 | 9.8 | 8.2 | 68.3 | 0.0 | 172.9 | 182.9 | 195.5 | 196.8 | 166.0 |
| 16 | 144 | 11824.0 | 0.1 | 3.3 | 81.3 | 24.9 | 7.0 | 60.9 | 0.0 | 181.9 | 187.9 | 210.9 | 211.8 | 177.5 |
| 16 | 152 | 12032.0 | 0.1 | 3.8 | 85.9 | 22.9 | 7.1 | 67.1 | 0.0 | 192.9 | 219.2 | 239.1 | 252.4 | 187.0 |
| 16 | 160 | 12048.0 | 0.1 | 4.0 | 89.0 | 21.3 | 6.5 | 72.7 | 0.0 | 199.7 | 206.4 | 230.8 | 246.6 | 193.7 |
| 16 | 168 | 11456.0 | 0.1 | 4.4 | 93.2 | 30.2 | 5.7 | 70.5 | 0.0 | 208.4 | 209.8 | 211.8 | 212.0 | 204.3 |
| 16 | 176 | 11584.0 | 0.2 | 5.7 | 100.5 | 38.5 | 6.5 | 64.0 | 0.0 | 219.8 | 221.4 | 222.1 | 223.7 | 215.4 |
| 16 | 184 | 12096.0 | 0.2 | 5.6 | 103.2 | 40.9 | 6.0 | 69.2 | 0.0 | 230.2 | 233.5 | 233.8 | 233.9 | 225.0 |
| 16 | 192 | 11200.0 | 0.2 | 6.2 | 107.5 | 35.4 | 6.5 | 79.3 | 0.0 | 241.6 | 251.3 | 254.8 | 255.0 | 235.0 |
| 16 | 200 | 10880.0 | 0.3 | 5.0 | 113.9 | 31.7 | 7.0 | 88.9 | 0.0 | 255.2 | 267.0 | 294.9 | 296.2 | 246.8 |
| 16 | 208 | 11984.0 | 0.1 | 6.4 | 116.5 | 45.0 | 6.2 | 78.1 | 0.0 | 261.3 | 267.0 | 268.0 | 268.4 | 252.3 |
| 16 | 216 | 11632.0 | 0.2 | 6.9 | 121.8 | 39.8 | 6.8 | 90.8 | 0.0 | 275.9 | 280.9 | 282.2 | 282.5 | 266.4 |
| 16 | 224 | 11140.9 | 0.3 | 6.6 | 128.6 | 49.4 | 6.8 | 84.3 | 0.0 | 284.0 | 288.6 | 294.6 | 295.2 | 275.8 |
| 16 | 232 | 11568.0 | 0.2 | 5.2 | 162.0 | 15.2 | 8.1 | 89.0 | 0.0 | 285.6 | 312.9 | 315.5 | 335.5 | 279.7 |
| 16 | 240 | 11696.0 | 0.3 | 5.3 | 167.3 | 40.9 | 6.2 | 75.4 | 0.0 | 300.4 | 309.2 | 317.6 | 318.4 | 295.3 |
| 16 | 248 | 11040.0 | 0.2 | 8.0 | 174.9 | 32.4 | 7.1 | 82.8 | 0.0 | 307.4 | 327.0 | 370.7 | 371.9 | 305.6 |
| 16 | 256 | 10528.0 | 0.5 | 4.0 | 179.5 | 42.6 | 6.8 | 80.8 | 0.0 | 321.4 | 325.7 | 326.0 | 327.2 | 314.2 |
</details>
#### Online: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_17_triton_performance_online_17/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 11776.0 | 0.1 | 0.5 | 4.7 | 0.2 | 5.3 | 0.0 | 0.0 | 10.8 | 10.9 | 11.0 | 11.0 | 10.7 |
| 16 | 16 | 11360.0 | 0.1 | 0.7 | 11.7 | 0.2 | 9.4 | 0.0 | 0.0 | 23.1 | 28.6 | 32.0 | 32.2 | 22.1 |
| 16 | 24 | 12656.0 | 0.1 | 1.0 | 15.8 | 0.3 | 12.8 | 0.0 | 0.0 | 33.8 | 34.3 | 34.4 | 37.7 | 30.1 |
| 16 | 32 | 11968.0 | 0.1 | 1.6 | 20.9 | 0.4 | 18.8 | 0.0 | 0.0 | 44.2 | 48.0 | 48.1 | 48.7 | 41.8 |
| 16 | 40 | 14640.0 | 0.1 | 1.5 | 20.9 | 0.4 | 19.6 | 0.0 | 0.0 | 47.6 | 48.0 | 48.0 | 48.1 | 42.6 |
| 16 | 48 | 13280.0 | 0.1 | 1.6 | 32.8 | 0.4 | 21.3 | 0.0 | 0.0 | 62.9 | 63.4 | 63.5 | 63.6 | 56.3 |
| 16 | 56 | 13232.0 | 0.1 | 1.9 | 28.4 | 0.6 | 33.8 | 0.0 | 0.0 | 66.9 | 71.8 | 72.2 | 72.3 | 64.8 |
| 16 | 64 | 12656.0 | 0.1 | 1.9 | 42.4 | 0.6 | 32.3 | 0.0 | 0.0 | 82.2 | 83.0 | 83.6 | 83.8 | 77.3 |
| 16 | 72 | 16671.3 | 0.1 | 2.0 | 40.8 | 0.5 | 24.0 | 0.0 | 0.0 | 73.4 | 74.0 | 83.6 | 84.0 | 67.5 |
| 16 | 80 | 16384.0 | 0.1 | 2.1 | 36.3 | 0.6 | 34.6 | 0.1 | 0.0 | 76.8 | 77.3 | 77.4 | 77.6 | 73.7 |
| 16 | 88 | 13728.0 | 0.1 | 2.3 | 53.4 | 0.6 | 38.5 | 0.0 | 0.0 | 100.5 | 101.3 | 101.5 | 101.7 | 95.0 |
| 16 | 96 | 15104.0 | 0.1 | 3.0 | 53.7 | 0.7 | 39.6 | 0.1 | 0.0 | 101.2 | 101.8 | 102.0 | 102.2 | 97.1 |
| 16 | 104 | 14512.0 | 0.1 | 2.0 | 66.6 | 0.7 | 38.5 | 0.1 | 0.0 | 111.1 | 111.5 | 111.7 | 111.9 | 107.9 |
| 16 | 112 | 18464.0 | 0.1 | 3.0 | 49.7 | 1.0 | 40.8 | 0.1 | 0.0 | 96.6 | 101.7 | 101.9 | 102.2 | 94.7 |
| 16 | 120 | 17760.0 | 0.1 | 2.9 | 63.4 | 1.2 | 37.7 | 0.1 | 0.0 | 112.1 | 113.4 | 113.8 | 113.9 | 105.4 |
| 16 | 128 | 17808.0 | 0.1 | 3.9 | 64.6 | 0.9 | 39.5 | 0.1 | 0.0 | 111.7 | 112.3 | 112.5 | 112.5 | 109.0 |
| 16 | 136 | 16848.0 | 0.1 | 2.7 | 74.9 | 0.8 | 41.1 | 0.1 | 0.0 | 129.9 | 130.6 | 130.7 | 130.7 | 119.7 |
| 16 | 144 | 19216.0 | 0.1 | 3.7 | 66.2 | 1.0 | 38.9 | 0.1 | 0.0 | 112.5 | 113.3 | 113.5 | 114.1 | 110.1 |
| 16 | 152 | 20864.0 | 0.1 | 4.3 | 65.4 | 1.0 | 39.1 | 0.2 | 0.0 | 112.3 | 113.4 | 113.7 | 114.9 | 110.2 |
| 16 | 160 | 18288.0 | 0.1 | 3.8 | 81.3 | 1.2 | 42.7 | 0.1 | 0.0 | 131.4 | 133.1 | 134.3 | 135.1 | 129.2 |
| 16 | 168 | 19152.0 | 0.2 | 3.1 | 81.6 | 1.1 | 42.6 | 0.1 | 0.0 | 131.2 | 131.6 | 131.7 | 131.8 | 128.7 |
| 16 | 176 | 15152.0 | 0.2 | 2.5 | 127.3 | 0.9 | 42.8 | 0.1 | 0.0 | 174.9 | 175.3 | 175.4 | 175.4 | 173.9 |
| 16 | 184 | 15824.0 | 0.1 | 3.9 | 126.7 | 1.0 | 42.8 | 0.1 | 0.0 | 175.5 | 176.1 | 176.3 | 176.4 | 174.6 |
| 16 | 192 | 18096.0 | 0.2 | 3.0 | 113.1 | 1.0 | 40.2 | 0.1 | 0.0 | 155.7 | 174.7 | 174.9 | 175.0 | 157.6 |
| 16 | 200 | 18128.0 | 0.2 | 3.1 | 121.0 | 1.1 | 39.1 | 0.1 | 0.0 | 165.0 | 165.9 | 166.2 | 166.6 | 164.7 |
| 16 | 208 | 16720.0 | 0.3 | 3.1 | 127.9 | 1.2 | 42.9 | 0.2 | 0.0 | 176.3 | 178.0 | 178.9 | 179.2 | 175.5 |
| 16 | 216 | 18221.8 | 0.4 | 2.4 | 127.4 | 1.1 | 42.6 | 0.1 | 0.0 | 174.9 | 175.2 | 175.3 | 175.4 | 174.0 |
| 16 | 224 | 18944.0 | 0.3 | 3.1 | 127.4 | 1.1 | 42.8 | 0.1 | 0.0 | 175.8 | 176.3 | 176.4 | 176.5 | 174.9 |
| 16 | 232 | 19484.5 | 0.4 | 3.3 | 126.9 | 1.2 | 42.7 | 0.1 | 0.0 | 175.2 | 176.5 | 176.8 | 177.2 | 174.7 |
| 16 | 240 | 17696.0 | 0.5 | 2.1 | 147.7 | 1.2 | 40.8 | 0.1 | 0.0 | 199.8 | 200.7 | 200.8 | 201.1 | 192.3 |
| 16 | 248 | 17856.0 | 0.5 | 3.0 | 150.1 | 1.1 | 41.3 | 0.1 | 0.0 | 199.8 | 201.0 | 201.2 | 201.5 | 196.1 |
| 16 | 256 | 17712.0 | 0.6 | 2.6 | 155.2 | 1.2 | 41.4 | 0.2 | 0.0 | 201.5 | 202.3 | 202.6 | 202.7 | 201.2 |
</details>
#### Online: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_18_triton_performance_online_18/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 12083.9 | 0.1 | 0.4 | 4.6 | 0.2 | 5.1 | 0.0 | 0.0 | 10.5 | 10.7 | 10.7 | 10.8 | 10.5 |
| 16 | 16 | 11248.0 | 0.1 | 0.7 | 11.3 | 0.2 | 10.1 | 0.0 | 0.0 | 23.6 | 28.8 | 32.4 | 32.7 | 22.5 |
| 16 | 24 | 12048.0 | 0.1 | 0.8 | 15.3 | 0.3 | 14.0 | 0.0 | 0.0 | 32.5 | 38.9 | 42.4 | 42.7 | 30.6 |
| 16 | 32 | 13808.0 | 0.1 | 1.0 | 14.8 | 0.3 | 19.3 | 0.1 | 0.0 | 38.6 | 42.5 | 42.6 | 44.0 | 35.5 |
| 16 | 40 | 14160.0 | 0.1 | 1.8 | 22.2 | 0.4 | 19.7 | 0.0 | 0.0 | 44.3 | 53.9 | 54.1 | 57.7 | 44.1 |
| 16 | 48 | 13664.0 | 0.1 | 2.1 | 25.4 | 0.6 | 27.1 | 0.0 | 0.0 | 58.5 | 67.6 | 68.2 | 68.3 | 55.3 |
| 16 | 56 | 14624.0 | 0.1 | 1.4 | 34.6 | 0.5 | 22.1 | 0.0 | 0.0 | 63.5 | 63.8 | 63.8 | 74.0 | 58.8 |
| 16 | 64 | 18784.0 | 0.1 | 1.7 | 27.6 | 0.5 | 22.9 | 0.0 | 0.0 | 53.9 | 58.2 | 58.5 | 63.6 | 52.7 |
| 16 | 72 | 15584.0 | 0.1 | 2.8 | 33.5 | 0.6 | 34.3 | 0.0 | 0.0 | 76.2 | 77.3 | 77.4 | 77.6 | 71.3 |
| 16 | 80 | 14000.0 | 0.1 | 2.2 | 52.8 | 0.6 | 32.8 | 0.0 | 0.0 | 91.7 | 92.7 | 92.8 | 92.8 | 88.4 |
| 16 | 88 | 13760.0 | 0.1 | 2.4 | 55.0 | 0.6 | 38.9 | 0.1 | 0.0 | 100.5 | 101.6 | 101.7 | 102.0 | 96.9 |
| 16 | 96 | 18864.0 | 0.1 | 2.8 | 41.3 | 0.8 | 33.8 | 0.1 | 0.0 | 82.1 | 83.0 | 83.3 | 83.4 | 78.8 |
| 16 | 104 | 18000.0 | 0.1 | 3.0 | 52.9 | 0.7 | 32.7 | 0.1 | 0.0 | 91.9 | 92.8 | 92.9 | 93.0 | 89.4 |
| 16 | 112 | 16896.0 | 0.1 | 3.3 | 56.5 | 0.9 | 39.1 | 0.1 | 0.0 | 102.0 | 103.7 | 111.8 | 112.4 | 100.0 |
| 16 | 120 | 20144.0 | 0.1 | 3.2 | 52.5 | 0.8 | 33.6 | 0.1 | 0.0 | 92.7 | 93.7 | 93.8 | 93.9 | 90.3 |
| 16 | 128 | 19024.0 | 0.1 | 2.9 | 55.0 | 1.0 | 40.4 | 0.1 | 0.0 | 101.8 | 102.9 | 103.1 | 103.2 | 99.5 |
| 16 | 136 | 20560.0 | 0.1 | 3.8 | 55.1 | 1.0 | 39.4 | 0.1 | 0.0 | 101.8 | 102.9 | 103.0 | 103.2 | 99.5 |
| 16 | 144 | 17264.0 | 0.2 | 2.7 | 81.1 | 1.0 | 42.5 | 0.1 | 0.0 | 130.5 | 131.2 | 131.3 | 131.7 | 127.6 |
| 16 | 152 | 18352.0 | 0.2 | 2.8 | 82.8 | 0.9 | 37.6 | 0.1 | 0.0 | 125.2 | 125.5 | 125.6 | 125.7 | 124.4 |
| 16 | 160 | 16016.0 | 0.1 | 1.0 | 99.0 | 0.8 | 37.6 | 0.1 | 0.0 | 135.9 | 154.3 | 154.3 | 154.4 | 138.7 |
| 16 | 168 | 19200.0 | 0.1 | 3.7 | 81.0 | 1.1 | 42.6 | 0.2 | 0.0 | 131.1 | 132.0 | 132.2 | 132.3 | 128.7 |
| 16 | 176 | 16480.0 | 0.1 | 2.5 | 112.7 | 0.9 | 40.8 | 0.1 | 0.0 | 156.3 | 174.0 | 174.2 | 174.3 | 157.1 |
| 16 | 184 | 16528.0 | 0.2 | 4.1 | 120.3 | 1.0 | 41.3 | 0.1 | 0.0 | 174.3 | 174.9 | 175.1 | 175.6 | 167.1 |
| 16 | 192 | 18512.0 | 0.3 | 2.3 | 109.9 | 1.1 | 40.8 | 0.1 | 0.0 | 156.5 | 158.0 | 158.5 | 158.7 | 154.6 |
| 16 | 200 | 16735.3 | 0.2 | 3.0 | 126.4 | 1.0 | 42.7 | 0.1 | 0.0 | 174.2 | 174.9 | 175.1 | 175.2 | 173.5 |
| 16 | 208 | 17584.0 | 0.3 | 2.9 | 126.9 | 1.1 | 42.5 | 0.1 | 0.0 | 175.0 | 175.4 | 175.5 | 176.0 | 173.9 |
| 16 | 216 | 18301.7 | 0.4 | 2.6 | 127.2 | 1.1 | 42.5 | 0.1 | 0.0 | 174.8 | 175.1 | 175.2 | 175.4 | 174.0 |
| 16 | 224 | 19952.0 | 0.4 | 2.6 | 127.2 | 1.1 | 39.1 | 0.1 | 0.0 | 170.7 | 172.2 | 172.5 | 173.2 | 170.6 |
| 16 | 232 | 19536.0 | 0.5 | 2.6 | 127.0 | 1.2 | 42.5 | 0.1 | 0.0 | 174.8 | 175.4 | 175.5 | 175.7 | 173.9 |
| 16 | 240 | 18592.0 | 0.4 | 2.9 | 144.2 | 1.3 | 41.5 | 0.1 | 0.0 | 190.5 | 191.6 | 191.8 | 192.1 | 190.3 |
| 16 | 248 | 17952.0 | 0.3 | 3.3 | 154.6 | 1.1 | 40.2 | 0.1 | 0.0 | 200.4 | 201.1 | 201.4 | 202.0 | 199.8 |
| 16 | 256 | 19616.0 | 0.5 | 2.8 | 144.7 | 1.3 | 41.3 | 0.1 | 0.0 | 190.8 | 192.4 | 192.6 | 193.2 | 190.6 |
</details>
#### Online: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_27_triton_performance_online_27/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 5008.0 | 0.1 | 0.6 | 9.4 | 0.4 | 11.3 | 3.7 | 0.0 | 29.2 | 30.5 | 31.3 | 32.9 | 25.5 |
| 16 | 16 | 7016.0 | 0.1 | 0.7 | 13.5 | 0.8 | 11.7 | 8.9 | 0.0 | 41.2 | 42.9 | 43.4 | 44.2 | 35.7 |
| 16 | 24 | 8560.0 | 0.1 | 1.0 | 17.5 | 1.0 | 11.9 | 12.7 | 0.0 | 49.4 | 51.3 | 51.9 | 53.1 | 44.2 |
| 16 | 32 | 9264.0 | 0.1 | 1.1 | 21.4 | 1.4 | 11.9 | 17.0 | 0.0 | 57.9 | 59.1 | 59.3 | 59.6 | 52.9 |
| 16 | 40 | 10336.0 | 0.1 | 1.9 | 23.2 | 1.5 | 12.0 | 22.3 | 0.0 | 65.8 | 67.6 | 67.9 | 68.2 | 60.9 |
| 16 | 48 | 10064.0 | 0.1 | 2.6 | 22.0 | 1.7 | 11.8 | 32.6 | 0.0 | 75.7 | 76.6 | 76.7 | 77.4 | 70.8 |
| 16 | 56 | 10512.0 | 0.1 | 2.5 | 20.1 | 1.8 | 11.6 | 44.8 | 0.0 | 85.6 | 86.8 | 87.8 | 88.0 | 80.9 |
| 16 | 64 | 10848.0 | 0.1 | 3.1 | 30.1 | 1.9 | 11.7 | 42.2 | 0.0 | 93.8 | 95.9 | 96.0 | 99.7 | 89.2 |
| 16 | 72 | 10800.0 | 0.1 | 2.9 | 22.0 | 2.0 | 11.3 | 61.7 | 0.0 | 104.0 | 104.8 | 105.6 | 107.4 | 99.8 |
| 16 | 80 | 10976.0 | 0.1 | 2.8 | 38.7 | 2.2 | 11.3 | 52.2 | 0.0 | 111.6 | 112.5 | 113.3 | 116.0 | 107.3 |
| 16 | 88 | 11200.0 | 0.1 | 3.4 | 47.7 | 3.1 | 11.7 | 50.9 | 0.0 | 120.7 | 122.2 | 124.2 | 124.7 | 116.8 |
| 16 | 96 | 11152.0 | 0.1 | 2.8 | 54.7 | 3.3 | 11.0 | 54.2 | 0.0 | 130.4 | 132.2 | 133.0 | 133.9 | 126.1 |
| 16 | 104 | 11312.0 | 0.1 | 4.2 | 60.6 | 7.2 | 12.2 | 51.5 | 0.0 | 138.5 | 144.9 | 161.8 | 173.3 | 135.8 |
| 16 | 112 | 11216.0 | 0.1 | 4.6 | 67.1 | 3.2 | 10.5 | 60.7 | 0.0 | 150.1 | 151.5 | 152.3 | 154.1 | 146.2 |
| 16 | 120 | 10736.0 | 0.1 | 4.6 | 73.0 | 10.8 | 10.3 | 58.1 | 0.0 | 161.5 | 162.4 | 166.4 | 173.6 | 157.0 |
| 16 | 128 | 11504.0 | 0.1 | 3.5 | 77.2 | 7.0 | 9.8 | 66.2 | 0.0 | 168.8 | 171.6 | 172.7 | 186.1 | 163.8 |
| 16 | 136 | 11120.0 | 0.1 | 4.5 | 81.4 | 8.8 | 10.3 | 68.5 | 0.0 | 177.7 | 179.5 | 181.3 | 191.2 | 173.5 |
| 16 | 144 | 11808.0 | 0.1 | 4.7 | 84.3 | 8.4 | 10.7 | 73.0 | 0.0 | 185.0 | 193.4 | 196.4 | 202.1 | 181.2 |
| 16 | 152 | 11168.0 | 0.1 | 3.7 | 91.8 | 28.3 | 8.6 | 63.1 | 0.0 | 199.6 | 203.2 | 203.3 | 209.8 | 195.7 |
| 16 | 160 | 11392.0 | 0.1 | 5.2 | 84.7 | 21.9 | 9.6 | 81.9 | 0.0 | 205.7 | 220.0 | 248.4 | 248.8 | 203.4 |
| 16 | 168 | 11696.0 | 0.1 | 4.9 | 103.6 | 10.9 | 10.1 | 82.6 | 0.0 | 216.4 | 224.8 | 269.6 | 270.7 | 212.1 |
| 16 | 176 | 10912.0 | 0.1 | 5.9 | 105.3 | 30.6 | 9.9 | 73.6 | 0.0 | 230.7 | 235.1 | 235.4 | 235.7 | 225.3 |
| 16 | 184 | 11312.0 | 0.2 | 4.2 | 110.4 | 28.5 | 9.5 | 82.6 | 0.0 | 239.8 | 248.2 | 271.9 | 272.2 | 235.3 |
| 16 | 192 | 10992.0 | 0.1 | 5.4 | 113.3 | 43.4 | 8.6 | 70.0 | 0.0 | 246.1 | 248.0 | 248.3 | 248.8 | 241.0 |
| 16 | 200 | 11360.0 | 0.1 | 5.8 | 116.5 | 36.6 | 9.9 | 77.5 | 0.0 | 251.4 | 259.3 | 272.8 | 273.2 | 246.4 |
| 16 | 208 | 11360.0 | 0.1 | 6.1 | 122.2 | 43.4 | 8.5 | 77.2 | 0.0 | 259.1 | 263.0 | 265.2 | 265.9 | 257.6 |
| 16 | 216 | 11296.0 | 0.3 | 3.3 | 129.2 | 37.6 | 8.7 | 88.9 | 0.0 | 272.2 | 275.7 | 275.9 | 276.3 | 267.9 |
| 16 | 224 | 10800.0 | 0.2 | 5.2 | 132.7 | 43.4 | 8.3 | 86.3 | 0.0 | 277.4 | 281.9 | 282.2 | 282.9 | 276.1 |
| 16 | 232 | 11184.0 | 0.4 | 3.2 | 170.0 | 12.8 | 10.5 | 91.9 | 0.0 | 276.9 | 334.5 | 335.1 | 335.5 | 288.8 |
| 16 | 240 | 10992.0 | 0.4 | 6.2 | 175.9 | 27.0 | 9.4 | 84.9 | 0.0 | 301.9 | 342.6 | 348.0 | 348.2 | 303.8 |
| 16 | 248 | 10432.0 | 0.4 | 3.8 | 179.2 | 12.9 | 10.8 | 98.1 | 0.0 | 314.7 | 356.4 | 376.4 | 377.8 | 305.2 |
| 16 | 256 | 10896.0 | 0.5 | 3.7 | 185.5 | 38.1 | 8.6 | 83.4 | 0.0 | 323.5 | 329.8 | 332.4 | 332.7 | 319.6 |
</details>
#### Online: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_28_triton_performance_online_28/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 4992.0 | 0.1 | 0.6 | 9.5 | 0.4 | 11.2 | 3.6 | 0.0 | 28.9 | 29.9 | 30.2 | 32.2 | 25.3 |
| 16 | 16 | 7192.0 | 0.1 | 0.7 | 12.8 | 0.9 | 11.8 | 8.9 | 0.0 | 41.1 | 43.1 | 43.5 | 44.2 | 35.2 |
| 16 | 24 | 8496.0 | 0.1 | 0.9 | 16.1 | 1.1 | 11.7 | 13.7 | 0.0 | 49.2 | 51.3 | 52.5 | 53.4 | 43.6 |
| 16 | 32 | 9264.0 | 0.1 | 1.1 | 19.2 | 1.8 | 13.1 | 17.0 | 0.0 | 57.4 | 58.9 | 59.0 | 60.7 | 52.2 |
| 16 | 40 | 9808.0 | 0.1 | 1.4 | 21.5 | 1.8 | 13.1 | 23.5 | 0.0 | 66.0 | 66.4 | 66.5 | 66.6 | 61.4 |
| 16 | 48 | 10528.0 | 0.1 | 3.2 | 18.6 | 1.6 | 11.6 | 36.3 | 0.0 | 75.6 | 77.1 | 78.3 | 78.6 | 71.3 |
| 16 | 56 | 10480.0 | 0.1 | 2.9 | 20.1 | 1.7 | 11.5 | 44.5 | 0.0 | 85.7 | 86.5 | 86.6 | 87.4 | 80.8 |
| 16 | 64 | 10352.0 | 0.1 | 2.7 | 21.9 | 2.0 | 11.3 | 51.6 | 0.0 | 94.4 | 95.7 | 96.5 | 97.0 | 89.6 |
| 16 | 72 | 10864.0 | 0.1 | 3.3 | 24.1 | 2.2 | 11.6 | 58.0 | 0.0 | 103.6 | 105.6 | 106.1 | 107.1 | 99.4 |
| 16 | 80 | 10992.0 | 0.1 | 2.7 | 35.9 | 2.3 | 11.2 | 54.2 | 0.0 | 111.0 | 111.9 | 112.8 | 115.5 | 106.3 |
| 16 | 88 | 11648.0 | 0.1 | 3.1 | 46.1 | 2.3 | 11.4 | 53.5 | 0.0 | 120.3 | 121.4 | 122.1 | 125.9 | 116.5 |
| 16 | 96 | 11140.9 | 0.1 | 3.7 | 55.3 | 2.6 | 11.3 | 52.6 | 0.0 | 129.6 | 131.3 | 133.1 | 138.9 | 125.6 |
| 16 | 104 | 11280.0 | 0.1 | 3.2 | 61.2 | 3.1 | 10.5 | 57.0 | 0.0 | 138.8 | 140.7 | 140.7 | 144.1 | 135.1 |
| 16 | 112 | 11824.0 | 0.1 | 3.9 | 65.2 | 3.6 | 11.0 | 60.1 | 0.0 | 147.9 | 149.8 | 150.2 | 154.3 | 143.8 |
| 16 | 120 | 10864.0 | 0.1 | 3.6 | 71.2 | 4.6 | 11.2 | 62.9 | 0.0 | 157.6 | 158.7 | 159.4 | 166.0 | 153.5 |
| 16 | 128 | 11552.0 | 0.1 | 4.7 | 75.8 | 5.0 | 11.0 | 66.6 | 0.0 | 166.2 | 170.8 | 174.3 | 177.3 | 163.0 |
| 16 | 136 | 11152.0 | 0.1 | 5.0 | 81.2 | 12.7 | 9.5 | 66.0 | 0.0 | 177.9 | 181.8 | 187.7 | 194.7 | 174.5 |
| 16 | 144 | 11008.0 | 0.1 | 4.1 | 87.5 | 25.8 | 8.6 | 61.2 | 0.0 | 191.5 | 193.4 | 193.6 | 195.5 | 187.3 |
| 16 | 152 | 10992.0 | 0.1 | 6.1 | 89.5 | 18.9 | 9.0 | 71.5 | 0.0 | 200.3 | 207.5 | 207.7 | 208.1 | 195.1 |
| 16 | 160 | 10656.0 | 0.1 | 5.5 | 91.2 | 30.9 | 8.8 | 68.7 | 0.0 | 210.2 | 215.1 | 215.6 | 221.5 | 205.3 |
| 16 | 168 | 11024.0 | 0.1 | 4.8 | 96.1 | 34.5 | 8.6 | 70.2 | 0.0 | 219.3 | 224.1 | 224.8 | 225.3 | 214.3 |
| 16 | 176 | 10864.0 | 0.1 | 4.7 | 101.8 | 36.7 | 8.4 | 70.7 | 0.0 | 227.6 | 229.0 | 229.2 | 229.3 | 222.4 |
| 16 | 184 | 10896.0 | 0.1 | 5.4 | 107.4 | 38.1 | 8.5 | 73.6 | 0.0 | 237.6 | 242.9 | 243.1 | 244.1 | 233.2 |
| 16 | 192 | 10992.0 | 0.1 | 3.2 | 115.2 | 20.8 | 10.0 | 93.2 | 0.0 | 244.9 | 257.2 | 280.7 | 280.9 | 242.5 |
| 16 | 200 | 11552.0 | 0.2 | 4.9 | 118.6 | 44.4 | 8.5 | 73.4 | 0.0 | 254.1 | 257.2 | 257.2 | 257.6 | 250.0 |
| 16 | 208 | 11236.8 | 0.2 | 1.9 | 124.8 | 21.1 | 10.8 | 101.0 | 0.0 | 263.9 | 281.4 | 287.4 | 288.0 | 259.8 |
| 16 | 216 | 11504.0 | 0.2 | 4.4 | 126.3 | 48.3 | 8.4 | 79.7 | 0.0 | 273.0 | 275.6 | 275.9 | 276.0 | 267.3 |
| 16 | 224 | 11056.0 | 0.4 | 4.7 | 131.6 | 28.3 | 9.9 | 102.3 | 0.0 | 285.1 | 290.2 | 304.5 | 304.8 | 277.3 |
| 16 | 232 | 10528.0 | 0.3 | 4.2 | 169.8 | 36.7 | 9.1 | 73.4 | 0.0 | 295.4 | 317.8 | 318.4 | 319.0 | 293.5 |
| 16 | 240 | 10485.5 | 0.2 | 4.6 | 173.9 | 38.0 | 8.4 | 76.7 | 0.0 | 302.6 | 303.9 | 304.2 | 304.7 | 301.8 |
| 16 | 248 | 11168.0 | 0.3 | 6.6 | 175.1 | 32.5 | 9.0 | 88.1 | 0.0 | 314.0 | 331.7 | 333.7 | 334.1 | 311.6 |
| 16 | 256 | 10384.0 | 0.4 | 3.3 | 184.6 | 40.0 | 8.4 | 82.2 | 0.0 | 318.6 | 321.9 | 322.1 | 322.4 | 318.8 |
</details>
#### Online: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_17_triton_performance_online_17/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 18304.0 | 0.0 | 0.3 | 3.1 | 0.1 | 3.3 | 0.0 | 0.0 | 6.9 | 7.0 | 7.1 | 7.4 | 6.9 |
| 16 | 16 | 20448.0 | 0.0 | 0.5 | 6.6 | 0.1 | 5.2 | 0.0 | 0.0 | 12.5 | 15.5 | 15.6 | 17.1 | 12.4 |
| 16 | 24 | 24448.0 | 0.0 | 0.7 | 8.3 | 0.2 | 6.3 | 0.1 | 0.0 | 17.4 | 17.6 | 17.7 | 17.8 | 15.5 |
| 16 | 32 | 25312.0 | 0.0 | 0.8 | 10.2 | 0.2 | 8.5 | 0.1 | 0.0 | 22.8 | 24.4 | 24.7 | 24.9 | 19.8 |
| 16 | 40 | 23232.0 | 0.0 | 1.2 | 14.2 | 0.4 | 11.3 | 0.1 | 0.0 | 28.7 | 30.3 | 30.4 | 30.5 | 27.1 |
| 16 | 48 | 25296.0 | 0.0 | 1.4 | 9.1 | 0.4 | 18.6 | 0.1 | 0.0 | 31.0 | 32.7 | 32.7 | 33.0 | 29.7 |
| 16 | 56 | 26560.0 | 0.0 | 1.4 | 16.2 | 0.4 | 14.8 | 0.1 | 0.0 | 34.4 | 40.2 | 40.4 | 40.6 | 32.9 |
| 16 | 64 | 26848.0 | 0.0 | 2.0 | 16.6 | 0.4 | 17.8 | 0.1 | 0.0 | 38.6 | 39.0 | 39.1 | 39.2 | 36.9 |
| 16 | 72 | 27632.0 | 0.0 | 1.8 | 22.4 | 0.5 | 16.6 | 0.1 | 0.0 | 42.2 | 47.5 | 47.7 | 48.2 | 41.4 |
| 16 | 80 | 27808.0 | 0.0 | 1.9 | 25.7 | 0.5 | 16.9 | 0.1 | 0.0 | 47.9 | 48.2 | 48.4 | 48.8 | 45.2 |
| 16 | 88 | 29152.0 | 0.0 | 2.5 | 22.8 | 0.6 | 21.1 | 0.1 | 0.0 | 48.7 | 49.4 | 50.4 | 50.6 | 47.2 |
| 16 | 96 | 26352.0 | 0.0 | 2.0 | 33.5 | 0.6 | 20.1 | 0.2 | 0.0 | 58.2 | 58.8 | 58.9 | 59.1 | 56.5 |
| 16 | 104 | 31824.0 | 0.0 | 2.1 | 27.9 | 0.8 | 20.5 | 0.2 | 0.0 | 53.0 | 53.5 | 53.6 | 53.7 | 51.6 |
| 16 | 112 | 34992.0 | 0.0 | 3.2 | 24.8 | 0.9 | 21.8 | 0.2 | 0.0 | 51.8 | 59.5 | 61.5 | 67.9 | 50.9 |
| 16 | 120 | 34496.0 | 0.0 | 1.9 | 29.8 | 0.9 | 22.3 | 0.2 | 0.0 | 58.8 | 66.3 | 66.7 | 72.2 | 55.2 |
| 16 | 128 | 36784.0 | 0.0 | 2.7 | 30.6 | 1.1 | 20.0 | 0.2 | 0.0 | 54.4 | 59.0 | 59.1 | 59.6 | 54.5 |
| 16 | 136 | 36912.0 | 0.0 | 2.3 | 33.8 | 0.9 | 20.4 | 0.2 | 0.0 | 59.0 | 59.3 | 59.5 | 59.6 | 57.7 |
| 16 | 144 | 32672.0 | 0.1 | 2.7 | 42.2 | 1.1 | 21.9 | 0.2 | 0.0 | 69.1 | 71.4 | 72.9 | 73.8 | 68.2 |
| 16 | 152 | 36576.0 | 0.1 | 1.6 | 37.4 | 1.3 | 23.4 | 0.2 | 0.0 | 66.4 | 70.2 | 77.5 | 78.2 | 63.9 |
| 16 | 160 | 37824.0 | 0.1 | 2.2 | 42.0 | 0.9 | 20.9 | 0.2 | 0.0 | 67.1 | 72.1 | 77.5 | 81.7 | 66.3 |
| 16 | 168 | 35536.0 | 0.1 | 1.8 | 49.0 | 0.8 | 21.1 | 0.2 | 0.0 | 77.4 | 81.7 | 81.9 | 82.0 | 72.9 |
| 16 | 176 | 35488.0 | 0.1 | 2.6 | 51.3 | 0.8 | 21.5 | 0.2 | 0.0 | 81.6 | 82.2 | 82.4 | 90.9 | 76.5 |
| 16 | 184 | 33744.0 | 0.1 | 3.7 | 56.2 | 0.8 | 22.4 | 0.2 | 0.0 | 81.8 | 91.8 | 92.1 | 99.1 | 83.3 |
| 16 | 192 | 38032.0 | 0.1 | 2.4 | 51.4 | 1.1 | 22.4 | 0.2 | 0.0 | 82.5 | 83.2 | 88.0 | 92.1 | 77.7 |
| 16 | 200 | 39632.0 | 0.1 | 2.5 | 49.4 | 0.9 | 23.9 | 0.2 | 0.0 | 78.3 | 83.0 | 83.3 | 90.1 | 76.9 |
| 16 | 208 | 34400.0 | 0.1 | 2.1 | 66.7 | 1.1 | 21.9 | 0.2 | 0.0 | 92.5 | 93.1 | 93.3 | 93.5 | 92.2 |
| 16 | 216 | 31712.0 | 0.1 | 2.3 | 80.2 | 0.9 | 20.9 | 0.2 | 0.0 | 104.7 | 105.1 | 105.2 | 105.7 | 104.5 |
| 16 | 224 | 38016.0 | 0.1 | 2.4 | 65.3 | 1.2 | 21.4 | 0.2 | 0.0 | 90.2 | 93.1 | 93.2 | 93.3 | 90.7 |
| 16 | 232 | 37168.0 | 0.1 | 1.8 | 72.2 | 1.1 | 19.7 | 0.2 | 0.0 | 95.2 | 95.8 | 95.9 | 96.0 | 95.1 |
| 16 | 240 | 40832.0 | 0.1 | 2.1 | 60.9 | 0.9 | 24.6 | 0.2 | 0.0 | 87.7 | 105.3 | 108.2 | 112.9 | 88.8 |
| 16 | 248 | 38272.0 | 0.1 | 2.4 | 71.3 | 1.3 | 23.1 | 0.2 | 0.0 | 99.2 | 102.3 | 110.3 | 110.8 | 98.5 |
| 16 | 256 | 33472.0 | 0.1 | 2.4 | 90.1 | 1.1 | 21.9 | 0.2 | 0.0 | 115.9 | 116.9 | 117.4 | 117.8 | 115.9 |
</details>
#### Online: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_18_triton_performance_online_18/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 18816.0 | 0.0 | 0.2 | 3.1 | 0.1 | 3.3 | 0.0 | 0.0 | 6.8 | 6.8 | 6.9 | 6.9 | 6.8 |
| 16 | 16 | 20720.0 | 0.0 | 0.4 | 6.5 | 0.2 | 5.0 | 0.1 | 0.0 | 12.4 | 15.6 | 15.9 | 17.1 | 12.2 |
| 16 | 24 | 23424.0 | 0.0 | 0.6 | 8.9 | 0.2 | 6.4 | 0.1 | 0.0 | 17.6 | 19.5 | 19.6 | 19.8 | 16.2 |
| 16 | 32 | 23840.0 | 0.0 | 1.2 | 10.4 | 0.4 | 9.2 | 0.1 | 0.0 | 23.1 | 23.4 | 23.5 | 23.6 | 21.3 |
| 16 | 40 | 27972.0 | 0.0 | 1.3 | 11.2 | 0.4 | 9.6 | 0.1 | 0.0 | 23.8 | 25.2 | 25.3 | 25.5 | 22.6 |
| 16 | 48 | 28704.0 | 0.0 | 1.5 | 13.3 | 0.4 | 11.2 | 0.1 | 0.0 | 28.6 | 29.0 | 29.1 | 30.6 | 26.5 |
| 16 | 56 | 26464.0 | 0.0 | 1.8 | 17.3 | 0.7 | 13.1 | 0.1 | 0.0 | 32.6 | 40.4 | 40.6 | 40.8 | 33.1 |
| 16 | 64 | 27536.0 | 0.0 | 1.4 | 21.8 | 0.3 | 12.5 | 0.1 | 0.0 | 37.9 | 38.3 | 38.7 | 40.7 | 36.2 |
| 16 | 72 | 33680.0 | 0.0 | 1.5 | 13.5 | 0.8 | 17.8 | 0.1 | 0.0 | 35.0 | 38.4 | 38.8 | 40.4 | 33.7 |
| 16 | 80 | 27984.0 | 0.0 | 1.6 | 25.5 | 0.5 | 16.6 | 0.1 | 0.0 | 47.7 | 48.2 | 48.3 | 48.6 | 44.4 |
| 16 | 88 | 36464.0 | 0.0 | 1.9 | 16.8 | 0.9 | 18.2 | 0.2 | 0.0 | 39.0 | 40.7 | 40.9 | 41.1 | 37.9 |
| 16 | 96 | 35792.0 | 0.0 | 1.9 | 21.1 | 0.7 | 17.4 | 0.1 | 0.0 | 42.7 | 43.0 | 43.1 | 43.2 | 41.4 |
| 16 | 104 | 35536.0 | 0.0 | 2.1 | 25.9 | 0.7 | 17.6 | 0.1 | 0.0 | 48.0 | 48.2 | 48.4 | 48.6 | 46.4 |
| 16 | 112 | 30448.0 | 0.0 | 2.0 | 33.5 | 0.9 | 20.1 | 0.1 | 0.0 | 58.2 | 58.7 | 58.9 | 59.0 | 56.8 |
| 16 | 120 | 32480.0 | 0.0 | 2.9 | 32.9 | 0.8 | 20.3 | 0.2 | 0.0 | 58.6 | 59.0 | 59.2 | 60.4 | 57.2 |
| 16 | 128 | 34528.0 | 0.0 | 2.7 | 33.1 | 1.0 | 20.4 | 0.2 | 0.0 | 58.7 | 59.1 | 59.2 | 59.3 | 57.4 |
| 16 | 136 | 37424.0 | 0.1 | 1.8 | 34.3 | 0.9 | 19.9 | 0.2 | 0.0 | 58.9 | 59.4 | 60.0 | 60.3 | 57.1 |
| 16 | 144 | 33552.0 | 0.0 | 2.5 | 41.1 | 0.9 | 21.8 | 0.2 | 0.0 | 68.9 | 69.2 | 69.3 | 69.5 | 66.6 |
| 16 | 152 | 35104.0 | 0.1 | 2.2 | 43.0 | 1.0 | 21.4 | 0.2 | 0.0 | 69.2 | 72.3 | 76.7 | 81.6 | 67.7 |
| 16 | 160 | 31984.0 | 0.1 | 2.3 | 52.8 | 0.9 | 20.4 | 0.2 | 0.0 | 81.4 | 82.0 | 91.3 | 91.4 | 76.7 |
| 16 | 168 | 35456.0 | 0.1 | 2.4 | 49.3 | 0.9 | 20.9 | 0.2 | 0.0 | 71.3 | 91.3 | 91.6 | 92.1 | 73.8 |
| 16 | 176 | 33200.0 | 0.1 | 2.2 | 57.0 | 1.0 | 20.8 | 0.2 | 0.0 | 82.1 | 84.1 | 91.7 | 92.2 | 81.2 |
| 16 | 184 | 32752.0 | 0.1 | 1.6 | 60.2 | 0.9 | 21.0 | 0.2 | 0.0 | 81.8 | 92.0 | 92.3 | 92.4 | 84.1 |
| 16 | 192 | 36192.0 | 0.1 | 2.4 | 54.7 | 1.1 | 23.1 | 0.2 | 0.0 | 84.2 | 92.2 | 92.3 | 93.0 | 81.7 |
| 16 | 200 | 37424.0 | 0.1 | 2.8 | 56.8 | 0.9 | 20.8 | 0.2 | 0.0 | 82.0 | 82.2 | 82.3 | 82.4 | 81.6 |
| 16 | 208 | 35616.0 | 0.1 | 2.1 | 63.3 | 0.9 | 22.8 | 0.2 | 0.0 | 91.7 | 100.4 | 104.0 | 104.6 | 89.3 |
| 16 | 216 | 37200.0 | 0.1 | 2.6 | 63.9 | 1.1 | 21.0 | 0.2 | 0.0 | 89.2 | 89.5 | 89.6 | 89.7 | 88.8 |
| 16 | 224 | 32512.0 | 0.1 | 2.1 | 80.5 | 0.9 | 20.7 | 0.2 | 0.0 | 104.6 | 105.0 | 105.1 | 105.6 | 104.5 |
| 16 | 232 | 40944.0 | 0.1 | 2.0 | 59.3 | 1.0 | 24.4 | 0.2 | 0.0 | 89.3 | 93.4 | 100.7 | 101.8 | 87.0 |
| 16 | 240 | 37952.0 | 0.1 | 2.2 | 74.6 | 1.0 | 17.7 | 0.2 | 0.0 | 94.0 | 101.3 | 101.6 | 103.8 | 95.7 |
| 16 | 248 | 37744.0 | 0.2 | 2.2 | 74.6 | 1.0 | 23.0 | 0.2 | 0.0 | 101.8 | 113.0 | 113.4 | 114.6 | 101.1 |
| 16 | 256 | 31120.0 | 0.1 | 2.0 | 100.8 | 0.9 | 20.1 | 0.1 | 0.0 | 124.2 | 124.9 | 125.1 | 125.5 | 124.2 |
</details>
#### Online: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_27_triton_performance_online_27/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 8080.0 | 0.0 | 0.2 | 5.0 | 0.2 | 10.1 | 0.1 | 0.0 | 19.3 | 20.4 | 20.5 | 20.9 | 15.6 |
| 16 | 16 | 12275.7 | 0.0 | 0.4 | 7.8 | 0.4 | 10.1 | 1.7 | 0.0 | 23.3 | 25.3 | 25.9 | 26.3 | 20.4 |
| 16 | 24 | 15072.0 | 0.0 | 0.6 | 10.2 | 0.5 | 10.5 | 2.9 | 0.0 | 27.3 | 28.4 | 28.8 | 29.6 | 24.8 |
| 16 | 32 | 17616.0 | 0.0 | 1.0 | 11.7 | 0.6 | 12.0 | 3.1 | 0.0 | 30.9 | 32.0 | 32.3 | 32.6 | 28.5 |
| 16 | 40 | 19024.0 | 0.0 | 0.9 | 14.2 | 0.8 | 11.7 | 5.3 | 0.0 | 34.9 | 36.7 | 37.4 | 47.0 | 32.9 |
| 16 | 48 | 19312.0 | 0.1 | 2.1 | 12.1 | 1.1 | 11.8 | 12.2 | 0.0 | 39.9 | 46.1 | 49.0 | 54.4 | 39.2 |
| 16 | 56 | 20848.0 | 0.0 | 1.4 | 17.9 | 1.1 | 10.0 | 11.1 | 0.0 | 43.6 | 44.9 | 46.0 | 50.8 | 41.6 |
| 16 | 64 | 21456.0 | 0.0 | 1.9 | 14.9 | 1.4 | 9.7 | 18.6 | 0.0 | 48.2 | 50.1 | 51.0 | 51.3 | 46.5 |
| 16 | 72 | 21600.0 | 0.0 | 4.1 | 19.6 | 1.1 | 10.4 | 16.9 | 0.0 | 53.9 | 54.5 | 54.7 | 55.8 | 52.0 |
| 16 | 80 | 22192.0 | 0.1 | 2.1 | 24.1 | 2.2 | 9.5 | 18.0 | 0.0 | 57.9 | 60.0 | 61.5 | 63.2 | 56.0 |
| 16 | 88 | 22304.0 | 0.0 | 2.1 | 27.6 | 3.2 | 8.8 | 19.4 | 0.0 | 63.5 | 66.0 | 66.1 | 77.3 | 61.2 |
| 16 | 96 | 22176.0 | 0.0 | 2.6 | 29.3 | 4.1 | 8.7 | 21.6 | 0.0 | 68.6 | 71.9 | 76.1 | 79.0 | 66.3 |
| 16 | 104 | 22416.0 | 0.0 | 4.4 | 30.2 | 1.6 | 10.8 | 24.1 | 0.0 | 73.4 | 75.0 | 75.9 | 76.5 | 71.1 |
| 16 | 112 | 22096.0 | 0.1 | 2.9 | 33.8 | 10.6 | 7.4 | 23.1 | 0.0 | 81.6 | 83.9 | 84.4 | 90.5 | 77.8 |
| 16 | 120 | 22320.0 | 0.1 | 3.0 | 34.8 | 10.2 | 7.9 | 25.9 | 0.0 | 85.6 | 90.2 | 102.7 | 116.7 | 81.9 |
| 16 | 128 | 22544.0 | 0.1 | 2.9 | 38.9 | 12.9 | 7.1 | 25.4 | 0.0 | 91.8 | 95.3 | 103.6 | 105.4 | 87.3 |
| 16 | 136 | 22704.0 | 0.1 | 3.8 | 40.5 | 13.9 | 7.1 | 25.9 | 0.0 | 95.4 | 97.8 | 98.6 | 114.4 | 91.3 |
| 16 | 144 | 22224.0 | 0.1 | 2.3 | 42.4 | 18.0 | 6.8 | 26.6 | 0.0 | 101.8 | 107.1 | 108.3 | 108.4 | 96.1 |
| 16 | 152 | 22992.0 | 0.1 | 3.3 | 45.4 | 19.0 | 6.8 | 26.6 | 0.0 | 105.8 | 107.6 | 108.0 | 108.8 | 101.2 |
| 16 | 160 | 23328.0 | 0.1 | 2.5 | 47.8 | 11.5 | 7.6 | 34.7 | 0.0 | 106.5 | 121.2 | 123.0 | 140.4 | 104.2 |
| 16 | 168 | 22448.0 | 0.1 | 3.7 | 50.4 | 15.0 | 8.8 | 32.7 | 0.0 | 112.6 | 123.8 | 126.9 | 131.8 | 110.6 |
| 16 | 176 | 22640.0 | 0.1 | 3.6 | 53.3 | 14.9 | 7.7 | 35.1 | 0.0 | 118.0 | 124.1 | 128.9 | 144.0 | 114.7 |
| 16 | 184 | 22937.1 | 0.1 | 4.0 | 52.5 | 23.3 | 7.1 | 32.7 | 0.0 | 124.3 | 126.2 | 127.4 | 128.0 | 119.6 |
| 16 | 192 | 23768.2 | 0.1 | 3.6 | 56.4 | 20.6 | 7.1 | 36.2 | 0.0 | 127.9 | 130.7 | 136.4 | 139.0 | 124.0 |
| 16 | 200 | 23584.0 | 0.1 | 3.9 | 57.8 | 24.4 | 7.2 | 35.5 | 0.0 | 136.1 | 139.0 | 140.3 | 140.7 | 128.7 |
| 16 | 208 | 23192.8 | 0.1 | 4.8 | 62.0 | 20.9 | 7.8 | 38.9 | 0.0 | 140.9 | 145.3 | 170.9 | 187.7 | 134.5 |
| 16 | 216 | 22873.1 | 0.1 | 3.6 | 80.7 | 17.8 | 7.4 | 32.5 | 0.0 | 145.1 | 152.1 | 158.8 | 159.7 | 142.0 |
| 16 | 224 | 23360.0 | 0.1 | 3.7 | 76.7 | 19.9 | 7.4 | 36.1 | 0.0 | 145.4 | 153.1 | 166.4 | 168.8 | 144.0 |
| 16 | 232 | 23152.0 | 0.1 | 3.8 | 83.3 | 17.8 | 7.8 | 38.2 | 0.0 | 151.2 | 162.3 | 176.8 | 185.3 | 150.9 |
| 16 | 240 | 22384.0 | 0.1 | 4.1 | 88.6 | 21.1 | 7.1 | 34.2 | 0.0 | 157.6 | 161.1 | 166.3 | 170.4 | 155.1 |
| 16 | 248 | 22608.0 | 0.2 | 4.5 | 93.4 | 18.5 | 9.3 | 34.8 | 0.0 | 163.3 | 172.8 | 186.2 | 199.5 | 160.8 |
| 16 | 256 | 22320.0 | 0.1 | 3.0 | 94.1 | 16.6 | 8.1 | 41.7 | 0.0 | 165.4 | 178.2 | 188.9 | 202.4 | 163.7 |
</details>
#### Online: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_28_triton_performance_online_28/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 8032.0 | 0.0 | 0.3 | 5.0 | 0.2 | 10.0 | 0.1 | 0.0 | 19.3 | 20.2 | 20.4 | 21.0 | 15.6 |
| 16 | 16 | 12784.0 | 0.0 | 0.4 | 7.5 | 0.4 | 9.8 | 1.6 | 0.0 | 22.8 | 23.6 | 23.8 | 24.3 | 19.8 |
| 16 | 24 | 15888.0 | 0.0 | 0.7 | 9.3 | 0.5 | 9.8 | 3.6 | 0.0 | 26.5 | 27.3 | 27.5 | 27.7 | 23.9 |
| 16 | 32 | 17952.0 | 0.0 | 0.7 | 10.8 | 0.6 | 9.8 | 6.1 | 0.0 | 30.5 | 31.1 | 31.4 | 31.6 | 28.0 |
| 16 | 40 | 19376.0 | 0.0 | 1.0 | 12.6 | 0.7 | 9.7 | 8.1 | 0.0 | 34.5 | 35.3 | 35.5 | 35.7 | 32.2 |
| 16 | 48 | 20528.0 | 0.0 | 1.4 | 15.9 | 0.9 | 9.6 | 8.6 | 0.0 | 38.7 | 39.5 | 39.8 | 40.1 | 36.4 |
| 16 | 56 | 20848.0 | 0.0 | 1.2 | 18.5 | 0.9 | 10.3 | 10.7 | 0.0 | 43.8 | 45.2 | 45.6 | 46.3 | 41.7 |
| 16 | 64 | 21968.0 | 0.0 | 1.6 | 20.6 | 0.9 | 10.2 | 12.5 | 0.0 | 48.0 | 48.7 | 48.9 | 49.3 | 45.9 |
| 16 | 72 | 22144.0 | 0.1 | 1.7 | 20.8 | 1.2 | 9.8 | 16.7 | 0.0 | 52.5 | 53.6 | 54.1 | 54.7 | 50.3 |
| 16 | 80 | 22656.0 | 0.0 | 2.2 | 23.2 | 2.6 | 9.0 | 18.4 | 0.0 | 57.6 | 59.4 | 59.8 | 62.7 | 55.5 |
| 16 | 88 | 23208.8 | 0.0 | 2.6 | 26.3 | 2.0 | 9.9 | 18.7 | 0.0 | 61.5 | 62.6 | 62.9 | 68.4 | 59.5 |
| 16 | 96 | 22464.0 | 0.0 | 2.6 | 27.4 | 2.6 | 9.0 | 23.7 | 0.0 | 67.3 | 69.6 | 73.2 | 79.3 | 65.4 |
| 16 | 104 | 22752.0 | 0.0 | 2.9 | 31.8 | 3.7 | 8.7 | 22.9 | 0.0 | 72.4 | 76.1 | 78.1 | 85.2 | 70.0 |
| 16 | 112 | 23352.6 | 0.1 | 3.6 | 31.8 | 1.5 | 10.6 | 27.3 | 0.0 | 76.3 | 80.4 | 82.2 | 87.4 | 74.9 |
| 16 | 120 | 22592.0 | 0.1 | 3.7 | 34.0 | 7.5 | 8.1 | 28.6 | 0.0 | 83.8 | 86.1 | 88.0 | 107.9 | 81.9 |
| 16 | 128 | 22288.0 | 0.1 | 3.7 | 38.1 | 8.8 | 8.1 | 26.6 | 0.0 | 87.9 | 99.0 | 100.6 | 113.3 | 85.4 |
| 16 | 136 | 23440.0 | 0.1 | 3.1 | 38.2 | 16.5 | 6.7 | 25.4 | 0.0 | 94.0 | 99.6 | 100.7 | 102.5 | 90.1 |
| 16 | 144 | 22864.0 | 0.1 | 2.8 | 43.7 | 14.4 | 7.3 | 27.5 | 0.0 | 99.4 | 102.7 | 104.8 | 121.1 | 95.7 |
| 16 | 152 | 23224.8 | 0.1 | 3.9 | 45.5 | 11.7 | 7.6 | 31.4 | 0.0 | 103.0 | 108.4 | 116.6 | 128.1 | 100.2 |
| 16 | 160 | 22496.0 | 0.1 | 4.3 | 46.8 | 13.1 | 7.7 | 34.3 | 0.0 | 110.5 | 115.9 | 125.3 | 136.9 | 106.2 |
| 16 | 168 | 23760.0 | 0.1 | 3.4 | 49.5 | 18.7 | 7.2 | 29.3 | 0.0 | 111.9 | 113.3 | 113.8 | 135.5 | 108.1 |
| 16 | 176 | 23328.0 | 0.1 | 3.9 | 51.5 | 21.3 | 7.6 | 29.1 | 0.0 | 116.8 | 120.4 | 121.2 | 124.7 | 113.5 |
| 16 | 184 | 23440.0 | 0.1 | 4.1 | 52.6 | 21.0 | 6.9 | 34.0 | 0.0 | 123.0 | 127.5 | 128.1 | 129.3 | 118.6 |
| 16 | 192 | 23728.0 | 0.1 | 3.7 | 56.8 | 19.4 | 7.0 | 35.9 | 0.0 | 122.8 | 123.1 | 123.2 | 123.3 | 122.8 |
| 16 | 200 | 23808.0 | 0.1 | 4.8 | 57.8 | 23.0 | 7.0 | 33.6 | 0.0 | 128.3 | 132.6 | 133.2 | 136.8 | 126.3 |
| 16 | 208 | 23856.0 | 0.1 | 4.2 | 59.0 | 25.7 | 7.2 | 35.1 | 0.0 | 138.1 | 140.9 | 141.2 | 141.6 | 131.2 |
| 16 | 216 | 23200.0 | 0.1 | 3.6 | 64.5 | 23.8 | 6.9 | 36.7 | 0.0 | 135.5 | 136.1 | 136.6 | 136.7 | 135.6 |
| 16 | 224 | 24384.0 | 0.1 | 4.8 | 67.1 | 24.7 | 6.7 | 36.5 | 0.0 | 139.9 | 140.9 | 141.1 | 142.8 | 139.9 |
| 16 | 232 | 23040.0 | 0.1 | 4.1 | 83.9 | 20.1 | 7.0 | 33.5 | 0.0 | 152.9 | 158.9 | 168.2 | 169.6 | 148.6 |
| 16 | 240 | 23496.5 | 0.1 | 3.1 | 87.0 | 20.9 | 7.1 | 35.2 | 0.0 | 156.1 | 159.9 | 168.7 | 171.1 | 153.3 |
| 16 | 248 | 23072.0 | 0.1 | 4.1 | 95.5 | 13.4 | 8.5 | 38.0 | 0.0 | 161.2 | 178.6 | 179.7 | 193.0 | 159.5 |
| 16 | 256 | 21952.0 | 0.1 | 4.0 | 97.0 | 15.3 | 7.7 | 38.3 | 0.0 | 164.7 | 186.0 | 192.8 | 194.8 | 162.4 |
</details>
#### Online: NVIDIA T4, NVIDIA TensorRT with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA T4 |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_t4_experiment_17_triton_performance_online_17/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 10048.0 | 0.1 | 0.7 | 5.3 | 0.1 | 6.3 | 0.0 | 0.0 | 12.6 | 12.7 | 12.7 | 12.8 | 12.6 |
| 16 | 16 | 8464.0 | 0.1 | 1.0 | 15.6 | 0.2 | 13.0 | 0.0 | 0.0 | 30.5 | 41.0 | 41.5 | 41.7 | 29.9 |
| 16 | 24 | 9472.0 | 0.1 | 1.4 | 19.2 | 0.2 | 17.9 | 0.0 | 0.0 | 41.4 | 57.5 | 57.8 | 62.8 | 38.9 |
| 16 | 32 | 9568.0 | 0.1 | 2.0 | 20.2 | 0.3 | 30.3 | 0.0 | 0.0 | 57.4 | 61.5 | 61.6 | 61.7 | 53.1 |
| 16 | 40 | 9616.0 | 0.1 | 2.4 | 31.6 | 0.3 | 29.4 | 0.0 | 0.0 | 70.4 | 71.3 | 71.6 | 72.0 | 63.9 |
| 16 | 48 | 9872.0 | 0.1 | 3.8 | 34.9 | 0.5 | 35.9 | 0.1 | 0.0 | 71.1 | 108.0 | 108.8 | 109.3 | 75.3 |
| 16 | 56 | 9024.0 | 0.1 | 2.8 | 54.7 | 0.3 | 36.5 | 0.0 | 0.0 | 100.7 | 101.2 | 101.7 | 101.8 | 94.5 |
| 16 | 64 | 9536.0 | 0.1 | 4.1 | 37.6 | 0.6 | 61.2 | 0.1 | 0.0 | 108.4 | 109.0 | 109.3 | 109.5 | 103.7 |
| 16 | 72 | 8016.0 | 0.1 | 3.7 | 74.4 | 0.5 | 53.0 | 0.0 | 0.0 | 137.2 | 138.0 | 138.3 | 138.5 | 131.7 |
| 16 | 80 | 9328.0 | 0.1 | 3.8 | 71.0 | 0.6 | 57.2 | 0.1 | 0.0 | 137.5 | 138.6 | 139.6 | 139.8 | 132.7 |
| 16 | 88 | 8240.0 | 0.1 | 3.0 | 85.8 | 0.6 | 61.5 | 0.0 | 0.0 | 158.5 | 175.1 | 176.1 | 176.9 | 151.0 |
| 16 | 96 | 9504.0 | 0.1 | 3.8 | 91.9 | 0.6 | 57.2 | 0.0 | 0.0 | 158.4 | 159.8 | 160.6 | 196.6 | 153.7 |
| 16 | 104 | 9526.5 | 0.2 | 3.6 | 96.2 | 0.8 | 69.6 | 0.0 | 0.0 | 175.4 | 176.3 | 176.3 | 176.6 | 170.4 |
| 16 | 112 | 9424.0 | 0.2 | 3.8 | 94.8 | 0.9 | 70.9 | 0.1 | 0.0 | 175.9 | 176.9 | 177.0 | 177.1 | 170.6 |
| 16 | 120 | 9280.0 | 0.2 | 4.0 | 116.7 | 0.9 | 69.5 | 0.1 | 0.0 | 196.2 | 196.8 | 196.9 | 197.2 | 191.4 |
| 16 | 128 | 9552.0 | 0.2 | 4.3 | 116.8 | 0.9 | 69.3 | 0.1 | 0.0 | 196.4 | 197.2 | 197.4 | 197.6 | 191.5 |
| 16 | 136 | 10165.8 | 0.3 | 3.3 | 117.3 | 1.0 | 69.4 | 0.1 | 0.0 | 196.9 | 197.4 | 197.6 | 197.8 | 191.4 |
| 16 | 144 | 10400.0 | 0.3 | 4.6 | 115.3 | 1.0 | 70.9 | 0.1 | 0.0 | 196.6 | 197.2 | 197.4 | 197.7 | 192.1 |
| 16 | 152 | 9350.6 | 0.3 | 5.1 | 146.4 | 1.0 | 77.2 | 0.1 | 0.0 | 234.6 | 235.3 | 235.6 | 236.0 | 230.1 |
| 16 | 160 | 9744.0 | 0.3 | 4.8 | 145.9 | 1.1 | 77.0 | 0.1 | 0.0 | 234.1 | 234.9 | 235.3 | 235.6 | 229.2 |
| 16 | 168 | 7520.0 | 0.5 | 2.7 | 220.8 | 0.9 | 77.2 | 0.1 | 0.0 | 311.0 | 312.4 | 312.5 | 312.8 | 301.9 |
| 16 | 176 | 7880.1 | 0.5 | 4.0 | 227.3 | 0.9 | 77.0 | 0.1 | 0.0 | 311.6 | 312.7 | 312.8 | 313.1 | 309.8 |
| 16 | 184 | 9760.0 | 0.8 | 5.3 | 183.3 | 1.0 | 73.3 | 0.1 | 0.0 | 256.0 | 275.9 | 276.2 | 276.4 | 263.9 |
| 16 | 192 | 9312.0 | 0.8 | 3.8 | 197.8 | 0.9 | 70.4 | 0.1 | 0.0 | 275.1 | 275.9 | 276.0 | 276.5 | 273.9 |
| 16 | 200 | 8880.0 | 0.9 | 3.5 | 229.1 | 1.0 | 77.2 | 0.1 | 0.0 | 312.8 | 313.9 | 314.0 | 314.2 | 311.7 |
| 16 | 208 | 10992.0 | 1.1 | 3.4 | 188.8 | 1.1 | 71.6 | 0.2 | 0.0 | 266.3 | 266.9 | 267.1 | 267.5 | 266.1 |
| 16 | 216 | 9600.0 | 0.8 | 4.8 | 228.0 | 1.1 | 77.2 | 0.1 | 0.0 | 313.0 | 314.2 | 314.5 | 315.4 | 311.9 |
| 16 | 224 | 9776.0 | 1.1 | 3.8 | 228.5 | 1.1 | 77.2 | 0.1 | 0.0 | 313.0 | 313.7 | 313.8 | 314.0 | 311.9 |
| 16 | 232 | 10928.0 | 1.1 | 3.5 | 220.3 | 1.1 | 69.4 | 0.1 | 0.0 | 296.0 | 296.9 | 297.0 | 297.4 | 295.5 |
| 16 | 240 | 10752.0 | 1.3 | 4.2 | 228.7 | 1.1 | 77.2 | 0.2 | 0.0 | 313.3 | 314.0 | 314.1 | 314.3 | 312.8 |
| 16 | 248 | 9878.1 | 1.4 | 5.1 | 249.7 | 1.2 | 74.8 | 0.2 | 0.0 | 332.9 | 334.1 | 334.3 | 334.6 | 332.4 |
| 16 | 256 | 10368.0 | 1.2 | 4.7 | 251.1 | 1.1 | 74.9 | 0.2 | 0.0 | 333.6 | 334.4 | 334.6 | 335.3 | 333.2 |
</details>
#### Online: NVIDIA T4, NVIDIA TensorRT with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA T4 |
| Backend |NVIDIA TensorRT |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |NVIDIA TensorRT |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_t4_experiment_18_triton_performance_online_18/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 10176.0 | 0.1 | 0.7 | 5.2 | 0.1 | 6.2 | 0.0 | 0.0 | 12.4 | 12.5 | 12.6 | 12.6 | 12.4 |
| 16 | 16 | 8880.0 | 0.1 | 0.9 | 14.6 | 0.1 | 12.4 | 0.0 | 0.0 | 28.6 | 37.0 | 41.6 | 41.9 | 28.3 |
| 16 | 24 | 9520.0 | 0.1 | 1.3 | 19.9 | 0.2 | 17.8 | 0.0 | 0.0 | 41.6 | 50.9 | 57.3 | 61.9 | 39.4 |
| 16 | 32 | 9152.0 | 0.1 | 2.1 | 21.0 | 0.3 | 30.8 | 0.0 | 0.0 | 57.9 | 62.3 | 63.1 | 65.2 | 54.3 |
| 16 | 40 | 9712.0 | 0.1 | 2.7 | 30.0 | 0.3 | 31.6 | 0.0 | 0.0 | 70.7 | 71.2 | 71.4 | 71.6 | 64.8 |
| 16 | 48 | 8000.0 | 0.1 | 3.4 | 28.3 | 0.4 | 61.5 | 0.1 | 0.0 | 95.8 | 104.0 | 104.1 | 104.2 | 93.7 |
| 16 | 56 | 9376.0 | 0.1 | 3.9 | 24.7 | 0.6 | 64.1 | 0.1 | 0.0 | 95.4 | 104.5 | 105.3 | 106.0 | 93.4 |
| 16 | 64 | 8192.0 | 0.1 | 3.4 | 55.8 | 0.5 | 58.8 | 0.0 | 0.0 | 124.4 | 124.7 | 125.2 | 125.3 | 118.7 |
| 16 | 72 | 8432.0 | 0.1 | 2.2 | 73.0 | 0.5 | 51.0 | 0.0 | 0.0 | 137.8 | 138.8 | 139.1 | 139.4 | 126.9 |
| 16 | 80 | 8944.0 | 0.1 | 4.3 | 71.9 | 0.5 | 55.9 | 0.1 | 0.0 | 137.2 | 138.6 | 138.8 | 139.0 | 132.7 |
| 16 | 88 | 7936.0 | 0.1 | 3.0 | 93.5 | 0.7 | 72.3 | 0.1 | 0.0 | 175.2 | 176.1 | 176.3 | 176.4 | 169.6 |
| 16 | 96 | 9152.0 | 0.2 | 3.0 | 92.8 | 0.7 | 56.4 | 0.1 | 0.0 | 159.0 | 159.4 | 159.5 | 159.8 | 153.1 |
| 16 | 104 | 9510.5 | 0.1 | 3.5 | 93.2 | 0.7 | 57.0 | 0.1 | 0.0 | 159.3 | 159.9 | 159.9 | 160.1 | 154.6 |
| 16 | 112 | 10709.3 | 0.2 | 2.8 | 91.4 | 0.9 | 61.3 | 0.1 | 0.0 | 159.2 | 160.2 | 160.4 | 196.7 | 156.7 |
| 16 | 120 | 8848.0 | 0.2 | 3.5 | 116.2 | 0.9 | 70.3 | 0.1 | 0.0 | 196.7 | 198.1 | 198.5 | 199.3 | 191.2 |
| 16 | 128 | 9472.0 | 0.2 | 3.8 | 118.7 | 0.8 | 68.4 | 0.1 | 0.0 | 196.6 | 197.2 | 197.3 | 197.4 | 192.0 |
| 16 | 136 | 10208.0 | 0.2 | 4.1 | 117.3 | 0.9 | 69.6 | 0.1 | 0.0 | 196.9 | 197.8 | 198.1 | 199.0 | 192.2 |
| 16 | 144 | 8599.4 | 0.2 | 4.2 | 146.6 | 0.9 | 77.2 | 0.1 | 0.0 | 234.1 | 235.2 | 235.7 | 236.0 | 229.3 |
| 16 | 152 | 9110.9 | 0.3 | 4.2 | 146.5 | 1.0 | 77.3 | 0.1 | 0.0 | 235.0 | 235.6 | 235.7 | 236.0 | 229.4 |
| 16 | 160 | 7680.0 | 0.4 | 3.2 | 196.0 | 0.8 | 72.5 | 0.1 | 0.0 | 274.5 | 275.2 | 275.6 | 276.1 | 273.1 |
| 16 | 168 | 9968.0 | 0.5 | 4.3 | 147.3 | 1.2 | 77.3 | 0.1 | 0.0 | 234.8 | 236.1 | 236.3 | 236.7 | 230.7 |
| 16 | 176 | 9248.0 | 0.6 | 3.4 | 197.3 | 0.9 | 71.7 | 0.1 | 0.0 | 275.6 | 276.8 | 276.9 | 277.1 | 274.0 |
| 16 | 184 | 8871.1 | 0.6 | 4.2 | 203.9 | 1.1 | 70.7 | 0.1 | 0.0 | 275.5 | 313.3 | 313.9 | 314.6 | 280.6 |
| 16 | 192 | 11252.7 | 0.5 | 5.4 | 151.3 | 1.5 | 77.1 | 0.1 | 0.0 | 235.9 | 237.3 | 237.6 | 238.7 | 235.9 |
| 16 | 200 | 10896.0 | 0.8 | 3.9 | 175.2 | 1.2 | 73.2 | 0.2 | 0.0 | 255.9 | 256.5 | 256.6 | 257.4 | 254.4 |
| 16 | 208 | 11040.0 | 1.1 | 3.5 | 195.6 | 1.1 | 73.1 | 0.1 | 0.0 | 275.9 | 276.8 | 276.9 | 277.1 | 274.6 |
| 16 | 216 | 10384.0 | 1.1 | 4.0 | 215.2 | 1.1 | 71.2 | 0.1 | 0.0 | 295.2 | 296.3 | 296.7 | 297.4 | 292.8 |
| 16 | 224 | 10752.0 | 0.9 | 4.5 | 224.8 | 1.4 | 70.8 | 0.1 | 0.0 | 297.4 | 317.0 | 317.4 | 318.4 | 302.5 |
| 16 | 232 | 10144.0 | 1.0 | 3.7 | 244.1 | 1.0 | 75.1 | 0.2 | 0.0 | 324.5 | 332.0 | 332.9 | 333.0 | 325.0 |
| 16 | 240 | 10560.0 | 1.2 | 4.4 | 228.1 | 1.1 | 77.3 | 0.2 | 0.0 | 313.6 | 314.8 | 315.0 | 315.2 | 312.3 |
| 16 | 248 | 10896.0 | 1.5 | 4.0 | 245.3 | 1.2 | 75.3 | 0.2 | 0.0 | 326.0 | 334.1 | 334.5 | 335.4 | 327.5 |
| 16 | 256 | 11264.0 | 1.5 | 4.3 | 230.6 | 1.7 | 77.0 | 0.2 | 0.0 | 315.4 | 316.4 | 316.6 | 317.0 | 315.4 |
</details>
#### Online: NVIDIA T4, PyTorch with FP16, Dataset: electricity
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA T4 |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | electricity |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_t4_experiment_27_triton_performance_online_27/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 3264.0 | 0.1 | 0.6 | 13.9 | 0.8 | 8.9 | 14.2 | 0.0 | 43.8 | 47.8 | 50.1 | 52.1 | 38.5 |
| 16 | 16 | 3669.3 | 0.1 | 1.0 | 26.2 | 2.0 | 9.1 | 30.3 | 0.0 | 76.8 | 82.8 | 84.7 | 86.7 | 68.7 |
| 16 | 24 | 3760.0 | 0.1 | 1.6 | 37.0 | 2.7 | 9.1 | 50.0 | 0.0 | 111.8 | 114.0 | 114.5 | 117.8 | 100.4 |
| 16 | 32 | 3818.7 | 0.1 | 1.3 | 58.1 | 1.9 | 9.0 | 61.7 | 0.0 | 143.8 | 146.6 | 148.1 | 150.5 | 132.2 |
| 16 | 40 | 3801.4 | 0.1 | 3.0 | 69.5 | 2.0 | 8.9 | 80.0 | 0.0 | 175.5 | 180.4 | 180.8 | 181.7 | 163.4 |
| 16 | 48 | 3822.7 | 0.1 | 3.4 | 77.8 | 6.0 | 9.1 | 98.1 | 0.0 | 205.7 | 209.7 | 211.7 | 216.0 | 194.6 |
| 16 | 56 | 3785.4 | 0.1 | 4.7 | 77.8 | 4.2 | 8.8 | 128.9 | 0.0 | 236.4 | 239.9 | 241.8 | 242.0 | 224.5 |
| 16 | 64 | 3669.3 | 0.1 | 4.8 | 65.2 | 10.4 | 8.4 | 169.2 | 0.0 | 270.8 | 277.5 | 278.0 | 278.2 | 258.2 |
| 16 | 72 | 3769.4 | 0.1 | 4.6 | 129.8 | 5.5 | 8.2 | 140.6 | 0.0 | 300.9 | 305.2 | 306.5 | 306.8 | 288.8 |
| 16 | 80 | 3528.0 | 0.1 | 4.7 | 102.8 | 15.8 | 7.3 | 190.4 | 0.0 | 335.5 | 342.8 | 342.9 | 384.7 | 321.2 |
| 16 | 88 | 3594.7 | 0.1 | 4.0 | 158.6 | 15.5 | 9.1 | 163.3 | 0.0 | 363.4 | 369.4 | 370.6 | 420.0 | 350.6 |
| 16 | 96 | 3700.1 | 0.1 | 4.4 | 187.4 | 22.6 | 8.4 | 159.2 | 0.0 | 394.9 | 397.8 | 398.7 | 412.2 | 382.2 |
| 16 | 104 | 3710.8 | 0.1 | 6.4 | 191.4 | 31.9 | 8.7 | 178.8 | 0.0 | 430.1 | 432.2 | 463.7 | 465.9 | 417.4 |
| 16 | 112 | 3680.0 | 0.1 | 6.1 | 213.8 | 33.0 | 8.5 | 187.7 | 0.0 | 461.4 | 464.6 | 465.3 | 465.5 | 449.4 |
| 16 | 120 | 3616.0 | 0.1 | 7.5 | 158.8 | 27.8 | 7.7 | 274.8 | 0.0 | 489.4 | 493.1 | 500.8 | 501.0 | 476.8 |
| 16 | 128 | 3514.7 | 0.2 | 5.2 | 188.4 | 83.0 | 8.0 | 223.8 | 0.0 | 525.3 | 531.1 | 531.6 | 573.8 | 508.6 |
| 16 | 136 | 3716.1 | 0.2 | 5.4 | 243.3 | 67.8 | 8.0 | 210.6 | 0.0 | 547.8 | 551.0 | 551.6 | 552.1 | 535.2 |
| 16 | 144 | 3168.0 | 0.2 | 3.6 | 263.3 | 76.0 | 8.6 | 213.1 | 0.0 | 583.8 | 720.5 | 720.8 | 721.4 | 564.8 |
| 16 | 152 | 3642.7 | 0.2 | 6.6 | 232.6 | 57.1 | 7.4 | 292.4 | 0.0 | 607.9 | 609.5 | 610.0 | 619.0 | 596.4 |
| 16 | 160 | 3512.0 | 0.3 | 3.6 | 280.5 | 119.6 | 7.3 | 221.4 | 0.0 | 647.3 | 650.8 | 651.4 | 666.6 | 632.7 |
| 16 | 168 | 3206.4 | 0.2 | 6.4 | 283.2 | 116.6 | 7.9 | 243.7 | 0.0 | 669.6 | 670.4 | 670.5 | 670.7 | 657.9 |
| 16 | 176 | 3550.8 | 0.4 | 6.3 | 334.8 | 109.5 | 7.0 | 239.9 | 0.0 | 710.4 | 714.1 | 720.1 | 722.4 | 697.9 |
| 16 | 184 | 3462.3 | 0.4 | 5.4 | 334.5 | 141.1 | 6.6 | 235.4 | 0.0 | 739.5 | 741.4 | 755.4 | 755.7 | 723.5 |
| 16 | 192 | 3232.0 | 0.4 | 6.8 | 350.1 | 135.7 | 7.2 | 255.5 | 0.0 | 769.6 | 774.4 | 786.3 | 786.6 | 755.7 |
| 16 | 200 | 3578.7 | 0.5 | 5.9 | 366.7 | 157.9 | 6.5 | 250.9 | 0.0 | 801.4 | 807.8 | 808.4 | 808.8 | 788.3 |
| 16 | 208 | 3384.0 | 0.4 | 5.7 | 384.7 | 134.6 | 7.5 | 283.0 | 0.0 | 827.6 | 832.8 | 836.8 | 837.3 | 816.0 |
| 16 | 216 | 2952.0 | 0.7 | 5.4 | 419.1 | 145.7 | 6.8 | 265.2 | 0.0 | 844.8 | 851.7 | 851.8 | 852.1 | 842.9 |
| 16 | 224 | 3198.4 | 0.8 | 1.5 | 491.9 | 138.6 | 6.9 | 231.5 | 0.0 | 882.4 | 900.1 | 901.0 | 904.3 | 871.1 |
| 16 | 232 | 3370.7 | 1.1 | 6.2 | 436.3 | 169.3 | 7.0 | 281.1 | 0.0 | 900.1 | 906.2 | 906.4 | 906.6 | 900.9 |
| 16 | 240 | 3514.7 | 1.2 | 4.7 | 457.9 | 188.6 | 7.5 | 278.4 | 0.0 | 941.9 | 947.9 | 948.0 | 948.2 | 938.4 |
| 16 | 248 | 3294.9 | 1.1 | 6.2 | 572.9 | 132.5 | 8.2 | 259.2 | 0.0 | 981.8 | 987.8 | 990.1 | 990.2 | 980.0 |
| 16 | 256 | 3144.0 | 0.7 | 8.5 | 602.8 | 120.8 | 7.3 | 269.7 | 0.0 | 1010.5 | 1247.8 | 1248.0 | 1248.8 | 1009.9 |
</details>
#### Online: NVIDIA T4, PyTorch with FP16, Dataset: traffic
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA T4 |
| Backend |PyTorch |
| Backend accelerator |-|
| Precision |FP16 |
| Model format |TorchScript Trace |
| Max batch size |1024 |
| Number of model instances |2|
| Export Precision | FP32 |
| Dataset | traffic |
| Device | gpu |
| Request Count | 500 |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_t4_experiment_28_triton_performance_online_28/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 16 | 8 | 3486.8 | 0.1 | 0.8 | 10.6 | 1.6 | 10.0 | 13.2 | 0.0 | 43.3 | 47.9 | 48.4 | 49.4 | 36.5 |
| 16 | 16 | 3668.1 | 0.1 | 0.9 | 25.4 | 2.2 | 9.1 | 30.4 | 0.0 | 77.2 | 82.6 | 83.9 | 87.3 | 68.0 |
| 16 | 24 | 3764.1 | 0.1 | 1.4 | 40.4 | 2.2 | 9.1 | 46.5 | 0.0 | 111.1 | 115.9 | 116.9 | 117.6 | 99.7 |
| 16 | 32 | 3822.7 | 0.1 | 2.2 | 56.6 | 1.8 | 8.9 | 61.3 | 0.0 | 142.5 | 145.5 | 147.1 | 151.0 | 130.9 |
| 16 | 40 | 3785.4 | 0.1 | 2.6 | 69.6 | 1.9 | 8.9 | 79.1 | 0.0 | 174.4 | 179.3 | 180.0 | 181.6 | 162.2 |
| 16 | 48 | 3854.7 | 0.1 | 4.3 | 67.3 | 4.2 | 8.9 | 107.5 | 0.0 | 205.1 | 209.3 | 209.5 | 212.6 | 192.4 |
| 16 | 56 | 3786.7 | 0.1 | 3.2 | 99.9 | 5.0 | 8.5 | 108.0 | 0.0 | 236.7 | 240.9 | 242.2 | 242.8 | 224.7 |
| 16 | 64 | 3882.7 | 0.1 | 6.3 | 65.8 | 8.2 | 8.3 | 168.3 | 0.0 | 269.1 | 275.5 | 276.0 | 378.1 | 257.1 |
| 16 | 72 | 3690.7 | 0.1 | 6.5 | 103.0 | 11.5 | 8.0 | 159.3 | 0.0 | 300.2 | 303.5 | 304.8 | 391.1 | 288.5 |
| 16 | 80 | 3669.3 | 0.1 | 6.9 | 95.3 | 19.2 | 7.0 | 193.2 | 0.0 | 333.9 | 338.4 | 338.6 | 339.3 | 321.8 |
| 16 | 88 | 3646.2 | 0.1 | 4.8 | 145.9 | 22.0 | 7.1 | 171.3 | 0.0 | 364.1 | 368.4 | 368.6 | 368.7 | 351.2 |
| 16 | 96 | 3712.0 | 0.1 | 6.3 | 174.7 | 32.3 | 7.0 | 159.8 | 0.0 | 394.4 | 399.8 | 400.2 | 400.6 | 380.1 |
| 16 | 104 | 3701.3 | 0.1 | 5.2 | 192.4 | 39.3 | 7.1 | 169.3 | 0.0 | 427.6 | 434.3 | 434.4 | 435.1 | 413.5 |
| 16 | 112 | 3686.2 | 0.1 | 5.8 | 204.9 | 41.2 | 6.9 | 186.4 | 0.0 | 458.5 | 462.0 | 462.3 | 464.8 | 445.5 |
| 16 | 120 | 3600.0 | 0.2 | 5.6 | 221.5 | 28.2 | 7.2 | 211.1 | 0.0 | 487.2 | 491.1 | 491.7 | 491.9 | 473.7 |
| 16 | 128 | 3656.0 | 0.2 | 9.2 | 157.3 | 27.6 | 6.8 | 307.7 | 0.0 | 518.4 | 525.4 | 525.5 | 526.8 | 508.7 |
| 16 | 136 | 3710.8 | 0.2 | 6.8 | 249.1 | 83.8 | 7.3 | 191.2 | 0.0 | 552.1 | 555.3 | 562.4 | 562.6 | 538.2 |
| 16 | 144 | 3593.5 | 0.2 | 5.3 | 267.5 | 77.6 | 6.8 | 213.9 | 0.0 | 583.8 | 586.1 | 587.0 | 587.8 | 571.3 |
| 16 | 152 | 3630.8 | 0.2 | 6.8 | 258.2 | 98.5 | 7.3 | 230.0 | 0.0 | 613.0 | 618.2 | 621.6 | 622.2 | 600.9 |
| 16 | 160 | 3464.0 | 0.2 | 8.6 | 259.1 | 112.2 | 6.8 | 240.4 | 0.0 | 640.7 | 644.5 | 644.6 | 644.8 | 627.2 |
| 16 | 168 | 3240.0 | 0.3 | 6.4 | 278.2 | 104.2 | 7.2 | 261.6 | 0.0 | 672.9 | 676.3 | 676.5 | 677.1 | 657.9 |
| 16 | 176 | 3376.0 | 0.3 | 6.2 | 298.0 | 126.7 | 6.1 | 254.5 | 0.0 | 701.3 | 706.9 | 707.0 | 707.2 | 691.8 |
| 16 | 184 | 3632.0 | 0.3 | 7.2 | 334.7 | 125.6 | 7.4 | 249.8 | 0.0 | 737.0 | 741.4 | 745.2 | 745.6 | 725.0 |
| 16 | 192 | 3504.0 | 0.5 | 7.5 | 362.4 | 125.7 | 7.2 | 252.9 | 0.0 | 766.8 | 768.9 | 769.1 | 769.3 | 756.1 |
| 16 | 200 | 3246.4 | 0.5 | 5.1 | 360.5 | 161.5 | 6.7 | 247.9 | 0.0 | 794.4 | 797.6 | 797.7 | 798.1 | 782.2 |
| 16 | 208 | 3344.0 | 0.4 | 5.6 | 463.1 | 109.0 | 7.1 | 234.1 | 0.0 | 827.3 | 830.1 | 830.4 | 859.6 | 819.4 |
| 16 | 216 | 3192.0 | 0.4 | 9.0 | 409.4 | 153.2 | 6.9 | 268.5 | 0.0 | 859.0 | 862.5 | 862.6 | 862.8 | 847.3 |
| 16 | 224 | 3312.0 | 0.5 | 6.5 | 424.0 | 179.8 | 6.6 | 257.1 | 0.0 | 888.1 | 893.6 | 900.8 | 901.6 | 874.5 |
| 16 | 232 | 3449.5 | 0.5 | 7.0 | 517.0 | 114.4 | 7.3 | 265.1 | 0.0 | 913.9 | 915.8 | 920.3 | 924.9 | 911.4 |
| 16 | 240 | 3392.0 | 0.7 | 12.9 | 555.7 | 100.4 | 8.9 | 289.1 | 0.0 | 952.8 | 1071.4 | 1138.9 | 1139.4 | 967.6 |
| 16 | 248 | 3321.6 | 0.7 | 6.1 | 474.4 | 132.1 | 8.3 | 339.2 | 0.0 | 959.6 | 967.6 | 968.1 | 968.5 | 960.8 |
| 16 | 256 | 3152.0 | 0.7 | 6.1 | 583.5 | 118.6 | 7.7 | 287.4 | 0.0 | 1008.6 | 1026.3 | 1042.2 | 1042.6 | 1004.0 |
</details>
## Advanced
| Inference runtime | Mnemonic used in scripts |
|-------------------|--------------------------|
| [TorchScript Tracing](https://pytorch.org/docs/stable/jit.html) | `ts-trace` |
| [TorchScript Scripting](https://pytorch.org/docs/stable/jit.html) | `ts-script` |
| [ONNX](https://onnx.ai) | `onnx` |
| [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` |
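For example, the deployment scripts below select the runtime through the `FORMAT` and `EXPORT_FORMAT` variables (the configuration step later in this section shows the full set of variables). A minimal, illustrative selection of the NVIDIA TensorRT runtime with an ONNX export path looks like this:
```shell
# Illustrative only: pick the target runtime by its mnemonic.
export FORMAT="trt"          # deployment runtime: ts-trace | ts-script | onnx | trt
export EXPORT_FORMAT="onnx"  # intermediate export format used before conversion
```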
### Step by step deployment process
The commands described below can be used to export, convert, and profile the model.
#### Clone Repository
IMPORTANT: This step is executed on the host computer.
<details>
<summary>Clone Repository Command</summary>
```shell
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/Forecasting/TFT
```
</details>
#### Setup Environment
Set up the environment on the host computer and start the Triton Inference Server.
<details>
<summary>Setup Environment Command</summary>
```shell
source ./triton/scripts/setup_environment.sh
./triton/scripts/docker/triton_inference_server.sh
```
</details>
#### Prepare Dataset
Please use the dataset downloaded as described in the [Main QSG](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Forecasting/TFT#quick-start-guide).
#### Prepare Checkpoint
Please place a `checkpoint.pt` file from TFT trained on the electricity dataset in `runner_workspace/checkpoints/electricity_bin/`. Note that the `electricity_bin`
subdirectory may not exist yet and may need to be created. Alternatively, you can download a zip archive of a trained checkpoint
[here](https://api.ngc.nvidia.com/v2/models/nvidia/tft_pyt_ckpt_base_eletricity_amp/versions/21.06.0/zip).
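A minimal sketch of one way to fetch and unpack that archive from the repository root, assuming `curl` and `unzip` are available (the target directory matches the path mentioned above):
```shell
# Hypothetical helper commands; adjust paths if your workspace layout differs.
mkdir -p runner_workspace/checkpoints/electricity_bin
curl -L -o tft_electricity_checkpoint.zip \
  "https://api.ngc.nvidia.com/v2/models/nvidia/tft_pyt_ckpt_base_eletricity_amp/versions/21.06.0/zip"
unzip -o tft_electricity_checkpoint.zip -d runner_workspace/checkpoints/electricity_bin
```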
#### Setup Container
Build and run a container that extends the NGC PyTorch container with the Triton Inference Server client libraries and dependencies.
<details>
<summary>Setup Container Command</summary>
```shell
./triton/scripts/docker/build.sh
./triton/scripts/docker/interactive.sh /path/to/your/data/
```
</details>
#### Prepare Configuration
You can use the environment variables to set the parameters of your inference configuration.
Example values of some key variables in one configuration:
<details>
<summary>Export Variables</summary>
```shell
WORKDIR="${WORKDIR:=$(pwd)}"
export DATASETS_DIR=${WORKDIR}/datasets
export WORKSPACE_DIR=${WORKDIR}/runner_workspace
export CHECKPOINTS_DIR=${WORKSPACE_DIR}/checkpoints
export MODEL_REPOSITORY_PATH=${WORKSPACE_DIR}/model_store
export SHARED_DIR=${WORKSPACE_DIR}/shared_dir
export MODEL_NAME=TFT
export ENSEMBLE_MODEL_NAME=
export TRITON_LOAD_MODEL_METHOD=explicit
export TRITON_INSTANCES=1
export FORMAT="trt"
export PRECISION="fp16"
export ACCELERATOR="none"
export TRITON_GPU_ENGINE_COUNT="2"
export CAPTURE_CUDA_GRAPH="0"
export BATCH_SIZE="1,2,4,8,16,32,64,128,256,512,1024"
export TRITON_MAX_QUEUE_DELAY="1"
export MAX_BATCH_SIZE="1024"
export BATCH_SIZES="1 2 4 8 16 32 64 128 256 512 1024"
export TRITON_PREFERRED_BATCH_SIZES="512 1024"
export EXPORT_FORMAT="onnx"
export EXPORT_PRECISION="fp32"
export DATASET="electricity_bin"
export DEVICE="gpu"
export REQUEST_COUNT="500"
export CHECKPOINT_VARIANT="electricity_bin"
export CHECKPOINT_DIR=${CHECKPOINTS_DIR}/${CHECKPOINT_VARIANT}
```
</details>
#### Export Model
Export the model from Python source to the desired format (e.g. SavedModel or TorchScript).
<details>
<summary>Export Model Command</summary>
```shell
if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
\
--checkpoint ${CHECKPOINT_DIR}/ \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--dataset ${DATASETS_DIR}/${DATASET} \
--batch-size 1
```
</details>
#### Convert Model
Convert the model from the training format to an inference format (e.g. TensorRT).
<details>
<summary>Convert Model Command</summary>
```shell
if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.08 \
--max-workspace-size 10000000000 \
--atol target__0=100 \
--rtol target__0=100
```
</details>
#### Deploy Model
Configure the model on the Triton Inference Server by generating its configuration in the model repository.
<details>
<summary>Deploy Model Command</summary>
```shell
if [[ "${FORMAT}" == "ts-trace" || "${FORMAT}" == "ts-script" ]]; then
export CONFIG_FORMAT="torchscript"
else
export CONFIG_FORMAT="${FORMAT}"
fi
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${CONFIG_FORMAT} \
--model-control-mode ${TRITON_LOAD_MODEL_METHOD} \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching dynamic \
--preferred-batch-sizes ${TRITON_PREFERRED_BATCH_SIZES} \
--max-queue-delay-us ${TRITON_MAX_QUEUE_DELAY} \
--engine-count-per-device ${DEVICE}=${TRITON_GPU_ENGINE_COUNT}
```
</details>
#### Prepare Triton Profiling Data
Prepare data used for profiling on Triton server.
<details>
<summary>Prepare Triton Profiling Data Command</summary>
```shell
mkdir -p ${SHARED_DIR}/input_data
python triton/prepare_input_data.py \
--input-data-dir ${SHARED_DIR}/input_data/ \
--dataset ${DATASETS_DIR}/${DATASET} \
--checkpoint ${CHECKPOINT_DIR}/
```
</details>
#### Triton Performance Offline Test
In this scenario, we want to maximize throughput. It assumes that the data is already available
for inference, or that incoming data saturates the maximum batch size quickly.
Triton Inference Server supports offline scenarios with static batching.
With static batching, inference requests are served as they are received,
without the server combining them into larger batches. The largest improvements to throughput come
from increasing the batch size, due to efficiency gains in the GPU with larger
batches.
<details>
<summary>Triton Performance Offline Test Command</summary>
```shell
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data ${SHARED_DIR}/input_data/data.json \
--batch-sizes ${BATCH_SIZE} \
--number-of-triton-instances ${TRITON_INSTANCES} \
--batching-mode static \
--evaluation-mode offline \
--measurement-request-count ${REQUEST_COUNT} \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
```
</details>
#### Triton Performance Online Test
In this scenario, we want to maximize throughput within a latency budget.
Dynamic batching is a feature of Triton Inference Server that allows
inference requests to be combined by the server, so that a batch is
created dynamically. Under load, this improves throughput and reduces the average latency compared to serving each request individually.
<details>
<summary>Triton Performance Online Test</summary>
```shell
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data ${SHARED_DIR}/input_data/data.json \
--batch-sizes ${BATCH_SIZE} \
--number-of-triton-instances ${TRITON_INSTANCES} \
--number-of-model-instances ${TRITON_GPU_ENGINE_COUNT} \
--batching-mode dynamic \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
```
</details>
### Latency explanation
A typical Triton Inference Server pipeline can be broken down into the following steps:
1. The client serializes the inference request into a message and sends it to
the server (Client Send).
2. The message travels over the network from the client to the server (Network).
3. The message arrives at the server and is deserialized (Server Receive).
4. The request is placed on the queue (Server Queue).
5. The request is removed from the queue and computed (Server Compute).
6. The completed request is serialized in a message and sent back to
the client (Server Send).
7. The completed message then travels over the network from the server
to the client (Network).
8. The completed message is deserialized by the client and processed as
a completed inference request (Client Receive).
Generally, for local clients, steps 1-4 and 6-8 occupy only
a small fraction of the total time compared to step 5. Because backend deep learning
systems like TFT are rarely exposed directly to end users, and instead
interface only with local front-end servers, we can consider
all clients to be local for the purposes of this analysis.
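As a rough sanity check on the results tables above, the per-stage columns should approximately sum to the reported average latency. For example, in the first row of the NVIDIA T4 / NVIDIA TensorRT / electricity online table, 0.1 + 0.7 + 5.3 + 0.1 + 6.3 + 0.0 + 0.0 ≈ 12.5 ms, which closely matches the reported 12.6 ms average; small discrepancies are expected because each column is itself averaged over requests.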
## Release Notes
We’re constantly refining and improving our performance on AI
and HPC workloads, even on the same hardware, with frequent updates
to our software stack. For our latest performance data, refer
to these pages for
[AI](https://developer.nvidia.com/deep-learning-performance-training-inference)
and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.
### Changelog
### Known issues
- There are no known issues with this model. |
TensorFlow2/LanguageModeling/BERT/official/utils/export | export | export_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for exporting utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.export import export
class ExportUtilsTest(tf.test.TestCase):
"""Tests for the ExportUtils."""
def test_build_tensor_serving_input_receiver_fn(self):
receiver_fn = export.build_tensor_serving_input_receiver_fn(shape=[4, 5])
with tf.Graph().as_default():
receiver = receiver_fn()
self.assertIsInstance(
receiver, tf.estimator.export.TensorServingInputReceiver)
self.assertIsInstance(receiver.features, tf.Tensor)
self.assertEqual(receiver.features.shape, tf.TensorShape([1, 4, 5]))
self.assertEqual(receiver.features.dtype, tf.float32)
self.assertIsInstance(receiver.receiver_tensors, dict)
# Note that Python 3 can no longer index .values() directly; cast to list.
self.assertEqual(list(receiver.receiver_tensors.values())[0].shape,
tf.TensorShape([1, 4, 5]))
def test_build_tensor_serving_input_receiver_fn_batch_dtype(self):
receiver_fn = export.build_tensor_serving_input_receiver_fn(
shape=[4, 5], dtype=tf.int8, batch_size=10)
with tf.Graph().as_default():
receiver = receiver_fn()
self.assertIsInstance(
receiver, tf.estimator.export.TensorServingInputReceiver)
self.assertIsInstance(receiver.features, tf.Tensor)
self.assertEqual(receiver.features.shape, tf.TensorShape([10, 4, 5]))
self.assertEqual(receiver.features.dtype, tf.int8)
self.assertIsInstance(receiver.receiver_tensors, dict)
# Note that Python 3 can no longer index .values() directly; cast to list.
self.assertEqual(list(receiver.receiver_tensors.values())[0].shape,
tf.TensorShape([10, 4, 5]))
if __name__ == "__main__":
tf.test.main()
|
PyTorch/SpeechSynthesis/FastPitch/scripts/docker | docker | interactive | #!/usr/bin/env bash
PORT=${PORT:-8888}
docker run --gpus=all -it --rm -e CUDA_VISIBLE_DEVICES --ipc=host -p $PORT:$PORT -v $PWD:/workspace/fastpitch/ fastpitch:latest bash
|
TensorFlow2/LanguageModeling/BERT/scripts | scripts | finetune_train_benchmark | #!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
echo "Container nvidia build = " $NVIDIA_BUILD_ID
bert_model=${1:-"large"}
num_gpu=${2:-"8"}
batch_size=${3:-"8"}
precision=${4:-"fp16"}
use_xla=${5:-"true"}
squad_version=1.1
if [ $num_gpu -gt 1 ] ; then
mpi_command="mpirun -np $num_gpu \
--allow-run-as-root -bind-to none -map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH -mca pml ob1 -mca btl ^openib"
use_hvd="--use_horovod"
else
mpi_command=""
use_hvd=""
fi
if [ "$precision" = "fp16" ] ; then
echo "fp16 activated!"
use_fp16="--use_fp16"
else
use_fp16=""
fi
if [ "$use_xla" = "true" ] ; then
use_xla_tag="--enable_xla"
echo "XLA activated"
else
use_xla_tag=""
fi
if [ "$bert_model" = "large" ] ; then
export BERT_BASE_DIR=data/download/google_pretrained_weights/uncased_L-24_H-1024_A-16
else
export BERT_BASE_DIR=data/download/google_pretrained_weights/uncased_L-12_H-768_A-12
fi
export SQUAD_VERSION=v$squad_version
export SQUAD_DIR=data/download/squad/$SQUAD_VERSION
printf -v TAG "squad_train_benchmark_%s_%s_gpu%d_bs%d" "$bert_model" "$precision" $num_gpu $batch_size
DATESTAMP=`date +'%y%m%d%H%M%S'`
LOGFILE=/results/$TAG.log
export MODEL_DIR=/tmp/bert_train_benchmark_${DATESTAMP}
printf "Logs written to %s\n" "$LOGFILE"
mkdir -p /results
$mpi_command python run_squad.py \
--mode=train \
--input_meta_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_meta_data \
--train_data_path=${SQUAD_DIR}/squad_${SQUAD_VERSION}_train.tf_record \
--vocab_file=${BERT_BASE_DIR}/vocab.txt \
--bert_config_file=$BERT_BASE_DIR/bert_config.json \
--init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
--train_batch_size=$batch_size \
--model_dir=${MODEL_DIR} \
--benchmark \
$use_hvd $use_fp16 $use_xla_tag |& tee $LOGFILE
rm $MODEL_DIR -r
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | resnet_utils | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
slim = tf.contrib.slim
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and
returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
"""
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')
net = subsample(net, factor=stride)
whereas
net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
padding='SAME', scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(inputs,
[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
rate=rate, padding='VALID', scope=scope)
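# Illustrative note (not part of the original module): with kernel_size=3,
# stride=2 and rate=1 the branch above computes kernel_size_effective = 3,
# pad_total = 2, pad_beg = 1, pad_end = 1, so a 224x224 input is zero-padded to
# 226x226 and the VALID convolution yields a 112x112 output -- the same shape
# 'SAME' padding produces, but independent of whether the input size is even.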
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None,
store_non_strided_activations=False,
outputs_collections=None):
"""Stacks ResNet `Blocks` and controls output feature density.
First, this function creates scopes for the ResNet in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the ResNet
output_stride, which is the ratio of the input to output spatial resolution.
This is useful for dense prediction tasks such as semantic segmentation or
object detection.
Most ResNets consist of 4 ResNet blocks and subsample the activations by a
factor of 2 when transitioning between consecutive ResNet blocks. This results
  in a nominal ResNet output_stride equal to 8. If we set the output_stride to
half the nominal network stride (e.g., output_stride=4), then we compute
responses twice.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
network stride. If output_stride is not `None`, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of the ResNet.
For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
higher resolution intermediate activations which are useful in some
      dense prediction problems, but increases the computation and memory cost
      at the last unit of each block by a factor of 4.
outputs_collections: Collection to add the ResNet block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]) as sc:
block_stride = 1
for i, unit in enumerate(block.args):
if store_non_strided_activations and i == len(block.args) - 1:
# Move stride from the block's last unit to the end of the block.
block_stride = unit.get('stride', 1)
unit = dict(unit, stride=1)
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get('stride', 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get('stride', 1)
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
# Collect activations at the block's end before performing subsampling.
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
# Subsampling of the block's output activations.
if output_stride is not None and current_stride == output_stride:
rate *= block_stride
else:
net = subsample(net, block_stride)
current_stride *= block_stride
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
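# Illustrative sketch (not part of the original module): `blocks` is a list of
# `Block` namedtuples whose `args` entries are dicts, matching the
# `unit.get('stride', 1)` / `dict(unit, stride=1)` accesses above. The unit
# function `bottleneck` is hypothetical here; resnet_v1.py / resnet_v2.py
# provide the real unit implementations.
#
#   block1 = Block('block1', bottleneck,
#                  [{'depth': 256, 'depth_bottleneck': 64, 'stride': 1}] * 2 +
#                  [{'depth': 256, 'depth_bottleneck': 64, 'stride': 2}])
#   net = stack_blocks_dense(net, [block1], output_stride=8)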
def resnet_arg_scope(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
activation_fn=tf.nn.relu,
use_batch_norm=True,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Whether or not to use batch normalization.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': batch_norm_updates_collections,
'fused': None, # Use fused batch norm if possible.
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=slim.batch_norm if use_batch_norm else None,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
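# Illustrative sketch (not part of the original module): model construction is
# typically wrapped in this arg scope so the conv / batch-norm defaults apply
# to every layer; resnet_v1.resnet_v1_50 is one of the builders from the
# accompanying modules.
#
#   with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
#       logits, end_points = resnet_v1.resnet_v1_50(
#           images, num_classes=1000, is_training=True)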
|
TensorFlow2/Segmentation/Contrib/UNet3P/losses | losses | loss | """
Implementation of different loss functions
"""
import tensorflow as tf
import tensorflow.keras.backend as K
def iou(y_true, y_pred, smooth=1.e-9):
"""
Calculate intersection over union (IoU) between images.
Input shape should be Batch x Height x Width x #Classes (BxHxWxN).
Using Mean as reduction type for batch values.
"""
intersection = K.sum(K.abs(y_true * y_pred), axis=[1, 2, 3])
union = K.sum(y_true, [1, 2, 3]) + K.sum(y_pred, [1, 2, 3])
union = union - intersection
iou = K.mean((intersection + smooth) / (union + smooth), axis=0)
return iou
def iou_loss(y_true, y_pred):
"""
Jaccard / IoU loss
"""
return 1 - iou(y_true, y_pred)
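# Illustrative usage sketch (not part of the original module): inputs are
# expected as probability / one-hot maps of shape (batch, height, width, classes).
#
#   y_true = tf.one_hot(tf.zeros((2, 64, 64), tf.int32), depth=4)        # BxHxWxN
#   y_pred = tf.nn.softmax(tf.random.normal((2, 64, 64, 4)), axis=-1)    # BxHxWxN
#   loss = iou_loss(y_true, y_pred)   # scalar tensor, lower is better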
def focal_loss(y_true, y_pred):
"""
Focal loss
"""
gamma = 2.
alpha = 4.
epsilon = 1.e-9
y_true_c = tf.convert_to_tensor(y_true, tf.float32)
y_pred_c = tf.convert_to_tensor(y_pred, tf.float32)
model_out = tf.add(y_pred_c, epsilon)
ce = tf.multiply(y_true_c, -tf.math.log(model_out))
weight = tf.multiply(y_true_c, tf.pow(
tf.subtract(1., model_out), gamma)
)
fl = tf.multiply(alpha, tf.multiply(weight, ce))
reduced_fl = tf.reduce_max(fl, axis=-1)
return tf.reduce_mean(reduced_fl)
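# Note (illustrative, not part of the original module): with one-hot targets the
# expression above reduces, on the true class, to the standard focal loss
#   FL(p_t) = -alpha * (1 - p_t)^gamma * log(p_t),   p_t = y_pred + epsilon,
# after which the maximum over the class axis is taken and averaged over the
# remaining dimensions.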
def ssim_loss(y_true, y_pred, smooth=1.e-9):
"""
Structural Similarity Index loss.
Input shape should be Batch x Height x Width x #Classes (BxHxWxN).
Using Mean as reduction type for batch values.
"""
ssim_value = tf.image.ssim(y_true, y_pred, max_val=1)
return K.mean(1 - ssim_value + smooth, axis=0)
class DiceCoefficient(tf.keras.metrics.Metric):
"""
Dice coefficient metric. Can be used to calculate dice on probabilities
or on their respective classes
"""
def __init__(self, post_processed: bool,
classes: int,
name='dice_coef',
**kwargs):
"""
Set post_processed=False if dice coefficient needs to be calculated
on probabilities. Set post_processed=True if probabilities needs to
be first converted/mapped into their respective class.
"""
super(DiceCoefficient, self).__init__(name=name, **kwargs)
self.dice_value = self.add_weight(name='dice_value', initializer='zeros',
aggregation=tf.VariableAggregation.MEAN) # SUM
self.post_processed = post_processed
self.classes = classes
if self.classes == 1:
self.axis = [1, 2, 3]
else:
self.axis = [1, 2, ]
def update_state(self, y_true, y_pred, sample_weight=None):
if self.post_processed:
if self.classes == 1:
y_true_ = y_true
y_pred_ = tf.where(y_pred > .5, 1.0, 0.0)
else:
y_true_ = tf.math.argmax(y_true, axis=-1, output_type=tf.int32)
y_pred_ = tf.math.argmax(y_pred, axis=-1, output_type=tf.int32)
y_true_ = tf.cast(y_true_, dtype=tf.float32)
y_pred_ = tf.cast(y_pred_, dtype=tf.float32)
else:
y_true_, y_pred_ = y_true, y_pred
self.dice_value.assign(self.dice_coef(y_true_, y_pred_))
def result(self):
return self.dice_value
def reset_state(self):
self.dice_value.assign(0.0) # reset metric state
def dice_coef(self, y_true, y_pred, smooth=1.e-9):
"""
Calculate dice coefficient.
Input shape could be either Batch x Height x Width x #Classes (BxHxWxN)
or Batch x Height x Width (BxHxW).
Using Mean as reduction type for batch values.
"""
intersection = K.sum(y_true * y_pred, axis=self.axis)
union = K.sum(y_true, axis=self.axis) + K.sum(y_pred, axis=self.axis)
return K.mean((2. * intersection + smooth) / (union + smooth), axis=0)
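# Illustrative usage sketch (not part of the original module): the metric plugs
# into tf.keras like any other; `model` is a hypothetical 4-class segmentation
# network with softmax outputs.
#
#   dice = DiceCoefficient(post_processed=True, classes=4)
#   model.compile(optimizer="adam", loss=iou_loss, metrics=[dice])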
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/tabular | tabular | tabular_metrics | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import warnings
from collections import Counter
from itertools import combinations
from pathlib import Path
from typing import Dict, List, Optional
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from dython.nominal import associations, numerical_encoding
from scipy import stats
from scipy.spatial import distance
from scipy.special import kl_div
from sklearn.decomposition import PCA
from syngen.utils.types import DataFrameType, ColumnType
warnings.simplefilter(action="ignore", category=pd.errors.PerformanceWarning)
matplotlib._log.disabled = True
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL)
class TabularMetrics(object):
def __init__(
self,
real: DataFrameType,
fake: DataFrameType,
categorical_columns: Optional[List] = [],
nrows: Optional[int] = None,
seed: Optional[int] = 123,
verbose: bool = False,
debug: bool = False,
):
"""
Args:
real (DataFrameType): the original dataset
fake (DataFrameType): the generated dataset
categorical_columns (list): list of categorical columns in tabular data
nrows (int): number of rows to use for evaluation (default: None), will use the minimum of real/fake data length
seed (int): sets the random seed for reproducibility. (default: 123)
verbose (bool): print intermediate results (default: False)
debug (bool): debug mode (default: False)
"""
assert all(c in fake.columns for c in real.columns) and len(
real.columns
) == len(fake.columns), r"Real and fake have different columns."
self.real = real
self.fake = fake[real.columns]
self.nrows = nrows
self.seed = seed
self.verbose = verbose
self.debug = debug
self.categorical_columns = categorical_columns
self.numerical_columns = [
column
for column in real.columns
if column not in categorical_columns
]
# Make sure columns and their order are the same.
if len(real.columns) == len(fake.columns):
fake = fake[real.columns.tolist()]
assert (
real.columns.tolist() == fake.columns.tolist()
), "Columns in real and fake dataframe are not the same"
# Make sure the number of samples is equal in both datasets.
if nrows is None:
self.nrows = min(len(self.real), len(self.fake))
elif len(fake) >= nrows and len(real) >= nrows:
self.nrows = nrows
else:
raise Exception(
f"Make sure nrows < len(fake/real). len(real): {len(real)}, len(fake): {len(fake)}"
)
self.real = self.real.sample(self.nrows)
self.fake = self.fake.sample(self.nrows)
self.real.loc[:, self.categorical_columns] = (
self.real.loc[:, self.categorical_columns]
.fillna("[NAN]")
.astype(str)
)
self.fake.loc[:, self.categorical_columns] = (
self.fake.loc[:, self.categorical_columns]
.fillna("[NAN]")
.astype(str)
)
self.real.loc[:, self.numerical_columns] = self.real.loc[
:, self.numerical_columns
].fillna(self.real[self.numerical_columns].mean())
self.fake.loc[:, self.numerical_columns] = self.fake.loc[
:, self.numerical_columns
].fillna(self.fake[self.numerical_columns].mean())
def kl_divergence(self) -> float:
def get_frequencies(real, synthetic):
f_obs, f_exp = [], []
real, synthetic = Counter(real), Counter(synthetic)
for value in synthetic:
if value not in real:
warnings.warn(
f"Unexpected value {value} in synthetic data."
)
real[value] += 1e-6 # Regularization to prevent NaN.
for value in real:
f_obs.append(synthetic[value] / sum(synthetic.values()))
f_exp.append(real[value] / sum(real.values()))
return f_obs, f_exp
numerical_columns = self.numerical_columns
# - continuous columns
cont_scores = []
for columns in combinations(numerical_columns, r=2):
columns = list(columns)
rd_cont = self.real[columns]
            rd_cont[pd.isna(rd_cont)] = 0.0
column1, column2 = rd_cont.columns[:2]
real, xedges, yedges = np.histogram2d(
rd_cont[column1], rd_cont[column2]
)
fake, _, _ = np.histogram2d(
self.fake[column1], self.fake[column2], bins=[xedges, yedges]
)
f_obs, f_exp = fake.flatten() + 1e-5, real.flatten() + 1e-5
f_obs, f_exp = f_obs / np.sum(f_obs), f_exp / np.sum(f_exp)
score = 1 / (1 + np.sum(kl_div(f_obs, f_exp)))
cont_scores.append(score)
# - discrete columns
categorical_columns = self.categorical_columns
cat_scores = []
for columns in combinations(categorical_columns, r=2):
columns = list(columns)
real = self.real[columns].itertuples(index=False)
fake = self.fake[columns].itertuples(index=False)
f_obs, f_exp = get_frequencies(real, fake)
score = 1 / (1 + np.sum(kl_div(f_obs, f_exp)))
cat_scores.append(score)
return np.nanmean(cont_scores + cat_scores)
def correlation_correlation(
self, comparison_metric: str = "pearsonr"
) -> float:
"""
computes the column-wise correlation of each dataset, and outputs the
`comparison_metric` score between the datasets.
Args:
comparison_metric (str): metric to be used to compare between the datasets
see `scipy.stats`
Returns:
corr (float): correlation score
"""
comparison_metric = getattr(stats, comparison_metric)
total_metrics = pd.DataFrame()
for ds_name in ["real", "fake"]:
ds = getattr(self, ds_name)
corr_df = associations(
ds, nominal_columns=self.categorical_columns, nom_nom_assoc='theil', compute_only=True
)
values = corr_df['corr'].values
values = values[~np.eye(values.shape[0], dtype=bool)].reshape(
values.shape[0], -1
)
total_metrics[ds_name] = values.flatten()
correlation_correlations = total_metrics
corr, p = comparison_metric(
total_metrics["real"], total_metrics["fake"]
)
if self.debug:
print("\nColumn correlation between datasets:")
print(total_metrics.to_string())
return corr
def statistical_correlation(self, comparison_metric="spearmanr") -> float:
"""
computes correlation between basic statistics of each dataset for each column
Args:
comparison_metric (str): metric to be used to compare between the datasets
see `scipy.stats`
Returns:
corr (float): correlation score
"""
total_metrics = pd.DataFrame()
comparison_metric = getattr(stats, comparison_metric)
discrete_values = {
c: self.real[c].unique() for c in self.categorical_columns
}
for ds_name in ["real", "fake"]:
ds = getattr(self, ds_name)
metrics = {}
num_ds = ds.loc[:, self.numerical_columns]
cat_ds = ds.loc[:, self.categorical_columns]
for idx, value in num_ds.mean().items():
metrics[f"mean_{idx}"] = value
for idx, value in num_ds.median().items():
metrics[f"median_{idx}"] = value
for idx, value in num_ds.std().items():
metrics[f"std_{idx}"] = value
for idx, value in num_ds.var().items():
metrics[f"variance_{idx}"] = value
for cc in self.categorical_columns:
cdf = ds[cc]
v = cdf.value_counts(normalize=True)
unique_vals = set(v.index)
for d in discrete_values[cc]:
if d not in unique_vals:
metrics[f"count_{d}"] = 0.0
else:
metrics[f"count_{d}"] = v[d]
total_metrics[ds_name] = metrics.values()
total_metrics.index = metrics.keys()
statistical_results = total_metrics
if self.debug:
print("\nBasic statistical attributes:")
print(total_metrics.to_string())
corr, p = comparison_metric(
statistical_results["real"], statistical_results["fake"]
)
return corr
def plot_cumsums(self, nr_cols=4, fname=None):
"""
Plot the cumulative sums for all columns in the real and fake dataset.
Height of each row scales with the length of the labels. Each plot contains the
values of a real columns and the corresponding fake column.
Args:
fname: If not none, saves the plot with this file name.
"""
nr_charts = len(self.real.columns)
nr_rows = max(1, nr_charts // nr_cols)
nr_rows = nr_rows + 1 if nr_charts % nr_cols != 0 else nr_rows
max_len = 0
# Increase the length of plots if the labels are long
if not self.real.select_dtypes(include=["object"]).empty:
lengths = []
for d in self.real.select_dtypes(include=["object"]):
lengths.append(
max(
[
len(x.strip())
for x in self.real[d].unique().tolist()
]
)
)
max_len = max(lengths)
row_height = 6 + (max_len // 30)
fig, ax = plt.subplots(
nr_rows, nr_cols, figsize=(16, row_height * nr_rows)
)
fig.suptitle("Cumulative Sums per feature", fontsize=16)
if nr_rows == 1 and nr_cols == 1:
axes = [ax]
else:
axes = ax.flatten()
for i, col in enumerate(self.real.columns):
r = self.real[col]
f = self.fake.iloc[:, self.real.columns.tolist().index(col)]
self.cdf(r, f, col, "Cumsum", ax=axes[i])
plt.tight_layout(rect=[0, 0.02, 1, 0.98])
if fname is not None:
plt.savefig(fname)
plt.show()
def plot_mean_std(self, ax=None, fname=None) -> None:
"""
Plot the means and standard deviations of each dataset.
Args:
ax: Axis to plot on. If none, a new figure is made.
fname: If not none, saves the plot with this file name.
"""
real = self.real
fake = self.fake
        show = ax is None
        if show:
            fig, ax = plt.subplots(1, 2, figsize=(10, 5))
fig.suptitle(
"Absolute Log Mean and STDs of numeric data\n", fontsize=16
)
ax[0].grid(True)
ax[1].grid(True)
real = real.select_dtypes(include=np.number).reset_index()
fake = fake.select_dtypes(include=np.number).reset_index()
real_mean = np.log(np.add(abs(real.mean()).values, 1e-5))
fake_mean = np.log(np.add(abs(fake.mean()).values, 1e-5))
min_mean = min(real_mean) - 1
max_mean = max(real_mean) + 1
line = np.arange(min_mean, max_mean)
sns.lineplot(x=line, y=line, ax=ax[0])
sns.scatterplot(x=real_mean, y=fake_mean, ax=ax[0])
ax[0].set_title("Means of real and fake data")
ax[0].set_xlabel("real data mean (log)")
ax[0].set_ylabel("fake data mean (log)")
real_std = np.log(np.add(real.std().values, 1e-5))
fake_std = np.log(np.add(fake.std().values, 1e-5))
min_std = min(real_std) - 1
max_std = max(real_std) + 1
line = np.arange(min_std, max_std)
sns.lineplot(x=line, y=line, ax=ax[1])
sns.scatterplot(x=real_std, y=fake_std, ax=ax[1])
ax[1].set_title("Stds of real and fake data")
ax[1].set_xlabel("real data std (log)")
ax[1].set_ylabel("fake data std (log)")
if fname is not None:
plt.savefig(fname)
        if show:
            plt.show()
def convert_numerical(self, real, fake):
"""
Convert categorical columns to numerical
"""
for c in self.categorical_columns:
if real[c].dtype == "object":
real[c] = pd.factorize(real[c], sort=True)[0]
fake[c] = pd.factorize(fake[c], sort=True)[0]
return real, fake
def cdf(
self,
real_data,
fake_data,
xlabel: str = "Values",
ylabel: str = "Cumulative Sum",
ax=None,
) -> None:
"""
        Plot the empirical cumulative distribution function on an optionally given ax.
        If no ax is given, the cdf is plotted and shown.
Args:
xlabel: Label to put on the x-axis
ylabel: Label to put on the y-axis
ax: The axis to plot on. If ax=None, a new figure is created.
"""
x1 = np.sort(real_data)
x2 = np.sort(fake_data)
y = np.arange(1, len(real_data) + 1) / len(real_data)
        show = ax is None
        ax = ax if ax else plt.subplots()[1]
axis_font = {"size": "14"}
ax.set_xlabel(xlabel, **axis_font)
ax.set_ylabel(ylabel, **axis_font)
ax.grid()
ax.plot(x1, y, marker="o", linestyle="none", label="Real", ms=8)
ax.plot(x2, y, marker="o", linestyle="none", label="Fake", alpha=0.5)
ax.tick_params(axis="both", which="major", labelsize=8)
ax.legend(loc="upper center", bbox_to_anchor=(0.5, 1.1), ncol=3)
import matplotlib.ticker as mticker
# If labels are strings, rotate them vertical
if isinstance(real_data, pd.Series) and real_data.dtypes == "object":
ticks_loc = ax.get_xticks()
r_unique = real_data.sort_values().unique()
            if len(r_unique) > len(ticks_loc):
                # More categories than tick positions: trim the labels so the
                # FixedLocator locations and the tick labels stay the same length.
                r_unique = r_unique[: len(ticks_loc)]
            ax.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc))
            ax.set_xticklabels(r_unique, rotation="vertical")
        if show:
            plt.show()
def plot_correlation_difference(
self,
plot_diff: bool = True,
cat_cols: list = None,
annot=False,
fname=None,
) -> None:
"""
Plot the association matrices for the `real` dataframe, `fake` dataframe and plot the difference between them.
Has support for continuous and categorical data types.
All Object and Category dtypes are considered to be categorical columns if `cat_cols` is not passed.
- Continuous - Continuous: Uses Pearson's correlation coefficient
- Continuous - Categorical: Uses so called correlation ratio (https://en.wikipedia.org/wiki/Correlation_ratio) for both continuous - categorical and categorical - continuous.
- Categorical - Categorical: Uses Theil's U, an asymmetric correlation metric for Categorical associations
Args:
plot_diff: Plot difference if True, else not
cat_cols: List of Categorical columns
boolean annot: Whether to annotate the plot with numbers indicating the associations.
"""
real = self.real
fake = self.fake
cmap = sns.diverging_palette(220, 10, as_cmap=True)
if cat_cols is None:
            cat_cols = real.select_dtypes(["object", "category"]).columns.tolist()
if plot_diff:
fig, ax = plt.subplots(1, 3, figsize=(24, 7))
else:
fig, ax = plt.subplots(1, 2, figsize=(20, 8))
real_corr = associations(
real,
nominal_columns=cat_cols,
plot=False,
nom_nom_assoc='theil',
mark_columns=True,
annot=annot,
ax=ax[0],
cmap=cmap,
)["corr"]
fake_corr = associations(
fake,
nominal_columns=cat_cols,
plot=False,
nom_nom_assoc='theil',
mark_columns=True,
annot=annot,
ax=ax[1],
cmap=cmap,
)["corr"]
if plot_diff:
diff = abs(real_corr - fake_corr)
sns.set(style="white")
sns.heatmap(
diff,
ax=ax[2],
cmap=cmap,
vmax=0.3,
square=True,
annot=annot,
center=0,
linewidths=0.5,
cbar_kws={"shrink": 0.5},
fmt=".2f",
)
titles = (
["Real", "Fake", "Difference"] if plot_diff else ["Real", "Fake"]
)
for i, label in enumerate(titles):
title_font = {"size": "18"}
ax[i].set_title(label, **title_font)
plt.tight_layout()
if fname is not None:
plt.savefig(fname)
plt.show()
def plot_pca(self, fname=None):
"""
Plot the first two components of a PCA of real and fake data.
Args:
fname: If not none, saves the plot with this file name.
"""
real, fake = self.convert_numerical(self.real, self.fake)
pca_r = PCA(n_components=2)
pca_f = PCA(n_components=2)
real_t = pca_r.fit_transform(real)
fake_t = pca_f.fit_transform(fake)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
fig.suptitle("First two components of PCA", fontsize=16)
sns.scatterplot(ax=ax[0], x=real_t[:, 0], y=real_t[:, 1])
sns.scatterplot(ax=ax[1], x=fake_t[:, 0], y=fake_t[:, 1])
ax[0].set_title("Real data")
ax[1].set_title("Fake data")
if fname is not None:
plt.savefig(fname)
plt.show()
def visual_evaluation(self, save_dir=None, **kwargs):
"""
Plots mean, std, cumulative sum, correlation difference and PCA
Args:
save_dir: directory path to save images
kwargs: any key word argument for matplotlib.
"""
if save_dir is None:
self.plot_mean_std()
self.plot_cumsums()
self.plot_correlation_difference(
plot_diff=True, cat_cols=self.categorical_columns, **kwargs
)
self.plot_pca()
else:
save_dir = Path(save_dir)
save_dir.mkdir(parents=True, exist_ok=True)
self.plot_mean_std(fname=save_dir / "mean_std.png")
self.plot_cumsums(fname=save_dir / "cumsums.png")
self.plot_correlation_difference(
plot_diff=True,
cat_cols=self.categorical_columns,
fname=save_dir / "correlation_difference.png",
**kwargs,
)
self.plot_pca(fname=save_dir / "pca.png")
def evaluate(
self, comparison_metric: str = "pearsonr"
) -> Dict[str, float]:
"""
evaluate synthetic data
Args:
comparison_metric (str): metric to be used to compare between the datasets
see `scipy.stats`
Returns:
results (dict<str, float>): dictionary containing computed metrics, <key> := metric_name, <value>:= score
"""
statistical_correlation = self.statistical_correlation(
comparison_metric
)
kl_divergence = self.kl_divergence()
correlation_correlation = self.correlation_correlation()
results = {
"statistical_correlation": statistical_correlation,
"kl_divergence": kl_divergence,
"correlation_correlation": correlation_correlation,
}
return results
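# Illustrative usage sketch (not part of the original module): `real_df` and
# `fake_df` are hypothetical pandas DataFrames with identical columns.
#
#   metrics = TabularMetrics(real_df, fake_df,
#                            categorical_columns=["gender", "country"])
#   scores = metrics.evaluate()              # statistical / KL / correlation scores
#   metrics.visual_evaluation(save_dir="./plots")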
def dd_feat_heatmap(
data,
feat_name_col_info: Dict[str, ColumnType],
src_col: str = "src",
dst_col: str = "dst",
):
src_degree = (
data.groupby(src_col, as_index=False)
.count()[[src_col, dst_col]]
.rename(columns={dst_col: "src_degree"})
)
# - normalized src_degree
src_degree_vals = src_degree["src_degree"].values
normalized_src_degree = src_degree_vals / np.sum(src_degree_vals)
src_degree.loc[:, "src_degree"] = normalized_src_degree
# - normalized dst_degree
dst_degree = (
data.groupby(dst_col, as_index=False)
.count()[[src_col, dst_col]]
.rename(columns={src_col: "dst_degree"})
)
dst_degree_vals = dst_degree["dst_degree"].values
normalized_dst_degree = dst_degree_vals / np.sum(dst_degree_vals)
dst_degree.loc[:, "dst_degree"] = normalized_dst_degree
# - merge
data = data.merge(src_degree, how="outer", on=src_col)
data = data.merge(dst_degree, how="outer", on=dst_col)
# - normalize continuous columns
for feat, col_info in feat_name_col_info.items():
col_type = col_info["type"]
min_ = col_info["min"]
max_ = col_info["max"]
if col_type == ColumnType.CONTINUOUS:
vals = data[feat].values
data.loc[:, feat] = (vals - min_) / (max_ - min_)
# - plot heat maps
def heat_map(x, y):
heatmap, xedges, yedges = np.histogram2d(x, y, bins=10)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
return heatmap.T, extent
heat_maps = []
for feat in feat_name_col_info:
heatmap, _ = heat_map(data["src_degree"].values, data[feat].values)
heat_maps.append(heatmap)
return heat_maps
def compute_dd_feat_js(
real,
fake,
feat_name_col_info: Dict[str, ColumnType],
src_col: str = "src",
dst_col: str = "dst",
):
col_info = {}
for col_name, col_type in feat_name_col_info.items():
if col_type == ColumnType.CONTINUOUS:
min_ = real[col_name].min()
max_ = real[col_name].max()
col_info[col_name] = {"type": col_type, "min": min_, "max": max_}
elif col_type == ColumnType.CATEGORICAL:
            # - none of the datasets align on categorical for now.
pass
real_heatmaps = dd_feat_heatmap(
real, col_info, src_col=src_col, dst_col=dst_col
)
fake_heatmaps = dd_feat_heatmap(
fake, col_info, src_col=src_col, dst_col=dst_col
)
heatmaps = list(zip(real_heatmaps, fake_heatmaps))
score = 0.0
for r, f in heatmaps:
s = distance.jensenshannon(r, f, axis=1) # - along feats
np.nan_to_num(s, copy=False, nan=1.0)
s = np.mean(s)
score += s
return score
def get_frequencies(real, synthetic):
f_obs, f_exp = [], []
real, synthetic = Counter(real), Counter(synthetic)
for value in synthetic:
if value not in real:
warnings.warn(f"Unexpected value {value} in synthetic data.")
real[value] += 1e-6 # Regularization to prevent NaN.
for value in real:
f_obs.append(synthetic[value] / sum(synthetic.values()))
f_exp.append(real[value] / sum(real.values()))
return f_obs, f_exp
|
TensorFlow/Segmentation/UNet_Industrial/scripts/benchmarking | benchmarking | UNet_trainbench_AMP_4GPU | #!/usr/bin/env bash
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches UNet training benchmark in TF-AMP on 4 GPUs using 16 batch size (4 per GPU)
# Usage ./UNet_trainbench_AMP_4GPU.sh <path to dataset> <dagm classID (1-10)>
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export TF_CPP_MIN_LOG_LEVEL=3
# Cleaning up for benchmark
RESULT_DIR="/tmp"
rm -rf "${RESULT_DIR}"
mpirun \
-np 4 \
-H localhost:4 \
-bind-to none \
-map-by slot \
-x NCCL_DEBUG=VERSION \
-x LD_LIBRARY_PATH \
-x PATH \
-mca pml ob1 -mca btl ^openib \
--allow-run-as-root \
python "${BASEDIR}/../../main.py" \
--unet_variant='tinyUNet' \
--activation_fn='relu' \
--exec_mode='training_benchmark' \
--iter_unit='batch' \
--num_iter=1500 \
--batch_size=4 \
--warmup_step=500 \
--results_dir="${RESULT_DIR}" \
--data_dir="${1}" \
--dataset_name='DAGM2007' \
--dataset_classID="${2}" \
--data_format='NCHW' \
--use_auto_loss_scaling \
--amp \
--xla \
--learning_rate=1e-4 \
--learning_rate_decay_factor=0.8 \
--learning_rate_decay_steps=500 \
--rmsprop_decay=0.9 \
--rmsprop_momentum=0.8 \
--loss_fn_name='adaptive_loss' \
--weight_decay=1e-5 \
--weight_init_method='he_uniform' \
--augment_data \
--display_every=250 \
--debug_verbosity=0
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | alexnet_test | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import alexnet
slim = tf.contrib.slim
class AlexnetV2Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 300, 400
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 4, 7, num_classes])
def testGlobalPool(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False,
global_pool=True)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 1, 1, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7',
'alexnet_v2/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testNoClasses(self):
batch_size = 5
height, width = 224, 224
num_classes = None
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
self.assertTrue(net.op.name.startswith('alexnet_v2/fc7'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 1, 1, 4096])
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
'alexnet_v2/conv2/weights',
'alexnet_v2/conv2/biases',
'alexnet_v2/conv3/weights',
'alexnet_v2/conv3/biases',
'alexnet_v2/conv4/weights',
'alexnet_v2/conv4/biases',
'alexnet_v2/conv5/weights',
'alexnet_v2/conv5/biases',
'alexnet_v2/fc6/weights',
'alexnet_v2/fc6/biases',
'alexnet_v2/fc7/weights',
'alexnet_v2/fc7/biases',
'alexnet_v2/fc8/weights',
'alexnet_v2/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
|
TensorFlow/LanguageModeling/BERT/utils | utils | create_squad_data | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
import modeling
import optimization
import tokenization
import six
import tensorflow as tf
import horovod.tensorflow as hvd
import time
flags = tf.flags
FLAGS = None
def extract_flags():
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"squad_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.mark_flag_as_required("train_file")
flags.mark_flag_as_required("predict_file")
flags.mark_flag_as_required("squad_dir")
flags.mark_flag_as_required("vocab_file")
return flags.FLAGS
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative=False, input_data=None):
"""Return list of SquadExample from input_data or input_file (SQuAD json file)"""
if input_data is None:
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length -
1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
tf.logging.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
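  # Concretely, both spans above have length 5, so for 'bought' span B scores
  # min(4, 0) + 0.01 * 5 = 0.05 while span C scores min(1, 3) + 0.01 * 5 = 1.05,
  # and span C is selected as the max-context span.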
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
  # Question: What country is the top exporter of electronics?
  # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn, verbose_logging=False):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if verbose_logging and example_index < 20:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("unique_id: %s" % (unique_id))
tf.compat.v1.logging.info("example_index: %s" % (example_index))
tf.compat.v1.logging.info("doc_span_index: %s" % (doc_span_index))
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.compat.v1.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.compat.v1.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
tf.compat.v1.logging.info("start_position: %d" % (start_position))
tf.compat.v1.logging.info("end_position: %d" % (end_position))
tf.compat.v1.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def main():
FLAGS = extract_flags()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tf.gfile.MakeDirs(FLAGS.squad_dir + "/final_tfrecords_sharded")
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
train_examples = read_squad_examples(
input_file=FLAGS.train_file, is_training=True,
version_2_with_negative=FLAGS.version_2_with_negative)
train_writer = FeatureWriter(
filename=os.path.join(FLAGS.squad_dir, "final_tfrecords_sharded/train.tf_record"),
is_training=True)
convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature,
verbose_logging=FLAGS.verbose_logging)
train_writer.close()
eval_examples = read_squad_examples(
input_file=FLAGS.predict_file, is_training=False,
version_2_with_negative=FLAGS.version_2_with_negative)
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.squad_dir, "final_tfrecords_sharded/eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature,
verbose_logging=FLAGS.verbose_logging)
eval_writer.close()
if __name__ == "__main__":
main() |