relative_path | section | filename | text |
---|---|---|---|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | matcher_builder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection matcher from configuration."""
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.protos import matcher_pb2
def build(matcher_config):
"""Builds a matcher object based on the matcher config.
Args:
matcher_config: A matcher.proto object containing the config for the desired
Matcher.
Returns:
Matcher based on the config.
Raises:
    ValueError: If matcher_config is not of type matcher_pb2.Matcher, or if
      no matcher is set in the config.
"""
if not isinstance(matcher_config, matcher_pb2.Matcher):
raise ValueError('matcher_config not of type matcher_pb2.Matcher.')
if matcher_config.WhichOneof('matcher_oneof') == 'argmax_matcher':
matcher = matcher_config.argmax_matcher
matched_threshold = unmatched_threshold = None
if not matcher.ignore_thresholds:
matched_threshold = matcher.matched_threshold
unmatched_threshold = matcher.unmatched_threshold
return argmax_matcher.ArgMaxMatcher(
matched_threshold=matched_threshold,
unmatched_threshold=unmatched_threshold,
negatives_lower_than_unmatched=matcher.negatives_lower_than_unmatched,
force_match_for_each_row=matcher.force_match_for_each_row,
use_matmul_gather=matcher.use_matmul_gather)
if matcher_config.WhichOneof('matcher_oneof') == 'bipartite_matcher':
matcher = matcher_config.bipartite_matcher
return bipartite_matcher.GreedyBipartiteMatcher(matcher.use_matmul_gather)
raise ValueError('Empty matcher.')
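
# Example usage (a minimal sketch; the text-format config below is written for
# illustration and is not taken from a shipped pipeline config):
#
#   from google.protobuf import text_format
#   matcher_text_proto = """
#     argmax_matcher {
#       matched_threshold: 0.5
#       unmatched_threshold: 0.5
#     }
#   """
#   matcher_proto = matcher_pb2.Matcher()
#   text_format.Merge(matcher_text_proto, matcher_proto)
#   matcher = build(matcher_proto)  # returns an argmax_matcher.ArgMaxMatcher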
|
CUDA-Optimized/FastSpeech/fastspeech/trt/plugins/add_pos_enc | add_pos_enc | AddPosEncPlugin | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the NVIDIA CORPORATION nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "NvInfer.h"
#include "NvInferRuntimeCommon.h"
#include <iostream>
#include <cstring>
#include <assert.h>
using namespace std;
using namespace nvinfer1;
class AddPosEncPlugin: public IPluginV2IOExt {
public:
AddPosEncPlugin() {};
AddPosEncPlugin(const void *buffer, size_t length) {
memcpy(&m, buffer, sizeof(m));
}
virtual size_t getSerializationSize() const override {
return sizeof(m);
}
virtual void serialize(void *buffer) const override {
memcpy(buffer, &m, sizeof(m));
}
nvinfer1::IPluginV2Ext* clone() const override {
return new AddPosEncPlugin(&m, sizeof(m));
}
int getNbOutputs() const override {
return 1;
}
nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* pInputDim, int nInputDim) override {
return pInputDim[0];
}
size_t getWorkspaceSize(int nBatch) const override {return 0;}
int enqueue(int nBatch, const void * const *inputs, void **outputs, void* workspace, cudaStream_t stream) override;
int initialize() override {return 0;}
void terminate() override {}
void destroy() override { delete this; }
void setPluginNamespace(const char* szNamespace) override {}
const char* getPluginNamespace() const override {return "";}
const char* getPluginType() const override {return "AddPosEncPlugin";}
const char* getPluginVersion() const override {return "0.0.1";}
void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) override
{
m.inputDim = in[0].dims;
m.dataType = in[0].type;
}
bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const override
{
assert(nbInputs == 1 && nbOutputs == 1 && pos < nbInputs + nbOutputs);
bool condition = inOut[pos].format == TensorFormat::kLINEAR;
condition &= ((inOut[pos].type == DataType::kFLOAT)
// || (inOut[pos].type == DataType::kHALF)
);
switch (pos) {
case 0: // input
condition &= ((inOut[pos].type == DataType::kFLOAT) // for seq in fp32
|| (inOut[pos].type == DataType::kHALF)); // for seq in fp16
break;
case 1: // output
condition &= ((inOut[pos].type == inOut[0].type)); // the same type as the input
break;
}
return condition;
}
DataType getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const override
{
return inputTypes[0];
}
bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const override
{
return false;
}
bool canBroadcastInputAcrossBatch(int inputIndex) const override
{
return false;
}
private:
struct {
Dims inputDim;
DataType dataType;
} m;
};
class AddPosEncPluginCreator : public nvinfer1::IPluginCreator {
public:
nvinfer1::IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override {
return new AddPosEncPlugin(serialData, serialLength);
}
const char* getPluginName() const override {return "AddPosEncPlugin";}
const char* getPluginVersion() const override {return "0.0.1";}
void setPluginNamespace(const char* szNamespace) override {}
const char* getPluginNamespace() const override {return "";}
const nvinfer1::PluginFieldCollection* getFieldNames() override {
std::cout << __FUNCTION__ << std::endl;
return nullptr;
}
nvinfer1::IPluginV2* createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) override {
return new AddPosEncPlugin();
}
};
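
// Example registration (a sketch): to make this creator discoverable through
// the TensorRT plugin registry at engine build/deserialization time, it is
// typically registered once at namespace scope, assuming this is not already
// done elsewhere in this plugin's build:
//
//   REGISTER_TENSORRT_PLUGIN(AddPosEncPluginCreator);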
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_resnet_v1_ppn_feature_extractor | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD feature extractors based on Resnet v1 and PPN architectures."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import resnet_v1
slim = tf.contrib.slim
class _SSDResnetPpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD feature extractor based on resnet architecture and PPN."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_base_fn,
resnet_scope_name,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
base_feature_map_depth=1024,
num_layers=6,
override_base_feature_extractor_hyperparams=False,
use_bounded_activations=False):
"""Resnet based PPN Feature Extractor for SSD Models.
See go/pooling-pyramid for more details about PPN.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
resnet_base_fn: base resnet network to use.
resnet_scope_name: scope name to construct resnet
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
base_feature_map_depth: Depth of the base feature before the max pooling.
num_layers: Number of layers used to make predictions. They are pooled
from the base feature.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
use_bounded_activations: Whether or not to use bounded activations for
resnet v1 bottleneck residual unit. Bounded activations better lend
themselves to quantized inference.
"""
super(_SSDResnetPpnFeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams)
self._resnet_base_fn = resnet_base_fn
self._resnet_scope_name = resnet_scope_name
self._base_feature_map_depth = base_feature_map_depth
self._num_layers = num_layers
self._use_bounded_activations = use_bounded_activations
def _filter_features(self, image_features):
# TODO(rathodv): Change resnet endpoint to strip scope prefixes instead
# of munging the scope here.
filtered_image_features = dict({})
for key, feature in image_features.items():
feature_name = key.split('/')[-1]
if feature_name in ['block2', 'block3', 'block4']:
filtered_image_features[feature_name] = feature
return filtered_image_features
def preprocess(self, resized_inputs):
"""SSD preprocessing.
VGG style channel mean subtraction as described here:
    https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Raises:
ValueError: depth multiplier is not supported.
"""
if self._depth_multiplier != 1.0:
raise ValueError('Depth multiplier not supported.')
preprocessed_inputs = shape_utils.check_min_image_dim(
129, preprocessed_inputs)
with tf.variable_scope(
self._resnet_scope_name, reuse=self._reuse_weights) as scope:
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
with slim.arg_scope(
[resnet_v1.bottleneck],
use_bounded_activations=self._use_bounded_activations):
_, activations = self._resnet_base_fn(
inputs=ops.pad_to_multiple(preprocessed_inputs,
self._pad_to_multiple),
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
store_non_strided_activations=True,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=self._base_feature_map_depth,
num_layers=self._num_layers,
image_features={
'image_features': self._filter_features(activations)['block3']
})
return feature_maps.values()
class SSDResnet50V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor):
"""PPN Resnet50 v1 Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""Resnet50 v1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet50V1PpnFeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, resnet_v1.resnet_v1_50, 'resnet_v1_50',
reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams=(
override_base_feature_extractor_hyperparams))
class SSDResnet101V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor):
"""PPN Resnet101 v1 Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""Resnet101 v1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet101V1PpnFeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, resnet_v1.resnet_v1_101, 'resnet_v1_101',
reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams=(
override_base_feature_extractor_hyperparams))
class SSDResnet152V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor):
"""PPN Resnet152 v1 Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""Resnet152 v1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet152V1PpnFeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, resnet_v1.resnet_v1_152, 'resnet_v1_152',
reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams=(
override_base_feature_extractor_hyperparams))
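
# Example construction (a minimal sketch; `conv_hyperparams_fn` is assumed to
# be built by hyperparams_builder from a pipeline config, and
# `preprocessed_inputs` is assumed to be a [batch, height, width, 3] float
# tensor with height/width of at least 129):
#
#   feature_extractor = SSDResnet50V1PpnFeatureExtractor(
#       is_training=False,
#       depth_multiplier=1.0,  # only 1.0 is supported by extract_features
#       min_depth=16,
#       pad_to_multiple=32,
#       conv_hyperparams_fn=conv_hyperparams_fn)
#   feature_maps = feature_extractor.extract_features(preprocessed_inputs)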
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection | object_detection | argmax_matcher | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify matched_threshold and
unmatched_threshold to control the outcome of each match: columns whose best
similarity falls below unmatched_threshold generally become negative training
examples, while columns falling between the two thresholds are generally
ignored (resulting in neither a positive nor a negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow as tf
from mrcnn_tf2.object_detection import matcher, shape_utils
class ArgMaxMatcher(matcher.Matcher):
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
  matched_threshold (upper threshold) and unmatched_threshold (lower threshold)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(self,
matched_threshold,
unmatched_threshold=None,
negatives_lower_than_unmatched=True,
force_match_for_each_row=False):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
        whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
matched_threshold is high). Defaults to False. See
argmax_matcher_test.testMatcherForceMatch() for an example.
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
if (matched_threshold is None) and (unmatched_threshold is not None):
      raise ValueError('Need to also define matched_threshold when '
                       'unmatched_threshold is defined')
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
        raise ValueError('unmatched_threshold needs to be smaller or equal '
                         'to matched_threshold')
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
        raise ValueError('When negatives are in between matched and '
                         'unmatched thresholds, these cannot be of equal '
                         'value. matched: %s, unmatched: %s' %
                         (self._matched_threshold, self._unmatched_threshold))
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
Returns:
Match object with corresponding matches for each of M columns.
"""
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column
matches = tf.argmax(input=similarity_matrix, axis=0, output_type=tf.int32)
# Deal with matched and unmatched threshold
if self._matched_threshold is not None:
# Get logical indices of ignored and unmatched columns as tf.int64
matched_vals = tf.reduce_max(input_tensor=similarity_matrix, axis=0)
below_unmatched_threshold = tf.greater(self._unmatched_threshold,
matched_vals)
between_thresholds = tf.logical_and(
tf.greater_equal(matched_vals, self._unmatched_threshold),
tf.greater(self._matched_threshold, matched_vals))
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-1)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-2)
else:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-2)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-1)
if self._force_match_for_each_row:
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
force_match_column_ids = tf.argmax(input=similarity_matrix, axis=1,
output_type=tf.int32)
force_match_column_indicators = tf.one_hot(
force_match_column_ids, depth=similarity_matrix_shape[1])
force_match_row_ids = tf.argmax(input=force_match_column_indicators, axis=0,
output_type=tf.int32)
force_match_column_mask = tf.cast(
tf.reduce_max(input_tensor=force_match_column_indicators, axis=0), tf.bool)
final_matches = tf.where(force_match_column_mask,
force_match_row_ids, matches)
return final_matches
else:
return matches
if similarity_matrix.shape.is_fully_defined():
      if similarity_matrix.shape[0] == 0:
return _match_when_rows_are_empty()
else:
return _match_when_rows_are_non_empty()
else:
return tf.cond(
pred=tf.greater(tf.shape(input=similarity_matrix)[0], 0),
true_fn=_match_when_rows_are_non_empty, false_fn=_match_when_rows_are_empty)
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = tf.cast(indicator, x.dtype)
return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
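
# Example (a minimal sketch, assuming the base Matcher class exposes a
# `match()` wrapper around `_match()`): with matched_threshold=0.7 and
# unmatched_threshold=0.3 and the default negatives_lower_than_unmatched=True,
# a column whose best similarity is >= 0.7 matches its argmax row, one falling
# between the thresholds is ignored (-2), and one below 0.3 is negative (-1):
#
#   matcher = ArgMaxMatcher(matched_threshold=0.7, unmatched_threshold=0.3)
#   similarity = tf.constant([[0.8, 0.5, 0.1],
#                             [0.2, 0.1, 0.0]])
#   match = matcher.match(similarity)  # match results per column: [0, -2, -1]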
|
PyTorch/Segmentation/nnUNet/utils | utils | instance_norm | import importlib
import torch
from torch import Tensor
from torch.nn.modules.batchnorm import _NormBase
global instance_norm_nvfuser_cuda
instance_norm_nvfuser_cuda = None
class InstanceNormNVFuserFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps):
global instance_norm_nvfuser_cuda
if instance_norm_nvfuser_cuda is None:
instance_norm_nvfuser_cuda = importlib.import_module("instance_norm_nvfuser_cuda")
channels_last = input.is_contiguous(memory_format=torch.channels_last) or input.is_contiguous(
memory_format=torch.channels_last_3d
)
if channels_last:
order = [0] + [i for i in range(2, len(input.shape))] + [1]
_input = input.permute(order)
else:
_input = input
assert _input.is_contiguous()
result = instance_norm_nvfuser_cuda.forward(
_input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, channels_last
)
if len(result) == 3:
out, mean, invstd = result
else:
running_mean, running_var, out, mean, invstd = result
ctx.use_input_stats = use_input_stats
ctx.eps = eps
ctx.channels_last = channels_last
# saving for backward in "explicit channels-last format"
ctx.save_for_backward(_input, weight, running_mean, running_var, mean, invstd)
if channels_last:
order = [0, len(_input.shape) - 1] + [i for i in range(1, len(_input.shape) - 1)]
out = out.permute(order)
if len(out.shape) == 4:
assert out.is_contiguous(memory_format=torch.channels_last)
assert input.is_contiguous(memory_format=torch.channels_last)
elif len(out.shape) == 5:
assert out.is_contiguous(memory_format=torch.channels_last_3d)
assert input.is_contiguous(memory_format=torch.channels_last_3d)
else:
assert False, "unhandled channels_last format variation in forward"
return out
@staticmethod
def backward(ctx, grad_output):
global instance_norm_nvfuser_cuda
if instance_norm_nvfuser_cuda is None:
instance_norm_nvfuser_cuda = importlib.import_module("instance_norm_nvfuser_cuda")
if ctx.channels_last:
order = [0] + [i for i in range(2, len(grad_output.shape))] + [1]
grad_output = grad_output.permute(order)
# input was saved in "explicit channels-last format"
assert ctx.saved_tensors[0].is_contiguous()
grad_output = grad_output.contiguous()
saved = list(ctx.saved_tensors)
saved.insert(1, grad_output)
running_mean = saved[3]
running_var = saved[4]
mean = saved[-2]
var = saved[-1]
grad_input, grad_weight, grad_bias = instance_norm_nvfuser_cuda.backward(
*saved, ctx.use_input_stats, ctx.eps, ctx.channels_last
)
if ctx.channels_last:
order = [0, len(grad_input.shape) - 1] + [i for i in range(1, len(grad_input.shape) - 1)]
grad_input = grad_input.permute(order)
if len(grad_input.shape) == 4:
assert grad_input.is_contiguous(memory_format=torch.channels_last)
elif len(grad_input.shape) == 5:
assert grad_input.is_contiguous(memory_format=torch.channels_last_3d)
else:
assert False, "unhandled channels_last format variation in backward"
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
class _InstanceNormNVFuser(_NormBase):
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = False,
track_running_stats: bool = False,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(_InstanceNormNVFuser, self).__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
)
self.dummy = torch.empty([], device=device)
def _check_input_dim(self, input):
raise NotImplementedError
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
# at version 1: removed running_mean and running_var when
# track_running_stats=False (default)
if version is None and not self.track_running_stats:
running_stats_keys = []
for name in ("running_mean", "running_var"):
key = prefix + name
if key in state_dict:
running_stats_keys.append(key)
if len(running_stats_keys) > 0:
error_msgs.append(
"Unexpected running stats buffer(s) {names} for {klass} "
"with track_running_stats=False. If state_dict is a "
"checkpoint saved before 0.4.0, this may be expected "
"because {klass} does not track running stats by default "
"since 0.4.0. Please remove these keys from state_dict. If "
"the running stats are actually needed, instead set "
"track_running_stats=True in {klass} to enable them. See "
"the documentation of {klass} for details.".format(
names=" and ".join('"{}"'.format(k) for k in running_stats_keys), klass=self.__class__.__name__
)
)
for key in running_stats_keys:
state_dict.pop(key)
super(_InstanceNormNVFuser, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, input: Tensor) -> Tensor:
assert input.is_cuda, "NVFuser InstanceNorm is CUDA only"
self._check_input_dim(input)
if self.running_mean is not None:
out = InstanceNormNVFuserFunction.apply(
input,
self.weight if self.weight is not None else self.dummy,
self.bias if self.bias is not None else self.dummy,
self.running_mean,
self.running_var,
self.training or not self.track_running_stats,
self.momentum,
self.eps,
)
else:
out = InstanceNormNVFuserFunction.apply(
input,
self.weight if self.weight is not None else self.dummy,
self.bias if self.bias is not None else self.dummy,
self.dummy,
self.dummy,
self.training or not self.track_running_stats,
self.momentum,
self.eps,
)
return out
class InstanceNorm3dNVFuser(_InstanceNormNVFuser):
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError("expected 5D input (got {}D input)".format(input.dim()))
|
TensorFlow2/Segmentation/Contrib/UNet3P/data_generators | data_generators | tf_data_generator | """
Tensorflow data generator class.
"""
import tensorflow as tf
import numpy as np
from omegaconf import DictConfig
from utils.general_utils import get_data_paths
from utils.images_utils import prepare_image, prepare_mask
class DataGenerator(tf.keras.utils.Sequence):
"""
    Generate batches of data for the model by reading images and their
    corresponding masks using the TensorFlow Sequence generator.
    There are two options: you can pass either a directory path or a list.
    In the case of a directory, it should contain the relative path of the
    images/mask folder from the project root path.
    In the case of a list of images, every element should contain the absolute
    path of each image and mask.
    Because this generator is also used for prediction, during testing you can
    set the mask path to None if masks are not available for visualization.
"""
def __init__(self, cfg: DictConfig, mode: str):
"""
Initialization
"""
self.cfg = cfg
self.mode = mode
self.batch_size = self.cfg.HYPER_PARAMETERS.BATCH_SIZE
# set seed for reproducibility
np.random.seed(cfg.SEED)
        # check whether masks are available
self.mask_available = False if cfg.DATASET[mode].MASK_PATH is None or str(
cfg.DATASET[mode].MASK_PATH).lower() == "none" else True
data_paths = get_data_paths(cfg, mode, self.mask_available)
self.images_paths = data_paths[0]
if self.mask_available:
self.mask_paths = data_paths[1]
# self.images_paths.sort() # no need for sorting
self.on_epoch_end()
def __len__(self):
"""
Denotes the number of batches per epoch
"""
        # TensorFlow issue: on_epoch_end is not called at the end of each
        # epoch, so force the call here
self.on_epoch_end()
return int(
np.floor(
len(self.images_paths) / self.batch_size
)
)
def on_epoch_end(self):
"""
Updates indexes after each epoch
"""
self.indexes = np.arange(len(self.images_paths))
if self.cfg.PREPROCESS_DATA.SHUFFLE[self.mode].VALUE:
np.random.shuffle(self.indexes)
def __getitem__(self, index):
"""
Generate one batch of data
"""
# Generate indexes of the batch
indexes = self.indexes[
index * self.batch_size:(index + 1) * self.batch_size
]
# Generate data
return self.__data_generation(indexes)
def __data_generation(self, indexes):
"""
Generates batch data
"""
# create empty array to store batch data
batch_images = np.zeros(
(
self.cfg.HYPER_PARAMETERS.BATCH_SIZE,
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.INPUT.CHANNELS
)
).astype(np.float32)
if self.mask_available:
batch_masks = np.zeros(
(
self.cfg.HYPER_PARAMETERS.BATCH_SIZE,
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.OUTPUT.CLASSES
)
).astype(np.float32)
for i, index in enumerate(indexes):
# extract path from list
img_path = self.images_paths[int(index)]
if self.mask_available:
mask_path = self.mask_paths[int(index)]
# prepare image for model by resizing and preprocessing it
image = prepare_image(
img_path,
self.cfg.PREPROCESS_DATA.RESIZE,
self.cfg.PREPROCESS_DATA.IMAGE_PREPROCESSING_TYPE,
)
if self.mask_available:
                # prepare mask for model by resizing and preprocessing it
mask = prepare_mask(
mask_path,
self.cfg.PREPROCESS_DATA.RESIZE,
self.cfg.PREPROCESS_DATA.NORMALIZE_MASK,
)
# numpy to tensorflow conversion
if self.mask_available:
image, mask = tf.numpy_function(
self.tf_func,
[image, mask],
[tf.float32, tf.int32]
)
else:
                # tf.numpy_function returns a list when a list of output
                # dtypes is passed, so unpack the single image tensor
                image, = tf.numpy_function(
                    self.tf_func,
                    [image, ],
                    [tf.float32, ]
                )
            # set shape attributes which were lost during TF conversion
image.set_shape(
[
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.INPUT.CHANNELS
]
)
batch_images[i] = image
if self.mask_available:
# height x width --> height x width x output classes
if self.cfg.OUTPUT.CLASSES == 1:
mask = tf.expand_dims(mask, axis=-1)
else:
# convert mask into one hot vectors
mask = tf.one_hot(
mask,
self.cfg.OUTPUT.CLASSES,
dtype=tf.int32
)
mask.set_shape(
[
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.OUTPUT.CLASSES
]
)
batch_masks[i] = mask
if self.mask_available:
return batch_images, batch_masks
else:
return batch_images,
@staticmethod
def tf_func(*args):
return args
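
# Example usage (a minimal sketch; `cfg` is assumed to be a Hydra/OmegaConf
# config exposing the keys accessed above, e.g. HYPER_PARAMETERS.BATCH_SIZE,
# DATASET.<MODE>.MASK_PATH, INPUT.HEIGHT/WIDTH/CHANNELS and OUTPUT.CLASSES,
# and "TRAIN" is assumed to be a valid mode key in that config):
#
#   train_generator = DataGenerator(cfg, mode="TRAIN")
#   batch_images, batch_masks = train_generator[0]  # one batch of data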
|
TensorFlow/Detection/SSD/models/research/slim | slim | eval_image_classifier | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic evaluation script that evaluates a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from datasets import dataset_factory
from nets import nets_factory
from preprocessing import preprocessing_factory
slim = tf.contrib.slim
tf.app.flags.DEFINE_integer(
'batch_size', 100, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'max_num_batches', None,
    'Max number of batches to evaluate. By default, use all.')
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'checkpoint_path', '/tmp/tfmodel/',
'The directory where the model was written to or an absolute path to a '
'checkpoint file.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_string(
'dataset_name', 'imagenet', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'test', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to evaluate.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
tf.app.flags.DEFINE_integer(
'eval_image_size', None, 'Eval image size')
tf.app.flags.DEFINE_bool(
'quantize', False, 'whether to use quantized graph or not.')
FLAGS = tf.app.flags.FLAGS
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
tf_global_step = slim.get_or_create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
####################
# Select the model #
####################
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
is_training=False)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
shuffle=False,
common_queue_capacity=2 * FLAGS.batch_size,
common_queue_min=FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=False)
eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
####################
# Define the model #
####################
logits, _ = network_fn(images)
if FLAGS.quantize:
tf.contrib.quantize.create_eval_graph()
if FLAGS.moving_average_decay:
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, tf_global_step)
variables_to_restore = variable_averages.variables_to_restore(
slim.get_model_variables())
variables_to_restore[tf_global_step.op.name] = tf_global_step
else:
variables_to_restore = slim.get_variables_to_restore()
predictions = tf.argmax(logits, 1)
labels = tf.squeeze(labels)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
'Recall_5': slim.metrics.streaming_recall_at_k(
logits, labels, 5),
})
# Print the summaries to screen.
for name, value in names_to_values.items():
summary_name = 'eval/%s' % name
op = tf.summary.scalar(summary_name, value, collections=[])
op = tf.Print(op, [value], summary_name)
tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
# TODO(sguada) use num_epochs=1
if FLAGS.max_num_batches:
num_batches = FLAGS.max_num_batches
else:
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Evaluating %s' % checkpoint_path)
slim.evaluation.evaluate_once(
master=FLAGS.master,
checkpoint_path=checkpoint_path,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
eval_op=list(names_to_updates.values()),
variables_to_restore=variables_to_restore)
if __name__ == '__main__':
tf.app.run()
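
# Example invocation (a sketch; the paths below are placeholders):
#
#   python eval_image_classifier.py \
#       --checkpoint_path=/tmp/tfmodel/ \
#       --eval_dir=/tmp/tfmodel/eval \
#       --dataset_name=imagenet \
#       --dataset_split_name=validation \
#       --dataset_dir=/data/imagenet \
#       --model_name=inception_v3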
|
PyTorch/SpeechSynthesis/HiFiGAN/platform | platform | DGX1_HiFi-GAN_AMP_4GPU | #!/bin/bash
set -a
: ${NUM_GPUS:=4}
: ${BATCH_SIZE:=32}
: ${GRAD_ACCUMULATION:=1}
: ${AMP:=true}
bash scripts/train_lj22khz.sh "$@"
|
PyTorch/Classification/GPUNet/triton/065ms/runner | runner | config_NVIDIA-DGX-A100-(1x-A100-80GB) | batching: dynamic
checkpoints:
- name: 0.65ms
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_0_pyt_ckpt/versions/21.12.0_amp/zip
configurations:
- checkpoint: 0.65ms
parameters:
backend_accelerator: trt
checkpoint: 0.65ms
device_kind: gpu
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 64
number_of_model_instances: 2
precision: fp16
tensorrt_capture_cuda_graph: 0
torch_jit: none
container_version: '21.12'
datasets:
- name: imagenet
datasets_dir: datasets
ensemble_model_name: null
framework: PyTorch
measurement_steps_offline: 8
measurement_steps_online: 32
model_name: GPUnet
performance_tool: model_analyzer
triton_container_image: nvcr.io/nvidia/tritonserver:21.12-py3
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | data_decoder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for data decoders.
Data decoders decode the input data and return a dictionary of tensors keyed by
the entries in core.reader.Fields.
"""
from abc import ABCMeta
from abc import abstractmethod
class DataDecoder(object):
"""Interface for data decoders."""
__metaclass__ = ABCMeta
@abstractmethod
def decode(self, data):
"""Return a single image and associated labels.
Args:
data: a string tensor holding a serialized protocol buffer corresponding
to data for a single image.
Returns:
tensor_dict: a dictionary containing tensors. Possible keys are defined in
reader.Fields.
"""
pass
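
# Example subclass (a minimal sketch; the keys below are placeholders for
# illustration rather than the actual fields defined in reader.Fields):
#
#   class DummyDecoder(DataDecoder):
#
#     def decode(self, data):
#       return {'image': tf.zeros([1, 1, 3]), 'label': tf.constant(0)}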
|
PyTorch/SpeechSynthesis/Tacotron2 | Tacotron2 | test_infer | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from tacotron2.text import text_to_sequence
import models
import torch
import argparse
import numpy as np
from scipy.io.wavfile import write
import sys
from inference import checkpoint_from_distributed, unwrap_distributed, MeasureTime, prepare_input_sequence, load_and_setup_model
import time
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from apex import amp
from waveglow.denoiser import Denoiser
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str,
help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('--waveglow', type=str,
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
parser.add_argument('-d', '--denoising-strength', default=0.01, type=float)
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
run_mode = parser.add_mutually_exclusive_group()
run_mode.add_argument('--fp16', action='store_true',
help='Run inference with FP16')
run_mode.add_argument('--cpu', action='store_true',
help='Run inference on CPU')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--num-iters', type=int, default=10,
help='Number of iterations')
parser.add_argument('-il', '--input-length', type=int, default=64,
help='Input length')
parser.add_argument('-bs', '--batch-size', type=int, default=1,
help='Batch size')
return parser
def print_stats(measurements_all):
throughput = measurements_all['throughput']
preprocessing = measurements_all['pre_processing']
type_conversion = measurements_all['type_conversion']
storage = measurements_all['storage']
data_transfer = measurements_all['data_transfer']
postprocessing = [sum(p) for p in zip(type_conversion,storage,data_transfer)]
latency = measurements_all['latency']
waveglow_latency = measurements_all['waveglow_latency']
tacotron2_latency = measurements_all['tacotron2_latency']
denoiser_latency = measurements_all['denoiser_latency']
num_mels_per_audio = measurements_all['num_mels_per_audio']
latency.sort()
cf_50 = max(latency[:int(len(latency)*0.50)])
cf_90 = max(latency[:int(len(latency)*0.90)])
cf_95 = max(latency[:int(len(latency)*0.95)])
cf_99 = max(latency[:int(len(latency)*0.99)])
cf_100 = max(latency[:int(len(latency)*1.0)])
print("Throughput average (samples/sec) = {:.0f}".format(np.mean(throughput)))
print("Preprocessing average (seconds) = {:.4f}".format(np.mean(preprocessing)))
print("Postprocessing average (seconds) = {:.4f}".format(np.mean(postprocessing)))
print("Number of mels per audio average = {:.0f}".format(np.mean(num_mels_per_audio)))
print("Tacotron2 latency average (seconds) = {:.2f}".format(np.mean(tacotron2_latency)))
print("WaveGlow latency average (seconds) = {:.2f}".format(np.mean(waveglow_latency)))
print("Denoiser latency average (seconds) = {:.4f}".format(np.mean(denoiser_latency)))
print("Latency average (seconds) = {:.2f}".format(np.mean(latency)))
print("Latency std (seconds) = {:.2f}".format(np.std(latency)))
print("Latency cl 50 (seconds) = {:.2f}".format(cf_50))
print("Latency cl 90 (seconds) = {:.2f}".format(cf_90))
print("Latency cl 95 (seconds) = {:.2f}".format(cf_95))
print("Latency cl 99 (seconds) = {:.2f}".format(cf_99))
print("Latency cl 100 (seconds) = {:.2f}".format(cf_100))
def main():
"""
Launches text to speech (inference).
Inference is executed on a single GPU or CPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, unknown_args = parser.parse_known_args()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
measurements_all = {"pre_processing": [],
"tacotron2_latency": [],
"waveglow_latency": [],
"denoiser_latency": [],
"latency": [],
"type_conversion": [],
"data_transfer": [],
"storage": [],
"tacotron2_items_per_sec": [],
"waveglow_items_per_sec": [],
"num_mels_per_audio": [],
"throughput": []}
print("args:", args, unknown_args)
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
args.fp16, args.cpu, forward_is_infer=True)
waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
args.fp16, args.cpu, forward_is_infer=True)
denoiser = Denoiser(waveglow)
if not args.cpu:
denoiser.cuda()
texts = ["The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves."]
texts = [texts[0][:args.input_length]]
texts = texts*args.batch_size
warmup_iters = 3
for iter in range(args.num_iters):
measurements = {}
with MeasureTime(measurements, "pre_processing", args.cpu):
sequences_padded, input_lengths = prepare_input_sequence(texts, args.cpu)
with torch.no_grad():
with MeasureTime(measurements, "latency", args.cpu):
with MeasureTime(measurements, "tacotron2_latency", args.cpu):
mel, mel_lengths, _ = tacotron2.infer(sequences_padded, input_lengths)
with MeasureTime(measurements, "waveglow_latency", args.cpu):
audios = waveglow.infer(mel, sigma=args.sigma_infer)
num_mels = mel.size(0)*mel.size(2)
num_samples = audios.size(0)*audios.size(1)
with MeasureTime(measurements, "type_conversion", args.cpu):
audios = audios.float()
with torch.no_grad(), MeasureTime(measurements, "denoiser_latency", args.cpu):
audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)
with MeasureTime(measurements, "data_transfer", args.cpu):
audios = audios.cpu()
with MeasureTime(measurements, "storage", args.cpu):
audios = audios.numpy()
for i, audio in enumerate(audios):
audio_path = "audio_"+str(i)+".wav"
write(audio_path, args.sampling_rate,
audio[:mel_lengths[i]*args.stft_hop_length])
measurements['tacotron2_items_per_sec'] = num_mels/measurements['tacotron2_latency']
measurements['waveglow_items_per_sec'] = num_samples/measurements['waveglow_latency']
measurements['num_mels_per_audio'] = mel.size(2)
measurements['throughput'] = num_samples/measurements['latency']
if iter >= warmup_iters:
for k,v in measurements.items():
measurements_all[k].append(v)
DLLogger.log(step=(iter-warmup_iters), data={k: v})
DLLogger.flush()
print_stats(measurements_all)
if __name__ == '__main__':
main()
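
# Example invocation (a sketch; checkpoint paths are placeholders):
#
#   python test_infer.py --tacotron2 <tacotron2_checkpoint> \
#       --waveglow <waveglow_checkpoint> --fp16 -bs 1 -il 64 --num-iters 10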
|
TensorFlow/Detection/SSD/models/research/object_detection/predictors | predictors | rfcn_box_predictor | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RFCN Box Predictor."""
import tensorflow as tf
from object_detection.core import box_predictor
from object_detection.utils import ops
slim = tf.contrib.slim
BOX_ENCODINGS = box_predictor.BOX_ENCODINGS
CLASS_PREDICTIONS_WITH_BACKGROUND = (
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND)
MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS
class RfcnBoxPredictor(box_predictor.BoxPredictor):
"""RFCN Box Predictor.
Applies a position sensitive ROI pooling on position sensitive feature maps to
predict classes and refined locations. See https://arxiv.org/abs/1605.06409
for details.
This is used for the second stage of the RFCN meta architecture. Notice that
locations are *not* shared across classes, thus for each anchor, a separate
prediction is made for each class.
"""
def __init__(self,
is_training,
num_classes,
conv_hyperparams_fn,
num_spatial_bins,
depth,
crop_size,
box_code_size):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams_fn: A function to construct tf-slim arg_scope with
hyperparameters for convolutional layers.
num_spatial_bins: A list of two integers `[spatial_bins_y,
spatial_bins_x]`.
depth: Target depth to reduce the input feature maps to.
crop_size: A list of two integers `[crop_height, crop_width]`.
box_code_size: Size of encoding for each box.
"""
super(RfcnBoxPredictor, self).__init__(is_training, num_classes)
self._conv_hyperparams_fn = conv_hyperparams_fn
self._num_spatial_bins = num_spatial_bins
self._depth = depth
self._crop_size = crop_size
self._box_code_size = box_code_size
@property
def num_classes(self):
return self._num_classes
def _predict(self, image_features, num_predictions_per_location,
proposal_boxes):
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location: A list of integers representing the number
of box predictions to be made per spatial location for each feature map.
Currently, this must be set to [1], or an error will be raised.
proposal_boxes: A float tensor of shape [batch_size, num_proposals,
box_code_size].
Returns:
box_encodings: A list of float tensors of shape
[batch_size, num_anchors_i, q, code_size] representing the location of
the objects, where q is 1 or the number of classes. Each entry in the
list corresponds to a feature map in the input `image_features` list.
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
Raises:
ValueError: if num_predictions_per_location is not 1 or if
len(image_features) is not 1.
"""
if (len(num_predictions_per_location) != 1 or
num_predictions_per_location[0] != 1):
raise ValueError('Currently RfcnBoxPredictor only supports '
'predicting a single box per class per location.')
if len(image_features) != 1:
raise ValueError('length of `image_features` must be 1. Found {}'.
format(len(image_features)))
image_feature = image_features[0]
num_predictions_per_location = num_predictions_per_location[0]
batch_size = tf.shape(proposal_boxes)[0]
num_boxes = tf.shape(proposal_boxes)[1]
net = image_feature
with slim.arg_scope(self._conv_hyperparams_fn()):
net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth')
# Location predictions.
location_feature_map_depth = (self._num_spatial_bins[0] *
self._num_spatial_bins[1] *
self.num_classes *
self._box_code_size)
location_feature_map = slim.conv2d(net, location_feature_map_depth,
[1, 1], activation_fn=None,
scope='refined_locations')
box_encodings = ops.batch_position_sensitive_crop_regions(
location_feature_map,
boxes=proposal_boxes,
crop_size=self._crop_size,
num_spatial_bins=self._num_spatial_bins,
global_pool=True)
box_encodings = tf.squeeze(box_encodings, squeeze_dims=[2, 3])
box_encodings = tf.reshape(box_encodings,
[batch_size * num_boxes, 1, self.num_classes,
self._box_code_size])
# Class predictions.
total_classes = self.num_classes + 1 # Account for background class.
class_feature_map_depth = (self._num_spatial_bins[0] *
self._num_spatial_bins[1] *
total_classes)
class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1],
activation_fn=None,
scope='class_predictions')
class_predictions_with_background = (
ops.batch_position_sensitive_crop_regions(
class_feature_map,
boxes=proposal_boxes,
crop_size=self._crop_size,
num_spatial_bins=self._num_spatial_bins,
global_pool=True))
class_predictions_with_background = tf.squeeze(
class_predictions_with_background, squeeze_dims=[2, 3])
class_predictions_with_background = tf.reshape(
class_predictions_with_background,
[batch_size * num_boxes, 1, total_classes])
return {BOX_ENCODINGS: [box_encodings],
CLASS_PREDICTIONS_WITH_BACKGROUND:
[class_predictions_with_background]}
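# Hedged usage sketch (added example, not part of the original module): shows
# the expected argument shapes for RfcnBoxPredictor. `_dummy_hyperparams_fn`
# is a hypothetical stand-in for the arg_scope builder that a hyperparams
# config would normally provide; the numeric values are illustrative only.
def _example_rfcn_box_predictor():
  def _dummy_hyperparams_fn():
    return slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu)
  return RfcnBoxPredictor(
      is_training=False,
      num_classes=90,
      conv_hyperparams_fn=_dummy_hyperparams_fn,
      num_spatial_bins=[3, 3],
      depth=1024,
      crop_size=[18, 18],
      box_code_size=4)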
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection | object_detection | box_coder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from abc import ABCMeta, abstractmethod, abstractproperty
import tensorflow as tf
# Box coder types.
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
class BoxCoder(metaclass=ABCMeta):
"""Abstract base class for box coder."""
@abstractproperty
def code_size(self):
"""Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant
"""
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
return self._decode(rel_codes, anchors)
@abstractmethod
def _encode(self, boxes, anchors):
"""Method to be overriden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
@abstractmethod
def _decode(self, rel_codes, anchors):
"""Method to be overriden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
def batch_decode(encoded_boxes, box_coder, anchors):
"""Decode a batch of encoded boxes.
This op takes a batch of encoded bounding boxes and transforms
them to a batch of bounding boxes specified by their corners in
the order of [y_min, x_min, y_max, x_max].
Args:
encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the location of the objects.
box_coder: a BoxCoder object.
anchors: a BoxList of anchors used to encode `encoded_boxes`.
Returns:
decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
coder_size] representing the corners of the objects in the order
of [y_min, x_min, y_max, x_max].
Raises:
ValueError: if batch sizes of the inputs are inconsistent, or if
the number of anchors inferred from encoded_boxes and anchors are
inconsistent.
"""
if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static():
raise ValueError('The number of anchors inferred from encoded_boxes'
' and anchors are inconsistent: shape[1] of encoded_boxes'
' %s should be equal to the number of anchors: %s.' %
(
encoded_boxes.get_shape()[1].value,
anchors.num_boxes_static()
)
)
decoded_boxes = tf.stack([
box_coder.decode(boxes, anchors).get()
for boxes in tf.unstack(encoded_boxes)
])
return decoded_boxes
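# Hedged sketch (added example, not part of the original module): a toy
# BoxCoder subclass illustrating the encode/decode contract consumed by
# `batch_decode`. `IdentityBoxCoder` is hypothetical and ignores its anchors;
# real coders (e.g. the Faster R-CNN coder) compute genuine relative codes.
# It assumes a sibling `box_list` module providing BoxList, as referenced in
# the docstrings above.
class IdentityBoxCoder(BoxCoder):
  """Toy coder whose 4-value code is simply the box corners themselves."""
  @property
  def code_size(self):
    return 4
  def _encode(self, boxes, anchors):
    # `boxes` is a BoxList; its corner tensor serves as the "relative" code here.
    return boxes.get()
  def _decode(self, rel_codes, anchors):
    from . import box_list  # assumed sibling module
    return box_list.BoxList(rel_codes)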
|
PyTorch/LanguageModeling/BERT/distillation/BERT_4L_288D | BERT_4L_288D | config | {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 288,
"initializer_range": 0.02,
"intermediate_size": 1152,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 4,
"type_vocab_size": 2,
"vocab_size": 30528
} |
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling | modeling | poolers | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.layers import ROIAlign
from .utils import cat
class LevelMapper(object):
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
"""
def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):
"""
Arguments:
k_min (int)
k_max (int)
canonical_scale (int)
canonical_level (int)
eps (float)
"""
self.k_min = k_min
self.k_max = k_max
self.s0 = canonical_scale
self.lvl0 = canonical_level
self.eps = eps
def __call__(self, boxlists):
"""
Arguments:
boxlists (list[BoxList])
"""
# Compute level ids
s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))
# Eqn.(1) in FPN paper
target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps))
target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
return target_lvls.to(torch.int64) - self.k_min
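# Worked example (added note, not in the original file): with the defaults
# canonical_scale=224 and canonical_level=4, a RoI of area 224*224 maps to
# floor(4 + log2(224/224)) = 4 and a RoI of area 112*112 maps to
# floor(4 + log2(112/224)) = 3. For a mapper built with k_min=2 and k_max=5
# (FPN levels P2..P5), the returned indices are target_level - k_min, i.e.
# 2 and 1, which index directly into the per-level pooler list below.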
class Pooler(nn.Module):
"""
Pooler for Detection with or without FPN.
    It currently hard-codes ROIAlign in the implementation,
but that can be made more generic later on.
Also, the requirement of passing the scales is not strictly necessary, as they
can be inferred from the size of the feature map / size of original image,
which is available thanks to the BoxList.
"""
def __init__(self, output_size, scales, sampling_ratio, is_nhwc):
"""
Arguments:
output_size (list[tuple[int]] or list[int]): output size for the pooled region
scales (list[float]): scales for each Pooler
sampling_ratio (int): sampling ratio for ROIAlign
"""
super(Pooler, self).__init__()
poolers = []
self.is_nhwc=is_nhwc
for scale in scales:
poolers.append(
ROIAlign(
output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, is_nhwc=is_nhwc
)
)
self.poolers = nn.ModuleList(poolers)
self.output_size = output_size
# get the levels in the feature map by leveraging the fact that the network always
# downsamples by a factor of 2 at each level.
lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
self.map_levels = LevelMapper(lvl_min, lvl_max)
def convert_to_roi_format(self, boxes):
concat_boxes = cat([b.bbox for b in boxes], dim=0)
device, dtype = concat_boxes.device, concat_boxes.dtype
ids = cat(
[
torch.full((len(b), 1), i, dtype=dtype, device=device)
for i, b in enumerate(boxes)
],
dim=0,
)
rois = torch.cat([ids, concat_boxes], dim=1)
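        # Added note: `rois` has shape [total_boxes, 5]; column 0 holds the index
        # of the source image in the batch and columns 1:5 are the box coordinates
        # copied from each BoxList, which is the layout the ROIAlign op expects.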
return rois
def forward(self, x, boxes):
"""
Arguments:
x (list[Tensor]): feature maps for each level
boxes (list[BoxList]): boxes to be used to perform the pooling operation.
Returns:
result (Tensor)
"""
num_levels = len(self.poolers)
rois = self.convert_to_roi_format(boxes)
if num_levels == 1:
return self.poolers[0](x[0], rois)
levels = self.map_levels(boxes)
num_rois = len(rois)
num_channels = x[0].shape[1]
output_size = self.output_size[0]
dtype, device = x[0].dtype, x[0].device
result = torch.zeros(
(num_rois, num_channels, output_size, output_size),
dtype=dtype,
device=device,
) if not self.is_nhwc else torch.zeros(
(num_rois, num_channels, output_size, output_size),
dtype=dtype,
device=device,
).to(memory_format=torch.channels_last)
for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
idx_in_level = torch.nonzero(levels == level).squeeze(1)
rois_per_level = rois[idx_in_level]
result[idx_in_level] = pooler(per_level_feature, rois_per_level).to(dtype)
return result
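# Hedged usage sketch (added example, not part of the original module): builds
# a 4-level FPN pooler and runs it on random feature maps. BoxList is assumed
# importable from maskrcnn_benchmark.structures.bounding_box as elsewhere in
# this codebase; shapes, scales and the CUDA placement (the custom ROIAlign op
# targets the GPU) are illustrative only.
def _example_pooler():
    from maskrcnn_benchmark.structures.bounding_box import BoxList
    pooler = Pooler(output_size=(7, 7),
                    scales=(0.25, 0.125, 0.0625, 0.03125),
                    sampling_ratio=2,
                    is_nhwc=False)
    # Feature maps for an 800x800 image at strides 4, 8, 16 and 32.
    features = [torch.randn(1, 256, s, s, device="cuda") for s in (200, 100, 50, 25)]
    boxes = [BoxList(torch.tensor([[10.0, 10.0, 80.0, 120.0]], device="cuda"),
                     (800, 800), mode="xyxy")]
    return pooler(features, boxes)  # shape: [1, 256, 7, 7]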
|
TensorFlow/Recommendation/WideAndDeep/preproc | preproc | preproc3 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import _pickle as cPickle
import argparse
import datetime
import hashlib
import math
import numpy as np
import pandas as pd
import pyspark.sql.functions as F
import time
from pyspark.context import SparkContext, SparkConf
from pyspark.ml.linalg import SparseVector, VectorUDT
from pyspark.sql.session import SparkSession
from pyspark.sql.types import IntegerType, StringType, StructType, StructField, TimestampType, FloatType, ArrayType
evaluation_verbose = False
OUTPUT_BUCKET_FOLDER = "/outbrain/preprocessed/"
DATA_BUCKET_FOLDER = "/outbrain/orig/"
SPARK_TEMP_FOLDER = "/outbrain/spark-temp/"
conf = SparkConf().setMaster('local[*]').set('spark.executor.memory', '40g').set('spark.driver.memory', '200g').set(
"spark.local.dir", SPARK_TEMP_FOLDER)
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
start_time = time.time()
def hashstr(s, nr_bins):
return int(hashlib.md5(s.encode('utf8')).hexdigest(), 16) % (nr_bins - 1) + 1
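# Added note: hashstr buckets an arbitrary string into the range
# [1, nr_bins - 1] (0 is left unused, e.g. for missing values); the same input
# always lands in the same bucket, e.g. hashstr('US>CA', 1000) is a stable id
# between 1 and 999.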
parser = argparse.ArgumentParser()
parser.add_argument(
'--submission',
action='store_true',
default=False
)
args = parser.parse_args()
evaluation = not args.submission
# ## UDFs
def date_time_to_unix_epoch(date_time):
return int(time.mktime(date_time.timetuple()))
def date_time_to_unix_epoch_treated(dt):
if dt is not None:
try:
epoch = date_time_to_unix_epoch(dt)
return epoch
except Exception as e:
print("Error processing dt={}".format(dt), e)
return 0
else:
return 0
timestamp_null_to_zero_int_udf = F.udf(lambda x: date_time_to_unix_epoch_treated(x), IntegerType())
INT_DEFAULT_NULL_VALUE = -1
int_null_to_minus_one_udf = F.udf(lambda x: x if x is not None else INT_DEFAULT_NULL_VALUE, IntegerType())
int_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(IntegerType()))
float_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(FloatType()))
str_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(StringType()))
def truncate_day_from_timestamp(ts):
return int(ts / 1000 / 60 / 60 / 24)
truncate_day_from_timestamp_udf = F.udf(lambda ts: truncate_day_from_timestamp(ts), IntegerType())
extract_country_udf = F.udf(lambda geo: geo.strip()[:2] if geo is not None else '', StringType())
extract_country_state_udf = F.udf(lambda geo: geo.strip()[:5] if geo is not None else '', StringType())
list_len_udf = F.udf(lambda x: len(x) if x is not None else 0, IntegerType())
def convert_odd_timestamp(timestamp_ms_relative):
TIMESTAMP_DELTA = 1465876799998
return datetime.datetime.fromtimestamp((int(timestamp_ms_relative) + TIMESTAMP_DELTA) // 1000)
# # Loading Files
# ## Loading UTC/BST for each country and US / CA states (local time)
country_utc_dst_df = pd.read_csv('preproc/data/country_codes_utc_dst_tz_delta.csv', keep_default_na=False)
countries_utc_dst_dict = dict(
zip(country_utc_dst_df['country_code'].tolist(), country_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
countries_utc_dst_broad = sc.broadcast(countries_utc_dst_dict)
us_states_utc_dst_df = pd.read_csv('preproc/data/us_states_abbrev_bst.csv', keep_default_na=False)
us_states_utc_dst_dict = dict(
zip(us_states_utc_dst_df['state_abb'].tolist(), us_states_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
us_states_utc_dst_broad = sc.broadcast(us_states_utc_dst_dict)
ca_states_utc_dst_df = pd.read_csv('preproc/data/ca_states_abbrev_bst.csv', keep_default_na=False)
ca_countries_utc_dst_dict = dict(
zip(ca_states_utc_dst_df['state_abb'].tolist(), ca_states_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
ca_countries_utc_dst_broad = sc.broadcast(ca_countries_utc_dst_dict)
# ## Loading competition csvs
events_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("uuid_event", StringType(), True),
StructField("document_id_event", IntegerType(), True),
StructField("timestamp_event", IntegerType(), True),
StructField("platform_event", IntegerType(), True),
StructField("geo_location_event", StringType(), True)]
)
events_df = spark.read.schema(events_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "events.csv") \
.withColumn('dummyEvents', F.lit(1)) \
.withColumn('day_event', truncate_day_from_timestamp_udf('timestamp_event')) \
.withColumn('event_country', extract_country_udf('geo_location_event')) \
.withColumn('event_country_state', extract_country_state_udf('geo_location_event')) \
.alias('events')
events_df.count()
# Drop rows with empty "geo_location"
events_df = events_df.dropna(subset="geo_location_event")
events_df.count()
# Drop rows with empty "platform"
events_df = events_df.dropna(subset="platform_event")
events_df.count()
page_views_schema = StructType(
[StructField("uuid_pv", StringType(), True),
StructField("document_id_pv", IntegerType(), True),
StructField("timestamp_pv", IntegerType(), True),
StructField("platform_pv", IntegerType(), True),
StructField("geo_location_pv", StringType(), True),
StructField("traffic_source_pv", IntegerType(), True)]
)
page_views_df = spark.read.schema(page_views_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "page_views.csv") \
.withColumn('day_pv', truncate_day_from_timestamp_udf('timestamp_pv')) \
.alias('page_views')
page_views_df.createOrReplaceTempView('page_views')
page_views_users_df = spark.sql('''
SELECT uuid_pv, document_id_pv, max(timestamp_pv) as max_timestamp_pv, 1 as dummyPageView
FROM page_views p
GROUP BY uuid_pv, document_id_pv
''').alias('page_views_users')
promoted_content_schema = StructType(
[StructField("ad_id", IntegerType(), True),
StructField("document_id_promo", IntegerType(), True),
StructField("campaign_id", IntegerType(), True),
StructField("advertiser_id", IntegerType(), True)]
)
promoted_content_df = spark.read.schema(promoted_content_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "promoted_content.csv") \
.withColumn('dummyPromotedContent', F.lit(1)).alias('promoted_content').cache()
documents_meta_schema = StructType(
[StructField("document_id_doc", IntegerType(), True),
StructField("source_id", IntegerType(), True),
StructField("publisher_id", IntegerType(), True),
StructField("publish_time", TimestampType(), True)]
)
documents_meta_df = spark.read.schema(documents_meta_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_meta.csv") \
.withColumn('dummyDocumentsMeta', F.lit(1)).alias('documents_meta').cache()
documents_meta_df.count()
# Drop rows with empty "source_id"
documents_meta_df = documents_meta_df.dropna(subset="source_id")
documents_meta_df.count()
source_publishers_df = documents_meta_df.select(["source_id", "publisher_id"]).dropDuplicates()
source_publishers_df.count()
# get list of source_ids without publisher_id
rows_no_pub = source_publishers_df.filter("publisher_id is NULL")
source_ids_without_publisher = [row['source_id'] for row in rows_no_pub.collect()]
len(source_ids_without_publisher)
# maximum value of publisher_id used so far
max_pub = max(source_publishers_df.select(["publisher_id"]).dropna().collect())['publisher_id']
max_pub
# rows filled with new publisher_ids
new_publishers = [(source, max_pub + 1 + nr) for nr, source in enumerate(source_ids_without_publisher)]
new_publishers_df = spark.createDataFrame(new_publishers, ("source_id", "publisher_id"))
new_publishers_df.take(10)
# old and new publishers merged
fixed_source_publishers_df = source_publishers_df.dropna().union(new_publishers_df)
fixed_source_publishers_df.collect()[-30:]
# update documents_meta with new publishers
documents_meta_df = documents_meta_df.drop('publisher_id').join(fixed_source_publishers_df, on='source_id')
documents_meta_df.count()
# Joining with Page Views to get traffic_source_pv
events_joined_df = events_df.join(documents_meta_df
.withColumnRenamed('source_id', 'source_id_doc_event')
.withColumnRenamed('publisher_id', 'publisher_doc_event')
.withColumnRenamed('publish_time', 'publish_time_doc_event'),
on=F.col("document_id_event") == F.col("document_id_doc"), how='left') \
.join(page_views_df,
on=[F.col('uuid_event') == F.col('uuid_pv'),
F.col('document_id_event') == F.col('document_id_pv'),
F.col('platform_event') == F.col('platform_pv'),
F.col('geo_location_event') == F.col('geo_location_pv'),
F.col('day_event') == F.col('day_pv')],
how='left') \
.alias('events').cache()
documents_categories_schema = StructType(
[StructField("document_id_cat", IntegerType(), True),
StructField("category_id", IntegerType(), True),
StructField("confidence_level_cat", FloatType(), True)]
)
documents_categories_df = spark.read.schema(documents_categories_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_categories.csv") \
.alias('documents_categories').cache()
documents_categories_grouped_df = documents_categories_df.groupBy('document_id_cat') \
.agg(F.collect_list('category_id').alias('category_id_list'),
F.collect_list('confidence_level_cat').alias('confidence_level_cat_list')) \
.withColumn('dummyDocumentsCategory', F.lit(1)) \
.alias('documents_categories_grouped')
documents_topics_schema = StructType(
[StructField("document_id_top", IntegerType(), True),
StructField("topic_id", IntegerType(), True),
StructField("confidence_level_top", FloatType(), True)]
)
documents_topics_df = spark.read.schema(documents_topics_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_topics.csv") \
.alias('documents_topics').cache()
documents_topics_grouped_df = documents_topics_df.groupBy('document_id_top') \
.agg(F.collect_list('topic_id').alias('topic_id_list'),
F.collect_list('confidence_level_top').alias('confidence_level_top_list')) \
.withColumn('dummyDocumentsTopics', F.lit(1)) \
.alias('documents_topics_grouped')
documents_entities_schema = StructType(
[StructField("document_id_ent", IntegerType(), True),
StructField("entity_id", StringType(), True),
StructField("confidence_level_ent", FloatType(), True)]
)
documents_entities_df = spark.read.schema(documents_entities_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_entities.csv") \
.alias('documents_entities').cache()
documents_entities_grouped_df = documents_entities_df.groupBy('document_id_ent') \
.agg(F.collect_list('entity_id').alias('entity_id_list'),
F.collect_list('confidence_level_ent').alias('confidence_level_ent_list')) \
.withColumn('dummyDocumentsEntities', F.lit(1)) \
.alias('documents_entities_grouped')
clicks_train_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True),
StructField("clicked", IntegerType(), True)]
)
clicks_train_df = spark.read.schema(clicks_train_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "clicks_train.csv") \
.withColumn('dummyClicksTrain', F.lit(1)).alias('clicks_train')
clicks_train_joined_df = clicks_train_df \
.join(promoted_content_df, on='ad_id', how='left') \
.join(documents_meta_df,
on=F.col("promoted_content.document_id_promo") == F.col("documents_meta.document_id_doc"),
how='left') \
.join(events_joined_df, on='display_id', how='left')
clicks_train_joined_df.createOrReplaceTempView('clicks_train_joined')
if evaluation:
table_name = 'user_profiles_eval'
else:
table_name = 'user_profiles'
user_profiles_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER + table_name) \
.withColumn('dummyUserProfiles', F.lit(1)).alias('user_profiles')
# # Splitting Train/validation set | Test set
if evaluation:
validation_set_exported_df = spark.read.parquet(
OUTPUT_BUCKET_FOLDER + "validation_set.parquet") \
.alias('validation_set')
validation_set_exported_df.select('display_id').distinct() \
.createOrReplaceTempView("validation_display_ids")
validation_set_df = spark.sql('''
SELECT * FROM clicks_train_joined t
WHERE EXISTS (SELECT display_id FROM validation_display_ids
WHERE display_id = t.display_id)''').alias('clicks') \
.join(documents_categories_grouped_df,
on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df,
on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df,
on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"),
how='left') \
.join(documents_categories_grouped_df
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list')
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list')
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list')
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.join(page_views_users_df,
on=[F.col("clicks.uuid_event") == F.col("page_views_users.uuid_pv"),
F.col("clicks.document_id_promo") == F.col("page_views_users.document_id_pv")],
how='left')
# print("validation_set_df.count() =", validation_set_df.count())
# Added to validation set information about the event and the user for statistics of the error (avg ctr)
validation_set_ground_truth_df = validation_set_df.filter('clicked = 1') \
.join(user_profiles_df,
on=[F.col("user_profiles.uuid") == F.col("uuid_event")],
how='left') \
.withColumn('user_categories_count', list_len_udf('category_id_list')) \
.withColumn('user_topics_count', list_len_udf('topic_id_list')) \
.withColumn('user_entities_count', list_len_udf('entity_id_list')) \
.select('display_id', 'ad_id', 'platform_event', 'day_event', 'timestamp_event',
'geo_location_event', 'event_country', 'event_country_state', 'views',
'user_categories_count', 'user_topics_count', 'user_entities_count') \
.withColumnRenamed('ad_id', 'ad_id_gt') \
.withColumnRenamed('views', 'user_views_count') \
.cache()
# print("validation_set_ground_truth_df.count() =", validation_set_ground_truth_df.count())
train_set_df = spark.sql('''
SELECT * FROM clicks_train_joined t
WHERE NOT EXISTS (SELECT display_id FROM validation_display_ids
WHERE display_id = t.display_id)''').cache()
print("train_set_df.count() =", train_set_df.count())
# validation_display_ids_df.groupBy("day_event").count().show()
else:
clicks_test_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True)]
)
clicks_test_df = spark.read.schema(clicks_test_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "clicks_test.csv") \
.withColumn('dummyClicksTest', F.lit(1)) \
.withColumn('clicked', F.lit(-999)) \
.alias('clicks_test')
test_set_df = clicks_test_df \
.join(promoted_content_df, on='ad_id', how='left') \
.join(documents_meta_df,
on=F.col("promoted_content.document_id_promo") == F.col("documents_meta.document_id_doc"),
how='left') \
.join(documents_categories_grouped_df,
on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df,
on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df,
on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"),
how='left') \
.join(events_joined_df, on='display_id', how='left') \
.join(documents_categories_grouped_df
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list')
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list')
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list')
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.join(page_views_users_df,
on=[F.col("events.uuid_event") == F.col("page_views_users.uuid_pv"),
F.col("promoted_content.document_id_promo") == F.col("page_views_users.document_id_pv")],
how='left')
train_set_df = clicks_train_joined_df.cache()
print("train_set_df.count() =", train_set_df.count())
# # Training models
def is_null(value):
return value is None or len(str(value).strip()) == 0
LESS_SPECIAL_CAT_VALUE = 'less'
def get_category_field_values_counts(field, df, min_threshold=10):
category_counts = dict(list(filter(lambda x: not is_null(x[0]) and x[1] >= min_threshold,
df.select(field).groupBy(field).count().rdd.map(
lambda x: (x[0], x[1])).collect())))
    # Add a special value to represent category values whose counts fall below min_threshold
category_counts[LESS_SPECIAL_CAT_VALUE] = -1
return category_counts
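# Added note: e.g. get_category_field_values_counts('event_country', events_df)
# yields a dict like {'US': <count>, 'GB': <count>, ..., 'less': -1}, keeping
# only values seen at least min_threshold times; the special 'less' entry is
# meant to stand in for all rarer values downstream.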
# ## Building category values counters and indexers
event_country_values_counts = get_category_field_values_counts('event_country', events_df, min_threshold=10)
len(event_country_values_counts)
# All non-null categories: 230
event_country_state_values_counts = get_category_field_values_counts('event_country_state', events_df, min_threshold=10)
len(event_country_state_values_counts)
event_geo_location_values_counts = get_category_field_values_counts('geo_location_event', events_df, min_threshold=10)
len(event_geo_location_values_counts)
# All non-null categories: 2988
doc_entity_id_values_counts = get_category_field_values_counts('entity_id', documents_entities_df, min_threshold=10)
len(doc_entity_id_values_counts)
# All non-null categories: 1326009
# ## Processing average CTR by categories
def get_percentiles(df, field, quantiles_levels=None, max_error_rate=0.0):
if quantiles_levels is None:
quantiles_levels = np.arange(0.0, 1.1, 0.1).tolist()
quantiles = df.approxQuantile(field, quantiles_levels, max_error_rate)
return dict(zip(quantiles_levels, quantiles))
# REG = 10
REG = 0
ctr_udf = F.udf(lambda clicks, views: clicks / float(views + REG), FloatType())
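# Worked example (added note): with REG = 0 the UDF is a plain click-through
# rate, e.g. 3 clicks over 120 views -> 0.025; with the commented-out REG = 10
# it would be a smoothed 3 / 130 ≈ 0.023, damping ids with very few views.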
# ### Average CTR by ad_id
ad_id_popularity_df = train_set_df.groupby('ad_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
# ad_id_popularity_df.count()
# get_percentiles(ad_id_popularity_df, 'clicks')
# get_percentiles(ad_id_popularity_df, 'views')
ad_id_popularity = ad_id_popularity_df.filter('views > 5').select('ad_id', 'ctr', 'views') \
.rdd.map(lambda x: (x['ad_id'], (x['ctr'], x['views'], 1, 1))).collectAsMap()
ad_id_popularity_broad = sc.broadcast(ad_id_popularity)
list(ad_id_popularity.values())[:3]
len(ad_id_popularity)
# get_ad_id_ctr_udf = F.udf(lambda ad_id: ad_id_popularity[ad_id] if ad_id in ad_id_popularity else -1, FloatType())
ad_id_avg_ctr = sum(map(lambda x: x[0], ad_id_popularity.values())) / float(len(ad_id_popularity))
ad_id_avg_ctr
ad_id_weighted_avg_ctr = sum(map(lambda x: x[0] * x[1], ad_id_popularity.values())) / float(
sum(map(lambda x: x[1], ad_id_popularity.values())))
ad_id_weighted_avg_ctr
ad_id_views_median = np.median(np.array(list(map(lambda x: x[1], ad_id_popularity.values()))))
ad_id_views_median
ad_id_views_mean = sum(map(lambda x: x[1], ad_id_popularity.values())) / float(len(ad_id_popularity))
ad_id_views_mean
# ### Average CTR by document_id (promoted_content)
document_id_popularity_df = train_set_df \
.groupby('document_id_promo') \
.agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
document_id_popularity = document_id_popularity_df.filter('views > 5') \
.select('document_id_promo', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['document_id_promo'],
(x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(document_id_popularity)
document_id_popularity_broad = sc.broadcast(document_id_popularity)
# document_id_popularity_df.count()
# get_percentiles(document_id_popularity_df, 'clicks')
# get_percentiles(document_id_popularity_df, 'views')
document_id_avg_ctr = sum(map(lambda x: x[0], document_id_popularity.values())) / float(len(document_id_popularity))
document_id_avg_ctr
document_id_weighted_avg_ctr = sum(list(map(lambda x: x[0] * x[1], document_id_popularity.values()))) / float(
sum(list(map(lambda x: x[1], document_id_popularity.values()))))
document_id_weighted_avg_ctr
document_id_views_median = np.median(np.array(list(map(lambda x: x[1], document_id_popularity.values()))))
document_id_views_median
document_id_views_mean = sum(map(lambda x: x[1], document_id_popularity.values())) / float(len(document_id_popularity))
document_id_views_mean
# ### Average CTR by (doc_event, doc_ad)
doc_event_doc_ad_avg_ctr_df = train_set_df.groupBy('document_id_event', 'document_id_promo') \
.agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'), F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
doc_event_doc_ad_avg_ctr = doc_event_doc_ad_avg_ctr_df.filter('views > 5') \
.select('document_id_event', 'document_id_promo', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: ((x['document_id_event'], x['document_id_promo']),
(x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(doc_event_doc_ad_avg_ctr)
doc_event_doc_ad_avg_ctr_broad = sc.broadcast(doc_event_doc_ad_avg_ctr)
# ### Average CTR by country, source_id
source_id_by_country_popularity_df = train_set_df \
.select('clicked', 'source_id', 'event_country', 'ad_id') \
.groupby('event_country', 'source_id') \
.agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
# source_id_popularity = source_id_popularity_df
# .filter('views > 100 and source_id is not null')
# .select('source_id', 'ctr')
# .rdd.collectAsMap()
source_id_by_country_popularity = source_id_by_country_popularity_df.filter(
'views > 5 and source_id is not null and event_country <> ""').select('event_country', 'source_id', 'ctr', 'views',
'distinct_ad_ids').rdd.map(
lambda x: ((x['event_country'], x['source_id']), (x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(source_id_by_country_popularity)
source_id_by_country_popularity_broad = sc.broadcast(source_id_by_country_popularity)
source_id_by_country_avg_ctr = sum(map(lambda x: x[0], source_id_by_country_popularity.values())) / float(
len(source_id_by_country_popularity))
source_id_by_country_avg_ctr
source_id_by_country_weighted_avg_ctr = sum(
map(lambda x: x[0] * x[1], source_id_by_country_popularity.values())) / float(
sum(map(lambda x: x[1], source_id_by_country_popularity.values())))
source_id_by_country_weighted_avg_ctr
source_id_by_country_views_median = np.median(
np.array(list(map(lambda x: x[1], source_id_by_country_popularity.values()))))
source_id_by_country_views_median
source_id_by_country_views_mean = sum(map(lambda x: x[1], source_id_by_country_popularity.values())) / float(
len(source_id_by_country_popularity))
source_id_by_country_views_mean
# ### Average CTR by source_id
source_id_popularity_df = train_set_df.select('clicked', 'source_id', 'ad_id') \
.groupby('source_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
source_id_popularity = source_id_popularity_df \
.filter('views > 10 and source_id is not null') \
.select('source_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['source_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
.collectAsMap()
len(source_id_popularity)
source_id_popularity_broad = sc.broadcast(source_id_popularity)
# source_id_popularity_df.count()
# get_percentiles(source_id_popularity_df, 'clicks')
# get_percentiles(source_id_popularity_df, 'views')
# source_id_popularity = source_id_popularity_df
# .filter('views > 100 and source_id is not null')
# .select('source_id', 'ctr')
# .rdd.collectAsMap()
# ### Average CTR by publisher_id
publisher_popularity_df = train_set_df.select('clicked', 'publisher_id', 'ad_id') \
.groupby('publisher_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
publisher_popularity = publisher_popularity_df \
.filter('views > 10 and publisher_id is not null') \
.select('publisher_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['publisher_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
.collectAsMap()
len(publisher_popularity)
publisher_popularity_broad = sc.broadcast(publisher_popularity)
# publisher_popularity_df.count()
# ##863
# get_percentiles(publisher_popularity_df, 'clicks')
# get_percentiles(publisher_popularity_df, 'views')
# publisher_id_popularity = publisher_popularity_df
# .filter('views > 100 and publisher_id is not null')
# .select('publisher_id', 'ctr')
# .rdd.collectAsMap()
# len(publisher_id_popularity)
# ##639
# ### Average CTR by advertiser_id
advertiser_id_popularity_df = train_set_df.select('clicked', 'advertiser_id', 'ad_id') \
.groupby('advertiser_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
advertiser_id_popularity = advertiser_id_popularity_df \
.filter('views > 10 and advertiser_id is not null') \
.select('advertiser_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['advertiser_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(advertiser_id_popularity)
advertiser_id_popularity_broad = sc.broadcast(advertiser_id_popularity)
# advertiser_id_popularity_df.count()
# ##4063
# get_percentiles(advertiser_id_popularity_df, 'clicks')
# get_percentiles(advertiser_id_popularity_df, 'views')
# advertiser_id_popularity = advertiser_id_popularity_df
# .filter('views > 100 and advertiser_id is not null')
# .select('advertiser_id', 'ctr')
# .rdd.collectAsMap()
# len(advertiser_id_popularity)
# ##3129
# ### Average CTR by campaign_id
campaign_id_popularity_df = train_set_df.select('clicked', 'campaign_id', 'ad_id') \
.groupby('campaign_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
campaign_id_popularity = campaign_id_popularity_df \
.filter('views > 10 and campaign_id is not null') \
.select('campaign_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['campaign_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
.collectAsMap()
len(campaign_id_popularity)
campaign_id_popularity_broad = sc.broadcast(campaign_id_popularity)
# campaign_id_popularity_df.count()
# ##31390
# get_percentiles(campaign_id_popularity_df, 'clicks')
# get_percentiles(campaign_id_popularity_df, 'views')
# campaign_id_popularity = campaign_id_popularity_df
# .filter('views > 100 and campaign_id is not null')
# .select('campaign_id', 'ctr')
# .rdd.collectAsMap()
# len(campaign_id_popularity)
# ##16097
# ### Average CTR by category
category_id_popularity_df = train_set_df.join(
documents_categories_df.alias('cat_local'),
on=F.col("document_id_promo") == F.col("cat_local.document_id_cat"), how='inner') \
.select('clicked', 'category_id', 'confidence_level_cat', 'ad_id') \
.groupby('category_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_cat').alias('avg_confidence_level_cat'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
category_id_popularity = category_id_popularity_df.filter('views > 10') \
.select('category_id', 'ctr', 'views', 'avg_confidence_level_cat', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['category_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_cat']))).collectAsMap()
len(category_id_popularity)
category_id_popularity_broad = sc.broadcast(category_id_popularity)
list(category_id_popularity.values())[:10]
np.median(np.array(list(map(lambda x: x[1], category_id_popularity.values()))))
sum(map(lambda x: x[1], category_id_popularity.values())) / float(len(category_id_popularity))
# There seems to be a hierarchy in the categories, judging by the pattern of the codes...
# category_id_popularity
# ### Average CTR by (country, category)
category_id_by_country_popularity_df = train_set_df \
.join(documents_categories_df.alias('cat_local'),
on=F.col("document_id_promo") == F.col("cat_local.document_id_cat"), how='inner') \
.select('clicked', 'category_id', 'confidence_level_cat', 'event_country', 'ad_id') \
.groupby('event_country', 'category_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_cat').alias('avg_confidence_level_cat'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
category_id_by_country_popularity = category_id_by_country_popularity_df \
.filter('views > 10 and event_country <> ""') \
.select('event_country', 'category_id', 'ctr', 'views', 'avg_confidence_level_cat',
'distinct_ad_ids') \
.rdd.map(lambda x: ((x['event_country'], x['category_id']),
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_cat']))).collectAsMap()
len(category_id_by_country_popularity)
category_id_by_country_popularity_broad = sc.broadcast(category_id_by_country_popularity)
# ### Average CTR by Topic
topic_id_popularity_df = train_set_df.join(
documents_topics_df.alias('top_local'),
on=F.col("document_id_promo") == F.col("top_local.document_id_top"), how='inner') \
.select('clicked', 'topic_id', 'confidence_level_top', 'ad_id') \
.groupby('topic_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.mean('confidence_level_top').alias('avg_confidence_level_top'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
topic_id_popularity = topic_id_popularity_df.filter('views > 10') \
.select('topic_id', 'ctr', 'views', 'avg_confidence_level_top', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['topic_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_top']))).collectAsMap()
len(topic_id_popularity)
topic_id_popularity_broad = sc.broadcast(topic_id_popularity)
sum(map(lambda x: x[1], topic_id_popularity.values())) / float(len(topic_id_popularity))
sum(map(lambda x: x[2] * x[1], topic_id_popularity.values())) / float(len(topic_id_popularity))
# ### Average CTR by (country, topic)
topic_id_by_country_popularity_df = train_set_df.join(
documents_topics_df.alias('top_local'),
on=F.col("document_id_promo") == F.col("top_local.document_id_top"), how='inner') \
.select('clicked', 'topic_id', 'confidence_level_top', 'event_country', 'ad_id') \
.groupby('event_country', 'topic_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_top').alias('avg_confidence_level_top'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
topic_id_id_by_country_popularity = topic_id_by_country_popularity_df \
.filter('views > 10 and event_country <> ""') \
.select('event_country', 'topic_id', 'ctr', 'views', 'avg_confidence_level_top',
'distinct_ad_ids') \
.rdd.map(lambda x: ((x['event_country'], x['topic_id']),
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_top']))).collectAsMap()
len(topic_id_id_by_country_popularity)
topic_id_id_by_country_popularity_broad = sc.broadcast(topic_id_id_by_country_popularity)
# ### Average CTR by Entity
entity_id_popularity_df = train_set_df.join(
documents_entities_df.alias('ent_local'),
on=F.col("document_id_promo") == F.col("ent_local.document_id_ent"), how='inner') \
.select('clicked', 'entity_id', 'confidence_level_ent', 'ad_id') \
.groupby('entity_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.mean('confidence_level_ent').alias('avg_confidence_level_ent'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
entity_id_popularity = entity_id_popularity_df.filter('views > 5') \
.select('entity_id', 'ctr', 'views', 'avg_confidence_level_ent', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['entity_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_ent']))).collectAsMap()
len(entity_id_popularity)
entity_id_popularity_broad = sc.broadcast(entity_id_popularity)
np.median(np.array(list(map(lambda x: x[1], entity_id_popularity.values()))))
sum(map(lambda x: x[1], entity_id_popularity.values())) / float(len(entity_id_popularity))
# ### Average CTR by (country, entity)
entity_id_by_country_popularity_df = train_set_df.join(
documents_entities_df.alias('ent_local'),
on=F.col("document_id_promo") == F.col("ent_local.document_id_ent"), how='inner') \
.select('clicked', 'entity_id', 'event_country', 'confidence_level_ent', 'ad_id') \
.groupby('event_country', 'entity_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_ent').alias('avg_confidence_level_ent'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
entity_id_by_country_popularity = entity_id_by_country_popularity_df \
.filter('views > 5 and event_country <> ""') \
.select('event_country', 'entity_id', 'ctr', 'views', 'avg_confidence_level_ent',
'distinct_ad_ids') \
.rdd.map(lambda x: ((x['event_country'], x['entity_id']),
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_ent']))).collectAsMap()
len(entity_id_by_country_popularity)
entity_id_by_country_popularity_broad = sc.broadcast(entity_id_by_country_popularity)
# ### Loading # docs by categories, topics, entities
df_filenames_suffix = ''
if evaluation:
df_filenames_suffix = '_eval'
with open(OUTPUT_BUCKET_FOLDER + 'categories_docs_counts' + df_filenames_suffix + '.pickle', 'rb') as input_file:
categories_docs_counts = cPickle.load(input_file)
len(categories_docs_counts)
with open(OUTPUT_BUCKET_FOLDER + 'topics_docs_counts' + df_filenames_suffix + '.pickle', 'rb') as input_file:
topics_docs_counts = cPickle.load(input_file)
len(topics_docs_counts)
with open(OUTPUT_BUCKET_FOLDER + 'entities_docs_counts' + df_filenames_suffix + '.pickle', 'rb') as input_file:
entities_docs_counts = cPickle.load(input_file)
len(entities_docs_counts)
documents_total = documents_meta_df.count()
documents_total
# ## Exploring Publish Time
publish_times_df = train_set_df.filter('publish_time is not null').select('document_id_promo',
'publish_time').distinct().select(
F.col('publish_time').cast(IntegerType()))
publish_time_percentiles = get_percentiles(publish_times_df, 'publish_time', quantiles_levels=[0.5],
max_error_rate=0.001)
publish_time_percentiles
publish_time_median = int(publish_time_percentiles[0.5])
datetime.datetime.utcfromtimestamp(publish_time_median)
def get_days_diff(newer_timestamp, older_timestamp):
sec_diff = newer_timestamp - older_timestamp
days_diff = sec_diff / 60 / 60 / 24
return days_diff
def get_time_decay_factor(timestamp, timestamp_ref=None, alpha=0.001):
if timestamp_ref is None:
timestamp_ref = time.time()
days_diff = get_days_diff(timestamp_ref, timestamp)
denominator = math.pow(1 + alpha, days_diff)
if denominator != 0:
return 1.0 / denominator
else:
return 0.0
TIME_DECAY_ALPHA = 0.0005
ref_dates = [
1476714880, # 7 days
1474727680, # 30 days
1469370880, # 90 days
1461508480, # 180 days
1445697280, # 1 year
1414161280 # 2 years
]
for d in ref_dates:
print(datetime.datetime.utcfromtimestamp(d), get_time_decay_factor(d, alpha=TIME_DECAY_ALPHA))
# ### Get local time
DEFAULT_TZ_EST = -4.0
def get_local_utc_bst_tz(event_country, event_country_state):
local_tz = DEFAULT_TZ_EST
if len(event_country) > 0:
if event_country in countries_utc_dst_broad.value:
local_tz = countries_utc_dst_broad.value[event_country]
if len(event_country_state) > 2:
state = event_country_state[3:5]
if event_country == 'US':
if state in us_states_utc_dst_broad.value:
local_tz = us_states_utc_dst_broad.value[state]
elif event_country == 'CA':
if state in ca_countries_utc_dst_broad.value:
local_tz = ca_countries_utc_dst_broad.value[state]
return float(local_tz)
hour_bins_dict = {'EARLY_MORNING': 0,
'MORNING': 1,
'MIDDAY': 2,
'AFTERNOON': 3,
'EVENING': 4,
'NIGHT': 5}
hour_bins_values = sorted(hour_bins_dict.values())
def get_hour_bin(hour):
if hour >= 5 and hour < 8:
hour_bin = hour_bins_dict['EARLY_MORNING']
elif hour >= 8 and hour < 11:
hour_bin = hour_bins_dict['MORNING']
elif hour >= 11 and hour < 14:
hour_bin = hour_bins_dict['MIDDAY']
elif hour >= 14 and hour < 19:
hour_bin = hour_bins_dict['AFTERNOON']
elif hour >= 19 and hour < 22:
hour_bin = hour_bins_dict['EVENING']
else:
hour_bin = hour_bins_dict['NIGHT']
return hour_bin
def get_local_datetime(dt, event_country, event_country_state):
local_tz = get_local_utc_bst_tz(event_country, event_country_state)
tz_delta = local_tz - DEFAULT_TZ_EST
local_time = dt + datetime.timedelta(hours=tz_delta)
return local_time
get_local_datetime(datetime.datetime.now(), 'US', 'US>CA')
def is_weekend(dt):
return dt.weekday() >= 5
is_weekend(datetime.datetime(2016, 6, 14))
# ## Average CTR functions
timestamp_ref = date_time_to_unix_epoch(datetime.datetime(2016, 6, 29, 3, 59, 59))
decay_factor_default = get_time_decay_factor(publish_time_median, timestamp_ref, alpha=TIME_DECAY_ALPHA)
print("decay_factor_default", decay_factor_default)
def get_confidence_sample_size(sample, max_for_reference=100000):
# Avoiding overflow for large sample size
if sample >= max_for_reference:
return 1.0
ref_log = math.log(1 + max_for_reference,
                       2)  # Curiously, a base-2 log for the reference gives a slightly higher score, so it is kept
return math.log(1 + sample) / float(ref_log)
for i in [0, 0.5, 1, 2, 3, 4, 5, 10, 20, 30, 100, 200, 300, 1000, 2000, 3000, 10000, 20000, 30000, 50000, 90000, 100000,
500000, 900000, 1000000, 2171607]:
print(i, get_confidence_sample_size(i))
def get_popularity(an_id, a_dict):
return (a_dict[an_id][0], get_confidence_sample_size(a_dict[an_id][1] / float(a_dict[an_id][2])) * a_dict[an_id][
3]) if an_id in a_dict else (None, None)
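# Added note: every *_popularity dict maps an id to a tuple
# (ctr, views, distinct_ad_ids, weight), where weight is 1 for plain ids and
# the average confidence level for categories/topics/entities. get_popularity
# returns (ctr, confidence) with
# confidence = get_confidence_sample_size(views / distinct_ad_ids) * weight;
# e.g. an ad_id entry (ctr, 1000, 1, 1) gets confidence ≈ 0.42.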
def get_weighted_avg_popularity_from_list(ids_list, confidence_ids_list, pop_dict):
pops = list(filter(lambda x: x[0][0] is not None,
[(get_popularity(an_id, pop_dict), confidence) for an_id, confidence in
zip(ids_list, confidence_ids_list)]))
# print("pops",pops)
if len(pops) > 0:
weighted_avg = sum(map(lambda x: x[0][0] * x[0][1] * x[1], pops)) / float(
sum(map(lambda x: x[0][1] * x[1], pops)))
confidence = max(map(lambda x: x[0][1] * x[1], pops))
return weighted_avg, confidence
else:
return None, None
def get_weighted_avg_country_popularity_from_list(event_country, ids_list, confidence_ids_list, pop_dict):
pops = list(filter(lambda x: x[0][0] is not None,
[(get_popularity((event_country, an_id), pop_dict), confidence) for an_id, confidence in
zip(ids_list, confidence_ids_list)]))
if len(pops) > 0:
weighted_avg = sum(map(lambda x: x[0][0] * x[0][1] * x[1], pops)) / float(
sum(map(lambda x: x[0][1] * x[1], pops)))
confidence = max(map(lambda x: x[0][1] * x[1], pops))
return weighted_avg, confidence
else:
return None, None
def get_popularity_score(event_country, ad_id, document_id, source_id,
publisher_id, advertiser_id, campaign_id, document_id_event,
category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
output_detailed_list=False):
probs = []
avg_ctr, confidence = get_popularity(ad_id, ad_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_ad_id', avg_ctr, confidence))
avg_ctr, confidence = get_popularity(document_id, document_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_document_id', avg_ctr, confidence))
avg_ctr, confidence = get_popularity((document_id_event, document_id), doc_event_doc_ad_avg_ctr_broad.value)
if avg_ctr is not None:
probs.append(('pop_doc_event_doc_ad', avg_ctr, confidence))
if source_id != -1:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_popularity((event_country, source_id),
source_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_source_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_popularity(source_id, source_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_source_id', avg_ctr, confidence))
if publisher_id is not None:
avg_ctr, confidence = get_popularity(publisher_id, publisher_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_publisher_id', avg_ctr, confidence))
if advertiser_id is not None:
avg_ctr, confidence = get_popularity(advertiser_id, advertiser_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_advertiser_id', avg_ctr, confidence))
if campaign_id is not None:
avg_ctr, confidence = get_popularity(campaign_id, campaign_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_campain_id', avg_ctr, confidence))
if len(entity_ids_by_doc) > 0:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
event_country, entity_ids_by_doc, ent_confidence_level_by_doc,
entity_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_entity_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_weighted_avg_popularity_from_list(
entity_ids_by_doc, ent_confidence_level_by_doc,
entity_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_entity_id', avg_ctr, confidence))
if len(topic_ids_by_doc) > 0:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
event_country, topic_ids_by_doc, top_confidence_level_by_doc,
topic_id_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_topic_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_weighted_avg_popularity_from_list(
topic_ids_by_doc, top_confidence_level_by_doc,
topic_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_topic_id', avg_ctr, confidence))
if len(category_ids_by_doc) > 0:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
event_country, category_ids_by_doc, cat_confidence_level_by_doc,
category_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_category_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_weighted_avg_popularity_from_list(
category_ids_by_doc, cat_confidence_level_by_doc,
category_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_category_id', avg_ctr, confidence))
# print("[get_popularity_score] probs", probs)
if output_detailed_list:
return probs
else:
if len(probs) > 0:
# weighted_avg_probs_by_confidence = sum(map(lambda x: x[1] * math.log(1+x[2],2), probs)) \
# / float(sum(map(lambda x: math.log(1+x[2],2), probs)))
weighted_avg_probs_by_confidence = sum(map(lambda x: x[1] * x[2], probs)) / float(
sum(map(lambda x: x[2], probs)))
confidence = max(map(lambda x: x[2], probs))
return weighted_avg_probs_by_confidence, confidence
else:
return None, None
# ## Content-Based similarity functions
def cosine_similarity_dicts(dict1, dict2):
dict1_norm = math.sqrt(sum([v ** 2 for v in dict1.values()]))
dict2_norm = math.sqrt(sum([v ** 2 for v in dict2.values()]))
sum_common_aspects = 0.0
intersections = 0
for key in dict1:
if key in dict2:
sum_common_aspects += dict1[key] * dict2[key]
intersections += 1
return sum_common_aspects / (dict1_norm * dict2_norm), intersections
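# Worked example (added note): for dict1 = {1: 1.0, 2: 1.0} and
# dict2 = {2: 1.0, 3: 1.0} the single shared key contributes 1.0 and both
# norms are sqrt(2), so the function returns (0.5, 1): cosine similarity 0.5
# with one intersecting aspect.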
def cosine_similarity_user_docs_aspects(user_aspect_profile, doc_aspect_ids, doc_aspects_confidence,
aspect_docs_counts):
if user_aspect_profile is None or len(user_aspect_profile) == 0 or doc_aspect_ids is None or len(
doc_aspect_ids) == 0:
return None, None
doc_aspects = dict(zip(doc_aspect_ids, doc_aspects_confidence))
doc_aspects_tfidf_confid = {}
for key in doc_aspects:
tf = 1.0
idf = math.log(math.log(documents_total / float(aspect_docs_counts[key])))
confidence = doc_aspects[key]
doc_aspects_tfidf_confid[key] = tf * idf * confidence
user_aspects_tfidf_confid = {}
for key in user_aspect_profile:
tfidf = user_aspect_profile[key][0]
confidence = user_aspect_profile[key][1]
user_aspects_tfidf_confid[key] = tfidf * confidence
similarity, intersections = cosine_similarity_dicts(doc_aspects_tfidf_confid, user_aspects_tfidf_confid)
if intersections > 0:
# P(A intersect B)_intersections = P(A)^intersections * P(B)^intersections
random_error = math.pow(len(doc_aspects) / float(len(aspect_docs_counts)),
intersections) * math.pow(len(user_aspect_profile) / float(len(aspect_docs_counts)),
intersections)
else:
# P(A not intersect B) = 1 - P(A intersect B)
random_error = 1 - ((len(doc_aspects) / float(len(aspect_docs_counts))) *
(len(user_aspect_profile) / float(len(aspect_docs_counts))))
confidence = 1.0 - random_error
return similarity, confidence
def cosine_similarity_doc_event_doc_ad_aspects(doc_event_aspect_ids, doc_event_aspects_confidence,
doc_ad_aspect_ids, doc_ad_aspects_confidence,
aspect_docs_counts):
if doc_event_aspect_ids is None or len(doc_event_aspect_ids) == 0 \
or doc_ad_aspect_ids is None or len(doc_ad_aspect_ids) == 0:
return None, None
doc_event_aspects = dict(zip(doc_event_aspect_ids, doc_event_aspects_confidence))
doc_event_aspects_tfidf_confid = {}
for key in doc_event_aspect_ids:
tf = 1.0
idf = math.log(math.log(documents_total / float(aspect_docs_counts[key])))
confidence = doc_event_aspects[key]
doc_event_aspects_tfidf_confid[key] = tf * idf * confidence
doc_ad_aspects = dict(zip(doc_ad_aspect_ids, doc_ad_aspects_confidence))
doc_ad_aspects_tfidf_confid = {}
for key in doc_ad_aspect_ids:
tf = 1.0
idf = math.log(math.log(documents_total / float(aspect_docs_counts[key])))
confidence = doc_ad_aspects[key]
doc_ad_aspects_tfidf_confid[key] = tf * idf * confidence
similarity, intersections = cosine_similarity_dicts(doc_event_aspects_tfidf_confid, doc_ad_aspects_tfidf_confid)
if intersections > 0:
# P(A intersect B)_intersections = P(A)^intersections * P(B)^intersections
random_error = math.pow(len(doc_event_aspect_ids) / float(len(aspect_docs_counts)),
intersections) * math.pow(len(doc_ad_aspect_ids) / float(len(aspect_docs_counts)),
intersections)
else:
# P(A not intersect B) = 1 - P(A intersect B)
random_error = 1 - ((len(doc_event_aspect_ids) / float(len(aspect_docs_counts))) *
(len(doc_ad_aspect_ids) / float(len(aspect_docs_counts))))
confidence = 1.0 - random_error
return similarity, confidence
def get_user_cb_interest_score(user_views_count, user_categories, user_topics, user_entities,
timestamp_event, category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
output_detailed_list=False):
# Content-Based
sims = []
categories_similarity, cat_sim_confidence = cosine_similarity_user_docs_aspects(user_categories,
category_ids_by_doc,
cat_confidence_level_by_doc,
categories_docs_counts)
if categories_similarity is not None:
sims.append(('user_doc_ad_sim_categories', categories_similarity, cat_sim_confidence))
topics_similarity, top_sim_confidence = cosine_similarity_user_docs_aspects(user_topics, topic_ids_by_doc,
top_confidence_level_by_doc,
topics_docs_counts)
if topics_similarity is not None:
sims.append(('user_doc_ad_sim_topics', topics_similarity, top_sim_confidence))
entities_similarity, entity_sim_confid = cosine_similarity_user_docs_aspects(user_entities, entity_ids_by_doc,
ent_confidence_level_by_doc,
entities_docs_counts)
if entities_similarity is not None:
sims.append(('user_doc_ad_sim_entities', entities_similarity, entity_sim_confid))
if output_detailed_list:
return sims
else:
if len(sims) > 0:
weighted_avg_sim_by_confidence = sum(map(lambda x: x[1] * x[2], sims)) / float(
sum(map(lambda x: x[2], sims)))
confidence = sum(map(lambda x: x[2], sims)) / float(len(sims))
# print("[get_user_cb_interest_score] sims: {} | \
# Avg: {} - Confid: {}".format(sims, weighted_avg_sim_by_confidence, confidence))
return weighted_avg_sim_by_confidence, confidence
else:
return None, None
def get_doc_event_doc_ad_cb_similarity_score(doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=False):
# Content-Based
sims = []
categories_similarity, cat_sim_confidence = cosine_similarity_doc_event_doc_ad_aspects(
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
categories_docs_counts)
if categories_similarity is not None:
sims.append(('doc_event_doc_ad_sim_categories', categories_similarity, cat_sim_confidence))
topics_similarity, top_sim_confidence = cosine_similarity_doc_event_doc_ad_aspects(
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
topics_docs_counts)
if topics_similarity is not None:
sims.append(('doc_event_doc_ad_sim_topics', topics_similarity, top_sim_confidence))
entities_similarity, entity_sim_confid = cosine_similarity_doc_event_doc_ad_aspects(
doc_event_entity_ids, doc_event_ent_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
entities_docs_counts)
if entities_similarity is not None:
sims.append(('doc_event_doc_ad_sim_entities', entities_similarity, entity_sim_confid))
if output_detailed_list:
return sims
else:
if len(sims) > 0:
weighted_avg_sim_by_confidence = sum(map(lambda x: x[1] * x[2], sims)) / float(
sum(map(lambda x: x[2], sims)))
confidence = sum(map(lambda x: x[2], sims)) / float(len(sims))
# print("[get_user_cb_interest_score] sims: {} | \
# Avg: {} - Confid: {}".format(sims, weighted_avg_sim_by_confidence, confidence))
return weighted_avg_sim_by_confidence, confidence
else:
return None, None
# # Feature Vector export
bool_feature_names = ['event_weekend',
'user_has_already_viewed_doc']
int_feature_names = ['user_views',
'ad_views',
'doc_views',
'doc_event_days_since_published',
'doc_event_hour',
'doc_ad_days_since_published',
]
float_feature_names = [
'pop_ad_id',
'pop_ad_id_conf',
'pop_ad_id_conf_multipl',
'pop_document_id',
'pop_document_id_conf',
'pop_document_id_conf_multipl',
'pop_publisher_id',
'pop_publisher_id_conf',
'pop_publisher_id_conf_multipl',
'pop_advertiser_id',
'pop_advertiser_id_conf',
'pop_advertiser_id_conf_multipl',
'pop_campain_id',
'pop_campain_id_conf',
'pop_campain_id_conf_multipl',
'pop_doc_event_doc_ad',
'pop_doc_event_doc_ad_conf',
'pop_doc_event_doc_ad_conf_multipl',
'pop_source_id',
'pop_source_id_conf',
'pop_source_id_conf_multipl',
'pop_source_id_country',
'pop_source_id_country_conf',
'pop_source_id_country_conf_multipl',
'pop_entity_id',
'pop_entity_id_conf',
'pop_entity_id_conf_multipl',
'pop_entity_id_country',
'pop_entity_id_country_conf',
'pop_entity_id_country_conf_multipl',
'pop_topic_id',
'pop_topic_id_conf',
'pop_topic_id_conf_multipl',
'pop_topic_id_country',
'pop_topic_id_country_conf',
'pop_topic_id_country_conf_multipl',
'pop_category_id',
'pop_category_id_conf',
'pop_category_id_conf_multipl',
'pop_category_id_country',
'pop_category_id_country_conf',
'pop_category_id_country_conf_multipl',
'user_doc_ad_sim_categories',
'user_doc_ad_sim_categories_conf',
'user_doc_ad_sim_categories_conf_multipl',
'user_doc_ad_sim_topics',
'user_doc_ad_sim_topics_conf',
'user_doc_ad_sim_topics_conf_multipl',
'user_doc_ad_sim_entities',
'user_doc_ad_sim_entities_conf',
'user_doc_ad_sim_entities_conf_multipl',
'doc_event_doc_ad_sim_categories',
'doc_event_doc_ad_sim_categories_conf',
'doc_event_doc_ad_sim_categories_conf_multipl',
'doc_event_doc_ad_sim_topics',
'doc_event_doc_ad_sim_topics_conf',
'doc_event_doc_ad_sim_topics_conf_multipl',
'doc_event_doc_ad_sim_entities',
'doc_event_doc_ad_sim_entities_conf',
'doc_event_doc_ad_sim_entities_conf_multipl'
]
TRAFFIC_SOURCE_FV = 'traffic_source'
EVENT_HOUR_FV = 'event_hour'
EVENT_COUNTRY_FV = 'event_country'
EVENT_COUNTRY_STATE_FV = 'event_country_state'
EVENT_GEO_LOCATION_FV = 'event_geo_location'
EVENT_PLATFORM_FV = 'event_platform'
AD_ADVERTISER_FV = 'ad_advertiser'
DOC_AD_SOURCE_ID_FV = 'doc_ad_source_id'
DOC_AD_PUBLISHER_ID_FV = 'doc_ad_publisher_id'
DOC_EVENT_SOURCE_ID_FV = 'doc_event_source_id'
DOC_EVENT_PUBLISHER_ID_FV = 'doc_event_publisher_id'
DOC_AD_CATEGORY_ID_FV = 'doc_ad_category_id'
DOC_AD_TOPIC_ID_FV = 'doc_ad_topic_id'
DOC_AD_ENTITY_ID_FV = 'doc_ad_entity_id'
DOC_EVENT_CATEGORY_ID_FV = 'doc_event_category_id'
DOC_EVENT_TOPIC_ID_FV = 'doc_event_topic_id'
DOC_EVENT_ENTITY_ID_FV = 'doc_event_entity_id'
# ### Configuring feature vector
category_feature_names_integral = ['ad_advertiser',
'doc_ad_category_id_1',
'doc_ad_category_id_2',
'doc_ad_category_id_3',
'doc_ad_topic_id_1',
'doc_ad_topic_id_2',
'doc_ad_topic_id_3',
'doc_ad_entity_id_1',
'doc_ad_entity_id_2',
'doc_ad_entity_id_3',
'doc_ad_entity_id_4',
'doc_ad_entity_id_5',
'doc_ad_entity_id_6',
'doc_ad_publisher_id',
'doc_ad_source_id',
'doc_event_category_id_1',
'doc_event_category_id_2',
'doc_event_category_id_3',
'doc_event_topic_id_1',
'doc_event_topic_id_2',
'doc_event_topic_id_3',
'doc_event_entity_id_1',
'doc_event_entity_id_2',
'doc_event_entity_id_3',
'doc_event_entity_id_4',
'doc_event_entity_id_5',
'doc_event_entity_id_6',
'doc_event_publisher_id',
'doc_event_source_id',
'event_country',
'event_country_state',
'event_geo_location',
'event_hour',
'event_platform',
'traffic_source']
feature_vector_labels_integral = bool_feature_names \
+ int_feature_names \
+ float_feature_names \
+ category_feature_names_integral
feature_vector_labels_integral_dict = dict([(key, idx) for idx, key in enumerate(feature_vector_labels_integral)])
with open(OUTPUT_BUCKET_FOLDER + 'feature_vector_labels_integral.txt', 'w') as output:
output.writelines('\n'.join(feature_vector_labels_integral))
# ### Building feature vectors
def set_feature_vector_cat_value_integral(field_name, field_value, feature_vector):
if not is_null(field_value): # and str(field_value) != '-1':
feature_vector[feature_vector_labels_integral_dict[field_name]] = float(field_value)
def set_feature_vector_cat_top_multi_values_integral(
field_name, values, confidences, feature_vector, top=5):
top_values = list(filter(lambda z: z != -1,
map(lambda y: y[0], sorted(zip(values, confidences), key=lambda x: -x[1]))))[:top]
for idx, field_value in list(enumerate(top_values)):
set_feature_vector_cat_value_integral(
'{}_{}'.format(field_name, idx + 1), field_value, feature_vector)
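# Example of the top-k selection above (illustrative values): calling
# set_feature_vector_cat_top_multi_values_integral('doc_ad_topic_id', [10, -1, 30],
# [0.2, 0.9, 0.5], feature_vector, top=2) sorts by descending confidence ([-1, 30, 10]),
# drops the -1 placeholder and keeps [30, 10], setting doc_ad_topic_id_1 = 30.0 and
# doc_ad_topic_id_2 = 10.0 in the feature vector.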
def get_ad_feature_vector_integral(
user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
event_country, event_country_state,
ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels):
try:
feature_vector = {}
if user_views_count is not None:
feature_vector[feature_vector_labels_integral_dict['user_views']] = float(user_views_count)
if user_doc_ids_viewed is not None:
feature_vector[feature_vector_labels_integral_dict['user_has_already_viewed_doc']] = float(
document_id in user_doc_ids_viewed)
if ad_id in ad_id_popularity_broad.value:
feature_vector[feature_vector_labels_integral_dict['ad_views']] = float(
ad_id_popularity_broad.value[ad_id][1])
if document_id in document_id_popularity_broad.value:
feature_vector[feature_vector_labels_integral_dict['doc_views']] = float(
document_id_popularity_broad.value[document_id][1])
if timestamp_event > -1:
dt_timestamp_event = convert_odd_timestamp(timestamp_event)
if doc_ad_publish_time is not None:
delta_days = (dt_timestamp_event - doc_ad_publish_time).days
if 0 <= delta_days <= 365 * 10: # 10 years
feature_vector[feature_vector_labels_integral_dict['doc_ad_days_since_published']] = float(
delta_days)
if doc_event_publish_time is not None:
delta_days = (dt_timestamp_event - doc_event_publish_time).days
if 0 <= delta_days <= 365 * 10: # 10 years
feature_vector[feature_vector_labels_integral_dict['doc_event_days_since_published']] = float(
delta_days)
# Local period of the day (hours)
dt_local_timestamp_event = get_local_datetime(dt_timestamp_event, event_country, event_country_state)
local_hour_bin = get_hour_bin(dt_local_timestamp_event.hour)
feature_vector[feature_vector_labels_integral_dict['doc_event_hour']] = float(
local_hour_bin) # Hour for Decision Trees
set_feature_vector_cat_value_integral(EVENT_HOUR_FV, local_hour_bin,
feature_vector) # Period of day for FFM
# Weekend
weekend = int(is_weekend(dt_local_timestamp_event))
feature_vector[feature_vector_labels_integral_dict['event_weekend']] = float(weekend)
conf_field_suffix = '_conf'
conf_multiplied_field_suffix = '_conf_multipl'
# Setting Popularity fields
pop_scores = get_popularity_score(event_country, ad_id, document_id, source_id,
publisher_id, advertiser_id, campaign_id, document_id_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=True)
for score in pop_scores:
feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_field_suffix]] = score[2]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_multiplied_field_suffix]] = \
score[1] * score[2]
# Setting User-Doc_ad CB Similarity fields
user_doc_ad_cb_sim_scores = get_user_cb_interest_score(
user_views_count, user_categories, user_topics, user_entities,
timestamp_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=True)
for score in user_doc_ad_cb_sim_scores:
feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_field_suffix]] = score[2]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_multiplied_field_suffix]] = \
score[1] * score[2]
# Setting Doc_event-doc_ad CB Similarity fields
doc_event_doc_ad_cb_sim_scores = get_doc_event_doc_ad_cb_similarity_score(
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=True)
for score in doc_event_doc_ad_cb_sim_scores:
feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_field_suffix]] = score[2]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_multiplied_field_suffix]] = \
score[1] * score[2]
# Process code for event_country
if event_country in event_country_values_counts:
event_country_code = event_country_values_counts[event_country]
else:
event_country_code = event_country_values_counts[LESS_SPECIAL_CAT_VALUE]
set_feature_vector_cat_value_integral(EVENT_COUNTRY_FV, event_country_code, feature_vector)
# Process code for event_country_state
if event_country_state in event_country_state_values_counts:
event_country_state_code = event_country_state_values_counts[event_country_state]
else:
event_country_state_code = event_country_state_values_counts[LESS_SPECIAL_CAT_VALUE]
set_feature_vector_cat_value_integral(EVENT_COUNTRY_STATE_FV, event_country_state_code, feature_vector)
# Process code for geo_location_event
if geo_location_event in event_geo_location_values_counts:
geo_location_event_code = event_geo_location_values_counts[geo_location_event]
else:
geo_location_event_code = event_geo_location_values_counts[LESS_SPECIAL_CAT_VALUE]
# Shift traffic_source and platform_event by -1 (1-based categorical codes become 0-based)
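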
if traffic_source_pv is not None:
feature_vector[feature_vector_labels_integral_dict[TRAFFIC_SOURCE_FV]] = int(traffic_source_pv - 1)
if platform_event is not None:
feature_vector[feature_vector_labels_integral_dict[EVENT_PLATFORM_FV]] = int(platform_event - 1)
set_feature_vector_cat_value_integral(EVENT_GEO_LOCATION_FV, geo_location_event_code, feature_vector)
# set_feature_vector_cat_value_integral(TRAFFIC_SOURCE_FV, traffic_source_pv - 1, feature_vector)
# set_feature_vector_cat_value_integral(EVENT_PLATFORM_FV, platform_event - 1, feature_vector)
set_feature_vector_cat_value_integral(AD_ADVERTISER_FV, advertiser_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_AD_SOURCE_ID_FV, source_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_AD_PUBLISHER_ID_FV, publisher_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_EVENT_SOURCE_ID_FV, doc_event_source_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_EVENT_PUBLISHER_ID_FV, doc_event_publisher_id, feature_vector)
set_feature_vector_cat_top_multi_values_integral(DOC_AD_CATEGORY_ID_FV, doc_ad_category_ids,
doc_ad_cat_confidence_levels, feature_vector, top=3)
set_feature_vector_cat_top_multi_values_integral(DOC_AD_TOPIC_ID_FV, doc_ad_topic_ids,
doc_ad_top_confidence_levels, feature_vector, top=3)
set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_CATEGORY_ID_FV, doc_event_category_ids,
doc_event_cat_confidence_levels, feature_vector, top=3)
set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_TOPIC_ID_FV, doc_event_topic_ids,
doc_event_top_confidence_levels, feature_vector, top=3)
# Process codes for doc_ad_entity_ids
doc_ad_entity_ids_codes = [doc_entity_id_values_counts[x]
if x in doc_entity_id_values_counts
else doc_entity_id_values_counts[LESS_SPECIAL_CAT_VALUE]
for x in doc_ad_entity_ids]
set_feature_vector_cat_top_multi_values_integral(DOC_AD_ENTITY_ID_FV, doc_ad_entity_ids_codes,
doc_ad_ent_confidence_levels, feature_vector, top=6)
# Process codes for doc_event_entity_ids
doc_event_entity_ids_codes = [doc_entity_id_values_counts[x]
if x in doc_entity_id_values_counts
else doc_entity_id_values_counts[LESS_SPECIAL_CAT_VALUE]
for x in doc_event_entity_ids]
set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_ENTITY_ID_FV, doc_event_entity_ids_codes,
doc_event_ent_confidence_levels, feature_vector, top=6)
# Creating dummy column as the last column
# because xgboost has a problem if the last column is undefined for all rows,
# saying that dimensions of data and feature_names do not match
# feature_vector[feature_vector_labels_dict[DUMMY_FEATURE_COLUMN]] = float(0)
# Ensuring that all elements are floats for compatibility with UDF output (ArrayType(FloatType()))
# feature_vector = list([float(x) for x in feature_vector])
except Exception as e:
raise Exception("[get_ad_feature_vector_integral] ERROR PROCESSING FEATURE VECTOR! Params: {}"
.format([user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
event_country, event_country_state,
ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels]),
e)
return SparseVector(len(feature_vector_labels_integral_dict), feature_vector)
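# The SparseVector constructor accepts (size, {index: value}), so only the populated
# positions of the feature vector are materialized, e.g. SparseVector(4, {0: 1.0, 3: 2.0}).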
get_ad_feature_vector_integral_udf = F.udf(
lambda user_doc_ids_viewed, user_views_count, user_categories, user_topics,
user_entities, event_country, event_country_state, ad_id, document_id, source_id,
doc_ad_publish_time, timestamp_event, platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
doc_event_category_id_list, doc_event_confidence_level_cat_list,
doc_event_topic_id_list, doc_event_confidence_level_top,
doc_event_entity_id_list, doc_event_confidence_level_ent:
get_ad_feature_vector_integral(user_doc_ids_viewed, user_views_count, user_categories, user_topics,
user_entities,
event_country, event_country_state,
ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event,
platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
doc_event_category_id_list, doc_event_confidence_level_cat_list,
doc_event_topic_id_list, doc_event_confidence_level_top,
doc_event_entity_id_list, doc_event_confidence_level_ent),
VectorUDT())
# ## Export Train set feature vectors
train_set_enriched_df = train_set_df \
.join(documents_categories_grouped_df,
on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df,
on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df,
on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"),
how='left') \
.join(documents_categories_grouped_df
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list')
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list')
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list')
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.select('display_id', 'uuid_event', 'event_country', 'event_country_state', 'platform_event',
'source_id_doc_event', 'publisher_doc_event', 'publish_time_doc_event',
'publish_time', 'ad_id', 'document_id_promo', 'clicked',
'geo_location_event', 'advertiser_id', 'publisher_id',
'campaign_id', 'document_id_event',
'traffic_source_pv',
int_list_null_to_empty_list_udf('doc_event_category_id_list')
.alias('doc_event_category_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_cat_list')
.alias('doc_event_confidence_level_cat_list'),
int_list_null_to_empty_list_udf('doc_event_topic_id_list')
.alias('doc_event_topic_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_top_list')
.alias('doc_event_confidence_level_top_list'),
str_list_null_to_empty_list_udf('doc_event_entity_id_list')
.alias('doc_event_entity_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_ent_list')
.alias('doc_event_confidence_level_ent_list'),
int_null_to_minus_one_udf('source_id').alias('source_id'),
int_null_to_minus_one_udf('timestamp_event').alias('timestamp_event'),
int_list_null_to_empty_list_udf('category_id_list').alias('category_id_list'),
float_list_null_to_empty_list_udf('confidence_level_cat_list')
.alias('confidence_level_cat_list'),
int_list_null_to_empty_list_udf('topic_id_list').alias('topic_id_list'),
float_list_null_to_empty_list_udf('confidence_level_top_list')
.alias('confidence_level_top_list'),
str_list_null_to_empty_list_udf('entity_id_list').alias('entity_id_list'),
float_list_null_to_empty_list_udf('confidence_level_ent_list')
.alias('confidence_level_ent_list')) \
.join(user_profiles_df, on=[F.col("user_profiles.uuid") == F.col("uuid_event")], how='left') \
.withColumnRenamed('categories', 'user_categories') \
.withColumnRenamed('topics', 'user_topics') \
.withColumnRenamed('entities', 'user_entities') \
.withColumnRenamed('doc_ids', 'user_doc_ids_viewed') \
.withColumnRenamed('views', 'user_views_count')
train_set_feature_vectors_df = train_set_enriched_df \
.withColumn('feature_vector',
get_ad_feature_vector_integral_udf(
'user_doc_ids_viewed',
'user_views_count',
'user_categories',
'user_topics',
'user_entities',
'event_country',
'event_country_state',
'ad_id',
'document_id_promo',
'source_id',
'publish_time',
'timestamp_event',
'platform_event',
'geo_location_event',
'source_id_doc_event',
'publisher_doc_event',
'publish_time_doc_event',
'traffic_source_pv',
'advertiser_id',
'publisher_id',
'campaign_id',
'document_id_event',
'category_id_list',
'confidence_level_cat_list',
'topic_id_list',
'confidence_level_top_list',
'entity_id_list',
'confidence_level_ent_list',
'doc_event_category_id_list',
'doc_event_confidence_level_cat_list',
'doc_event_topic_id_list',
'doc_event_confidence_level_top_list',
'doc_event_entity_id_list',
'doc_event_confidence_level_ent_list')) \
.select(F.col('uuid_event').alias('uuid'), 'display_id', 'ad_id', 'document_id_event',
F.col('document_id_promo').alias('document_id'), F.col('clicked').alias('label'),
'feature_vector')
if evaluation:
train_feature_vector_gcs_folder_name = 'train_feature_vectors_integral_eval'
else:
train_feature_vector_gcs_folder_name = 'train_feature_vectors_integral'
train_set_feature_vectors_df.write.parquet(OUTPUT_BUCKET_FOLDER + train_feature_vector_gcs_folder_name,
mode='overwrite')
# # Export Validation/Test set feature vectors
def is_leak(max_timestamp_pv_leak, timestamp_event):
return max_timestamp_pv_leak >= 0 and max_timestamp_pv_leak >= timestamp_event
is_leak_udf = F.udf(lambda max_timestamp_pv_leak, timestamp_event: int(is_leak(max_timestamp_pv_leak, timestamp_event)),
IntegerType())
if evaluation:
data_df = validation_set_df
else:
data_df = test_set_df
test_validation_set_enriched_df = data_df.select(
'display_id', 'uuid_event', 'event_country', 'event_country_state', 'platform_event',
'source_id_doc_event', 'publisher_doc_event', 'publish_time_doc_event',
'publish_time',
'ad_id', 'document_id_promo', 'clicked',
'geo_location_event', 'advertiser_id', 'publisher_id',
'campaign_id', 'document_id_event',
'traffic_source_pv',
int_list_null_to_empty_list_udf('doc_event_category_id_list')
.alias('doc_event_category_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_cat_list')
.alias('doc_event_confidence_level_cat_list'),
int_list_null_to_empty_list_udf('doc_event_topic_id_list')
.alias('doc_event_topic_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_top_list')
.alias('doc_event_confidence_level_top_list'),
str_list_null_to_empty_list_udf('doc_event_entity_id_list')
.alias('doc_event_entity_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_ent_list')
.alias('doc_event_confidence_level_ent_list'),
int_null_to_minus_one_udf('source_id')
.alias('source_id'),
int_null_to_minus_one_udf('timestamp_event').alias('timestamp_event'),
int_list_null_to_empty_list_udf('category_id_list').alias('category_id_list'),
float_list_null_to_empty_list_udf('confidence_level_cat_list')
.alias('confidence_level_cat_list'),
int_list_null_to_empty_list_udf('topic_id_list').alias('topic_id_list'),
float_list_null_to_empty_list_udf('confidence_level_top_list')
.alias('confidence_level_top_list'),
str_list_null_to_empty_list_udf('entity_id_list').alias('entity_id_list'),
float_list_null_to_empty_list_udf('confidence_level_ent_list')
.alias('confidence_level_ent_list'),
int_null_to_minus_one_udf('max_timestamp_pv').alias('max_timestamp_pv_leak')) \
.join(user_profiles_df, on=[F.col("user_profiles.uuid") == F.col("uuid_event")], how='left') \
.withColumnRenamed('categories', 'user_categories') \
.withColumnRenamed('topics', 'user_topics') \
.withColumnRenamed('entities', 'user_entities') \
.withColumnRenamed('doc_ids', 'user_doc_ids_viewed') \
.withColumnRenamed('views', 'user_views_count')
test_validation_set_feature_vectors_df = test_validation_set_enriched_df \
.withColumn('feature_vector',
get_ad_feature_vector_integral_udf(
'user_doc_ids_viewed',
'user_views_count',
'user_categories',
'user_topics',
'user_entities',
'event_country',
'event_country_state',
'ad_id',
'document_id_promo',
'source_id',
'publish_time',
'timestamp_event',
'platform_event',
'geo_location_event',
'source_id_doc_event',
'publisher_doc_event',
'publish_time_doc_event',
'traffic_source_pv',
'advertiser_id',
'publisher_id',
'campaign_id',
'document_id_event',
'category_id_list',
'confidence_level_cat_list',
'topic_id_list',
'confidence_level_top_list',
'entity_id_list',
'confidence_level_ent_list',
'doc_event_category_id_list',
'doc_event_confidence_level_cat_list',
'doc_event_topic_id_list',
'doc_event_confidence_level_top_list',
'doc_event_entity_id_list',
'doc_event_confidence_level_ent_list')) \
.select(F.col('uuid').alias('uuid'), 'display_id', 'ad_id', 'document_id_event',
F.col('document_id_promo').alias('document_id'), F.col('clicked').alias('label'),
is_leak_udf('max_timestamp_pv_leak', 'timestamp_event').alias('is_leak'),
'feature_vector')
if evaluation:
test_validation_feature_vector_gcs_folder_name = 'validation_feature_vectors_integral'
else:
test_validation_feature_vector_gcs_folder_name = 'test_feature_vectors_integral'
test_validation_set_feature_vectors_df.write.parquet(
OUTPUT_BUCKET_FOLDER + test_validation_feature_vector_gcs_folder_name, mode='overwrite')
spark.stop()
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs/caffe2 | caffe2 | e2e_mask_rcnn_X_101_32x8d_FPN_1x_caffe2 | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "catalog://Caffe2Detectron/COCO/36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x"
BACKBONE:
CONV_BODY: "R-101-FPN"
OUT_CHANNELS: 256
RPN:
USE_FPN: True
ANCHOR_STRIDE: (4, 8, 16, 32, 64)
PRE_NMS_TOP_N_TRAIN: 2000
PRE_NMS_TOP_N_TEST: 1000
POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TEST: 1000
ROI_HEADS:
USE_FPN: True
ROI_BOX_HEAD:
POOLER_RESOLUTION: 7
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
POOLER_SAMPLING_RATIO: 2
FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"
PREDICTOR: "FPNPredictor"
ROI_MASK_HEAD:
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor"
PREDICTOR: "MaskRCNNC4Predictor"
POOLER_RESOLUTION: 14
POOLER_SAMPLING_RATIO: 2
RESOLUTION: 28
SHARE_BOX_FEATURE_EXTRACTOR: False
RESNETS:
STRIDE_IN_1X1: False
NUM_GROUPS: 32
WIDTH_PER_GROUP: 8
MASK_ON: True
DATASETS:
TEST: ("coco_2014_minival",)
DATALOADER:
SIZE_DIVISIBILITY: 32
|
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/model_analyzer | model_analyzer | model_analyzer_config | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .exceptions import ModelAnalyzerException
class ModelAnalyzerConfig:
"""
A config class to set arguments to the Model Analyzer.
An argument set to None will use the default.
"""
model_analyzer_args = [
"config-file",
]
input_to_options = [
"config-file",
]
def __init__(self):
# Args will be a dict with the string representation as key
self._args = {k: None for k in self.model_analyzer_args}
self._options = {
"-f": "config.yaml",
}
self._input_to_options = {
"config-file": "-f",
}
def to_cli_string(self):
"""
        Utility function to convert a config into a
        string of CLI arguments for the model analyzer.
Returns
-------
str
the command consisting of all set arguments to
the model analyzer.
            e.g. '-f config.yaml'
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
return " ".join(args)
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into model_analyzer
"""
return list(cls.model_analyzer_args) + list(cls.input_to_options)
def __getitem__(self, key):
"""
        Gets an argument's value in the config
Parameters
----------
key : str
The name of the argument to the model analyzer
Returns
-------
The value that the argument is set to in this config
"""
if key in self._args:
return self._args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
else:
raise ModelAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key, value):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the model analyzer
value : (any)
The value to which the argument is being set
Raises
------
        ModelAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
else:
raise ModelAnalyzerException(f"The argument '{key}' to the Model Analyzer is not supported.")
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling/backbone | backbone | __init__ | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .backbone import build_backbone
|
MxNet/Classification/RN50v1.5 | RN50v1.5 | global_metrics | import numpy as np
class CompositeMeter:
def __init__(self):
self.register = {}
def register_metric(self, name, metric):
self.register[name] = metric
def _validate(self, metric_name):
if metric_name not in self.register:
raise ValueError('{} is not a registered metric'.format(metric_name))
def update_metric(self, metric_name, value):
self._validate(metric_name)
self.register[metric_name].update(value)
def update_dict(self, dict_metric):
for name, val in dict_metric.items():
if name in self.register.keys():
self.update_metric(name, val)
def get(self, metric_name=None):
if metric_name is not None:
self._validate(metric_name)
return self.register[metric_name].get()
res_dict = {name: metric.get() for name, metric in self.register.items()}
return res_dict
class MaxMeter:
def __init__(self):
self.max = None
self.n = 0
def reset(self):
self.max = None
self.n = 0
def update(self, val):
if self.max is None:
self.max = val
else:
self.max = max(self.max, val)
def get(self):
return self.max
class MinMeter:
def __init__(self):
self.min = None
self.n = 0
def reset(self):
self.min = None
self.n = 0
def update(self, val):
if self.min is None:
self.min = val
else:
self.min = min(self.min, val)
def get(self):
return self.min
class AvgMeter:
def __init__(self):
self.sum = 0
self.n = 0
def reset(self):
self.sum = 0
self.n = 0
def update(self, val):
self.sum += val
self.n += 1
def get(self):
return self.sum / self.n
class PercentileMeter:
def __init__(self, q):
self.data = []
self.q = q
def reset(self):
self.data = []
def update(self, data):
self.data.extend(data)
def get(self):
return np.percentile(self.data, self.q)
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | losses_builder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses_builder."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import losses_builder
from object_detection.core import losses
from object_detection.protos import losses_pb2
from object_detection.utils import ops
class LocalizationLossBuilderTest(tf.test.TestCase):
def test_build_weighted_l2_localization_loss(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(localization_loss,
losses.WeightedL2LocalizationLoss))
def test_build_weighted_smooth_l1_localization_loss_default_delta(self):
losses_text_proto = """
localization_loss {
weighted_smooth_l1 {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(localization_loss,
losses.WeightedSmoothL1LocalizationLoss))
self.assertAlmostEqual(localization_loss._delta, 1.0)
def test_build_weighted_smooth_l1_localization_loss_non_default_delta(self):
losses_text_proto = """
localization_loss {
weighted_smooth_l1 {
delta: 0.1
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(localization_loss,
losses.WeightedSmoothL1LocalizationLoss))
self.assertAlmostEqual(localization_loss._delta, 0.1)
def test_build_weighted_iou_localization_loss(self):
losses_text_proto = """
localization_loss {
weighted_iou {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(localization_loss,
losses.WeightedIOULocalizationLoss))
def test_anchorwise_output(self):
losses_text_proto = """
localization_loss {
weighted_smooth_l1 {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(localization_loss,
losses.WeightedSmoothL1LocalizationLoss))
predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])
weights = tf.constant([[1.0, 1.0]])
loss = localization_loss(predictions, targets, weights=weights)
self.assertEqual(loss.shape, [1, 2])
def test_raise_error_on_empty_localization_config(self):
losses_text_proto = """
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
with self.assertRaises(ValueError):
losses_builder._build_localization_loss(losses_proto)
class ClassificationLossBuilderTest(tf.test.TestCase):
def test_build_weighted_sigmoid_classification_loss(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid {
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.WeightedSigmoidClassificationLoss))
def test_build_weighted_sigmoid_focal_classification_loss(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid_focal {
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.SigmoidFocalClassificationLoss))
self.assertAlmostEqual(classification_loss._alpha, None)
self.assertAlmostEqual(classification_loss._gamma, 2.0)
def test_build_weighted_sigmoid_focal_loss_non_default(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid_focal {
alpha: 0.25
gamma: 3.0
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.SigmoidFocalClassificationLoss))
self.assertAlmostEqual(classification_loss._alpha, 0.25)
self.assertAlmostEqual(classification_loss._gamma, 3.0)
def test_build_weighted_softmax_classification_loss(self):
losses_text_proto = """
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss))
def test_build_weighted_logits_softmax_classification_loss(self):
losses_text_proto = """
classification_loss {
weighted_logits_softmax {
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(
isinstance(classification_loss,
losses.WeightedSoftmaxClassificationAgainstLogitsLoss))
def test_build_weighted_softmax_classification_loss_with_logit_scale(self):
losses_text_proto = """
classification_loss {
weighted_softmax {
logit_scale: 2.0
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss))
def test_build_bootstrapped_sigmoid_classification_loss(self):
losses_text_proto = """
classification_loss {
bootstrapped_sigmoid {
alpha: 0.5
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.BootstrappedSigmoidClassificationLoss))
def test_anchorwise_output(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid {
anchorwise_output: true
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.WeightedSigmoidClassificationLoss))
predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]])
targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])
weights = tf.constant([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]])
loss = classification_loss(predictions, targets, weights=weights)
self.assertEqual(loss.shape, [1, 2, 3])
def test_raise_error_on_empty_config(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
with self.assertRaises(ValueError):
losses_builder.build(losses_proto)
class HardExampleMinerBuilderTest(tf.test.TestCase):
def test_do_not_build_hard_example_miner_by_default(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto)
self.assertEqual(hard_example_miner, None)
def test_build_hard_example_miner_for_classification_loss(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
loss_type: CLASSIFICATION
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
self.assertEqual(hard_example_miner._loss_type, 'cls')
def test_build_hard_example_miner_for_localization_loss(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
loss_type: LOCALIZATION
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
self.assertEqual(hard_example_miner._loss_type, 'loc')
def test_build_hard_example_miner_with_non_default_values(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
num_hard_examples: 32
iou_threshold: 0.5
loss_type: LOCALIZATION
max_negatives_per_positive: 10
min_negatives_per_image: 3
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
_, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
self.assertEqual(hard_example_miner._num_hard_examples, 32)
self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5)
self.assertEqual(hard_example_miner._max_negatives_per_positive, 10)
self.assertEqual(hard_example_miner._min_negatives_per_image, 3)
class LossBuilderTest(tf.test.TestCase):
def test_build_all_loss_parameters(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
}
classification_weight: 0.8
localization_weight: 0.2
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
(classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, _,
_) = losses_builder.build(losses_proto)
self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
self.assertTrue(isinstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss))
self.assertTrue(isinstance(localization_loss,
losses.WeightedL2LocalizationLoss))
self.assertAlmostEqual(classification_weight, 0.8)
self.assertAlmostEqual(localization_weight, 0.2)
def test_build_expected_sampling(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
}
classification_weight: 0.8
localization_weight: 0.2
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
(classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, _,
_) = losses_builder.build(losses_proto)
self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
self.assertTrue(
isinstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss))
self.assertTrue(
isinstance(localization_loss, losses.WeightedL2LocalizationLoss))
self.assertAlmostEqual(classification_weight, 0.8)
self.assertAlmostEqual(localization_weight, 0.2)
def test_build_reweighting_unmatched_anchors(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_softmax {
}
}
hard_example_miner {
}
classification_weight: 0.8
localization_weight: 0.2
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
(classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, _,
_) = losses_builder.build(losses_proto)
self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
self.assertTrue(
isinstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss))
self.assertTrue(
isinstance(localization_loss, losses.WeightedL2LocalizationLoss))
self.assertAlmostEqual(classification_weight, 0.8)
self.assertAlmostEqual(localization_weight, 0.2)
def test_raise_error_when_both_focal_loss_and_hard_example_miner(self):
losses_text_proto = """
localization_loss {
weighted_l2 {
}
}
classification_loss {
weighted_sigmoid_focal {
}
}
hard_example_miner {
}
classification_weight: 0.8
localization_weight: 0.2
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
with self.assertRaises(ValueError):
losses_builder.build(losses_proto)
class FasterRcnnClassificationLossBuilderTest(tf.test.TestCase):
def test_build_sigmoid_loss(self):
losses_text_proto = """
weighted_sigmoid {
}
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.WeightedSigmoidClassificationLoss))
def test_build_softmax_loss(self):
losses_text_proto = """
weighted_softmax {
}
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss))
def test_build_logits_softmax_loss(self):
losses_text_proto = """
weighted_logits_softmax {
}
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
self.assertTrue(
isinstance(classification_loss,
losses.WeightedSoftmaxClassificationAgainstLogitsLoss))
def test_build_sigmoid_focal_loss(self):
losses_text_proto = """
weighted_sigmoid_focal {
}
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
self.assertTrue(
isinstance(classification_loss,
losses.SigmoidFocalClassificationLoss))
def test_build_softmax_loss_by_default(self):
losses_text_proto = """
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.WeightedSoftmaxClassificationLoss))
if __name__ == '__main__':
tf.test.main()
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | engineDriver | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "engineDriver.h"
#include "trtUtils.h"
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
EngineDriver::EngineDriver(TRTPtr<ICudaEngine> engine) :
mEngine(std::move(engine))
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
const ICudaEngine& EngineDriver::getEngine() const
{
return *mEngine;
}
ICudaEngine& EngineDriver::getEngine()
{
return *mEngine;
}
int EngineDriver::getMaxBatchSize() const
{
return TRTUtils::getMaxBatchSize(*mEngine);
}
} // namespace tts
|
PyTorch/Detection/Efficientdet/effdet/layers | layers | conv2d_same | """ Conv2d w/ Same Padding
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
from .padding import pad_same, get_padding_value
def conv2d_same(
x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1):
x = pad_same(x, weight.shape[-2:], stride, dilation)
return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSame(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(Conv2dSame, self).__init__(
in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
def forward(self, x):
return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
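# Illustrative shape check (input size is an assumption): with TF-style 'SAME' padding the
# output spatial dims are ceil(input / stride), e.g.
#   conv = Conv2dSame(3, 8, kernel_size=3, stride=2)
#   out = conv(torch.randn(1, 3, 224, 224))  # out.shape == (1, 8, 112, 112)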
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
padding = kwargs.pop('padding', '')
kwargs.setdefault('bias', False)
padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
if is_dynamic:
return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
else:
        return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/model_analyzer | model_analyzer | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .model_analyzer import ModelAnalyzer, ModelAnalyzerMode, ModelAnalyzerReportMode # noqa: F401
from .model_analyzer_config import ModelAnalyzerConfig # noqa: F401
|
PyTorch/LanguageModeling/BART/utils | utils | generation_logits_process | # coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2020 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
from abc import ABC
from typing import Callable, Iterable, List
import numpy as np
import torch
from .file_utils import add_start_docstrings
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs:
Additional logits processor specific kwargs.
Return:
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class LogitsProcessor(ABC):
"""Abstract base class for all logit processors that can be applied during generation."""
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
"""Torch method for processing logits."""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
class LogitsWarper(ABC):
"""Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
"""Torch method for warping logits."""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
class LogitsProcessorList(list):
"""
This class can be used to create a list of :class:`~transformers.LogitsProcessor` or
:class:`~transformers.LogitsWarper` to subsequently process a :obj:`scores` input tensor. This class inherits from
    list and adds a specific `__call__` method to apply each :class:`~transformers.LogitsProcessor` or
    :class:`~transformers.LogitsWarper` to the inputs.
"""
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
for processor in self:
function_args = inspect.signature(processor.__call__).parameters
if len(function_args) > 2:
assert all(
arg in kwargs for arg in list(function_args.keys())[2:]
), f"Make sure that all the required parameters: {list(function_args.keys())} for {processor.__class__} are passed to the logits processor."
scores = processor(input_ids, scores, **kwargs)
else:
scores = processor(input_ids, scores)
return scores
class MinLengthLogitsProcessor(LogitsProcessor):
r"""
:class:`transformers.LogitsProcessor` enforcing a min-length by setting EOS probability to 0.
Args:
min_length (:obj:`int`):
The minimum length below which the score of :obj:`eos_token_id` is set to :obj:`-float("Inf")`.
eos_token_id (:obj:`int`):
The id of the `end-of-sequence` token.
"""
def __init__(self, min_length: int, eos_token_id: int):
if not isinstance(min_length, int) or min_length < 0:
raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
if not isinstance(eos_token_id, int) or eos_token_id < 0:
raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
self.min_length = min_length
self.eos_token_id = eos_token_id
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
cur_len = input_ids.shape[-1]
if cur_len < self.min_length:
scores[:, self.eos_token_id] = -float("inf")
return scores
class TemperatureLogitsWarper(LogitsWarper):
r"""
:class:`transformers.LogitsWarper` for temperature (exponential scaling output probability distribution).
Args:
temperature (:obj:`float`):
The value used to module the logits distribution.
"""
def __init__(self, temperature: float):
if not isinstance(temperature, float) or not (temperature > 0):
raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
self.temperature = temperature
def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor:
scores = scores / self.temperature
return scores
class RepetitionPenaltyLogitsProcessor(LogitsProcessor):
r"""
:class:`transformers.LogitsProcessor` enforcing an exponential penalty on repeated sequences.
Args:
repetition_penalty (:obj:`float`):
The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
"""
def __init__(self, penalty: float):
if not isinstance(penalty, float) or not (penalty > 0):
raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
self.penalty = penalty
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
score = torch.gather(scores, 1, input_ids)
# if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
score = torch.where(score < 0, score * self.penalty, score / self.penalty)
scores.scatter_(1, input_ids, score)
return scores
class TopPLogitsWarper(LogitsWarper):
"""
    :class:`transformers.LogitsWarper` that performs top-p (nucleus) filtering, i.e. restricting to the smallest set
    of most probable tokens whose cumulative probability is at least :obj:`top_p`.
Args:
top_p (:obj:`float`):
If set to < 1, only the most probable tokens with probabilities that add up to :obj:`top_p` or higher are
kept for generation.
filter_value (:obj:`float`, `optional`, defaults to :obj:`-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
self.top_p = top_p
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
sorted_logits, sorted_indices = torch.sort(scores, descending=True)
cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
# Remove tokens with cumulative top_p above the threshold (token with 0 are kept)
sorted_indices_to_remove = cumulative_probs > self.top_p
if self.min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., : self.min_tokens_to_keep - 1] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
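# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): nucleus filtering on a
# tiny, made-up distribution. With top_p=0.8, only the smallest set of tokens
# whose cumulative probability reaches 0.8 (here 0.5 and 0.4) is kept.
def _example_top_p():
    warper = TopPLogitsWarper(top_p=0.8)
    scores = torch.log(torch.tensor([[0.5, 0.4, 0.07, 0.03]]))
    return warper(torch.tensor([[0]]), scores)  # the last two logits become -inf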
class TopKLogitsWarper(LogitsWarper):
r"""
:class:`transformers.LogitsWarper` that performs top-k, i.e. restricting to the k highest probability elements.
Args:
top_k (:obj:`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
filter_value (:obj:`float`, `optional`, defaults to :obj:`-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if not isinstance(top_k, int) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
self.top_k = top_k
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
top_k = min(max(self.top_k, self.min_tokens_to_keep), scores.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None]
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int):
generated_ngrams = [{} for _ in range(num_hypos)]
for idx in range(num_hypos):
gen_tokens = prev_input_ids[idx].tolist()
generated_ngram = generated_ngrams[idx]
for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]):
prev_ngram_tuple = tuple(ngram[:-1])
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
return generated_ngrams
def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len):
# Before decoding the next token, prevent decoding of ngrams that have already appeared
start_idx = cur_len + 1 - ngram_size
ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist())
return banned_ngrams.get(ngram_idx, [])
def _calc_banned_ngram_tokens(
ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int, cur_len: int
) -> List[Iterable[int]]:
"""Copied from fairseq for no_repeat_ngram in beam_search"""
if cur_len + 1 < ngram_size:
# return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
return [[] for _ in range(num_hypos)]
generated_ngrams = _get_ngrams(ngram_size, prev_input_ids, num_hypos)
banned_tokens = [
_get_generated_ngrams(generated_ngrams[hypo_idx], prev_input_ids[hypo_idx], ngram_size, cur_len)
for hypo_idx in range(num_hypos)
]
return banned_tokens
class NoRepeatNGramLogitsProcessor(LogitsProcessor):
r"""
:class:`transformers.LogitsProcessor` that enforces no repetition of n-grams. See `Fairseq
<https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345>`__.
Args:
ngram_size (:obj:`int`):
All ngrams of size :obj:`ngram_size` can only occur once.
"""
def __init__(self, ngram_size: int):
if not isinstance(ngram_size, int) or ngram_size <= 0:
raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
self.ngram_size = ngram_size
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
num_batch_hypotheses = scores.shape[0]
cur_len = input_ids.shape[-1]
banned_batch_tokens = _calc_banned_ngram_tokens(self.ngram_size, input_ids, num_batch_hypotheses, cur_len)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
return scores
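# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): chaining processors
# with LogitsProcessorList. The token ids and vocabulary size are made up.
def _example_processor_chain():
    processors = LogitsProcessorList([
        MinLengthLogitsProcessor(min_length=5, eos_token_id=2),
        NoRepeatNGramLogitsProcessor(ngram_size=2),
    ])
    input_ids = torch.tensor([[7, 3, 7]])  # the bigram (7, 3) was already generated
    scores = torch.zeros(1, 10)
    # EOS (id 2) is banned because only 3 tokens were generated (< min_length),
    # and token 3 is banned because it would repeat the bigram (7, 3).
    return processors(input_ids, scores)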
class EncoderNoRepeatNGramLogitsProcessor(LogitsProcessor):
r"""
:class:`transformers.LogitsProcessor` that enforces no repetition of encoder input ids n-grams for the decoder ids.
See `ParlAI <https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/torch_generator_agent.py#L1350>`__.
Args:
encoder_ngram_size (:obj:`int`):
All ngrams of size :obj:`ngram_size` can only occur within the encoder input ids.
encoder_input_ids (:obj:`int`):
The encoder_input_ids that should not be repeated within the decoder ids.
"""
def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor):
if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0:
raise ValueError(
f"`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}"
)
self.ngram_size = encoder_ngram_size
if len(encoder_input_ids.shape) == 1:
encoder_input_ids = encoder_input_ids.unsqueeze(0)
self.batch_size = encoder_input_ids.shape[0]
self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# B x num_beams
num_hypos = scores.shape[0]
num_beams = num_hypos // self.batch_size
cur_len = input_ids.shape[-1]
banned_batch_tokens = [
_get_generated_ngrams(
self.generated_ngrams[hypo_idx // num_beams], input_ids[hypo_idx], self.ngram_size, cur_len
)
for hypo_idx in range(num_hypos)
]
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
return scores
class NoBadWordsLogitsProcessor(LogitsProcessor):
"""
:class:`transformers.LogitsProcessor` that enforces that specified sequences will never be sampled.
Args:
bad_words_ids (:obj:`List[List[int]]`):
List of list of token ids that are not allowed to be generated. In order to get the tokens of the words
that should not appear in the generated text, use :obj:`tokenizer(bad_word,
add_prefix_space=True).input_ids`.
eos_token_id (:obj:`int`):
The id of the `end-of-sequence` token.
"""
def __init__(self, bad_words_ids: Iterable[Iterable[int]], eos_token_id: int):
if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0:
raise ValueError(f"`bad_words_ids` has to be a non-emtpy list, but is {bad_words_ids}.")
if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
if any(
any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
for bad_word_ids in bad_words_ids
):
raise ValueError(
f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
)
self.bad_words_ids = list(filter(lambda bad_token_seq: bad_token_seq != [eos_token_id], bad_words_ids))
for banned_token_seq in self.bad_words_ids:
assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format(
bad_words_ids
)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
banned_tokens = self._calc_banned_bad_words_ids(input_ids)
scores = self._set_scores_to_inf_for_banned_tokens(scores, banned_tokens)
return scores
def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:
if len(tokens) == 0:
            # if the bad word sequence is just one token, always ban it
return True
elif len(tokens) > len(prev_tokens):
            # if bad word tokens are longer than prev input_ids they can't be equal
return False
elif prev_tokens[-len(tokens) :].tolist() == tokens:
# if tokens match
return True
else:
return False
def _calc_banned_bad_words_ids(self, prev_input_ids: Iterable[int]) -> Iterable[int]:
banned_tokens = []
for prev_input_ids_slice in prev_input_ids:
banned_tokens_slice = []
for banned_token_seq in self.bad_words_ids:
if self._tokens_match(prev_input_ids_slice, banned_token_seq[:-1]) is False:
# if tokens do not match continue
continue
banned_tokens_slice.append(banned_token_seq[-1])
banned_tokens.append(banned_tokens_slice)
return banned_tokens
    def _set_scores_to_inf_for_banned_tokens(self, scores: torch.Tensor, banned_tokens: List[List[int]]) -> torch.Tensor:
        """
        Sets the scores of banned tokens to `-inf` and returns the updated tensor. `banned_tokens` is expected to be
        a list of length `batch_size`, where each entry is the list of token ids to ban for that batch element.
        Args:
            scores: logits distribution of shape (batch size, vocabulary size)
            banned_tokens: list of list of tokens to ban of length (batch_size)
        """
banned_mask_list = []
for idx, batch_banned_tokens in enumerate(banned_tokens):
for token in batch_banned_tokens:
banned_mask_list.append([idx, token])
if not banned_mask_list:
return scores
banned_mask = torch.LongTensor(banned_mask_list)
indices = torch.ones(len(banned_mask))
# A sparse tensor is generated from a list of coordinates: [[0, 1], [0, 2], [2, 0]]. A conversion to dense tensor generates:
# [ 0 1 1 ]
# [ 0 0 0 ]
# [ 1 0 0 ]
banned_mask = (
torch.sparse.LongTensor(banned_mask.t(), indices, scores.size()).to(scores.device).to_dense().bool()
)
scores = scores.masked_fill(banned_mask, -float("inf"))
return scores
class PrefixConstrainedLogitsProcessor(LogitsProcessor):
r"""
    :class:`transformers.LogitsProcessor` that enforces constrained generation and is useful for prefix-conditioned
constrained generation. See `Autoregressive Entity Retrieval <https://arxiv.org/abs/2010.00904>`__ for more
information.
Args:
prefix_allowed_tokens_fn: (:obj:`Callable[[int, torch.Tensor], List[int]]`):
            This function constrains the beam search to allowed tokens only at each step. This function takes 2
arguments :obj:`inputs_ids` and the batch ID :obj:`batch_id`. It has to return a list with the allowed
tokens for the next generation step conditioned on the previously generated tokens :obj:`inputs_ids` and
the batch ID :obj:`batch_id`.
"""
def __init__(self, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int):
self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
self._num_beams = num_beams
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
mask = torch.full_like(scores, -math.inf)
for batch_id, beam_sent in enumerate(input_ids.view(-1, self._num_beams, input_ids.shape[-1])):
for beam_id, sent in enumerate(beam_sent):
mask[batch_id * self._num_beams + beam_id, self._prefix_allowed_tokens_fn(batch_id, sent)] = 0
return scores + mask
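# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a prefix function that
# always allows only tokens 0 and 1, regardless of the generated prefix.
def _example_prefix_constrained():
    def allowed(batch_id, sent):
        return [0, 1]
    processor = PrefixConstrainedLogitsProcessor(allowed, num_beams=1)
    input_ids = torch.tensor([[5, 6]])
    scores = torch.zeros(1, 4)
    return processor(input_ids, scores)  # every position except 0 and 1 becomes -inf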
class HammingDiversityLogitsProcessor(LogitsProcessor):
r"""
:class:`transformers.LogitsProcessor` that enforces diverse beam search. Note that this logits processor is only
effective for :meth:`transformers.PretrainedModel.group_beam_search`. See `Diverse Beam Search: Decoding Diverse
Solutions from Neural Sequence Models <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details.
Args:
diversity_penalty (:obj:`float`):
            This value is subtracted from a beam's score if it generates a token that any beam from another group has
            generated at the same time step. Note that :obj:`diversity_penalty` is only effective if ``group beam search`` is enabled.
num_beams (:obj:`int`):
Number of beams used for group beam search. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for
more details.
num_beam_groups (:obj:`int`):
Number of groups to divide :obj:`num_beams` into in order to ensure diversity among different groups of
beams. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details.
"""
def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int):
if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0):
raise ValueError("`diversity_penalty` should be a float strictly larger than 0.")
self._diversity_penalty = diversity_penalty
if not isinstance(num_beams, int) or num_beams < 2:
raise ValueError("`num_beams` should be an integer strictly larger than 1.")
self._num_beams = num_beams
if not isinstance(num_beam_groups, int) or num_beam_groups < 2:
raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.")
if num_beam_groups > num_beams:
raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.")
self._num_sub_beams = num_beams // num_beam_groups
def __call__(
self,
input_ids: torch.LongTensor,
scores: torch.FloatTensor,
current_tokens: torch.LongTensor,
beam_group_idx: int,
) -> torch.FloatTensor:
# hamming diversity: penalise using same token in current group which was used in previous groups at
# the same time step
batch_size = current_tokens.shape[0] // self._num_beams
group_start_idx = beam_group_idx * self._num_sub_beams
group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams)
group_size = group_end_idx - group_start_idx
vocab_size = scores.shape[-1]
if group_start_idx == 0:
return scores
for batch_idx in range(batch_size):
# predicted tokens of last time step of previous groups
previous_group_tokens = current_tokens[
batch_idx * self._num_beams : batch_idx * self._num_beams + group_start_idx
]
token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device)
scores[batch_idx * group_size : (batch_idx + 1) * group_size] -= self._diversity_penalty * token_frequency
return scores |
PyTorch/SpeechSynthesis/FastPitch/notebooks | notebooks | README | # FastPitch: Voice Modification with Transformations of Pitch
This readme details how to run the Jupyter notebook for FastPitch inference using different pitch transformations.
## Build and run the FastPitch Docker container
1. Clone the repository:
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/SpeechSynthesis/FastPitch
```
2. Build the container for the FastPitch model:
```bash
docker build . -t fastpitch:latest
```
3. Launch the container of FastPitch model. By default port `8888` if forwarded.
```bash
bash scripts/docker/interactive.sh
```
## Run Jupyter notebook
Inside the container, navigate to the `notebooks/` directory and start the Jupyter notebook server:
```
cd notebooks
jupyter notebook --ip='*' --port=8888 --allow-root
```
Then, in a web browser, navigate to the IP address or hostname of the host machine at port `8888`:
```
http://[host machine]:8888
```
Use the token listed in the output from running the jupyter command to log in, for example:
```
http://[host machine]:8888/?token=aae96ae9387cd28151868fee318c3b3581a2d794f3b25c6b
```
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers | layers | transformer_scaffold | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based transformer scaffold layer."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import dense_einsum
# @tf.keras.utils.register_keras_serializable(package="Text")
class TransformerScaffold(tf.keras.layers.Layer):
"""Transformer scaffold layer.
This layer implements the Transformer from "Attention Is All You Need".
(https://arxiv.org/abs/1706.03762), with a customizable attention layer
option. Users can pass a class to `attention_cls` and associated config to
`attention_cfg`, in which case the scaffold will instantiate the class with
the config, or pass a class instance to `attention_cls`.
Attributes:
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
    attention_cls: A class to instantiate, or a layer instance.
attention_cfg: The config with which to instantiate `attention_cls`. Ignored
if attention_cls is a layer instance.
dropout_rate: Dropout probability for the post-attention and output dropout.
attention_dropout_rate: Dropout probability for within the attention layer.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
"""
def __init__(self,
num_attention_heads,
intermediate_size,
intermediate_activation,
attention_cls=attention.Attention,
attention_cfg=None,
dropout_rate=0.0,
attention_dropout_rate=0.0,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(TransformerScaffold, self).__init__(**kwargs)
self._attention_cfg = attention_cfg
self._attention_cls = attention_cls
self._num_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._intermediate_activation = intermediate_activation
self._attention_dropout_rate = attention_dropout_rate
self._dropout_rate = dropout_rate
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
def build(self, input_shape):
input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape
input_tensor_shape = tf.TensorShape(input_tensor)
if len(input_tensor_shape) != 3:
raise ValueError(
"TransformerScaffold expects a three-dimensional input of "
"shape [batch, sequence, width].")
batch_size, sequence_length, hidden_size = input_tensor_shape
if len(input_shape) == 2:
mask_tensor_shape = tf.TensorShape(input_shape[1])
expected_mask_tensor_shape = tf.TensorShape(
[batch_size, sequence_length, sequence_length])
if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
raise ValueError("When passing a mask tensor to TransformerLayer, the "
"mask tensor must be of shape [batch, "
"sequence_length, sequence_length] (here %s). Got a "
"mask tensor of shape %s." %
(expected_mask_tensor_shape, mask_tensor_shape))
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
if isinstance(self._attention_cls, tf.keras.layers.Layer):
self._attention_layer = self._attention_cls
else:
if self._attention_cfg is None:
attention_cfg = {
"num_heads": self._num_heads,
"head_size": self._attention_head_size,
"dropout_rate": self._attention_dropout_rate,
"kernel_initializer": self._kernel_initializer,
"bias_initializer": self._bias_initializer,
"kernel_regularizer": self._kernel_regularizer,
"bias_regularizer": self._bias_regularizer,
"activity_regularizer": self._activity_regularizer,
"kernel_constraint": self._kernel_constraint,
"bias_constraint": self._bias_constraint,
"name": "self_attention"
}
else:
attention_cfg = self._attention_cfg
self._attention_layer = self._attention_cls(**attention_cfg)
self._attention_output_dense = dense_einsum.DenseEinsum(
output_shape=hidden_size,
num_summed_dimensions=2,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
name="self_attention_output")
self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm", axis=-1, epsilon=1e-12,
dtype=tf.float32))
self._intermediate_dense = dense_einsum.DenseEinsum(
output_shape=self._intermediate_size,
activation=self._intermediate_activation,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
dtype=tf.float32, # This layer is always float32 for numeric stability.
name="intermediate")
self._output_dense = dense_einsum.DenseEinsum(
output_shape=hidden_size,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
name="output")
self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)
super(TransformerScaffold, self).build(input_shape)
def get_config(self):
config = {
"attention_cls":
self._attention_layer,
"num_attention_heads":
self._num_heads,
"intermediate_size":
self._intermediate_size,
"intermediate_activation":
self._intermediate_activation,
"dropout_rate":
self._dropout_rate,
"attention_dropout_rate":
self._attention_dropout_rate,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint)
}
base_config = super(TransformerScaffold, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
if isinstance(inputs, (list, tuple)) and len(inputs) == 2:
input_tensor, attention_mask = inputs
else:
input_tensor, attention_mask = (inputs, None)
attention_inputs = [input_tensor, input_tensor]
if attention_mask is not None:
attention_inputs.append(attention_mask)
attention_output = self._attention_layer(attention_inputs)
attention_output = self._attention_output_dense(attention_output)
attention_output = self._attention_dropout(attention_output)
# Use float32 in keras layer norm and the gelu activation in the
# intermediate dense layer for numeric stability
if self.dtype == tf.float16:
input_tensor = tf.cast(input_tensor, tf.float32)
attention_output = tf.cast(attention_output, tf.float32)
attention_output = self._attention_layer_norm(input_tensor +
attention_output)
intermediate_output = self._intermediate_dense(attention_output)
if self.dtype == tf.float16:
intermediate_output = tf.cast(intermediate_output, tf.float16)
layer_output = self._output_dense(intermediate_output)
layer_output = self._output_dropout(layer_output)
# Use float32 in keras layer norm for numeric stability
if self.dtype == tf.float16:
layer_output = tf.cast(layer_output, tf.float32)
layer_output = self._output_layer_norm(layer_output + attention_output)
if self.dtype == tf.float16:
layer_output = tf.cast(layer_output, tf.float16)
return layer_output
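# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): instantiating the
# scaffold with its default attention class. The shapes below are made up.
def _example_scaffold():
  layer = TransformerScaffold(
      num_attention_heads=4,
      intermediate_size=128,
      intermediate_activation="relu")
  data = tf.random.uniform((2, 16, 64))  # [batch, sequence, width]
  mask = tf.ones((2, 16, 16))            # [batch, sequence, sequence]
  return layer([data, mask])             # shape [2, 16, 64]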
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/types | types | str_enum | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class StrEnum(str, enum.Enum):
def __new__(cls, *args):
for arg in args:
if not isinstance(arg, (str, enum.auto)):
raise TypeError(
"Values of StrEnums must be strings: {} is a {}".format(
repr(arg), type(arg)
)
)
return super().__new__(cls, *args)
def __str__(self):
return self.value
def _generate_next_value_(name, *_):
return name
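# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): members behave as
# plain strings while still being enum members.
class _Color(StrEnum):
    RED = "red"
    BLUE = "blue"
# Both str(_Color.RED) == "red" and _Color.RED == "red" evaluate to True.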
|
TensorFlow2/LanguageModeling/BERT/scripts/docker | docker | launch | #!/bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
CMD=${@:-/bin/bash}
NV_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:-"all"}
docker run --gpus $NV_VISIBLE_DEVICES --rm -it \
--net=host \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
-e NVIDIA_VISIBLE_DEVICES=$NV_VISIBLE_DEVICES \
-v $PWD:/workspace/bert_tf2 -v $PWD/results:/results \
bert_tf2 $CMD
|
PaddlePaddle/LanguageModeling/BERT | BERT | lr_scheduler | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import paddle
from utils.utility import is_integer
class Poly:
"""
    Polynomial learning rate decay.
lr = (learning_rate - end_lr) * (1 - min(step, decay_steps) / decay_steps) ^ power + end_lr
If `power` is 1.0, it's also equivalent to linear learning rate decay.
Args:
learning_rate (float): The initial learning rate.
num_steps(int): The total number of training steps.
end_lr(float, optional): The minimum final learning rate. Default: 0.0.
power(float, optional): Power of polynomial. Default: 1.0.
warmup(int|float, optional):
If warmup is int, it indicates the number of warmup steps. Default: 0.
If warmup is float, it indicates the proportion of warmup steps.
warmup_start_lr(float, optional): Initial learning rate of warm up. Default: 0.0.
last_step(int, optional): The step id of the last run. Can be set to resume training.
Default: 0.
"""
def __init__(self,
learning_rate,
num_steps,
end_lr=0.0,
power=1.0,
warmup=0,
warmup_start_lr=0.0,
last_step=0):
super().__init__()
self.end_lr = end_lr
self.power = power
self.learning_rate = learning_rate
self.warmup_start_lr = warmup_start_lr
self.last_step = last_step
self.total_steps = num_steps
self.warmup_steps = warmup if is_integer(warmup) else int(
math.floor(warmup * self.total_steps))
self.steps = self.total_steps - self.warmup_steps
assert self.warmup_steps <= self.total_steps, "warmup steps can't be larger than total steps"
def __call__(self):
        learning_rate = paddle.optimizer.lr.PolynomialDecay(
            learning_rate=self.learning_rate,
            decay_steps=self.steps,
            end_lr=self.end_lr,
            power=self.power,
            last_epoch=self.last_step) if self.steps > 0 else self.learning_rate
if self.warmup_steps > 0:
learning_rate = paddle.optimizer.lr.LinearWarmup(
learning_rate=learning_rate,
warmup_steps=self.warmup_steps,
start_lr=self.warmup_start_lr,
end_lr=self.learning_rate,
last_epoch=self.last_step)
return learning_rate
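# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a linear-decay
# schedule (power=1.0) with 10% of the steps used as linear warmup. The numbers
# below are made up for the example.
def _example_poly_schedule():
    scheduler = Poly(learning_rate=5e-5, num_steps=1000, warmup=0.1)()
    lrs = []
    for _ in range(1000):
        lrs.append(scheduler.get_lr())
        scheduler.step()
    # lrs rises from 0.0 to 5e-5 over the first 100 steps, then decays towards 0.
    return lrs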
def build_lr_scheduler(args):
"""
Build a learning rate scheduler.
Args:
args(Namespace): Arguments obtained from ArgumentParser.
return:
lr(paddle.optimizer.lr.LRScheduler): A learning rate scheduler.
"""
lr = Poly(
args.learning_rate,
args.max_steps,
warmup=args.warmup_proportion,
last_step=args.last_step_of_checkpoint)
if not isinstance(lr, paddle.optimizer.lr.LRScheduler):
lr = lr()
logging.info("build lr %s success..", lr)
return lr
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular | tabular | chunked_tabular_generator | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from abc import ABC
import tqdm
import cupy as cp
import numpy as np
import multiprocessing
from functools import partial
from syngen.utils.io_utils import dump_dataframe
from syngen.utils.types.dataframe_type import DataFrameType
from syngen.utils.memory_manager import MemoryManager
from syngen.generator.tabular import BaseTabularGenerator
class ChunkedBaseTabularGenerator(BaseTabularGenerator, ABC):
""" A Chunked Base Tabular Generator contains the base functionality of the multiprocess (Multi-GPU) data generation.
"""
def chunked_sampling(self, n_samples: int, save_path: str, fname: str, n_workers: int = 0, gpus: int = -1,
use_memmap=False, memory_threshold=0.8, verbose=True):
memory_manager = MemoryManager()
if gpus < 0:
gpus = memory_manager.get_available_gpus()
emp_n = 1000
est_samples = self.sample(emp_n, gpu=False)
mem_usage = est_samples.memory_usage(index=True, deep=True).sum()
est_sample_mem = int(np.ceil(mem_usage / emp_n * self._space_complexity_factor()))
est_mem = est_sample_mem * n_samples
memmap_kwargs = None
chunk_save_path = None
if use_memmap:
assert fname.endswith(".npy")
memmap_shape = list(est_samples.shape)
memmap_shape[0] = n_samples
memmap_shape = tuple(memmap_shape)
memmap_dtype = est_samples.dtypes.iloc[0]
memmap_filename = os.path.join(save_path, fname)
memmap_kwargs = dict(
filename=memmap_filename,
)
memmap_outfile = np.lib.format.open_memmap(memmap_filename, dtype=memmap_dtype, shape=memmap_shape, mode='w+')
else:
chunk_format = '{chunk_id}'
chunk_save_path = os.path.join(save_path, f'{fname}_{chunk_format}')
if gpus > 0:
mem_avail = memory_manager.get_min_available_across_gpus_memory(gpus=gpus)
n_workers = gpus
chunk_partial = partial(self._generate_chunk,
chunk_save_path=chunk_save_path, gpu=True, gpus=gpus, memmap_kwargs=memmap_kwargs)
else:
mem_avail = memory_manager.get_available_virtual_memory()
chunk_partial = partial(self._generate_chunk,
chunk_save_path=chunk_save_path, gpu=False, memmap_kwargs=memmap_kwargs)
if mem_avail * memory_threshold > est_mem:
df = self.sample(n_samples, gpu=True, memmap_kwargs=memmap_kwargs, start_idx=0, end_idx=n_samples)
if chunk_save_path:
chunk_save_path = chunk_save_path.format(chunk_id=0)
dump_dataframe(df, save_path=chunk_save_path, format='parquet')
res = [chunk_save_path]
else:
mem_avail = int(mem_avail * memory_threshold) # to avoid OOM
max_samples_per_chunk = int(mem_avail // est_sample_mem)
n_chunks = n_samples//max_samples_per_chunk + (1 if n_samples % max_samples_per_chunk > 0 else 0)
samples_per_chunk = n_samples // n_chunks
chunk_sizes = [samples_per_chunk] * n_chunks
if n_samples % n_chunks > 0:
chunk_sizes.append(n_samples % n_chunks)
multiprocessing.set_start_method('spawn', force=True)
with multiprocessing.Pool(processes=n_workers) as pool:
res = pool.imap_unordered(chunk_partial, enumerate(zip(chunk_sizes, np.cumsum(chunk_sizes))),
chunksize=(len(chunk_sizes)+n_workers-1)//n_workers)
if verbose:
res = tqdm.tqdm(res, total=len(chunk_sizes))
res = list(res)
return res
def _generate_chunk(self, chunk_info, chunk_save_path, gpu, memmap_kwargs, gpus=0):
chunk_id, (chunk_size, chunk_end) = chunk_info
if gpu:
gpu_id = int(multiprocessing.current_process()._identity[0]) % gpus
with cp.cuda.Device(gpu_id):
df = self.sample(chunk_size, gpu=True, memmap_kwargs=memmap_kwargs,
start_idx=chunk_end-chunk_size, end_idx=chunk_end)
else:
df = self.sample(chunk_size, gpu=False, memmap_kwargs=memmap_kwargs,
start_idx=chunk_end-chunk_size, end_idx=chunk_end)
if chunk_save_path:
chunk_save_path = chunk_save_path.format(chunk_id=chunk_id)
dump_dataframe(df, save_path=chunk_save_path, format='parquet')
return chunk_save_path
def _space_complexity_factor(self):
return 2.0 # we support float16 but it requires intermediate float32
@property
def supports_memmap(self) -> bool:
return True
def sample(self, num_samples, *args, gpu=False, **kwargs) -> DataFrameType:
"""generate `num_samples` from generator
Args:
num_samples (int): number of samples to generate
gpu (bool): whether to use cpu or gpu implementation (default: False)
*args: optional positional args
**kwargs: optional key-word arguments
"""
raise NotImplementedError()
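# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a toy subclass that
# draws i.i.d. Gaussian features, only to show the `sample` contract used by
# `chunked_sampling` (memmap arguments and chunk indices are optional). Any
# remaining abstract methods of BaseTabularGenerator would still need stubs.
class _ToyGaussianGenerator(ChunkedBaseTabularGenerator):
    def sample(self, num_samples, *args, gpu=False, memmap_kwargs=None,
               start_idx=0, end_idx=None, **kwargs):
        import pandas as pd
        data = np.random.randn(num_samples, 4).astype(np.float32)
        if memmap_kwargs is not None:
            out = np.lib.format.open_memmap(memmap_kwargs["filename"], mode="r+")
            out[start_idx:end_idx] = data
        return pd.DataFrame(data, columns=[f"f{i}" for i in range(4)])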
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling/detector | detector | __init__ | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .detectors import build_detection_model
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | faster_rcnn_resnet50_pets | # Faster R-CNN with Resnet-50 (v1), configured for Oxford-IIIT Pets Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
model {
faster_rcnn {
num_classes: 37
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_resnet50'
first_stage_features_stride: 16
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
}
}
train_config: {
batch_size: 1
optimizer {
momentum_optimizer: {
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 0.0003
schedule {
step: 900000
learning_rate: .00003
}
schedule {
step: 1200000
learning_rate: .000003
}
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
from_detection_checkpoint: true
load_all_detection_checkpoint_vars: true
# Note: The below line limits the training process to 200K steps, which we
  # empirically found to be sufficient to train on the pets dataset. This
# effectively bypasses the learning rate schedule (the learning rate will
# never decay). Remove the below line to train indefinitely.
num_steps: 200000
data_augmentation_options {
random_horizontal_flip {
}
}
max_number_of_boxes: 50
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/pet_faces_train.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
num_examples: 1101
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/pet_faces_val.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
PyTorch/SpeechRecognition/QuartzNet/scripts | scripts | inference_benchmark | #!/bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -a
: ${OUTPUT_DIR:=${3:-"/results"}}
: ${CUDNN_BENCHMARK:=true}
: ${PAD_TO_MAX_DURATION:=true}
: ${NUM_WARMUP_STEPS:=10}
: ${NUM_STEPS:=500}
: ${AMP:=false}
: ${DALI_DEVICE:="cpu"}
: ${BATCH_SIZE_SEQ:="1 2 4 8 16"}
: ${MAX_DURATION_SEQ:="2 7 16.7"}
for MAX_DURATION in $MAX_DURATION_SEQ; do
for BATCH_SIZE in $BATCH_SIZE_SEQ; do
LOG_FILE="$OUTPUT_DIR/perf-infer_dali-${DALI_DEVICE}_amp-${AMP}_dur${MAX_DURATION}_bs${BATCH_SIZE}.json"
bash ./scripts/inference.sh "$@"
done
done
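# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original script): the sweep can be
# narrowed by overriding the environment variables above, for example:
#
#   BATCH_SIZE_SEQ="8" MAX_DURATION_SEQ="7" AMP=true bash ./scripts/inference_benchmark.sh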
|
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules | modules | fp32_group_norm | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer norm done in fp32 (for fp16 training)."""
import torch
import torch.nn as nn
import torch.nn.functional as F
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(
input.float(),
self.num_groups,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
class MaskedGroupNorm(nn.Module):
"""GroupNorm layer which skips padding.
    In the wav2vec 2.0 encoder, where the batch size is small and the time dimension is huge,
this is nearly as fast as nn.GroupNorm.
Ready for TorchScript, favors composition over inheritance.
"""
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True,
device=None, dtype=None):
assert num_groups == num_channels, (
"num_groups != num_channels not yet supported in MaskedGroupNorm")
super().__init__()
self._group_norm = nn.GroupNorm(num_groups, num_channels, eps=eps,
affine=affine, device=device,
dtype=dtype)
def forward(self, x, x_lens):
var = torch.zeros_like(x[:, :, 0])
mean = torch.zeros_like(x[:, :, 0])
for i in range(x.size(0)):
mean[i] = torch.mean(x[i, :, :x_lens[i]], dim=1)
var[i] = torch.var(x[i, :, :x_lens[i]], dim=1, unbiased=False)
out = (x - mean[:, :, None]) / torch.sqrt(var[:, :, None] + self._group_norm.eps)
if self._group_norm.affine:
return out * self._group_norm.weight[None, :, None] + self._group_norm.bias[None, :, None]
else:
return out
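# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): on fully unpadded
# input, MaskedGroupNorm matches nn.GroupNorm up to numerical precision.
def _example_masked_group_norm():
    x = torch.randn(2, 8, 100)  # [batch, channels, time]
    x_lens = torch.tensor([100, 100])
    masked = MaskedGroupNorm(num_groups=8, num_channels=8)
    ref = nn.GroupNorm(num_groups=8, num_channels=8)
    ref.load_state_dict(masked._group_norm.state_dict())
    return torch.allclose(masked(x, x_lens), ref(x), atol=1e-5)  # True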
class Fp32MaskedGroupNorm(nn.Module):
"""GroupNorm layer which skips padding.
    In the wav2vec 2.0 encoder, where the batch size is small and the time dimension is huge,
this is nearly as fast as nn.GroupNorm.
Ready for TorchScript, favors composition over inheritance.
"""
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True,
device=None, dtype=None):
assert num_groups == num_channels, (
"num_groups != num_channels not yet supported in MaskedGroupNorm")
super().__init__()
self._group_norm = nn.GroupNorm(num_groups, num_channels, eps=eps,
affine=affine, device=device,
dtype=dtype)
def hook(state_dict, prefix, *args, **kwargs):
"""Renames keys from layers which used inheritance."""
new_sd = {}
for k, v in state_dict.items():
if not k.startswith(prefix):
new_sd[k] = v
else:
*pref, param = k.split(".")
new_k = ".".join(pref + ["_group_norm", param])
new_sd[new_k] = v
state_dict.clear()
state_dict.update(new_sd)
self._register_load_state_dict_pre_hook(hook)
def forward(self, x, x_lens):
return self._forward(
x.float(),
x_lens,
self._group_norm.weight.float() if self._group_norm.weight is not None else None,
self._group_norm.bias.float() if self._group_norm.bias is not None else None,
).type_as(x)
def _forward(self, x, x_lens, weight, bias):
var = torch.zeros_like(x[:, :, 0])
mean = torch.zeros_like(x[:, :, 0])
for i in range(x.size(0)):
mean[i] = torch.mean(x[i, :, :x_lens[i]], dim=1)
var[i] = torch.var(x[i, :, :x_lens[i]], dim=1, unbiased=False)
out = (x - mean[:, :, None]) / torch.sqrt(var[:, :, None] + self._group_norm.eps)
if self._group_norm.affine:
return out * weight[None, :, None] + bias[None, :, None]
else:
return out
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/layers | layers | convolution | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from enum import Enum
from itertools import product
from typing import Dict
import dgl
import numpy as np
import torch
import torch.nn as nn
import torch.utils.checkpoint
from dgl import DGLGraph
from torch import Tensor
from torch.cuda.nvtx import range as nvtx_range
from se3_transformer.model.fiber import Fiber
from se3_transformer.runtime.utils import degree_to_dim, unfuse_features
class ConvSE3FuseLevel(Enum):
"""
Enum to select a maximum level of fusing optimizations that will be applied when certain conditions are met.
    If a desired level L is picked but cannot be applied to a given convolution, fused ops of lower levels are considered.
A higher level means faster training, but also more memory usage.
If you are tight on memory and want to feed large inputs to the network, choose a low value.
If you want to train fast, choose a high value.
Recommended value is FULL with AMP.
Fully fused TFN convolutions requirements:
- all input channels are the same
- all output channels are the same
- input degrees span the range [0, ..., max_degree]
- output degrees span the range [0, ..., max_degree]
Partially fused TFN convolutions requirements:
* For fusing by output degree:
- all input channels are the same
- input degrees span the range [0, ..., max_degree]
* For fusing by input degree:
- all output channels are the same
- output degrees span the range [0, ..., max_degree]
Original TFN pairwise convolutions: no requirements
"""
FULL = 2
PARTIAL = 1
NONE = 0
class RadialProfile(nn.Module):
"""
Radial profile function.
Outputs weights used to weigh basis matrices in order to get convolution kernels.
In TFN notation: $R^{l,k}$
In SE(3)-Transformer notation: $\phi^{l,k}$
Note:
In the original papers, this function only depends on relative node distances ||x||.
Here, we allow this function to also take as input additional invariant edge features.
This does not break equivariance and adds expressive power to the model.
Diagram:
invariant edge features (node distances included) ───> MLP layer (shared across edges) ───> radial weights
"""
def __init__(
self,
num_freq: int,
channels_in: int,
channels_out: int,
edge_dim: int = 1,
mid_dim: int = 32,
use_layer_norm: bool = False
):
"""
:param num_freq: Number of frequencies
:param channels_in: Number of input channels
:param channels_out: Number of output channels
:param edge_dim: Number of invariant edge features (input to the radial function)
:param mid_dim: Size of the hidden MLP layers
:param use_layer_norm: Apply layer normalization between MLP layers
"""
super().__init__()
modules = [
nn.Linear(edge_dim, mid_dim),
nn.LayerNorm(mid_dim) if use_layer_norm else None,
nn.ReLU(),
nn.Linear(mid_dim, mid_dim),
nn.LayerNorm(mid_dim) if use_layer_norm else None,
nn.ReLU(),
nn.Linear(mid_dim, num_freq * channels_in * channels_out, bias=False)
]
self.net = torch.jit.script(nn.Sequential(*[m for m in modules if m is not None]))
def forward(self, features: Tensor) -> Tensor:
return self.net(features)
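# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): radial weights for a
# batch of edges described by a single invariant feature (the node distance).
def _example_radial_profile():
    profile = RadialProfile(num_freq=3, channels_in=8, channels_out=16, edge_dim=1)
    distances = torch.rand(100, 1)  # 100 edges, 1 invariant feature each
    return profile(distances).shape  # torch.Size([100, 3 * 8 * 16])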
class VersatileConvSE3(nn.Module):
"""
Building block for TFN convolutions.
This single module can be used for fully fused convolutions, partially fused convolutions, or pairwise convolutions.
"""
def __init__(self,
freq_sum: int,
channels_in: int,
channels_out: int,
edge_dim: int,
use_layer_norm: bool,
fuse_level: ConvSE3FuseLevel):
super().__init__()
self.freq_sum = freq_sum
self.channels_out = channels_out
self.channels_in = channels_in
self.fuse_level = fuse_level
self.radial_func = RadialProfile(num_freq=freq_sum,
channels_in=channels_in,
channels_out=channels_out,
edge_dim=edge_dim,
use_layer_norm=use_layer_norm)
def forward(self, features: Tensor, invariant_edge_feats: Tensor, basis: Tensor):
with nvtx_range(f'VersatileConvSE3'):
num_edges = features.shape[0]
in_dim = features.shape[2]
with nvtx_range(f'RadialProfile'):
radial_weights = self.radial_func(invariant_edge_feats) \
.view(-1, self.channels_out, self.channels_in * self.freq_sum)
if basis is not None:
# This block performs the einsum n i l, n o i f, n l f k -> n o k
basis_view = basis.view(num_edges, in_dim, -1)
tmp = (features @ basis_view).view(num_edges, -1, basis.shape[-1])
return radial_weights @ tmp
else:
# k = l = 0 non-fused case
return radial_weights @ features
class ConvSE3(nn.Module):
"""
SE(3)-equivariant graph convolution (Tensor Field Network convolution).
This convolution can map an arbitrary input Fiber to an arbitrary output Fiber, while preserving equivariance.
Features of different degrees interact together to produce output features.
Note 1:
The option is given to not pool the output. This means that the convolution sum over neighbors will not be
done, and the returned features will be edge features instead of node features.
Note 2:
        Unlike the original paper and implementation, this convolution can handle edge features of degree greater than 0.
Input edge features are concatenated with input source node features before the kernel is applied.
"""
def __init__(
self,
fiber_in: Fiber,
fiber_out: Fiber,
fiber_edge: Fiber,
pool: bool = True,
use_layer_norm: bool = False,
self_interaction: bool = False,
max_degree: int = 4,
fuse_level: ConvSE3FuseLevel = ConvSE3FuseLevel.FULL,
allow_fused_output: bool = False,
low_memory: bool = False
):
"""
:param fiber_in: Fiber describing the input features
:param fiber_out: Fiber describing the output features
:param fiber_edge: Fiber describing the edge features (node distances excluded)
:param pool: If True, compute final node features by averaging incoming edge features
:param use_layer_norm: Apply layer normalization between MLP layers
:param self_interaction: Apply self-interaction of nodes
:param max_degree: Maximum degree used in the bases computation
:param fuse_level: Maximum fuse level to use in TFN convolutions
:param allow_fused_output: Allow the module to output a fused representation of features
"""
super().__init__()
self.pool = pool
self.fiber_in = fiber_in
self.fiber_out = fiber_out
self.self_interaction = self_interaction
self.max_degree = max_degree
self.allow_fused_output = allow_fused_output
self.conv_checkpoint = torch.utils.checkpoint.checkpoint if low_memory else lambda m, *x: m(*x)
# channels_in: account for the concatenation of edge features
channels_in_set = set([f.channels + fiber_edge[f.degree] * (f.degree > 0) for f in self.fiber_in])
channels_out_set = set([f.channels for f in self.fiber_out])
unique_channels_in = (len(channels_in_set) == 1)
unique_channels_out = (len(channels_out_set) == 1)
degrees_up_to_max = list(range(max_degree + 1))
common_args = dict(edge_dim=fiber_edge[0] + 1, use_layer_norm=use_layer_norm)
if fuse_level.value >= ConvSE3FuseLevel.FULL.value and \
unique_channels_in and fiber_in.degrees == degrees_up_to_max and \
unique_channels_out and fiber_out.degrees == degrees_up_to_max:
# Single fused convolution
self.used_fuse_level = ConvSE3FuseLevel.FULL
sum_freq = sum([
degree_to_dim(min(d_in, d_out))
for d_in, d_out in product(degrees_up_to_max, degrees_up_to_max)
])
self.conv = VersatileConvSE3(sum_freq, list(channels_in_set)[0], list(channels_out_set)[0],
fuse_level=self.used_fuse_level, **common_args)
elif fuse_level.value >= ConvSE3FuseLevel.PARTIAL.value and \
unique_channels_in and fiber_in.degrees == degrees_up_to_max:
# Convolutions fused per output degree
self.used_fuse_level = ConvSE3FuseLevel.PARTIAL
self.conv_out = nn.ModuleDict()
for d_out, c_out in fiber_out:
sum_freq = sum([degree_to_dim(min(d_out, d)) for d in fiber_in.degrees])
self.conv_out[str(d_out)] = VersatileConvSE3(sum_freq, list(channels_in_set)[0], c_out,
fuse_level=self.used_fuse_level, **common_args)
elif fuse_level.value >= ConvSE3FuseLevel.PARTIAL.value and \
unique_channels_out and fiber_out.degrees == degrees_up_to_max:
# Convolutions fused per input degree
self.used_fuse_level = ConvSE3FuseLevel.PARTIAL
self.conv_in = nn.ModuleDict()
for d_in, c_in in fiber_in:
channels_in_new = c_in + fiber_edge[d_in] * (d_in > 0)
sum_freq = sum([degree_to_dim(min(d_in, d)) for d in fiber_out.degrees])
self.conv_in[str(d_in)] = VersatileConvSE3(sum_freq, channels_in_new, list(channels_out_set)[0],
fuse_level=self.used_fuse_level, **common_args)
else:
# Use pairwise TFN convolutions
self.used_fuse_level = ConvSE3FuseLevel.NONE
self.conv = nn.ModuleDict()
for (degree_in, channels_in), (degree_out, channels_out) in (self.fiber_in * self.fiber_out):
dict_key = f'{degree_in},{degree_out}'
channels_in_new = channels_in + fiber_edge[degree_in] * (degree_in > 0)
sum_freq = degree_to_dim(min(degree_in, degree_out))
self.conv[dict_key] = VersatileConvSE3(sum_freq, channels_in_new, channels_out,
fuse_level=self.used_fuse_level, **common_args)
if self_interaction:
self.to_kernel_self = nn.ParameterDict()
for degree_out, channels_out in fiber_out:
if fiber_in[degree_out]:
self.to_kernel_self[str(degree_out)] = nn.Parameter(
torch.randn(channels_out, fiber_in[degree_out]) / np.sqrt(fiber_in[degree_out]))
def _try_unpad(self, feature, basis):
# Account for padded basis
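        # The true output dimension is odd (2 * degree + 1); the precomputed bases may be padded
        # to an even size, so trim the trailing padding column from the features when present.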
if basis is not None:
out_dim = basis.shape[-1]
out_dim += out_dim % 2 - 1
return feature[..., :out_dim]
else:
return feature
def forward(
self,
node_feats: Dict[str, Tensor],
edge_feats: Dict[str, Tensor],
graph: DGLGraph,
basis: Dict[str, Tensor]
):
with nvtx_range(f'ConvSE3'):
invariant_edge_feats = edge_feats['0'].squeeze(-1)
src, dst = graph.edges()
out = {}
in_features = []
# Fetch all input features from edge and node features
for degree_in in self.fiber_in.degrees:
src_node_features = node_feats[str(degree_in)][src]
if degree_in > 0 and str(degree_in) in edge_feats:
# Handle edge features of any type by concatenating them to node features
src_node_features = torch.cat([src_node_features, edge_feats[str(degree_in)]], dim=1)
in_features.append(src_node_features)
if self.used_fuse_level == ConvSE3FuseLevel.FULL:
in_features_fused = torch.cat(in_features, dim=-1)
out = self.conv_checkpoint(
self.conv, in_features_fused, invariant_edge_feats, basis['fully_fused']
)
if not self.allow_fused_output or self.self_interaction or self.pool:
out = unfuse_features(out, self.fiber_out.degrees)
elif self.used_fuse_level == ConvSE3FuseLevel.PARTIAL and hasattr(self, 'conv_out'):
in_features_fused = torch.cat(in_features, dim=-1)
for degree_out in self.fiber_out.degrees:
basis_used = basis[f'out{degree_out}_fused']
out[str(degree_out)] = self._try_unpad(
self.conv_checkpoint(
self.conv_out[str(degree_out)], in_features_fused, invariant_edge_feats, basis_used
), basis_used)
elif self.used_fuse_level == ConvSE3FuseLevel.PARTIAL and hasattr(self, 'conv_in'):
out = 0
for degree_in, feature in zip(self.fiber_in.degrees, in_features):
out = out + self.conv_checkpoint(
self.conv_in[str(degree_in)], feature, invariant_edge_feats, basis[f'in{degree_in}_fused']
)
if not self.allow_fused_output or self.self_interaction or self.pool:
out = unfuse_features(out, self.fiber_out.degrees)
else:
# Fallback to pairwise TFN convolutions
for degree_out in self.fiber_out.degrees:
out_feature = 0
for degree_in, feature in zip(self.fiber_in.degrees, in_features):
dict_key = f'{degree_in},{degree_out}'
basis_used = basis.get(dict_key, None)
out_feature = out_feature + self._try_unpad(
self.conv_checkpoint(
self.conv[dict_key], feature, invariant_edge_feats, basis_used
), basis_used)
out[str(degree_out)] = out_feature
for degree_out in self.fiber_out.degrees:
if self.self_interaction and str(degree_out) in self.to_kernel_self:
with nvtx_range(f'self interaction'):
dst_features = node_feats[str(degree_out)][dst]
kernel_self = self.to_kernel_self[str(degree_out)]
out[str(degree_out)] = out[str(degree_out)] + kernel_self @ dst_features
if self.pool:
with nvtx_range(f'pooling'):
if isinstance(out, dict):
out[str(degree_out)] = dgl.ops.copy_e_sum(graph, out[str(degree_out)])
else:
out = dgl.ops.copy_e_sum(graph, out)
return out
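# Minimal ConvSE3 usage sketch (illustrative; assumes the Fiber class imported by this module,
# a DGLGraph, per-degree feature dicts keyed by str(degree), and precomputed bases):
#
#   conv = ConvSE3(fiber_in=Fiber({0: 32, 1: 16}),
#                  fiber_out=Fiber({0: 32, 1: 16}),
#                  fiber_edge=Fiber({0: 2}),
#                  pool=True, self_interaction=True, max_degree=2)
#   out = conv(node_feats, edge_feats, graph, basis)   # dict of per-degree node features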
|
TensorFlow/LanguageModeling/BERT/biobert/scripts | scripts | biobert_finetune_inference_benchmark | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
task=${1:-"ner_bc5cdr-chem"}
bert_model=${2:-"base"}
cased=${3:-"false"}
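# Example invocation (illustrative; positional arguments are task, BERT size, and casing):
#   bash biobert_finetune_inference_benchmark.sh ner_bc5cdr-chem base false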
if [ "$cased" = "true" ] ; then
DO_LOWER_CASE=0
CASING_DIR_PREFIX="cased"
case_flag="--do_lower_case=False"
else
DO_LOWER_CASE=1
CASING_DIR_PREFIX="uncased"
case_flag="--do_lower_case=True"
fi
if [ "$bert_model" = "large" ] ; then
export BERT_DIR=/workspace/bert/data/download/google_pretrained_weights/${CASING_DIR_PREFIX}_L-24_H-1024_A-16
else
export BERT_DIR=/workspace/bert/data/download/google_pretrained_weights/${CASING_DIR_PREFIX}_L-12_H-768_A-12
fi
DATESTAMP=`date +'%y%m%d%H%M%S'`
printf -v TAG "tf_bert_biobert_%s_inference_benchmark_%s_%s" "$task" "$bert_model" "$CASING_DIR_PREFIX"
OUTPUT_DIR=/results/${TAG}_${DATESTAMP}
mkdir -p ${OUTPUT_DIR}
if [ "$task" = "ner_bc5cdr-chem" ] ; then
DATASET_DIR=/workspace/bert/data/biobert/BC5CDR/chem
  LOGFILE="${OUTPUT_DIR}/${task}_inference_benchmark_bert_${bert_model}.log"
  echo "Inference performance benchmarking for BERT $bert_model from $BERT_DIR" >> $LOGFILE
echo "Precision Sequence Length Batch size Performance(sent/sec)" >> $LOGFILE
for seq_length in 128 512; do
for batch_size in 8 32 64; do
for use_fp16 in "--amp" "--noamp"; do
        res_dir=${OUTPUT_DIR}/bert_${bert_model}_sl_${seq_length}_prec_${use_fp16}_bs_${batch_size}
mkdir -p ${res_dir}
        tmp_file="${res_dir}/${task}_inference_benchmark.log"
python /workspace/bert/run_ner.py \
--do_prepare=true \
--do_eval=true \
--do_predict=true \
--task_name="bc5cdr" \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint="$BERT_DIR/bert_model.ckpt" \
--data_dir=$DATASET_DIR \
--output_dir=$res_dir \
--eval_batch_size=$batch_size \
--predict_batch_size=$batch_size \
--max_seq_length=$seq_length \
$use_fp16 --use_xla $case_flag |& tee $tmp_file
perf=`cat $tmp_file | grep -F 'Throughput Average (sentences/sec) =' | tail -1 | awk -F'= ' '{print $2}' | awk -F' sen' '{print $1}'`
echo "$use_fp16 $seq_len $batch_size $perf" >> $LOGFILE
done
done
done
elif [ "$task" = "ner_bc5cdr-disease" ] ; then
DATASET_DIR=/workspace/bert/data/biobert/BC5CDR/disease
  LOGFILE="${OUTPUT_DIR}/${task}_inference_benchmark_bert_${bert_model}.log"
  echo "Inference performance benchmarking for BERT $bert_model from $BERT_DIR" >> $LOGFILE
echo "Precision Sequence Length Batch size Performance(sent/sec)" >> $LOGFILE
for seq_length in 128 512; do
for batch_size in 8 32 64; do
for use_fp16 in "--amp" "--noamp"; do
        res_dir=${OUTPUT_DIR}/bert_${bert_model}_sl_${seq_length}_prec_${use_fp16}_bs_${batch_size}
mkdir -p ${res_dir}
        tmp_file="${res_dir}/${task}_inference_benchmark.log"
python3 /workspace/bert/run_ner.py \
--do_prepare=true \
--do_eval=true \
--do_predict=true \
--task_name="bc5cdr" \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint="$BERT_DIR/bert_model.ckpt" \
--data_dir=$DATASET_DIR \
--output_dir=$res_dir \
--eval_batch_size=$batch_size \
--predict_batch_size=$batch_size \
--max_seq_length=$seq_length \
"$use_fp16" --use_xla $case_flag |& tee $tmp_file
perf=`cat $tmp_file | grep -F 'Throughput Average (sentences/sec) =' | tail -1 | awk -F'= ' '{print $2}' | awk -F' sen' '{print $1}'`
echo "$use_fp16 $seq_len $batch_size $perf" >> $LOGFILE
done
done
done
elif [ "$task" = "rel_chemprot" ] ; then
DATASET_DIR=/workspace/bert/data/biobert/chemprot-data_treeLSTM
  LOGFILE="${OUTPUT_DIR}/${task}_inference_benchmark_bert_${bert_model}.log"
  echo "Inference performance benchmarking for BERT $bert_model from $BERT_DIR" >> $LOGFILE
echo "Precision Sequence Length Batch size Performance(sent/sec)" >> $LOGFILE
for seq_length in 128 512; do
for batch_size in 8 32 64; do
for use_fp16 in "--amp" "--noamp"; do
        res_dir=${OUTPUT_DIR}/bert_${bert_model}_sl_${seq_length}_prec_${use_fp16}_bs_${batch_size}
mkdir -p ${res_dir}
        tmp_file="${res_dir}/${task}_inference_benchmark.log"
python3 /workspace/bert/run_re.py \
--do_prepare=true \
--do_eval=true \
--do_predict=true \
--task_name="chemprot" \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint="$BERT_DIR/bert_model.ckpt" \
--data_dir=$DATASET_DIR \
--output_dir=$res_dir \
--eval_batch_size=$batch_size \
--predict_batch_size=$batch_size \
--max_seq_length=$seq_length \
"$use_fp16" --use_xla $case_flag |& tee $tmp_file
perf=`cat $tmp_file | grep -F 'Throughput Average (sentences/sec) =' | tail -1 | awk -F'= ' '{print $2}' | awk -F' sen' '{print $1}'`
echo "$use_fp16 $seq_len $batch_size $perf" >> $LOGFILE
done
done
done
else
echo "Benchmarking for " $task "currently not supported. Sorry!"
fi |
PyTorch/Recommendation/DLRM/dlrm/cuda_src/dot_based_interact_ampere | dot_based_interact_ampere | dot_based_interact_pytorch_types | #include <torch/extension.h>
#include <torch/types.h>
#include <stdexcept>
#include "../dot_based_interact/dot_based_interact_fp16_fwd.cu"
#include "../dot_based_interact/dot_based_interact_fp16_bwd.cu"
#include "../dot_based_interact/dot_based_interact_tf32_fwd.cu"
#include "../dot_based_interact/dot_based_interact_tf32_bwd.cu"
torch::Tensor dotBasedInteractFwdTorch(torch::Tensor input, torch::Tensor bottom_mlp_output) {
//input includes bottom_mlp_output along with the embeddings, at the first position
auto size = input.sizes();
auto batch_size = size[0];
auto num_rows = size[1];
auto num_cols = size[2];
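  // The interaction output is the bottom-MLP vector (num_cols values) concatenated with the
  // upper triangle of the pairwise dot-product matrix, i.e. num_rows * (num_rows - 1) / 2 values.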
uint raw_output_size = ((num_rows * (num_rows - 1)) >> 1) + num_cols;
uint output_size = ((raw_output_size-1)/8 + 1)*8; //round up to multiple of 8
int64_t outputShape[2] = {batch_size, output_size};
auto output = torch::empty(c10::IntArrayRef(outputShape), input.options());
if (input.scalar_type() == torch::ScalarType::Half && bottom_mlp_output.scalar_type() == torch::ScalarType::Half) {
dotBasedInteractFwd(input.contiguous().data_ptr<at::Half>(),
bottom_mlp_output.contiguous().data_ptr<at::Half>(),
output.contiguous().data_ptr<at::Half>(),
batch_size,
num_rows,
num_cols);
} else if (input.scalar_type() == torch::ScalarType::Float &&
bottom_mlp_output.scalar_type() == torch::ScalarType::Float) {
dotBasedInteractTF32Fwd(input.contiguous().data_ptr<float>(),
bottom_mlp_output.contiguous().data_ptr<float>(),
output.contiguous().data_ptr<float>(),
batch_size,
num_rows,
num_cols);
} else {
throw std::invalid_argument("Invalid input type.");
}
return output;
}
std::vector<torch::Tensor> dotBasedInteractBwdTorch(torch::Tensor input, torch::Tensor upstreamGrad) {
auto size = input.sizes();
auto batch_size = size[0];
auto num_rows = size[1];
auto num_cols = size[2];
auto outputGrad = torch::empty_like(input);
int64_t outputShape[2] = {batch_size, num_cols};
auto mlp_grad = torch::empty(c10::IntArrayRef(outputShape), input.options());
if (input.scalar_type() == torch::ScalarType::Half && upstreamGrad.scalar_type() == torch::ScalarType::Half) {
dotBasedInteractBwd(input.contiguous().data_ptr<at::Half>(),
upstreamGrad.contiguous().data_ptr<at::Half>(),
outputGrad.contiguous().data_ptr<at::Half>(),
mlp_grad.contiguous().data_ptr<at::Half>(),
batch_size,
num_rows,
num_cols);
} else if (input.scalar_type() == torch::ScalarType::Float &&
upstreamGrad.scalar_type() == torch::ScalarType::Float) {
dotBasedInteractTF32Bwd(input.contiguous().data_ptr<float>(),
upstreamGrad.contiguous().data_ptr<float>(),
outputGrad.contiguous().data_ptr<float>(),
mlp_grad.contiguous().data_ptr<float>(),
batch_size,
num_rows,
num_cols);
} else {
throw std::invalid_argument("Invalid input type.");
}
return {outputGrad, mlp_grad};
}
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs/cityscapes | cityscapes | e2e_mask_rcnn_R_50_FPN_1x_cocostyle | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50"
BACKBONE:
CONV_BODY: "R-50-FPN"
OUT_CHANNELS: 256
RPN:
USE_FPN: True
ANCHOR_STRIDE: (4, 8, 16, 32, 64)
PRE_NMS_TOP_N_TRAIN: 2000
PRE_NMS_TOP_N_TEST: 1000
POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TEST: 1000
ROI_HEADS:
USE_FPN: True
ROI_BOX_HEAD:
POOLER_RESOLUTION: 7
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
POOLER_SAMPLING_RATIO: 2
FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"
PREDICTOR: "FPNPredictor"
NUM_CLASSES: 9
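    # NUM_CLASSES of 9 corresponds to the 8 Cityscapes instance ("thing") categories plus background.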
ROI_MASK_HEAD:
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor"
PREDICTOR: "MaskRCNNC4Predictor"
POOLER_RESOLUTION: 14
POOLER_SAMPLING_RATIO: 2
RESOLUTION: 28
SHARE_BOX_FEATURE_EXTRACTOR: False
MASK_ON: True
DATASETS:
TRAIN: ("cityscapes_fine_instanceonly_seg_train_cocostyle",)
TEST: ("cityscapes_fine_instanceonly_seg_val_cocostyle",)
DATALOADER:
SIZE_DIVISIBILITY: 32
SOLVER:
BASE_LR: 0.01
WEIGHT_DECAY: 0.0001
STEPS: (18000,)
MAX_ITER: 24000
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | binding | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_BINDING_H
#define TT2I_BINDING_H
#include "cudaMemory.h"
#include "NvInfer.h"
#include <unordered_map>
#include <vector>
namespace tts
{
class Binding
{
public:
Binding();
/**
* @brief Set a given binding.
*
* @tparam T The type of binding.
* @param engine The engine to bind to.
* @param name The name of the binding.
* @param ptr The pointer to bind.
*/
template <typename T>
void setBinding(const nvinfer1::ICudaEngine& engine, const char* const name, T* const ptr)
{
setVoidBinding(engine, name, (void*) ptr);
}
/**
* @brief Set a given binding.
*
* @tparam T The type of binding.
* @param engine The engine to bind to.
* @param name The name of the binding.
* @param buffer The buffer to bind.
*/
template <typename T>
void setBinding(
const nvinfer1::ICudaEngine& engine,
const char* const name,
CudaMemory<T>& buffer)
{
setVoidBinding(engine, name, buffer.data());
}
/**
* @brief Get the bindings to pass into TRT. This is non-const because TRT
* requires `void **` rather than `void * const *`.
*
* @return The bindings.
*/
void** getBindings();
private:
std::vector<void*> mBindings;
/**
* @brief Set a given binding via `void*` pointer.
*
* @param engine The engine to bind to.
* @param name The name of the binding.
* @param ptr The pointer to bind.
*/
void setVoidBinding(const nvinfer1::ICudaEngine& engine, const char* const name, void* ptr);
};
} // namespace tts
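// Usage sketch (illustrative; the engine, execution context, binding names, and buffers below
// are assumptions, not part of this header):
//   tts::Binding binding;
//   binding.setBinding(engine, "input", inputBuffer);    // CudaMemory<float> buffer
//   binding.setBinding(engine, "output", outputDevPtr);  // raw device pointer
//   context->enqueueV2(binding.getBindings(), stream, nullptr);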
#endif
|
CUDA-Optimized/FastSpeech/fastspeech/inferencer | inferencer | inferencer | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import pathlib
import time
import abc
import numpy as np
import torch
from tensorboardX import SummaryWriter
import glob
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_device_async, to_cpu_numpy
import torch.nn as nn
class Inferencer(object):
"""
set seed
load model
logging
"""
def __init__(self, model_name, model, data_loader=None, ckpt_path=None, ckpt_file=None, log_path=None, device='cuda', use_fp16=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.ckpt_path = ckpt_path
self.log_path = log_path
self.device = device
self.seed = seed
self.step = 0
self.ckpt_file = ckpt_file
self.use_fp16 = use_fp16
# model
self.model.eval()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(self.model_name, num_param))
# precision
if self.use_fp16:
self.model = self.model.half()
# data parallel
self.model = nn.DataParallel(self.model)
# set seed
if seed is None:
seed = np.random.randint(2**16)
np.random.seed(seed)
torch.manual_seed(seed)
        self.data_loader_iter = iter(self.data_loader) if self.data_loader is not None else None
# logging
if log_path:
            # tensorboard log path : {log_path}/YYYYMMDD-HHMMSS
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
# checkpoint path
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
# load checkpoint
self.load(ckpt_file)
def __enter__(self):
pass
def __exit__(self, exception_type, exception_value, traceback):
pass
@abc.abstractmethod
def infer(self):
        raise NotImplementedError
def load(self, ckpt_file):
# load latest checkpoint file if not defined.
if not ckpt_file:
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
ckpt_file = max(files_exist, key=os.path.getctime)
if ckpt_file:
state_dict = torch.load(ckpt_file, map_location=self.device)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
tprint('[Load] Checkpoint \'{}\'. Step={}'.format(ckpt_file, self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path))
raise Exception("No checkpoints found.")
def log(self, output):
output = {k: to_cpu_numpy(v) for k, v in output.items()}
self.console_log('infer', output)
if self.log_path:
self.tensorboard_log('infer', output)
@abc.abstractmethod
def console_log(self, tag, output):
        raise NotImplementedError
@abc.abstractmethod
def tensorboard_log(self, tag, output):
        raise NotImplementedError
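# Minimal subclassing sketch (illustrative only; the model call, batch layout, and log keys are
# assumptions, not part of this module):
#
#   class MyInferencer(Inferencer):
#       def infer(self):
#           batch = next(self.data_loader_iter)
#           with torch.no_grad():
#               return self.model(batch)
#
#       def console_log(self, tag, output):
#           tprint('[{}] step={}'.format(tag, self.step))
#
#       def tensorboard_log(self, tag, output):
#           pass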
|
PyTorch/SpeechSynthesis/Tacotron2/filelists | filelists | ljs_audio_text_test_filelist | LJSpeech-1.1/wavs/LJ045-0096.wav|Mrs. De Mohrenschildt thought that Oswald,
LJSpeech-1.1/wavs/LJ049-0022.wav|The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.
LJSpeech-1.1/wavs/LJ033-0042.wav|Between the hours of eight and nine p.m. they were occupied with the children in the bedrooms located at the extreme east end of the house.
LJSpeech-1.1/wavs/LJ016-0117.wav|The prisoner had nothing to deal with but wooden panels, and by dint of cutting and chopping he got both the lower panels out.
LJSpeech-1.1/wavs/LJ025-0157.wav|Under these circumstances, unnatural as they are, with proper management, the bean will thrust forth its radicle and its plumule;
LJSpeech-1.1/wavs/LJ042-0219.wav|Oswald demonstrated his thinking in connection with his return to the United States by preparing two sets of identical questions of the type which he might have thought
LJSpeech-1.1/wavs/LJ032-0164.wav|it is not possible to state with scientific certainty that a particular small group of fibers come from a certain piece of clothing
LJSpeech-1.1/wavs/LJ046-0092.wav|has confidence in the dedicated Secret Service men who are ready to lay down their lives for him
LJSpeech-1.1/wavs/LJ050-0118.wav|Since these agencies are already obliged constantly to evaluate the activities of such groups,
LJSpeech-1.1/wavs/LJ043-0016.wav|Jeanne De Mohrenschildt said, quote,
LJSpeech-1.1/wavs/LJ021-0078.wav|no economic panacea, which could simply revive over-night the heavy industries and the trades dependent upon them.
LJSpeech-1.1/wavs/LJ039-0148.wav|Examination of the cartridge cases found on the sixth floor of the Depository Building
LJSpeech-1.1/wavs/LJ047-0202.wav|testified that the information available to the Federal Government about Oswald before the assassination would, if known to PRS,
LJSpeech-1.1/wavs/LJ023-0056.wav|It is an easy document to understand when you remember that it was called into being
LJSpeech-1.1/wavs/LJ021-0025.wav|And in many directions, the intervention of that organized control which we call government
LJSpeech-1.1/wavs/LJ030-0105.wav|Communications in the motorcade.
LJSpeech-1.1/wavs/LJ021-0012.wav|with respect to industry and business, but nearly all are agreed that private enterprise in times such as these
LJSpeech-1.1/wavs/LJ019-0169.wav|and one or two men were allowed to mend clothes and make shoes. The rules made by the Secretary of State were hung up in conspicuous parts of the prison;
LJSpeech-1.1/wavs/LJ039-0088.wav|It just is an aid in seeing in the fact that you only have the one element, the crosshair,
LJSpeech-1.1/wavs/LJ016-0192.wav|"I think I could do that sort of job," said Calcraft, on the spur of the moment.
LJSpeech-1.1/wavs/LJ014-0142.wav|was strewn in front of the dock, and sprinkled it towards the bench with a contemptuous gesture.
LJSpeech-1.1/wavs/LJ012-0015.wav|Weedon and Lecasser to twelve and six months respectively in Coldbath Fields.
LJSpeech-1.1/wavs/LJ048-0033.wav|Prior to November twenty-two, nineteen sixty-three
LJSpeech-1.1/wavs/LJ028-0349.wav|who were each required to send so large a number to Babylon, that in all there were collected no fewer than fifty thousand.
LJSpeech-1.1/wavs/LJ030-0197.wav|At first Mrs. Connally thought that her husband had been killed,
LJSpeech-1.1/wavs/LJ017-0133.wav|Palmer speedily found imitators.
LJSpeech-1.1/wavs/LJ034-0123.wav|Although Brennan testified that the man in the window was standing when he fired the shots, most probably he was either sitting or kneeling.
LJSpeech-1.1/wavs/LJ003-0282.wav|Many years were to elapse before these objections should be fairly met and universally overcome.
LJSpeech-1.1/wavs/LJ032-0204.wav|Special Agent Lyndal L. Shaneyfelt, a photography expert with the FBI,
LJSpeech-1.1/wavs/LJ016-0241.wav|Calcraft served the city of London till eighteen seventy-four, when he was pensioned at the rate of twenty-five shillings per week.
LJSpeech-1.1/wavs/LJ023-0033.wav|we will not allow ourselves to run around in new circles of futile discussion and debate, always postponing the day of decision.
LJSpeech-1.1/wavs/LJ009-0286.wav|There has never been much science in the system of carrying out the extreme penalty in this country; the "finisher of the law"
LJSpeech-1.1/wavs/LJ008-0181.wav|he had his pockets filled with bread and cheese, and it was generally supposed that he had come a long distance to see the fatal show.
LJSpeech-1.1/wavs/LJ015-0052.wav|to the value of twenty thousand pounds.
LJSpeech-1.1/wavs/LJ016-0314.wav|Sir George Grey thought there was a growing feeling in favor of executions within the prison precincts.
LJSpeech-1.1/wavs/LJ047-0056.wav|From August nineteen sixty-two
LJSpeech-1.1/wavs/LJ010-0027.wav|Nor did the methods by which they were perpetrated greatly vary from those in times past.
LJSpeech-1.1/wavs/LJ010-0065.wav|At the former the "Provisional Government" was to be established,
LJSpeech-1.1/wavs/LJ046-0113.wav|The Commission has concluded that at the time of the assassination
LJSpeech-1.1/wavs/LJ028-0410.wav|There among the ruins they still live in the same kind of houses,
LJSpeech-1.1/wavs/LJ044-0137.wav|More seriously, the facts of his defection had become known, leaving him open to almost unanswerable attack by those who opposed his views.
LJSpeech-1.1/wavs/LJ008-0215.wav|One by one the huge uprights of black timber were fitted together,
LJSpeech-1.1/wavs/LJ030-0084.wav|or when the press of the crowd made it impossible for the escort motorcycles to stay in position on the car's rear flanks.
LJSpeech-1.1/wavs/LJ020-0092.wav|Have yourself called on biscuit mornings an hour earlier than usual.
LJSpeech-1.1/wavs/LJ029-0096.wav|On November fourteen, Lawson and Sorrels attended a meeting at Love Field
LJSpeech-1.1/wavs/LJ015-0308.wav|and others who swore to the meetings of the conspirators and their movements. Saward was found guilty,
LJSpeech-1.1/wavs/LJ012-0067.wav|But Mrs. Solomons could not resist the temptation to dabble in stolen goods, and she was found shipping watches of the wrong category to New York.
LJSpeech-1.1/wavs/LJ018-0231.wav|namely, to suppress it and substitute another.
LJSpeech-1.1/wavs/LJ014-0265.wav|and later he became manager of the newly rebuilt Olympic at Wych Street.
LJSpeech-1.1/wavs/LJ024-0102.wav|would be the first to exclaim as soon as an amendment was proposed
LJSpeech-1.1/wavs/LJ007-0233.wav|it consists of several circular perforations, about two inches in diameter,
LJSpeech-1.1/wavs/LJ013-0213.wav|This seems to have decided Courvoisier,
LJSpeech-1.1/wavs/LJ032-0045.wav|This price included nineteen dollars, ninety-five cents for the rifle and the scope, and one dollar, fifty cents for postage and handling.
LJSpeech-1.1/wavs/LJ011-0048.wav|Wherefore let him that thinketh he standeth take heed lest he fall," and was full of the most pointed allusions to the culprit.
LJSpeech-1.1/wavs/LJ005-0294.wav|It was frequently stated in evidence that the jail of the borough was in so unfit a state for the reception of prisoners,
LJSpeech-1.1/wavs/LJ016-0007.wav|There were others less successful.
LJSpeech-1.1/wavs/LJ028-0138.wav|perhaps the tales that travelers told him were exaggerated as travelers' tales are likely to be,
LJSpeech-1.1/wavs/LJ050-0029.wav|that is reflected in definite and comprehensive operating procedures.
LJSpeech-1.1/wavs/LJ014-0121.wav|The prisoners were in due course transferred to Newgate, to be put upon their trial at the Central Criminal Court.
LJSpeech-1.1/wavs/LJ014-0146.wav|They had to handcuff her by force against the most violent resistance, and still she raged and stormed,
LJSpeech-1.1/wavs/LJ046-0111.wav|The Secret Service has attempted to perform this function through the activities of its Protective Research Section
LJSpeech-1.1/wavs/LJ012-0257.wav|But the affair still remained a profound mystery. No light was thrown upon it till, towards the end of March,
LJSpeech-1.1/wavs/LJ002-0260.wav|Yet the public opinion of the whole body seems to have checked dissipation.
LJSpeech-1.1/wavs/LJ031-0014.wav|the Presidential limousine arrived at the emergency entrance of the Parkland Hospital at about twelve:thirty-five p.m.
LJSpeech-1.1/wavs/LJ047-0093.wav|Oswald was arrested and jailed by the New Orleans Police Department for disturbing the peace, in connection with a street fight which broke out when he was accosted
LJSpeech-1.1/wavs/LJ003-0324.wav|gaming of all sorts should be peremptorily forbidden under heavy pains and penalties.
LJSpeech-1.1/wavs/LJ021-0115.wav|we have reached into the heart of the problem which is to provide such annual earnings for the lowest paid worker as will meet his minimum needs.
LJSpeech-1.1/wavs/LJ046-0191.wav|it had established periodic regular review of the status of four hundred individuals;
LJSpeech-1.1/wavs/LJ034-0197.wav|who was one of the first witnesses to alert the police to the Depository as the source of the shots, as has been discussed in chapter three.
LJSpeech-1.1/wavs/LJ002-0253.wav|were governed by rules which they themselves had framed, and under which subscriptions were levied
LJSpeech-1.1/wavs/LJ048-0288.wav|might have been more alert in the Dallas motorcade if they had retired promptly in Fort Worth.
LJSpeech-1.1/wavs/LJ007-0112.wav|Many of the old customs once prevalent in the State Side, so properly condemned and abolished,
LJSpeech-1.1/wavs/LJ017-0189.wav|who was presently attacked in the same way as the others, but, but, thanks to the prompt administration of remedies, he recovered.
LJSpeech-1.1/wavs/LJ042-0230.wav|basically, although I hate the USSR and socialist system I still think marxism can work under different circumstances, end quote.
LJSpeech-1.1/wavs/LJ050-0161.wav|The Secret Service should not and does not plan to develop its own intelligence gathering facilities to duplicate the existing facilities of other Federal agencies.
LJSpeech-1.1/wavs/LJ003-0011.wav|that not more than one bottle of wine or one quart of beer could be issued at one time. No account was taken of the amount of liquors admitted in one day,
LJSpeech-1.1/wavs/LJ008-0206.wav|and caused a number of stout additional barriers to be erected in front of the scaffold,
LJSpeech-1.1/wavs/LJ002-0261.wav|The poorer prisoners were not in abject want, as in other prisons,
LJSpeech-1.1/wavs/LJ012-0189.wav|Hunt, in consideration of the information he had given, escaped death, and was sentenced to transportation for life.
LJSpeech-1.1/wavs/LJ019-0317.wav|The former, which consisted principally of the tread-wheel, cranks, capstans, shot-drill,
LJSpeech-1.1/wavs/LJ011-0041.wav|Visited Mr. Fauntleroy. My application for books for him not having been attended, I had no prayer-book to give him.
LJSpeech-1.1/wavs/LJ023-0089.wav|That is not only my accusation.
LJSpeech-1.1/wavs/LJ044-0224.wav|would not agree with that particular wording, end quote.
LJSpeech-1.1/wavs/LJ013-0104.wav|He found them at length residing at the latter place, one as a landed proprietor, the other as a publican.
LJSpeech-1.1/wavs/LJ013-0055.wav|The jury did not believe him, and the verdict was for the defendants.
LJSpeech-1.1/wavs/LJ014-0306.wav|These had been attributed to political action; some thought that the large purchases in foreign grains, effected at losing prices,
LJSpeech-1.1/wavs/LJ029-0052.wav|To supplement the PRS files, the Secret Service depends largely on local police departments and local offices of other Federal agencies
LJSpeech-1.1/wavs/LJ028-0459.wav|Its bricks, measuring about thirteen inches square and three inches in thickness, were burned and stamped with the usual short inscription:
LJSpeech-1.1/wavs/LJ017-0183.wav|Soon afterwards Dixon died, showing all the symptoms already described.
LJSpeech-1.1/wavs/LJ009-0084.wav|At length the ordinary pauses, and then, in a deep tone, which, though hardly above a whisper, is audible to all, says,
LJSpeech-1.1/wavs/LJ007-0170.wav|That in this vast metropolis, the center of wealth, civilization, and information;
LJSpeech-1.1/wavs/LJ016-0277.wav|This is proved by contemporary accounts, especially one graphic and realistic article which appeared in the 'Times,'
LJSpeech-1.1/wavs/LJ009-0061.wav|He staggers towards the pew, reels into it, stumbles forward, flings himself on the ground, and, by a curious twist of the spine,
LJSpeech-1.1/wavs/LJ019-0201.wav|to select a sufficiently spacious piece of ground, and erect a prison which from foundations to roofs should be in conformity with the newest ideas.
LJSpeech-1.1/wavs/LJ030-0063.wav|He had repeated this wish only a few days before, during his visit to Tampa, Florida.
LJSpeech-1.1/wavs/LJ010-0257.wav|a third miscreant made a similar but far less serious attempt in the month of July following.
LJSpeech-1.1/wavs/LJ009-0106.wav|The keeper tries to appear unmoved, but his eye wanders anxiously over the combustible assembly.
LJSpeech-1.1/wavs/LJ008-0121.wav|After the construction and action of the machine had been explained, the doctor asked the governor what kind of men he had commanded at Goree,
LJSpeech-1.1/wavs/LJ050-0069.wav|the Secret Service had received from the FBI some nine thousand reports on members of the Communist Party.
LJSpeech-1.1/wavs/LJ006-0202.wav|The news-vendor was also a tobacconist,
LJSpeech-1.1/wavs/LJ012-0230.wav|Shortly before the day fixed for execution, Bishop made a full confession, the bulk of which bore the impress of truth,
LJSpeech-1.1/wavs/LJ005-0248.wav|and stated that in his opinion Newgate, as the common jail of Middlesex, was wholly inadequate to the proper confinement of its prisoners.
LJSpeech-1.1/wavs/LJ037-0053.wav|who had been greatly upset by her experience, was able to view a lineup of four men handcuffed together at the police station.
LJSpeech-1.1/wavs/LJ045-0177.wav|For the first time
LJSpeech-1.1/wavs/LJ004-0036.wav|it was hoped that their rulers would hire accommodation in the county prisons, and that the inferior establishments would in course of time disappear.
LJSpeech-1.1/wavs/LJ026-0054.wav|carbohydrates (starch, cellulose) and fats.
LJSpeech-1.1/wavs/LJ020-0085.wav|Break apart from one another and pile on a plate, throwing a clean doily or a small napkin over them. Break open at table.
LJSpeech-1.1/wavs/LJ046-0226.wav|The several military intelligence agencies reported crank mail and similar threats involving the President.
LJSpeech-1.1/wavs/LJ014-0233.wav|he shot an old soldier who had attempted to detain him. He was convicted and executed.
LJSpeech-1.1/wavs/LJ033-0152.wav|The portion of the palm which was identified was the heel of the right palm, i.e., the area near the wrist, on the little finger side.
LJSpeech-1.1/wavs/LJ004-0009.wav|as indefatigable and self-sacrificing, found by personal visitation that the condition of jails throughout the kingdom was,
LJSpeech-1.1/wavs/LJ017-0134.wav|Within a few weeks occurred the Leeds poisoning case, in which the murderer undoubtedly was inspired by the facts made public at Palmer's trial.
LJSpeech-1.1/wavs/LJ019-0318.wav|was to be the rule for all convicted prisoners throughout the early stages of their detention;
LJSpeech-1.1/wavs/LJ020-0093.wav|Rise, wash face and hands, rinse the mouth out and brush back the hair.
LJSpeech-1.1/wavs/LJ012-0188.wav|Probert was then admitted as a witness, and the case was fully proved against Thurtell, who was hanged in front of Hertford Jail.
LJSpeech-1.1/wavs/LJ019-0202.wav|The preference given to the Pentonville system destroyed all hopes of a complete reformation of Newgate.
LJSpeech-1.1/wavs/LJ039-0027.wav|Oswald's revolver
LJSpeech-1.1/wavs/LJ040-0176.wav|He admitted to fantasies about being powerful and sometimes hurting and killing people, but refused to elaborate on them.
LJSpeech-1.1/wavs/LJ018-0354.wav|Doubts were long entertained whether Thomas Wainwright,
LJSpeech-1.1/wavs/LJ031-0185.wav|From the Presidential airplane, the Vice President telephoned Attorney General Robert F. Kennedy,
LJSpeech-1.1/wavs/LJ006-0137.wav|They were not obliged to attend chapel, and seldom if ever went; "prisoners," said one of them under examination, "did not like the trouble of going to chapel."
LJSpeech-1.1/wavs/LJ032-0085.wav|The Hidell signature on the notice of classification was in the handwriting of Oswald.
LJSpeech-1.1/wavs/LJ009-0037.wav|the schoolmaster and the juvenile prisoners being seated round the communion-table, opposite the pulpit.
LJSpeech-1.1/wavs/LJ006-0021.wav|Later on he had devoted himself to the personal investigation of the prisons of the United States.
LJSpeech-1.1/wavs/LJ006-0082.wav|and this particular official took excellent care to select as residents for his own ward those most suitable from his own point of view.
LJSpeech-1.1/wavs/LJ016-0380.wav|with hope to the last. There is always the chance of a flaw in the indictment, of a missing witness, or extenuating circumstances.
LJSpeech-1.1/wavs/LJ019-0344.wav|monitor, or schoolmaster, nor to be engaged in the service of any officer of the prison.
LJSpeech-1.1/wavs/LJ019-0161.wav|These disciplinary improvements were, however, only slowly and gradually introduced.
LJSpeech-1.1/wavs/LJ028-0145.wav|And here I may not omit to tell the use to which the mould dug out of the great moat was turned, nor the manner wherein the wall was wrought.
LJSpeech-1.1/wavs/LJ018-0349.wav|His disclaimer, distinct and detailed on every point, was intended simply for effect.
LJSpeech-1.1/wavs/LJ043-0010.wav|Some of the members of that group saw a good deal of the Oswalds through the fall of nineteen sixty-three,
LJSpeech-1.1/wavs/LJ027-0178.wav|These were undoubtedly perennibranchs. In the Permian and Triassic higher forms appeared, which were certainly caducibranch.
LJSpeech-1.1/wavs/LJ041-0070.wav|He did not rise above the rank of private first class, even though he had passed a qualifying examination for the rank of corporal.
LJSpeech-1.1/wavs/LJ008-0266.wav|Thus in the years between May first, eighteen twenty-seven, and thirtieth April, eighteen thirty-one,
LJSpeech-1.1/wavs/LJ021-0091.wav|In this recent reorganization we have recognized three distinct functions:
LJSpeech-1.1/wavs/LJ019-0129.wav|which marked the growth of public interest in prison affairs, and which was the germ of the new system
LJSpeech-1.1/wavs/LJ018-0215.wav|William Roupell was the eldest but illegitimate son of a wealthy man who subsequently married Roupell's mother, and had further legitimate issue.
LJSpeech-1.1/wavs/LJ015-0194.wav|and behaved so as to justify a belief that he had been a jail-bird all his life.
LJSpeech-1.1/wavs/LJ016-0137.wav|that numbers of men, "lifers," and others with ten, fourteen, or twenty years to do, can be trusted to work out of doors without bolts and bars
LJSpeech-1.1/wavs/LJ002-0289.wav|the latter raised eighteen pence among them to pay for a truss of straw for the poor woman to lie on.
LJSpeech-1.1/wavs/LJ023-0016.wav|In nineteen thirty-three you and I knew that we must never let our economic system get completely out of joint again
LJSpeech-1.1/wavs/LJ011-0141.wav|There were at the moment in Newgate six convicts sentenced to death for forging wills.
LJSpeech-1.1/wavs/LJ016-0283.wav|to do them mere justice, there was at least till then a half-drunken ribald gaiety among the crowd that made them all akin."
LJSpeech-1.1/wavs/LJ035-0082.wav|The only interval was the time necessary to ride in the elevator from the second to the sixth floor and walk back to the southeast corner.
LJSpeech-1.1/wavs/LJ045-0194.wav|Anyone who was familiar with that area of Dallas would have known that the motorcade would probably pass the Texas School Book Depository to get from Main Street
LJSpeech-1.1/wavs/LJ009-0124.wav|occupied when they saw it last, but a few hours ago, by their comrades who are now dead;
LJSpeech-1.1/wavs/LJ030-0162.wav|In the Presidential Limousine
LJSpeech-1.1/wavs/LJ050-0223.wav|The plan provides for an additional two hundred five agents for the Secret Service. Seventeen of this number are proposed for the Protective Research Section;
LJSpeech-1.1/wavs/LJ008-0228.wav|their harsh and half-cracked voices full of maudlin, besotted sympathy for those about to die.
LJSpeech-1.1/wavs/LJ002-0096.wav|The eight courts above enumerated were well supplied with water;
LJSpeech-1.1/wavs/LJ018-0288.wav|After this the other conspirators traveled to obtain genuine bills and master the system of the leading houses at home and abroad.
LJSpeech-1.1/wavs/LJ002-0106.wav|in which latterly a copper had been fixed for the cooking of provisions sent in by charitable persons.
LJSpeech-1.1/wavs/LJ025-0129.wav|On each lobe of the bi-lobed leaf of Venus flytrap are three delicate filaments which stand out at right angles from the surface of the leaf.
LJSpeech-1.1/wavs/LJ044-0013.wav|Hands Off Cuba, end quote, an application form for, and a membership card in,
LJSpeech-1.1/wavs/LJ049-0115.wav|of the person who is actually in the exercise of the executive power, or
LJSpeech-1.1/wavs/LJ019-0145.wav|But reformation was only skin deep. Below the surface many of the old evils still rankled.
LJSpeech-1.1/wavs/LJ019-0355.wav|came up in all respects to modern requirements.
LJSpeech-1.1/wavs/LJ019-0289.wav|There was unrestrained association of untried and convicted, juvenile with adult prisoners, vagrants, misdemeanants, felons.
LJSpeech-1.1/wavs/LJ048-0222.wav|in Fort Worth, there occurred a breach of discipline by some members of the Secret Service who were officially traveling with the President.
LJSpeech-1.1/wavs/LJ016-0367.wav|Under the new system the whole of the arrangements from first to last fell upon the officers.
LJSpeech-1.1/wavs/LJ047-0097.wav|Agent Quigley did not know of Oswald's prior FBI record when he interviewed him,
LJSpeech-1.1/wavs/LJ007-0075.wav|as effectually to rebuke and abash the profane spirit of the more insolent and daring of the criminals.
LJSpeech-1.1/wavs/LJ047-0022.wav|provided by other agencies.
LJSpeech-1.1/wavs/LJ007-0085.wav|at Newgate and York Castle as long as five years; "at Ilchester and Morpeth for seven years; at Warwick for eight years,
LJSpeech-1.1/wavs/LJ047-0075.wav|Hosty had inquired earlier and found no evidence that it was functioning in the Dallas area.
LJSpeech-1.1/wavs/LJ008-0098.wav|One was the "yeoman of the halter," a Newgate official, the executioner's assistant, whom Mr. J. T. Smith, who was present at the execution,
LJSpeech-1.1/wavs/LJ017-0102.wav|The second attack was fatal, and ended in Cook's death from tetanus.
LJSpeech-1.1/wavs/LJ046-0105.wav|Second, the adequacy of other advance preparations for the security of the President, during his visit to Dallas,
LJSpeech-1.1/wavs/LJ018-0206.wav|He was a tall, slender man, with a long face and iron-gray hair.
LJSpeech-1.1/wavs/LJ012-0271.wav|Whether it was greed or a quarrel that drove Greenacre to the desperate deed remains obscure.
LJSpeech-1.1/wavs/LJ005-0086.wav|with such further separation as the justices should deem conducive to good order and discipline.
LJSpeech-1.1/wavs/LJ042-0097.wav|and considerably better living quarters than those accorded to Soviet citizens of equal age and station.
LJSpeech-1.1/wavs/LJ047-0126.wav|we would handle it in due course, in accord with the whole context of the investigation. End quote.
LJSpeech-1.1/wavs/LJ041-0022.wav|Oswald first wrote, quote, Edward Vogel, end quote, an obvious misspelling of Voebel's name,
LJSpeech-1.1/wavs/LJ015-0025.wav|The bank enjoyed an excellent reputation, it had a good connection, and was supposed to be perfectly sound.
LJSpeech-1.1/wavs/LJ012-0194.wav|But Burke and Hare had their imitators further south,
LJSpeech-1.1/wavs/LJ028-0416.wav|(if man may speak so confidently of His great impenetrable counsels), for an eternal Testimony of His great work in the confusion of Man's pride,
LJSpeech-1.1/wavs/LJ007-0130.wav|are all huddled together without discrimination, oversight, or control."
LJSpeech-1.1/wavs/LJ015-0005.wav|About this time Davidson and Gordon, the people above-mentioned,
LJSpeech-1.1/wavs/LJ016-0125.wav|with this, placed against the wall near the chevaux-de-frise, he made an escalade.
LJSpeech-1.1/wavs/LJ014-0224.wav|As Dwyer survived, Cannon escaped the death sentence, which was commuted to penal servitude for life.
LJSpeech-1.1/wavs/LJ005-0019.wav|refuted by abundant evidence, and having no foundation whatever in truth.
LJSpeech-1.1/wavs/LJ042-0221.wav|With either great ambivalence, or cold calculation he prepared completely different answers to the same questions.
LJSpeech-1.1/wavs/LJ001-0063.wav|which was generally more formally Gothic than the printing of the German workmen,
LJSpeech-1.1/wavs/LJ030-0006.wav|They took off in the Presidential plane, Air Force One, at eleven a.m., arriving at San Antonio at one:thirty p.m., Eastern Standard Time.
LJSpeech-1.1/wavs/LJ024-0054.wav|democracy will have failed far beyond the importance to it of any king of precedent concerning the judiciary.
LJSpeech-1.1/wavs/LJ006-0044.wav|the same callous indifference to the moral well-being of the prisoners, the same want of employment and of all disciplinary control.
LJSpeech-1.1/wavs/LJ039-0154.wav|four point eight to five point six seconds if the second shot missed,
LJSpeech-1.1/wavs/LJ050-0090.wav|they seem unduly restrictive in continuing to require some manifestation of animus against a Government official.
LJSpeech-1.1/wavs/LJ028-0421.wav|it was the beginning of the great collections of Babylonian antiquities in the museums of the Western world.
LJSpeech-1.1/wavs/LJ033-0205.wav|then I would say the possibility exists, these fibers could have come from this blanket, end quote.
LJSpeech-1.1/wavs/LJ019-0335.wav|The books and journals he was to keep were minutely specified, and his constant presence in or near the jail was insisted upon.
LJSpeech-1.1/wavs/LJ013-0045.wav|Wallace's relations warned him against his Liverpool friend,
LJSpeech-1.1/wavs/LJ037-0002.wav|Chapter four. The Assassin: Part six.
LJSpeech-1.1/wavs/LJ018-0159.wav|This was all the police wanted to know.
LJSpeech-1.1/wavs/LJ026-0140.wav|In the plant as in the animal metabolism must consist of anabolic and catabolic processes.
LJSpeech-1.1/wavs/LJ014-0171.wav|I will briefly describe one or two of the more remarkable murders in the years immediately following, then pass on to another branch of crime.
LJSpeech-1.1/wavs/LJ037-0007.wav|Three others subsequently identified Oswald from a photograph.
LJSpeech-1.1/wavs/LJ033-0174.wav|microscopic and UV (ultra violet) characteristics, end quote.
LJSpeech-1.1/wavs/LJ040-0110.wav|he apparently adjusted well enough there to have had an average, although gradually deteriorating, school record
LJSpeech-1.1/wavs/LJ039-0192.wav|he had a total of between four point eight and five point six seconds between the two shots which hit
LJSpeech-1.1/wavs/LJ032-0261.wav|When he appeared before the Commission, Michael Paine lifted the blanket
LJSpeech-1.1/wavs/LJ040-0097.wav|Lee was brought up in this atmosphere of constant money problems, and I am sure it had quite an effect on him, and also Robert, end quote.
LJSpeech-1.1/wavs/LJ037-0249.wav|Mrs. Earlene Roberts, the housekeeper at Oswald's roominghouse and the last person known to have seen him before he reached tenth Street and Patton Avenue,
LJSpeech-1.1/wavs/LJ016-0248.wav|Marwood was proud of his calling, and when questioned as to whether his process was satisfactory, replied that he heard "no complaints."
LJSpeech-1.1/wavs/LJ004-0083.wav|As Mr. Buxton pointed out, many old acts of parliament designed to protect the prisoner were still in full force.
LJSpeech-1.1/wavs/LJ014-0029.wav|This was Delarue's watch, fully identified as such, which Hocker told his brother Delarue had given him the morning of the murder.
LJSpeech-1.1/wavs/LJ021-0110.wav|have been best calculated to promote industrial recovery and a permanent improvement of business and labor conditions.
LJSpeech-1.1/wavs/LJ003-0107.wav|he slept in the same bed with a highwayman on one side, and a man charged with murder on the other.
LJSpeech-1.1/wavs/LJ039-0076.wav|Ronald Simmons, chief of the U.S. Army Infantry Weapons Evaluation Branch of the Ballistics Research Laboratory, said, quote,
LJSpeech-1.1/wavs/LJ016-0347.wav|had undoubtedly a solemn, impressive effect upon those outside.
LJSpeech-1.1/wavs/LJ001-0072.wav|After the end of the fifteenth century the degradation of printing, especially in Germany and Italy,
LJSpeech-1.1/wavs/LJ024-0018.wav|Consequently, although there never can be more than fifteen, there may be only fourteen, or thirteen, or twelve.
LJSpeech-1.1/wavs/LJ032-0180.wav|that the fibers were caught in the crevice of the rifle's butt plate, quote, in the recent past, end quote,
LJSpeech-1.1/wavs/LJ010-0083.wav|and measures taken to arrest them when their plans were so far developed that no doubt could remain as to their guilt.
LJSpeech-1.1/wavs/LJ002-0299.wav|and gave the garnish for the common side at that sum, which is five shillings more than Mr. Neild says was extorted on the common side.
LJSpeech-1.1/wavs/LJ048-0143.wav|the Secret Service did not at the time of the assassination have any established procedure governing its relationships with them.
LJSpeech-1.1/wavs/LJ012-0054.wav|Solomons, while waiting to appear in court, persuaded the turnkeys to take him to a public-house, where all might "refresh."
LJSpeech-1.1/wavs/LJ019-0270.wav|Vegetables, especially the potato, that most valuable anti-scorbutic, was too often omitted.
LJSpeech-1.1/wavs/LJ035-0164.wav|three minutes after the shooting.
LJSpeech-1.1/wavs/LJ014-0326.wav|Maltby and Co. would issue warrants on them deliverable to the importer, and the goods were then passed to be stored in neighboring warehouses.
LJSpeech-1.1/wavs/LJ001-0173.wav|The essential point to be remembered is that the ornament, whatever it is, whether picture or pattern-work, should form part of the page,
LJSpeech-1.1/wavs/LJ050-0056.wav|On December twenty-six, nineteen sixty-three, the FBI circulated additional instructions to all its agents,
LJSpeech-1.1/wavs/LJ003-0319.wav|provided only that their security was not jeopardized, and dependent upon the enforcement of another new rule,
LJSpeech-1.1/wavs/LJ006-0040.wav|The fact was that the years as they passed, nearly twenty in all, had worked but little permanent improvement in this detestable prison.
LJSpeech-1.1/wavs/LJ017-0231.wav|His body was found lying in a pool of blood in a night-dress, stabbed over and over again in the left side.
LJSpeech-1.1/wavs/LJ017-0226.wav|One half of the mutineers fell upon him unawares with handspikes and capstan-bars.
LJSpeech-1.1/wavs/LJ004-0239.wav|He had been committed for an offense for which he was acquitted.
LJSpeech-1.1/wavs/LJ048-0112.wav|The Commission also regards the security arrangements worked out by Lawson and Sorrels at Love Field as entirely adequate.
LJSpeech-1.1/wavs/LJ039-0125.wav|that Oswald was a good shot, somewhat better than or equal to -- better than the average let us say.
LJSpeech-1.1/wavs/LJ030-0196.wav|He cried out, quote, Oh, no, no, no. My God, they are going to kill us all, end quote,
LJSpeech-1.1/wavs/LJ010-0228.wav|He was released from Broadmoor in eighteen seventy-eight, and went abroad.
LJSpeech-1.1/wavs/LJ045-0228.wav|On the other hand, he could have traveled some distance with the money he did have and he did return to his room where he obtained his revolver.
LJSpeech-1.1/wavs/LJ028-0168.wav|in the other was the sacred precinct of Jupiter Belus,
LJSpeech-1.1/wavs/LJ021-0140.wav|and in such an effort we should be able to secure for employers and employees and consumers
LJSpeech-1.1/wavs/LJ009-0280.wav|Again the wretched creature succeeded in obtaining foothold, but this time on the left side of the drop.
LJSpeech-1.1/wavs/LJ003-0159.wav|To constitute this the aristocratic quarter, unwarrantable demands were made upon the space properly allotted to the female felons,
LJSpeech-1.1/wavs/LJ016-0274.wav|and the windows of the opposite houses, which commanded a good view, as usual fetched high prices.
LJSpeech-1.1/wavs/LJ035-0014.wav|it sounded high and I immediately kind of looked up,
LJSpeech-1.1/wavs/LJ033-0120.wav|which he believed was where the bag reached when it was laid on the seat with one edge against the door.
LJSpeech-1.1/wavs/LJ045-0015.wav|which Johnson said he did not receive until after the assassination. The letter said in part, quote,
LJSpeech-1.1/wavs/LJ003-0299.wav|the latter end of the nineteenth century, several of which still fall far short of our English ideal,
LJSpeech-1.1/wavs/LJ032-0206.wav|After comparing the rifle in the simulated photograph with the rifle in Exhibit Number one thirty-three A, Shaneyfelt testified, quote,
LJSpeech-1.1/wavs/LJ028-0494.wav|Between the several sections were wide spaces where foot soldiers and charioteers might fight.
LJSpeech-1.1/wavs/LJ005-0099.wav|and report at length upon the condition of the prisons of the country.
LJSpeech-1.1/wavs/LJ015-0144.wav|developed to a colossal extent the frauds he had already practiced as a subordinate.
LJSpeech-1.1/wavs/LJ019-0221.wav|It was intended as far as possible that, except awaiting trial, no prisoner should find himself relegated to Newgate.
LJSpeech-1.1/wavs/LJ003-0088.wav|in one, for seven years -- that of a man sentenced to death, for whom great interest had been made, but whom it was not thought right to pardon.
LJSpeech-1.1/wavs/LJ045-0216.wav|nineteen sixty-three, merely to disarm her and to provide a justification of sorts,
LJSpeech-1.1/wavs/LJ042-0135.wav|that he was not yet twenty years old when he went to the Soviet Union with such high hopes and not quite twenty-three when he returned bitterly disappointed.
LJSpeech-1.1/wavs/LJ049-0196.wav|On the other hand, it is urged that all features of the protection of the President and his family should be committed to an elite and independent corps.
LJSpeech-1.1/wavs/LJ018-0278.wav|This was the well and astutely devised plot of the brothers Bidwell,
LJSpeech-1.1/wavs/LJ030-0238.wav|and then looked around again and saw more of this movement, and so I proceeded to go to the back seat and get on top of him.
LJSpeech-1.1/wavs/LJ018-0309.wav|where probably the money still remains.
LJSpeech-1.1/wavs/LJ041-0199.wav|is shown most clearly by his employment relations after his return from the Soviet Union. Of course, he made his real problems worse to the extent
LJSpeech-1.1/wavs/LJ007-0076.wav|The lax discipline maintained in Newgate was still further deteriorated by the presence of two other classes of prisoners who ought never to have been inmates of such a jail.
LJSpeech-1.1/wavs/LJ039-0118.wav|He had high motivation. He had presumably a good to excellent rifle and good ammunition.
LJSpeech-1.1/wavs/LJ024-0019.wav|And there may be only nine.
LJSpeech-1.1/wavs/LJ008-0085.wav|The fire had not quite burnt out at twelve, in nearly four hours, that is to say.
LJSpeech-1.1/wavs/LJ018-0031.wav|This fixed the crime pretty certainly upon Müller, who had already left the country, thus increasing suspicion under which he lay.
LJSpeech-1.1/wavs/LJ030-0032.wav|Dallas police stood at intervals along the fence and Dallas plain clothes men mixed in the crowd.
LJSpeech-1.1/wavs/LJ050-0004.wav|General Supervision of the Secret Service
LJSpeech-1.1/wavs/LJ039-0096.wav|This is a definite advantage to the shooter, the vehicle moving directly away from him and the downgrade of the street, and he being in an elevated position
LJSpeech-1.1/wavs/LJ041-0195.wav|Oswald's interest in Marxism led some people to avoid him,
LJSpeech-1.1/wavs/LJ047-0158.wav|After a moment's hesitation, she told me that he worked at the Texas School Book Depository near the downtown area of Dallas.
LJSpeech-1.1/wavs/LJ050-0162.wav|In planning its data processing techniques,
LJSpeech-1.1/wavs/LJ001-0051.wav|and paying great attention to the "press work" or actual process of printing,
LJSpeech-1.1/wavs/LJ028-0136.wav|Of all the ancient descriptions of the famous walls and the city they protected, that of Herodotus is the fullest.
LJSpeech-1.1/wavs/LJ034-0134.wav|Shortly after the assassination Brennan noticed
LJSpeech-1.1/wavs/LJ019-0348.wav|Every facility was promised. The sanction of the Secretary of State would not be withheld if plans and estimates were duly submitted,
LJSpeech-1.1/wavs/LJ010-0219.wav|While one stood over the fire with the papers, another stood with lighted torch to fire the house.
LJSpeech-1.1/wavs/LJ011-0245.wav|Mr. Mullay called again, taking with him five hundred pounds in cash. Howard discovered this, and his manner was very suspicious;
LJSpeech-1.1/wavs/LJ030-0035.wav|Organization of the Motorcade
LJSpeech-1.1/wavs/LJ044-0135.wav|While he had drawn some attention to himself and had actually appeared on two radio programs, he had been attacked by Cuban exiles and arrested,
LJSpeech-1.1/wavs/LJ045-0090.wav|He was very much interested in autobiographical works of outstanding statesmen of the United States, to whom his wife thought he compared himself.
LJSpeech-1.1/wavs/LJ026-0034.wav|When any given "protist" has to be classified the case must be decided on its individual merits;
LJSpeech-1.1/wavs/LJ045-0092.wav|as to the fact that he was an outstanding man, end quote.
LJSpeech-1.1/wavs/LJ017-0050.wav|Palmer, who was only thirty-one at the time of his trial, was in appearance short and stout, with a round head
LJSpeech-1.1/wavs/LJ036-0104.wav|Whaley picked Oswald.
LJSpeech-1.1/wavs/LJ019-0055.wav|High authorities were in favor of continuous separation.
LJSpeech-1.1/wavs/LJ010-0030.wav|The brutal ferocity of the wild beast once aroused, the same means, the same weapons were employed to do the dreadful deed,
LJSpeech-1.1/wavs/LJ038-0047.wav|Some of the officers saw Oswald strike McDonald with his fist. Most of them heard a click which they assumed to be a click of the hammer of the revolver.
LJSpeech-1.1/wavs/LJ009-0074.wav|Let us pass on.
LJSpeech-1.1/wavs/LJ048-0069.wav|Efforts made by the Bureau since the assassination, on the other hand,
LJSpeech-1.1/wavs/LJ003-0211.wav|They were never left quite alone for fear of suicide, and for the same reason they were searched for weapons or poisons.
LJSpeech-1.1/wavs/LJ048-0053.wav|It is the conclusion of the Commission that, even in the absence of Secret Service criteria
LJSpeech-1.1/wavs/LJ033-0093.wav|Frazier estimated that the bag was two feet long, quote, give and take a few inches, end quote, and about five or six inches wide.
LJSpeech-1.1/wavs/LJ006-0149.wav|The turnkeys left the prisoners very much to themselves, never entering the wards after locking-up time, at dusk, till unlocking next morning,
LJSpeech-1.1/wavs/LJ018-0211.wav|The false coin was bought by an agent from an agent, and dealings were carried on secretly at the "Clock House" in Seven Dials.
LJSpeech-1.1/wavs/LJ008-0054.wav|This contrivance appears to have been copied with improvements from that which had been used in Dublin at a still earlier date,
LJSpeech-1.1/wavs/LJ040-0052.wav|that his commitment to Marxism was an important factor influencing his conduct during his adult years.
LJSpeech-1.1/wavs/LJ028-0023.wav|Two weeks pass, and at last you stand on the eastern edge of the plateau
LJSpeech-1.1/wavs/LJ009-0184.wav|Lord Ferrers' body was brought to Surgeons' Hall after execution in his own carriage and six;
LJSpeech-1.1/wavs/LJ005-0252.wav|A committee was appointed, under the presidency of the Duke of Richmond
LJSpeech-1.1/wavs/LJ015-0266.wav|has probably no parallel in the annals of crime. Saward himself is a striking and in some respects an unique figure in criminal history.
LJSpeech-1.1/wavs/LJ017-0059.wav|even after sentence, and until within a few hours of execution, he was buoyed up with the hope of reprieve.
LJSpeech-1.1/wavs/LJ024-0034.wav|What do they mean by the words "packing the Court"?
LJSpeech-1.1/wavs/LJ016-0089.wav|He was engaged in whitewashing and cleaning; the officer who had him in charge left him on the stairs leading to the gallery.
LJSpeech-1.1/wavs/LJ039-0227.wav|with two hits, within four point eight and five point six seconds.
LJSpeech-1.1/wavs/LJ001-0096.wav|have now come into general use and are obviously a great improvement on the ordinary "modern style" in use in England, which is in fact the Bodoni type
LJSpeech-1.1/wavs/LJ018-0129.wav|who threatened to betray the theft. But Brewer, either before or after this, succumbed to temptation,
LJSpeech-1.1/wavs/LJ010-0157.wav|and that, as he was starving, he had resolved on this desperate deed,
LJSpeech-1.1/wavs/LJ038-0264.wav|He concluded that, quote, the general rifling characteristics of the rifle are of the same type as those found on the bullet
LJSpeech-1.1/wavs/LJ031-0165.wav|When security arrangements at the airport were complete, the Secret Service made the necessary arrangements for the Vice President to leave the hospital.
LJSpeech-1.1/wavs/LJ018-0244.wav|The effect of establishing the forgeries would be to restore to the Roupell family lands for which a price had already been paid
LJSpeech-1.1/wavs/LJ007-0071.wav|in the face of impediments confessedly discouraging
LJSpeech-1.1/wavs/LJ028-0340.wav|Such of the Babylonians as witnessed the treachery took refuge in the temple of Jupiter Belus;
LJSpeech-1.1/wavs/LJ017-0164.wav|with the idea of subjecting her to the irritant poison slowly but surely until the desired effect, death, was achieved.
LJSpeech-1.1/wavs/LJ048-0197.wav|I then told the officers that their primary duty was traffic and crowd control and that they should be alert for any persons who might attempt to throw anything
LJSpeech-1.1/wavs/LJ013-0098.wav|Mr. Oxenford having denied that he had made any transfer of stock, the matter was at once put into the hands of the police.
LJSpeech-1.1/wavs/LJ012-0049.wav|led him to think seriously of trying his fortunes in another land.
LJSpeech-1.1/wavs/LJ030-0014.wav|quote, that the crowd was about the same as the one which came to see him before but there were one hundred thousand extra people on hand who came to see Mrs. Kennedy.
LJSpeech-1.1/wavs/LJ014-0186.wav|A milliner's porter,
LJSpeech-1.1/wavs/LJ015-0027.wav|Yet even so early as the death of the first Sir John Paul,
LJSpeech-1.1/wavs/LJ047-0049.wav|Marina Oswald, however, recalled that her husband was upset by this interview.
LJSpeech-1.1/wavs/LJ012-0021.wav|at fourteen he was a pickpocket and a "duffer," or a seller of sham goods.
LJSpeech-1.1/wavs/LJ003-0140.wav|otherwise he would have been stripped of his clothes. End quote.
LJSpeech-1.1/wavs/LJ042-0130.wav|Shortly thereafter, less than eighteen months after his defection, about six weeks before he met Marina Prusakova,
LJSpeech-1.1/wavs/LJ019-0180.wav|His letter to the Corporation, under date fourth June,
LJSpeech-1.1/wavs/LJ017-0108.wav|He was struck with the appearance of the corpse, which was not emaciated, as after a long disease ending in death;
LJSpeech-1.1/wavs/LJ006-0268.wav|Women saw men if they merely pretended to be wives; even boys were visited by their sweethearts.
LJSpeech-1.1/wavs/LJ044-0125.wav|of residence in the U.S.S.R. against any cause which I join, by association,
LJSpeech-1.1/wavs/LJ015-0231.wav|It was Tester's business, who had access to the railway company's books, to watch for this.
LJSpeech-1.1/wavs/LJ002-0225.wav|The rentals of rooms and fees went to the warden, whose income was two thousand three hundred seventy-two pounds.
LJSpeech-1.1/wavs/LJ034-0072.wav|The employees raced the elevators to the first floor. Givens saw Oswald standing at the gate on the fifth floor as the elevator went by.
LJSpeech-1.1/wavs/LJ045-0033.wav|He began to treat me better. He helped me more -- although he always did help. But he was more attentive, end quote.
LJSpeech-1.1/wavs/LJ031-0058.wav|to infuse blood and fluids into the circulatory system.
LJSpeech-1.1/wavs/LJ029-0197.wav|During November the Dallas papers reported frequently on the plans for protecting the President, stressing the thoroughness of the preparations.
LJSpeech-1.1/wavs/LJ043-0047.wav|Oswald and his family lived for a brief period with his mother at her urging, but Oswald soon decided to move out.
LJSpeech-1.1/wavs/LJ021-0026.wav|seems necessary to produce the same result of justice and right conduct
LJSpeech-1.1/wavs/LJ003-0230.wav|The prison allowances were eked out by the broken victuals generously given by several eating-house keepers in the city,
LJSpeech-1.1/wavs/LJ037-0252.wav|Ted Callaway, who saw the gunman moments after the shooting, testified that Commission Exhibit Number one sixty-two
LJSpeech-1.1/wavs/LJ031-0008.wav|Meanwhile, Chief Curry ordered the police base station to notify Parkland Hospital that the wounded President was en route.
LJSpeech-1.1/wavs/LJ030-0021.wav|all one had to do was get a high building someday with a telescopic rifle, and there was nothing anybody could do to defend against such an attempt.
LJSpeech-1.1/wavs/LJ046-0179.wav|being reviewed regularly.
LJSpeech-1.1/wavs/LJ025-0118.wav|and that, however diverse may be the fabrics or tissues of which their bodies are composed, all these varied structures result
LJSpeech-1.1/wavs/LJ028-0278.wav|Zopyrus, when they told him, not thinking that it could be true, went and saw the colt with his own eyes;
LJSpeech-1.1/wavs/LJ007-0090.wav|Not only did their presence tend greatly to interfere with the discipline of the prison, but their condition was deplorable in the extreme.
LJSpeech-1.1/wavs/LJ045-0045.wav|that she would be able to leave the Soviet Union. Marina Oswald has denied this.
LJSpeech-1.1/wavs/LJ028-0289.wav|For he cut off his own nose and ears, and then, clipping his hair close and flogging himself with a scourge,
LJSpeech-1.1/wavs/LJ009-0276.wav|Calcraft, the moment he had adjusted the cap and rope, ran down the steps, drew the bolt, and disappeared.
LJSpeech-1.1/wavs/LJ031-0122.wav|treated the gunshot wound in the left thigh.
LJSpeech-1.1/wavs/LJ016-0205.wav|he received a retaining fee of five pounds, five shillings, with the usual guinea for each job;
LJSpeech-1.1/wavs/LJ019-0248.wav|leading to an inequality, uncertainty, and inefficiency of punishment productive of the most prejudicial results.
LJSpeech-1.1/wavs/LJ033-0183.wav|it was not surprising that the replica sack made on December one, nineteen sixty-three,
LJSpeech-1.1/wavs/LJ037-0001.wav|Report of the President's Commission on the Assassination of President Kennedy. The Warren Commission Report. By The President's Commission on the Assassination of President Kennedy.
LJSpeech-1.1/wavs/LJ018-0218.wav|In eighteen fifty-five
LJSpeech-1.1/wavs/LJ001-0102.wav|Here and there a book is printed in France or Germany with some pretension to good taste,
LJSpeech-1.1/wavs/LJ007-0125.wav|It was diverted from its proper uses, and, as the "place of the greatest comfort," was allotted to persons who should not have been sent to Newgate at all.
LJSpeech-1.1/wavs/LJ050-0022.wav|A formal and thorough description of the responsibilities of the advance agent is now in preparation by the Service.
LJSpeech-1.1/wavs/LJ028-0212.wav|On the night of the eleventh day Gobrias killed the son of the King.
LJSpeech-1.1/wavs/LJ028-0357.wav|yet we may be sure that Babylon was taken by Darius only by use of stratagem. Its walls were impregnable.
LJSpeech-1.1/wavs/LJ014-0199.wav|there was no case to make out; why waste money on lawyers for the defense? His demeanor was cool and collected throughout;
LJSpeech-1.1/wavs/LJ016-0077.wav|A man named Lears, under sentence of transportation for an attempt at murder on board ship, got up part of the way,
LJSpeech-1.1/wavs/LJ009-0194.wav|and that executors or persons having lawful possession of the bodies
LJSpeech-1.1/wavs/LJ014-0094.wav|Discovery of the murder came in this wise. O'Connor, a punctual and well-conducted official, was at once missed at the London Docks.
LJSpeech-1.1/wavs/LJ001-0079.wav|Caslon's type is clear and neat, and fairly well designed;
LJSpeech-1.1/wavs/LJ026-0052.wav|In the nutrition of the animal the most essential and characteristic part of the food supply is derived from vegetable
LJSpeech-1.1/wavs/LJ013-0005.wav|One of the earliest of the big operators in fraudulent finance was Edward Beaumont Smith,
LJSpeech-1.1/wavs/LJ033-0072.wav|I then stepped off of it and the officer picked it up in the middle and it bent so.
LJSpeech-1.1/wavs/LJ036-0067.wav|According to McWatters, the Beckley bus was behind the Marsalis bus, but he did not actually see it.
LJSpeech-1.1/wavs/LJ025-0098.wav|and it is probable that amyloid substances are universally present in the animal organism, though not in the precise form of starch.
LJSpeech-1.1/wavs/LJ005-0257.wav|during which time a host of witnesses were examined, and the committee presented three separate reports,
LJSpeech-1.1/wavs/LJ004-0024.wav|Thus in eighteen thirteen the exaction of jail fees had been forbidden by law,
LJSpeech-1.1/wavs/LJ049-0154.wav|In eighteen ninety-four,
LJSpeech-1.1/wavs/LJ039-0059.wav|(three) his experience and practice after leaving the Marine Corps, and (four) the accuracy of the weapon and the quality of the ammunition.
LJSpeech-1.1/wavs/LJ007-0150.wav|He is allowed intercourse with prostitutes who, in nine cases out of ten, have originally conduced to his ruin;
LJSpeech-1.1/wavs/LJ015-0001.wav|Chronicles of Newgate, Volume two. By Arthur Griffiths. Section eighteen: Newgate notorieties continued, part three.
LJSpeech-1.1/wavs/LJ010-0158.wav|feeling, as he said, that he might as well be shot or hanged as remain in such a state.
LJSpeech-1.1/wavs/LJ010-0281.wav|who had borne the Queen's commission, first as cornet, and then lieutenant, in the tenth Hussars.
LJSpeech-1.1/wavs/LJ033-0055.wav|and he could disassemble it more rapidly.
LJSpeech-1.1/wavs/LJ015-0218.wav|A new accomplice was now needed within the company's establishment, and Pierce looked about long before he found the right person.
LJSpeech-1.1/wavs/LJ027-0006.wav|In all these lines the facts are drawn together by a strong thread of unity.
LJSpeech-1.1/wavs/LJ016-0049.wav|He had here completed his ascent.
LJSpeech-1.1/wavs/LJ006-0088.wav|It was not likely that a system which left innocent men -- for the great bulk of new arrivals were still untried
LJSpeech-1.1/wavs/LJ042-0133.wav|a great change must have occurred in Oswald's thinking to induce him to return to the United States.
LJSpeech-1.1/wavs/LJ045-0234.wav|While he did become enraged at at least one point in his interrogation,
LJSpeech-1.1/wavs/LJ046-0033.wav|The adequacy of existing procedures can fairly be assessed only after full consideration of the difficulty of the protective assignment,
LJSpeech-1.1/wavs/LJ037-0061.wav|and having, quote, somewhat bushy, end quote, hair.
LJSpeech-1.1/wavs/LJ032-0025.wav|the officers of Klein's discovered that a rifle bearing serial number C two seven six six had been shipped to one A. Hidell,
LJSpeech-1.1/wavs/LJ047-0197.wav|in view of all the information concerning Oswald in its files, should have alerted the Secret Service to Oswald's presence in Dallas
LJSpeech-1.1/wavs/LJ018-0130.wav|and stole paper on a much larger scale than Brown.
LJSpeech-1.1/wavs/LJ005-0265.wav|It was recommended that the dietaries should be submitted and approved like the rules; that convicted prisoners should not receive any food but the jail allowance;
LJSpeech-1.1/wavs/LJ044-0105.wav|He presented Arnold Johnson, Gus Hall,
LJSpeech-1.1/wavs/LJ015-0043.wav|This went on for some time, and might never have been discovered had some good stroke of luck provided any of the partners
LJSpeech-1.1/wavs/LJ030-0125.wav|On several occasions when the Vice President's car was slowed down by the throng, Special Agent Youngblood stepped out to hold the crowd back.
LJSpeech-1.1/wavs/LJ043-0140.wav|He also studied Dallas bus schedules to prepare for his later use of buses to travel to and from General Walker's house.
LJSpeech-1.1/wavs/LJ002-0220.wav|In consequence of these disclosures, both Bambridge and Huggin, his predecessor in the office, were committed to Newgate,
LJSpeech-1.1/wavs/LJ034-0117.wav|At one:twenty-nine p.m. the police radio reported
LJSpeech-1.1/wavs/LJ018-0276.wav|The first plot was against Mr. Harry Emmanuel, but he escaped, and the attempt was made upon Loudon and Ryder.
LJSpeech-1.1/wavs/LJ004-0077.wav|nor has he a right to poison or starve his fellow-creatures."
LJSpeech-1.1/wavs/LJ042-0194.wav|they should not be confused with slowness, indecision or fear. Only the intellectually fearless could even be remotely attracted to our doctrine,
LJSpeech-1.1/wavs/LJ029-0114.wav|The route chosen from the airport to Main Street was the normal one, except where Harwood Street was selected as the means of access to Main Street
LJSpeech-1.1/wavs/LJ014-0194.wav|The policemen were now in possession;
LJSpeech-1.1/wavs/LJ032-0027.wav|According to its microfilm records, Klein's received an order for a rifle on March thirteen, nineteen sixty-three,
LJSpeech-1.1/wavs/LJ048-0289.wav|However, there is no evidence that these men failed to take any action in Dallas within their power that would have averted the tragedy.
LJSpeech-1.1/wavs/LJ043-0188.wav|that he was the leader of a fascist organization, and when I said that even though all of that might be true, just the same he had no right to take his life,
LJSpeech-1.1/wavs/LJ011-0118.wav|In eighteen twenty-nine the gallows claimed two more victims for this offense.
LJSpeech-1.1/wavs/LJ040-0201.wav|After her interview with Mrs. Oswald,
LJSpeech-1.1/wavs/LJ033-0056.wav|While the rifle may have already been disassembled when Oswald arrived home on Thursday, he had ample time that evening to disassemble the rifle
LJSpeech-1.1/wavs/LJ047-0073.wav|Hosty considered the information to be, quote, stale, unquote, by that time, and did not attempt to verify Oswald's reported statement.
LJSpeech-1.1/wavs/LJ001-0153.wav|only nominally so, however, in many cases, since when he uses a headline he counts that in,
LJSpeech-1.1/wavs/LJ007-0158.wav|or any kind of moral improvement was impossible; the prisoner's career was inevitably downward, till he struck the lowest depths.
LJSpeech-1.1/wavs/LJ028-0502.wav|The Ishtar gateway leading to the palace was encased with beautiful blue glazed bricks,
LJSpeech-1.1/wavs/LJ028-0226.wav|Though Herodotus wrote nearly a hundred years after Babylon fell, his story seems to bear the stamp of truth.
LJSpeech-1.1/wavs/LJ010-0038.wav|as there had been before; as in the year eighteen forty-nine, a year memorable for the Rush murders at Norwich,
LJSpeech-1.1/wavs/LJ019-0241.wav|But in the interval very comprehensive and, I think it must be admitted, salutary changes were successively introduced into the management of prisons.
LJSpeech-1.1/wavs/LJ001-0094.wav|were induced to cut punches for a series of "old style" letters.
LJSpeech-1.1/wavs/LJ001-0015.wav|the forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves.
LJSpeech-1.1/wavs/LJ047-0015.wav|From defection to return to Fort Worth.
LJSpeech-1.1/wavs/LJ044-0139.wav|since there was no background to the New Orleans FPCC, quote, organization, end quote, which consisted solely of Oswald.
LJSpeech-1.1/wavs/LJ050-0031.wav|that the Secret Service consciously set about the task of inculcating and maintaining the highest standard of excellence and esprit, for all of its personnel.
LJSpeech-1.1/wavs/LJ050-0235.wav|It has also used other Federal law enforcement agents during Presidential visits to cities in which such agents are stationed.
LJSpeech-1.1/wavs/LJ050-0137.wav|FBI, and the Secret Service.
LJSpeech-1.1/wavs/LJ031-0109.wav|At one:thirty-five p.m., after Governor Connally had been moved to the operating room, Dr. Shaw started the first operation
LJSpeech-1.1/wavs/LJ031-0041.wav|He noted that the President was blue-white or ashen in color; had slow, spasmodic, agonal respiration without any coordination;
LJSpeech-1.1/wavs/LJ021-0139.wav|There should be at least a full and fair trial given to these means of ending industrial warfare;
LJSpeech-1.1/wavs/LJ029-0004.wav|The narrative of these events is based largely on the recollections of the participants,
LJSpeech-1.1/wavs/LJ023-0122.wav|It was said in last year's Democratic platform,
LJSpeech-1.1/wavs/LJ005-0264.wav|inspectors of prisons should be appointed, who should visit all the prisons from time to time and report to the Secretary of State.
LJSpeech-1.1/wavs/LJ002-0105.wav|and beyond it was a room called the "wine room," because formerly used for the sale of wine, but
LJSpeech-1.1/wavs/LJ017-0035.wav|in the interests and for the due protection of the public, that the fullest and fairest inquiry should be made,
LJSpeech-1.1/wavs/LJ048-0252.wav|Three of these agents occupied positions on the running boards of the car, and the fourth was seated in the car.
LJSpeech-1.1/wavs/LJ013-0109.wav|The proceeds of the robbery were lodged in a Boston bank,
LJSpeech-1.1/wavs/LJ039-0139.wav|Oswald obtained a hunting license, joined a hunting club and went hunting about six times, as discussed more fully in chapter six.
LJSpeech-1.1/wavs/LJ044-0047.wav|that anyone ever attacked any street demonstration in which Oswald was involved, except for the Bringuier incident mentioned above,
LJSpeech-1.1/wavs/LJ016-0417.wav|Catherine Wilson, the poisoner, was reserved and reticent to the last, expressing no contrition, but also no fear --
LJSpeech-1.1/wavs/LJ045-0178.wav|he left his wedding ring in a cup on the dresser in his room. He also left one hundred seventy dollars in a wallet in one of the dresser drawers.
LJSpeech-1.1/wavs/LJ009-0172.wav|While in London, for instance, in eighteen twenty-nine, twenty-four persons had been executed for crimes other than murder,
LJSpeech-1.1/wavs/LJ049-0202.wav|incident to its responsibilities.
LJSpeech-1.1/wavs/LJ032-0103.wav|The name "Hidell" was stamped on some of the "Chapter's" printed literature and on the membership application blanks.
LJSpeech-1.1/wavs/LJ013-0091.wav|and Elder had to be assisted by two bank porters, who carried it for him to a carriage waiting near the Mansion House.
LJSpeech-1.1/wavs/LJ037-0208.wav|nineteen dollars, ninety-five cents, plus one dollar, twenty-seven cents shipping charge, had been collected from the consignee, Hidell.
LJSpeech-1.1/wavs/LJ014-0128.wav|her hair was dressed in long crepe bands. She had lace ruffles at her wrist, and wore primrose-colored kid gloves.
LJSpeech-1.1/wavs/LJ015-0007.wav|This affected Cole's credit, and ugly reports were in circulation charging him with the issue of simulated warrants.
LJSpeech-1.1/wavs/LJ036-0169.wav|he would have reached his destination at approximately twelve:fifty-four p.m.
LJSpeech-1.1/wavs/LJ021-0040.wav|The second step we have taken in the restoration of normal business enterprise
LJSpeech-1.1/wavs/LJ015-0036.wav|The bank was already insolvent,
LJSpeech-1.1/wavs/LJ034-0041.wav|Although Bureau experiments had shown that twenty-four hours was a likely maximum time, Latona stated
LJSpeech-1.1/wavs/LJ009-0192.wav|The dissection of executed criminals was abolished soon after the discovery of the crime of burking,
LJSpeech-1.1/wavs/LJ037-0248.wav|The eyewitnesses vary in their identification of the jacket.
LJSpeech-1.1/wavs/LJ015-0289.wav|As each transaction was carried out from a different address, and a different messenger always employed,
LJSpeech-1.1/wavs/LJ005-0072.wav|After a few years of active exertion the Society was rewarded by fresh legislation.
LJSpeech-1.1/wavs/LJ023-0047.wav|The three horses are, of course, the three branches of government -- the Congress, the Executive and the courts.
LJSpeech-1.1/wavs/LJ009-0126.wav|Hardly any one.
LJSpeech-1.1/wavs/LJ034-0097.wav|The window was approximately one hundred twenty feet away.
LJSpeech-1.1/wavs/LJ028-0462.wav|They were laid in bitumen.
LJSpeech-1.1/wavs/LJ046-0055.wav|It is now possible for Presidents to travel the length and breadth of a land far larger than the United States
LJSpeech-1.1/wavs/LJ019-0371.wav|Yet the law was seldom if ever enforced.
LJSpeech-1.1/wavs/LJ039-0207.wav|Although all of the shots were a few inches high and to the right of the target,
LJSpeech-1.1/wavs/LJ002-0174.wav|Mr. Buxton's friends at once paid the forty shillings, and the boy was released.
LJSpeech-1.1/wavs/LJ016-0233.wav|In his own profession
LJSpeech-1.1/wavs/LJ026-0108.wav|It is clear that there are upward and downward currents of water containing food (comparable to blood of an animal),
LJSpeech-1.1/wavs/LJ038-0035.wav|Oswald rose from his seat, bringing up both hands.
LJSpeech-1.1/wavs/LJ026-0148.wav|water which is lost by evaporation, especially from the leaf surface through the stomata;
LJSpeech-1.1/wavs/LJ001-0186.wav|the position of our Society that a work of utility might be also a work of art, if we cared to make it so.
LJSpeech-1.1/wavs/LJ016-0264.wav|The upturned faces of the eager spectators resembled those of the 'gods' at Drury Lane on Boxing Night;
LJSpeech-1.1/wavs/LJ009-0041.wav|The occupants of this terrible black pew were the last always to enter the chapel.
LJSpeech-1.1/wavs/LJ010-0297.wav|But there were other notorious cases of forgery.
LJSpeech-1.1/wavs/LJ040-0018.wav|the Commission is not able to reach any definite conclusions as to whether or not he was, quote, sane, unquote, under prevailing legal standards.
LJSpeech-1.1/wavs/LJ005-0253.wav|"to inquire into and report upon the several jails and houses of correction in the counties, cities, and corporate towns within England and Wales
LJSpeech-1.1/wavs/LJ027-0176.wav|Fishes first appeared in the Devonian and Upper Silurian in very reptilian or rather amphibian forms.
LJSpeech-1.1/wavs/LJ034-0035.wav|The position of this palmprint on the carton was parallel with the long axis of the box, and at right angles with the short axis;
LJSpeech-1.1/wavs/LJ016-0054.wav|But he did not like the risk of entering a room by the fireplace, and the chances of detection it offered.
LJSpeech-1.1/wavs/LJ018-0262.wav|Roupell received the announcement with a cheerful countenance,
LJSpeech-1.1/wavs/LJ044-0237.wav|with thirteen dollars, eighty-seven cents when considerably greater resources were available to him.
LJSpeech-1.1/wavs/LJ034-0166.wav|Two other witnesses were able to offer partial descriptions of a man they saw in the southeast corner window
LJSpeech-1.1/wavs/LJ016-0238.wav|"just to steady their legs a little;" in other words, to add his weight to that of the hanging bodies.
LJSpeech-1.1/wavs/LJ042-0198.wav|The discussion above has already set forth examples of his expression of hatred for the United States.
LJSpeech-1.1/wavs/LJ031-0189.wav|At two:thirty-eight p.m., Eastern Standard Time, Lyndon Baines Johnson took the oath of office as the thirty-sixth President of the United States.
LJSpeech-1.1/wavs/LJ050-0084.wav|or, quote, other high government officials in the nature of a complaint coupled with an expressed or implied determination to use a means,
LJSpeech-1.1/wavs/LJ044-0158.wav|As for my return entrance visa please consider it separately. End quote.
LJSpeech-1.1/wavs/LJ045-0082.wav|it appears that Marina Oswald also complained that her husband was not able to provide more material things for her.
LJSpeech-1.1/wavs/LJ045-0190.wav|appeared in The Dallas Times Herald on November fifteen, nineteen sixty-three.
LJSpeech-1.1/wavs/LJ035-0155.wav|The only exit from the office in the direction Oswald was moving was through the door to the front stairway.
LJSpeech-1.1/wavs/LJ044-0004.wav|Political Activities
LJSpeech-1.1/wavs/LJ046-0016.wav|The Commission has not undertaken a comprehensive examination of all facets of this subject;
LJSpeech-1.1/wavs/LJ019-0368.wav|The latter too was to be laid before the House of Commons.
LJSpeech-1.1/wavs/LJ010-0062.wav|But they proceeded in all seriousness, and would have shrunk from no outrage or atrocity in furtherance of their foolhardy enterprise.
LJSpeech-1.1/wavs/LJ033-0159.wav|It was from Oswald's right hand, in which he carried the long package as he walked from Frazier's car to the building.
LJSpeech-1.1/wavs/LJ002-0171.wav|The boy declared he saw no one, and accordingly passed through without paying the toll of a penny.
LJSpeech-1.1/wavs/LJ002-0298.wav|in his evidence in eighteen fourteen, said it was more,
LJSpeech-1.1/wavs/LJ012-0219.wav|and in one corner, at some depth, a bundle of clothes were unearthed, which, with a hairy cap,
LJSpeech-1.1/wavs/LJ017-0190.wav|After this came the charge of administering oil of vitriol, which failed, as has been described.
LJSpeech-1.1/wavs/LJ019-0179.wav|This, with a scheme for limiting the jail to untried prisoners, had been urgently recommended by Lord John Russell in eighteen thirty.
LJSpeech-1.1/wavs/LJ050-0188.wav|each patrolman might be given a prepared booklet of instructions explaining what is expected of him. The Secret Service has expressed concern
LJSpeech-1.1/wavs/LJ006-0043.wav|The disgraceful overcrowding had been partially ended, but the same evils of indiscriminate association were still present; there was the old neglect of decency,
LJSpeech-1.1/wavs/LJ029-0060.wav|A number of people who resembled some of those in the photographs were placed under surveillance at the Trade Mart.
LJSpeech-1.1/wavs/LJ019-0052.wav|Both systems came to us from the United States. The difference was really more in degree than in principle,
LJSpeech-1.1/wavs/LJ037-0081.wav|Later in the day each woman found an empty shell on the ground near the house. These two shells were delivered to the police.
LJSpeech-1.1/wavs/LJ048-0200.wav|paying particular attention to the crowd for any unusual activity.
LJSpeech-1.1/wavs/LJ016-0426.wav|come along, gallows.
LJSpeech-1.1/wavs/LJ008-0182.wav|A tremendous crowd assembled when Bellingham was executed in eighteen twelve for the murder of Spencer Percival, at that time prime minister;
LJSpeech-1.1/wavs/LJ043-0107.wav|Upon moving to New Orleans on April twenty-four, nineteen sixty-three,
LJSpeech-1.1/wavs/LJ006-0084.wav|and so numerous were his opportunities of showing favoritism, that all the prisoners may be said to be in his power.
LJSpeech-1.1/wavs/LJ025-0081.wav|has no permanent digestive cavity or mouth, but takes in its food anywhere and digests, so to speak, all over its body.
LJSpeech-1.1/wavs/LJ019-0042.wav|These were either satisfied with a makeshift, and modified existing buildings, without close regard to their suitability, or for a long time did nothing at all.
LJSpeech-1.1/wavs/LJ047-0240.wav|They agree that Hosty told Revill
LJSpeech-1.1/wavs/LJ032-0012.wav|the resistance to arrest and the attempted shooting of another police officer by the man (Lee Harvey Oswald) subsequently accused of assassinating President Kennedy
LJSpeech-1.1/wavs/LJ050-0209.wav|The assistant to the Director of the FBI testified that
|
PyTorch/LanguageModeling/BART/bart/modeling | modeling | modeling_outputs | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from dataclasses import dataclass
from typing import List, Optional, Tuple
import torch
from utils.file_utils import ModelOutput
@dataclass
class BaseModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
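# Illustrative sketch: assuming ``ModelOutput`` from ``utils.file_utils`` mirrors
# the Hugging Face base class, these dataclasses support attribute, key, and index
# access over their non-``None`` fields, plus ``to_tuple()`` for paths that need
# plain tuples. All tensor shapes below are illustrative only.
def _example_base_model_output():
    hidden = torch.zeros(2, 4, 8)  # (batch_size, sequence_length, hidden_size)
    out = BaseModelOutput(last_hidden_state=hidden)
    assert out.last_hidden_state is hidden     # attribute access
    assert out["last_hidden_state"] is hidden  # dict-style access
    assert out[0] is hidden                    # tuple-style access (None fields are skipped)
    return out.to_tuple()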
@dataclass
class BaseModelOutputWithPooling(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithPast(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithCrossAttentions(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class Seq2SeqModelOutput(ModelOutput):
"""
Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
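# Illustrative sketch of how a ``Seq2SeqModelOutput`` is typically consumed:
# encoder-side fields are computed once and reused across decoding steps, while
# decoder-side fields change at every step. All shapes are illustrative only.
def _example_seq2seq_model_output():
    batch, src_len, tgt_len, hidden = 2, 6, 3, 8
    out = Seq2SeqModelOutput(
        last_hidden_state=torch.zeros(batch, tgt_len, hidden),          # decoder hidden states
        encoder_last_hidden_state=torch.zeros(batch, src_len, hidden),  # encoder hidden states
    )
    # An LM head reads the decoder states; generation loops keep
    # ``encoder_last_hidden_state`` fixed and only re-run the decoder.
    return out.last_hidden_state, out.encoder_last_hidden_state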
@dataclass
class CausalLMOutput(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class CausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
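# Illustrative sketch of the ``past_key_values`` layout documented above: one
# (key, value) pair per layer, each of shape (batch_size, num_heads,
# sequence_length, embed_size_per_head). All dimensions, including the
# vocabulary size, are illustrative only.
def _example_causal_lm_output_with_past(n_layers=2, batch=1, heads=4, seq=5, head_dim=16):
    past = tuple(
        (torch.zeros(batch, heads, seq, head_dim),   # cached keys for one layer
         torch.zeros(batch, heads, seq, head_dim))   # cached values for one layer
        for _ in range(n_layers)
    )
    out = CausalLMOutputWithPast(
        logits=torch.zeros(batch, 1, 50265),  # (batch_size, new_tokens, vocab_size)
        past_key_values=past,
    )
    # On the next decoding step the model is fed only the newest token together with
    # ``out.past_key_values``, so self-attention reuses the cached keys and values.
    return out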
@dataclass
class CausalLMOutputWithCrossAttentions(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Cross attentions weights after the attention softmax, used to compute the weighted average in the
cross-attention heads.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`torch.FloatTensor` tuples of length :obj:`config.n_layers`, with each tuple containing the
cached key, value states of the self-attention and the cross-attention layers if model is used in
encoder-decoder setting. Only relevant if ``config.is_decoder = True``.
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SequenceClassifierOutputWithPast(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class MaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Masked language modeling (MLM) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class Seq2SeqLMOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class NextSentencePredictorOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`next_sentence_label` is provided):
Next sequence prediction (classification) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class Seq2SeqSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class MultipleChoiceModelOutput(ModelOutput):
"""
Base class for outputs of multiple choice models.
Args:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class TokenClassifierOutput(ModelOutput):
"""
Base class for outputs of token classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class QuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of question answering models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
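# Illustrative sketch (not part of this module): turning start/end logits from a
# question answering output into a predicted answer span.
def _example_extract_answer_span(output):
    start_index = output.start_logits.argmax(dim=-1)  # shape: (batch_size,)
    end_index = output.end_logits.argmax(dim=-1)      # shape: (batch_size,)
    # A caller would then decode the input tokens between start_index and
    # end_index (typically after masking out non-context positions).
    return start_index, end_index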
@dataclass
class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence question answering models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None |
TensorFlow2/Classification/ConvNets/model | model | efficientnet_model_v2 | # Lint as: python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for EfficientNet v2 model.
[1] Mingxing Tan, Quoc V. Le
EfficientNetV2: Smaller Models and Faster Training.
arXiv preprint arXiv:2104.00298, https://arxiv.org/pdf/2104.00298v2.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from typing import Any, Dict, Optional, List, Text, Tuple
import copy
import tensorflow as tf
import horovod.tensorflow as hvd
from tensorflow import keras
from utils.optimizer_factory import GradientAccumulator
from model.layers import simple_swish, hard_swish, identity, gelu, get_activation
from model.blocks import conv2d_block, mb_conv_block, fused_mb_conv_block
from model.common_modules import round_filters, round_repeats, load_weights
from dataloader import preprocessing
from dataloader.dataset_factory import mixing_lite
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1 / 3.0,
'mode': 'fan_in',
'distribution': 'uniform'
}
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class Model(tf.keras.Model):
"""Wrapper class for an EfficientNet v2 Keras model.
Contains helper methods to build, manage, and save metadata about the model.
"""
def __init__(self,
config: Dict[Text, Any] = None):
"""Create an EfficientNet v2 model.
Args:
config: (optional) the main model parameters to create the model
"""
super().__init__()
self.config = config
if self.config.grad_accum_steps > 1:
self.grad_accumulator = GradientAccumulator()
self.gradients_gnorm = tf.Variable(0, trainable=False, dtype=tf.float32)
self.local_step = tf.Variable(initial_value=0, dtype=tf.int64, trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
input_channels = config.mparams.input_channels
# Consistent with channels-last format; will be permuted in _build if channels-first is requested.
input_shape = (None, None, input_channels) # Should handle any image size
image_input = tf.keras.layers.Input(shape=input_shape)
is_predict = "predict" in config.mode
if not is_predict:
mixup_input = tf.keras.layers.Input(shape=(1, 1, 1))
cutmix_input = tf.keras.layers.Input(shape=(None, None, 1))
is_tr_split = tf.keras.layers.Input(shape=(1)) # indicates whether we use tr or eval data loader
inputs = [image_input,mixup_input,cutmix_input,is_tr_split]
else:
inputs = [image_input]
output = self._build(inputs)
# Cast to float32 in case we have a different model dtype
output = tf.cast(output, tf.float32)
self.model = tf.keras.Model(inputs=inputs,outputs=output)
def call(self,data):
is_predict = "predict" in self.config.mode
if not is_predict:
x=data['image']
mixup_weights = data['mixup_weight']
cutmix_masks = data['cutmix_mask']
is_tr_split = data['is_tr_split']
return self.model([x,mixup_weights,cutmix_masks,is_tr_split])
else:
return self.model([data])
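# Note: in train/train_and_eval modes the data loader is expected to supply a dict
# with keys 'image', 'mixup_weight', 'cutmix_mask' and 'is_tr_split' (consumed
# above); in predict mode only the raw image batch is passed.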
def _build(self,
input: List[tf.keras.layers.Input]):
"""Creates an EfficientNet v2 graph given the model parameters.
This function is wrapped by the `Model` class to make a tf.keras.Model.
Args:
input: list of model inputs (the image batch plus, in train mode, mixup weights, cutmix masks and the train/eval split flag)
Returns:
the output of efficientnet v2
"""
config = self.config
depth_coefficient = config.mparams.depth_coefficient
blocks = config.mparams.blocks
stem_base_filters = config.mparams.stem_base_filters
top_base_filters = config.mparams.top_base_filters
activation = get_activation(config.mparams.activation)
dropout_rate = config.mparams.dropout_rate
drop_connect_rate = config.mparams.drop_connect_rate
num_classes = config.mparams.num_classes
input_channels = config.mparams.input_channels
rescale_input = config.mparams.rescale_input
data_format = tf.keras.backend.image_data_format()
dtype = config.mparams.dtype
weight_decay = config.weight_decay
weight_init = config.mparams.weight_init
train_batch_size = config.train_batch_size
do_mixup = config.mixup_alpha > 0
do_cutmix = config.cutmix_alpha > 0
def cond_mixing(args):
images,mixup_weights,cutmix_masks,is_tr_split = args
return tf.cond(tf.keras.backend.equal(is_tr_split[0],0),
lambda: images, # eval phase
lambda: mixing_lite(images,mixup_weights,cutmix_masks,train_batch_size, do_mixup, do_cutmix)) # tr phase
images = input[0]
x = images
if len(input) > 1:
# we get here only during train or train_and_eval modes
if self.config.defer_img_mixing:
# we get here only if we chose not to perform image mixing in the data loader
# image mixing on device further accelerates training
mixup_weights = input[1]
cutmix_masks = input[2]
is_tr_split = input[3]
x = tf.keras.layers.Lambda(cond_mixing)([images,mixup_weights,cutmix_masks,is_tr_split])
# data loader outputs data in the channels last format
if data_format == 'channels_first':
# Happens on GPU/TPU if available.
x = tf.keras.layers.Permute((3, 1, 2))(x)
if rescale_input:
# normalize inputs: (x - mean) / std
x = preprocessing.normalize_images(x,
mean_rgb=config.mparams.mean_rgb,
stddev_rgb=config.mparams.std_rgb,
num_channels=input_channels,
dtype=dtype,
data_format=data_format)
outputs = dict()
# Build stem
x = conv2d_block(x,
round_filters(stem_base_filters, config),
config,
kernel_size=[3, 3],  # 3x3 stem convolution
strides=[2, 2],  # stride-2 downsampling
activation=activation,
name='stem')
outputs['stem'] = x
# Build blocks
num_blocks_total = sum(
round_repeats(block['num_repeat'], depth_coefficient) for block in blocks)
block_num = 0
for stack_idx, block in enumerate(blocks):
assert block['num_repeat'] > 0
# Update block input and output filters based on depth multiplier
block.update({
'input_filters':round_filters(block['input_filters'], config),
'output_filters':round_filters(block['output_filters'], config),
'num_repeat':round_repeats(block['num_repeat'], depth_coefficient)})
# The first block needs to take care of stride and filter size increase
conv_block = fused_mb_conv_block if block['fused_conv'] else mb_conv_block
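# Stochastic depth: the drop-connect rate grows linearly with the block index,
# e.g. with drop_connect_rate=0.2 and 40 blocks in total, block 0 uses 0.0,
# block 20 uses 0.1 and the last block approaches 0.2.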
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config.mparams.update({'drop_connect_rate': drop_rate}) # TODO(Sugh) replace
block_prefix = 'stack_{}/block_0/'.format(stack_idx)
x = conv_block(x, block, config, block_prefix)
outputs[f'b_{block_num}'] = x
block_num += 1
if block['num_repeat'] > 1:
block.update({
'input_filters':block['output_filters'],
'strides':(1, 1)
})
for block_idx in range(block['num_repeat'] - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config.mparams.update({'drop_connect_rate': drop_rate})
block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1)
x = conv_block(x, block, config, prefix=block_prefix)
outputs[f'b_{block_num}'] = x
block_num += 1
# Build top
x = conv2d_block(x,
round_filters(top_base_filters, config),
config,
activation=activation,
name='top')
# Build classifier
DENSE_KERNEL_INITIALIZER['config']['mode'] = weight_init
x = tf.keras.layers.GlobalAveragePooling2D(name='top_pool',data_format=data_format)(x)
if dropout_rate and dropout_rate > 0:
x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x)
x = tf.keras.layers.Dense(
num_classes,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
bias_regularizer=tf.keras.regularizers.l2(weight_decay),
name='logits')(x)
x = tf.keras.layers.Activation('softmax', name='probs', dtype=tf.float32)(x)
return x
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_pnasnet_feature_extractor_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_pnas_feature_extractor."""
import numpy as np
import tensorflow as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_pnasnet_feature_extractor
slim = tf.contrib.slim
class SsdPnasNetFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True, use_explicit_padding=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return ssd_pnasnet_feature_extractor.SSDPNASNetFeatureExtractor(
is_training, depth_multiplier, min_depth, pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding)
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 2160), (2, 4, 4, 4320),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 2160), (2, 10, 10, 4320),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Segmentation/MaskRCNN/pytorch/demo | demo | predictor | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import cv2
import torch
from torchvision import transforms as T
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.utils import cv2_util
class COCODemo(object):
# COCO categories for pretty print
CATEGORIES = [
"__background",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
def __init__(
self,
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=224,
):
self.cfg = cfg.clone()
self.model = build_detection_model(cfg)
self.model.eval()
self.device = torch.device(cfg.MODEL.DEVICE)
self.model.to(self.device)
self.min_image_size = min_image_size
save_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
self.transforms = self.build_transform()
mask_threshold = -1 if show_mask_heatmaps else 0.5
self.masker = Masker(threshold=mask_threshold, padding=1)
# used to make colors for each class
self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
self.cpu_device = torch.device("cpu")
self.confidence_threshold = confidence_threshold
self.show_mask_heatmaps = show_mask_heatmaps
self.masks_per_dim = masks_per_dim
def build_transform(self):
"""
Creates a basic transformation that was used to train the models
"""
cfg = self.cfg
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
to_bgr_transform = T.Lambda(lambda x: x * 255)
else:
to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
normalize_transform = T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
)
transform = T.Compose(
[
T.ToPILImage(),
T.Resize(self.min_image_size),
T.ToTensor(),
to_bgr_transform,
normalize_transform,
]
)
return transform
def run_on_opencv_image(self, image):
"""
Arguments:
image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
predictions = self.compute_prediction(image)
top_predictions = self.select_top_predictions(predictions)
result = image.copy()
if self.show_mask_heatmaps:
return self.create_mask_montage(result, top_predictions)
result = self.overlay_boxes(result, top_predictions)
if self.cfg.MODEL.MASK_ON:
result = self.overlay_mask(result, top_predictions)
result = self.overlay_class_names(result, top_predictions)
return result
def compute_prediction(self, original_image):
"""
Arguments:
original_image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
# apply pre-processing to image
image = self.transforms(original_image)
# convert to an ImageList, padded so that it is divisible by
# cfg.DATALOADER.SIZE_DIVISIBILITY
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.device)
# compute predictions
with torch.no_grad():
predictions = self.model(image_list)
predictions = [o.to(self.cpu_device) for o in predictions]
# always single image is passed at a time
prediction = predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[:-1]
prediction = prediction.resize((width, height))
if prediction.has_field("mask"):
# if we have masks, paste the masks in the right position
# in the image, as defined by the bounding boxes
masks = prediction.get_field("mask")
# always single image is passed at a time
masks = self.masker([masks], [prediction])[0]
prediction.add_field("mask", masks)
return prediction
def select_top_predictions(self, predictions):
"""
Select only predictions which have a `score` > self.confidence_threshold,
and returns the predictions in descending order of score
Arguments:
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores`.
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
scores = predictions.get_field("scores")
keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)
predictions = predictions[keep]
scores = predictions.get_field("scores")
_, idx = scores.sort(0, descending=True)
return predictions[idx]
def compute_colors_for_labels(self, labels):
"""
Simple function that adds fixed colors depending on the class
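Each label id is multiplied by the fixed palette constants and taken modulo
255, yielding a deterministic pseudo-random 3-channel color per class.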
"""
colors = labels[:, None] * self.palette
colors = (colors % 255).numpy().astype("uint8")
return colors
def overlay_boxes(self, image, predictions):
"""
Adds the predicted boxes on top of the image
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `labels`.
"""
labels = predictions.get_field("labels")
boxes = predictions.bbox
colors = self.compute_colors_for_labels(labels).tolist()
for box, color in zip(boxes, colors):
box = box.to(torch.int64)
top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
image = cv2.rectangle(
image, tuple(top_left), tuple(bottom_right), tuple(color), 1
)
return image
def overlay_mask(self, image, predictions):
"""
Adds the instances contours for each predicted object.
Each label has a different color.
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `mask` and `labels`.
"""
masks = predictions.get_field("mask").numpy()
labels = predictions.get_field("labels")
colors = self.compute_colors_for_labels(labels).tolist()
for mask, color in zip(masks, colors):
thresh = mask[0, :, :, None]
contours, hierarchy = cv2_util.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
image = cv2.drawContours(image, contours, -1, color, 3)
composite = image
return composite
def create_mask_montage(self, image, predictions):
"""
Create a montage showing the probability heatmaps for each one of the
detected objects
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `mask`.
"""
masks = predictions.get_field("mask")
masks_per_dim = self.masks_per_dim
masks = L.interpolate(
masks.float(), scale_factor=1 / masks_per_dim
).byte()
height, width = masks.shape[-2:]
max_masks = masks_per_dim ** 2
masks = masks[:max_masks]
# handle the case where we have fewer detections than max_masks
if len(masks) < max_masks:
masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)
masks_padded[: len(masks)] = masks
masks = masks_padded
masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)
result = torch.zeros(
(masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8
)
for y in range(masks_per_dim):
start_y = y * height
end_y = (y + 1) * height
for x in range(masks_per_dim):
start_x = x * width
end_x = (x + 1) * width
result[start_y:end_y, start_x:end_x] = masks[y, x]
return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)
def overlay_class_names(self, image, predictions):
"""
Adds detected class names and scores in the positions defined by the
top-left corner of the predicted bounding box
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores` and `labels`.
"""
scores = predictions.get_field("scores").tolist()
labels = predictions.get_field("labels").tolist()
labels = [self.CATEGORIES[i] for i in labels]
boxes = predictions.bbox
template = "{}: {:.2f}"
for box, score, label in zip(boxes, scores, labels):
x, y = box[:2]
s = template.format(label, score)
cv2.putText(
image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1
)
return image
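# Illustrative usage sketch (the config and image paths below are placeholders,
# not files guaranteed to ship with this repository):
def _example_demo_usage(config_file="path/to/mask_rcnn_config.yaml",
                        image_file="path/to/image.jpg"):
    from maskrcnn_benchmark.config import cfg
    cfg.merge_from_file(config_file)
    coco_demo = COCODemo(cfg, confidence_threshold=0.7, min_image_size=800)
    image = cv2.imread(image_file)
    # Returns the input image with boxes, masks and class names drawn on top.
    return coco_demo.run_on_opencv_image(image)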
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit | deployment_toolkit | dump | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
import pickle
import threading
from pathlib import Path
from typing import Dict, Iterator, List, Union
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
def _validate_batch(name: str, value: Union[list, np.ndarray]):
if not isinstance(value, (list, np.ndarray)):
raise ValueError(f"Values shall be lists or np.ndarrays; current type {type(value)}")
def _validate_prefix_data(prefix_data: Dict[str, List[np.ndarray]]):
batch_sizes_per_io_name = {name: [len(batch) for batch in batches] for name, batches in prefix_data.items()}
names = list(batch_sizes_per_io_name)
for io_name in names:
for batch_idx, batch_size in enumerate(batch_sizes_per_io_name[io_name]):
if not all([batch_sizes_per_io_name[other_name][batch_idx] == batch_size for other_name in names]):
non_equal_batch_sizes = {
other_name: batch_sizes_per_io_name[other_name][batch_idx] for other_name in names
}
non_equal_batch_sizes_str = ", ".join(
[f"{name}={batch_size}" for name, batch_size in non_equal_batch_sizes.items()]
)
raise ValueError(
"All inputs/outputs should have same number of batches with equal batch_size. "
f"At batch_idx={batch_idx} there are batch_sizes: {non_equal_batch_sizes_str}"
)
# ensures each io has the same number of batches with equal batch sizes
def _get_nitems_and_batches(prefix_data: Dict[str, List[np.ndarray]]):
nitems = 0
nbatches = 0
if prefix_data:
nitems_per_io_name = {name: sum(len(batch) for batch in batches) for name, batches in prefix_data.items()}
nbatches_per_io_name = {name: len(batches) for name, batches in prefix_data.items()}
nitems = list(nitems_per_io_name.values())[0]
nbatches = list(nbatches_per_io_name.values())[0]
return nitems, nbatches
class BaseDumpWriter(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, output_dir: Union[str, Path]):
self._output_dir = Path(output_dir)
# outer dict key is prefix (i.e. input/output/labels/...), inner dict key is input/output name
# list is list of batches
self._items_cache: Dict[str, Dict[str, List[np.ndarray]]] = {}
# key is prefix
self._items_counters: Dict[str, int] = {}
self._cache_lock = threading.RLock()
self._flush_threshold_b = FLUSH_THRESHOLD_B
@property
def cache_size(self):
def _get_bytes_size(name, batch):
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
batch = np.array(batch)
return batch.nbytes
with self._cache_lock:
return {
prefix: sum(_get_bytes_size(name, batch) for name, batches in data.items() for batch in batches)
for prefix, data in self._items_cache.items()
}
def _append_to_cache(self, prefix, prefix_data):
if prefix_data is None:
return
if not isinstance(prefix_data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
with self._cache_lock:
cached_prefix_data = self._items_cache.setdefault(prefix, {})
for name, batch in prefix_data.items():
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
batch = np.array(batch)
cached_batches = cached_prefix_data.setdefault(name, [])
cached_batches += [batch]
def write(self, **kwargs):
with self._cache_lock:
for prefix, prefix_data in kwargs.items():
self._append_to_cache(prefix, prefix_data)
biggest_prefix_data_size = max(self.cache_size.values())
if biggest_prefix_data_size > self._flush_threshold_b:
self.flush()
def flush(self):
with self._cache_lock:
for prefix, prefix_data in self._items_cache.items():
_validate_prefix_data(prefix_data)
output_path = self._output_dir / self._get_filename(prefix)
self._dump(prefix_data, output_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
self._items_counters[prefix] += nitems
self._items_cache = {}
def _get_filename(self, prefix):
idx = self._items_counters.setdefault(prefix, 0)
return f"{prefix}-{idx:012d}{self.FILE_SUFFIX}"
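# e.g. the first flush of the "outputs" prefix in a JsonDumpWriter produces a
# file named "outputs-000000000000.json"; the counter then advances by the
# number of items already dumped.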
@abc.abstractmethod
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
pass
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
class PickleDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".pkl"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("wb") as pickle_file:
pickle.dump(prefix_data, pickle_file)
class JsonDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".json"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
repacked_prefix_data = self._format_data(prefix_data)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as json_file:
json.dump(repacked_prefix_data, json_file)
def _format_data(self, prefix_data: Dict[str, List[np.ndarray]]) -> Dict:
def _format_batch_for_perf_analyzer_json_format(batch: np.ndarray):
return {
"content": batch.flatten().tolist(),
"shape": list(batch.shape),
"dtype": str(batch.dtype),
}
_, nbatches = _get_nitems_and_batches(prefix_data)
batches = [{} for _ in range(nbatches)]
for io_name, batches_per_io in prefix_data.items():
for batch_idx, batch in enumerate(batches_per_io):
batches[batch_idx][io_name] = _format_batch_for_perf_analyzer_json_format(batch)
return {"data": batches}
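# The resulting JSON layout (illustrative, with a made-up io name and values)
# follows the Triton perf_analyzer input-data format:
# {"data": [{"input__0": {"content": [0.0, 1.0], "shape": [1, 2], "dtype": "float32"}}]}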
class BaseDumpReader(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, dump_dir: Union[Path, str]):
self._dump_dir = Path(dump_dir)
def get(self, prefix: str) -> Iterator[Dict[str, np.ndarray]]:
dump_files_paths = sorted(self._dump_dir.glob(f"{prefix}*{self.FILE_SUFFIX}"))
for dump_file_path in dump_files_paths:
prefix_data = self._load_file(dump_file_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
for batch_idx in range(nbatches):
yield {io_name: prefix_data[io_name][batch_idx] for io_name in prefix_data}
@abc.abstractmethod
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
pass
def iterate_over(self, prefix_list: List[str]) -> Iterator:
iterators = [self.get(prefix) for prefix in prefix_list]
empty_iterators = [False] * len(iterators)
while not all(empty_iterators):
values = [None] * len(iterators)
for idx, iterator in enumerate(iterators):
if empty_iterators[idx]:
continue
try:
values[idx] = next(iterator)
except StopIteration:
empty_iterators[idx] = True
if all(empty_iterators):
break
if not all(empty_iterators):
yield values
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class PickleDumpReader(BaseDumpReader):
FILE_SUFFIX = ".pkl"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as pickle_file:
return pickle.load(pickle_file)
class JsonDumpReader(BaseDumpReader):
FILE_SUFFIX = ".json"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as json_file:
data = json.load(json_file)
return self._repack_data(data)
def _repack_data(self, data: Dict) -> Dict[str, List[np.ndarray]]:
result: Dict[str, List[np.ndarray]] = {}
batches = data["data"]
for batch in batches:
for io_name, batch_as_dict in batch.items():
io_batches = result.setdefault(io_name, [])
flat_array = batch_as_dict["content"]
shape = batch_as_dict["shape"]
dtype = batch_as_dict["dtype"]
batch_as_array = np.array(flat_array).reshape(shape).astype(dtype)
io_batches.append(batch_as_array)
return result
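# Illustrative round-trip sketch (the directory name is a placeholder): dump one
# batch per prefix with JsonDumpWriter, then stream the batches back.
def _example_round_trip(dump_dir="./dump_example"):
    batch = {"input__0": np.zeros((4, 3), dtype=np.float32)}
    with JsonDumpWriter(dump_dir) as writer:
        writer.write(inputs=batch)  # cached in memory, flushed on exit
    with JsonDumpReader(dump_dir) as reader:
        return list(reader.get("inputs"))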
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | executor | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import shutil
import traceback
from typing import Dict, List, Optional
from colorama import Fore
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.core import Accelerator, Precision
from .core import Paths
from .exceptions import RunnerException
from .experiment import ExperimentResult, ExperimentStatus, Status
from .exporter import CommandsExporter
from .logger import LOGGER
from .maintainer import Container, Maintainer
from .pipeline import Pipeline
from .stages import Stage
from .task import Experiment, Task
from .triton import Triton
from .utils import clean_directory, exec_command, format_env_key, format_env_value, get_result_path
class Executor:
"""
Experiments executor
"""
def __init__(
self,
workspace: pathlib.Path,
maintainer: Maintainer,
pipeline: Pipeline,
devices: List[str] = None,
):
"""
Initialize experiments executor
Args:
workspace: Path to workspace to store artifacts
maintainer: maintainer for running commands
pipeline: pipeline definition
devices: List of devices on which Triton Inference Server will be executed
"""
self._maintainer = maintainer
self._pipeline = pipeline
self._devices = devices or ["0"]
self._workspace = workspace
self._executor_workspace = workspace / "executor"
self._shared_dir = self._executor_workspace / "shared"
self._triton_models_repository_dir = self._executor_workspace / "triton_models"
self._scripts_dir = self._executor_workspace / "scripts"
self._libraries_dir = self._executor_workspace / "libs"
self._exporter = CommandsExporter(self._scripts_dir)
self._triton_container: Optional[Container] = None
def start(self, task: Task):
"""
Process the task and execute experiments, yielding an ExperimentResult for each experiment.
"""
self._create_dirs()
total_experiment = len(task.experiments)
LOGGER.info(f"Total experiments to verify: {total_experiment}")
for idx, experiment in enumerate(task.experiments, start=1):
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Started ================{Fore.RESET}"
)
results = {}
environment = self._prepare_environment(task, experiment.parameters)
LOGGER.info("Experiment details:")
LOGGER.info(json.dumps(environment, indent=4))
self._clean_experiment_artifacts(idx, total_experiment)
self._create_experiment_results_dir(task, experiment)
experiment.start()
LOGGER.info("Running Triton Servers:")
log_file = self._workspace / task.logs_dir / f"triton-server-experiment-{idx}.log"
self._triton_container = self._triton_server_container(
triton_container_image=task.triton_container_image,
framework=task.framework,
accelerator=experiment.parameters["accelerator"],
precision=experiment.parameters["precision"],
custom_library=bool(task.triton_custom_operations is not None),
load_model_method=task.triton_load_model_method,
log_file=log_file,
)
try:
self._triton_container.start()
for stage in self._pipeline.stages():
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Started ================{Fore.RESET}"
)
experiment_stage = experiment.stages[stage.label]
experiment_stage.start()
is_ok = self._run_stage(stage=stage)
if not is_ok:
LOGGER.error(f"Stage {stage.label} failed.")
break
self._save_results(task, experiment, stage.label, results)
experiment_stage.end()
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Finished ================{Fore.RESET}"
)
except Exception:
message = traceback.format_exc()
LOGGER.error(f"Error running experiment: {message}")
yield ExperimentResult(
status=Status(state=ExperimentStatus.FAILED, message=message),
experiment=experiment,
results=results,
)
finally:
self._triton_container.stop()
experiment.end()
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Finished ================{Fore.RESET}"
)
yield ExperimentResult(
status=Status(state=ExperimentStatus.SUCCEED, message="Experiment Succeed"),
experiment=experiment,
results=results,
)
def stop(self) -> None:
"""
Stop executor
Returns:
None
"""
if self._triton_container:
self._triton_container.stop()
def _prepare_environment(self, task: Task, parameters: Dict) -> Dict:
"""
Prepare environment data and export it
Args:
task: Task object describing the model, framework and checkpoint/dataset locations
parameters: Keys and values which should be exported to the environment
Returns:
Dictionary with environment data
"""
environment = {
"MODEL_NAME": task.model_name,
"FRAMEWORK": task.framework,
"SHARED_DIR": self._shared_dir.as_posix(),
"MODEL_REPOSITORY_PATH": self._triton_models_repository_dir.as_posix(),
"TRITON_SERVER_URL": "localhost",
"TRITON_INSTANCES": "1",
"TRITON_LOAD_MODEL_METHOD": task.triton_load_model_method,
}
checkpoint_variant = parameters.get("checkpoint_variant")
if checkpoint_variant:
del parameters["checkpoint_variant"]
environment["CHECKPOINT_DIR"] = task.checkpoints[checkpoint_variant].path.as_posix()
if task.datasets_dir:
environment["DATASETS_DIR"] = task.datasets_dir.as_posix()
for key, value in parameters.items():
key = format_env_key(key)
value = format_env_value(value)
environment[key] = value
for key, value in environment.items():
os.environ[key] = value
return environment
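# Example (illustrative): a parameter such as {"batch_size": 16} is exported via
# format_env_key/format_env_value (presumably as BATCH_SIZE="16") next to the
# fixed keys above; the exact formatting is defined in runner.utils.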
def _triton_server_container(
self,
triton_container_image: str,
framework: str,
load_model_method: str,
accelerator: str,
precision: str,
log_file: pathlib.Path,
custom_library: bool,
) -> Container:
"""
Create Triton Inference Server container for experiment
Args:
triton_container_image: Triton Inference Server container image
framework: Framework used to run model
accelerator: Accelerator used for experiment
precision: Precision used for experiment
load_model_method: Configure how Triton will load model
log_file: File where Triton logs are stored
custom_library: Flag indicating whether a custom operations library has to be loaded
Returns:
Container object
"""
volumes = {
self._triton_models_repository_dir: {"bind": Paths.MODEL_REPOSITORY_PATH, "mode": "rw"},
self._libraries_dir: {"bind": Paths.LIBRARIES_PATH, "mode": "rw"},
}
environment = {
"MODEL_REPOSITORY_PATH": Paths.MODEL_REPOSITORY_PATH,
"LIBRARIES_PATH": Paths.LIBRARIES_PATH,
"TRITON_LOAD_MODEL_METHOD": load_model_method,
}
if custom_library:
library_path = Triton.library_path(framework=framework)
environment["LD_LIBRARY_PATH"] = f"{library_path}:${{LD_LIBRARY_PATH}}"
environment["LD_PRELOAD"] = Triton.custom_library_path_remote()
if accelerator == Accelerator.TRT.value and precision == Precision.FP16.value:
environment["ORT_TENSORRT_FP16_ENABLE"] = 1
strict_mode = False
command = Triton.command(
framework=framework,
repository_path=Paths.MODEL_REPOSITORY_PATH,
strict_mode=strict_mode,
)
command = f' bash -c "{command}"'
container = self._maintainer.triton_container(
command=command,
image=triton_container_image,
devices=self._devices,
volumes=volumes,
environment=environment,
log_file=log_file,
)
return container
def _save_results(self, task: Task, experiment: Experiment, stage_name: str, results: Dict) -> None:
"""
Update results for stage
Args:
task: Task object
experiment: Experiment for which stage has to be updated
stage_name: Name of stage
results: Results path mapping
Returns:
None
"""
stage = experiment.stages[stage_name]
if not stage.result_path:
LOGGER.debug(f"No results file to copy for {stage.name}")
return
if not stage.result_type:
LOGGER.debug(f"No results type provided for {stage.name}")
return
os.environ["SHARED_DIR"] = self._shared_dir.as_posix()
result_path = get_result_path(result_path=stage.result_path)
result_path = pathlib.Path(result_path)
if not result_path.is_file() and not result_path.is_dir():
raise RunnerException(f"Results file {result_path} not found.")
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
LOGGER.info(f"Saving {stage.result_type} to {experiment_dir}")
if result_path.is_dir():
dst_path = experiment_dir / stage.result_type
shutil.copytree(result_path, dst_path)
elif result_path.is_file():
suffix = result_path.suffix
dst_path = experiment_dir / f"{stage.result_type}{suffix}"
shutil.copy(result_path, dst_path)
else:
raise RunnerException(f"Result not found {result_path}")
LOGGER.info("Done")
results[stage.result_type] = dst_path
def _create_dirs(self) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
LOGGER.info(f"{Fore.GREEN}================ Creating Artifacts Directories Started ================{Fore.RESET}")
if self._executor_workspace.is_dir():
LOGGER.info(f"Removing previous executor workspace: {self._executor_workspace}")
shutil.rmtree(self._executor_workspace)
for directory in [
self._libraries_dir,
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
directory.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory.name} created.")
LOGGER.info(
f"{Fore.GREEN}================ Creating Artifacts Directories Finished ================{Fore.RESET}"
)
def _clean_experiment_artifacts(self, idx: int, total: int) -> None:
"""
Clean artifacts stored between experiments
Returns:
None
"""
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Started ================{Fore.RESET}"
)
for directory in [
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
clean_directory(directory)
LOGGER.info(f"Location {directory} cleaned.")
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Finished ================{Fore.RESET}"
)
def _create_experiment_results_dir(self, task: Task, experiment: Experiment):
"""
Create result directory for experiment
Returns:
None
"""
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
experiment_dir.mkdir(parents=True, exist_ok=True)
def _prepare_triton_custom_operations(self, task: Task) -> None:
"""
Prepare Triton Server custom operations library
Returns:
None
"""
if task.triton_custom_operations:
target_library_path = Triton.custom_library_path_local(self._libraries_dir)
target_library_path_dir = target_library_path.parent
target_library_path_dir.mkdir(parents=True, exist_ok=True)
shutil.copy(task.triton_custom_operations, target_library_path)
def _run_stage(self, stage: Stage) -> bool:
"""
Run single stage commands
Args:
stage: Stage object with defined commands
Returns:
True on success, False otherwise
"""
try:
command = self._exporter.export(stage=stage)
exec_command(command)
except RunnerException:
return False
return True
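# Illustrative usage sketch (the variable names below are assumptions, not part of this
# module): a caller typically drains the generator returned by start() and records every
# ExperimentResult before shutting the executor down.
#
#   for result in executor.start(task):
#       report.append(result)  # hypothetical result collector
#   executor.stop()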
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | keypoint_ops | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keypoint operations.
Keypoints are represented as tensors of shape [num_instances, num_keypoints, 2],
where the last dimension holds the [y, x] coordinates of each keypoint.
"""
import numpy as np
import tensorflow as tf
def scale(keypoints, y_scale, x_scale, scope=None):
"""Scales keypoint coordinates in x and y dimensions.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
new_keypoints = keypoints * [[[y_scale, x_scale]]]
return new_keypoints
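# Minimal usage sketch (values are illustrative only): two instances with three keypoints
# each form a [2, 3, 2] tensor of [y, x] pairs; scaling by the image height and width
# converts normalized coordinates into absolute pixel coordinates.
#
#   keypoints = tf.constant([[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
#                            [[0.7, 0.8], [0.9, 0.1], [0.2, 0.3]]])
#   absolute_keypoints = scale(keypoints, 480.0, 640.0)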
def clip_to_window(keypoints, window, scope=None):
"""Clips keypoints to a window.
This op clips any input keypoints to a window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window to which the op should clip the keypoints.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'ClipToWindow'):
y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
y = tf.maximum(tf.minimum(y, win_y_max), win_y_min)
x = tf.maximum(tf.minimum(x, win_x_max), win_x_min)
new_keypoints = tf.concat([y, x], 2)
return new_keypoints
def prune_outside_window(keypoints, window, scope=None):
"""Prunes keypoints that fall outside a given window.
This function replaces keypoints that fall outside the given window with nan.
See also clip_to_window which clips any keypoints that fall outside the given
window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window outside of which the op should prune the keypoints.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'PruneOutsideWindow'):
y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
valid_indices = tf.logical_and(
tf.logical_and(y >= win_y_min, y <= win_y_max),
tf.logical_and(x >= win_x_min, x <= win_x_max))
new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y))
new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x))
new_keypoints = tf.concat([new_y, new_x], 2)
return new_keypoints
def change_coordinate_frame(keypoints, window, scope=None):
"""Changes coordinate frame of the keypoints to be relative to window's frame.
Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint
coordinates from keypoints of shape [num_instances, num_keypoints, 2]
to be relative to this window.
An example use case is data augmentation: where we are given groundtruth
keypoints and would like to randomly crop the image to some window. In this
case we need to change the coordinate frame of each groundtruth keypoint to be
relative to this new window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window we should change the coordinate frame to.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'ChangeCoordinateFrame'):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height,
1.0 / win_width)
return new_keypoints
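# Illustrative sketch of the augmentation use case from the docstring above (window values
# are made up): after cropping an image to the normalized window [0.25, 0.25, 0.75, 0.75],
# groundtruth keypoints are re-expressed relative to that crop.
#
#   window = tf.constant([0.25, 0.25, 0.75, 0.75])
#   cropped_keypoints = change_coordinate_frame(keypoints, window)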
def to_normalized_coordinates(keypoints, height, width,
check_range=True, scope=None):
"""Converts absolute keypoint coordinates to normalized coordinates in [0, 1].
Usually one uses the dynamic shape of the image or conv-layer tensor:
keypoints = keypoint_ops.to_normalized_coordinates(keypoints,
tf.shape(images)[1],
tf.shape(images)[2]),
This function raises an assertion failed error at graph execution time when
the maximum coordinate is smaller than 1.01 (which means that coordinates are
already normalized). The value 1.01 is to deal with small rounding errors.
Args:
keypoints: A tensor of shape [num_instances, num_keypoints, 2].
height: Maximum value for y coordinate of absolute keypoint coordinates.
width: Maximum value for x coordinate of absolute keypoint coordinates.
check_range: If True, checks if the coordinates are normalized.
scope: name scope.
Returns:
tensor of shape [num_instances, num_keypoints, 2] with normalized
coordinates in [0, 1].
"""
with tf.name_scope(scope, 'ToNormalizedCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
if check_range:
max_val = tf.reduce_max(keypoints)
max_assert = tf.Assert(tf.greater(max_val, 1.01),
['max value is lower than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(keypoints, 1.0 / height, 1.0 / width)
def to_absolute_coordinates(keypoints, height, width,
check_range=True, scope=None):
"""Converts normalized keypoint coordinates to absolute pixel coordinates.
This function raises an assertion failed error when the maximum keypoint
coordinate value is larger than 1.01 (in which case coordinates are already
absolute).
Args:
keypoints: A tensor of shape [num_instances, num_keypoints, 2]
height: Maximum value for y coordinate of absolute keypoint coordinates.
width: Maximum value for x coordinate of absolute keypoint coordinates.
check_range: If True, checks if the coordinates are normalized or not.
scope: name scope.
Returns:
tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates
in terms of the image size.
"""
with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
# Ensure range of input keypoints is correct.
if check_range:
max_val = tf.reduce_max(keypoints)
max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
['maximum keypoint coordinate value is larger '
'than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(keypoints, height, width)
def flip_horizontal(keypoints, flip_point, flip_permutation, scope=None):
"""Flips the keypoints horizontally around the flip_point.
This operation flips the x coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the x coordinate to flip the
keypoints around.
flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation. This specifies the mapping from original keypoint indices
to the flipped keypoint indices. This is used primarily for keypoints
that are not reflection invariant. E.g. Suppose there are 3 keypoints
representing ['head', 'right_eye', 'left_eye'], then a logical choice for
flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'
and 'right_eye' after a horizontal flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'FlipHorizontal'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
u = flip_point * 2.0 - u
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
def flip_vertical(keypoints, flip_point, flip_permutation, scope=None):
"""Flips the keypoints vertically around the flip_point.
This operation flips the y coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the y coordinate to flip the
keypoints around.
flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation. This specifies the mapping from original keypoint indices
to the flipped keypoint indices. This is used primarily for keypoints
that are not reflection invariant. E.g. Suppose there are 3 keypoints
representing ['nose', 'top_lip', 'bottom_lip'], then a logical choice for
flip_permutation might be [0, 2, 1] since we want to swap the 'top_lip'
and 'bottom_lip' after a vertical flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'FlipVertical'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
v = flip_point * 2.0 - v
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
def rot90(keypoints, scope=None):
"""Rotates the keypoints counter-clockwise by 90 degrees.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'Rot90'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
v, u = tf.split(value=keypoints[:, :, ::-1], num_or_size_splits=2, axis=2)
v = 1.0 - v
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | postNetBuilder | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "postNetBuilder.h"
#include "convBatchNormCreator.h"
#include "postNetInstance.h"
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int NUM_LAYERS = 5;
constexpr const char* const INPUT_NAME = PostNetInstance::INPUT_NAME;
constexpr const char* const OUTPUT_NAME = PostNetInstance::OUTPUT_NAME;
constexpr const char* const ENGINE_NAME = PostNetInstance::ENGINE_NAME;
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
PostNetBuilder::PostNetBuilder(const int numChannels, const int maxChunkSize, const int numDimensions)
: mNumChannels(numChannels)
, mMaxChunkSize(maxChunkSize)
, mNumDimensions(numDimensions)
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
TRTPtr<ICudaEngine> PostNetBuilder::build(
IBuilder& builder,
IModelImporter& importer,
const int maxBatchSize,
const bool useFP16)
{
TRTPtr<INetworkDefinition> network(builder.createNetworkV2(0));
network->setName("Tacotron2_PostNet");
ITensor* const input = network->addInput(
INPUT_NAME, DataType::kFLOAT, Dims4{1, mNumChannels, mMaxChunkSize, 1});
ITensor* convInput = input;
ConvBatchNormCreator convBatchNormCreator;
for (int layer = 0; layer < NUM_LAYERS; ++layer) {
const LayerData* const convData
= importer.getWeights({"postnet",
"convolutions",
std::to_string(layer),
"conv_layer",
"conv"});
const LayerData* const normData = importer.getWeights(
{"postnet", "convolutions", std::to_string(layer), "batch_norm"});
ILayer* convLayer;
if (layer == 0) {
// first layer
convLayer = convBatchNormCreator.add(
*network,
convInput,
*convData,
*normData,
"tanh",
"postnet.convolutions." + std::to_string(layer));
} else if (layer == NUM_LAYERS - 1) {
// last layer
convLayer = convBatchNormCreator.add(
*network,
convInput,
*convData,
*normData,
"none",
"postnet.convolutions." + std::to_string(layer));
} else {
// intermediate layer
convLayer = convBatchNormCreator.add(
*network,
convInput,
*convData,
*normData,
"tanh",
"postnet.convolutions." + std::to_string(layer));
}
convInput = convLayer->getOutput(0);
}
// perform the addition
ILayer* const sumLayer = network->addElementWise(*convInput, *input, ElementWiseOperation::kSUM);
sumLayer->setName("postnet.elementwise_sum");
// and transpose before output
IShuffleLayer* const transLayer = network->addShuffle(*sumLayer->getOutput(0));
transLayer->setFirstTranspose({0, 2, 1, 3});
transLayer->setName("postnet.transpose");
ITensor* const output = transLayer->getOutput(0);
output->setName(OUTPUT_NAME);
network->markOutput(*output);
// build engine
TRTPtr<IBuilderConfig> config(builder.createBuilderConfig());
config->setMaxWorkspaceSize(1ULL << 29); // 512 MB
if (useFP16)
{
config->setFlag(BuilderFlag::kFP16);
}
builder.setMaxBatchSize(maxBatchSize);
TRTPtr<ICudaEngine> engine(
builder.buildEngineWithConfig(*network, *config));
if (!engine)
{
throw std::runtime_error("Failed to build Tacotron2::PostNet engine.");
}
return engine;
}
} // namespace tts
|
TensorFlow2/Segmentation/nnUNet/scripts | scripts | inference | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from pathlib import Path
from subprocess import call
parser = ArgumentParser()
parser.add_argument("--data", type=Path, required=True, help="Path to data")
parser.add_argument("--task", type=str, default="01", help="Task code")
parser.add_argument("--dim", type=int, required=True, choices=[2, 3], help="Dimension of UNet")
parser.add_argument("--batch-size", "--batch_size", type=int, default=2, help="Batch size")
parser.add_argument("--fold", type=int, required=True, choices=[0, 1, 2, 3, 4], help="Fold number")
parser.add_argument("--amp", action="store_true", help="Enable automatic mixed precision")
parser.add_argument("--tta", action="store_true", help="Enable test time augmentation")
parser.add_argument("--save-preds", "--save_preds", action="store_true", help="Save predicted masks")
parser.add_argument(
"--results", type=Path, default=Path("/results"), help="Path to results directory, output for the predicted masks"
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--ckpt-dir", "--ckpt_dir", type=Path, help="Path to checkpoint directory")
group.add_argument("--saved-model-dir", "--saved_model_dir", type=Path, help="Path to saved model directory")
if __name__ == "__main__":
args = parser.parse_args()
path_to_main = Path(__file__).resolve().parent.parent / "main.py"
cmd = ""
cmd += f"python {path_to_main} --exec-mode predict "
cmd += f"--data {args.data} "
cmd += f"--task {args.task} "
cmd += f"--dim {args.dim} "
cmd += f"--batch-size {args.batch_size} "
cmd += f"--fold {args.fold} "
cmd += f"--amp {args.amp} "
cmd += f"--tta {args.tta} "
cmd += f"--save-preds {args.save_preds} "
cmd += f"--results {args.results} "
if args.ckpt_dir:
cmd += f"--ckpt-dir {args.ckpt_dir} "
elif args.saved_model_dir:
cmd += f"--saved-model-dir {args.saved_model_dir} "
cmd += "--use-wandb false"
call(cmd, shell=True)
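# Illustrative invocation (paths, task code, and fold are placeholders, not defaults):
#
#   python scripts/inference.py --data /data/preprocessed --task 01 --dim 3 --fold 0 \
#       --amp --tta --save-preds --ckpt-dir /results/checkpoints --results /results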
|
TensorFlow/Recommendation/VAE-CF | VAE-CF | requirements | git+https://github.com/NVIDIA/dllogger#egg=dllogger
mpi4py
sklearn
pandas |
PyTorch/SpeechSynthesis/Tacotron2/tacotron2 | tacotron2 | loss_function | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from torch import nn
class Tacotron2Loss(nn.Module):
def __init__(self):
super(Tacotron2Loss, self).__init__()
def forward(self, model_output, targets):
mel_target, gate_target = targets[0], targets[1]
mel_target.requires_grad = False
gate_target.requires_grad = False
gate_target = gate_target.view(-1, 1)
mel_out, mel_out_postnet, gate_out, _ = model_output
gate_out = gate_out.view(-1, 1)
mel_loss = nn.MSELoss()(mel_out, mel_target) + \
nn.MSELoss()(mel_out_postnet, mel_target)
gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)
return mel_loss + gate_loss
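# Illustrative usage sketch (tensor names and shapes are examples, not defined here):
# the criterion takes the model output tuple and the (mel_target, gate_target) pair and
# returns a scalar combining two MSE terms with one BCE-with-logits term.
#
#   criterion = Tacotron2Loss()
#   loss = criterion((mel_out, mel_out_postnet, gate_out, alignments),
#                    (mel_target, gate_target))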
|
PyTorch/SpeechSynthesis/Tacotron2 | Tacotron2 | .gitignore | __pycache__/
/checkpoints/
/output/
nvlog.json
|
TensorFlow/Classification/ConvNets/utils/hooks | hooks | benchmark_hooks | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import tensorflow as tf
import dllogger
from .training_hooks import MeanAccumulator
__all__ = ['BenchmarkLoggingHook']
class BenchmarkLoggingHook(tf.estimator.SessionRunHook):
def __init__(self, global_batch_size, warmup_steps=20, logging_steps=1):
self.latencies = []
self.warmup_steps = warmup_steps
self.global_batch_size = global_batch_size
self.current_step = 0
self.t0 = None
self.mean_throughput = MeanAccumulator()
self.logging_steps = logging_steps
def before_run(self, run_context):
self.t0 = time.time()
def after_run(self, run_context, run_values):
batch_time = time.time() - self.t0
ips = self.global_batch_size / batch_time
if self.current_step >= self.warmup_steps:
self.latencies.append(batch_time)
self.mean_throughput.consume(ips)
if (self.current_step % self.logging_steps) == 0:
dllogger.log(data={"total_ips": ips}, step=(0, self.current_step))
self.current_step += 1
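# Illustrative sketch (the estimator and input_fn are assumed to exist elsewhere):
# passing the hook to Estimator.train reports per-step throughput to dllogger and
# accumulates steady-state latencies once the warmup steps have elapsed.
#
#   hook = BenchmarkLoggingHook(global_batch_size=256, warmup_steps=20, logging_steps=10)
#   estimator.train(input_fn=input_fn, hooks=[hook], max_steps=500)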
|
TensorFlow/Classification/ConvNets/resnet50v1.5/training | training | DGX1_RN50_FP32_250E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=resnet50 \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=250 --mixup=0.2 \
--batch_size=128 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=3.0517578125e-05 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | anchor_generator | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base anchor generator.
The job of the anchor generator is to create (or load) a collection
of bounding boxes to be used as anchors.
Generated anchors are assumed to match some convolutional grid or list of grid
shapes. For example, we might want to generate anchors matching an 8x8
feature map and a 4x4 feature map. If we place 3 anchors per grid location
on the first feature map and 6 anchors per grid location on the second feature
map, then 3*8*8 + 6*4*4 = 288 anchors are generated in total.
To support fully convolutional settings, feature map shapes are passed
dynamically at generation time. The number of anchors to place at each location
is static --- implementations of AnchorGenerator must always be able to return
the number of anchors that it uses per location for each feature map.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
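# Worked example of the anchor-count arithmetic from the module docstring (illustrative):
# with 3 anchors per location on an 8x8 feature map and 6 per location on a 4x4 map,
# the total below is exactly what _assert_correct_number_of_anchors verifies at runtime.
#
#   feature_map_shape_list = [(8, 8), (4, 4)]
#   anchors_per_location = [3, 6]
#   expected = sum(n * h * w
#                  for n, (h, w) in zip(anchors_per_location, feature_map_shape_list))
#   assert expected == 288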
class AnchorGenerator(object):
"""Abstract base class for anchor generators."""
__metaclass__ = ABCMeta
@abstractmethod
def name_scope(self):
"""Name scope.
Must be defined by implementations.
Returns:
a string representing the name scope of the anchor generation operation.
"""
pass
@property
def check_num_anchors(self):
"""Whether to dynamically check the number of anchors generated.
Can be overridden by implementations that would like to disable this
behavior.
Returns:
a boolean controlling whether the Generate function should dynamically
check the number of anchors generated against the mathematically
expected number of anchors.
"""
return True
@abstractmethod
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the `generate` function.
"""
pass
def generate(self, feature_map_shape_list, **params):
"""Generates a collection of bounding boxes to be used as anchors.
TODO(rathodv): remove **params from argument list and make stride and
offsets (for multiple_grid_anchor_generator) constructor arguments.
Args:
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with. Pairs can be provided as 1-dimensional
integer tensors of length 2 or simply as tuples of integers.
**params: parameters for anchor generation op
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if the number of feature map shapes does not match the length
of NumAnchorsPerLocation.
"""
if self.check_num_anchors and (
len(feature_map_shape_list) != len(self.num_anchors_per_location())):
raise ValueError('Number of feature maps is expected to equal the length '
'of `num_anchors_per_location`.')
with tf.name_scope(self.name_scope()):
anchors_list = self._generate(feature_map_shape_list, **params)
if self.check_num_anchors:
with tf.control_dependencies([
self._assert_correct_number_of_anchors(
anchors_list, feature_map_shape_list)]):
for item in anchors_list:
item.set(tf.identity(item.get()))
return anchors_list
@abstractmethod
def _generate(self, feature_map_shape_list, **params):
"""To be overridden by implementations.
Args:
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with.
**params: parameters for anchor generation op
Returns:
boxes_list: a list of BoxList, each holding a collection of N anchor
boxes.
"""
pass
def _assert_correct_number_of_anchors(self, anchors_list,
feature_map_shape_list):
"""Assert that correct number of anchors was generated.
Args:
anchors_list: A list of box_list.BoxList object holding anchors generated.
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with.
Returns:
Op that raises InvalidArgumentError if the number of anchors does not
match the number of expected anchors.
"""
expected_num_anchors = 0
actual_num_anchors = 0
for num_anchors_per_location, feature_map_shape, anchors in zip(
self.num_anchors_per_location(), feature_map_shape_list, anchors_list):
expected_num_anchors += (num_anchors_per_location
* feature_map_shape[0]
* feature_map_shape[1])
actual_num_anchors += anchors.num_boxes()
return tf.assert_equal(expected_num_anchors, actual_num_anchors)
|
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data/audio | audio | raw_audio_dataset | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
import os
import sys
from itertools import groupby
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils
from common.fairseq.data import data_utils
from common.fairseq.data.audio.audio_utils import (
parse_path,
read_from_stored_zip,
is_sf_audio_data,
)
from common.fairseq.data.data_utils import (
compute_mask_indices,
get_bucketed_sizes,
get_buckets,
)
from common.utils import print_once
logger = logging.getLogger(__name__)
class RawAudioDataset(torch.utils.data.Dataset):
def __init__(
self,
sample_rate,
max_sample_size=None,
min_sample_size=0,
shuffle=True,
pad=False,
normalize=False,
compute_mask_indices=False,
**mask_compute_kwargs,
):
super().__init__()
self.sample_rate = sample_rate
self.sizes = []
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.min_sample_size = min_sample_size
self.pad = pad
self.shuffle = shuffle
self.normalize = normalize
self.compute_mask_indices = compute_mask_indices
if self.compute_mask_indices:
self.mask_compute_kwargs = mask_compute_kwargs
self._features_size_map = {}
self._C = mask_compute_kwargs["encoder_embed_dim"]
self._conv_feature_layers = eval(mask_compute_kwargs["conv_feature_layers"])
def __getitem__(self, index):
raise NotImplementedError()
def __len__(self):
return len(self.sizes)
def postprocess(self, feats, curr_sample_rate):
if feats.dim() == 2:
feats = feats.mean(-1)
if curr_sample_rate != self.sample_rate:
raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}")
assert feats.dim() == 1, feats.dim()
if self.normalize:
with torch.no_grad():
feats = F.layer_norm(feats, feats.shape)
return feats
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end]
def _compute_mask_indices(self, dims, padding_mask):
B, T, C = dims
mask_indices, mask_channel_indices = None, None
if self.mask_compute_kwargs["mask_prob"] > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_compute_kwargs["mask_prob"],
self.mask_compute_kwargs["mask_length"],
self.mask_compute_kwargs["mask_selection"],
self.mask_compute_kwargs["mask_other"],
min_masks=2,
no_overlap=self.mask_compute_kwargs["no_mask_overlap"],
min_space=self.mask_compute_kwargs["mask_min_space"],
)
mask_indices = torch.from_numpy(mask_indices)
if self.mask_compute_kwargs["mask_channel_prob"] > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_compute_kwargs["mask_channel_prob"],
self.mask_compute_kwargs["mask_channel_length"],
self.mask_compute_kwargs["mask_channel_selection"],
self.mask_compute_kwargs["mask_channel_other"],
no_overlap=self.mask_compute_kwargs["no_mask_channel_overlap"],
min_space=self.mask_compute_kwargs["mask_channel_min_space"],
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices).unsqueeze(1).expand(-1, T, -1)
)
return mask_indices, mask_channel_indices
@staticmethod
def _bucket_tensor(tensor, num_pad, value):
return F.pad(tensor, (0, num_pad), value=value)
def collater(self, samples):
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
sources = [s["source"] for s in samples]
sizes = [len(s) for s in sources]
if self.pad:
target_size = min(max(sizes), self.max_sample_size)
else:
target_size = min(min(sizes), self.max_sample_size)
input, out = {}, {}
if "batch_id" in samples[0]:
# The data for wav2vec 2.0 is sorted by len and cut into batches.
# We concat --num_concat_batches together to better utilize GPUs.
# Yet, we split them back to calculate masking, sample negatives,
# and calculate loss, as these ops are dependent on batch size.
# In order to split, we need to remember original (sub)batch ids.
batch_inds = [s['batch_id'] for s in samples]
sub_batch_lens = [len(list(b)) for _, b in groupby(batch_inds)]
starts_ends = np.cumsum([0] + sub_batch_lens)
target_sizes = np.array(
[min(max(sizes[s:e]), self.max_sample_size)
for s, e in zip(starts_ends[:-1], starts_ends[1:])]
)
out["sub_batch_sizes"] = torch.LongTensor(sub_batch_lens)
out["sub_batch_lens"] = torch.LongTensor(target_sizes)
collated_sources = sources[0].new_zeros(len(sources), target_size)
padding_mask = (
torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None
)
for i, (source, size) in enumerate(zip(sources, sizes)):
diff = size - target_size
if diff == 0:
collated_sources[i] = source
elif diff > 0:
collated_sources[i] = self.crop_to_max_size(source, target_size)
else: # diff < 0:
assert self.pad
collated_sources[i] = torch.cat(
[source, source.new_full((-diff,), 0.0)]
)
padding_mask[i, diff:] = True
input["source"] = collated_sources
out["id"] = torch.LongTensor([s["id"] for s in samples])
if self.pad:
input["padding_mask"] = padding_mask
if hasattr(self, "num_buckets") and self.num_buckets > 0:
assert self.pad, "Cannot bucket without padding first."
bucket = max(self._bucketed_sizes[s["id"]] for s in samples)
num_pad = bucket - collated_sources.size(-1)
if num_pad:
input["source"] = self._bucket_tensor(collated_sources, num_pad, 0)
input["padding_mask"] = self._bucket_tensor(padding_mask, num_pad, True)
if self.compute_mask_indices:
B = input["source"].size(0)
T = self._get_mask_indices_dims(input["source"].size(-1))
padding_mask_reshaped = input["padding_mask"].clone()
extra = padding_mask_reshaped.size(1) % T
if extra > 0:
padding_mask_reshaped = padding_mask_reshaped[:, :-extra]
padding_mask_reshaped = padding_mask_reshaped.view(
padding_mask_reshaped.size(0), T, -1
)
padding_mask_reshaped = padding_mask_reshaped.all(-1)
input["padding_count"] = padding_mask_reshaped.sum(-1).max().item()
mask_indices, mask_channel_indices = self._compute_mask_indices(
(B, T, self._C),
padding_mask_reshaped,
)
input["mask_indices"] = mask_indices
input["mask_channel_indices"] = mask_channel_indices
out["sample_size"] = mask_indices.sum().item()
out["net_input"] = input
return out
def _get_mask_indices_dims(self, size, padding=0, dilation=1):
if size not in self._features_size_map:
L_in = size
for (_, kernel_size, stride) in self._conv_feature_layers:
L_out = L_in + 2 * padding - dilation * (kernel_size - 1) - 1
L_out = 1 + L_out // stride
L_in = L_out
self._features_size_map[size] = L_out
return self._features_size_map[size]
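# Illustrative arithmetic for the loop above (assuming the standard wav2vec 2.0
# extractor "[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2"): with padding=0 and
# dilation=1 each layer applies L_out = (L_in - kernel_size) // stride + 1, so a
# 16000-sample (1 s) waveform shrinks as
# 16000 -> 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49 feature frames.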
def num_tokens(self, index):
return self.size(index)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
if self.pad:
return self.sizes[index]
return min(self.sizes[index], self.max_sample_size)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
order.append(
np.minimum(
np.array(self.sizes),
self.max_sample_size,
)
)
return np.lexsort(order)[::-1]
else:
return np.arange(len(self))
def set_bucket_info(self, num_buckets):
self.num_buckets = num_buckets
if self.num_buckets > 0:
self._collated_sizes = np.minimum(
np.array(self.sizes),
self.max_sample_size,
)
self.buckets = get_buckets(
self._collated_sizes,
self.num_buckets,
)
self._bucketed_sizes = get_bucketed_sizes(
self._collated_sizes, self.buckets
)
logger.info(
f"{len(self.buckets)} bucket(s) for the audio dataset: "
f"{self.buckets}"
)
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
num_concat_batches=1,
):
"""
Given an ordered set of indices, return batches according to
*max_tokens*, *max_sentences* and *required_batch_size_multiple*.
"""
from common.fairseq.data import data_utils
return data_utils.batch_by_size(
indices,
num_tokens_fn=self.num_tokens,
num_tokens_vec=None,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
num_concat_batches=num_concat_batches,
)
def filter_indices_by_size(self, indices, max_sizes):
"""
Filter a list of sample indices. Remove those that are longer than
specified in *max_sizes*.
WARNING: don't update, override method in child classes
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if isinstance(max_sizes, float) or isinstance(max_sizes, int):
if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray):
ignored = indices[self.sizes[indices] > max_sizes].tolist()
indices = indices[self.sizes[indices] <= max_sizes]
elif (
hasattr(self, "sizes")
and isinstance(self.sizes, list)
and len(self.sizes) == 1
):
ignored = indices[self.sizes[0][indices] > max_sizes].tolist()
indices = indices[self.sizes[0][indices] <= max_sizes]
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
return indices, ignored
class FileAudioDataset(RawAudioDataset):
def __init__(
self,
manifest_path,
sample_rate,
max_sample_size=None,
min_sample_size=0,
shuffle=True,
pad=False,
normalize=False,
num_buckets=0,
compute_mask_indices=False,
**mask_compute_kwargs,
):
super().__init__(
sample_rate=sample_rate,
max_sample_size=max_sample_size,
min_sample_size=min_sample_size,
shuffle=shuffle,
pad=pad,
normalize=normalize,
compute_mask_indices=compute_mask_indices,
**mask_compute_kwargs,
)
skipped = 0
self.fnames = []
sizes = []
self.skipped_indices = set()
with open(manifest_path, "r") as f:
self.root_dir = f.readline().strip()
for i, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 2, line
sz = int(items[1])
if min_sample_size is not None and sz < min_sample_size:
skipped += 1
self.skipped_indices.add(i)
continue
self.fnames.append(items[0])
sizes.append(sz)
print_once(f"loaded {len(self.fnames)}, skipped {skipped} samples")
self.sizes = np.array(sizes, dtype=np.int64)
try:
import pyarrow
self.fnames = pyarrow.array(self.fnames)
except:
logger.debug("Could not create a pyarrow array. "
"Please install pyarrow for better performance")
pass
self.set_bucket_info(num_buckets)
def __getitem__(self, index):
import soundfile as sf
path_or_fp = os.path.join(self.root_dir, str(self.fnames[index]))
_path, slice_ptr = parse_path(path_or_fp)
if len(slice_ptr) == 2:
byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
assert is_sf_audio_data(byte_data)
path_or_fp = io.BytesIO(byte_data)
try:
wav, curr_sample_rate = sf.read(path_or_fp, dtype="float32")
except RuntimeError as e:
if not os.path.isfile(path_or_fp):
raise FileNotFoundError(path_or_fp)
else:
raise e
feats = torch.from_numpy(wav).float()
feats = self.postprocess(feats, curr_sample_rate)
ret = {"id": index, "source": feats}
if hasattr(self, 'batch_ids'):
ret['batch_id'] = self.batch_ids[index]
return ret
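# Illustrative manifest consumed by FileAudioDataset (paths and sample counts are made up):
# the first line is the root directory; every following line is a tab-separated pair of
# "<relative path>" and "<length in samples>".
#
#   /datasets/LibriSpeech/train-clean-100
#   103/1240/103-1240-0000.flac	225360
#   103/1240/103-1240-0001.flac	255120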
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner | runner | config_NVIDIA-T4 | batching: dynamic
checkpoints:
- name: widedeep_tf2_amp_base_128k_nvtabular
url: ''
configurations:
- checkpoint: widedeep_tf2_amp_base_128k_nvtabular
parameters:
backend_accelerator: amp
checkpoint: widedeep_tf2_amp_base_128k_nvtabular
device_kind: gpu
export_format: tf-savedmodel
export_precision: fp32
format: tf-savedmodel
max_batch_size: 131072
number_of_model_instances: 2
precision: fp32
tensorrt_capture_cuda_graph: 0
torch_jit: none
- checkpoint: widedeep_tf2_amp_base_128k_nvtabular
parameters:
backend_accelerator: none
checkpoint: widedeep_tf2_amp_base_128k_nvtabular
device_kind: gpu
export_format: tf-savedmodel
export_precision: fp16
format: trt
max_batch_size: 131072
number_of_model_instances: 2
precision: fp16
tensorrt_capture_cuda_graph: 1
torch_jit: none
container_version: '22.02'
datasets:
- name: outbrain
datasets_dir: datasets
ensemble_model_name: null
framework: TensorFlow2
measurement_steps_offline: 8
measurement_steps_online: 32
model_name: WidenDeep
performance_tool: perf_analyzer
triton_container_image: nvcr.io/nvidia/tritonserver:22.02-py3
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | pluginBuilder | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "pluginBuilder.h"
#include "taco2AttentionLayerPluginCreator.h"
#include "taco2DenoiseTransformLayerPluginCreator.h"
#include "taco2LSTMCellLayerPluginCreator.h"
#include "taco2ModulationRemovalLayerPluginCreator.h"
#include "taco2PrenetLayerPluginCreator.h"
#include "taco2ProjectionLayerPluginCreator.h"
#include <stdexcept>
using namespace nvinfer1;
// register plugins
namespace nvinfer1
{
namespace plugin
{
REGISTER_TENSORRT_PLUGIN(Taco2AttentionLayerPluginCreator);
REGISTER_TENSORRT_PLUGIN(Taco2PrenetLayerPluginCreator);
REGISTER_TENSORRT_PLUGIN(Taco2LSTMCellLayerPluginCreator);
REGISTER_TENSORRT_PLUGIN(Taco2ProjectionLayerPluginCreator);
REGISTER_TENSORRT_PLUGIN(Taco2ModulationRemovalLayerPluginCreator);
REGISTER_TENSORRT_PLUGIN(Taco2DenoiseTransformLayerPluginCreator);
} // namespace plugin
} // namespace nvinfer1
namespace tts
{
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
PluginBuilder::PluginBuilder(const std::string& pluginName, const std::string& pluginVersion)
: mCreator(nullptr)
, mFields()
, mNames()
, mScalars()
{
mCreator = getPluginRegistry()->getPluginCreator(pluginName.c_str(), pluginVersion.c_str());
if (!mCreator)
{
throw std::runtime_error("Failed to create plugin '" + pluginName + "'.");
}
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void PluginBuilder::setField(const std::string& name, const nvinfer1::Weights& weights)
{
PluginFieldType type;
if (weights.type == DataType::kFLOAT)
{
type = PluginFieldType::kFLOAT32;
}
else if (weights.type == DataType::kINT32)
{
type = PluginFieldType::kINT32;
}
else
{
throw std::runtime_error(
"PluginBuilder: Unsupported data type field type: " + std::to_string(static_cast<int32_t>(weights.type)));
}
mNames.emplace_back(new std::string(name));
setField(PluginField{mNames.back()->c_str(), weights.values, type, static_cast<int32_t>(weights.count)});
}
void PluginBuilder::setField(const std::string& name, const int32_t value)
{
mScalars.emplace_back(new scalar_t{value});
mNames.emplace_back(new std::string(name));
setField(PluginField{
mNames.back()->c_str(), reinterpret_cast<const void*>(&mScalars.back()->i), PluginFieldType::kINT32, 1});
}
void PluginBuilder::setField(const std::string& name, const float value)
{
mScalars.emplace_back(new scalar_t{value});
mNames.emplace_back(new std::string(name));
setField(PluginField{
mNames.back()->c_str(), reinterpret_cast<const void*>(&mScalars.back()->f), PluginFieldType::kFLOAT32, 1});
}
TRTPtr<IPluginV2> PluginBuilder::make(const std::string& name)
{
PluginFieldCollection collection{static_cast<int>(mFields.size()), mFields.data()};
TRTPtr<IPluginV2> plugin(mCreator->createPlugin(name.c_str(), &collection));
if (!plugin)
{
throw std::runtime_error(
    "Failed to instantiate plugin '" + name + "' with " + std::to_string(mFields.size()) + " fields.");
}
return plugin;
}
/******************************************************************************
* PRIVATE METHODS ************************************************************
*****************************************************************************/
void PluginBuilder::setField(const nvinfer1::PluginField& field)
{
mFields.emplace_back(field);
}
} // namespace tts
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner/perf_analyzer | perf_analyzer | warmup | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from distutils.version import LooseVersion
from importlib.metadata import version
from typing import Dict, List, Optional, Tuple
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...utils import parse_server_url
from .perf_analyzer import PerfAnalyzer
from .perf_config import PerfAnalyzerConfig
LOGGER = logging.getLogger("warmup")
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
class PerfAnalyzerWarmupRunner:
def __init__(
self,
server_url: str,
model_name: str,
batch_sizes: List[int],
concurrency: List[int],
input_data: Dict[int, Tuple],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
offline_mode: OfflineMode,
evaluation_mode: EvaluationMode,
output_shared_memory_size: int,
timeout: Optional[int],
flattened_input: bool = False,
):
self._model_name = model_name
self._input_data = input_data
self._measurement_mode = measurement_mode
self._offline_mode = offline_mode
self._evaluation_mode = evaluation_mode
self._output_shared_memory_size = output_shared_memory_size
self._protocol, self._host, self._port = parse_server_url(server_url)
self._measurement_interval = 2 * measurement_interval
self._measurement_request_count = 2 * measurement_request_count
self._batch_sizes = [min(batch_sizes)]
self._concurrency = [max(concurrency)]
self._timeout = timeout
self._flattened_input = flattened_input
def run(self):
for batch_size in self._batch_sizes:
input_data_filename, shapes = self._input_data[batch_size]
perf_analyzer_batch_size = 1 if self._flattened_input else batch_size
concurrency = 1
params = {
"model-name": self._model_name,
"model-version": 1,
"batch-size": perf_analyzer_batch_size,
"url": f"{self._host}:{self._port}",
"protocol": self._protocol.value,
"input-data": input_data_filename,
"measurement-interval": self._measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"verbose": True,
}
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = self._measurement_mode.value
params["measurement-request-count"] = self._measurement_request_count
if self._evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = self._offline_mode.value
params["output-shared-memory-size"] = self._output_shared_memory_size
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout)
perf_analyzer.run()
|
PyTorch/SpeechSynthesis/Tacotron2/platform | platform | DGXA100_waveglow_TF32_4NGPU_train | mkdir -p output
python -m multiproc train.py -m WaveGlow -o output/ -lr 1e-4 --epochs 1001 -bs 4 --segment-length 8000 --weight-decay 0 --grad-clip-thresh 3.4028234663852886e+38 --cudnn-benchmark --cudnn-enabled --log-file nvlog.json
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers | layers | on_device_embedding | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based one-hot embedding layer."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from official.modeling import tf_utils
# @tf.keras.utils.register_keras_serializable(package="Text")
class OnDeviceEmbedding(tf.keras.layers.Layer):
"""Performs an embedding lookup suitable for accelerator devices.
This layer uses either tf.gather or tf.one_hot to translate integer indices to
float embeddings.
Attributes:
vocab_size: Number of elements in the vocabulary.
embedding_width: Output size of the embedding layer.
initializer: The initializer to use for the embedding weights. Defaults to
"glorot_uniform".
use_one_hot: Whether to use tf.one_hot over tf.gather for the embedding
lookup. Defaults to False (that is, using tf.gather). Setting this option
to True may improve performance, especially on small vocabulary sizes,
but will generally require more memory.
"""
def __init__(self,
vocab_size,
embedding_width,
initializer="glorot_uniform",
use_one_hot=False,
**kwargs):
# We need to have a default dtype of float32, since the inputs (which Keras
# usually uses to infer the dtype) will always be int32.
if "dtype" not in kwargs:
kwargs["dtype"] = "float32"
super(OnDeviceEmbedding, self).__init__(**kwargs)
self._vocab_size = vocab_size
self._embedding_width = embedding_width
self._initializer = initializer
self._use_one_hot = use_one_hot
def get_config(self):
config = {
"vocab_size": self._vocab_size,
"embedding_width": self._embedding_width,
"initializer": self._initializer,
"use_one_hot": self._use_one_hot,
}
base_config = super(OnDeviceEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self.embeddings = self.add_weight(
"embeddings",
shape=[self._vocab_size, self._embedding_width],
initializer=self._initializer)
super(OnDeviceEmbedding, self).build(input_shape)
def call(self, inputs):
input_shape = tf_utils.get_shape_list(inputs, expected_rank=2)
input_shape.append(self._embedding_width)
flat_inputs = tf.reshape(inputs, [-1])
if self._use_one_hot:
one_hot_data = tf.one_hot(
flat_inputs, depth=self._vocab_size, dtype=self._dtype)
embeddings = tf.matmul(one_hot_data, self.embeddings)
else:
embeddings = tf.gather(self.embeddings, flat_inputs)
embeddings = tf.reshape(embeddings, input_shape)
return embeddings
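# A minimal usage sketch (not part of the original module): the vocabulary size,
# embedding width and token ids below are made-up values, used only to show the
# [batch, seq_length] -> [batch, seq_length, embedding_width] contract of call().
if __name__ == "__main__":
  layer = OnDeviceEmbedding(vocab_size=100, embedding_width=8, use_one_hot=True)
  token_ids = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int32)
  print(layer(token_ids).shape)  # -> (2, 3, 8)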
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | faster_rcnn_inception_resnet_v2_atrous_oid | # Faster R-CNN with Inception Resnet v2, Atrous version;
# Configured for Open Images Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
model {
faster_rcnn {
num_classes: 546
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_inception_resnet_v2'
first_stage_features_stride: 8
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 8
width_stride: 8
}
}
first_stage_atrous_rate: 2
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 17
maxpool_kernel_size: 1
maxpool_stride: 1
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
}
}
train_config: {
batch_size: 1
optimizer {
momentum_optimizer: {
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 0.00006
schedule {
step: 6000000
learning_rate: .000006
}
schedule {
step: 7000000
learning_rate: .0000006
}
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
# Note: The below line limits the training process to 8M steps, which we
# empirically found to be sufficient to train the Open Images dataset.
# Remove the below line to train indefinitely.
num_steps: 8000000
data_augmentation_options {
random_horizontal_flip {
}
}
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/oid_bbox_trainable_train.record"
}
label_map_path: "PATH_TO_BE_CONFIGURED/oid_bbox_trainable_label_map.pbtxt"
}
eval_config: {
metrics_set: "open_images_metrics"
num_examples: 8000
# Note: The below line limits the evaluation process to 10 evaluations.
# Remove the below line to evaluate indefinitely.
max_evals: 10
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/oid_bbox_trainable_val.record"
}
label_map_path: "PATH_TO_BE_CONFIGURED/oid_bbox_trainable_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
TensorFlow/LanguageModeling/Transformer-XL/tf | tf | exp_utils | # Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dllogger
import os
class AverageMeter:
"""
Computes and stores the average and current value
"""
def __init__(self, warmup=0, keep=False):
self.reset()
self.warmup = warmup
self.keep = keep
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.iters = 0
self.vals = []
def update(self, val, n=1):
self.iters += 1
self.val = val
if self.iters > self.warmup:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if self.keep:
self.vals.append(val)
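# A minimal usage sketch (not part of the original module); `warmup=1` below is an
# illustrative value: the first update is recorded in `val` but excluded from the
# running average.
#
#   meter = AverageMeter(warmup=1)
#   for step_time in [10.0, 2.0, 4.0]:
#       meter.update(step_time)
#   print(meter.val, meter.avg)  # -> 4.0 3.0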
def setup_dllogger(enabled=True, filename=os.devnull, rank=0):
if enabled and rank == 0:
backends = [
dllogger.JSONStreamBackend(
dllogger.Verbosity.VERBOSE,
filename,
),
]
dllogger.init(backends)
else:
dllogger.init([])
dllogger.metadata("eval_avg_latency", {"unit": "ms"})
dllogger.metadata("eval_ppl", {"unit": None})
dllogger.metadata("eval_avg_throughput", {"unit": "tokens/s"})
dllogger.metadata("train_throughput", {"unit": "tokens/s"})
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2DenoiseTransformPlugin | taco2DenoiseTransformPlugin | taco2DenoiseTransformLayerPlugin | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_DENOISETRANSFORMLAYERPLUGIN_H
#define TT2I_DENOISETRANSFORMLAYERPLUGIN_H
#include "cudaMemory.h"
#include "NvInfer.h"
#include <string>
#include <vector>
namespace nvinfer1
{
namespace plugin
{
class Taco2DenoiseTransformLayerPlugin : public nvinfer1::IPluginV2Ext
{
public:
using value_type = float;
/**
* @brief Get the name of this plugin.
*
* @return The name.
*/
static const char* getName();
/**
* @brief Get the version of this plugin.
*
* @return The version.
*/
static const char* getVersion();
/**
* @brief Create a new Taco2DenoiseTransformLayer from serialized data.
*
* @param data The data.
* @param length The length of the data in bytes.
*
* @return The instantiated plugin.
*/
static Taco2DenoiseTransformLayerPlugin deserialize(const void* data, size_t length);
/**
* @brief Create a new Taco2DenoiseTransformLayerPlugin.
*
* @param weight The weights to use.
* @param filterLength The length of the filter.
* @param inputLength The input length.
*/
Taco2DenoiseTransformLayerPlugin(const nvinfer1::Weights& weight, int filterLength, int inputLength);
/**
* @brief Move constructor.
*
* @param other The Taco2DenoiseTransformLayerPlugin to move.
*/
Taco2DenoiseTransformLayerPlugin(Taco2DenoiseTransformLayerPlugin&& other);
/**
* @brief The move operator.
*
* @param other The Taco2DenoiseTransformLayerPlugin to move.
*
* @return This object.
*/
Taco2DenoiseTransformLayerPlugin& operator=(Taco2DenoiseTransformLayerPlugin&& other);
/**
* @brief Destructor.
*/
~Taco2DenoiseTransformLayerPlugin();
// disable copying
Taco2DenoiseTransformLayerPlugin(const Taco2DenoiseTransformLayerPlugin& other) = delete;
Taco2DenoiseTransformLayerPlugin& operator=(const Taco2DenoiseTransformLayerPlugin& other) = delete;
/**
* @brief Return the data type of the plugin output at the requested index.
*
* @param index The output index.
* @param inputTypes The input data types.
* @param nbInputs The number of inputs.
*
* @return The type of output.
*/
nvinfer1::DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const override;
/**
* @brief Check if the output will be broadcast across the batch.
*
* @param outputIndex The output index.
* @param inputIsBroadCasted Whether or not the input is broadcasted.
* @param nbInputs The number of inputs.
*
* @return True if the output will be broadcasted.
*/
bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadCasted, int nbInputs) const override;
/**
* @brief Check if the input can be broadcasted across the batch.
*
* @param inputIndex The input index.
*
* @return True if the input can be broadcasted.
*/
bool canBroadcastInputAcrossBatch(int inputIndex) const override;
/**
* @brief Get the plugin type.
*
* @return The plugin type.
*/
const char* getPluginType() const override;
/**
* @brief Get the plugin version.
*
* @return The plugin version.
*/
const char* getPluginVersion() const override;
/**
* @brief Get the number of outputs.
*
* @return The number of outputs.
*/
int getNbOutputs() const override;
/**
* @brief Get the dimensions of an output tensor.
*
* @param index The index of the output.
* @param inputs The given inputs.
* @param nbInputDims The number of inputs.
*
* @return The resulting dimensions.
*/
nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs, int nbInputDims) override;
/**
* @brief Check if the given plugin format is supported.
*
* @param type The data type.
* @param format The plugin format.
*
* @return True if it is supported.
*/
bool supportsFormat(nvinfer1::DataType type, nvinfer1::PluginFormat format) const override;
/**
* @brief Configure this plugin with the given inputs, outputs, and data
* types.
*
* @param inputDims The input tensor dimensions.
* @param nbInputs The number of inputs.
* @param outputDims The output tensor dimensions.
* @param nbOutputs The number of outputs.
* @param inputTypes The input data types.
* @param outputTypes The output data types.
* @param inputIsBroadcast Whether or not the input is broadcast.
* @param outputIsBroadcast Whether or not the output is broadcast.
* @param format The format for the plugin.
* @param maxBatchSize The maximum batch size that will be used.
*/
void configurePlugin(const nvinfer1::Dims* inputDims, int nbInputs, const nvinfer1::Dims* outputDims, int nbOutputs,
const nvinfer1::DataType* inputTypes, const nvinfer1::DataType* outputTypes, const bool* inputIsBroadcast,
const bool* outputIsBroadcast, nvinfer1::PluginFormat format, int maxBatchSize) override;
/**
* @brief Initialize the plugin.
*
* @return 0 if initialization was successful. Non-zero otherwise.
*/
int initialize() override;
/**
* @brief Terminate the plugin (deinitialize).
*/
void terminate() override;
/**
* @brief Get workspace size required by this plugin for up to the given
* batch size.
*
* @param maxBatchSize The maximum number of items in the batch.
*
* @return The workspace size in bytes.
*/
size_t getWorkspaceSize(int maxBatchSize) const override;
/**
* @brief Set this plugin for execution on the stream.
*
* @param batchSize The number of items in the batch.
* @param inputs The input tensors.
* @param outputs The output tensors.
* @param workspace The workspace.
* @param stream The stream to operate on.
*
* @return 0 if successfully queued, non-zero otherwise.
*/
int enqueue(
int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) override;
/**
* @brief Get the number of bytes occupied by this plugin if serialized.
*
* @return The size in bytes.
*/
size_t getSerializationSize() const override;
/**
* @brief Serialize this plugin.
*
* @param buffer The buffer to write to.
*/
void serialize(void* buffer) const override;
/**
* @brief Destroy this plugin instance.
*/
void destroy() override;
/**
* @brief Clone this plugin instance.
*
* @return The cloned plugin.
*/
IPluginV2Ext* clone() const override;
/**
* @brief Set the namespace of this plugin.
*
* @param pluginNamespace The namespace.
*/
void setPluginNamespace(const char* pluginNamespace) override;
/**
* @brief Get the namespace of this plugin.
*
* @return The namespace.
*/
const char* getPluginNamespace() const override;
private:
int mFilterLength;
int mInputLength;
std::vector<value_type> mWeightsHost;
tts::CudaMemory<float> mWeightsDevice;
std::string mNamespace;
};
} // namespace plugin
} // namespace nvinfer1
#endif
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton | triton | model | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
import hydra
from typing import Dict, Tuple, Optional, List
from omegaconf import OmegaConf
def update_argparser(parser):
parser.add_argument("--model-dir", type=str, help="Path to the model directory you would like to use (likely in outputs)", required=True)
class ModelWrapper(nn.Module):
def __init__(self, model, test_func):
super().__init__()
self.model = model
self.test_func = test_func
def unwrap(self, t):
if not torch.isnan(t).any():
return t
return None
def forward(self, s_cat, s_cont, k_cat, k_cont, o_cat, o_cont, target, sample_weight, id, weight):
wrapped_input = {}
wrapped_input['s_cat'] = self.unwrap(s_cat)
wrapped_input['s_cont'] = self.unwrap(s_cont)
wrapped_input['k_cat'] = self.unwrap(k_cat)
wrapped_input['k_cont'] = self.unwrap(k_cont)
wrapped_input['o_cat'] = self.unwrap(o_cat)
wrapped_input['o_cont'] = self.unwrap(o_cont)
wrapped_input['sample_weight'] = self.unwrap(sample_weight)
wrapped_input['target'] = target
wrapped_input['id'] = id if id.numel() else None
wrapped_input['weight'] = self.unwrap(weight)
output = self.test_func(wrapped_input)
return output
def get_model(**args):
#get model config
with open(os.path.join(args['model_dir'], ".hydra/config_merged.yaml"), "rb") as f:
config = OmegaConf.load(f)
os.environ["TFT_SCRIPTING"] = "True"
state_dict = torch.load(os.path.join(args['model_dir'], "best_checkpoint.zip"))['model_state_dict']
model = hydra.utils.instantiate(config.model)
test_method_name = 'predict' if hasattr(model, "predict") else '__call__'
test_method = getattr(model, test_method_name)
#load model
model.load_state_dict(state_dict)
model.eval()
model.cuda()
model = ModelWrapper(model, test_method).cuda()
tensor_names = {
"inputs": ['s_cat__0', 's_cont__1', 'k_cat__2', 'k_cont__3', 'o_cat__4', 'o_cont__5', 'target__6', 'sample_weight__7', 'id__8', 'weight__9'],
"outputs": ["target__0"]
}
return model, tensor_names
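# A minimal usage sketch (not part of the original module); the path below is a
# placeholder for a real TSPP training output directory containing
# .hydra/config_merged.yaml and best_checkpoint.zip.
#
#   model, tensor_names = get_model(model_dir="/path/to/tspp/outputs/run_dir")
#   # Optional inputs are passed to ModelWrapper.forward as all-NaN tensors;
#   # unwrap() converts them back to None before calling the model's predict method.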
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | np_mask_ops | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, height, width] numpy arrays representing masks.
Example mask operations that are supported:
* Areas: compute mask areas
* IOU: pairwise intersection-over-union scores
"""
import numpy as np
EPSILON = 1e-7
def area(masks):
"""Computes area of masks.
Args:
masks: Numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N] representing mask areas.
Raises:
ValueError: If masks.dtype is not np.uint8
"""
if masks.dtype != np.uint8:
raise ValueError('Masks type should be np.uint8')
return np.sum(masks, axis=(1, 2), dtype=np.float32)
def intersection(masks1, masks2):
"""Compute pairwise intersection areas between masks.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise intersection areas.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
n = masks1.shape[0]
m = masks2.shape[0]
answer = np.zeros([n, m], dtype=np.float32)
for i in np.arange(n):
for j in np.arange(m):
answer[i, j] = np.sum(np.minimum(masks1[i], masks2[j]), dtype=np.float32)
return answer
def iou(masks1, masks2):
"""Computes pairwise intersection-over-union between mask collections.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
area1 = area(masks1)
area2 = area(masks2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / np.maximum(union, EPSILON)
def ioa(masks1, masks2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two masks, mask1 and mask2 is defined as
their intersection area over mask2's area. Note that ioa is not symmetric,
that is, IOA(mask1, mask2) != IOA(mask2, mask1).
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
areas = np.expand_dims(area(masks2), axis=0)
return intersect / (areas + EPSILON)
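# A minimal self-contained example (not part of the original module): the two toy
# masks below are made-up data that illustrate the dtype/shape contract
# (np.uint8, [N, height, width]) and the pairwise [N, M] outputs.
if __name__ == '__main__':
  toy_masks1 = np.array([[[1, 1], [0, 0]]], dtype=np.uint8)     # one mask, area 2
  toy_masks2 = np.array([[[1, 0], [1, 0]]], dtype=np.uint8)     # one mask, area 2
  print(area(toy_masks1))                      # [2.]
  print(intersection(toy_masks1, toy_masks2))  # [[1.]]
  print(iou(toy_masks1, toy_masks2))           # [[0.33333334]]  (1 / (2 + 2 - 1))
  print(ioa(toy_masks1, toy_masks2))           # ~[[0.5]]        (1 / 2)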
|
TensorFlow/Segmentation/UNet_Industrial/utils | utils | losses | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
__all__ = ["regularization_l2loss", "reconstruction_l2loss", "reconstruction_x_entropy", "adaptive_loss"]
def regularization_l2loss(weight_decay):
def loss_filter_fn(name):
"""we don't need to compute L2 loss for BN"""
return all(
[tensor_name not in name.lower() for tensor_name in ["batchnorm", "batch_norm", "batch_normalization"]]
)
filtered_params = [tf.cast(v, tf.float32) for v in tf.trainable_variables() if loss_filter_fn(v.name)]
if len(filtered_params) != 0:
l2_loss_per_vars = [tf.nn.l2_loss(v) for v in filtered_params]
l2_loss = tf.multiply(tf.add_n(l2_loss_per_vars), weight_decay)
else:
l2_loss = tf.zeros(shape=(), dtype=tf.float32)
return l2_loss
def reconstruction_l2loss(y_pred, y_true):
reconstruction_err = tf.subtract(y_pred, y_true)
return tf.reduce_mean(tf.nn.l2_loss(reconstruction_err), name='reconstruction_loss_l2_loss')
def reconstruction_x_entropy(y_pred, y_true, from_logits=False):
return tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_true=y_true, y_pred=y_pred, from_logits=from_logits))
def dice_coe(y_pred, y_true, loss_type='jaccard', smooth=1.):
"""Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity
of two batch of data, usually be used for binary image segmentation
i.e. labels are binary. The coefficient between 0 to 1, 1 means totally match.
Parameters
-----------
y_true : Tensor
A distribution with shape: [batch_size, ....], (any dimensions).
y_pred : Tensor
The target distribution, format the same with `output`.
loss_type : str
``jaccard`` or ``sorensen``, default is ``jaccard``.
smooth : float
This small value will be added to the numerator and denominator.
- If both output and target are empty, it makes sure dice is 1.
- If either output or target are empty (all pixels are background),
dice = ```smooth/(small_value + smooth)``,
then if smooth is very small, dice close to 0 (even the image values lower than the threshold),
so in this case, higher smooth can have a higher dice.
References
-----------
- `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__
"""
y_true_f = tf.layers.flatten(y_true)
y_pred_f = tf.layers.flatten(y_pred)
intersection = tf.reduce_sum(y_true_f * y_pred_f)
if loss_type == 'jaccard':
union = tf.reduce_sum(tf.square(y_pred_f)) + tf.reduce_sum(tf.square(y_true_f))
elif loss_type == 'sorensen':
union = tf.reduce_sum(y_pred_f) + tf.reduce_sum(y_true_f)
else:
raise ValueError("Unknown `loss_type`: %s" % loss_type)
return (2. * intersection + smooth) / (union + smooth)
def adaptive_loss(y_pred, y_pred_logits, y_true, switch_at_threshold=0.3, loss_type='jaccard'):
dice_loss = 1 - dice_coe(y_pred=y_pred, y_true=y_true, loss_type=loss_type, smooth=1.)
return tf.cond(
dice_loss < switch_at_threshold,
true_fn=lambda: dice_loss,
false_fn=lambda: reconstruction_x_entropy(y_pred=y_pred_logits, y_true=y_true, from_logits=True)
)
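# A minimal usage sketch (not part of the original module), assuming the TF1-style
# graph/session workflow used by the rest of this file; the tiny constant tensors
# below are made-up values that only illustrate how dice_coe() is evaluated.
# adaptive_loss() is wired the same way, but additionally takes the raw logits.
#
#   y_true = tf.constant([[0., 1., 1., 0.]])
#   y_pred = tf.constant([[0.1, 0.9, 0.8, 0.2]])
#   dice = dice_coe(y_pred=y_pred, y_true=y_true, loss_type='jaccard')
#   with tf.Session() as sess:
#       print(sess.run(dice))  # ~0.978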
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/resnet50 | resnet50 | bottleneck_group | import tensorflow as tf
from mrcnn_tf2.model.models.resnet50 import BottleneckBlock
class BottleneckGroup(tf.keras.layers.Layer):
def __init__(self, blocks, filters, strides, trainable=True):
super().__init__(trainable=trainable)
self.blocks = []
for block_id in range(blocks):
self.blocks.append(
BottleneckBlock(
filters=filters,
strides=strides if block_id == 0 else 1,
expansion=4,
shortcut='conv2d' if block_id == 0 else None
)
)
def call(self, inputs, training=None, **kwargs):
net = inputs
for block in self.blocks:
net = block(net, training=training)
return net
|
PyTorch/Classification/GPUNet/triton/deployment_toolkit | deployment_toolkit | args | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from typing import Callable, Dict, Optional, Union
from model_navigator.utils.cli import is_dict_generic, is_list_generic, is_optional_generic
from .core import GET_ARGPARSER_FN_NAME, load_from_file
LOGGER = logging.getLogger(__name__)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict:
signature = inspect.signature(fn)
parameters_names = list(signature.parameters)
if isinstance(args, argparse.Namespace):
args = vars(args)
args = {k: v for k, v in args.items() if k in parameters_names}
return args
def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser:
parser.conflict_handler = "resolve"
signature = inspect.signature(fn)
for parameter in signature.parameters.values():
if parameter.name in ["self", "args", "kwargs"]:
continue
argument_kwargs = {}
if parameter.annotation != inspect.Parameter.empty:
is_optional = is_optional_generic(parameter.annotation)
if is_optional:
annotation = parameter.annotation.__args__[0] # Optional[cls] will be changed into Union[cls, None]
else:
annotation = parameter.annotation
is_list = is_list_generic(annotation)
is_dict = is_dict_generic(annotation)
if parameter.annotation == bool:
argument_kwargs["type"] = str2bool
argument_kwargs["choices"] = [0, 1]
elif is_list:
argument_kwargs["type"] = annotation.__args__[0] # List[cls] -> cls
elif is_dict:
raise RuntimeError(
f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}"
)
else:
argument_kwargs["type"] = annotation
if parameter.default != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["default"] = str2bool(parameter.default)
else:
argument_kwargs["default"] = parameter.default
else:
argument_kwargs["required"] = True
name = parameter.name.replace("_", "-")
LOGGER.debug(f"Adding argument {name} with {argument_kwargs}")
parser.add_argument(f"--{name}", **argument_kwargs)
return parser
class ArgParserGenerator:
def __init__(self, cls_or_fn, module_path: Optional[str] = None):
self._cls_or_fn = cls_or_fn
init_method_name = "__init__"
self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, init_method_name, None)
input_is_python_file = module_path and module_path.endswith(".py")
self._input_path = module_path if input_is_python_file else None
self._required_fn_name_for_signature_parsing = getattr(
cls_or_fn, "required_fn_name_for_signature_parsing", None
)
def update_argparser(self, parser):
name = self._handle.__name__
group_parser = parser.add_argument_group(name)
add_args_for_fn_signature(group_parser, fn=self._handle)
self._update_argparser(group_parser)
def get_args(self, args: argparse.Namespace):
filtered_args = filter_fn_args(args, fn=self._handle)
tmp_parser = argparse.ArgumentParser(allow_abbrev=False)
self._update_argparser(tmp_parser)
custom_names = [
p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction)
]
custom_params = {n: getattr(args, n) for n in custom_names}
filtered_args = {**filtered_args, **custom_params}
return filtered_args
def from_args(self, args: Union[argparse.Namespace, Dict]):
args = self.get_args(args)
LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})")
return self._cls_or_fn(**args)
def _update_argparser(self, parser):
label = "argparser_update"
if self._input_path:
update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME)
if update_argparser_handle:
update_argparser_handle(parser)
elif self._required_fn_name_for_signature_parsing:
fn_handle = load_from_file(
self._input_path, label=label, target=self._required_fn_name_for_signature_parsing
)
if fn_handle:
add_args_for_fn_signature(parser, fn_handle)
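# A minimal usage sketch (not part of the original module): `load_checkpoint` below
# is a hypothetical function used only to show how ArgParserGenerator turns a
# function signature into CLI flags and then calls the function with parsed values.
#
#   def load_checkpoint(*, checkpoint_path: str, batch_size: int = 8, fp16: bool = False):
#       ...
#
#   parser = argparse.ArgumentParser()
#   ArgParserGenerator(load_checkpoint).update_argparser(parser)
#   args = parser.parse_args(["--checkpoint-path", "model.pt", "--fp16", "1"])
#   result = ArgParserGenerator(load_checkpoint).from_args(args)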
|
TensorFlow/Classification/ConvNets/se-resnext101-32x4d/training | training | DGX2_SE-RNxt101-32x4d_FP32_250E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=se-resnext101-32x4d \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=250 --mixup=0.2 \
--batch_size=64 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=6.103515625e-05 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
|
TensorFlow2/Segmentation/Contrib/UNet3P | UNet3P | .gitignore | .idea
__pycache__
checkpoint/tb_logs/*
checkpoint/*.hdf5
checkpoint/*.csv
!checkpoint/tb_logs/.gitkeep
#data/*
/data/**/*.png
/data/**/*.jpg
/data/**/*.nii
!data/**/.gitkeep
data_preparation/verify_preprocess_data.ipynb
old_data_preperation/
others/
**/outputs |
PyTorch/Translation/GNMT | GNMT | requirements | pytablewriter==0.64.0
sacrebleu==1.2.10
sacremoses==0.0.19
pynvml==8.0.4
git+https://github.com/rsennrich/subword-nmt.git@48ba99e657591c329e0003f0c6e32e493fa959ef
|
TensorFlow2/Detection/Efficientdet/model | model | efficientdet_keras | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras implementation of efficientdet."""
import functools
from absl import logging
import numpy as np
import tensorflow as tf
from efficientnet import efficientnet_model
from model import dataloader
from model import normalization_builder
from model import activation_builder
from model import fpn_configs
from model import postprocess
from utils import hparams_config
from utils import model_utils
from utils import util_keras
# pylint: disable=arguments-differ # for keras layers.
class FNode(tf.keras.layers.Layer):
"""A Keras Layer implementing BiFPN Node."""
def __init__(self,
feat_level,
inputs_offsets,
fpn_num_filters,
apply_bn_for_resampling,
is_training_bn,
conv_after_downsample,
conv_bn_act_pattern,
separable_conv,
act_type,
weight_method,
data_format,
name='fnode'):
super().__init__(name=name)
self.feat_level = feat_level
self.inputs_offsets = inputs_offsets
self.fpn_num_filters = fpn_num_filters
self.apply_bn_for_resampling = apply_bn_for_resampling
self.separable_conv = separable_conv
self.act_type = act_type
self.is_training_bn = is_training_bn
self.conv_after_downsample = conv_after_downsample
self.data_format = data_format
self.weight_method = weight_method
self.conv_bn_act_pattern = conv_bn_act_pattern
self.resample_layers = []
self.vars = []
def fuse_features(self, nodes):
"""Fuse features from different resolutions and return a weighted sum.
Args:
nodes: a list of tensorflow features at different levels
Returns:
A tensor denoting the fused feature.
"""
dtype = nodes[0].dtype
if self.weight_method == 'attn':
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
normalized_weights = tf.nn.softmax(tf.stack(edge_weights))
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == 'fastattn':
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
weights_sum = tf.add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = tf.add_n(nodes)
elif self.weight_method == 'channel_attn':
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
normalized_weights = tf.nn.softmax(tf.stack(edge_weights, -1), axis=-1)
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == 'channel_fastattn':
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
weights_sum = tf.add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = tf.add_n(nodes)
elif self.weight_method == 'sum':
new_node = sum(nodes) # tf.add_n is not supported by tflite gpu.
else:
raise ValueError('unknown weight_method %s' % self.weight_method)
return new_node
def _add_wsm(self, initializer):
for i, _ in enumerate(self.inputs_offsets):
name = 'WSM' + ('' if i == 0 else '_' + str(i))
self.vars.append(self.add_weight(initializer=initializer, name=name))
def build(self, feats_shape):
for i, input_offset in enumerate(self.inputs_offsets):
name = 'resample_{}_{}_{}'.format(i, input_offset, len(feats_shape))
self.resample_layers.append(
ResampleFeatureMap(
self.feat_level,
self.fpn_num_filters,
self.apply_bn_for_resampling,
self.is_training_bn,
self.conv_after_downsample,
data_format=self.data_format,
name=name))
if self.weight_method == 'attn':
self._add_wsm('ones')
elif self.weight_method == 'fastattn':
self._add_wsm('ones')
elif self.weight_method == 'channel_attn':
num_filters = int(self.fpn_num_filters)
self._add_wsm(lambda: tf.ones([num_filters]))
elif self.weight_method == 'channel_fastattn':
num_filters = int(self.fpn_num_filters)
self._add_wsm(lambda: tf.ones([num_filters]))
self.op_after_combine = OpAfterCombine(
self.is_training_bn,
self.conv_bn_act_pattern,
self.separable_conv,
self.fpn_num_filters,
self.act_type,
self.data_format,
name='op_after_combine{}'.format(len(feats_shape)))
self.built = True
super().build(feats_shape)
def call(self, feats, training):
nodes = []
for i, input_offset in enumerate(self.inputs_offsets):
input_node = feats[input_offset]
input_node = self.resample_layers[i](input_node, training, feats)
nodes.append(input_node)
new_node = self.fuse_features(nodes)
new_node = self.op_after_combine(new_node)
return feats + [new_node]
class OpAfterCombine(tf.keras.layers.Layer):
"""Operation after combining input features during feature fusiong."""
def __init__(self,
is_training_bn,
conv_bn_act_pattern,
separable_conv,
fpn_num_filters,
act_type,
data_format,
name='op_after_combine'):
super().__init__(name=name)
self.conv_bn_act_pattern = conv_bn_act_pattern
self.separable_conv = separable_conv
self.fpn_num_filters = fpn_num_filters
self.act_type = act_type
self.data_format = data_format
self.is_training_bn = is_training_bn
if self.separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D, depth_multiplier=1)
else:
conv2d_layer = tf.keras.layers.Conv2D
self.conv_op = conv2d_layer(
filters=fpn_num_filters,
kernel_size=(3, 3),
padding='same',
use_bias=not self.conv_bn_act_pattern,
data_format=self.data_format,
name='conv')
self.bn = util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name='bn')
def call(self, new_node, training):
if not self.conv_bn_act_pattern:
new_node = activation_builder.activation_fn(new_node, self.act_type)
new_node = self.conv_op(new_node)
new_node = self.bn(new_node, training=training)
if self.conv_bn_act_pattern:
new_node = activation_builder.activation_fn(new_node, self.act_type)
return new_node
class ResampleFeatureMap(tf.keras.layers.Layer):
"""Resample feature map for downsampling or upsampling."""
def __init__(self,
feat_level,
target_num_channels,
apply_bn=False,
is_training_bn=None,
conv_after_downsample=False,
data_format=None,
pooling_type=None,
upsampling_type=None,
name='resample_p0'):
super().__init__(name=name)
self.apply_bn = apply_bn
self.is_training_bn = is_training_bn
self.data_format = data_format
self.target_num_channels = target_num_channels
self.feat_level = feat_level
self.conv_after_downsample = conv_after_downsample
self.pooling_type = pooling_type or 'max'
self.upsampling_type = upsampling_type or 'nearest'
self.conv2d = tf.keras.layers.Conv2D(
self.target_num_channels, (1, 1),
padding='same',
data_format=self.data_format,
name='conv2d')
self.bn = util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name='bn')
def _pool2d(self, inputs, height, width, target_height, target_width):
"""Pool the inputs to target height and width."""
height_stride_size = int((height - 1) // target_height + 1)
width_stride_size = int((width - 1) // target_width + 1)
if self.pooling_type == 'max':
return tf.keras.layers.MaxPooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(inputs)
elif self.pooling_type == 'avg':
return tf.keras.layers.AveragePooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(inputs)
else:
raise ValueError('Unsupported pooling type {}.'.format(self.pooling_type))
def _upsample2d(self, inputs, target_height, target_width):
return tf.cast(
tf.image.resize(
tf.cast(inputs, tf.float32), [target_height, target_width],
method=self.upsampling_type), inputs.dtype)
def _maybe_apply_1x1(self, feat, training, num_channels):
"""Apply 1x1 conv to change layer width if necessary."""
if num_channels != self.target_num_channels:
feat = self.conv2d(feat)
if self.apply_bn:
feat = self.bn(feat, training=training)
return feat
def call(self, feat, training, all_feats):
hwc_idx = (2, 3, 1) if self.data_format == 'channels_first' else (1, 2, 3)
height, width, num_channels = [feat.shape.as_list()[i] for i in hwc_idx]
if all_feats:
target_feat_shape = all_feats[self.feat_level].shape.as_list()
target_height, target_width, _ = [target_feat_shape[i] for i in hwc_idx]
else:
# Default to downsampling if all_feats is empty.
target_height, target_width = (height + 1) // 2, (width + 1) // 2
# If conv_after_downsample is True, when downsampling, apply 1x1 after
# downsampling for efficiency.
if height > target_height and width > target_width:
if not self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
feat = self._pool2d(feat, height, width, target_height, target_width)
if self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
elif height <= target_height and width <= target_width:
feat = self._maybe_apply_1x1(feat, training, num_channels)
if height < target_height or width < target_width:
feat = self._upsample2d(feat, target_height, target_width)
else:
raise ValueError(
'Incompatible Resampling : feat shape {}x{} target_shape: {}x{}'
.format(height, width, target_height, target_width))
return feat
class ClassNet(tf.keras.layers.Layer):
"""Object class prediction network."""
def __init__(self,
num_classes=90,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
is_training_bn=False,
act_type='swish',
repeats=4,
separable_conv=True,
survival_prob=None,
data_format='channels_last',
name='class_net',
**kwargs):
"""Initialize the ClassNet.
Args:
num_classes: number of classes.
num_anchors: number of anchors.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
repeats: number of intermediate layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
data_format: string of 'channels_first' or 'channels_last'.
name: the name of this layer.
**kwargs: other parameters.
"""
super().__init__(name=name, **kwargs)
self.num_classes = num_classes
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.is_training_bn = is_training_bn
self.survival_prob = survival_prob
self.act_type = act_type
self.data_format = data_format
self.conv_ops = []
self.bns = []
if separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D,
depth_multiplier=1,
data_format=data_format,
pointwise_initializer=tf.initializers.VarianceScaling(),
depthwise_initializer=tf.initializers.VarianceScaling())
else:
conv2d_layer = functools.partial(
tf.keras.layers.Conv2D,
data_format=data_format,
kernel_initializer=tf.random_normal_initializer(stddev=0.01))
for i in range(self.repeats):
# Intermediate conv ops (separable or regular conv2d, chosen above).
self.conv_ops.append(
conv2d_layer(
self.num_filters,
kernel_size=3,
bias_initializer=tf.zeros_initializer(),
activation=None,
padding='same',
name='class-%d' % i))
bn_per_level = []
for level in range(self.min_level, self.max_level + 1):
bn_per_level.append(
util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name='class-%d-bn-%d' % (i, level),
))
self.bns.append(bn_per_level)
self.classes = conv2d_layer(
num_classes * num_anchors,
kernel_size=3,
bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
padding='same',
name='class-predict')
def call(self, inputs, training, **kwargs):
"""Call ClassNet."""
class_outputs = []
for level_id in range(0, self.max_level - self.min_level + 1):
image = inputs[level_id]
for i in range(self.repeats):
original_image = image
image = self.conv_ops[i](image)
image = self.bns[i][level_id](image, training=training)
if self.act_type:
image = activation_builder.activation_fn(image, self.act_type)
if i > 0 and self.survival_prob:
image = model_utils.drop_connect(image, training, self.survival_prob)
image = image + original_image
class_outputs.append(self.classes(image))
return class_outputs
class BoxNet(tf.keras.layers.Layer):
"""Box regression network."""
def __init__(self,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
is_training_bn=False,
act_type='swish',
repeats=4,
separable_conv=True,
survival_prob=None,
data_format='channels_last',
name='box_net',
**kwargs):
"""Initialize BoxNet.
Args:
num_anchors: number of anchors used.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
repeats: number of "intermediate" layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
data_format: string of 'channels_first' or 'channels_last'.
name: Name of the layer.
**kwargs: other parameters.
"""
super().__init__(name=name, **kwargs)
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.is_training_bn = is_training_bn
self.survival_prob = survival_prob
self.act_type = act_type
self.data_format = data_format
self.conv_ops = []
self.bns = []
for i in range(self.repeats):
# If using SeparableConv2D
if self.separable_conv:
self.conv_ops.append(
tf.keras.layers.SeparableConv2D(
filters=self.num_filters,
depth_multiplier=1,
pointwise_initializer=tf.initializers.VarianceScaling(),
depthwise_initializer=tf.initializers.VarianceScaling(),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-%d' % i))
# If using Conv2d
else:
self.conv_ops.append(
tf.keras.layers.Conv2D(
filters=self.num_filters,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-%d' % i))
bn_per_level = []
for level in range(self.min_level, self.max_level + 1):
bn_per_level.append(
util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name='box-%d-bn-%d' % (i, level)))
self.bns.append(bn_per_level)
if self.separable_conv:
self.boxes = tf.keras.layers.SeparableConv2D(
filters=4 * self.num_anchors,
depth_multiplier=1,
pointwise_initializer=tf.initializers.VarianceScaling(),
depthwise_initializer=tf.initializers.VarianceScaling(),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-predict')
else:
self.boxes = tf.keras.layers.Conv2D(
filters=4 * self.num_anchors,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-predict')
def call(self, inputs, training):
"""Call boxnet."""
box_outputs = []
for level_id in range(0, self.max_level - self.min_level + 1):
image = inputs[level_id]
for i in range(self.repeats):
original_image = image
image = self.conv_ops[i](image)
image = self.bns[i][level_id](image, training=training)
if self.act_type:
image = activation_builder.activation_fn(image, self.act_type)
if i > 0 and self.survival_prob:
image = model_utils.drop_connect(image, training, self.survival_prob)
image = image + original_image
box_outputs.append(self.boxes(image))
return box_outputs
class SegmentationHead(tf.keras.layers.Layer):
"""Keras layer for semantic segmentation head."""
def __init__(self,
num_classes,
num_filters,
min_level,
max_level,
data_format,
is_training_bn,
act_type,
**kwargs):
"""Initialize SegmentationHead.
Args:
num_classes: number of classes.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
data_format: string of 'channels_first' or 'channels_last'.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
**kwargs: other parameters.
"""
super().__init__(**kwargs)
self.act_type = act_type
self.con2d_ts = []
self.con2d_t_bns = []
for _ in range(max_level - min_level):
self.con2d_ts.append(
tf.keras.layers.Conv2DTranspose(
num_filters,
3,
strides=2,
padding='same',
data_format=data_format,
use_bias=False))
self.con2d_t_bns.append(
util_keras.build_batch_norm(
is_training_bn=is_training_bn,
data_format=data_format,
name='bn'))
self.head_transpose = tf.keras.layers.Conv2DTranspose(
num_classes, 3, strides=2, padding='same')
def call(self, feats, training):
x = feats[-1]
skips = list(reversed(feats[:-1]))
for con2d_t, con2d_t_bn, skip in zip(self.con2d_ts, self.con2d_t_bns,
skips):
x = con2d_t(x)
x = con2d_t_bn(x, training)
x = activation_builder.activation_fn(x, self.act_type)
x = tf.concat([x, skip], axis=-1)
# This is the last layer of the model
return self.head_transpose(x) # 64x64 -> 128x128
class FPNCells(tf.keras.layers.Layer):
"""FPN cells."""
def __init__(self, config, name='fpn_cells'):
super().__init__(name=name)
self.config = config
if config.fpn_config:
self.fpn_config = config.fpn_config
else:
self.fpn_config = fpn_configs.get_fpn_config(config.fpn_name,
config.min_level,
config.max_level,
config.fpn_weight_method)
self.cells = [
FPNCell(self.config, name='cell_%d' % rep)
for rep in range(self.config.fpn_cell_repeats)
]
def call(self, feats, training):
for cell in self.cells:
cell_feats = cell(feats, training)
min_level = self.config.min_level
max_level = self.config.max_level
feats = []
for level in range(min_level, max_level + 1):
for i, fnode in enumerate(reversed(self.fpn_config.nodes)):
if fnode['feat_level'] == level:
feats.append(cell_feats[-1 - i])
break
return feats
class FPNCell(tf.keras.layers.Layer):
"""A single FPN cell."""
def __init__(self, config, name='fpn_cell'):
super().__init__(name=name)
self.config = config
if config.fpn_config:
self.fpn_config = config.fpn_config
else:
self.fpn_config = fpn_configs.get_fpn_config(config.fpn_name,
config.min_level,
config.max_level,
config.fpn_weight_method)
self.fnodes = []
for i, fnode_cfg in enumerate(self.fpn_config.nodes):
logging.info('fnode %d : %s', i, fnode_cfg)
fnode = FNode(
fnode_cfg['feat_level'] - self.config.min_level,
fnode_cfg['inputs_offsets'],
config.fpn_num_filters,
config.apply_bn_for_resampling,
config.is_training_bn,
config.conv_after_downsample,
config.conv_bn_act_pattern,
config.separable_conv,
config.act_type,
weight_method=self.fpn_config.weight_method,
data_format=config.data_format,
name='fnode%d' % i)
self.fnodes.append(fnode)
def call(self, feats, training):
for fnode in self.fnodes:
feats = fnode(feats, training)
return feats
class EfficientDetNet(tf.keras.Model):
"""EfficientDet keras network without pre/post-processing."""
def __init__(self, model_name=None, config=None, name=''):
"""Initialize model."""
super().__init__(name=name)
config = config or hparams_config.get_efficientdet_config(model_name)
self.config = config
# Backbone.
backbone_name = config.backbone_name
is_training_bn = config.is_training_bn
if 'efficientnet' in backbone_name:
override_params = {
'batch_norm':
normalization_builder.batch_norm_class(is_training_bn),
'relu_fn':
functools.partial(activation_builder.activation_fn, act_type=config.act_type),
'weight_decay': config.weight_decay,
'data_format': config.data_format,
'activation': config.act_type,
}
if 'b0' in backbone_name:
override_params['survival_prob'] = 0.0
override_params['data_format'] = config.data_format
self.backbone = efficientnet_model.EfficientNet().from_name(
model_name=backbone_name, features_only=True, model_weights_path=config.backbone_init,
weights_format='saved_model', overrides=override_params)
# Feature network.
self.resample_layers = [] # additional resampling layers.
for level in range(6, config.max_level + 1):
# Adds a coarser level by downsampling the last feature map.
self.resample_layers.append(
ResampleFeatureMap(
feat_level=(level - config.min_level),
target_num_channels=config.fpn_num_filters,
apply_bn=config.apply_bn_for_resampling,
is_training_bn=config.is_training_bn,
conv_after_downsample=config.conv_after_downsample,
data_format=config.data_format,
name='resample_p%d' % level,
))
self.fpn_cells = FPNCells(config)
# class/box output prediction network.
num_anchors = len(config.aspect_ratios) * config.num_scales
num_filters = config.fpn_num_filters
for head in config.heads:
if head == 'object_detection':
self.class_net = ClassNet(
num_classes=config.num_classes,
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
data_format=config.data_format)
self.box_net = BoxNet(
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
data_format=config.data_format)
if head == 'segmentation':
self.seg_head = SegmentationHead(
num_classes=config.seg_num_classes,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
data_format=config.data_format)
def _init_set_name(self, name, zero_based=True):
"""A hack to allow empty model name for legacy checkpoint compitability."""
if name == '': # pylint: disable=g-explicit-bool-comparison
self._name = name
else:
super()._init_set_name(name, zero_based)
def call(self, inputs, training):
config = self.config
# call backbone network.
all_feats = self.backbone(inputs, training=training)
feats = all_feats[config.min_level:config.max_level + 1]
# Build additional input features that are not from backbone.
for resample_layer in self.resample_layers:
feats.append(resample_layer(feats[-1], training, None))
# call feature network.
fpn_feats = self.fpn_cells(feats, training)
# call class/box/seg output network.
outputs = []
if 'object_detection' in config.heads:
class_outputs = self.class_net(fpn_feats, training)
box_outputs = self.box_net(fpn_feats, training)
outputs.extend([class_outputs, box_outputs])
if 'segmentation' in config.heads:
seg_outputs = self.seg_head(fpn_feats, training)
outputs.append(seg_outputs)
return tuple(outputs)
class EfficientDetModel(EfficientDetNet):
"""EfficientDet full keras model with pre and post processing."""
def _preprocessing(self, raw_images, image_size, mode=None):
"""Preprocess images before feeding to the network."""
if not mode:
return raw_images, None
image_size = model_utils.parse_image_size(image_size)
if mode != 'infer':
# We only support inference for now.
raise ValueError('preprocessing must be infer or empty')
def map_fn(image):
input_processor = dataloader.DetectionInputProcessor(
image, image_size)
input_processor.normalize_image()
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
image_scale = input_processor.image_scale_to_original
return image, image_scale
if raw_images.shape.as_list()[0]: # fixed batch size.
batch_size = raw_images.shape.as_list()[0]
outputs = [map_fn(raw_images[i]) for i in range(batch_size)]
return [tf.stack(y) for y in zip(*outputs)]
# otherwise treat it as dynamic batch size.
return tf.vectorized_map(map_fn, raw_images)
def _postprocess(self, cls_outputs, box_outputs, scales, mode='global'):
"""Postprocess class and box predictions."""
if not mode:
return cls_outputs, box_outputs
# TODO(tanmingxing): remove this cast once FP16 works postprocessing.
cls_outputs = [tf.cast(i, tf.float32) for i in cls_outputs]
box_outputs = [tf.cast(i, tf.float32) for i in box_outputs]
if mode == 'global':
return postprocess.postprocess_global(self.config.as_dict(), cls_outputs,
box_outputs, scales)
if mode == 'per_class':
return postprocess.postprocess_per_class(self.config.as_dict(),
cls_outputs, box_outputs, scales)
raise ValueError('Unsupported postprocess mode {}'.format(mode))
def call(self, inputs, training=False, pre_mode='infer', post_mode='global'):
"""Call this model.
Args:
inputs: a tensor with common shape [batch, height, width, channels].
training: If true, it is training mode. Otherwise, eval mode.
pre_mode: preprocessing mode, must be {None, 'infer'}.
post_mode: postprocessing mode, must be {None, 'global', 'per_class'}.
Returns:
the output tensor list.
"""
config = self.config
# preprocess.
inputs, scales = self._preprocessing(inputs, config.image_size, pre_mode)
# network.
outputs = super().call(inputs, training)
if 'object_detection' in config.heads and post_mode:
# postprocess for detection
det_outputs = self._postprocess(outputs[0], outputs[1], scales, post_mode)
outputs = det_outputs + outputs[2:]
return outputs
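A minimal end-to-end usage sketch (hypothetical, not part of the upstream file): it assumes the constructor resolves a model name such as 'efficientdet-d0' through the upstream config registry and that a detection head is configured.

```python
import tensorflow as tf

# Hypothetical sketch: the model name and input size are assumptions, not values
# taken from this file.
model = EfficientDetModel(model_name='efficientdet-d0')

raw_images = tf.random.uniform([1, 512, 512, 3], maxval=255.0)
# pre_mode='infer' resizes/normalizes the raw images; post_mode='global' decodes
# boxes and applies global NMS, so the outputs are ready-to-use detections.
outputs = model(raw_images, training=False, pre_mode='infer', post_mode='global')
```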
|
CUDA-Optimized/FastSpeech/fastspeech/dataset | dataset | __init__ | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
PyTorch/Forecasting/TFT/triton/deployment_toolkit/perf_analyzer | perf_analyzer | __init__ | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .perf_analyzer import PerfAnalyzer # noqa: F401
from .perf_config import PerfAnalyzerConfig # noqa: F401
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | dataset_builder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""tf.data.Dataset builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wish to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
import functools
import tensorflow as tf
import horovod.tensorflow as hvd
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
def make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
This is useful in cases where make_one_shot_iterator wouldn't work because
the graph contains a hash table that needs to be initialized.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.data.Iterator`.
"""
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
def read_dataset(file_read_func, input_files, config):
"""Reads a dataset, and handles repetition and shuffling.
Args:
file_read_func: Function to use in tf.contrib.data.parallel_interleave, to
read every individual file into a tf.data.Dataset.
input_files: A list of file paths to read.
config: A input_reader_builder.InputReader object.
Returns:
A tf.data.Dataset of (undecoded) tf-records based on config.
"""
# Shard, shuffle, and read files.
filenames = tf.gfile.Glob(input_files)
if not filenames:
raise ValueError('Invalid input path specified in '
'`input_reader_config`.')
num_readers = config.num_readers
if num_readers > len(filenames):
num_readers = len(filenames)
tf.logging.warning('num_readers has been reduced to %d to match input file '
'shards.' % num_readers)
filename_dataset = tf.data.Dataset.from_tensor_slices(filenames)
if config.shuffle:
filename_dataset = filename_dataset.shuffle(
config.filenames_shuffle_buffer_size)
elif num_readers > 1:
tf.logging.warning('`shuffle` is false, but the input data stream is '
'still slightly shuffled since `num_readers` > 1.')
filename_dataset = filename_dataset.repeat(config.num_epochs or None)
records_dataset = filename_dataset.apply(
tf.contrib.data.parallel_interleave(
file_read_func,
cycle_length=num_readers,
block_length=config.read_block_length,
sloppy=config.shuffle))
if config.shuffle:
records_dataset = records_dataset.shuffle(config.shuffle_buffer_size)
return records_dataset
def build(input_reader_config, batch_size=None, transform_input_data_fn=None, multi_gpu=True):
"""Builds a tf.data.Dataset.
Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all
records. Applies a padded batch to the resulting dataset.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
batch_size: Batch size. If batch size is None, no batching is performed.
transform_input_data_fn: Function to apply transformation to all records,
or None if no extra decoding is required.
multi_gpu: Whether to shard the dataset across Horovod workers (one shard per rank).
Returns:
A tf.data.Dataset based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
config = input_reader_config.tf_record_input_reader
if not config.input_path:
raise ValueError('At least one input path must be specified in '
'`input_reader_config`.')
label_map_proto_file = None
if input_reader_config.HasField('label_map_path'):
label_map_proto_file = input_reader_config.label_map_path
decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=input_reader_config.load_instance_masks,
instance_mask_type=input_reader_config.mask_type,
label_map_proto_file=label_map_proto_file,
use_display_name=input_reader_config.use_display_name,
num_additional_channels=input_reader_config.num_additional_channels)
def process_fn(value):
"""Sets up tf graph that decodes, transforms and pads input data."""
processed_tensors = decoder.decode(value)
if transform_input_data_fn is not None:
processed_tensors = transform_input_data_fn(processed_tensors)
return processed_tensors
dataset = read_dataset(
functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
config.input_path[:], input_reader_config)
if multi_gpu:
dataset = dataset.shard(hvd.size(), hvd.rank())
# TODO(rathodv): make batch size a required argument once the old binaries
# are deleted.
if batch_size:
num_parallel_calls = batch_size * input_reader_config.num_parallel_batches
else:
num_parallel_calls = input_reader_config.num_parallel_map_calls
dataset = dataset.map(
process_fn,
num_parallel_calls=num_parallel_calls)
if batch_size:
dataset = dataset.apply(
tf.contrib.data.batch_and_drop_remainder(batch_size))
dataset = dataset.prefetch(input_reader_config.num_prefetch_batches)
return dataset
raise ValueError('Unsupported input_reader_config.')
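A usage sketch (hypothetical paths, single-GPU): the InputReader proto is parsed from text format and handed to `build`, mirroring what the training binaries do.

```python
from google.protobuf import text_format
from object_detection.protos import input_reader_pb2
from object_detection.builders import dataset_builder

# Hypothetical config; the record and label-map paths are placeholders.
config_text = """
  shuffle: true
  label_map_path: '/data/mscoco_label_map.pbtxt'
  tf_record_input_reader { input_path: '/data/coco_train.record-*' }
"""
input_reader_config = text_format.Parse(config_text,
                                        input_reader_pb2.InputReader())
# multi_gpu=False skips the Horovod shard() call, so this also works outside mpirun.
dataset = dataset_builder.build(input_reader_config, batch_size=32, multi_gpu=False)
iterator = dataset_builder.make_initializable_iterator(dataset)
```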
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer | maintainer | __init__ | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .container import Container # noqa: F401
from .docker.maintainer import DockerMaintainer # noqa: F401
from .maintainer import Maintainer # noqa: F401
|
PyTorch/DrugDiscovery/MoFlow/moflow/runtime | runtime | logger | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
import logging
import time
import dllogger
from dllogger import JSONStreamBackend, StdOutBackend, Verbosity
import numpy as np
LOGGING_LEVELS = dict(enumerate([logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]))
def get_dllogger(args):
backends = []
if args.local_rank == 0:
backends.append(StdOutBackend(Verbosity.VERBOSE))
if args.log_path is not None:
backends.append(JSONStreamBackend(Verbosity.VERBOSE, args.log_path, append=True))
dllogger.init(backends=backends)
return dllogger
def setup_logging(args):
logging.basicConfig(
format='%(asctime)s %(levelname)s:\t%(message)s', datefmt='%H:%M:%S', level=LOGGING_LEVELS[args.verbosity], force=True
)
return get_dllogger(args)
class BaseLogger(ABC):
@abstractmethod
def update(self, **kwargs) -> None:
pass
@abstractmethod
def process_stats(self) -> dict:
return {}
@abstractmethod
def reset(self) -> None:
pass
def summarize(self, step: tuple) -> None:
stats = self.process_stats()
if len(stats) == 0:
logging.warning('Empty stats for logging, skipping')
return
self.logger.log(step=step, data=stats)
self.logger.flush()
class PerformanceLogger(BaseLogger):
def __init__(self, logger, batch_size: int, warmup_steps: int = 100, mode: str = 'train'):
self.logger = logger
self.batch_size = batch_size
self.warmup_steps = warmup_steps
self._step = 0
self._timestamps = []
self.mode = mode
def update(self, **kwargs) -> None:
self._step += 1
if self._step >= self.warmup_steps:
self._timestamps.append(time.time())
def reset(self) -> None:
self._step = 0
self._timestamps = []
def process_stats(self) -> dict:
if len(self._timestamps) < 2:
logging.warning('Cannot process performance stats - fewer than 2 measurements collected')
return {}
timestamps = np.asarray(self._timestamps)
deltas = np.diff(timestamps)
throughput = (self.batch_size / deltas).mean()
stats = {
f'throughput_{self.mode}': throughput,
f'latency_{self.mode}_mean': deltas.mean(),
f'total_time_{self.mode}': timestamps[-1] - timestamps[0],
}
for level in [90, 95, 99]:
stats.update({f'latency_{self.mode}_{level}': np.percentile(deltas, level)})
return stats
class MetricsLogger(BaseLogger):
def __init__(self, logger, mode: str = 'train'):
self.logger = logger
self.mode = mode
self._metrics_dict = {}
def update(self, metrics: dict, **kwargs) -> None:
for metrics_name, metric_val in metrics.items():
if metrics_name not in self._metrics_dict:
self._metrics_dict[metrics_name] = []
self._metrics_dict[metrics_name].append(float(metric_val))
def reset(self) -> None:
self._metrics_dict = {}
def process_stats(self) -> dict:
stats = {}
for metric_name, metric_val in self._metrics_dict.items():
stats[metric_name] = np.mean(metric_val)
return stats
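A usage sketch (hypothetical argument values) showing how the two loggers are typically driven together with the dllogger backends configured above:

```python
from argparse import Namespace

# Hypothetical args; only the fields read by setup_logging/get_dllogger are set.
args = Namespace(local_rank=0, log_path='moflow_train.json', verbosity=2)
logger = setup_logging(args)

perf = PerformanceLogger(logger, batch_size=512, warmup_steps=10, mode='train')
metrics = MetricsLogger(logger, mode='train')

for step in range(100):
    # ... one training step would run here ...
    perf.update()
    metrics.update(metrics={'nll': 1.23})  # placeholder metric value

perf.summarize(step=(0, 100))
metrics.summarize(step=(0, 100))
```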
|
TensorFlow/Detection/SSD/examples | examples | SSD320_FP16_8GPU_BENCHMARK | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CKPT_DIR=${1:-"/results/SSD320_FP16_8GPU"}
PIPELINE_CONFIG_PATH=${2:-"/workdir/models/research/configs"}"/ssd320_bench.config"
GPUS=8
TENSOR_OPS=0
export TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
TRAIN_LOG=$(mpirun --allow-run-as-root \
-np $GPUS \
-H localhost:$GPUS \
-bind-to none \
-map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH \
-mca pml ob1 \
-mca btl ^openib \
python -u ./object_detection/model_main.py \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
--model_dir=${CKPT_DIR} \
--alsologtostderr \
--amp \
"${@:3}" 2>&1)
PERF=$(echo "$TRAIN_LOG" | sed -n 's|.*global_step/sec: \(\S\+\).*|\1|p' | python -c "import sys; x = sys.stdin.readlines(); x = [float(a) for a in x[int(len(x)*3/4):]]; print(32*$GPUS*sum(x)/len(x), 'img/s')")
mkdir -p $CKPT_DIR
echo "$GPUS GPUs mixed precision training performance: $PERF" | tee $CKPT_DIR/train_log
echo "$TRAIN_LOG" >> $CKPT_DIR/train_log
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | dropoutGenerator | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "dropoutGenerator.h"
#include "taco2Utils.h"
#include <stdexcept>
using namespace taco2;
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int DROPOUT_BLOCK_SIZE = 256;
} // namespace
/******************************************************************************
* CUDA KERNELS ***************************************************************
*****************************************************************************/
__global__ void dropoutKernel(curandState_t* const states, const int numStates, float* const outValues,
const int numValues, const float dropProbability, const float scale)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numStates)
{
// load random state information from global memory
curandState_t localState = states[tid];
for (int index = tid; index < numValues; index += numStates)
{
outValues[index] = scale * (curand_uniform(&localState) < dropProbability);
}
// save random state information back to global memory
states[tid] = localState;
}
}
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
DropoutGenerator::DropoutGenerator(
const int maxBatchSize,
const int maxChunkSize,
const int numValues,
const float prob,
const unsigned int seed) :
mProb(prob),
mMaxChunkSize(maxChunkSize),
mNumValues(numValues),
mGeneratedChunks(0),
mBatchSize(0),
mDropoutDevice(maxBatchSize * maxChunkSize * numValues),
mRand(mNumValues, seed)
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void DropoutGenerator::reset(unsigned int seed, cudaStream_t stream)
{
mRand.setSeed(seed, stream);
}
void DropoutGenerator::generate(const int batchSize, const int numChunks, cudaStream_t stream)
{
if (numChunks > mMaxChunkSize)
{
throw std::runtime_error("Cannot generate more chunks than maximum: " + std::to_string(numChunks) + " vs. "
+ std::to_string(mMaxChunkSize));
}
const dim3 grid(
Taco2Utils::roundUpBlocks(mRand.size(), DROPOUT_BLOCK_SIZE));
const dim3 block(DROPOUT_BLOCK_SIZE);
const float scale = 1.0f / (1.0f - mProb);
assert(mRand.size() <= grid.x * block.x);
mBatchSize = batchSize;
mGeneratedChunks = numChunks;
dropoutKernel<<<grid, block, 0, stream>>>(
mRand.getRandomStates(),
mRand.size(),
mDropoutDevice.data(),
mGeneratedChunks * mNumValues * mBatchSize,
mProb,
scale);
}
const float* DropoutGenerator::get(const int chunk) const
{
if (chunk > mGeneratedChunks)
{
throw std::runtime_error("Cannot chunk past number generated: " + std::to_string(chunk) + " vs. "
+ std::to_string(mGeneratedChunks));
}
return mDropoutDevice.data() + chunk * mNumValues * mBatchSize;
}
} // namespace tts
|
PyTorch/Classification/GPUNet/triton/runner/maintainer/docker | docker | container | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
import docker
from docker.models.containers import ExecResult
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..container import Container
class DockerContainer(Container):
def __init__(self, name: str):
super().__init__(name)
self._container = None
self._docker_client = docker.from_env()
self._docker_api_client = docker.APIClient()
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> ExecResult:
"""
Run command inside container
Args:
command: command to execute
Returns:
ExecResult
"""
pass
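A hypothetical concrete subclass (image name and behaviour are assumptions, not taken from this module) showing how the abstract methods can be implemented with the docker-py client objects created in `__init__`:

```python
class TritonServerContainer(DockerContainer):
    """Hypothetical example subclass; the image name is a placeholder."""

    def __init__(self, name: str, image: str = "nvcr.io/nvidia/tritonserver:21.10-py3"):
        super().__init__(name)
        self._container_name = name
        self._image = image

    def start(self):
        # Run the container detached so commands can be exec'd into it later.
        self._container = self._docker_client.containers.run(
            self._image, name=self._container_name, detach=True
        )

    def stop(self):
        if self._container is not None:
            self._container.stop()
            self._container = None

    def run(self, command: str) -> ExecResult:
        # Execute a command inside the running container and return its result.
        return self._container.exec_run(command)
```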
|
PyTorch/SpeechSynthesis/HiFiGAN/hifigan | hifigan | models | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# ResBlock1, ResBlock2, Generator, DiscriminatorP, DiscriminatorS, MultiScaleDiscriminator,
# MultiPeriodDiscriminator, feature_loss, discriminator_loss, generator_loss,
# init_weights, get_padding
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from common import filter_warnings
from common.stft import STFT
from common.utils import AttrDict, init_weights, get_padding
LRELU_SLOPE = 0.1
class NoAMPConv1d(Conv1d):
def __init__(self, *args, no_amp=False, **kwargs):
super().__init__(*args, **kwargs)
self.no_amp = no_amp
def _cast(self, x, dtype):
if isinstance(x, (list, tuple)):
return [self._cast(t, dtype) for t in x]
else:
return x.to(dtype)
def forward(self, *args):
if not self.no_amp:
return super().forward(*args)
with torch.cuda.amp.autocast(enabled=False):
return self._cast(
super().forward(*self._cast(args, torch.float)), args[0].dtype)
class ResBlock1(nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, conf, channels, kernel_size=3, dilation=(1, 3, 5)):
super().__init__()
self.conf = conf
self.lrelu_slope = LRELU_SLOPE
ch, ks = channels, kernel_size
self.convs1 = nn.Sequential(*[
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[0]), dilation[0])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[1]), dilation[1])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[2]), dilation[2])),
])
self.convs2 = nn.Sequential(*[
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
])
self.convs1.apply(init_weights)
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c1(xt)
xt = F.leaky_relu(xt, self.lrelu_slope)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, conf, channels, kernel_size=3, dilation=(1, 3)):
super().__init__()
self.conf = conf
self.lrelu_slope = LRELU_SLOPE
ch, ks = channels, kernel_size
self.convs = nn.ModuleList([
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(kernel_size, dilation[0]), dilation[0])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(kernel_size, dilation[1]), dilation[1])),
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(nn.Module):
__constants__ = ['lrelu_slope', 'num_kernels', 'num_upsamples']
def __init__(self, conf):
super().__init__()
conf = AttrDict(conf)
self.conf = conf
self.num_kernels = len(conf.resblock_kernel_sizes)
self.num_upsamples = len(conf.upsample_rates)
self.conv_pre = weight_norm(
Conv1d(80, conf.upsample_initial_channel, 7, 1, padding=3))
self.lrelu_slope = LRELU_SLOPE
resblock = ResBlock1 if conf.resblock == '1' else ResBlock2
self.ups = []
for i, (u, k) in enumerate(zip(conf.upsample_rates,
conf.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(conf.upsample_initial_channel // (2 ** i),
conf.upsample_initial_channel // (2 ** (i + 1)),
k, u, padding=(k-u)//2)))
self.ups = nn.Sequential(*self.ups)
self.resblocks = []
for i in range(len(self.ups)):
resblock_list = []
ch = conf.upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(zip(conf.resblock_kernel_sizes,
conf.resblock_dilation_sizes)):
resblock_list.append(resblock(conf, ch, k, d))
resblock_list = nn.Sequential(*resblock_list)
self.resblocks.append(resblock_list)
self.resblocks = nn.Sequential(*self.resblocks)
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def load_state_dict(self, state_dict, strict=True):
# Fallback for old checkpoints (pre-ONNX fix)
new_sd = {}
for k, v in state_dict.items():
new_k = k
if 'resblocks' in k:
parts = k.split(".")
# only do this if the checkpoint type is older
if len(parts) == 5:
layer = int(parts[1])
new_layer = f"{layer//3}.{layer%3}"
new_k = f"resblocks.{new_layer}.{'.'.join(parts[2:])}"
new_sd[new_k] = v
# Fix for conv1d/conv2d/NHWC
curr_sd = self.state_dict()
for key in new_sd:
len_diff = len(new_sd[key].size()) - len(curr_sd[key].size())
if len_diff == -1:
new_sd[key] = new_sd[key].unsqueeze(-1)
elif len_diff == 1:
new_sd[key] = new_sd[key].squeeze(-1)
super().load_state_dict(new_sd, strict=strict)
def forward(self, x):
x = self.conv_pre(x)
for upsample_layer, resblock_group in zip(self.ups, self.resblocks):
x = F.leaky_relu(x, self.lrelu_slope)
x = upsample_layer(x)
xs = 0
for resblock in resblock_group:
xs += resblock(x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('HiFi-GAN: Removing weight norm.')
for l in self.ups:
remove_weight_norm(l)
for group in self.resblocks:
for block in group:
block.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class Denoiser(nn.Module):
""" Removes model bias from audio produced with hifigan """
def __init__(self, hifigan, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros', **infer_kw):
super().__init__()
w = next(p for name, p in hifigan.named_parameters()
if name.endswith('.weight'))
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).to(w.device)
mel_init = {'zeros': torch.zeros, 'normal': torch.randn}[mode]
mel_input = mel_init((1, 80, 88), dtype=w.dtype, device=w.device)
with torch.no_grad():
bias_audio = hifigan(mel_input, **infer_kw).float()
if len(bias_audio.size()) > 2:
bias_audio = bias_audio.squeeze(0)
elif len(bias_audio.size()) < 2:
bias_audio = bias_audio.unsqueeze(0)
assert len(bias_audio.size()) == 2
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
class DiscriminatorP(nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super().__init__()
self.period = period
norm_f = spectral_norm if use_spectral_norm else weight_norm
ks = kernel_size
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (ks, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
def share_params_of(self, dp):
assert len(self.convs) == len(dp.convs)
for c1, c2 in zip(self.convs, dp.convs):
c1.weight = c2.weight
c1.bias = c2.bias
class MultiPeriodDiscriminator(nn.Module):
def __init__(self, periods, concat_fwd=False):
super().__init__()
layers = [DiscriminatorP(p) for p in periods]
self.discriminators = nn.ModuleList(layers)
self.concat_fwd = concat_fwd
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if self.concat_fwd:
y_ds, fmaps = d(concat_discr_input(y, y_hat))
y_d_r, y_d_g, fmap_r, fmap_g = split_discr_output(y_ds, fmaps)
else:
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(nn.Module):
def __init__(self, use_spectral_norm=False, no_amp_grouped_conv=False):
super().__init__()
norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(1, 128, 15, 1, padding=7)),
norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(NoAMPConv1d(128, 256, 41, 2, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(256, 512, 41, 4, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(512, 1024, 41, 4, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(1024, 1024, 41, 1, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for l in self.convs:
# x = l(x.unsqueeze(-1)).squeeze(-1)
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(nn.Module):
def __init__(self, no_amp_grouped_conv=False, concat_fwd=False):
super().__init__()
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True, no_amp_grouped_conv=no_amp_grouped_conv),
DiscriminatorS(no_amp_grouped_conv=no_amp_grouped_conv),
DiscriminatorS(no_amp_grouped_conv=no_amp_grouped_conv),
])
self.meanpools = nn.ModuleList([
AvgPool1d(4, 2, padding=1),
AvgPool1d(4, 2, padding=1)
])
self.concat_fwd = concat_fwd
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if self.concat_fwd:
ys = concat_discr_input(y, y_hat)
if i != 0:
ys = self.meanpools[i-1](ys)
y_ds, fmaps = d(ys)
y_d_r, y_d_g, fmap_r, fmap_g = split_discr_output(y_ds, fmaps)
else:
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def concat_discr_input(y, y_hat):
return torch.cat((y, y_hat), dim=0)
def split_discr_output(y_ds, fmaps):
y_d_r, y_d_g = torch.chunk(y_ds, 2, dim=0)
fmap_r, fmap_g = zip(*(torch.chunk(f, 2, dim=0) for f in fmaps))
return y_d_r, y_d_g, fmap_r, fmap_g
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
return loss
def generator_loss(disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
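An inference sketch (hypothetical; the config values follow the public HiFi-GAN "v1" setup and are not taken from this repository's config files):

```python
import torch

config = {
    'resblock': '1',
    'upsample_rates': [8, 8, 2, 2],
    'upsample_kernel_sizes': [16, 16, 4, 4],
    'upsample_initial_channel': 512,
    'resblock_kernel_sizes': [3, 7, 11],
    'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
}
generator = Generator(config).eval()
generator.remove_weight_norm()  # typically done once before inference

denoiser = Denoiser(generator, mode='zeros')
mel = torch.zeros(1, 80, 100)  # placeholder mel-spectrogram [batch, n_mels, frames]
with torch.no_grad():
    audio = generator(mel)                           # [1, 1, frames * prod(upsample_rates)]
    audio = denoiser(audio.squeeze(1), strength=0.01)
```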
|
PyTorch/Translation/Transformer/fairseq | fairseq | criterions | import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
class CrossEntropyCriterion(_Loss):
def __init__(self, args):
super().__init__()
self.padding_idx = args.padding_idx
def forward(self, norm_probs, target, reduce=True):
"""Compute the loss for the given sample.
"""
lprobs = norm_probs.view(-1, norm_probs.size(-1))
target = target.view(-1)
loss = F.nll_loss(lprobs, target, size_average=False, ignore_index=self.padding_idx,
reduce=reduce)
return loss
class LabelSmoothedCrossEntropyCriterion(_Loss):
def __init__(self, args):
super().__init__()
self.eps = args.label_smoothing
self.padding_idx = args.padding_idx
def forward(self, norm_probs, target, reduce=True):
"""Compute the loss for the given sample.
"""
target = target.view(-1, 1)
lprobs = norm_probs.view(-1, norm_probs.size(-1))
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = self.eps / lprobs.size(-1)
loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
return loss
CRITERION_REGISTRY = {
'label_smoothed_cross_entropy' : LabelSmoothedCrossEntropyCriterion,
'cross_entropy' : CrossEntropyCriterion,
}
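A usage sketch (hypothetical shapes and hyperparameters): the criteria expect `norm_probs` to contain log-probabilities, since they apply `nll_loss`/`gather` to it directly.

```python
from argparse import Namespace
import torch
import torch.nn.functional as F

# Hypothetical args; only the fields read by the criterion constructors are set.
args = Namespace(label_smoothing=0.1, padding_idx=1)
criterion = CRITERION_REGISTRY['label_smoothed_cross_entropy'](args)

logits = torch.randn(2, 5, 100)            # [batch, seq_len, vocab]
norm_probs = F.log_softmax(logits, dim=-1)
target = torch.randint(0, 100, (2, 5))     # [batch, seq_len]
loss = criterion(norm_probs, target)
```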
|
PyTorch/LanguageModeling/BERT/triton/dist6l | dist6l | README | # Deploying the BERT model on Triton Inference Server
This folder contains instructions for deployment to run inference
on Triton Inference Server as well as a detailed performance analysis.
The purpose of this document is to help you with achieving
the best inference performance.
## Table of contents
- [Solution overview](#solution-overview)
- [Introduction](#introduction)
- [Deployment process](#deployment-process)
- [Setup](#setup)
- [Quick Start Guide](#quick-start-guide)
- [Performance](#performance)
- [Offline scenario](#offline-scenario)
- [Offline: NVIDIA A30, ONNX Runtime with FP16](#offline-nvidia-a30-onnx-runtime-with-fp16)
- [Offline: NVIDIA A30, ONNX Runtime with FP16, Backend accelerator TensorRT](#offline-nvidia-a30-onnx-runtime-with-fp16-backend-accelerator-tensorrt)
- [Offline: NVIDIA A30, NVIDIA TensorRT with FP16](#offline-nvidia-a30-nvidia-tensorrt-with-fp16)
- [Offline: NVIDIA A30, PyTorch with FP16](#offline-nvidia-a30-pytorch-with-fp16)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16, Backend accelerator TensorRT](#offline-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16-backend-accelerator-tensorrt)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-nvidia-tensorrt-with-fp16)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-pytorch-with-fp16)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16, Backend accelerator TensorRT](#offline-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16-backend-accelerator-tensorrt)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-nvidia-tensorrt-with-fp16)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-pytorch-with-fp16)
- [Offline: NVIDIA T4, ONNX Runtime with FP16](#offline-nvidia-t4-onnx-runtime-with-fp16)
- [Offline: NVIDIA T4, ONNX Runtime with FP16, Backend accelerator TensorRT](#offline-nvidia-t4-onnx-runtime-with-fp16-backend-accelerator-tensorrt)
- [Offline: NVIDIA T4, NVIDIA TensorRT with FP16](#offline-nvidia-t4-nvidia-tensorrt-with-fp16)
- [Offline: NVIDIA T4, PyTorch with FP16](#offline-nvidia-t4-pytorch-with-fp16)
- [Advanced](#advanced)
- [Prepare configuration](#prepare-configuration)
- [Step by step deployment process](#step-by-step-deployment-process)
- [Latency explanation](#latency-explanation)
- [Release notes](#release-notes)
- [Changelog](#changelog)
- [Known issues](#known-issues)
## Solution overview
### Introduction
The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server)
provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs.
The server provides an inference service via an HTTP or gRPC endpoint,
allowing remote clients to request inferencing for any number of GPU
or CPU models being managed by the server.
This README provides step-by-step deployment instructions for models generated
during training (as described in the [model README](../readme.md)).
Additionally, this README provides the corresponding deployment scripts that
ensure optimal GPU utilization during inferencing on Triton Inference Server.
### Deployment process
The deployment process consists of two steps:
1. Conversion.
The purpose of conversion is to find the best performing model
format supported by Triton Inference Server.
Triton Inference Server uses a number of runtime backends such as
[TensorRT](https://developer.nvidia.com/tensorrt),
[LibTorch](https://github.com/triton-inference-server/pytorch_backend) and
[ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend)
to support various model types. Refer to the
[Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton)
for a list of available backends.
2. Configuration.
Model configuration on Triton Inference Server, which generates
necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md).
After deployment, Triton Inference Server is used to evaluate the converted model in two steps:
1. Accuracy tests.
Produce results which are tested against given accuracy thresholds.
2. Performance tests.
Produce latency and throughput results for offline (static batching)
and online (dynamic batching) scenarios.
All steps are executed by the provided runner script. Refer to the [Quick Start Guide](#quick-start-guide).
## Setup
Ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PyTorch NGC container 21.10](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch)
* [Triton Inference Server NGC container 21.10](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver)
* [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html)
* [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU
## Quick Start Guide
Running the following scripts will build and launch the container with all required dependencies for native PyTorch as well as Triton Inference Server. This is necessary for running inference and can also be used for data download, processing, and training of the model.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/LanguageModeling/BERT/
```
2. Build and run a container that extends NGC PyTorch with the Triton client libraries and necessary dependencies.
```
./triton/dist6l/scripts/docker/build.sh
./triton/dist6l/scripts/docker/interactive.sh
```
3. Prepare dataset.
The runner requires a script that downloads and prepares the publicly available datasets.
The script downloads the necessary data to the DeepLearningExamples/PyTorch/LanguageModeling/BERT/datasets directory.
```
./triton/dist6l/runner/prepare_datasets.sh
```
4. Execute the runner script (note that a separate run script is provided for each supported NVIDIA GPU).
```
NVIDIA A30: ./triton/dist6l/runner/start_NVIDIA-A30.sh
NVIDIA DGX-1 (1x V100 32GB): ./triton/dist6l/runner/start_NVIDIA-DGX-1-\(1x-V100-32GB\).sh
NVIDIA DGX A100 (1x A100 80GB): ./triton/dist6l/runner/start_NVIDIA-DGX-A100-\(1x-A100-80GB\).sh
NVIDIA T4: ./triton/dist6l/runner/start_NVIDIA-T4.sh
```
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect
the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to
[NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Offline scenario
The offline scenario assumes the client and server are located on the same host. The tests use:
- tensors passed through shared memory between client and server (the Perf Analyzer flag `shared-memory=system` is used)
- a single request sent from the client to the server with a static batch size
#### Offline: NVIDIA A30, ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:----------------|
| GPU | NVIDIA A30 |
| Backend | ONNX Runtime |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | ONNX |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.49 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 437.0 | 0.0 | 0.2 | 0.0 | 0.1 | 2.0 | 0.0 | 0.0 | 2.3 | 2.3 | 2.3 | 2.3 | 2.3 |
| 8 | 1 | 712.0 | 0.0 | 0.4 | 0.0 | 0.1 | 10.7 | 0.0 | 0.0 | 11.2 | 11.3 | 11.3 | 11.4 | 11.2 |
| 16 | 1 | 744.0 | 0.0 | 0.5 | 0.1 | 0.1 | 20.9 | 0.0 | 0.0 | 21.4 | 21.7 | 21.8 | 21.9 | 21.4 |
#### Offline: NVIDIA A30, ONNX Runtime with FP16, Backend accelerator TensorRT
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------|
| GPU | NVIDIA A30 |
| Backend | ONNX Runtime |
| Backend accelerator | NVIDIA TensorRT |
| Precision | FP16 |
| Model format | ONNX |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | FP16 |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.48 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 698.0 | 0.0 | 0.2 | 0.0 | 0.1 | 1.2 | 0.0 | 0.0 | 1.4 | 1.4 | 1.4 | 1.5 | 1.4 |
| 8 | 1 | 1326.7 | 0.0 | 0.4 | 0.1 | 0.1 | 5.5 | 0.0 | 0.0 | 6.0 | 6.1 | 6.1 | 6.2 | 6.0 |
| 16 | 1 | 1454.5 | 0.0 | 0.4 | 0.0 | 0.1 | 10.4 | 0.0 | 0.0 | 10.9 | 11.0 | 11.0 | 11.1 | 10.9 |
#### Offline: NVIDIA A30, NVIDIA TensorRT with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:----------------|
| GPU | NVIDIA A30 |
| Backend | NVIDIA TensorRT |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | NVIDIA TensorRT |
| Max batch size | 16 |
| Number of model instances | 1 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.48 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 665.0 | 0.0 | 0.2 | 0.0 | 0.1 | 1.2 | 0.0 | 0.0 | 1.5 | 1.5 | 1.5 | 1.6 | 1.5 |
| 8 | 1 | 1280.0 | 0.0 | 0.4 | 0.0 | 0.1 | 5.6 | 0.0 | 0.0 | 6.2 | 6.3 | 6.3 | 6.4 | 6.2 |
| 16 | 1 | 1408.0 | 0.0 | 0.5 | 0.1 | 0.2 | 10.6 | 0.0 | 0.0 | 11.3 | 11.4 | 11.5 | 11.5 | 11.3 |
#### Offline: NVIDIA A30, PyTorch with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:------------------|
| GPU | NVIDIA A30 |
| Backend | PyTorch |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | TorchScript Trace |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.48 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 417.0 | 0.0 | 0.2 | 0.0 | 0.0 | 2.0 | 0.0 | 0.0 | 2.4 | 2.4 | 2.4 | 2.4 | 2.4 |
| 8 | 1 | 832.0 | 0.0 | 0.4 | 0.1 | 0.1 | 2.5 | 6.5 | 0.0 | 9.6 | 9.7 | 9.7 | 9.7 | 9.6 |
| 16 | 1 | 864.0 | 0.0 | 0.4 | 0.1 | 0.1 | 2.1 | 15.7 | 0.0 | 18.4 | 18.4 | 18.4 | 18.5 | 18.4 |
#### Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:----------------------------|
| GPU | NVIDIA DGX-1 (1x V100 32GB) |
| Backend | ONNX Runtime |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | ONNX |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.49 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 324.0 | 0.0 | 0.4 | 0.1 | 0.1 | 2.5 | 0.0 | 0.0 | 3.0 | 3.2 | 3.3 | 3.3 | 3.1 |
| 8 | 1 | 712.0 | 0.0 | 0.3 | 0.0 | 0.1 | 10.8 | 0.0 | 0.0 | 11.2 | 11.2 | 11.3 | 11.3 | 11.2 |
| 16 | 1 | 752.0 | 0.0 | 0.3 | 0.1 | 0.1 | 20.7 | 0.0 | 0.0 | 21.2 | 21.5 | 21.5 | 21.5 | 21.2 |
#### Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16, Backend accelerator TensorRT
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:----------------------------|
| GPU | NVIDIA DGX-1 (1x V100 32GB) |
| Backend | ONNX Runtime |
| Backend accelerator | NVIDIA TensorRT |
| Precision | FP16 |
| Model format | ONNX |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | FP16 |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.48 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 466.0 | 0.0 | 0.3 | 0.1 | 0.1 | 1.6 | 0.0 | 0.0 | 2.1 | 2.3 | 2.4 | 2.4 | 2.1 |
| 8 | 1 | 1056.0 | 0.0 | 0.3 | 0.1 | 0.1 | 7.0 | 0.0 | 0.0 | 7.6 | 7.7 | 7.8 | 7.9 | 7.6 |
| 16 | 1 | 1200.0 | 0.0 | 0.2 | 0.0 | 0.1 | 13.0 | 0.0 | 0.0 | 13.3 | 13.3 | 13.4 | 13.4 | 13.3 |
#### Offline: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:----------------------------|
| GPU | NVIDIA DGX-1 (1x V100 32GB) |
| Backend | NVIDIA TensorRT |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | NVIDIA TensorRT |
| Max batch size | 16 |
| Number of model instances | 1 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.48 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 492.0 | 0.0 | 0.2 | 0.0 | 0.1 | 1.6 | 0.0 | 0.0 | 2.0 | 2.1 | 2.1 | 2.2 | 2.0 |
| 8 | 1 | 1056.0 | 0.0 | 0.3 | 0.1 | 0.2 | 7.0 | 0.0 | 0.0 | 7.5 | 7.6 | 7.6 | 7.8 | 7.5 |
| 16 | 1 | 1152.0 | 0.0 | 0.4 | 0.1 | 0.2 | 13.1 | 0.0 | 0.0 | 13.7 | 13.8 | 13.8 | 13.8 | 13.7 |
#### Offline: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:----------------------------|
| GPU | NVIDIA DGX-1 (1x V100 32GB) |
| Backend | PyTorch |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | TorchScript Trace |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.49 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 227.8 | 0.0 | 0.3 | 0.1 | 0.1 | 3.8 | 0.0 | 0.0 | 4.3 | 4.6 | 4.6 | 4.7 | 4.4 |
| 8 | 1 | 872.0 | 0.0 | 0.3 | 0.1 | 0.1 | 3.7 | 5.0 | 0.0 | 9.1 | 9.3 | 9.3 | 9.3 | 9.1 |
| 16 | 1 | 944.0 | 0.0 | 0.3 | 0.1 | 0.1 | 3.5 | 12.8 | 0.0 | 16.7 | 16.9 | 17.0 | 17.1 | 16.7 |
#### Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-------------------------------|
| GPU | NVIDIA DGX A100 (1x A100 80GB) |
| Backend | ONNX Runtime |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | ONNX |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.50 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 514.0 | 0.0 | 0.1 | 0.0 | 0.1 | 1.7 | 0.0 | 0.0 | 1.9 | 1.9 | 1.9 | 2.0 | 1.9 |
| 8 | 1 | 1360.0 | 0.0 | 0.1 | 0.0 | 0.1 | 5.6 | 0.0 | 0.0 | 5.8 | 5.8 | 5.9 | 7.3 | 5.9 |
| 16 | 1 | 1536.0 | 0.0 | 0.1 | 0.0 | 0.1 | 10.2 | 0.0 | 0.0 | 10.4 | 10.4 | 10.5 | 10.6 | 10.4 |
#### Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16, Backend accelerator TensorRT
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-------------------------------|
| GPU | NVIDIA DGX A100 (1x A100 80GB) |
| Backend | ONNX Runtime |
| Backend accelerator | NVIDIA TensorRT |
| Precision | FP16 |
| Model format | ONNX |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | FP16 |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.48 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 998.0 | 0.0 | 0.1 | 0.0 | 0.1 | 0.8 | 0.0 | 0.0 | 1.0 | 1.0 | 1.0 | 1.1 | 1.0 |
| 8 | 1 | 2512.0 | 0.0 | 0.1 | 0.0 | 0.1 | 3.0 | 0.0 | 0.0 | 3.2 | 3.2 | 3.2 | 3.3 | 3.2 |
| 16 | 1 | 2880.0 | 0.0 | 0.1 | 0.0 | 0.1 | 5.3 | 0.0 | 0.0 | 5.5 | 5.5 | 5.6 | 5.6 | 5.5 |
#### Offline: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-------------------------------|
| GPU | NVIDIA DGX A100 (1x A100 80GB) |
| Backend | NVIDIA TensorRT |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | NVIDIA TensorRT |
| Max batch size | 16 |
| Number of model instances | 1 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.48 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 862.0 | 0.0 | 0.1 | 0.0 | 0.1 | 1.0 | 0.0 | 0.0 | 1.1 | 1.1 | 1.1 | 5.5 | 1.1 |
| 8 | 1 | 2312.0 | 0.0 | 0.1 | 0.0 | 0.1 | 3.2 | 0.0 | 0.0 | 3.3 | 3.4 | 4.4 | 6.2 | 3.5 |
| 16 | 1 | 2784.0 | 0.0 | 0.1 | 0.0 | 0.1 | 5.5 | 0.0 | 0.0 | 5.6 | 5.6 | 6.3 | 9.4 | 5.7 |
#### Offline: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-------------------------------|
| GPU | NVIDIA DGX A100 (1x A100 80GB) |
| Backend | PyTorch |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | TorchScript Trace |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.48 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 358.0 | 0.0 | 0.1 | 0.0 | 0.0 | 2.6 | 0.0 | 0.0 | 2.8 | 2.8 | 2.8 | 2.9 | 2.8 |
| 8 | 1 | 1592.0 | 0.0 | 0.1 | 0.0 | 0.0 | 2.8 | 2.0 | 0.0 | 5.0 | 5.0 | 5.1 | 5.2 | 5.0 |
| 16 | 1 | 1776.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.7 | 6.0 | 0.0 | 8.9 | 9.0 | 9.1 | 9.1 | 8.9 |
#### Offline: NVIDIA T4, ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:----------------|
| GPU | NVIDIA T4 |
| Backend | ONNX Runtime |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | ONNX |
| Max batch size | 1 |
| Number of model instances | 1 |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.49 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 219.8 | 0.0 | 0.5 | 0.1 | 0.0 | 3.9 | 0.0 | 0.0 | 4.5 | 4.6 | 4.6 | 4.6 | 4.5 |
| 8 | 1 | 256.0 | 0.0 | 0.5 | 0.1 | 0.1 | 30.2 | 0.0 | 0.0 | 30.9 | 31.1 | 31.1 | 31.2 | 30.9 |
| 16 | 1 | 259.9 | 0.0 | 0.4 | 0.1 | 0.1 | 60.1 | 0.0 | 0.0 | 60.6 | 61.3 | 61.3 | 61.6 | 60.7 |
#### Offline: NVIDIA T4, ONNX Runtime with FP16, Backend accelerator TensorRT
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------|
| GPU | NVIDIA T4 |
| Backend | ONNX Runtime |
| Backend accelerator | NVIDIA TensorRT |
| Precision | FP16 |
| Model format | ONNX |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | FP16 |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.47 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 362.0 | 0.0 | 0.5 | 0.1 | 0.0 | 2.2 | 0.0 | 0.0 | 2.7 | 2.8 | 2.8 | 2.8 | 2.7 |
| 8 | 1 | 440.0 | 0.0 | 0.5 | 0.1 | 0.1 | 17.2 | 0.0 | 0.0 | 17.6 | 19.1 | 19.3 | 19.5 | 17.9 |
| 16 | 1 | 456.0 | 0.0 | 0.5 | 0.1 | 0.1 | 34.3 | 0.0 | 0.0 | 35.0 | 36.2 | 36.4 | 36.7 | 35.0 |
#### Offline: NVIDIA T4, NVIDIA TensorRT with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:----------------|
| GPU | NVIDIA T4 |
| Backend | NVIDIA TensorRT |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | NVIDIA TensorRT |
| Max batch size | 16 |
| Number of model instances | 1 |
| NVIDIA TensorRT Capture CUDA Graph | Disabled |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.47 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 350.0 | 0.0 | 0.5 | 0.0 | 0.1 | 2.2 | 0.0 | 0.0 | 2.8 | 2.9 | 3.0 | 3.0 | 2.8 |
| 8 | 1 | 440.0 | 0.0 | 0.6 | 0.0 | 0.1 | 17.1 | 0.0 | 0.0 | 17.6 | 19.2 | 19.2 | 19.5 | 17.9 |
| 16 | 1 | 456.0 | 0.0 | 0.6 | 0.0 | 0.1 | 34.1 | 0.0 | 0.0 | 34.9 | 35.9 | 36.1 | 36.4 | 34.8 |
#### Offline: NVIDIA T4, PyTorch with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:------------------|
| GPU | NVIDIA T4 |
| Backend | PyTorch |
| Backend accelerator | - |
| Precision | FP16 |
| Model format | TorchScript Trace |
| Max batch size | 16 |
| Number of model instances | 1 |
| Accelerator Precision | - |
| Max Seq Length | 384 |
| SQuAD v1.1 F1 Score | 88.48 |
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 258.0 | 0.0 | 0.5 | 0.1 | 0.0 | 2.2 | 1.1 | 0.0 | 3.9 | 3.9 | 3.9 | 3.9 | 3.9 |
| 8 | 1 | 316.0 | 0.0 | 0.5 | 0.1 | 0.1 | 2.2 | 22.4 | 0.0 | 25.2 | 25.8 | 26.1 | 26.2 | 25.2 |
| 16 | 1 | 314.7 | 0.0 | 0.4 | 0.1 | 0.1 | 2.1 | 47.5 | 0.0 | 50.1 | 51.4 | 51.7 | 52.0 | 50.3 |
## Advanced
### Prepare configuration
You can use environment variables to set the parameters of your inference
configuration.
Triton deployment scripts support several inference runtimes listed in the table below:
| Inference runtime | Mnemonic used in scripts |
|-------------------|--------------------------|
| [TorchScript Tracing](https://pytorch.org/docs/stable/jit.html) | `ts-trace` |
| [TorchScript Scripting](https://pytorch.org/docs/stable/jit.html) | `ts-script` |
| [ONNX](https://onnx.ai) | `onnx` |
| [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` |
Example values of some key variables in one configuration:
```
FORMAT="onnx"
PRECISION="fp16"
EXPORT_FORMAT="onnx"
EXPORT_PRECISION="fp16"
ACCELERATOR="trt"
ACCELERATOR_PRECISION="fp16"
CAPTURE_CUDA_GRAPH="0"
BATCH_SIZE="16"
MAX_BATCH_SIZE="16"
MAX_SEQ_LENGTH="384"
CHECKPOINT_VARIANT="dist-6l-qa"
CHECKPOINT_DIR=${CHECKPOINTS_DIR}/${CHECKPOINT_VARIANT}
TRITON_MAX_QUEUE_DELAY="1"
TRITON_GPU_ENGINE_COUNT="1"
TRITON_PREFERRED_BATCH_SIZES="1"
```
The deployment process consists of the following steps.
1. Export step. We export the model into the format set by `${EXPORT_FORMAT}`, with precision set by `${EXPORT_PRECISION}`.
2. Convert step. We convert the exported model from `${EXPORT_FORMAT}` into `${FORMAT}`. The precision of the model in `${FORMAT}` is set by `${PRECISION}`.
3. Deploy step. We create the Triton model repository.
The most common use-case scenario is to export the model into ONNX format, and then convert it into TensorRT.
`${ACCELERATOR}` here refers to the accelerator of the ONNX format, which can be either `trt` or `none`.
All the above values are set in the `triton/dist6l/scripts/setup_parameters.sh` file.
### Step by step deployment process
Commands described below can be used for exporting, converting and profiling the model.
#### Clone Repository
IMPORTANT: This step is executed on the host computer.
<details>
<summary>Clone Repository Command</summary>
```shell
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/LanguageModeling/BERT/
```
</details>
#### Setup Environment
Set up the environment on the host computer and start Triton Inference Server.
<details>
<summary>Setup Environment Command</summary>
```shell
source ./triton/dist6l/scripts/setup_environment.sh
./triton/dist6l/scripts/docker/triton_inference_server.sh
```
</details>
#### Setup Container
Build and run a container that extends the NGC PyTorch container with the Triton Inference Server client libraries and dependencies.
<details>
<summary>Setup Container Command</summary>
```shell
./triton/dist6l/scripts/docker/build.sh
./triton/dist6l/scripts/docker/interactive.sh
```
</details>
#### Setup Parameters and Environment
Set up the environment and deployment parameters inside the interactive container.
<details>
<summary>Setup Environment Command</summary>
```shell
source ./triton/dist6l/scripts/setup_environment.sh
```
</details>
<details>
<summary>Setup Parameters Command</summary>
```shell
source ./triton/dist6l/scripts/setup_parameters.sh
```
</details>
#### Prepare Dataset and Checkpoint
Prepare the datasets and checkpoint manually if you are not using the automatic evaluation scripts.
<details>
<summary>Prepare Datasets Command</summary>
```shell
./triton/dist6l/runner/prepare_datasets.sh
```
</details>
<details>
<summary>Prepare Checkpoint Command</summary>
Download the checkpoint from
```
https://catalog.ngc.nvidia.com/orgs/nvidia/dle/models/bert_pyt_ckpt_distill_6l_768d_3072di_12h_squad
```
Create the directory for the checkpoint and copy the downloaded checkpoint content into it:
```shell
mkdir -p ${CHECKPOINTS_DIR}/dist-6l-qa
```
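For example, if the downloaded checkpoint content (`pytorch_model.bin`, `config.json`, `vocab.txt`) was unpacked into a local directory, the copy step could look like the sketch below; the local directory name is an assumption and should be adjusted to wherever you unpacked the files:
```shell
# Hypothetical local directory; adjust to the location of the unpacked checkpoint
cp -r ./bert_pyt_ckpt_distill_6l_768d_3072di_12h_squad/* ${CHECKPOINTS_DIR}/dist-6l-qa/
```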
</details>
#### Export Model
Export the model from Python source to the desired format (for example, ONNX or TorchScript).
<details>
<summary>Export Model Command</summary>
```shell
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--dataloader triton/dataloader.py \
--ignore-unknown-parameters \
--onnx-opset 13 \
${FLAG} \
\
--config-file ${CHECKPOINT_DIR}/config.json \
--checkpoint ${CHECKPOINT_DIR}/pytorch_model.bin \
--precision ${EXPORT_PRECISION} \
\
--vocab-file ${CHECKPOINT_DIR}/vocab.txt \
--max-seq-length ${MAX_SEQ_LENGTH} \
--predict-file ${DATASETS_DIR}/data/squad/v1.1/dev-v1.1.json \
--batch-size ${MAX_BATCH_SIZE}
```
</details>
#### Convert Model
Convert the model from training to inference format (e.g. TensorRT).
<details>
<summary>Convert Model Command</summary>
```shell
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--inputs input__0:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \
--inputs input__1:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \
--inputs input__2:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \
--min-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
--max-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
--opt-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
--max-batch-size ${MAX_BATCH_SIZE} \
--tensorrt-max-workspace-size 8589934592 \
--atol 2 output__0=5.0 \
output__1=5.0 \
--rtol 1 output__0=5.0 \
output__1=5.0
```
</details>
#### Deploy Model
Configure the model on Triton Inference Server.
Generate the configuration from your model repository.
<details>
<summary>Deploy Model Command</summary>
```shell
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${CONFIG_FORMAT} \
--model-control-mode ${TRITON_LOAD_MODEL_METHOD} \
--verbose \
--load-model \
--load-model-timeout-s 100 \
\
--backend-accelerator ${ACCELERATOR} \
--tensorrt-precision ${ACCELERATOR_PRECISION} \
--max-batch-size ${MBS} \
--preferred-batch-sizes ${TRITON_PREFERRED_BATCH_SIZES} \
--max-queue-delay-us ${TRITON_MAX_QUEUE_DELAY} \
--engine-count-per-device gpu=${TRITON_GPU_ENGINE_COUNT}
```
</details>
#### Prepare Triton Profiling Data
Prepare data used for profiling on Triton server.
<details>
<summary>Prepare Triton Profiling Data Command</summary>
```shell
mkdir -p ${SHARED_DIR}/input_data
python triton/prepare_input_data.py \
--dataloader triton/dataloader.py \
--input-data-dir ${SHARED_DIR}/input_data \
\
--batch-size ${MAX_BATCH_SIZE} \
--max-seq-length ${MAX_SEQ_LENGTH} \
--predict-file ${DATASETS_DIR}/data/squad/v1.1/dev-v1.1.json \
--vocab-file ${CHECKPOINT_DIR}/vocab.txt
```
</details>
#### Triton Performance Offline Test
In this scenario, we want to maximize throughput. It assumes that your data is already available
for inference, or that it saturates the maximum batch size quickly.
Triton Inference Server supports offline scenarios with static batching.
Static batching allows inference requests to be served
as they are received. The largest improvements to throughput come
from increasing the batch size due to efficiency gains in the GPU with larger
batches.
<details>
<summary>Triton Performance Offline Test Command</summary>
```shell
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data ${SHARED_DIR}/input_data/data.json \
--input-shapes input__0:${MAX_SEQ_LENGTH} \
--input-shapes input__1:${MAX_SEQ_LENGTH} \
--input-shapes input__2:${MAX_SEQ_LENGTH} \
--batch-sizes ${BATCH_SIZE} \
--number-of-triton-instances ${TRITON_INSTANCES} \
--number-of-model-instances ${TRITON_GPU_ENGINE_COUNT} \
--batching-mode static \
--evaluation-mode offline \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
```
</details>
### Latency explanation
A typical Triton Inference Server pipeline can be broken down into the following steps:
1. The client serializes the inference request into a message and sends it to
the server (Client Send).
2. The message travels over the network from the client to the server (Network).
3. The message arrives at the server and is deserialized (Server Receive).
4. The request is placed on the queue (Server Queue).
5. The request is removed from the queue and computed (Server Compute).
6. The completed request is serialized in a message and sent back to
the client (Server Send).
7. The completed message then travels over the network from the server
to the client (Network).
8. The completed message is deserialized by the client and processed as
a completed inference request (Client Receive).
Generally, for local clients, steps 1-4 and 6-8 will only occupy
a small fraction of time, compared to step 5. As backend deep learning
systems like BERT are rarely exposed directly to end users, but instead
only interface with local front-end servers, for the purposes of this model,
we can consider that all clients are local.
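As a worked example of how these components combine, take the offline result above for NVIDIA DGX A100 (1x A100 80GB) with NVIDIA TensorRT at batch size 16: Client Send 0.0 + Network+Server Send/Recv 0.1 + Server Queue 0.0 + Server Compute Input 0.1 + Server Compute Infer 5.5 + Server Compute Output 0.0 + Client Recv 0.0 sums to roughly 5.7 ms, which matches the reported average latency for that row; nearly all of the time is spent in the Server Compute Infer step.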
## Release Notes
We’re constantly refining and improving our performance on AI
and HPC workloads even on the same hardware with frequent updates
to our software stack. For our latest performance data refer
to these pages for
[AI](https://developer.nvidia.com/deep-learning-performance-training-inference)
and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.
### Changelog
### Known issues
- There are no known issues with this model.
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime | runtime | metrics | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from abc import ABC, abstractmethod
import torch
import torch.distributed as dist
from torch import Tensor
class Metric(ABC):
""" Metric class with synchronization capabilities similar to TorchMetrics """
def __init__(self):
self.states = {}
def add_state(self, name: str, default: Tensor):
assert name not in self.states
self.states[name] = default.clone()
setattr(self, name, default)
def synchronize(self):
if dist.is_initialized():
for state in self.states:
dist.all_reduce(getattr(self, state), op=dist.ReduceOp.SUM, group=dist.group.WORLD)
def __call__(self, *args, **kwargs):
self.update(*args, **kwargs)
def reset(self):
for name, default in self.states.items():
setattr(self, name, default.clone())
def compute(self):
self.synchronize()
value = self._compute().item()
self.reset()
return value
@abstractmethod
def _compute(self):
pass
@abstractmethod
def update(self, preds: Tensor, targets: Tensor):
pass
class MeanAbsoluteError(Metric):
def __init__(self):
super().__init__()
self.add_state('error', torch.tensor(0, dtype=torch.float32, device='cuda'))
self.add_state('total', torch.tensor(0, dtype=torch.int32, device='cuda'))
def update(self, preds: Tensor, targets: Tensor):
preds = preds.detach()
n = preds.shape[0]
error = torch.abs(preds.view(n, -1) - targets.view(n, -1)).sum()
self.total += n
self.error += error
def _compute(self):
return self.error / self.total
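# Usage sketch (not part of the original module; a minimal illustration only):
# metric = MeanAbsoluteError()
# metric(preds, targets) # call once per validation batch; preds/targets are CUDA tensors
# mae = metric.compute() # all-reduces across ranks (if initialized), returns a float, resets state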
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/layers | layers | roi_pool | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
class _ROIPool(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale):
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.input_shape = input.size()
output, argmax = _C.roi_pool_forward(
input, roi, spatial_scale, output_size[0], output_size[1]
)
ctx.save_for_backward(input, roi, argmax)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, rois, argmax = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
bs, ch, h, w = ctx.input_shape
grad_input = _C.roi_pool_backward(
grad_output,
input,
rois,
argmax,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
)
return grad_input, None, None, None
roi_pool = _ROIPool.apply
class ROIPool(nn.Module):
def __init__(self, output_size, spatial_scale):
super(ROIPool, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(self, input, rois):
return roi_pool(input, rois, self.output_size, self.spatial_scale)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ")"
return tmpstr
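# Usage sketch (an illustration, not part of the original module). The pooler is built with
# the pooled output size and the feature-map scale, and called with a feature map plus ROIs;
# in this codebase ROIs are expected as rows of [batch_index, x1, y1, x2, y2]:
# pooler = ROIPool(output_size=(7, 7), spatial_scale=1.0 / 16)
# pooled = pooler(feature_map, rois) # feature_map: [N, C, H, W], rois: [K, 5], both on GPU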
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | freezable_batch_norm | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A freezable batch norm layer that uses Keras batch normalization."""
import tensorflow as tf
class FreezableBatchNorm(tf.keras.layers.BatchNormalization):
"""Batch normalization layer (Ioffe and Szegedy, 2014).
This is a `freezable` batch norm layer that supports setting the `training`
parameter in the __init__ method rather than having to set it either via
the Keras learning phase or via the `call` method parameter. This layer will
forward all other parameters to the default Keras `BatchNormalization`
layer.
This class is necessary because Object Detection model training sometimes
requires batch normalization layers to be `frozen` and used as if it were
evaluation time, despite still training (and potentially using dropout layers).
Like the default Keras BatchNormalization layer, this will normalize the
activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
Arguments:
training: Boolean or None. If True, the batch normalization layer will
normalize the input batch using the batch mean and standard deviation,
and update the total moving mean and standard deviations. If False, the
layer will normalize using the moving average and std. dev, without
updating the learned avg and std. dev.
If None, the layer will follow the keras BatchNormalization layer
strategy of checking the Keras learning phase at `call` time to decide
what to do.
**kwargs: The keyword arguments to forward to the keras BatchNormalization
layer constructor.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
"""
def __init__(self, training=None, **kwargs):
super(FreezableBatchNorm, self).__init__(**kwargs)
self._training = training
def call(self, inputs, training=None):
if training is None:
training = self._training
return super(FreezableBatchNorm, self).call(inputs, training=training)
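# Usage sketch (an illustration, not part of the original module). Constructing the layer
# with training=False freezes it: inputs are always normalized with the moving statistics,
# even while the rest of the model is training.
# frozen_bn = FreezableBatchNorm(training=False)
# outputs = frozen_bn(features)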
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B0/evaluation | evaluation | evaluation_AMP_V100-32G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python3 main.py --cfg config/efficientnet_v1/b0_cfg.py \
--mode eval \
--use_amp \
--use_xla \
--model_dir ./output \
--data_dir /data \
--eval_batch_size 512
|
TensorFlow/Detection/SSD/examples | examples | SSD320_FP16_4GPU_BENCHMARK | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CKPT_DIR=${1:-"/results/SSD320_FP16_4GPU"}
PIPELINE_CONFIG_PATH=${2:-"/workdir/models/research/configs"}"/ssd320_bench.config"
GPUS=4
TENSOR_OPS=0
export TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
TRAIN_LOG=$(mpirun --allow-run-as-root \
-np $GPUS \
-H localhost:$GPUS \
-bind-to none \
-map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH \
-mca pml ob1 \
-mca btl ^openib \
python -u ./object_detection/model_main.py \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
--model_dir=${CKPT_DIR} \
--alsologtostderr \
--amp \
"${@:3}" 2>&1)
PERF=$(echo "$TRAIN_LOG" | sed -n 's|.*global_step/sec: \(\S\+\).*|\1|p' | python -c "import sys; x = sys.stdin.readlines(); x = [float(a) for a in x[int(len(x)*3/4):]]; print(32*$GPUS*sum(x)/len(x), 'img/s')")
mkdir -p $CKPT_DIR
echo "$GPUS GPUs mixed precision training performance: $PERF" | tee $CKPT_DIR/train_log
echo "$TRAIN_LOG" >> $CKPT_DIR/train_log
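# Usage sketch (assumed invocation, based on the positional arguments above):
# bash SSD320_FP16_4GPU_BENCHMARK.sh [CKPT_DIR] [CONFIG_DIR] [extra model_main.py flags...]
# e.g.:
# bash SSD320_FP16_4GPU_BENCHMARK.sh /results/SSD320_FP16_4GPU /workdir/models/research/configs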
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/model_repo/jasper-trt-ensemble | jasper-trt-ensemble | config | name: "jasper-trt-ensemble"
platform: "ensemble"
max_batch_size: 1#MAX_BATCH
input {
name: "AUDIO_SIGNAL"
data_type: TYPE_FP32
dims: -1#AUDIO_LENGTH
}
input {
name: "NUM_SAMPLES"
data_type: TYPE_INT32
dims: [ 1 ]
}
output {
name: "TRANSCRIPT"
data_type: TYPE_INT32
dims: [-1]
}
ensemble_scheduling {
step {
model_name: "jasper-feature-extractor"
model_version: -1
input_map {
key: "AUDIO_SIGNAL__0"
value: "AUDIO_SIGNAL"
}
input_map {
key: "NUM_SAMPLES__1"
value: "NUM_SAMPLES"
}
output_map {
key: "AUDIO_FEATURES__0"
value: "AUDIO_FEATURES"
}
}
step {
model_name: "jasper-trt"
model_version: -1
input_map {
key: "FEATURES"
value: "AUDIO_FEATURES"
}
output_map {
key: "LOGITS"
value: "CHARACTER_PROBABILITIES"
}
}
step {
model_name: "jasper-decoder"
model_version: -1
input_map {
key: "CLASS_LOGITS__0"
value: "CHARACTER_PROBABILITIES"
}
output_map {
key: "CANDIDATE_TRANSCRIPT__0"
value: "TRANSCRIPT"
}
}
}
|
TensorFlow2/Segmentation/nnUNet | nnUNet | main | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from data_loading.data_module import DataModule
from models.nn_unet import NNUnet
from runtime.args import get_main_args
from runtime.checkpoint import load_model
from runtime.logging import get_logger
from runtime.run import evaluate, export_model, predict, train
from runtime.utils import hvd_init, set_seed, set_tf_flags
def main(args):
hvd_init()
if args.seed is not None:
set_seed(args.seed)
set_tf_flags(args)
data = DataModule(args)
data.setup()
logger = get_logger(args)
logger.log_hyperparams(vars(args))
logger.log_metadata("dice_score", {"unit": None})
logger.log_metadata("eval_dice_nobg", {"unit": None})
logger.log_metadata("throughput_predict", {"unit": "images/s"})
logger.log_metadata("throughput_train", {"unit": "images/s"})
logger.log_metadata("latency_predict_mean", {"unit": "ms"})
logger.log_metadata("latency_train_mean", {"unit": "ms"})
if args.exec_mode == "train":
model = NNUnet(args)
train(args, model, data, logger)
elif args.exec_mode == "evaluate":
model = load_model(args)
evaluate(args, model, data, logger)
elif args.exec_mode == "predict":
model = NNUnet(args) if args.benchmark else load_model(args)
predict(args, model, data, logger)
elif args.exec_mode == "export":
# Export model
model = load_model(args)
export_model(args, model)
suffix = "amp" if args.amp else "fp32"
sm = f"{args.results}/saved_model_task_{args.task}_dim_{args.dim}_" + suffix
trt = f"{args.results}/trt_saved_model_task_{args.task}_dim_{args.dim}_" + suffix
args.saved_model_dir = sm if args.load_sm else trt
args.exec_mode = "evaluate" if args.validate else "predict"
# Run benchmarking
model = load_model(args)
data = DataModule(args)
data.setup()
if args.validate:
evaluate(args, model, data, logger)
else:
predict(args, model, data, logger)
else:
raise NotImplementedError
if __name__ == "__main__":
args = get_main_args()
main(args)
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner | runner | preparer | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from datetime import datetime
from typing import Dict, List
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .configuration import Configuration
from .downloader import download
from .experiment import Experiment, Stage
from .logger import LOGGER
from .maintainer import Maintainer
from .pipeline import Pipeline
from .stages import ResultsType, TritonPerformanceOfflineStage, TritonPerformanceOnlineStage
from .task import Checkpoint, Dataset, SystemInfo, Task
from .triton import Triton
from .utils import clean_directory
class Preparer(abc.ABC):
"""
Runner preparer object.
"""
@abc.abstractmethod
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
pass
class ExperimentPreparer(Preparer):
"""
Experiment runner preparer object.
"""
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
LOGGER.info("Preparing Triton container image")
triton_container_image = self._prepare_triton_container_image(config, maintainer, triton)
LOGGER.info("Initialize task")
task = self._initialize_task(
workspace=workspace,
config=config,
pipeline=pipeline,
triton_container_image=triton_container_image,
logs_dir=logs_dir,
)
LOGGER.info("Preparing directories")
self._create_dirs(workspace, task)
LOGGER.info("Clean previous run artifacts directories")
self._clean_previous_run_artifacts(workspace, task)
LOGGER.info("Downloading checkpoints")
self._download_checkpoints(task)
return task
def _create_dirs(self, workspace: pathlib.Path, task: Task) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
for directory in [task.results_dir, task.logs_dir, task.checkpoints_dir]:
directory_path = workspace / directory
directory_path.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory} created.")
def _clean_previous_run_artifacts(self, workspace: pathlib.Path, task: Task) -> None:
"""
Clean logs from previous run
Returns:
None
"""
for directory in [
task.logs_dir,
task.results_dir,
]:
directory_path = workspace / directory
clean_directory(directory_path)
LOGGER.info(f"Location {directory} cleaned.")
def _prepare_triton_container_image(self, config: Config, maintainer: Maintainer, triton: Triton) -> str:
"""
Prepare Triton Container Image based on provided configuration
Returns:
Name of container image to use in process
"""
if not config.triton_dockerfile:
image_name = triton.container_image(config.container_version)
LOGGER.info(f"Using official Triton container image: {image_name}.")
return image_name
if config.triton_container_image:
LOGGER.info(f"Using provided Triton Container Image: {config.triton_container_image}")
return config.triton_container_image
normalized_model_name = config.model_name.lower().replace("_", "-")
image_name = f"tritonserver-{normalized_model_name}:latest"
LOGGER.info(f"Building Triton Container Image: {image_name}")
maintainer.build_image(
image_name=image_name,
image_file_path=pathlib.Path(config.triton_dockerfile),
build_args={"FROM_IMAGE": triton.container_image(container_version=config.container_version)},
)
return image_name
def _download_checkpoints(self, task: Task) -> None:
"""
Download checkpoints
"""
for variant, checkpoint in task.checkpoints.items():
checkpoint_url = checkpoint.url
download_path = checkpoint.path
if download_path.is_dir():
LOGGER.info(f"Checkpoint {download_path.name} already downloaded.")
continue
if not checkpoint_url:
LOGGER.warning(
f"Checkpoint {variant} url is not provided."
"\nIf you want to use that checkpoint please train the model locally"
f"\nand copy to {download_path} directory"
)
continue
download(checkpoint_url, download_path)
def _initialize_task(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
triton_container_image: str,
logs_dir: pathlib.Path,
) -> Task:
"""
Initialize task object
Args:
workspace: Path to workspace where artifacts are stored
config: Config object
pipeline: Pipeline object
triton_container_image: Triton Inference Server container image used for tests
Returns:
Task object
"""
datasets = {}
for dataset in config.datasets:
datasets[dataset.name] = Dataset(name=dataset.name)
checkpoints = {}
for checkpoint in config.checkpoints:
download_path = workspace / Task.checkpoints_dir / checkpoint.name
checkpoints[checkpoint.name] = Checkpoint(name=checkpoint.name, url=checkpoint.url, path=download_path)
results_types = self._task_results_types(pipeline=pipeline)
stages = {}
for stage in pipeline.stages():
stages[stage.label] = {"result_path": stage.result_path, "result_type": stage.result_type}
experiments = []
for idx, configuration in enumerate(config.configurations, start=1):
experiment = self._prepare_experiment(
idx=idx,
configuration=configuration,
results_types=results_types,
stages=stages,
)
experiments.append(experiment)
system_info = SystemInfo.from_host()
task = Task(
model_name=config.model_name,
ensemble_model_name=config.ensemble_model_name,
framework=config.framework,
checkpoints=checkpoints,
datasets=datasets,
datasets_dir=config.datasets_dir,
experiments=experiments,
container_version=config.container_version,
system_info=system_info,
triton_container_image=triton_container_image,
triton_custom_operations=config.triton_custom_operations,
triton_load_model_method=config.triton_load_model_method,
started_at=int(datetime.utcnow().timestamp()),
logs_dir=logs_dir,
batching=config.batching,
measurement_steps_offline=config.measurement_steps_offline,
measurement_steps_online=config.measurement_steps_online,
performance_tool=config.performance_tool,
)
return task
def _task_results_types(self, pipeline: Pipeline) -> List[str]:
"""
Types of results generated as part of task
Returns:
List of result types
"""
results = []
for stage in pipeline.stages():
if TritonPerformanceOfflineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_OFFLINE)
continue
if TritonPerformanceOnlineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_ONLINE)
continue
return results
def _prepare_experiment(
self,
idx: int,
configuration: Configuration,
results_types: List[str],
stages: Dict,
) -> Experiment:
"""
Prepare experiments data
Args:
idx: Experiment index
configuration: Configuration object
results_types: Results types stored in experiment
stages: Stages executed as part of experiment
Returns:
Experiment object
"""
results_mapped = {}
for result_type in results_types:
results_mapped[result_type] = result_type
stages_mapped = {}
for name, stage_data in stages.items():
stages_mapped[name] = Stage(name=name, **stage_data)
experiment = Experiment(
experiment_id=idx,
parameters=configuration.parameters,
stages=stages_mapped,
results=results_mapped,
checkpoint=configuration.checkpoint,
)
return experiment
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/perf_analyzer | perf_analyzer | perf_analyzer | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from subprocess import PIPE, CalledProcessError, Popen
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .exceptions import PerfAnalyzerException
MAX_INTERVAL_CHANGES = 10
COUNT_INTERVAL_DELTA = 50
TIME_INTERVAL_DELTA = 2000
LOGGER = logging.getLogger(__name__)
class PerfAnalyzer:
"""
This class provides an interface for running workloads
with perf_analyzer.
"""
def __init__(self, config):
"""
Parameters
----------
config : PerfAnalyzerConfig
keys are names of arguments to perf_analyzer,
values are their values.
"""
self.bin_path = "perf_analyzer"
self._config = config
self._output = str()
def run(self):
"""
Runs the perf analyzer with the
initialized configuration
Returns
-------
List of Records
List of the metrics obtained from this
run of perf_analyzer
Raises
------
PerfAnalyzerException
If subprocess throws CalledProcessError
"""
for _ in range(MAX_INTERVAL_CHANGES):
command = [self.bin_path]
command += self._config.to_cli_string().replace("=", " ").split()
LOGGER.debug(f"Perf Analyze command: {command}")
try:
process = Popen(command, start_new_session=True, stdout=PIPE, encoding="utf-8")
streamed_output = ""
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
streamed_output += output
print(output.rstrip())
self._output += streamed_output
result = process.poll()
if result != 0:
raise CalledProcessError(returncode=result, cmd=command, output=streamed_output)
return
except CalledProcessError as e:
if self._failed_with_measurement_interval(e.output):
if self._config["measurement-mode"] is None or self._config["measurement-mode"] == "count_windows":
self._increase_request_count()
else:
self._increase_time_interval()
else:
raise PerfAnalyzerException(
f"Running perf_analyzer with {e.cmd} failed with" f" exit status {e.returncode} : {e.output}"
)
raise PerfAnalyzerException(f"Ran perf_analyzer {MAX_INTERVAL_CHANGES} times, but no valid requests recorded.")
def output(self):
"""
Returns
-------
The stdout output of the
last perf_analyzer run
"""
if self._output:
return self._output
raise PerfAnalyzerException("Attempted to get perf_analyzer output" "without calling run first.")
def _failed_with_measurement_interval(self, output: str):
return (
output.find("Failed to obtain stable measurement") != -1
or output.find("Please use a larger time window") != -1
)
def _increase_request_count(self):
self._config["measurement-request-count"] += COUNT_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement request count is too small, "
f"increased to {self._config['measurement-request-count']}."
)
def _increase_time_interval(self):
self._config["measurement-interval"] += TIME_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement window is too small, "
f"increased to {self._config['measurement-interval']} ms."
)
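# Usage sketch (an illustration only; PerfAnalyzerConfig comes from elsewhere in this
# deployment toolkit and its construction is assumed here):
# config = PerfAnalyzerConfig() # populated with model name, shapes, measurement settings, ...
# analyzer = PerfAnalyzer(config)
# analyzer.run() # retries with a larger measurement window/request count when needed
# print(analyzer.output()) # raw perf_analyzer stdout from the last run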
|
TensorFlow/Classification/ConvNets/resnet50v1.5/training | training | DGXA100_RN50_TF32_90E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=resnet50 \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \
--batch_size=256 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=3.0517578125e-05 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
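# Usage sketch (assumed invocation, based on the positional arguments above):
# bash resnet50v1.5/training/DGXA100_RN50_TF32_90E.sh [WORKSPACE] [DATA_DIR] [extra main.py flags...]
# e.g.:
# bash resnet50v1.5/training/DGXA100_RN50_TF32_90E.sh /workspace/rn50v15_tf /data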
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | object_detection_evaluation | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""object_detection_evaluation module.
ObjectDetectionEvaluation is a class which manages ground truth information of a
object detection dataset, and computes frequently used detection metrics such as
Precision, Recall, CorLoc of the provided detection results.
It supports the following operations:
1) Add ground truth information of images sequentially.
2) Add detection result of images sequentially.
3) Evaluate detection metrics on already inserted detection results.
4) Write evaluation result into a pickle file for future processing or
visualization.
Note: This module operates on numpy boxes and box lists.
"""
from abc import ABCMeta
from abc import abstractmethod
import collections
import logging
import unicodedata
import numpy as np
import tensorflow as tf
from object_detection.core import standard_fields
from object_detection.utils import label_map_util
from object_detection.utils import metrics
from object_detection.utils import per_image_evaluation
class DetectionEvaluator(object):
"""Interface for object detection evalution classes.
Example usage of the Evaluator:
------------------------------
evaluator = DetectionEvaluator(categories)
# Detections and groundtruth for image 1.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
# Detections and groundtruth for image 2.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
metrics_dict = evaluator.evaluate()
"""
__metaclass__ = ABCMeta
def __init__(self, categories):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
"""
self._categories = categories
@abstractmethod
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary of groundtruth numpy arrays required
for evaluations.
"""
pass
@abstractmethod
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary of detection numpy arrays required
for evaluation.
"""
pass
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`tf.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
pass
@abstractmethod
def evaluate(self):
"""Evaluates detections and returns a dictionary of metrics."""
pass
@abstractmethod
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
pass
class ObjectDetectionEvaluator(DetectionEvaluator):
"""A class to evaluate detections."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
evaluate_precision_recall=False,
metric_prefix=None,
use_weighted_mean_ap=False,
evaluate_masks=False,
group_of_weight=0.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: (optional) boolean which determines if corloc scores
are to be returned or not.
evaluate_precision_recall: (optional) boolean which determines if
precision and recall values are to be returned or not.
metric_prefix: (optional) string prefix for metric name; if None, no
prefix is used.
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
evaluate_masks: If False, evaluation will be performed based on boxes.
If True, mask evaluation will be performed instead.
group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
Raises:
ValueError: If the category ids are not 1-indexed.
"""
super(ObjectDetectionEvaluator, self).__init__(categories)
self._num_classes = max([cat['id'] for cat in categories])
if min(cat['id'] for cat in categories) < 1:
raise ValueError('Classes should be 1-indexed.')
self._matching_iou_threshold = matching_iou_threshold
self._use_weighted_mean_ap = use_weighted_mean_ap
self._label_id_offset = 1
self._evaluate_masks = evaluate_masks
self._group_of_weight = group_of_weight
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset,
group_of_weight=self._group_of_weight)
self._image_ids = set([])
self._evaluate_corlocs = evaluate_corlocs
self._evaluate_precision_recall = evaluate_precision_recall
self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_difficult,
standard_fields.InputDataFields.groundtruth_instance_masks,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
standard_fields.DetectionResultFields.detection_masks
])
self._build_metric_names()
def _build_metric_names(self):
"""Builds a list with metric names."""
self._metric_names = [
self._metric_prefix + 'Precision/mAP@{}IOU'.format(
self._matching_iou_threshold)
]
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix +
'Precision/meanCorLoc@{}IOU'.format(self._matching_iou_threshold))
category_index = label_map_util.create_category_index(self._categories)
for idx in range(self._num_classes):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
try:
category_name = unicode(category_name, 'utf-8')
except TypeError:
pass
category_name = unicodedata.normalize('NFKD', category_name).encode(
'ascii', 'ignore')
self._metric_names.append(
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix + 'PerformanceByCategory/CorLoc@{}IOU/{}'
.format(self._matching_iou_threshold, category_name))
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_difficult: Optional length
M numpy boolean array denoting whether a ground truth box is a
difficult instance or not. This field is optional to support the case
that no boxes are difficult.
standard_fields.InputDataFields.groundtruth_instance_masks: Optional
numpy array of shape [num_boxes, height, width] with values in {0, 1}.
Raises:
ValueError: On adding groundtruth for an image more than once. Will also
raise error if instance masks are not in groundtruth dictionary.
"""
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
# If the key is not present in the groundtruth_dict or the array is empty
# (unless there are no annotations for the groundtruth on this image)
# use values from the dictionary or insert None otherwise.
if (standard_fields.InputDataFields.groundtruth_difficult in
groundtruth_dict.keys() and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_difficult]
.size or not groundtruth_classes.size)):
groundtruth_difficult = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_difficult]
else:
groundtruth_difficult = None
if not len(self._image_ids) % 1000:
logging.warn(
'image %s does not have groundtruth difficult flag specified',
image_id)
groundtruth_masks = None
if self._evaluate_masks:
if (standard_fields.InputDataFields.groundtruth_instance_masks not in
groundtruth_dict):
raise ValueError('Instance masks not in groundtruth dictionary.')
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
self._evaluation.add_single_ground_truth_image_info(
image_key=image_id,
groundtruth_boxes=groundtruth_dict[
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_class_labels=groundtruth_classes,
groundtruth_is_difficult_list=groundtruth_difficult,
groundtruth_masks=groundtruth_masks)
self._image_ids.update([image_id])
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
standard_fields.DetectionResultFields.detection_masks: uint8 numpy
array of shape [num_boxes, height, width] containing `num_boxes` masks
of values ranging between 0 and 1.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
detection_masks = None
if self._evaluate_masks:
if (standard_fields.DetectionResultFields.detection_masks not in
detections_dict):
raise ValueError('Detection masks not in detections dictionary.')
detection_masks = detections_dict[
standard_fields.DetectionResultFields.detection_masks]
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detections_dict[
standard_fields.DetectionResultFields.detection_boxes],
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores],
detected_class_labels=detection_classes,
detected_masks=detection_masks)
def evaluate(self):
"""Compute evaluation result.
Returns:
A dictionary of metrics with the following fields -
1. summary_metrics:
'<prefix if not empty>_Precision/mAP@<matching_iou_threshold>IOU': mean
average precision at the specified IOU threshold.
2. per_category_ap: category specific results with keys of the form
'<prefix if not empty>_PerformanceByCategory/
mAP@<matching_iou_threshold>IOU/category'.
"""
(per_class_ap, mean_ap, per_class_precision, per_class_recall,
per_class_corloc, mean_corloc) = (
self._evaluation.evaluate())
pascal_metrics = {self._metric_names[0]: mean_ap}
if self._evaluate_corlocs:
pascal_metrics[self._metric_names[1]] = mean_corloc
category_index = label_map_util.create_category_index(self._categories)
for idx in range(per_class_ap.size):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
try:
category_name = unicode(category_name, 'utf-8')
        except (TypeError, NameError):  # unicode() does not exist under Python 3
pass
category_name = unicodedata.normalize(
'NFKD', category_name).encode('ascii', 'ignore')
display_name = (
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_ap[idx]
# Optionally add precision and recall values
if self._evaluate_precision_recall:
display_name = (
self._metric_prefix +
'PerformanceByCategory/Precision@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_precision[idx]
display_name = (
self._metric_prefix +
'PerformanceByCategory/Recall@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_recall[idx]
        # Optionally add CorLoc metrics.
if self._evaluate_corlocs:
display_name = (
self._metric_prefix + 'PerformanceByCategory/CorLoc@{}IOU/{}'
.format(self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_corloc[idx]
return pascal_metrics
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset)
self._image_ids.clear()
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`tf.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example(). It must contain
standard_fields.InputDataFields.key.
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
# remove unexpected fields
eval_dict_filtered = dict()
for key, value in eval_dict.items():
if key in self._expected_keys:
eval_dict_filtered[key] = value
eval_dict_keys = eval_dict_filtered.keys()
def update_op(image_id, *eval_dict_batched_as_list):
"""Update operation that adds batch of images to ObjectDetectionEvaluator.
Args:
image_id: image id (single id or an array)
*eval_dict_batched_as_list: the values of the dictionary of tensors.
"""
if np.isscalar(image_id):
single_example_dict = dict(
zip(eval_dict_keys, eval_dict_batched_as_list))
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
else:
for unzipped_tuple in zip(*eval_dict_batched_as_list):
single_example_dict = dict(zip(eval_dict_keys, unzipped_tuple))
image_id = single_example_dict[standard_fields.InputDataFields.key]
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
args = [eval_dict_filtered[standard_fields.InputDataFields.key]]
args.extend(eval_dict_filtered.values())
update_op = tf.py_func(update_op, args, [])
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[self._metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {self._metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in self._metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
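# The sketch below is an illustrative addition, not part of the original module: it
# assumes an Estimator model_fn in which `evaluator` is one of the evaluators defined in
# this file, `eval_dict` was built elsewhere (e.g. via
# eval_util.result_dict_for_single_example()), and `loss` is the model's loss tensor.
def _example_estimator_eval_spec(evaluator, eval_dict, loss):
  """Illustrates plugging the evaluator's metric ops into an EstimatorSpec."""
  eval_metric_ops = evaluator.get_estimator_eval_metric_ops(eval_dict)
  return tf.estimator.EstimatorSpec(
      mode=tf.estimator.ModeKeys.EVAL, loss=loss, eval_metric_ops=eval_metric_ops)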
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalBoxes',
use_weighted_mean_ap=False)
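# Illustrative usage sketch (an addition, not part of the original module): exercises the
# groundtruth/detection/evaluate flow described in the docstrings above with a single
# hypothetical image and two hypothetical categories.
def _example_pascal_evaluation():
  categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
  evaluator = PascalDetectionEvaluator(categories)
  evaluator.add_single_ground_truth_image_info(
      image_id='image1',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[0., 0., 10., 10.]], dtype=np.float32),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1], dtype=int),
      })
  evaluator.add_single_detected_image_info(
      image_id='image1',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[0., 0., 10., 10.]], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([0.9], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1], dtype=int),
      })
  # Returns a dict keyed by e.g. 'PascalBoxes_Precision/mAP@0.5IOU'.
  return evaluator.evaluate()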
class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using weighted PASCAL metrics.
Weighted PASCAL metrics computes the mean average precision as the average
precision given the scores and tp_fp_labels of all classes. In comparison,
PASCAL metrics computes the mean average precision as the mean of the
per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
  same, since average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalBoxes',
use_weighted_mean_ap=True)
class PascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalMasks',
use_weighted_mean_ap=False,
evaluate_masks=True)
class WeightedPascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using weighted PASCAL metrics.
Weighted PASCAL metrics computes the mean average precision as the average
precision given the scores and tp_fp_labels of all classes. In comparison,
PASCAL metrics computes the mean average precision as the mean of the
per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
  same, since average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalMasks',
use_weighted_mean_ap=True,
evaluate_masks=True)
class OpenImagesDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using Open Images V2 metrics.
  Open Images V2 introduces the group_of type of bounding boxes, and this
  metric handles those boxes appropriately.
"""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
metric_prefix='OpenImagesV2',
group_of_weight=0.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
metric_prefix: Prefix name of the metric.
group_of_weight: Weight of the group-of bounding box. If set to 0 (default
for Open Images V2 detection protocol), detections of the correct class
within a group-of box are ignored. If weight is > 0, then if at least
one detection falls within a group-of box with matching_iou_threshold,
weight group_of_weight is added to true positives. Consequently, if no
detection falls within a group-of box, weight group_of_weight is added
to false negatives.
"""
super(OpenImagesDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_corlocs,
metric_prefix=metric_prefix,
group_of_weight=group_of_weight)
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_group_of,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
])
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_group_of: Optional length
M numpy boolean array denoting whether a groundtruth box contains a
group of instances.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
    # Use the group_of flags from the groundtruth_dict only if the field is
    # present and is either non-empty or the image has no groundtruth boxes at
    # all; otherwise fall back to None.
if (standard_fields.InputDataFields.groundtruth_group_of in
groundtruth_dict.keys() and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_group_of]
.size or not groundtruth_classes.size)):
groundtruth_group_of = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_group_of]
else:
groundtruth_group_of = None
if not len(self._image_ids) % 1000:
logging.warn(
'image %s does not have groundtruth group_of flag specified',
image_id)
self._evaluation.add_single_ground_truth_image_info(
image_id,
groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_classes,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=groundtruth_group_of)
self._image_ids.update([image_id])
class OpenImagesDetectionChallengeEvaluator(OpenImagesDetectionEvaluator):
"""A class implements Open Images Challenge Detection metrics.
Open Images Challenge Detection metric has two major changes in comparison
with Open Images V2 detection metric:
- a custom weight might be specified for detecting an object contained in
a group-of box.
  - verified image-level labels should be explicitly provided for
  evaluation: in case an image has neither positive nor negative image-level
label of class c, all detections of this class on this image will be
ignored.
"""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
group_of_weight=1.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
group_of_weight: weight of a group-of box. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0
(default for Open Images Detection Challenge 2018), then if at least one
detection falls within a group-of box with matching_iou_threshold,
weight group_of_weight is added to true positives. Consequently, if no
detection falls within a group-of box, weight group_of_weight is added
to false negatives.
"""
super(OpenImagesDetectionChallengeEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_corlocs,
metric_prefix='OpenImagesChallenge2018',
group_of_weight=group_of_weight)
self._evaluatable_labels = {}
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_group_of,
standard_fields.InputDataFields.groundtruth_image_classes,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
])
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_image_classes: integer 1D
numpy array containing all classes for which labels are verified.
standard_fields.InputDataFields.groundtruth_group_of: Optional length
M numpy boolean array denoting whether a groundtruth box contains a
group of instances.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
super(OpenImagesDetectionChallengeEvaluator,
self).add_single_ground_truth_image_info(image_id, groundtruth_dict)
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
self._evaluatable_labels[image_id] = np.unique(
np.concatenate(((groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_image_classes,
np.array([], dtype=int)) - self._label_id_offset),
groundtruth_classes)))
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
if image_id not in self._image_ids:
      # The evaluator expects groundtruth to be added before detections. If no
      # groundtruth was provided for this image, register it with an empty set
      # of evaluatable labels so that its detections are ignored.
self._image_ids.update([image_id])
self._evaluatable_labels[image_id] = np.array([])
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
allowed_classes = np.where(
np.isin(detection_classes, self._evaluatable_labels[image_id]))
detection_classes = detection_classes[allowed_classes]
detected_boxes = detections_dict[
standard_fields.DetectionResultFields.detection_boxes][allowed_classes]
detected_scores = detections_dict[
standard_fields.DetectionResultFields.detection_scores][allowed_classes]
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detection_classes)
def clear(self):
"""Clears stored data."""
super(OpenImagesDetectionChallengeEvaluator, self).clear()
self._evaluatable_labels.clear()
ObjectDetectionEvalMetrics = collections.namedtuple(
'ObjectDetectionEvalMetrics', [
'average_precisions', 'mean_ap', 'precisions', 'recalls', 'corlocs',
'mean_corloc'
])
class ObjectDetectionEvaluation(object):
"""Internal implementation of Pascal object detection metrics."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000,
use_weighted_mean_ap=False,
label_id_offset=0,
group_of_weight=0.0,
per_image_eval_class=per_image_evaluation.PerImageEvaluation):
"""Constructor.
Args:
num_groundtruth_classes: Number of ground-truth classes.
matching_iou_threshold: IOU threshold used for matching detected boxes
to ground-truth boxes.
nms_iou_threshold: IOU threshold used for non-maximum suppression.
nms_max_output_boxes: Maximum number of boxes returned by non-maximum
suppression.
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
label_id_offset: The label id offset.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
per_image_eval_class: The class that contains functions for computing
per image metrics.
Raises:
ValueError: if num_groundtruth_classes is smaller than 1.
"""
if num_groundtruth_classes < 1:
raise ValueError('Need at least 1 groundtruth class for evaluation.')
self.per_image_eval = per_image_eval_class(
num_groundtruth_classes=num_groundtruth_classes,
matching_iou_threshold=matching_iou_threshold,
nms_iou_threshold=nms_iou_threshold,
nms_max_output_boxes=nms_max_output_boxes,
group_of_weight=group_of_weight)
self.group_of_weight = group_of_weight
self.num_class = num_groundtruth_classes
self.use_weighted_mean_ap = use_weighted_mean_ap
self.label_id_offset = label_id_offset
self.groundtruth_boxes = {}
self.groundtruth_class_labels = {}
self.groundtruth_masks = {}
self.groundtruth_is_difficult_list = {}
self.groundtruth_is_group_of_list = {}
self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=float)
self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
self._initialize_detections()
def _initialize_detections(self):
"""Initializes internal data structures."""
self.detection_keys = set()
self.scores_per_class = [[] for _ in range(self.num_class)]
self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
self.average_precision_per_class = np.empty(self.num_class, dtype=float)
self.average_precision_per_class.fill(np.nan)
self.precisions_per_class = [np.nan] * self.num_class
self.recalls_per_class = [np.nan] * self.num_class
self.corloc_per_class = np.ones(self.num_class, dtype=float)
def clear_detections(self):
self._initialize_detections()
def add_single_ground_truth_image_info(self,
image_key,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=None,
groundtruth_masks=None):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
groundtruth_boxes: float32 numpy array of shape [num_boxes, 4]
containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
groundtruth_class_labels: integer numpy array of shape [num_boxes]
containing 0-indexed groundtruth classes for the boxes.
groundtruth_is_difficult_list: A length M numpy boolean array denoting
whether a ground truth box is a difficult instance or not. To support
the case that no boxes are difficult, it is by default set as None.
groundtruth_is_group_of_list: A length M numpy boolean array denoting
whether a ground truth box is a group-of box or not. To support
the case that no boxes are groups-of, it is by default set as None.
groundtruth_masks: uint8 numpy array of shape
[num_boxes, height, width] containing `num_boxes` groundtruth masks.
The mask values range from 0 to 1.
"""
if image_key in self.groundtruth_boxes:
logging.warn(
'image %s has already been added to the ground truth database.',
image_key)
return
self.groundtruth_boxes[image_key] = groundtruth_boxes
self.groundtruth_class_labels[image_key] = groundtruth_class_labels
self.groundtruth_masks[image_key] = groundtruth_masks
if groundtruth_is_difficult_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
self.groundtruth_is_difficult_list[
image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
if groundtruth_is_group_of_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_group_of_list = np.zeros(num_boxes, dtype=bool)
self.groundtruth_is_group_of_list[
image_key] = groundtruth_is_group_of_list.astype(dtype=bool)
self._update_ground_truth_statistics(
groundtruth_class_labels,
groundtruth_is_difficult_list.astype(dtype=bool),
groundtruth_is_group_of_list.astype(dtype=bool))
def add_single_detected_image_info(self, image_key, detected_boxes,
detected_scores, detected_class_labels,
detected_masks=None):
"""Adds detections for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
detected_boxes: float32 numpy array of shape [num_boxes, 4]
containing `num_boxes` detection boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
detected_scores: float32 numpy array of shape [num_boxes] containing
detection scores for the boxes.
detected_class_labels: integer numpy array of shape [num_boxes] containing
0-indexed detection classes for the boxes.
detected_masks: np.uint8 numpy array of shape [num_boxes, height, width]
containing `num_boxes` detection masks with values ranging
between 0 and 1.
Raises:
ValueError: if the number of boxes, scores and class labels differ in
length.
"""
    if (len(detected_boxes) != len(detected_scores) or
        len(detected_boxes) != len(detected_class_labels)):
      raise ValueError('detected_boxes, detected_scores and '
                       'detected_class_labels should all have same lengths. Got'
                       '[%d, %d, %d]' % (len(detected_boxes), len(detected_scores),
                                         len(detected_class_labels)))
if image_key in self.detection_keys:
logging.warn(
'image %s has already been added to the detection result database',
image_key)
return
self.detection_keys.add(image_key)
if image_key in self.groundtruth_boxes:
groundtruth_boxes = self.groundtruth_boxes[image_key]
groundtruth_class_labels = self.groundtruth_class_labels[image_key]
# Masks are popped instead of look up. The reason is that we do not want
# to keep all masks in memory which can cause memory overflow.
groundtruth_masks = self.groundtruth_masks.pop(
image_key)
groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[
image_key]
groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[
image_key]
else:
groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
groundtruth_class_labels = np.array([], dtype=int)
if detected_masks is None:
groundtruth_masks = None
else:
groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
groundtruth_is_difficult_list = np.array([], dtype=bool)
groundtruth_is_group_of_list = np.array([], dtype=bool)
scores, tp_fp_labels, is_class_correctly_detected_in_image = (
self.per_image_eval.compute_object_detection_metrics(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks))
for i in range(self.num_class):
if scores[i].shape[0] > 0:
self.scores_per_class[i].append(scores[i])
self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
(self.num_images_correctly_detected_per_class
) += is_class_correctly_detected_in_image
def _update_ground_truth_statistics(self, groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list):
"""Update grouth truth statitistics.
1. Difficult boxes are ignored when counting the number of ground truth
instances as done in Pascal VOC devkit.
2. Difficult boxes are treated as normal boxes when computing CorLoc related
    statistics.
Args:
groundtruth_class_labels: An integer numpy array of length M,
representing M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box is a group-of box or not
"""
for class_index in range(self.num_class):
num_gt_instances = np.sum(groundtruth_class_labels[
~groundtruth_is_difficult_list
& ~groundtruth_is_group_of_list] == class_index)
num_groupof_gt_instances = self.group_of_weight * np.sum(
groundtruth_class_labels[groundtruth_is_group_of_list] == class_index)
self.num_gt_instances_per_class[
class_index] += num_gt_instances + num_groupof_gt_instances
if np.any(groundtruth_class_labels == class_index):
self.num_gt_imgs_per_class[class_index] += 1
def evaluate(self):
"""Compute evaluation result.
Returns:
A named tuple with the following fields -
average_precision: float numpy array of average precision for
each class.
mean_ap: mean average precision of all classes, float scalar
precisions: List of precisions, each precision is a float numpy
array
recalls: List of recalls, each recall is a float numpy array
corloc: numpy float array
        mean_corloc: Mean CorLoc score averaged over all classes, float scalar
"""
if (self.num_gt_instances_per_class == 0).any():
logging.warn(
'The following classes have no ground truth examples: %s',
np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
self.label_id_offset)
if self.use_weighted_mean_ap:
all_scores = np.array([], dtype=float)
all_tp_fp_labels = np.array([], dtype=bool)
for class_index in range(self.num_class):
if self.num_gt_instances_per_class[class_index] == 0:
continue
if not self.scores_per_class[class_index]:
scores = np.array([], dtype=float)
tp_fp_labels = np.array([], dtype=float)
else:
scores = np.concatenate(self.scores_per_class[class_index])
tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
if self.use_weighted_mean_ap:
all_scores = np.append(all_scores, scores)
all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
precision, recall = metrics.compute_precision_recall(
scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
self.precisions_per_class[class_index] = precision
self.recalls_per_class[class_index] = recall
average_precision = metrics.compute_average_precision(precision, recall)
self.average_precision_per_class[class_index] = average_precision
logging.info('average_precision: %f', average_precision)
self.corloc_per_class = metrics.compute_cor_loc(
self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
if self.use_weighted_mean_ap:
num_gt_instances = np.sum(self.num_gt_instances_per_class)
precision, recall = metrics.compute_precision_recall(
all_scores, all_tp_fp_labels, num_gt_instances)
mean_ap = metrics.compute_average_precision(precision, recall)
else:
mean_ap = np.nanmean(self.average_precision_per_class)
mean_corloc = np.nanmean(self.corloc_per_class)
return ObjectDetectionEvalMetrics(
self.average_precision_per_class, mean_ap, self.precisions_per_class,
self.recalls_per_class, self.corloc_per_class, mean_corloc)
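# Illustrative sketch (an addition, not part of the original module): unlike the
# ObjectDetectionEvaluator wrappers above, the internal ObjectDetectionEvaluation class
# takes 0-indexed class labels and raw arrays, and evaluate() returns the
# ObjectDetectionEvalMetrics named tuple rather than a dict.
def _example_internal_evaluation():
  evaluation = ObjectDetectionEvaluation(num_groundtruth_classes=1)
  evaluation.add_single_ground_truth_image_info(
      image_key='image1',
      groundtruth_boxes=np.array([[0., 0., 5., 5.]], dtype=np.float32),
      groundtruth_class_labels=np.array([0], dtype=int))
  evaluation.add_single_detected_image_info(
      image_key='image1',
      detected_boxes=np.array([[0., 0., 5., 5.]], dtype=np.float32),
      detected_scores=np.array([0.8], dtype=np.float32),
      detected_class_labels=np.array([0], dtype=int))
  metrics_tuple = evaluation.evaluate()
  return metrics_tuple.mean_ap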
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/model_dataset | model_dataset | auto_arima_electricity | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
dataset:
config:
stride: 400
|
PyTorch/SpeechSynthesis/HiFiGAN/common | common | audio_processing | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import librosa.util as librosa_util
import numpy as np
import torch
from scipy.signal import get_window
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, size=n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
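# Minimal usage sketch (an addition, not part of the original module): computes the
# sum-squared Hann-window envelope for 100 analysis frames with the default hop/FFT sizes.
def _example_window_sumsquare():
    envelope = window_sumsquare('hann', n_frames=100, hop_length=200,
                                win_length=800, n_fft=800)
    # len(envelope) == n_fft + hop_length * (n_frames - 1) == 800 + 200 * 99
    return envelope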
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
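# Illustrative round trip (an addition, not part of the original module): for inputs above
# the clipping threshold, compression followed by decompression recovers the input.
def _example_dynamic_range_round_trip():
    x = torch.rand(4, 80, 100) + 1e-3  # e.g. a hypothetical batch of mel magnitudes
    reconstructed = dynamic_range_decompression(dynamic_range_compression(x))
    return torch.allclose(x, reconstructed, atol=1e-6)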
|
PyTorch/LanguageModeling/BART/bart/tokenization | tokenization | tokenization_roberta | # coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for RoBERTa."""
import logging
from typing import List, Optional
from tokenizers.processors import RobertaProcessing
from bart.tokenization.tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast
from bart.tokenization.tokenization_utils import AddedToken
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json",
"roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json",
"roberta-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-vocab.json",
"distilroberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-vocab.json",
"roberta-base-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json",
"roberta-large-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json",
},
"merges_file": {
"roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt",
"roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt",
"roberta-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-merges.txt",
"distilroberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-merges.txt",
"roberta-base-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt",
"roberta-large-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class RobertaTokenizer(GPT2Tokenizer):
"""
Constructs a RoBERTa BPE tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
::
>>> from transformers import RobertaTokenizer
>>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
>>> tokenizer("Hello world")['input_ids']
[0, 31414, 232, 328, 2]
>>> tokenizer(" Hello world")['input_ids']
[0, 20920, 232, 2]
You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
.. note::
When used with ``is_pretokenized=True``, this tokenizer will add a space before each word (even the first one).
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users
should refer to the superclass for more information regarding methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
errors (:obj:`str`, `optional`, defaults to "replace"):
Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information.
bos_token (:obj:`string`, `optional`, defaults to "<s>"):
            The beginning of sequence token that was used during pre-training. Can be used as a sequence classifier token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the beginning
of sequence. The token used is the :obj:`cls_token`.
eos_token (:obj:`string`, `optional`, defaults to "</s>"):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end
of sequence. The token used is the :obj:`sep_token`.
sep_token (:obj:`string`, `optional`, defaults to "</s>"):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
for sequence classification or for a text and a question for question answering.
It is also used as the last token of a sequence built with special tokens.
cls_token (:obj:`string`, `optional`, defaults to "<s>"):
The classifier token which is used when doing sequence classification (classification of the whole
sequence instead of per-token classification). It is the first token of the sequence when built with
special tokens.
unk_token (:obj:`string`, `optional`, defaults to "<unk>"):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`string`, `optional`, defaults to "<pad>"):
The token used for padding, for example when batching sequences of different lengths.
mask_token (:obj:`string`, `optional`, defaults to "<mask>"):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
add_prefix_space=False,
**kwargs
):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
vocab_file=vocab_file,
merges_file=merges_file,
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens.
A RoBERTa sequence has the following format:
- single sequence: ``<s> X </s>``
- pair of sequences: ``<s> A </s></s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True if the token list is already formatted with special tokens for the model
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
RoBERTa does not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_pretokenized=False, **kwargs):
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
if (is_pretokenized or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
text = " " + text
return (text, kwargs)
class RobertaTokenizerFast(GPT2TokenizerFast):
"""
Constructs a "Fast" RoBERTa BPE tokenizer (backed by HuggingFace's `tokenizers` library), derived from the GPT-2
tokenizer, using byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
::
>>> from transformers import RobertaTokenizerFast
>>> tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
>>> tokenizer("Hello world")['input_ids']
[0, 31414, 232, 328, 2]
>>> tokenizer(" Hello world")['input_ids']
[0, 20920, 232, 2]
You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
.. note::
When used with ``is_pretokenized=True``, this tokenizer needs to be instantiated with
``add_prefix_space=True``.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the methods. Users
should refer to the superclass for more information regarding methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
errors (:obj:`str`, `optional`, defaults to "replace"):
Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information.
unk_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`):
The beginning of sequence token.
eos_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`):
The end of sequence token.
add_prefix_space (:obj:`bool`, `optional`, defaults to `False`):
Whether to add a leading space to the first word.
            This allows treating the leading word just as any other word.
            (The GPT-2 tokenizer detects the beginning of words by the preceding space.)
trim_offsets (:obj:`bool`, `optional`, defaults to `True`):
Whether the post processing step should trim offsets to avoid including whitespaces.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
add_prefix_space=False,
trim_offsets=True,
**kwargs
):
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
kwargs.setdefault("pad_token", pad_token)
kwargs.setdefault("sep_token", sep_token)
kwargs.setdefault("cls_token", cls_token)
kwargs.setdefault("mask_token", mask_token)
super().__init__(
vocab_file=vocab_file,
merges_file=merges_file,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
add_prefix_space=add_prefix_space,
trim_offsets=trim_offsets,
**kwargs,
)
# This will add the necessary special tokens to the vocabulary if needed
self.sanitize_special_tokens()
self.backend_tokenizer._tokenizer.post_processor = RobertaProcessing(
sep=(sep_token, self.sep_token_id),
cls=(cls_token, self.cls_token_id),
add_prefix_space=add_prefix_space,
trim_offsets=trim_offsets,
)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
RoBERTa does not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
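# Illustrative sketch (an addition, not part of the original module): shows the
# special-token layout produced by build_inputs_with_special_tokens, using placeholder
# token ids instead of a real vocabulary; `tokenizer` is any instantiated
# RobertaTokenizer or RobertaTokenizerFast.
def _example_special_token_layout(tokenizer):
    single = tokenizer.build_inputs_with_special_tokens([100, 200])
    pair = tokenizer.build_inputs_with_special_tokens([100, 200], [300])
    # single == [<s>, 100, 200, </s>] and pair == [<s>, 100, 200, </s>, </s>, 300, </s>]
    return single, pair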