relative_path | section | filename | text
---|---|---|---|
PyTorch/LanguageModeling/BERT/triton/large/runner | runner | config_NVIDIA-T4 | checkpoints:
- name: large-qa
url: https://api.ngc.nvidia.com/v2/models/nvidia/bert_pyt_ckpt_large_qa_squad11_amp/versions/19.09.0/zip
configurations:
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: none
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: none
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: none
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: none
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
- 8
- 16
batch_sizes: 1 8 16
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: ts-trace
export_precision: fp16
format: ts-trace
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
container_version: '21.10'
datasets:
- name: data
datasets_dir: datasets
framework: PyTorch
model_name: BERT
triton_container_image: null
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/criterion | criterion | MSE | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: torch.nn.MSELoss
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph | graph | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from .base_graph_generator import BaseGenerator, BaseGraphGenerator, BaseBipartiteGraphGenerator
from .rmat import RMATGenerator
from .rmat_bipartite import RMATBipartiteGenerator
from .random import RandomGraph
from .random_bipartite import RandomBipartite
def get_structural_generator_class(type, is_bipartite, is_random):
if type == 'RMAT':
rmats = {
(True, True): RandomBipartite,
(True, False): RMATBipartiteGenerator,
(False, True): RandomGraph,
(False, False): RMATGenerator
}
return rmats[(is_bipartite, is_random)]
else:
raise ValueError("unsupported generator type")
|
PyTorch/Detection/SSD | SSD | .gitignore | **/__pycache__
resnet50-19c8e357.pth
nogit/
pbr/
models/
scripts/
|
TensorFlow2/Segmentation/UNet_Medical/model | model | unet | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Model construction utils
This module provides a convenient way to create different topologies
based around UNet.
"""
import tensorflow as tf
from model.layers import InputBlock, DownsampleBlock, BottleneckBlock, UpsampleBlock, OutputBlock
class Unet(tf.keras.Model):
""" U-Net: Convolutional Networks for Biomedical Image Segmentation
Source:
https://arxiv.org/pdf/1505.04597
"""
def __init__(self):
super().__init__()
self.input_block = InputBlock(filters=64)
self.bottleneck = BottleneckBlock(1024)
self.output_block = OutputBlock(filters=64, n_classes=2)
self.down_blocks = [DownsampleBlock(filters, idx)
for idx, filters in enumerate([128, 256, 512])]
self.up_blocks = [UpsampleBlock(filters, idx)
for idx, filters in enumerate([512, 256, 128])]
def call(self, x, training=True):
skip_connections = []
out, residual = self.input_block(x)
skip_connections.append(residual)
for down_block in self.down_blocks:
out, residual = down_block(out)
skip_connections.append(residual)
out = self.bottleneck(out, training)
for up_block in self.up_blocks:
out = up_block(out, skip_connections.pop())
out = self.output_block(out, skip_connections.pop())
return out
|
PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner | triton_performance_runner | __init__ | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import TritonPerformanceRunner # noqa: F401
|
TensorFlow2/LanguageModeling/BERT/official/nlp/transformer | transformer | beam_search_v1_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test beam search helper methods."""
import tensorflow.compat.v1 as tf
from official.nlp.transformer import beam_search_v1 as beam_search
class BeamSearchHelperTests(tf.test.TestCase):
def setUp(self):
super(BeamSearchHelperTests, self).setUp()
tf.compat.v1.disable_eager_execution()
def test_expand_to_beam_size(self):
x = tf.ones([7, 4, 2, 5])
x = beam_search._expand_to_beam_size(x, 3)
with self.session() as sess:
shape = sess.run(tf.shape(x))
self.assertAllEqual([7, 3, 4, 2, 5], shape)
def test_shape_list(self):
y = tf.compat.v1.placeholder(dtype=tf.int32, shape=[])
x = tf.ones([7, y, 2, 5])
shape = beam_search._shape_list(x)
self.assertIsInstance(shape[0], int)
self.assertIsInstance(shape[1], tf.Tensor)
self.assertIsInstance(shape[2], int)
self.assertIsInstance(shape[3], int)
def test_get_shape_keep_last_dim(self):
y = tf.constant(4.0)
x = tf.ones([7, tf.cast(tf.sqrt(y), tf.int32), 2, 5])
shape = beam_search._get_shape_keep_last_dim(x)
self.assertAllEqual([None, None, None, 5],
shape.as_list())
def test_flatten_beam_dim(self):
x = tf.ones([7, 4, 2, 5])
x = beam_search._flatten_beam_dim(x)
with self.session() as sess:
shape = sess.run(tf.shape(x))
self.assertAllEqual([28, 2, 5], shape)
def test_unflatten_beam_dim(self):
x = tf.ones([28, 2, 5])
x = beam_search._unflatten_beam_dim(x, 7, 4)
with self.session() as sess:
shape = sess.run(tf.shape(x))
self.assertAllEqual([7, 4, 2, 5], shape)
def test_gather_beams(self):
x = tf.reshape(tf.range(24), [2, 3, 4])
# x looks like: [[[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
#
# [[12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]]]
y = beam_search._gather_beams(x, [[1, 2], [0, 2]], 2, 2)
with self.session() as sess:
y = sess.run(y)
self.assertAllEqual([[[4, 5, 6, 7],
[8, 9, 10, 11]],
[[12, 13, 14, 15],
[20, 21, 22, 23]]],
y)
def test_gather_topk_beams(self):
x = tf.reshape(tf.range(24), [2, 3, 4])
x_scores = [[0, 1, 1], [1, 0, 1]]
y = beam_search._gather_topk_beams(x, x_scores, 2, 2)
with self.session() as sess:
y = sess.run(y)
self.assertAllEqual([[[4, 5, 6, 7],
[8, 9, 10, 11]],
[[12, 13, 14, 15],
[20, 21, 22, 23]]],
y)
if __name__ == "__main__":
tf.test.main()
|
TensorFlow/Detection/SSD/models/research/object_detection/legacy | legacy | trainer | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model trainer.
This file provides a generic training method that can be used to train a
DetectionModel.
"""
import functools
import tensorflow as tf
from object_detection.builders import optimizer_builder
from object_detection.builders import preprocessor_builder
from object_detection.core import batcher
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import ops as util_ops
from object_detection.utils import variables_helper
from deployment import model_deploy
slim = tf.contrib.slim
def create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
batch_queue_capacity, num_batch_queue_threads,
prefetch_queue_capacity, data_augmentation_options):
"""Sets up reader, prefetcher and returns input queue.
Args:
batch_size_per_clone: batch size to use per clone.
create_tensor_dict_fn: function to create tensor dictionary.
batch_queue_capacity: maximum number of elements to store within a queue.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: maximum capacity of the queue used to prefetch
assembled batches.
data_augmentation_options: a list of tuples, where each tuple contains a
data augmentation function and a dictionary containing arguments and their
values (see preprocessor.py).
Returns:
input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
(which hold images, boxes and targets). To get a batch of tensor_dicts,
call input_queue.Dequeue().
"""
tensor_dict = create_tensor_dict_fn()
tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
tensor_dict[fields.InputDataFields.image], 0)
images = tensor_dict[fields.InputDataFields.image]
float_images = tf.to_float(images)
tensor_dict[fields.InputDataFields.image] = float_images
include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
in tensor_dict)
include_keypoints = (fields.InputDataFields.groundtruth_keypoints
in tensor_dict)
include_multiclass_scores = (fields.InputDataFields.multiclass_scores
in tensor_dict)
if data_augmentation_options:
tensor_dict = preprocessor.preprocess(
tensor_dict, data_augmentation_options,
func_arg_map=preprocessor.get_default_func_arg_map(
include_label_weights=True,
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints))
input_queue = batcher.BatchQueue(
tensor_dict,
batch_size=batch_size_per_clone,
batch_queue_capacity=batch_queue_capacity,
num_batch_queue_threads=num_batch_queue_threads,
prefetch_queue_capacity=prefetch_queue_capacity)
return input_queue
def get_inputs(input_queue,
num_classes,
merge_multiple_label_boxes=False,
use_multiclass_scores=False):
"""Dequeues batch and constructs inputs to object detection model.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
num_classes: Number of classes.
merge_multiple_label_boxes: Whether to merge boxes with multiple labels
or not. Defaults to false. Merged boxes are represented with a single
box and a k-hot encoding of the multiple labels associated with the
boxes.
use_multiclass_scores: Whether to use multiclass scores instead of
groundtruth_classes.
Returns:
images: a list of 3-D float tensor of images.
image_keys: a list of string keys for the images.
locations_list: a list of tensors of shape [num_boxes, 4]
containing the corners of the groundtruth boxes.
classes_list: a list of padded one-hot (or K-hot) float32 tensors containing
target classes.
masks_list: a list of 3-D float tensors of shape [num_boxes, image_height,
image_width] containing instance masks for objects if present in the
input_queue. Else returns None.
keypoints_list: a list of 3-D float tensors of shape [num_boxes,
num_keypoints, 2] containing keypoints for objects if present in the
input queue. Else returns None.
weights_lists: a list of 1-D float32 tensors of shape [num_boxes]
containing groundtruth weight for each box.
"""
read_data_list = input_queue.dequeue()
label_id_offset = 1
def extract_images_and_targets(read_data):
"""Extract images and targets from the input dict."""
image = read_data[fields.InputDataFields.image]
key = ''
if fields.InputDataFields.source_id in read_data:
key = read_data[fields.InputDataFields.source_id]
location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
tf.int32)
classes_gt -= label_id_offset
if merge_multiple_label_boxes and use_multiclass_scores:
raise ValueError(
'Using both merge_multiple_label_boxes and use_multiclass_scores is '
'not supported'
)
if merge_multiple_label_boxes:
location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
location_gt, classes_gt, num_classes)
classes_gt = tf.cast(classes_gt, tf.float32)
elif use_multiclass_scores:
classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores],
tf.float32)
else:
classes_gt = util_ops.padded_one_hot_encoding(
indices=classes_gt, depth=num_classes, left_pad=0)
masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints)
if (merge_multiple_label_boxes and (
masks_gt is not None or keypoints_gt is not None)):
raise NotImplementedError('Multi-label support is only for boxes.')
weights_gt = read_data.get(
fields.InputDataFields.groundtruth_weights)
return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt,
weights_gt)
return zip(*map(extract_images_and_targets, read_data_list))
def _create_losses(input_queue, create_model_fn, train_config):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
train_config: a train_pb2.TrainConfig protobuf.
"""
detection_model = create_model_fn()
(images, _, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_masks_list, groundtruth_keypoints_list,
groundtruth_weights_list) = get_inputs(
input_queue,
detection_model.num_classes,
train_config.merge_multiple_label_boxes,
train_config.use_multiclass_scores)
preprocessed_images = []
true_image_shapes = []
for image in images:
resized_image, true_image_shape = detection_model.preprocess(image)
preprocessed_images.append(resized_image)
true_image_shapes.append(true_image_shape)
images = tf.concat(preprocessed_images, 0)
true_image_shapes = tf.concat(true_image_shapes, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
if any(keypoints is None for keypoints in groundtruth_keypoints_list):
groundtruth_keypoints_list = None
detection_model.provide_groundtruth(
groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list,
groundtruth_keypoints_list,
groundtruth_weights_list=groundtruth_weights_list)
prediction_dict = detection_model.predict(images, true_image_shapes)
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)
for loss_tensor in losses_dict.values():
tf.losses.add_loss(loss_tensor)
def train(create_tensor_dict_fn,
create_model_fn,
train_config,
master,
task,
num_clones,
worker_replicas,
clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
train_dir,
graph_hook_fn=None):
"""Training function for detection models.
Args:
create_tensor_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel and generates
losses.
train_config: a train_pb2.TrainConfig protobuf.
master: BNS name of the TensorFlow master to use.
task: The task id of this training instance.
num_clones: The number of clones to run per machine.
worker_replicas: The number of work replicas to train with.
clone_on_cpu: True if clones should be forced to run on CPU.
ps_tasks: Number of parameter server tasks.
worker_job_name: Name of the worker job.
is_chief: Whether this replica is the chief replica.
train_dir: Directory to write checkpoints and training summaries to.
graph_hook_fn: Optional function that is called after the inference graph is
built (before optimization). This is helpful to perform additional changes
to the training graph such as adding FakeQuant ops. The function should
modify the default graph.
Raises:
ValueError: If both num_clones > 1 and train_config.sync_replicas is true.
"""
detection_model = create_model_fn()
data_augmentation_options = [
preprocessor_builder.build(step)
for step in train_config.data_augmentation_options]
with tf.Graph().as_default():
# Build a configuration specifying multi-GPU and multi-replicas.
deploy_config = model_deploy.DeploymentConfig(
num_clones=num_clones,
clone_on_cpu=clone_on_cpu,
replica_id=task,
num_replicas=worker_replicas,
num_ps_tasks=ps_tasks,
worker_job_name=worker_job_name)
# Place the global step on the device storing the variables.
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
if num_clones != 1 and train_config.sync_replicas:
raise ValueError('In Synchronous SGD mode num_clones must '
'be 1. Found num_clones: {}'.format(num_clones))
batch_size = train_config.batch_size // num_clones
if train_config.sync_replicas:
batch_size //= train_config.replicas_to_aggregate
with tf.device(deploy_config.inputs_device()):
input_queue = create_input_queue(
batch_size, create_tensor_dict_fn,
train_config.batch_queue_capacity,
train_config.num_batch_queue_threads,
train_config.prefetch_queue_capacity, data_augmentation_options)
# Gather initial summaries.
# TODO(rathodv): See if summaries can be added/extracted from global tf
# collections so that they don't have to be passed around.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
global_summaries = set([])
model_fn = functools.partial(_create_losses,
create_model_fn=create_model_fn,
train_config=train_config)
clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue])
first_clone_scope = clones[0].scope
if graph_hook_fn:
with tf.device(deploy_config.variables_device()):
graph_hook_fn()
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
with tf.device(deploy_config.optimizer_device()):
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var, family='LearningRate')
sync_optimizer = None
if train_config.sync_replicas:
training_optimizer = tf.train.SyncReplicasOptimizer(
training_optimizer,
replicas_to_aggregate=train_config.replicas_to_aggregate,
total_num_replicas=worker_replicas)
sync_optimizer = training_optimizer
with tf.device(deploy_config.optimizer_device()):
regularization_losses = (None if train_config.add_regularization_loss
else [])
total_loss, grads_and_vars = model_deploy.optimize_clones(
clones, training_optimizer,
regularization_losses=regularization_losses)
total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.')
# Optionally multiply bias gradients by train_config.bias_grad_multiplier.
if train_config.bias_grad_multiplier:
biases_regex_list = ['.*/biases']
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars,
biases_regex_list,
multiplier=train_config.bias_grad_multiplier)
# Optionally freeze some layers by setting their gradients to be zero.
if train_config.freeze_variables:
grads_and_vars = variables_helper.freeze_gradients_matching_regex(
grads_and_vars, train_config.freeze_variables)
# Optionally clip gradients
if train_config.gradient_clipping_by_norm > 0:
with tf.name_scope('clip_grads'):
grads_and_vars = slim.learning.clip_gradient_norms(
grads_and_vars, train_config.gradient_clipping_by_norm)
# Create gradient updates.
grad_updates = training_optimizer.apply_gradients(grads_and_vars,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops, name='update_barrier')
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# Add summaries.
for model_var in slim.get_model_variables():
global_summaries.add(tf.summary.histogram('ModelVars/' +
model_var.op.name, model_var))
for loss_tensor in tf.losses.get_losses():
global_summaries.add(tf.summary.scalar('Losses/' + loss_tensor.op.name,
loss_tensor))
global_summaries.add(
tf.summary.scalar('Losses/TotalLoss', tf.losses.get_total_loss()))
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
summaries |= global_summaries
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
# Soft placement allows placing on CPU ops without GPU implementation.
session_config = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
# Save checkpoints regularly.
keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours
saver = tf.train.Saver(
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
# Create ops required to initialize the model from a given checkpoint.
init_fn = None
if train_config.fine_tune_checkpoint:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, fine_tune_checkpoint_type is set based on
# from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
var_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (variables_helper.
get_variables_available_in_checkpoint(
var_map, train_config.fine_tune_checkpoint,
include_global_step=False))
init_saver = tf.train.Saver(available_var_map)
def initializer_fn(sess):
init_saver.restore(sess, train_config.fine_tune_checkpoint)
init_fn = initializer_fn
slim.learning.train(
train_tensor,
logdir=train_dir,
master=master,
is_chief=is_chief,
session_config=session_config,
startup_delay_steps=train_config.startup_delay_steps,
init_fn=init_fn,
summary_op=summary_op,
number_of_steps=(
train_config.num_steps if train_config.num_steps else None),
save_summaries_secs=120,
sync_optimizer=sync_optimizer,
saver=saver)
|
PyTorch/LanguageModeling/BART/utils | utils | generation_beam_search | # coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2020 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from collections import UserDict
from typing import Optional, Tuple
import torch
from .file_utils import add_start_docstrings
PROCESS_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_beams, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using any class inheriting from :class:`~transformers.PretrainedTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
next_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2 * num_beams)`):
Current scores of the top :obj:`2 * num_beams` non-finished beam hypotheses.
next_tokens (:obj:`torch.LongTensor` of shape :obj:`(batch_size, 2 * num_beams)`):
:obj:`input_ids` of the tokens corresponding to the top :obj:`2 * num_beams` non-finished beam hypotheses.
next_indices (:obj:`torch.LongTensor` of shape :obj:`(batch_size, 2 * num_beams)`):
Beam indices indicating to which beam hypothesis the :obj:`next_tokens` correspond.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
Return:
:obj:`UserDict`: A dictionary composed of the fields as defined above:
- **next_beam_scores** (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`) -- Updated
scores of all non-finished beams.
- **next_beam_tokens** (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`) -- Next tokens
to be added to the non-finished beam_hypotheses.
- **next_beam_indices** (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`) -- Beam indices
indicating to which beam the next tokens shall be added.
"""
FINALIZE_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_beams, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using any class inheriting from :class:`~transformers.PretrainedTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
final_beam_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`):
The final scores of all non-finished beams.
final_beam_tokens (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`):
The last tokens to be added to the non-finished beam_hypotheses.
final_beam_indices (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`):
The beam indices indicating to which beam the :obj:`final_beam_tokens` shall be added.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
Return:
:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated
sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all
batches finished early due to the :obj:`eos_token_id`.
"""
class BeamScorer(ABC):
"""
Abstract base class for all beam scorers that are used for :meth:`~transformers.PretrainedModel.beam_search` and
:meth:`~transformers.PretrainedModel.beam_sample`.
"""
@abstractmethod
@add_start_docstrings(PROCESS_INPUTS_DOCSTRING)
def process(
self,
input_ids: torch.LongTensor,
next_scores: torch.FloatTensor,
next_tokens: torch.LongTensor,
next_indices: torch.LongTensor,
**kwargs
) -> Tuple[torch.Tensor]:
raise NotImplementedError("This is an abstract method.")
@abstractmethod
@add_start_docstrings(FINALIZE_INPUTS_DOCSTRING)
def finalize(
self,
input_ids: torch.LongTensor,
next_scores: torch.FloatTensor,
next_tokens: torch.LongTensor,
next_indices: torch.LongTensor,
**kwargs
) -> torch.LongTensor:
raise NotImplementedError("This is an abstract method.")
class BeamSearchScorer(BeamScorer):
r"""
:class:`transformers.BeamScorer` implementing standard beam search decoding.
Adapted in part from `Facebook's XLM beam search code
<https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__.
Reference for the diverse beam search algorithm and implementation `Ashwin Kalyan's DBS implementation
<https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua>`__
Args:
batch_size (:obj:`int`):
Batch Size of :obj:`input_ids` for which standard beam search decoding is run in parallel.
max_length (:obj:`int`):
The maximum length of the sequence to be generated.
num_beams (:obj:`int`):
Number of beams for beam search.
device (:obj:`torch.device`):
Defines the device type (*e.g.*, :obj:`"cpu"` or :obj:`"cuda"`) on which this instance of
:obj:`BeamSearchScorer` will be allocated.
length_penalty (:obj:`float`, `optional`, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the
model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer
sequences.
do_early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
num_beam_hyps_to_keep (:obj:`int`, `optional`, defaults to 1):
The number of beam hypotheses that shall be returned upon calling
:meth:`~transformer.BeamSearchScorer.finalize`.
num_beam_groups (:obj:`int`):
Number of groups to divide :obj:`num_beams` into in order to ensure diversity among different groups of
beams. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details.
"""
def __init__(
self,
batch_size: int,
max_length: int,
num_beams: int,
device: torch.device,
length_penalty: Optional[float] = 1.0,
do_early_stopping: Optional[bool] = False,
num_beam_hyps_to_keep: Optional[int] = 1,
num_beam_groups: Optional[int] = 1,
):
self.max_length = max_length
self.num_beams = num_beams
self.device = device
self.length_penalty = length_penalty
self.do_early_stopping = do_early_stopping
self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
self.num_beam_groups = num_beam_groups
self.group_size = self.num_beams // self.num_beam_groups
self._is_init = False
self._beam_hyps = [
BeamHypotheses(
num_beams=self.num_beams,
max_length=self.max_length,
length_penalty=self.length_penalty,
early_stopping=self.do_early_stopping,
)
for _ in range(batch_size)
]
self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device)
if not isinstance(num_beams, int) or num_beams <= 1:
raise ValueError(
f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1, one should make use of `greedy_search` instead."
)
if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
raise ValueError(
f"`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` "
f"has to be divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
)
@property
def is_done(self) -> bool:
return self._done.all()
def process(
self,
input_ids: torch.LongTensor,
next_scores: torch.FloatTensor,
next_tokens: torch.LongTensor,
next_indices: torch.LongTensor,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
) -> Tuple[torch.Tensor]:
cur_len = input_ids.shape[-1]
batch_size = len(self._beam_hyps)
assert batch_size == (input_ids.shape[0] // self.group_size)
device = input_ids.device
next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
for batch_idx, beam_hyp in enumerate(self._beam_hyps):
if self._done[batch_idx]:
assert (
len(beam_hyp) >= self.num_beams
), "Batch can only be done if at least {} beams have been generated".format(self.num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
# pad the batch
next_beam_scores[batch_idx, :] = 0
next_beam_tokens[batch_idx, :] = pad_token_id
next_beam_indices[batch_idx, :] = 0
continue
# next tokens for this sentence
beam_idx = 0
for beam_token_rank, (next_token, next_score, next_index) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
):
batch_beam_idx = batch_idx * self.group_size + next_index
# add to generated hypotheses if end of sentence
if (eos_token_id is not None) and (next_token.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
if is_beam_token_worse_than_top_num_beams:
continue
beam_hyp.add(
input_ids[batch_beam_idx].clone(),
next_score.item(),
)
else:
# add next predicted token since it is not eos_token
next_beam_scores[batch_idx, beam_idx] = next_score
next_beam_tokens[batch_idx, beam_idx] = next_token
next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
beam_idx += 1
# once the beam for next step is full, don't add more tokens to it.
if beam_idx == self.group_size:
break
if beam_idx < self.group_size:
raise ValueError(
f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id: {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
)
# Check if we are done so that we can save a pad step if all(done)
self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
next_scores[batch_idx].max().item(), cur_len
)
return UserDict(
{
"next_beam_scores": next_beam_scores.view(-1),
"next_beam_tokens": next_beam_tokens.view(-1),
"next_beam_indices": next_beam_indices.view(-1),
}
)
def finalize(
self,
input_ids: torch.LongTensor,
final_beam_scores: torch.FloatTensor,
final_beam_tokens: torch.LongTensor,
final_beam_indices: torch.LongTensor,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
) -> Tuple[torch.LongTensor]:
batch_size = len(self._beam_hyps)
# finalize all open beam hypotheses and add to generated hypotheses
for batch_idx, beam_hyp in enumerate(self._beam_hyps):
if self._done[batch_idx]:
continue
# all open beam hypotheses are added to the beam hypothesis
# beam hypothesis class automatically keeps the best beams
for beam_id in range(self.num_beams):
batch_beam_idx = batch_idx * self.num_beams + beam_id
final_score = final_beam_scores[batch_beam_idx].item()
final_tokens = input_ids[batch_beam_idx]
beam_hyp.add(final_tokens, final_score)
# select the best hypotheses
sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
best = []
best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
# retrieve best hypotheses
for i, beam_hyp in enumerate(self._beam_hyps):
sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0])
for j in range(self.num_beam_hyps_to_keep):
best_hyp_tuple = sorted_hyps.pop()
best_score = best_hyp_tuple[0]
best_hyp = best_hyp_tuple[1]
sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
# append to lists
best.append(best_hyp)
best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
# prepare for adding eos
sent_max_len = min(sent_lengths.max().item() + 1, self.max_length)
decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
# shorter batches are padded if needed
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`pad_token_id` has to be defined"
decoded.fill_(pad_token_id)
# fill with hypotheses and eos_token_id if the latter fits in
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < self.max_length:
decoded[i, sent_lengths[i]] = eos_token_id
return UserDict(
{
"sequences": decoded,
"sequence_scores": best_scores,
}
)
class BeamHypotheses:
def __init__(self, num_beams: int, max_length: int, length_penalty: float, early_stopping: bool):
"""
Initialize n-best list of hypotheses.
"""
self.max_length = max_length - 1 # ignoring bos_token
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.num_beams = num_beams
self.beams = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.beams)
def add(self, hyp: torch.LongTensor, sum_logprobs: float):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty)
if len(self) < self.num_beams or score > self.worst_score:
self.beams.append((score, hyp))
if len(self) > self.num_beams:
sorted_next_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)])
del self.beams[sorted_next_scores[0][1]]
self.worst_score = sorted_next_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs: float, cur_len: int) -> bool:
"""
If there are enough hypotheses and none of the hypotheses being generated can become better than the worst
one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
cur_score = best_sum_logprobs / cur_len ** self.length_penalty
ret = self.worst_score >= cur_score
return ret |
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | config_NVIDIA-A30 | checkpoints:
- name: electricity_bin
url: https://api.ngc.nvidia.com/v2/models/nvidia/tft_pyt_ckpt_base_eletricity_amp/versions/21.06.0/zip
- name: traffic_bin
url: https://api.ngc.nvidia.com/v2/models/nvidia/tft_pyt_ckpt_base_traffic_amp/versions/21.06.0/zip
configurations:
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: electricity_bin
dataset: electricity_bin
device: gpu
export_format: onnx
export_precision: fp32
format: trt
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: traffic_bin
dataset: traffic_bin
device: gpu
export_format: onnx
export_precision: fp32
format: trt
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: electricity_bin
dataset: electricity_bin
device: gpu
export_format: ts-trace
export_precision: fp32
format: ts-trace
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: traffic_bin
dataset: traffic_bin
device: gpu
export_format: ts-trace
export_precision: fp32
format: ts-trace
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
container_version: '21.12'
datasets:
- name: electricity_bin
- name: traffic_bin
datasets_dir: datasets
framework: PyTorch
model_name: TFT
triton_container_image: null
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
TensorFlow/Classification/ConvNets | ConvNets | postprocess_ckpt | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import argparse
import os
def process_checkpoint(input_ckpt, output_ckpt_path, dense_layer):
"""
This function loads a RN50 checkpoint with Dense layer as the final layer
and transforms the final dense layer into a 1x1 convolution layer. The weights
of the dense layer are reshaped into weights of 1x1 conv layer.
Args:
input_ckpt: Path to the input RN50 ckpt which has a dense layer as the classification layer.
output_ckpt_path: Path at which the new checkpoint is saved.
dense_layer: Name of the dense (FC) classification-layer kernel variable in the checkpoint.
Returns:
None. A new checkpoint with a 1x1 conv layer as the classification layer is generated.
"""
with tf.Session() as sess:
# Load all the variables
all_vars = tf.train.list_variables(input_ckpt)
# Capture the dense layer weights and reshape them to a 4D tensor which would be
# the weights of a 1x1 convolution layer. This code replaces the dense (FC) layer
# to a 1x1 conv layer.
dense_layer_value=0.
new_var_list=[]
for var in all_vars:
curr_var = tf.train.load_variable(input_ckpt, var[0])
if var[0]==dense_layer:
dense_layer_value = curr_var
else:
new_var_list.append(tf.Variable(curr_var, name=var[0]))
dense_layer_shape = [1, 1, 2048, 1001]
new_var_value = np.reshape(dense_layer_value, dense_layer_shape)
new_var = tf.Variable(new_var_value, name=dense_layer)
new_var_list.append(new_var)
sess.run(tf.global_variables_initializer())
tf.train.Saver(var_list=new_var_list).save(sess, output_ckpt_path, write_meta_graph=False, write_state=False)
print ("Rewriting checkpoint completed")
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, required=True, help='Path to pretrained RN50 checkpoint with dense layer')
parser.add_argument('--dense_layer', type=str, default='resnet50/output/dense/kernel')
parser.add_argument('--output', type=str, default='output_dir', help="Output directory to store new checkpoint")
args = parser.parse_args()
input_ckpt = args.input
# Create an output directory
os.mkdir(args.output)
new_ckpt='new.ckpt'
new_ckpt_path = os.path.join(args.output, new_ckpt)
with open(os.path.join(args.output, "checkpoint"), 'w') as file:
file.write("model_checkpoint_path: "+ "\"" + new_ckpt + "\"")
# Process the input checkpoint, apply transforms and generate a new checkpoint.
process_checkpoint(input_ckpt, new_ckpt_path, args.dense_layer)
|
PyTorch/Classification/ConvNets/se-resnext101-32x4d/training/AMP | AMP | DGX1V_se-resnext101-32x4d_AMP_250E | python ./multiproc.py --nproc_per_node 8 ./launch.py --model se-resnext101-32x4d --precision AMP --mode convergence --platform DGX1V /imagenet --workspace ${1:-./} --raport-file raport.json
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/callbacks | callbacks | standard | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
early_stopping:
_target_: callbacks.ctl_callbacks.EarlyStopping
metric: val_loss
min_delta: 0
patience: 5
logging:
_target_: callbacks.ctl_callbacks.LoggingCallback
save_best_checkpoint:
_target_: callbacks.ctl_callbacks.SaveBestCheckpoint
metric: val_loss
save_checkpoint:
_target_: callbacks.ctl_callbacks.SaveCheckpoint
throughput_benchmark:
_target_: callbacks.ctl_callbacks.ThroughputBenchmark
warmup_epochs: 0
|
PyTorch/SpeechSynthesis | SpeechSynthesis | README | # Text-to-Speech (TTS)
Speech Synthesis, or Text-to-Speech (TTS), is the task of artificially producing human speech from raw transcripts. With deep learning today, the synthesized waveforms can sound very natural, almost indistinguishable from how a human would speak. Such Text-to-Speech models can be used, for example, when an interactive virtual assistant responds, or when a mobile device converts the text on a webpage to speech for accessibility reasons.
In this collection, we will cover:
- How does Text-to-Speech work?
- Use cases and applications
- Where to get started
---
## How does Text-to-Speech work?

TTS synthesis is a two-step process, described as follows (a minimal sketch of the pipeline is shown after the figure below):
1. Text-to-Spectrogram Model:
This model transforms the text into time-aligned features such as a spectrogram, mel spectrogram, or F0 frequencies and other acoustic features. We use architectures such as Tacotron for this step.
2. Spectrogram-to-Audio Model:
This model converts the generated time-aligned spectrogram representation into continuous, human-like audio, for example with WaveGlow.

---
## Use Cases and applications
### Telecommunications and Multimedia:
E-mail services have become very prevalent in this decade. However, it is sometimes challenging to read important messages while abroad: proper computer systems may not be available, or security concerns may arise. With TTS technology, e-mail messages can be listened to quickly and efficiently on smartphones, adding to productivity.
### Voice Assistant for the Visually Impaired and Vocally Handicapped:
- Possibly TTS's most useful and vital application is reading printed or non-braille texts aloud for visually impaired or blind users.
- TTS also helps vocally handicapped people who have difficulty communicating with others who do not understand sign language.
### Voice Assistant:
- Modern home appliances such as refrigerators can adopt this use case for reading cooking recipes aloud.
- Automobiles can use it for voice navigation to the destination.
- It simplifies teaching the pronunciation and phonetics of difficult, natural multi-lingual texts.
---
## Where to get started
NVIDIA provides Deep Learning Examples for Speech Synthesis on its GitHub repository. These examples provide you with easy-to-consume and highly optimized scripts for both training and inference. The quick start guide at our GitHub repository will help you set up the environment using NGC Docker images, download pre-trained models from NGC, and adapt the model training and inference for your application/use case.
Here are the examples relevant for speech synthesis, directly from [Deep Learning Examples](https://github.com/NVIDIA/DeepLearningExamples):
1. Tacotron2 and WaveGlow for Speech Synthesis using PyTorch
- [Git repository](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2)
- Uses PyTorch 20.03-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch)
2. FastPitch for text-to-mel-spectrogram generation using PyTorch
- [Git repository](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch)
- Uses PyTorch 20.03-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch)
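As a concrete starting point, the Tacotron 2 and WaveGlow example also publishes `torch.hub` entry points. The snippet below follows that quick start; the entry-point names, the `infer` signatures, and the 22050 Hz output rate are taken from that example's documentation rather than from this page and may change between releases, so treat them as assumptions to verify against the linked repository.
```python
import torch
from scipy.io.wavfile import write

# Load pre-trained models via torch.hub (entry points assumed from the Tacotron 2 quick start).
tacotron2 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tacotron2', model_math='fp16')
waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp16')
utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tts_utils')

tacotron2 = tacotron2.to('cuda').eval()
waveglow = waveglow.remove_weightnorm(waveglow).to('cuda').eval()

# Text -> symbol ids -> mel spectrogram -> waveform.
sequences, lengths = utils.prepare_input_sequence(["Hello world, this is a test."])
with torch.no_grad():
    mel, _, _ = tacotron2.infer(sequences, lengths)
    audio = waveglow.infer(mel)

# Save the result; 22050 Hz is the sampling rate assumed from the example's LJSpeech setup.
write("audio.wav", 22050, audio[0].data.cpu().numpy())
```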
|
PyTorch/SpeechSynthesis/Tacotron2/exports | exports | export_tacotron2_onnx | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from torch import nn
from torch.nn import functional as F
import argparse
import sys
sys.path.append('./')
import models
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model, prepare_input_sequence
from tacotron2_common.utils import to_gpu, get_mask_from_lengths
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str,
help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory for the exported Tacotron 2 ONNX model')
parser.add_argument('--fp16', action='store_true',
help='Export with half precision to ONNX')
return parser
def encoder_infer(self, x, input_lengths):
device = x.device
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x.to(device))), 0.5, False)
x = x.transpose(1, 2)
input_lengths_cpu = input_lengths[:] # TODO
input_lengths_cpu = input_lengths_cpu.cpu().numpy() # TODO
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths_cpu, batch_first=True)
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
lens = input_lengths*2
return outputs, lens
class Encoder(torch.nn.Module):
def __init__(self, tacotron2):
super(Encoder, self).__init__()
self.tacotron2 = tacotron2
self.tacotron2.encoder.lstm.flatten_parameters()
self.infer = encoder_infer
def forward(self, sequence, sequence_lengths):
embedded_inputs = self.tacotron2.embedding(sequence).transpose(1, 2)
memory, lens = self.infer(self.tacotron2.encoder, embedded_inputs, sequence_lengths)
processed_memory = self.tacotron2.decoder.attention_layer.memory_layer(memory)
return memory, processed_memory, lens
class Postnet(torch.nn.Module):
def __init__(self, tacotron2):
super(Postnet, self).__init__()
self.tacotron2 = tacotron2
def forward(self, mel_outputs):
mel_outputs_postnet = self.tacotron2.postnet(mel_outputs)
return mel_outputs + mel_outputs_postnet
def lstmcell2lstm_params(lstm_mod, lstmcell_mod):
lstm_mod.weight_ih_l0 = torch.nn.Parameter(lstmcell_mod.weight_ih)
lstm_mod.weight_hh_l0 = torch.nn.Parameter(lstmcell_mod.weight_hh)
lstm_mod.bias_ih_l0 = torch.nn.Parameter(lstmcell_mod.bias_ih)
lstm_mod.bias_hh_l0 = torch.nn.Parameter(lstmcell_mod.bias_hh)
def prenet_infer(self, x):
x1 = x[:]
for linear in self.layers:
x1 = F.relu(linear(x1))
x0 = x1[0].unsqueeze(0)
mask = torch.le(torch.rand(256, device='cuda').to(x.dtype), 0.5).to(x.dtype)
mask = mask.expand(x1.size(0), x1.size(1))
x1 = x1*mask*2.0
return x1
class DecoderIter(torch.nn.Module):
def __init__(self, tacotron2):
super(DecoderIter, self).__init__()
self.tacotron2 = tacotron2
dec = tacotron2.decoder
self.p_attention_dropout = dec.p_attention_dropout
self.p_decoder_dropout = dec.p_decoder_dropout
self.prenet = dec.prenet
self.prenet.infer = prenet_infer
self.attention_rnn = nn.LSTM(dec.prenet_dim + dec.encoder_embedding_dim,
dec.attention_rnn_dim, 1)
lstmcell2lstm_params(self.attention_rnn, dec.attention_rnn)
self.attention_rnn.flatten_parameters()
self.attention_layer = dec.attention_layer
self.decoder_rnn = nn.LSTM(dec.attention_rnn_dim + dec.encoder_embedding_dim,
dec.decoder_rnn_dim, 1)
lstmcell2lstm_params(self.decoder_rnn, dec.decoder_rnn)
self.decoder_rnn.flatten_parameters()
self.linear_projection = dec.linear_projection
self.gate_layer = dec.gate_layer
def decode(self, decoder_input, in_attention_hidden, in_attention_cell,
in_decoder_hidden, in_decoder_cell, in_attention_weights,
in_attention_weights_cum, in_attention_context, memory,
processed_memory, mask):
cell_input = torch.cat((decoder_input, in_attention_context), -1)
_, (out_attention_hidden, out_attention_cell) = self.attention_rnn(
cell_input.unsqueeze(0), (in_attention_hidden.unsqueeze(0),
in_attention_cell.unsqueeze(0)))
out_attention_hidden = out_attention_hidden.squeeze(0)
out_attention_cell = out_attention_cell.squeeze(0)
out_attention_hidden = F.dropout(
out_attention_hidden, self.p_attention_dropout, False)
attention_weights_cat = torch.cat(
(in_attention_weights.unsqueeze(1),
in_attention_weights_cum.unsqueeze(1)), dim=1)
out_attention_context, out_attention_weights = self.attention_layer(
out_attention_hidden, memory, processed_memory,
attention_weights_cat, mask)
out_attention_weights_cum = in_attention_weights_cum + out_attention_weights
decoder_input_tmp = torch.cat(
(out_attention_hidden, out_attention_context), -1)
_, (out_decoder_hidden, out_decoder_cell) = self.decoder_rnn(
decoder_input_tmp.unsqueeze(0), (in_decoder_hidden.unsqueeze(0),
in_decoder_cell.unsqueeze(0)))
out_decoder_hidden = out_decoder_hidden.squeeze(0)
out_decoder_cell = out_decoder_cell.squeeze(0)
out_decoder_hidden = F.dropout(
out_decoder_hidden, self.p_decoder_dropout, False)
decoder_hidden_attention_context = torch.cat(
(out_decoder_hidden, out_attention_context), 1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return (decoder_output, gate_prediction, out_attention_hidden,
out_attention_cell, out_decoder_hidden, out_decoder_cell,
out_attention_weights, out_attention_weights_cum, out_attention_context)
# @torch.jit.script
def forward(self,
decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask):
decoder_input1 = self.prenet.infer(self.prenet, decoder_input)
outputs = self.decode(decoder_input1,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
return outputs
def test_inference(encoder, decoder_iter, postnet):
encoder.eval()
decoder_iter.eval()
postnet.eval()
from trt.inference_trt import init_decoder_inputs
texts = ["Hello World, good day."]
sequences, sequence_lengths = prepare_input_sequence(texts)
measurements = {}
print("Running Tacotron2 Encoder")
with torch.no_grad():
memory, processed_memory, lens = encoder(sequences, sequence_lengths)
print("Running Tacotron2 Decoder")
device = memory.device
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device = device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device = device)
mel_outputs, gate_outputs, alignments = (torch.zeros(1), torch.zeros(1), torch.zeros(1))
gate_threshold = 0.6
max_decoder_steps = 1000
first_iter = True
(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory,
mask) = init_decoder_inputs(memory, processed_memory, sequence_lengths)
while True:
with torch.no_grad():
(mel_output, gate_output,
attention_hidden, attention_cell,
decoder_hidden, decoder_cell,
attention_weights, attention_weights_cum,
attention_context) = decoder_iter(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory, mask)
if first_iter:
mel_outputs = torch.unsqueeze(mel_output, 2)
gate_outputs = torch.unsqueeze(gate_output, 2)
alignments = torch.unsqueeze(attention_weights, 2)
first_iter = False
else:
mel_outputs = torch.cat((mel_outputs, torch.unsqueeze(mel_output, 2)), 2)
gate_outputs = torch.cat((gate_outputs, torch.unsqueeze(gate_output, 2)), 2)
alignments = torch.cat((alignments, torch.unsqueeze(attention_weights, 2)), 2)
dec = torch.le(torch.sigmoid(gate_output), gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if torch.sum(not_finished) == 0:
print("Stopping after ",mel_outputs.size(2)," decoder steps")
break
if mel_outputs.size(2) == max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
print("Running Tacotron2 PostNet")
with torch.no_grad():
mel_outputs_postnet = postnet(mel_outputs)
return mel_outputs_postnet
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 export to TRT')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
fp16_run=args.fp16, cpu_run=False)
opset_version = 10
sequences = torch.randint(low=0, high=148, size=(1,50),
dtype=torch.long).cuda()
sequence_lengths = torch.IntTensor([sequences.size(1)]).cuda().long()
dummy_input = (sequences, sequence_lengths)
encoder = Encoder(tacotron2)
encoder.eval()
with torch.no_grad():
encoder(*dummy_input)
torch.onnx.export(encoder, dummy_input, args.output+"/"+"encoder.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["sequences", "sequence_lengths"],
output_names=["memory", "processed_memory", "lens"],
dynamic_axes={"sequences": {1: "text_seq"},
"memory": {1: "mem_seq"},
"processed_memory": {1: "mem_seq"}
})
memory = torch.randn((1,sequence_lengths[0],512)).cuda() #encoder_outputs
if args.fp16:
memory = memory.half()
memory_lengths = sequence_lengths
# initialize decoder states for dummy_input
decoder_input = tacotron2.decoder.get_go_frame(memory)
mask = get_mask_from_lengths(memory_lengths)
(attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory) = tacotron2.decoder.initialize_decoder_states(memory)
dummy_input = (decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
decoder_iter = DecoderIter(tacotron2)
decoder_iter.eval()
with torch.no_grad():
decoder_iter(*dummy_input)
torch.onnx.export(decoder_iter, dummy_input, args.output+"/"+"decoder_iter.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["decoder_input",
"attention_hidden",
"attention_cell",
"decoder_hidden",
"decoder_cell",
"attention_weights",
"attention_weights_cum",
"attention_context",
"memory",
"processed_memory",
"mask"],
output_names=["decoder_output",
"gate_prediction",
"out_attention_hidden",
"out_attention_cell",
"out_decoder_hidden",
"out_decoder_cell",
"out_attention_weights",
"out_attention_weights_cum",
"out_attention_context"],
dynamic_axes={"attention_weights" : {1: "seq_len"},
"attention_weights_cum" : {1: "seq_len"},
"memory" : {1: "seq_len"},
"processed_memory" : {1: "seq_len"},
"mask" : {1: "seq_len"},
"out_attention_weights" : {1: "seq_len"},
"out_attention_weights_cum" : {1: "seq_len"}
})
postnet = Postnet(tacotron2)
dummy_input = torch.randn((1,80,620)).cuda()
if args.fp16:
dummy_input = dummy_input.half()
torch.onnx.export(postnet, dummy_input, args.output+"/"+"postnet.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["mel_outputs"],
output_names=["mel_outputs_postnet"],
dynamic_axes={"mel_outputs": {2: "mel_seq"},
"mel_outputs_postnet": {2: "mel_seq"}})
mel = test_inference(encoder, decoder_iter, postnet)
torch.save(mel, "mel.pt")
if __name__ == '__main__':
main()
|
CUDA-Optimized/FastSpeech/fastspeech/text_norm | text_norm | numbers | # Copyright (c) 2017 Keith Ito
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
|
TensorFlow/Segmentation/UNet_Industrial/runtime | runtime | runner | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import json
import multiprocessing
import operator
import random
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
from datasets import known_datasets
from model.unet import UNet_v1
from utils import hvd_utils
from utils.hooks import ProfilerHook
import dllogger as Logger
__all__ = [
'Runner',
]
class Runner(object):
def __init__(
self,
# Model Params
input_format, # NCHW or NHWC
compute_format, # NCHW or NHWC
n_channels,
activation_fn,
weight_init_method,
model_variant,
input_shape,
mask_shape,
input_normalization_method,
# Training HParams
augment_data,
loss_fn_name,
# Runtime HParams
amp,
xla,
# Directory Params
model_dir=None,
log_dir=None,
sample_dir=None,
data_dir=None,
dataset_name=None,
dataset_hparams=None,
# Debug Params
log_every_n_steps=1,
debug_verbosity=0,
seed=None
):
if dataset_hparams is None:
dataset_hparams = dict()
if compute_format not in ["NHWC", 'NCHW']:
raise ValueError("Unknown `compute_format` received: %s (allowed: ['NHWC', 'NCHW'])" % compute_format)
if input_format not in ["NHWC", 'NCHW']:
raise ValueError("Unknown `input_format` received: %s (allowed: ['NHWC', 'NCHW'])" % input_format)
if n_channels not in [1, 3]:
raise ValueError("Unsupported number of channels: %d (allowed: 1 (grayscale) and 3 (color))" % n_channels)
if data_dir is not None and not os.path.exists(data_dir):
raise ValueError("The `data_dir` received does not exists: %s" % data_dir)
if hvd_utils.is_using_hvd():
hvd.init()
if hvd.rank() == 0:
print("Horovod successfully initialized ...")
tf_seed = 2 * (seed + hvd.rank()) if seed is not None else None
else:
tf_seed = 2 * seed if seed is not None else None
# ============================================
# Optimisation Flags - Do not remove
# ============================================
os.environ['CUDA_CACHE_DISABLE'] = '0'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_GPU_THREAD_COUNT'] = '1' if not hvd_utils.is_using_hvd() else str(hvd.size())
print("WORLD_SIZE", hvd.size() if hvd_utils.is_using_hvd() else 1)
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
# =================================================
self.xla = xla
if amp:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("TF AMP is activated - Experimental Feature")
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
# =================================================
model_hparams = tf.contrib.training.HParams(
# Model Params
input_format=input_format,
compute_format=compute_format,
input_shape=input_shape,
mask_shape=mask_shape,
n_channels=n_channels,
activation_fn=activation_fn,
weight_init_method=weight_init_method,
model_variant=model_variant,
input_normalization_method=input_normalization_method,
# Training HParams
augment_data=augment_data,
loss_fn_name=loss_fn_name,
# Runtime Params
amp=amp,
# Debug Params
log_every_n_steps=log_every_n_steps,
debug_verbosity=debug_verbosity,
seed=tf_seed
)
run_config_additional = tf.contrib.training.HParams(
dataset_hparams=dataset_hparams,
model_dir=model_dir if not hvd_utils.is_using_hvd() or hvd.rank() == 0 else None,
log_dir=log_dir if not hvd_utils.is_using_hvd() or hvd.rank() == 0 else None,
sample_dir=sample_dir if not hvd_utils.is_using_hvd() or hvd.rank() == 0 else None,
data_dir=data_dir,
num_preprocessing_threads=32,
)
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
try:
os.makedirs(sample_dir)
except FileExistsError:
pass
self.run_hparams = Runner._build_hparams(model_hparams, run_config_additional)
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print('Defining Model Estimator ...\n')
self._model = UNet_v1(
model_name="UNet_v1",
input_format=self.run_hparams.input_format,
compute_format=self.run_hparams.compute_format,
n_output_channels=1,
unet_variant=self.run_hparams.model_variant,
weight_init_method=self.run_hparams.weight_init_method,
activation_fn=self.run_hparams.activation_fn
)
if self.run_hparams.seed is not None:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("Deterministic Run - Seed: %d\n" % seed)
tf.set_random_seed(self.run_hparams.seed)
np.random.seed(self.run_hparams.seed)
random.seed(self.run_hparams.seed)
if dataset_name not in known_datasets.keys():
raise RuntimeError(
"The dataset `%s` is unknown, allowed values: %s ..." % (dataset_name, list(known_datasets.keys()))
)
self.dataset = known_datasets[dataset_name](data_dir=data_dir, **self.run_hparams.dataset_hparams)
self.num_gpus = 1 if not hvd_utils.is_using_hvd() else hvd.size()
@staticmethod
def _build_hparams(*args):
hparams = tf.contrib.training.HParams()
for _hparams in args:
if not isinstance(_hparams, tf.contrib.training.HParams):
raise ValueError("Invalid HParams argument object detected:", _hparams)
for key, val in _hparams.values().items():
try:
hparams.add_hparam(name=key, value=val)
except ValueError:
print(
"the parameter `{}` already exists - existing value: {} and duplicated value: {}".format(
key, hparams.get(key), val
)
)
return hparams
@staticmethod
def _get_global_batch_size(worker_batch_size):
if hvd_utils.is_using_hvd():
return worker_batch_size * hvd.size()
else:
return worker_batch_size
@staticmethod
def _get_session_config(mode, xla):
if mode not in ["train", 'validation', 'benchmark']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark')" % mode)
config = tf.ConfigProto()
config.allow_soft_placement = True
config.log_device_placement = False
config.gpu_options.allow_growth = True
if hvd_utils.is_using_hvd():
config.gpu_options.visible_device_list = str(hvd.rank())
if xla:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("XLA is activated - Experimental Feature")
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
config.gpu_options.force_gpu_compatible = True # Force pinned memory
# TODO: Provide correct session configuration for both
# variations with comments explaining why specific options were used
if mode == 'train':
config.intra_op_parallelism_threads = 1 # Avoid pool of Eigen threads
if hvd_utils.is_using_hvd():
config.inter_op_parallelism_threads = max(2, (multiprocessing.cpu_count() // hvd.size()) - 2)
else:
config.inter_op_parallelism_threads = 4
return config
@staticmethod
def _get_run_config(mode, model_dir, xla, seed=None):
if mode not in ["train", 'validation', 'benchmark']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark')" % mode)
if seed is not None:
if hvd_utils.is_using_hvd():
tf_random_seed = 2 * (seed + hvd.rank())
else:
tf_random_seed = 2 * seed
else:
tf_random_seed = None
config = tf.estimator.RunConfig(
model_dir=model_dir,
tf_random_seed=tf_random_seed,
save_summary_steps=10 if mode == "train" else 1e9, # disabled
save_checkpoints_steps=None,
save_checkpoints_secs=None,
session_config=Runner._get_session_config(mode=mode, xla=xla),
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=1e6, # disabled
log_step_count_steps=1e9,
train_distribute=None,
device_fn=None,
protocol=None,
eval_distribute=None,
experimental_distribute=None
)
if mode == 'train':
if hvd_utils.is_using_hvd():
config = config.replace(
save_checkpoints_steps=1000 if hvd.rank() == 0 else None, keep_checkpoint_every_n_hours=3
)
else:
config = config.replace(save_checkpoints_steps=1000, keep_checkpoint_every_n_hours=3)
return config
def _get_estimator(self, mode, run_params, xla):
if mode not in ["train", 'validation', 'benchmark']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark')" % mode)
run_config = Runner._get_run_config(
mode=mode, model_dir=self.run_hparams.model_dir, xla=xla, seed=self.run_hparams.seed
)
return tf.estimator.Estimator(
model_fn=self._model, model_dir=self.run_hparams.model_dir, config=run_config, params=run_params
)
def train(
self,
iter_unit,
num_iter,
batch_size,
weight_decay,
learning_rate,
learning_rate_decay_factor,
learning_rate_decay_steps,
rmsprop_decay,
rmsprop_momentum,
use_auto_loss_scaling,
augment_data,
warmup_steps=50,
is_benchmark=False
):
if iter_unit not in ["epoch", "batch"]:
raise ValueError('`iter_unit` value is unknown: %s (allowed: ["epoch", "batch"])' % iter_unit)
if self.run_hparams.data_dir is None and not is_benchmark:
raise ValueError('`data_dir` must be specified for training!')
if self.run_hparams.amp:
if use_auto_loss_scaling:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("TF Loss Auto Scaling is activated - Experimental Feature")
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_LOSS_SCALING"] = "1"
apply_manual_loss_scaling = False
else:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_LOSS_SCALING"] = "0"
apply_manual_loss_scaling = True
else:
apply_manual_loss_scaling = False
global_batch_size = batch_size * self.num_gpus
if self.run_hparams.data_dir is not None:
filenames, num_samples, num_steps, num_epochs = self.dataset.get_dataset_runtime_specs(
training=True, iter_unit=iter_unit, num_iter=num_iter, global_batch_size=global_batch_size
)
steps_per_epoch = int(num_steps / num_epochs)
else:
num_epochs = 1
num_steps = num_iter
steps_per_epoch = 625
training_hooks = []
if hvd_utils.is_using_hvd():
training_hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
training_hooks.append(
ProfilerHook(
global_batch_size=global_batch_size,
log_every=self.run_hparams.log_every_n_steps,
warmup_steps=warmup_steps,
is_training=True,
sample_dir=self.run_hparams.sample_dir
)
)
print("Starting Model Training ...")
Logger.log(step=('PARAMETER'), data={"Epochs": num_epochs}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Total Steps": num_steps}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Steps per Epoch": steps_per_epoch}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Weight Decay Factor": weight_decay}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Learning Rate": learning_rate}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Learning Rate Decay Factor": learning_rate_decay_factor}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Learning Rate Decay Steps": learning_rate_decay_steps}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"RMSProp - Decay": rmsprop_decay}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"RMSProp - Momentum": rmsprop_momentum}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Loss Function Name": self.run_hparams.loss_fn_name}, verbosity=Logger.Verbosity.DEFAULT)
if self.run_hparams.amp:
Logger.log(step=('PARAMETER'), data={"Use Auto Loss Scaling": use_auto_loss_scaling}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"# GPUs": self.num_gpus}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"GPU Batch Size": batch_size}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Global Batch Size": global_batch_size}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Total Files to be Processed": num_steps * global_batch_size}, verbosity=Logger.Verbosity.DEFAULT)
print() # visual spacing
estimator_params = {
'batch_size': batch_size,
'steps_per_epoch': steps_per_epoch,
'learning_rate': learning_rate,
'learning_rate_decay_steps': learning_rate_decay_steps,
'learning_rate_decay_factor': learning_rate_decay_factor,
'rmsprop_decay': rmsprop_decay,
'rmsprop_momentum': rmsprop_momentum,
'weight_decay': weight_decay,
'apply_manual_loss_scaling': apply_manual_loss_scaling,
'loss_fn_name': self.run_hparams.loss_fn_name,
'debug_verbosity': self.run_hparams.debug_verbosity,
}
def training_data_fn():
if not is_benchmark or self.run_hparams.data_dir is not None:
return self.dataset.dataset_fn(
batch_size=batch_size,
training=True,
only_defective_images=True,
augment_data=augment_data,
input_shape=list(self.run_hparams.input_shape) + [self.run_hparams.n_channels],
mask_shape=list(self.run_hparams.mask_shape) + [self.run_hparams.n_channels],
num_threads=64,
use_gpu_prefetch=True,
normalize_data_method="zero_centered",
seed=self.run_hparams.seed
)
else:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("Using Synthetic Data ...")
return self.dataset.synth_dataset_fn(
batch_size=batch_size,
training=True,
input_shape=list(self.run_hparams.input_shape) + [self.run_hparams.n_channels],
mask_shape=list(self.run_hparams.mask_shape) + [self.run_hparams.n_channels],
num_threads=64,
use_gpu_prefetch=True,
normalize_data_method="zero_centered",
only_defective_images=True,
augment_data=augment_data,
seed=self.run_hparams.seed
)
model = self._get_estimator(mode='train', run_params=estimator_params, xla=self.xla)
try:
model.train(
input_fn=training_data_fn,
steps=num_steps,
hooks=training_hooks,
)
except KeyboardInterrupt:
print("Keyboard interrupt")
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print('Ending Model Training ...')
def evaluate(self, iter_unit, num_iter, batch_size, warmup_steps=50, is_benchmark=False, save_eval_results_to_json=False):
if iter_unit not in ["epoch", "batch"]:
raise ValueError('`iter_unit` value is unknown: %s (allowed: ["epoch", "batch"])' % iter_unit)
if self.run_hparams.data_dir is None and not is_benchmark:
raise ValueError('`data_dir` must be specified for evaluation!')
# if hvd_utils.is_using_hvd() and hvd.rank() != 0:
# raise RuntimeError('Multi-GPU inference is not supported')
print('Defining Model Estimator ...\n')
if self.run_hparams.data_dir is not None:
filenames, num_samples, num_steps, num_epochs = self.dataset.get_dataset_runtime_specs(
training=False, iter_unit=iter_unit, num_iter=num_iter, global_batch_size=batch_size
)
steps_per_epoch = num_steps / num_epochs
else:
num_epochs = 1
num_steps = num_iter
steps_per_epoch = num_steps
evaluation_hooks = [
ProfilerHook(
global_batch_size=batch_size,
log_every=self.run_hparams.log_every_n_steps,
warmup_steps=warmup_steps,
is_training=False,
sample_dir=self.run_hparams.sample_dir
)
]
print('Starting Model Evaluation ...\n')
Logger.log(step=('PARAMETER'), data={"Epochs": num_epochs}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Total Steps": num_steps}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Steps per Epoch": steps_per_epoch}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"GPU Batch Size": batch_size}, verbosity=Logger.Verbosity.DEFAULT)
Logger.log(step=('PARAMETER'), data={"Total Files to be Processed": num_steps * batch_size}, verbosity=Logger.Verbosity.DEFAULT)
print() # visual spacing
estimator_params = {
'batch_size': batch_size,
'steps_per_epoch': steps_per_epoch,
'loss_fn_name': self.run_hparams.loss_fn_name,
'debug_verbosity': self.run_hparams.debug_verbosity,
}
def evaluation_data_fn():
if not is_benchmark or self.run_hparams.data_dir is not None:
return self.dataset.dataset_fn(
batch_size=batch_size,
training=False,
input_shape=list(self.run_hparams.input_shape) + [self.run_hparams.n_channels],
mask_shape=list(self.run_hparams.mask_shape) + [self.run_hparams.n_channels],
num_threads=64,
use_gpu_prefetch=True,
normalize_data_method="zero_centered",
only_defective_images=False,
augment_data=False,
seed=self.run_hparams.seed
)
else:
print("Using Synthetic Data ...")
return self.dataset.synth_dataset_fn(
batch_size=batch_size,
training=False,
input_shape=list(self.run_hparams.input_shape) + [self.run_hparams.n_channels],
mask_shape=list(self.run_hparams.mask_shape) + [self.run_hparams.n_channels],
num_threads=64,
use_gpu_prefetch=True,
normalize_data_method="zero_centered",
only_defective_images=False,
augment_data=False,
seed=self.run_hparams.seed
)
model = self._get_estimator(mode='validation', run_params=estimator_params, xla=self.xla)
try:
eval_results = model.evaluate(
input_fn=evaluation_data_fn,
steps=num_steps,
hooks=evaluation_hooks,
)
print('Ending Model Evaluation ...')
print('###################################\n\nEvaluation Results:\n')
data_to_log = {"{prefix}.{key}".format(prefix=Logger._stage, key=key): float(val)
for key, val in sorted(eval_results.items(), key=operator.itemgetter(0))
if not any(val in key for val in ["loss", "global_step", "Confusion_Matrix"])}
Logger.log(step=(), data=data_to_log, verbosity=Logger.Verbosity.DEFAULT)
fns = eval_results["Confusion_Matrix_FN"]
fps = eval_results["Confusion_Matrix_FP"]
tns = eval_results["Confusion_Matrix_TN"]
tps = eval_results["Confusion_Matrix_TP"]
positives = np.add(tps, fns)
negatives = np.add(tns, fps)
tpr = np.divide(tps, positives)
tnr = np.divide(tns, negatives)
Logger.log(
step=(num_steps,),
data={"{prefix}.true_positives".format(prefix=Logger._stage): str(tps)},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(num_steps,),
data={"{prefix}.true_negatives".format(prefix=Logger._stage): str(tns)},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(num_steps,),
data={"{prefix}.false_positives".format(prefix=Logger._stage): str(fps)},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(num_steps,),
data={"{prefix}.false_negatives".format(prefix=Logger._stage): str(fns)},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(num_steps,),
data={"{prefix}.true_positive_rate".format(prefix=Logger._stage): str(["%.3f" % x for x in tpr])},
verbosity=Logger.Verbosity.DEFAULT
)
Logger.log(
step=(num_steps,),
data={"{prefix}.true_negative_rate".format(prefix=Logger._stage): str(["%.3f" % x for x in tnr])},
verbosity=Logger.Verbosity.DEFAULT
)
if save_eval_results_to_json:
results_dict = {
'IoU': {
'0.75': str(eval_results["IoU_THS_0.75"]),
'0.85': str(eval_results["IoU_THS_0.85"]),
'0.95': str(eval_results["IoU_THS_0.95"]),
'0.99': str(eval_results["IoU_THS_0.99"]),
},
'TPR': {
'0.75': str(tpr[-4]),
'0.85': str(tpr[-3]),
'0.95': str(tpr[-2]),
'0.99': str(tpr[-1]),
},
'TNR': {
'0.75': str(tnr[-4]),
'0.85': str(tnr[-3]),
'0.95': str(tnr[-2]),
'0.99': str(tnr[-1]),
}
}
with open(os.path.join(self.run_hparams.model_dir, "..", "results.json"), 'w') as f:
json.dump(results_dict, f)
except KeyboardInterrupt:
print("Keyboard interrupt")
|
PyTorch/SpeechRecognition/Jasper/scripts | scripts | evaluation | #!/bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -a
: ${PREDICTION_FILE:=}
: ${DATASET:="test-other"}
bash ./scripts/inference.sh "$@"
|
PyTorch/Translation/Transformer/scripts | scripts | run_DGX1_FP32 | #! /bin/bash
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
nvidia-smi
RESULTS_DIR='/results'
CHECKPOINTS_DIR='/results/checkpoints'
mkdir -p $CHECKPOINTS_DIR
: ${SEED:=1}
: ${LR:=0.000846}
: ${WARMUP:=4000}
: ${NUM_EPOCHS:=30}
: ${BS:=2560}
: ${NUM_GPU:=8}
STAT_FILE=${RESULTS_DIR}/DGX1_fp32_${NUM_GPU}GPU.json
DISTRIBUTED="-m torch.distributed.run --nproc_per_node=${NUM_GPU}"
python ${DISTRIBUTED} /workspace/translation/train.py \
/data/ \
--arch transformer_wmt_en_de_big_t2t \
--share-all-embeddings \
--optimizer adam \
--adam-betas 0.9 0.997 \
--adam-eps 1e-9 \
--clip-norm 0.0 \
--lr-scheduler inverse_sqrt \
--warmup-init-lr 0.0 \
--warmup-updates ${WARMUP} \
--lr $LR \
--min-lr 0.0 \
--dropout 0.1 \
--weight-decay 0.0 \
--criterion label_smoothed_cross_entropy \
--label-smoothing 0.1 \
--max-tokens ${BS} \
--seed ${SEED} \
--max-epoch ${NUM_EPOCHS} \
--no-epoch-checkpoints \
--fuse-layer-norm \
--online-eval \
--log-interval 500 \
--save-dir ${RESULTS_DIR} \
--stat-file ${STAT_FILE}
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/model_repo/jasper-feature-extractor | jasper-feature-extractor | config | name: "jasper-feature-extractor"
platform: "pytorch_libtorch"
default_model_filename: "jasper-feature-extractor.pt"
max_batch_size: 16
input [ {
name: "AUDIO_SIGNAL__0"
data_type: TYPE_FP32
dims: [ -1 ]
},
{
name: "NUM_SAMPLES__1"
data_type: TYPE_INT32
dims: [ 1 ]
reshape { shape: [] }
}
]
output [
{
name: "AUDIO_FEATURES__0"
data_type: TYPE_FP32
dims: [64, -1]
},
{
name: "NUM_TIME_STEPS__1"
data_type: TYPE_INT32
dims: [ 1 ]
reshape { shape: [] }
}
] |
TensorFlow2/Recommendation/WideAndDeep/scripts | scripts | training_benchmark | #!/bin/bash
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
usage() {
cat <<EOF
Usage: bash scripts/training_benchmark.sh -g gpu
-g | --gpu (Required) Number of gpus
-a | --amp (Optional) Use amp
-x | --xla (Optional) Use xla
EOF
}
if [ ! -d "scripts" ] || [ ! "$(ls -A 'scripts')" ]; then
echo "You are probably calling this script from wrong directory"
usage
exit 1
fi
amp=
xla=
gpu=
while [ "$1" != "" ]; do
case $1 in
-g | --gpu)
shift
gpu="$1"
;;
-a | --amp)
amp="--amp"
;;
-x | --xla)
xla="--xla"
;;
*)
usage
exit 1
;;
esac
shift
done
if [ -z "$gpu" ]; then
echo "Missing number of gpus param"
usage
exit 1
fi
if ! [ "$gpu" -ge 0 ] 2>/dev/null || [[ ! "$gpu" =~ ^(1|4|8)$ ]]; then
echo "Expected number of gpus (${gpu}) to be 1, 4 or 8"
usage
exit 1
fi
cmd="horovodrun -np ${gpu} sh hvd_wrapper.sh \
python main.py \
--benchmark \
--benchmark_warmup_steps 500 \
--benchmark_steps 1000 \
${amp} \
${xla}"
set -x
$cmd
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | encoderBuilder | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "encoderBuilder.h"
#include "convBatchNormCreator.h"
#include "cudaUtils.h"
#include "dims1.h"
#include "encoderInstance.h"
#include "lstm.h"
#include "utils.h"
#include <cassert>
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const char* const INPUT_NAME = EncoderInstance::INPUT_NAME;
constexpr const char* const INPUT_MASK_NAME = EncoderInstance::INPUT_MASK_NAME;
constexpr const char* const INPUT_LENGTH_NAME = EncoderInstance::INPUT_LENGTH_NAME;
constexpr const char* const OUTPUT_NAME = EncoderInstance::OUTPUT_NAME;
constexpr const char* const OUTPUT_PROCESSED_NAME = EncoderInstance::OUTPUT_PROCESSED_NAME;
constexpr const char* const ENGINE_NAME = EncoderInstance::ENGINE_NAME;
} // namespace
using namespace nvinfer1;
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
EncoderBuilder::EncoderBuilder(const int numEmbeddingDimensions, const int numEncodingDimensions,
const int numAttentionDimensions, const int inputLength)
: mNumEmbeddingDimensions(numEmbeddingDimensions)
, mNumEncodingDimensions(numEncodingDimensions)
, mNumAttentionDimensions(numAttentionDimensions)
, mInputLength(inputLength)
{
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
TRTPtr<ICudaEngine> EncoderBuilder::build(
IBuilder& builder,
IModelImporter& importer,
const int maxBatchSize,
const bool useFP16)
{
TRTPtr<INetworkDefinition> network(builder.createNetworkV2(0));
network->setName("Tacotron2_Encoder");
// EMBEDDING ////////////////////////////////////////////////////////////////
ITensor* input
= network->addInput(INPUT_NAME, DataType::kINT32, Dims2(1, mInputLength));
const LayerData* embeddingData = importer.getWeights({"embedding"});
const int numSymbols
= embeddingData->get("weight").count / mNumEmbeddingDimensions;
assert(
numSymbols * mNumEmbeddingDimensions
== embeddingData->get("weight").count);
ILayer* const lookupLayer = network->addConstant(
Dims3(1, numSymbols, mNumEmbeddingDimensions),
embeddingData->get("weight"));
lookupLayer->setName("embedding.constant");
ILayer* const gatherLayer
= network->addGather(*lookupLayer->getOutput(0), *input, 1);
gatherLayer->setName("embedding.gather");
IShuffleLayer* const embTransLayer
= network->addShuffle(*gatherLayer->getOutput(0));
embTransLayer->setFirstTranspose({0, 1, 3, 2});
embTransLayer->setReshapeDimensions(Dims3(mNumEmbeddingDimensions, -1, 1));
embTransLayer->setName("embedding.transpose");
input = embTransLayer->getOutput(0);
// ENCODING /////////////////////////////////////////////////////////////////
ITensor* inputMask = network->addInput(
INPUT_MASK_NAME, DataType::kFLOAT, Dims3(1, mInputLength, 1));
ITensor* inputLength
= network->addInput(INPUT_LENGTH_NAME, DataType::kINT32, Dims1(1));
ILayer* const inputMaskLayer = network->addElementWise(
*input, *inputMask, ElementWiseOperation::kPROD);
input = inputMaskLayer->getOutput(0);
// we need to ensure layer data is around during network construction
ConvBatchNormCreator convBatchNormCreator;
for (int layer = 0; layer < 3; ++layer) {
const LayerData* const convData
= importer.getWeights({"encoder",
"convolutions",
std::to_string(layer),
"conv_layer",
"conv"});
const LayerData* const normData = importer.getWeights(
{"encoder", "convolutions", std::to_string(layer), "batch_norm"});
ILayer* const convLayer = convBatchNormCreator.add(
*network,
input,
*convData,
*normData,
"relu",
"encoder.convolutions." + std::to_string(layer));
ILayer* const maskLayer = network->addElementWise(
*convLayer->getOutput(0), *inputMask, ElementWiseOperation::kPROD);
input = maskLayer->getOutput(0);
}
IShuffleLayer* const transposeLayer = network->addShuffle(*input);
transposeLayer->setFirstTranspose({2, 1, 0});
transposeLayer->setName("encoder.convolutions.transpose");
const LayerData* const lstmData = importer.getWeights({"encoder", "lstm"});
ILayer* const lstmLayer = LSTM::addPaddedBidirectional(
network.get(), transposeLayer->getOutput(0), inputLength, mNumEncodingDimensions, *lstmData);
lstmLayer->setName("encoder.lstm");
ILayer* const outputMaskLayer
= network->addElementWise(*lstmLayer->getOutput(0), *inputMask, ElementWiseOperation::kPROD);
outputMaskLayer->setName("encoder.mask");
ITensor* const output = outputMaskLayer->getOutput(0);
output->setName(OUTPUT_NAME);
network->markOutput(*output);
// MEMORY ///////////////////////////////////////////////////////////////////
IShuffleLayer* const memTransLayer = network->addShuffle(*output);
memTransLayer->setReshapeDimensions(Dims4(-1, mNumEncodingDimensions, 1, 1));
const LayerData* const linearData
= importer.getWeights({"decoder", "attention_layer", "memory_layer", "linear_layer"});
ILayer* const linearLayer = network->addFullyConnected(*memTransLayer->getOutput(0), mNumAttentionDimensions,
linearData->get("weight"), Weights{DataType::kFLOAT, 0, 0});
linearLayer->setName("decoder.attention_layer.memory_layer.linear_layer");
ITensor* const outputProcessed = linearLayer->getOutput(0);
outputProcessed->setName(OUTPUT_PROCESSED_NAME);
network->markOutput(*outputProcessed);
// build engine
TRTPtr<IBuilderConfig> config(builder.createBuilderConfig());
config->setMaxWorkspaceSize(1ULL << 29); // 512 MB
if (useFP16)
{
config->setFlag(BuilderFlag::kFP16);
}
builder.setMaxBatchSize(maxBatchSize);
TRTPtr<ICudaEngine> engine(
builder.buildEngineWithConfig(*network, *config));
if (!engine)
{
throw std::runtime_error("Failed to build Tacotron2::Encoder engine.");
}
return engine;
}
} // namespace tts
|
PyTorch/SpeechRecognition/Jasper/common/text | text | numbers | # Copyright (c) 2017 Keith Ito
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" from https://github.com/keithito/tacotron
Modified to add support for time and slight tweaks to _expand_number
"""
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
_time_re = re.compile(r'([0-9]{1,2}):([0-9]{2})')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
if int(m.group(0)[0]) == 0:
return _inflect.number_to_words(m.group(0), andword='', group=1)
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
# Handle phone numbers and other large numbers
elif num > 1000000000 and num % 10000 != 0:
return _inflect.number_to_words(num, andword='', group=1)
else:
return _inflect.number_to_words(num, andword='')
def _expand_time(m):
mins = int(m.group(2))
if mins == 0:
return _inflect.number_to_words(m.group(1))
return " ".join([_inflect.number_to_words(m.group(1)), _inflect.number_to_words(m.group(2))])
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
text = re.sub(_time_re, _expand_time, text)
return text
|
TensorFlow2/Detection/Efficientdet/utils | utils | util_keras | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common keras utils."""
from typing import Text
from absl import logging
import tensorflow as tf
from model import normalization_builder
def build_batch_norm(is_training_bn: bool,
beta_initializer: Text = 'zeros',
gamma_initializer: Text = 'ones',
data_format: Text = 'channels_last',
momentum: float = 0.99,
epsilon: float = 1e-3,
name: Text = 'tpu_batch_normalization'):
"""Build a batch normalization layer.
Args:
is_training_bn: `bool` for whether the model is training.
beta_initializer: `str`, beta initializer.
gamma_initializer: `str`, gamma initializer.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
momentum: `float`, momentum of batch norm.
epsilon: `float`, small value for numerical stability.
name: the name of the batch normalization layer
Returns:
A normalized `Tensor` with the same `data_format`.
"""
axis = 1 if data_format == 'channels_first' else -1
batch_norm_class = normalization_builder.batch_norm_class(is_training_bn)
bn_layer = batch_norm_class(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
name=name)
return bn_layer
def get_ema_vars(model):
"""Get all exponential moving average (ema) variables."""
ema_vars = model.trainable_weights
for v in model.weights:
# We maintain moving averages for batch norm moving mean and variance as well.
if 'moving_mean' in v.name or 'moving_variance' in v.name:
ema_vars.append(v)
ema_vars_dict = dict()
# Remove duplicate vars
for var in ema_vars:
ema_vars_dict[var.ref()] = var
return ema_vars_dict
def average_name(ema, var):
"""Returns the name of the `Variable` holding the average for `var`.
A workaround for TF2.
Args:
ema: A `ExponentialMovingAverage` object.
var: A `Variable` object.
Returns:
A string: The name of the variable that will be used or was used
by the `ExponentialMovingAverage class` to hold the moving average of `var`.
"""
if var.ref() in ema._averages: # pylint: disable=protected-access
return ema._averages[var.ref()].name.split(':')[0] # pylint: disable=protected-access
return tf.compat.v1.get_default_graph().unique_name(
var.name.split(':')[0] + '/' + ema.name, mark_as_used=False)
def restore_ckpt(model,
ckpt_path_or_file,
ema_decay=0.9998,
steps_per_epoch=0,
skip_mismatch=True,
expect_partial=False):
"""Restore variables from a given checkpoint.
Args:
model: the keras model to be restored.
ckpt_path_or_file: the path or file for checkpoint.
ema_decay: ema decay rate. If None or zero or negative value, disable ema.
steps_per_epoch: number of iterations in each training epoch
skip_mismatch: whether to skip variables if shape mismatch.
expect_partial: this will suppress warnings when variables mismatch
"""
if ckpt_path_or_file == '_':
logging.info('Running test: do not load any ckpt.')
return
if tf.io.gfile.isdir(ckpt_path_or_file):
ckpt_path_or_file = tf.train.latest_checkpoint(ckpt_path_or_file)
if not ckpt_path_or_file:
return 0
if (tf.train.list_variables(ckpt_path_or_file)[0][0] ==
'_CHECKPOINTABLE_OBJECT_GRAPH'):
if expect_partial:
model.load_weights(ckpt_path_or_file).expect_partial()
else:
model.load_weights(ckpt_path_or_file)
logging.info('Restored checkpoint with load_weights method!')
else:
if ema_decay > 0:
ema = tf.train.ExponentialMovingAverage(decay=0.0)
ema_vars = get_ema_vars(model)
var_dict = {
average_name(ema, var): var for (ref, var) in ema_vars.items()
}
else:
ema_vars = get_ema_vars(model)
var_dict = {
var.name.split(':')[0]: var for (ref, var) in ema_vars.items()
}
# add variables that not in var_dict
for v in model.weights:
if v.ref() not in ema_vars:
var_dict[v.name.split(':')[0]] = v
# try to load graph-based checkpoint with ema support,
# else load checkpoint via keras.load_weights which doesn't support ema.
for i, (key, var) in enumerate(var_dict.items()):
try:
var.assign(tf.train.load_variable(ckpt_path_or_file, key))
if i < 10:
logging.info('Init %s from %s (%s)', var.name, key, ckpt_path_or_file)
except tf.errors.NotFoundError as e:
if skip_mismatch:
logging.warning('Not found %s in %s', key, ckpt_path_or_file)
else:
raise e
except ValueError as e:
if skip_mismatch:
logging.warning('%s: %s', key, e)
else:
raise e
if steps_per_epoch > 0:
last_iteration = model.optimizer.iterations
ckpt_epoch = last_iteration // steps_per_epoch
logging.info("Restored checkpoint at epoch: {}".format(ckpt_epoch + 1))
return ckpt_epoch
def get_mixed_precision_policy():
current_version = tuple(map(int, tf.__version__.split('.')))[:3]
threshold_version = (2, 4, 0) # The threshold tensorflow version is 2.4.0
return tf.keras.mixed_precision.global_policy() if current_version >= threshold_version \
else tf.keras.mixed_precision.experimental.global_policy() |
TensorFlow/Detection/SSD/models/research/object_detection/box_coders | box_coders | keypoint_box_coder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.keypoint_box_coder."""
import tensorflow as tf
from object_detection.box_coders import keypoint_box_coder
from object_detection.core import box_list
from object_detection.core import standard_fields as fields
class KeypointBoxCoderTest(tf.test.TestCase):
def test_get_correct_relative_codes_after_encoding(self):
boxes = [[10., 10., 20., 15.],
[0.2, 0.1, 0.5, 0.4]]
keypoints = [[[15., 12.], [10., 15.]],
[[0.5, 0.3], [0.2, 0.4]]]
num_keypoints = len(keypoints[0])
anchors = [[15., 12., 30., 18.],
[0.1, 0.0, 0.7, 0.9]]
expected_rel_codes = [
[-0.5, -0.416666, -0.405465, -0.182321,
-0.5, -0.5, -0.833333, 0.],
[-0.083333, -0.222222, -0.693147, -1.098612,
0.166667, -0.166667, -0.333333, -0.055556]
]
boxes = box_list.BoxList(tf.constant(boxes))
boxes.add_field(fields.BoxListFields.keypoints, tf.constant(keypoints))
anchors = box_list.BoxList(tf.constant(anchors))
coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
rel_codes_out, = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_get_correct_relative_codes_after_encoding_with_scaling(self):
boxes = [[10., 10., 20., 15.],
[0.2, 0.1, 0.5, 0.4]]
keypoints = [[[15., 12.], [10., 15.]],
[[0.5, 0.3], [0.2, 0.4]]]
num_keypoints = len(keypoints[0])
anchors = [[15., 12., 30., 18.],
[0.1, 0.0, 0.7, 0.9]]
scale_factors = [2, 3, 4, 5]
expected_rel_codes = [
[-1., -1.25, -1.62186, -0.911608,
-1.0, -1.5, -1.666667, 0.],
[-0.166667, -0.666667, -2.772588, -5.493062,
0.333333, -0.5, -0.666667, -0.166667]
]
boxes = box_list.BoxList(tf.constant(boxes))
boxes.add_field(fields.BoxListFields.keypoints, tf.constant(keypoints))
anchors = box_list.BoxList(tf.constant(anchors))
coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints, scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
rel_codes_out, = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_get_correct_boxes_after_decoding(self):
anchors = [[15., 12., 30., 18.],
[0.1, 0.0, 0.7, 0.9]]
rel_codes = [
[-0.5, -0.416666, -0.405465, -0.182321,
-0.5, -0.5, -0.833333, 0.],
[-0.083333, -0.222222, -0.693147, -1.098612,
0.166667, -0.166667, -0.333333, -0.055556]
]
expected_boxes = [[10., 10., 20., 15.],
[0.2, 0.1, 0.5, 0.4]]
expected_keypoints = [[[15., 12.], [10., 15.]],
[[0.5, 0.3], [0.2, 0.4]]]
num_keypoints = len(expected_keypoints[0])
anchors = box_list.BoxList(tf.constant(anchors))
coder = keypoint_box_coder.KeypointBoxCoder(num_keypoints)
boxes = coder.decode(rel_codes, anchors)
with self.test_session() as sess:
boxes_out, keypoints_out = sess.run(
[boxes.get(), boxes.get_field(fields.BoxListFields.keypoints)])
self.assertAllClose(boxes_out, expected_boxes)
self.assertAllClose(keypoints_out, expected_keypoints)
def test_get_correct_boxes_after_decoding_with_scaling(self):
anchors = [[15., 12., 30., 18.],
[0.1, 0.0, 0.7, 0.9]]
rel_codes = [
[-1., -1.25, -1.62186, -0.911608,
-1.0, -1.5, -1.666667, 0.],
[-0.166667, -0.666667, -2.772588, -5.493062,
0.333333, -0.5, -0.666667, -0.166667]
]
scale_factors = [2, 3, 4, 5]
expected_boxes = [[10., 10., 20., 15.],
[0.2, 0.1, 0.5, 0.4]]
expected_keypoints = [[[15., 12.], [10., 15.]],
[[0.5, 0.3], [0.2, 0.4]]]
num_keypoints = len(expected_keypoints[0])
anchors = box_list.BoxList(tf.constant(anchors))
coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints, scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors)
with self.test_session() as sess:
boxes_out, keypoints_out = sess.run(
[boxes.get(), boxes.get_field(fields.BoxListFields.keypoints)])
self.assertAllClose(boxes_out, expected_boxes)
self.assertAllClose(keypoints_out, expected_keypoints)
def test_very_small_width_nan_after_encoding(self):
boxes = [[10., 10., 10.0000001, 20.]]
keypoints = [[[10., 10.], [10.0000001, 20.]]]
anchors = [[15., 12., 30., 18.]]
expected_rel_codes = [[-0.833333, 0., -21.128731, 0.510826,
-0.833333, -0.833333, -0.833333, 0.833333]]
boxes = box_list.BoxList(tf.constant(boxes))
boxes.add_field(fields.BoxListFields.keypoints, tf.constant(keypoints))
anchors = box_list.BoxList(tf.constant(anchors))
coder = keypoint_box_coder.KeypointBoxCoder(2)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
rel_codes_out, = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/Segmentation/UNet_Medical | UNet_Medical | README | # UNet Medical Image Segmentation for TensorFlow 2.x
This repository provides a script and recipe to train UNet Medical to achieve state of the art accuracy, and is tested and maintained by NVIDIA.
## Table of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Inference process](#inference-process)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80G)](#training-accuracy-nvidia-dgx-a100-8x-a100-80g)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16G)](#training-accuracy-nvidia-dgx-1-8x-v100-16g)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80G)](#training-performance-nvidia-dgx-a100-8x-a100-80g)
* [Training performance: NVIDIA DGX-1 (8x V100 16G)](#training-performance-nvidia-dgx-1-8x-v100-16g)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 80G)](#inference-performance-nvidia-dgx-a100-1x-a100-80g)
* [Inference performance: NVIDIA DGX-1 (1x V100 16G)](#inference-performance-nvidia-dgx-1-1x-v100-16g)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
The UNet model is a convolutional neural network for 2D image segmentation. This repository contains a UNet implementation as described in the original paper [UNet: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597), without any alteration.
This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 2.2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
UNet was first introduced by Olaf Ronneberger, Philip Fischer, and Thomas Brox in the paper: [UNet: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597). UNet allows for seamless segmentation of 2D images, with high accuracy and performance, and can be adapted to solve many different segmentation problems.
The following figure shows the construction of the UNet model and its different components. UNet is composed of a contractive and an expanding path that together build a bottleneck in its centermost part through a combination of convolution and pooling operations. After this bottleneck, the image is reconstructed through a combination of convolutions and upsampling. Skip connections are added with the goal of helping the backward flow of gradients in order to improve training.

Figure 1. The architecture of a UNet model. Taken from the <a href="https://arxiv.org/abs/1505.04597">UNet: Convolutional Networks for Biomedical Image Segmentation paper</a>.
### Default configuration
UNet consists of a contractive (left-side) and expanding (right-side) path. It repeatedly applies unpadded convolutions followed by max pooling for downsampling. Every step in the expanding path consists of an upsampling of the feature maps and concatenation with the correspondingly cropped feature map from the contractive path.
### Feature support matrix
The following features are supported by this model:
| **Feature** | **UNet Medical** |
|-------------|---------------------|
| Automatic mixed precision (AMP) | Yes |
| Horovod Multi-GPU (NCCL) | Yes |
| Accelerated Linear Algebra (XLA)| Yes |
#### Features
**Automatic Mixed Precision (AMP)**
This implementation of UNet uses AMP to implement mixed precision training. It allows us to use FP16 training with FP32 master weights by modifying just a few lines of code.
**Horovod**
Horovod is a distributed training framework for TensorFlow, Keras, PyTorch, and MXNet. The goal of Horovod is to make distributed deep learning fast and easy to use. For more information about how to get started with Horovod, see the [Horovod: Official repository](https://github.com/horovod/horovod).
**Multi-GPU training with Horovod**
Our model uses Horovod to implement efficient multi-GPU training with NCCL. For details, see example sources in this repository or see the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage).
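For reference, the following is a minimal sketch of the usual Horovod setup in TensorFlow 2. It is illustrative only and not the exact integration used in this repository (see `run.py` for the real code):
```python
import horovod.tensorflow as hvd
import tensorflow as tf

# Initialize Horovod and pin exactly one GPU to each process.
hvd.init()
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')

# During training, gradients are averaged across workers by wrapping the gradient tape:
#   tape = hvd.DistributedGradientTape(tape)
# and the initial model/optimizer state is broadcast from rank 0 so all workers start identically:
#   hvd.broadcast_variables(model.variables, root_rank=0)
```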
**XLA support (experimental)**
XLA is a domain-specific compiler for linear algebra that can accelerate TensorFlow models with potentially no source code changes. The results are improvements in speed and memory usage: most internal benchmarks run ~1.1-1.5x faster after XLA is enabled.
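As an illustration only (this repository exposes the feature through the `--xla` flag), XLA auto-clustering can be enabled programmatically in TensorFlow 2:
```python
import tensorflow as tf

# Turn on XLA auto-clustering so that eligible subgraphs are compiled by XLA.
tf.config.optimizer.set_jit(True)
```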
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta and Turing GPUs automatically. The TensorFlow framework code makes all necessary model changes internally.
In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling.
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide.
#### Enabling mixed precision
This implementation exploits the TensorFlow Automatic Mixed Precision feature. To enable AMP, you simply need to supply the `--amp` flag to the `main.py` script. For reference, enabling AMP required us to apply the following changes to the code:
1. Set Keras mixed precision policy:
```python
if params['use_amp']:
tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
```
2. Use loss scaling wrapper on the optimizer:
```python
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
if params['use_amp']:
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer, dynamic=True)
```
3. Use scaled loss to calculate gradients:
```python
scaled_loss = optimizer.get_scaled_loss(loss)
tape = hvd.DistributedGradientTape(tape)
scaled_gradients = tape.gradient(scaled_loss, model.trainable_variables)
gradients = optimizer.get_unscaled_gradients(scaled_gradients)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
```
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
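TF32 therefore requires no code changes; if you want to compare against strict FP32 math, recent TensorFlow releases let you toggle it explicitly. The snippet below is shown only as an illustration:
```python
import tensorflow as tf

# TF32 is enabled by default on Ampere GPUs; disable it to force full FP32 matrix math.
tf.config.experimental.enable_tensor_float_32_execution(False)

# Query the current setting.
print(tf.config.experimental.tensor_float_32_execution_enabled())
```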
## Setup
The following section lists the requirements that you need to meet in order to start training the UNet Medical model.
### Requirements
This repository contains Dockerfile which extends the TensorFlow NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- TensorFlow 21.02-tf2-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-tensorflow) with TensorFlow 2.2 or later
- GPU-based architecture:
- [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC container registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry)
- [Running TensorFlow](https://docs.nvidia.com/deeplearning/dgx/tensorflow-release-notes/running.html#running)
For those unable to use the TensorFlow NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the UNet model on the [EM segmentation challenge dataset](http://brainiac2.mit.edu/isbi_challenge/home). These steps enable you to build the UNet TensorFlow NGC container, train and evaluate your model, and generate predictions on the test data. Furthermore, you can then choose to:
* compare your evaluation accuracy with our [Training accuracy results](#training-accuracy-results),
* compare your training performance with our [Training performance benchmark](#training-performance-benchmark),
* compare your inference performance with our [Inference performance benchmark](#inference-performance-benchmark).
For the specifics concerning training and inference, see the [Advanced](#advanced) section.
1. Clone the repository.
Executing this command will create your local repository with all the code to run UNet.
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/TensorFlow2/Segmentation/UNet_Medical/
```
2. Build the UNet TensorFlow NGC container.
This command will use the `Dockerfile` to create a Docker image named `unet_tf2`, downloading all the required components automatically.
```
docker build -t unet_tf2 .
```
The NGC container contains all the components optimized for usage on NVIDIA hardware.
3. Start an interactive session in the NGC container to run preprocessing/training/inference.
The following command will launch the container and mount the `./data` directory as a volume to the `/data` directory inside the container, and `./results` directory to the `/results` directory in the container.
```bash
mkdir data
mkdir results
docker run --runtime=nvidia -it --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 --rm --ipc=host -v ${PWD}/data:/data -v ${PWD}/results:/results unet_tf2:latest /bin/bash
```
Any datasets and experiment results (logs, checkpoints, etc.) saved to `/data` or `/results` will be accessible
in the `./data` or `./results` directory on the host, respectively.
4. Download and preprocess the data.
The UNet script `main.py` operates on data from the [ISBI Challenge](http://brainiac2.mit.edu/isbi_challenge/home), the dataset originally employed in the [UNet paper](https://arxiv.org/abs/1505.04597). The data is available to download upon registration on the website.
Training and test data are composed of 3 multi-page `TIF` files, each containing 30 2D-images (around 30 MB total). Once downloaded, the data can be used to run the training and benchmark scripts described below, by pointing `main.py` to its location using the `--data_dir` flag.
**Note:** Masks are only provided for training data.
5. Start training.
After the Docker container is launched, the training with the [default hyperparameters](#default-configuration) (for example 1/8 GPUs FP32/TF-AMP) can be started with:
```bash
bash examples/unet_TRAIN_SINGLE{_TF-AMP}.sh <number/of/gpus> <path/to/dataset> <path/to/checkpoint>
```
For example, to run training with full precision (32-bit) on 1 GPU from the project’s folder, simply use:
```bash
bash examples/unet_TRAIN_SINGLE.sh 1 /data /results
```
This script will launch a training on a single fold (fold 0) and store the model’s checkpoint in the <path/to/checkpoint> directory.
The script can be run directly by modifying flags if necessary, especially the number of GPUs, which is defined after the `-np` flag. Since the test volume does not have labels, 20% of the training data is used for validation in a 5-fold cross-validation manner. The fold number can be changed using `--fold` with an integer in the range 0-4. For example, to run with 4 GPUs using fold 1 use:
```bash
horovodrun -np 4 python main.py --data_dir /data --model_dir /results --batch_size 1 --exec_mode train --fold 1 --xla --amp
```
Training will result in a checkpoint file being written to `./results` on the host machine.
6. Start validation/evaluation.
The trained model can be evaluated by passing the `--exec_mode evaluate` flag. Since evaluation is carried out on a validation dataset, the `--fold` parameter should be set. For example:
```bash
python main.py --data_dir /data --model_dir /results --batch_size 1 --exec_mode evaluate --fold 0 --xla --amp
```
Evaluation can also be triggered jointly after training by passing the `--exec_mode train_and_evaluate` flag.
7. Start inference/predictions.
The trained model can be used for inference by passing the `--exec_mode predict` flag:
```bash
python main.py --data_dir /data --model_dir /results --batch_size 1 --exec_mode predict --xla --amp
```
Now that you have your model trained and evaluated, you can choose to compare your training results with our [Training accuracy results](#training-accuracy-results). You can also choose to benchmark your training performance with the [Training performance benchmark](#training-performance-benchmark), or your inference performance with the [Inference performance benchmark](#inference-performance-benchmark). Following the steps in these sections will ensure that you achieve the same accuracy and performance results as stated in the [Results](#results) section.
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
In the root directory, the most important files are:
* `main.py`: Serves as the entry point to the application.
* `run.py`: Implements the logic for training, evaluation, and inference.
* `Dockerfile`: Specifies the container with the basic set of dependencies to run UNet.
* `requirements.txt`: Set of extra requirements for running UNet.
The `utils/` folder encapsulates the necessary tools to train and perform inference using UNet. Its main components are:
* `cmd_util.py`: Implements the command-line arguments parsing.
* `data_loader.py`: Implements the data loading and augmentation.
* `losses.py`: Implements the losses used during training and evaluation.
* `parse_results.py`: Implements the intermediate results parsing.
* `setup.py`: Implements helper setup functions.
The `model/` folder contains information about the building blocks of UNet and the way they are assembled. Its contents are:
* `layers.py`: Defines the different blocks that are used to assemble UNet.
* `unet.py`: Defines the model architecture using the blocks from the `layers.py` script.
Other folders included in the root directory are:
* `examples/`: Provides examples for training and benchmarking UNet.
* `images/`: Contains a model diagram.
### Parameters
The complete list of the available parameters for the `main.py` script contains:
* `--exec_mode`: Select the execution mode to run the model (default: `train`). Modes available:
* `train` - trains model from scratch.
* `evaluate` - loads checkpoint (if available) and performs evaluation on validation subset (requires `--fold` other than `None`).
* `train_and_evaluate` - trains model from scratch and performs validation at the end (requires `--fold` other than `None`).
* `predict` - loads checkpoint (if available) and runs inference on the test set. Stores the results in `--model_dir` directory.
* `train_and_predict` - trains model from scratch and performs inference.
* `--model_dir`: Set the output directory for information related to the model (default: `/results`).
* `--log_dir`: Set the output directory for logs (default: None).
* `--data_dir`: Set the input directory containing the dataset (default: `None`).
* `--batch_size`: Size of each minibatch per GPU (default: `1`).
* `--fold`: Selected fold for cross-validation (default: `None`).
* `--max_steps`: Maximum number of steps (batches) for training (default: `1000`).
* `--seed`: Set random seed for reproducibility (default: `0`).
* `--weight_decay`: Weight decay coefficient (default: `0.0005`).
* `--log_every`: Log performance every n steps (default: `100`).
* `--learning_rate`: Model’s learning rate (default: `0.0001`).
* `--augment`: Enable data augmentation (default: `False`).
* `--benchmark`: Enable performance benchmarking (default: `False`). If the flag is set, the script runs in a benchmark mode - each iteration is timed and the performance result (in images per second) is printed at the end. Works for both `train` and `predict` execution modes.
* `--warmup_steps`: Used during benchmarking - the number of steps to skip (default: `200`). First iterations are usually much slower since the graph is being constructed. Skipping the initial iterations is required for a fair performance assessment.
* `--xla`: Enable accelerated linear algebra optimization (default: `False`).
* `--amp`: Enable automatic mixed precision (default: `False`).
### Command-line options
To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example:
```bash
python main.py --help
```
The following example output is printed when running the model:
```
usage: main.py [-h]
[--exec_mode {train,train_and_predict,predict,evaluate,train_and_evaluate}]
[--model_dir MODEL_DIR] --data_dir DATA_DIR [--log_dir LOG_DIR]
[--batch_size BATCH_SIZE] [--learning_rate LEARNING_RATE]
[--fold FOLD]
[--max_steps MAX_STEPS] [--weight_decay WEIGHT_DECAY]
[--log_every LOG_EVERY] [--warmup_steps WARMUP_STEPS]
[--seed SEED] [--augment] [--benchmark]
[--amp] [--xla]
UNet-medical
optional arguments:
-h, --help show this help message and exit
--exec_mode {train,train_and_predict,predict,evaluate,train_and_evaluate}
Execution mode of running the model
--model_dir MODEL_DIR
Output directory for information related to the model
--data_dir DATA_DIR Input directory containing the dataset for training
the model
--log_dir LOG_DIR Output directory for training logs
--batch_size BATCH_SIZE
Size of each minibatch per GPU
--learning_rate LEARNING_RATE
Learning rate coefficient for AdamOptimizer
--fold FOLD
Chosen fold for cross-validation. Use None to disable
cross-validation
--max_steps MAX_STEPS
Maximum number of steps (batches) used for training
--weight_decay WEIGHT_DECAY
Weight decay coefficient
--log_every LOG_EVERY
Log performance every n steps
--warmup_steps WARMUP_STEPS
Number of warmup steps
--seed SEED Random seed
--augment Perform data augmentation during training
--benchmark Collect performance metrics during training
--amp Train using TF-AMP
--xla Train using XLA
```
### Getting the data
The UNet model uses the [EM segmentation challenge dataset](http://brainiac2.mit.edu/isbi_challenge/home). Test images provided by the organization were used to produce the resulting masks for submission. The challenge's data is made available upon registration.
Training and test data are composed of three 512x512x30 `TIF` volumes (`test-volume.tif`, `train-volume.tif` and `train-labels.tif`). Files `test-volume.tif` and `train-volume.tif` contain grayscale 2D slices to be segmented. Additionally, training masks are provided in `train-labels.tif` as a 512x512x30 `TIF` volume, where each pixel has one of two classes:
* 0 indicating the presence of cellular membrane,
* 1 corresponding to background.
The objective is to produce a set of masks that segment the data as accurately as possible. The results are expected to be submitted as a 32-bit `TIF` 3D image, with values between `0` (100% membrane certainty) and `1` (100% non-membrane certainty).
#### Dataset guidelines
The training and test datasets are given as stacks of 30 2D-images provided as a multi-page `TIF` that can be read using the Pillow library and NumPy (both Python packages are installed by the `Dockerfile`).
Initially, data is loaded from a multi-page `TIF` file and converted to 512x512x30 NumPy arrays with the use of Pillow. The process of loading, normalizing and augmenting the data contained in the dataset can be found in the `data_loader.py` script.
These NumPy arrays are fed to the model through `tf.data.Dataset.from_tensor_slices()`, in order to achieve high performance.
The voxel intensities are then normalized to the interval `[-1, 1]`, whereas labels are one-hot encoded for later use in the dice or pixel-wise cross-entropy loss, becoming 512x512x30x2 tensors.
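A simplified sketch of this loading path is shown below. It assumes 8-bit pixel values and binary masks and is not the exact code from `data_loader.py`:
```python
import numpy as np
import tensorflow as tf
from PIL import Image, ImageSequence

def load_multipage_tif(path):
    """Load a multi-page TIF into a NumPy array of shape (pages, height, width)."""
    pages = [np.array(page) for page in ImageSequence.Iterator(Image.open(path))]
    return np.stack(pages).astype(np.float32)

images = load_multipage_tif('/data/train-volume.tif')   # (30, 512, 512)
masks = load_multipage_tif('/data/train-labels.tif')    # (30, 512, 512)

# Normalize intensities to [-1, 1] (assuming 8-bit input) and one-hot encode the two classes.
images = images / 127.5 - 1.0
labels = tf.one_hot(tf.cast(masks > 0, tf.int32), depth=2)

dataset = tf.data.Dataset.from_tensor_slices((images[..., np.newaxis], labels))
dataset = dataset.shuffle(30).batch(1).prefetch(tf.data.experimental.AUTOTUNE)
```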
If augmentation is enabled, the following augmentation techniques are applied (a minimal sketch is shown at the end of this subsection):
* Random horizontal flipping
* Random vertical flipping
* Crop to a random dimension and resize to input dimension
* Random brightness shifting
In the end, images are reshaped to 388x388 and padded to 572x572 to fit the input of the network. Masks are only reshaped to 388x388 to fit the output of the network. Moreover, pixel intensities are clipped to the `[-1, 1]` interval.
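The sketch below illustrates how such an augmentation function could be applied through `tf.data.Dataset.map`. It is an approximation under the assumptions stated in the comments, not the exact implementation from `data_loader.py`:
```python
import tensorflow as tf

def augment(image, label):
    """Illustrative augmentation, assuming `image` is 512x512x1 and `label` is a 512x512x2 one-hot mask."""
    # Random horizontal / vertical flips, applied jointly so image and mask stay aligned.
    if tf.random.uniform(()) > 0.5:
        image, label = tf.image.flip_left_right(image), tf.image.flip_left_right(label)
    if tf.random.uniform(()) > 0.5:
        image, label = tf.image.flip_up_down(image), tf.image.flip_up_down(label)
    # Crop image and mask together to a random size, then resize back to 512x512
    # (in practice the mask would typically be resized with nearest-neighbor interpolation).
    stacked = tf.concat([image, label], axis=-1)
    crop = tf.random.uniform((), minval=384, maxval=513, dtype=tf.int32)
    stacked = tf.image.random_crop(stacked, tf.stack([crop, crop, 3]))
    stacked = tf.image.resize(stacked, (512, 512))
    image, label = stacked[..., :1], stacked[..., 1:]
    # Random brightness shift on the image only.
    image = tf.image.random_brightness(image, max_delta=0.2)
    return image, label

# Typically applied inside the input pipeline:
# dataset = dataset.map(augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
```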
#### Multi-dataset
This implementation is tuned for the EM segmentation challenge dataset. Using other datasets is possible, but might require changes to the code (data loader) and tuning some hyperparameters (e.g. learning rate, number of iterations).
In the current implementation, the data loader works with NumPy arrays by loading them at initialization and passing them for training in slices via `tf.data.Dataset.from_tensor_slices()`. If you are able to fit your dataset into memory, then convert the data into three NumPy arrays - training images, training labels, and testing images (optional). If your dataset is large, you will have to adapt the data loader for lazy loading of data, for example as sketched below. For a walk-through, check the [TensorFlow tf.data API guide](https://www.tensorflow.org/guide/data_performance).
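For illustration, a lazy-loading variant could be built with `tf.data.Dataset.from_generator`; the file names and reader function below are hypothetical:
```python
import numpy as np
import tensorflow as tf

def sample_reader(image_paths, label_paths):
    """Yield (image, mask) pairs one at a time instead of materializing the whole dataset."""
    for image_path, label_path in zip(image_paths, label_paths):
        yield np.load(image_path), np.load(label_path)   # hypothetical per-sample .npy files

image_files = ['img_0.npy', 'img_1.npy']   # placeholder file lists
label_files = ['lbl_0.npy', 'lbl_1.npy']

dataset = tf.data.Dataset.from_generator(
    lambda: sample_reader(image_files, label_files),
    output_types=(tf.float32, tf.float32),
    output_shapes=((512, 512, 1), (512, 512, 2)),
)
dataset = dataset.shuffle(16).batch(1).prefetch(tf.data.experimental.AUTOTUNE)
```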
The performance of the model depends on the dataset size.
Generally, the model should scale better for datasets containing more data. For a smaller dataset, you might experience lower performance.
### Training process
The model trains for a total of 6,400 batches (6,400 / number of GPUs) with the default UNet setup:
* Adam optimizer with learning rate of 0.0001.
This default parametrization is applied when running scripts from the `./examples` directory and when running `main.py` without explicitly overriding these parameters. By default, the training is in full precision. To enable AMP, pass the `--amp` flag. AMP can be enabled for every mode of execution.
The default configuration minimizes a function _L = 1 - DICE + cross entropy_ during training.
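A minimal sketch of such a combined objective is shown below; the exact formulation used for training lives in `utils/losses.py`:
```python
import tensorflow as tf

def dice_plus_xent_loss(y_true, y_pred, eps=1.0):
    """Illustrative L = (1 - DICE) + cross-entropy for one-hot targets and softmax predictions."""
    intersection = tf.reduce_sum(y_true * y_pred, axis=(1, 2))
    union = tf.reduce_sum(y_true, axis=(1, 2)) + tf.reduce_sum(y_pred, axis=(1, 2))
    dice = tf.reduce_mean((2.0 * intersection + eps) / (union + eps))
    xent = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_true, y_pred))
    return (1.0 - dice) + xent
```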
The training can be run directly without using the predefined scripts. The name of the training script is `main.py`. Because of the multi-GPU support, training should always be run with the Horovod distributed launcher like this:
```bash
horovodrun -np <number/of/gpus> python main.py --data_dir /data [other parameters]
```
*Note:* When calling the `main.py` script manually, data augmentation is disabled. In order to enable data augmentation, use the `--augment` flag in your invocation.
The main result of the training is a set of checkpoints stored by default in `./results/` on the host machine and in `/results` inside the container. This location can be controlled by the `--model_dir` command-line argument if a different location was mounted while starting the container. When training is run in `train_and_predict` mode, inference takes place after training is finished, and the inference results are stored in the `/results` directory.
If the `--exec_mode train_and_evaluate` parameter was used and the `--fold` parameter is set to an integer value in {0, 1, 2, 3, 4}, evaluation on the validation set takes place after training is completed. The results of the evaluation will be printed to the console.
### Inference process
Inference can be launched with the same script used for training by passing the `--exec_mode predict` flag:
```bash
python main.py --exec_mode predict --data_dir /data --model_dir <path/to/checkpoint> [other parameters]
```
The script will then:
* Load the checkpoint from the `<path/to/checkpoint>` directory (by default `/results`),
* Run inference on the test dataset,
* Save the resulting binary masks in a `TIF` format.
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark training, run one of the `TRAIN_BENCHMARK` scripts in `./examples/`:
```bash
bash examples/unet_TRAIN_BENCHMARK{_TF-AMP}.sh <number/of/gpus> <path/to/dataset> <path/to/checkpoints> <batch/size>
```
For example, to benchmark training using mixed-precision on 8 GPUs use:
```bash
bash examples/unet_TRAIN_BENCHMARK_TF-AMP.sh 8 <path/to/dataset> <path/to/checkpoint> <batch/size>
```
Each of these scripts will by default run 200 warm-up iterations and benchmark the performance during training in the next 800 iterations.
To have more control, you can run the script by directly providing all relevant run parameters. For example:
```bash
horovodrun -np <num of gpus> python main.py --exec_mode train --benchmark --augment --data_dir <path/to/dataset> --model_dir <optional, path/to/checkpoint> --batch_size <batch/size> --warmup_steps <warm-up/steps> --max_steps <max/steps>
```
At the end of the script, a line reporting the best train throughput will be printed.
#### Inference performance benchmark
To benchmark inference, run one of the scripts in `./examples/`:
```bash
bash examples/unet_INFER_BENCHMARK{_TF-AMP}.sh <path/to/dataset> <path/to/checkpoint> <batch/size>
```
For example, to benchmark inference using mixed-precision:
```bash
bash examples/unet_INFER_BENCHMARK_TF-AMP.sh <path/to/dataset> <path/to/checkpoint> <batch/size>
```
Each of these scripts will by default run 200 warm-up iterations and benchmark the performance during inference in the next 400 iterations.
To have more control, you can run the script by directly providing all relevant run parameters. For example:
```bash
python main.py --exec_mode predict --benchmark --data_dir <path/to/dataset> --model_dir <optional, path/to/checkpoint> --batch_size <batch/size> --warmup_steps <warm-up/steps> --max_steps <max/steps>
```
At the end of the script, a line reporting the best inference throughput will be printed.
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 80G)
The following table lists the average DICE score across 5-fold cross-validation. Our results were obtained by running the `examples/unet_TRAIN_FULL{_TF-AMP}.sh` training script in the `tensorflow:21.02-tf2-py3` NGC container on NVIDIA DGX A100 (8x A100 80G) GPUs.
| GPUs | Batch size / GPU | DICE - TF32 | DICE - mixed precision | Time to train - TF32 [min] | Time to train - mixed precision [min] | Time to train speedup (TF32 to mixed precision) |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 1 | 8 | 0.8900 | 0.8902 | 21.3 | 8.6 | 2.48 |
| 8 | 8 | 0.8855 | 0.8858 | 2.5 | 2.5 | 1.00 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 16G)
The following table lists the average DICE score across 5-fold cross-validation. Our results were obtained by running the `examples/unet_TRAIN_FULL{_TF-AMP}.sh` training script in the `tensorflow:21.02-tf2-py3` NGC container on NVIDIA DGX-1 with (8x V100 16G) GPUs.
| GPUs | Batch size / GPU | DICE - FP32 | DICE - mixed precision | Time to train - FP32 [min] | Time to train - mixed precision [min] | Time to train speedup (FP32 to mixed precision) |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 1 | 8 | 0.8901 | 0.8898 | 47 | 16 | 2.94 |
| 8 | 8 | 0.8848 | 0.8857 | 7 | 4.5 | 1.56 |
To reproduce this result, start the Docker container interactively and run one of the TRAIN scripts:
```bash
bash examples/unet_TRAIN_FULL{_TF-AMP}.sh <number/of/gpus> <path/to/dataset> <path/to/checkpoint> <batch/size>
```
for example
```bash
bash examples/unet_TRAIN_FULL_TF-AMP.sh 8 /data /results 8
```
This command will launch a script which will run training on 8 GPUs for 6400 iterations five times for 5-fold cross-validation.
At the end, it will collect the scores and print the average validation DICE score and cross-entropy loss.
The time reported is for one fold, which means that the training for 5 folds will take 5 times longer.
The default batch size is 8; however, if your GPU has less than 16 GB of memory and you encounter GPU memory issues, you should decrease the batch size.
The logs of the runs can be found in `/results` directory once the script is finished.
**Learning curves**
The following image shows the training loss as a function of iteration for training using DGX A100 (TF32 and TF-AMP) and DGX-1 V100 (FP32 and TF-AMP).

#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 80G)
Our results were obtained by running the `examples/unet_TRAIN_BENCHMARK{_TF-AMP}.sh` training script in the NGC container on NVIDIA DGX A100 (8x A100 80G) GPUs. Performance numbers (in images per second) were averaged over 1000 iterations, excluding the first 200 warm-up steps.
| GPUs | Batch size / GPU | Throughput - TF32 [img/s] | Throughput - mixed precision [img/s] | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision |
|:----:|:----------------:|:-------------------------:|:------------------------------------:|:-------------------------------------------:|:-------------------:|:------------------------------:|
| 1 | 1 | 46.88 | 75.04 | 1.60 | - | - |
| 1 | 8 | 63.33 | 141.03 | 2.23 | - | - |
| 8 | 1 | 298.91 | 384.27 | 1.29 | 6.37 | 5.12 |
| 8 | 8 | 470.50 | 1000.89 | 2.13 | 7.43 | 7.10 |
##### Training performance: NVIDIA DGX-1 (8x V100 16G)
Our results were obtained by running the `examples/unet_TRAIN_BENCHMARK{_TF-AMP}.sh` training script in the `tensorflow:21.02-tf2-py3` NGC container on NVIDIA DGX-1 with (8x V100 16G) GPUs. Performance numbers (in images per second) were averaged over 1000 iterations, excluding the first 200 warm-up steps.
| GPUs | Batch size / GPU | Throughput - FP32 [img/s] | Throughput - mixed precision [img/s] | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision |
|:----:|:----------------:|:-------------------------:|:------------------------------------:|:-------------------------------------------:|:-------------------:|:------------------------------:|
| 1 | 1 | 16.92 | 39.63 | 2.34 | - | - |
| 1 | 8 | 19.40 | 60.65 | 3.12 | - | - |
| 8 | 1 | 120.90 | 225.27 | 1.86 | 7.14 | 5.68 |
| 8 | 8 | 137.11 | 419.99 | 3.06 | 7.07 | 6.92 |
To achieve these same results, follow the steps in the [Training performance benchmark](#training-performance-benchmark) section.
Throughput is reported in images per second. Latency is reported in milliseconds per image.
TensorFlow 2 runs by default using eager mode, which makes tensor evaluation trivial at the cost of lower performance. To mitigate this issue, multiple layers of performance optimization were implemented. Two of them, AMP and XLA, were already described. There is an additional one called AutoGraph, which allows constructing a graph from a subset of Python syntax, improving performance simply by adding a `@tf.function` decorator to the train function. To read more about AutoGraph see [Better performance with tf.function and AutoGraph](https://www.tensorflow.org/guide/function).
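A minimal sketch of the idea is shown below; the toy model is only for illustration, and the actual decorated train step lives in `run.py`:
```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)

@tf.function  # AutoGraph traces this Python function into a single TensorFlow graph
def train_step(features, labels):
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(labels - model(features, training=True)))
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
```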
#### Inference performance results
##### Inference performance: NVIDIA DGX A100 (1x A100 80G)
Our results were obtained by running the `examples/unet_INFER_BENCHMARK{_TF-AMP}.sh` inference benchmarking script in the `tensorflow:21.02-tf2-py3` NGC container on NVIDIA DGX A100 (1x A100 80G) GPU.
FP16
| Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:----------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1 | 572x572x1 | 275.98 | 3.534 | 3.543 | 3.544 | 3.547 |
| 2 | 572x572x1 | 376.68 | 10.603 | 10.619 | 10.623 | 10.629 |
| 4 | 572x572x1 | 443.05 | 19.572 | 19.610 | 19.618 | 19.632 |
| 8 | 572x572x1 | 440.71 | 19.386 | 19.399 | 19.401 | 19.406 |
| 16 | 572x572x1 | 462.79 | 37.760 | 37.783 | 37.788 | 37.797 |
TF32
| Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:----------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1 | 572x572x1 | 152.27 | 9.317 | 9.341 | 9.346 | 9.355 |
| 2 | 572x572x1 | 180.84 | 17.294 | 17.309 | 17.312 | 17.318 |
| 4 | 572x572x1 | 203.60 | 31.676 | 31.698 | 31.702 | 31.710 |
| 8 | 572x572x1 | 208.70 | 57.742 | 57.755 | 57.757 | 57.762 |
| 16 | 572x572x1 | 213.15 | 112.545 | 112.562 | 112.565 | 112.572 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-1 (1x V100 16G)
Our results were obtained by running the `examples/unet_INFER_BENCHMARK{_TF-AMP}.sh` inference benchmarking script in the `tensorflow:21.02-tf2-py3` NGC container on NVIDIA DGX-1 with (1x V100 16G) GPU.
FP16
| Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:----------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1 | 572x572x1 | 142.15 | 10.537 | 6.851 | 6.853 | 6.856 |
| 2 | 572x572x1 | 159.25 | 13.230 | 13.242 | 13.244 | 13.248 |
| 4 | 572x572x1 | 178.19 | 26.035 | 26.049 | 26.051 | 26.057 |
| 8 | 572x572x1 | 188.54 | 43.602 | 43.627 | 43.631 | 43.640 |
| 16 | 572x572x1 | 195.27 | 85.743 | 85.807 | 85.819 | 85.843 |
FP32
| Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:----------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1 | 572x572x1 | 51.71 | 20.065 | 20.544 | 20.955 | 21.913 |
| 2 | 572x572x1 | 55.87 | 37.112 | 37.127 | 37.130 | 37.136 |
| 4 | 572x572x1 | 58.15 | 73.033 | 73.068 | 73.074 | 73.087 |
| 8 | 572x572x1 | 59.28 | 144.829 | 144.924 | 144.943 | 144.979 |
| 16 | 572x572x1 | 73.01 | 234.995 | 235.098 | 235.118 | 235.157 |
To achieve these same results, follow the steps in the [Inference performance benchmark](#inference-performance-benchmark) section.
Throughput is reported in images per second. Latency is reported in milliseconds per batch.
## Release notes
### Changelog
February 2021
* Updated training and inference performance with V100 and A100 results
* Refactored the example scripts
June 2020
* Updated training and inference accuracy with A100 results
* Updated training and inference performance with A100 results
February 2020
* Initial release
### Known issues
* Training on 8 GPUs with FP32 and XLA using batch size 8 may sometimes cause out-of-memory errors.
* For TensorFlow 2.0 the training performance using AMP and XLA is around 30% lower than reported here. The issue was solved in TensorFlow 2.1.
|
TensorFlow/Segmentation/UNet_Industrial | UNet_Industrial | .gitignore | # Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# weights
/pretrained_weights
/exported_models
# mypy
.mypy_cache/
.idea/
.vscode/
*.ckpt
dlloger_out.json
cudnn_cublas_logs/
|
TensorFlow/Segmentation/UNet_3D_Medical/dataset | dataset | preprocess_data | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Preprocess dataset and prepare it for training
Example usage:
$ python preprocess_data.py --input_dir ./src --output_dir ./dst
--vol_per_file 2
All arguments are listed under `python preprocess_data.py -h`.
"""
import os
import argparse
from random import shuffle
import numpy as np
import nibabel as nib
import tensorflow as tf
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--input_dir', '-i',
type=str, help='path to the input directory with data')
PARSER.add_argument('--output_dir', '-o',
type=str, help='path to the output directory where tfrecord files will be stored')
PARSER.add_argument('--verbose', '-v', dest='verbose', action='store_true', default=False)
PARSER.add_argument('--vol_per_file', default=4, dest='vol_per_file',
type=int, help='how many volumes to pack into a single tfrecord file')
PARSER.add_argument('--single_data_dir', dest='single_data_dir', action='store_true', default=False)
def load_features(path):
""" Load features from Nifti
:param path: Path to dataset
:return: Loaded data
"""
data = np.zeros((240, 240, 155, 4), dtype=np.uint8)
name = os.path.basename(path)
for i, modality in enumerate(["_t1.nii.gz", "_t1ce.nii.gz", "_t2.nii.gz", "_flair.nii.gz"]):
vol = load_single_nifti(os.path.join(path, name + modality)).astype(np.float32)
vol[vol > 0.85 * vol.max()] = 0.85 * vol.max()
vol = 255 * vol / vol.max()
data[..., i] = vol.astype(np.uint8)
return data
def load_segmentation(path):
""" Load segmentations from Nifti
:param path: Path to dataset
:return: Loaded data
"""
path = os.path.join(path, os.path.basename(path)) + "_seg.nii.gz"
return load_single_nifti(path).astype(np.uint8)
def load_single_nifti(path):
""" Load Nifti file as numpy
:param path: Path to file
:return: Loaded data
"""
data = nib.load(path).get_fdata().astype(np.int16)
return np.transpose(data, (1, 0, 2))
def write_to_file(features_list, labels_list, foreground_mean_list, foreground_std_list, output_dir, # pylint: disable=R0913
count):
""" Dump numpy array to tfrecord
:param features_list: List of features
:param labels_list: List of labels
:param foreground_mean_list: List of means for each volume
:param foreground_std_list: List of std for each volume
:param output_dir: Directory where to write
:param count: Index of the record
:return:
"""
output_filename = os.path.join(output_dir, "volume-{}.tfrecord".format(count))
filelist = list(zip(np.array(features_list),
np.array(labels_list),
np.array(foreground_mean_list),
np.array(foreground_std_list)))
np_to_tfrecords(filelist, output_filename)
def np_to_tfrecords(filelist, output_filename):
""" Convert numpy array to tfrecord
:param filelist: List of files
:param output_filename: Destination directory
"""
writer = tf.io.TFRecordWriter(output_filename)
for file_item in filelist:
        sample = file_item[0].flatten().tobytes()
        label = file_item[1].flatten().tobytes()
mean = file_item[2].astype(np.float32).flatten()
stdev = file_item[3].astype(np.float32).flatten()
d_feature = {}
d_feature['X'] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[sample]))
d_feature['Y'] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[label]))
d_feature['mean'] = tf.train.Feature(float_list=tf.train.FloatList(value=mean))
d_feature['stdev'] = tf.train.Feature(float_list=tf.train.FloatList(value=stdev))
features = tf.train.Features(feature=d_feature)
example = tf.train.Example(features=features)
serialized = example.SerializeToString()
writer.write(serialized)
writer.close()
def main(): # pylint: disable=R0914
""" Starting point of the application"""
params = PARSER.parse_args()
input_dir = params.input_dir
output_dir = params.output_dir
os.makedirs(params.output_dir, exist_ok=True)
patient_list = []
if params.single_data_dir:
patient_list.extend([os.path.join(input_dir, folder) for folder in os.listdir(input_dir)])
else:
assert "HGG" in os.listdir(input_dir) and "LGG" in os.listdir(input_dir), \
"Data directory has to contain folders named HGG and LGG. " \
"If you have a single folder with patient's data please set --single_data_dir flag"
path_hgg = os.path.join(input_dir, "HGG")
path_lgg = os.path.join(input_dir, "LGG")
patient_list.extend([os.path.join(path_hgg, folder) for folder in os.listdir(path_hgg)])
patient_list.extend([os.path.join(path_lgg, folder) for folder in os.listdir(path_lgg)])
shuffle(patient_list)
features_list = []
labels_list = []
foreground_mean_list = []
foreground_std_list = []
count = 0
total_tfrecord_files = len(patient_list) // params.vol_per_file + (1 if len(patient_list) % params.vol_per_file
else 0)
for i, folder in enumerate(patient_list):
# Calculate mean and stdev only for foreground voxels
features = load_features(folder)
foreground = features > 0
fg_mean = np.array([(features[..., i][foreground[..., i]]).mean() for i in range(features.shape[-1])])
fg_std = np.array([(features[..., i][foreground[..., i]]).std() for i in range(features.shape[-1])])
# BraTS labels are 0,1,2,4 -> switching to 0,1,2,3
labels = load_segmentation(folder)
labels[labels == 4] = 3
features_list.append(features)
labels_list.append(labels)
foreground_mean_list.append(fg_mean)
foreground_std_list.append(fg_std)
if (i + 1) % params.vol_per_file == 0:
write_to_file(features_list, labels_list, foreground_mean_list, foreground_std_list, output_dir, count)
# Clear lists
features_list = []
labels_list = []
foreground_mean_list = []
foreground_std_list = []
count += 1
if params.verbose:
print("{}/{} tfrecord files created".format(count, total_tfrecord_files))
# create one more file if there are any remaining unpacked volumes
if features_list:
write_to_file(features_list, labels_list, foreground_mean_list, foreground_std_list, output_dir, count)
count += 1
if params.verbose:
print("{}/{} tfrecord files created".format(count, total_tfrecord_files))
if __name__ == '__main__':
main()
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | faster_rcnn_mobilenet_v1_feature_extractor | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet v1 Faster R-CNN implementation."""
import numpy as np
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.utils import shape_utils
from nets import mobilenet_v1
slim = tf.contrib.slim
def _get_mobilenet_conv_no_last_stride_defs(conv_depth_ratio_in_percentage):
if conv_depth_ratio_in_percentage not in [25, 50, 75, 100]:
raise ValueError(
'Only the following ratio percentages are supported: 25, 50, 75, 100')
conv_depth_ratio_in_percentage = float(conv_depth_ratio_in_percentage) / 100.0
channels = np.array([
32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024
], dtype=np.float32)
channels = (channels * conv_depth_ratio_in_percentage).astype(np.int32)
return [
mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=channels[0]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[1]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[2]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[3]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[4]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[5]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[6]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[7]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[8]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[9]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[10]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[11]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[12]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[13])
]
class FasterRCNNMobilenetV1FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN Mobilenet V1 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0,
depth_multiplier=1.0,
min_depth=16,
skip_last_stride=False,
conv_depth_ratio_in_percentage=100):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
skip_last_stride: Skip the last stride if True.
conv_depth_ratio_in_percentage: Conv depth ratio in percentage. Only
applied if skip_last_stride is True.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._depth_multiplier = depth_multiplier
self._min_depth = min_depth
self._skip_last_stride = skip_last_stride
self._conv_depth_ratio_in_percentage = conv_depth_ratio_in_percentage
super(FasterRCNNMobilenetV1FeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN Mobilenet V1 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
activations: A dictionary mapping feature extractor tensor names to
tensors
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
preprocessed_inputs = shape_utils.check_min_image_dim(
min_dim=33, image_tensor=preprocessed_inputs)
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=self._train_batch_norm,
weight_decay=self._weight_decay)):
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
params = {}
if self._skip_last_stride:
params['conv_defs'] = _get_mobilenet_conv_no_last_stride_defs(
conv_depth_ratio_in_percentage=self.
_conv_depth_ratio_in_percentage)
_, activations = mobilenet_v1.mobilenet_v1_base(
preprocessed_inputs,
final_endpoint='Conv2d_11_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope,
**params)
return activations['Conv2d_11_pointwise'], activations
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name (unused).
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
net = proposal_feature_maps
conv_depth = 1024
if self._skip_last_stride:
conv_depth_ratio = float(self._conv_depth_ratio_in_percentage) / 100.0
conv_depth = int(float(conv_depth) * conv_depth_ratio)
depth = lambda d: max(int(d * 1.0), 16)
with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights):
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=self._train_batch_norm,
weight_decay=self._weight_decay)):
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d], padding='SAME'):
net = slim.separable_conv2d(
net,
depth(conv_depth), [3, 3],
depth_multiplier=1,
stride=2,
scope='Conv2d_12_pointwise')
return slim.separable_conv2d(
net,
depth(conv_depth), [3, 3],
depth_multiplier=1,
stride=1,
scope='Conv2d_13_pointwise')
|
PyTorch/SpeechRecognition/wav2vec2/scripts | scripts | generate_filelists | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
: ${DATASET_DIR:=/datasets/LibriSpeech}
: ${FILELISTS_DIR:=$DATASET_DIR}
: ${EXT:=flac} # or wav
mkdir -p $DATASET_DIR
mkdir -p $FILELISTS_DIR
for SUBSET in train-clean-100 train-clean-360 train-other-500 \
dev-clean dev-other test-clean test-other \
; do
TSV=$FILELISTS_DIR/$SUBSET.tsv
if [ ! -d $DATASET_DIR/$SUBSET ]; then
echo "ERROR: $DATASET_DIR/$SUBSET does not exist; skipping."
continue
fi
python3 utils/generate_filelist.py --extension $EXT $DATASET_DIR/$SUBSET $TSV
python3 utils/libri_labels.py $TSV --output-dir $FILELISTS_DIR --output-name $SUBSET
done
# Combine
python3 utils/combine_filelists.py $FILELISTS_DIR/train-{clean-100,clean-360,other-500}.tsv > $FILELISTS_DIR/train-full-960.tsv
cat $FILELISTS_DIR/train-clean-100.wrd > $FILELISTS_DIR/train-full-960.wrd
cat $FILELISTS_DIR/train-clean-360.wrd >> $FILELISTS_DIR/train-full-960.wrd
cat $FILELISTS_DIR/train-other-500.wrd >> $FILELISTS_DIR/train-full-960.wrd
cat $FILELISTS_DIR/train-clean-100.ltr > $FILELISTS_DIR/train-full-960.ltr
cat $FILELISTS_DIR/train-clean-360.ltr >> $FILELISTS_DIR/train-full-960.ltr
cat $FILELISTS_DIR/train-other-500.ltr >> $FILELISTS_DIR/train-full-960.ltr
python3 utils/generate_dictionary.py $FILELISTS_DIR/train-full-960.ltr $FILELISTS_DIR/dict.ltr.txt
|
PyTorch/SpeechRecognition/Jasper/triton | triton | converter | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import json
import torch
import argparse
import importlib
from pytorch.utils import extract_io_props, load_io_props
import logging
def get_parser():
parser = argparse.ArgumentParser()
# required args
parser.add_argument("--model-module", type=str, default="", required=True,
help="Module with model initializer and data loader")
parser.add_argument('--convert', choices=['ts-script', 'ts-trace',
'onnx', 'tensorrt'],
required=True, help='convert to '
'ts-script: TorchScript using torch.jit.script, '
'ts-trace: TorchScript using torch.jit.trace, '
'onnx: ONNX using torch.onnx.export, '
'tensorrt: TensorRT using OnnxParser, ')
parser.add_argument("--max_workspace_size", type=int,
default=512*1024*1024,
help="set the size of the workspace for TensorRT \
conversion")
parser.add_argument("--precision", choices=['fp16', 'fp32'],
default='fp32', help="convert TensorRT or \
TorchScript model in a given precision")
parser.add_argument('--convert-filename', type=str, default='',
help='Saved model name')
parser.add_argument('--save-dir', type=str, default='',
help='Saved model directory')
parser.add_argument("--max-batch-size", type=int, default=1,
help="Specifies the 'max_batch_size' in the Triton \
model config and in TensorRT builder. See the \
Triton and TensorRT documentation for more info.")
parser.add_argument('--device', type=str, default='cuda',
help='Select device for conversion.')
parser.add_argument('model_arguments', nargs=argparse.REMAINDER,
help='arguments that will be ignored by \
converter lib and will be forwarded to your convert \
script')
return parser
class Converter:
def __init__(self, model, dataloader):
self.model = model
self.dataloader = dataloader
self.convert_props = {
'ts-script': {
'convert_func': self.to_torchscript,
'convert_filename': 'model.pt'
},
'ts-trace': {
'convert_func' : self.to_torchscript,
'convert_filename': 'model.pt'
},
'onnx': {
'convert_func' : self.to_onnx,
'convert_filename': 'model.onnx'
},
'tensorrt': {
'convert_func' : self.to_tensorrt,
'convert_filename': 'model.plan'
}
}
def convert(self, convert_type, save_dir, convert_filename,
device, precision='fp32',
max_batch_size=1,
# args for TensorRT:
max_workspace_size=None):
''' convert the model '''
self.convert_type = convert_type
self.max_workspace_size = max_workspace_size
self.max_batch_size = max_batch_size
self.precision = precision
# override default name if user provided name
if convert_filename != '':
self.convert_props[convert_type]['convert_filename'] = convert_filename
# setup device
torch_device = torch.device(device)
# prepare model
self.model.to(torch_device)
self.model.eval()
assert (not self.model.training), \
"[Converter error]: could not set the model to eval() mode!"
io_props = None
if self.dataloader is not None:
io_props = extract_io_props(self.model, self.dataloader,
torch_device, precision, max_batch_size)
assert self.convert_type == "ts-script" or io_props is not None, \
"Input and output properties are empty. For conversion types \
other than \'ts-script\' input shapes are required to generate dummy input. \
Make sure that dataloader works correctly or that IO props file is provided."
# prepare save path
model_name = self.convert_props[convert_type]['convert_filename']
convert_model_path = os.path.join(save_dir, model_name)
# get convert method depending on the convert type
convert_func = self.convert_props[convert_type]['convert_func']
# convert the model - will be saved to disk
if self.convert_type == "tensorrt":
io_filepath = "triton/tensorrt_io_props_" + str(precision) + ".json"
io_props = load_io_props(io_filepath)
        convert_func(self.model, torch_device, io_props, convert_model_path)
assert (os.path.isfile(convert_model_path)), \
f"[Converter error]: saving model to {convert_model_path} failed!"
def generate_dummy_input(self, io_props, device):
from pytorch.utils import triton_type_to_torch_type
dummy_input = []
for s,t in zip(io_props['opt_shapes'], io_props['input_types']):
t = triton_type_to_torch_type[t]
tensor = torch.empty(size=s, dtype=t, device=device).random_()
dummy_input.append(tensor)
dummy_input = tuple(dummy_input)
return dummy_input
def to_onnx(self, model, device, io_props, convert_model_path):
''' convert the model to onnx '''
dummy_input = self.generate_dummy_input(io_props, device)
opset_version = 11
# convert the model to onnx
with torch.no_grad():
torch.onnx.export(model, dummy_input,
convert_model_path,
do_constant_folding=True,
input_names=io_props['input_names'],
output_names=io_props['output_names'],
dynamic_axes=io_props['dynamic_axes'],
opset_version=opset_version,
enable_onnx_checker=True)
def to_tensorrt(self, model, device, io_props, convert_model_path):
''' convert the model to tensorrt '''
assert (self.max_workspace_size), "[Converter error]: for TensorRT conversion you must provide \'max_workspace_size\'."
import tensorrt as trt
from pytorch.utils import build_tensorrt_engine
# convert the model to onnx first
self.to_onnx(model, device, io_props, convert_model_path)
del model
torch.cuda.empty_cache()
zipped = zip(io_props['input_names'], io_props['min_shapes'],
io_props['opt_shapes'], io_props['max_shapes'])
shapes = []
for name,min_shape,opt_shape,max_shape in zipped:
d = {"name":name, "min": min_shape,
"opt": opt_shape, "max": max_shape}
shapes.append(d)
        tensorrt_fp16 = self.precision == 'fp16'
# build tensorrt engine
engine = build_tensorrt_engine(convert_model_path, shapes,
self.max_workspace_size,
self.max_batch_size,
tensorrt_fp16)
assert engine is not None, "[Converter error]: TensorRT build failure"
# write tensorrt engine
with open(convert_model_path, 'wb') as f:
f.write(engine.serialize())
def to_torchscript(self, model, device, io_props, convert_model_path):
''' convert the model to torchscript '''
if self.convert_type == 'ts-script':
model_ts = torch.jit.script(model)
else: # self.convert_type == 'ts-trace'
dummy_input = self.generate_dummy_input(io_props, device)
with torch.no_grad():
model_ts = torch.jit.trace(model, dummy_input)
# save the model
torch.jit.save(model_ts, convert_model_path)
if __name__=='__main__':
parser = get_parser()
args = parser.parse_args()
model_args_list = args.model_arguments[1:]
logging.basicConfig(level=logging.INFO)
mm = importlib.import_module(args.model_module)
model = mm.init_model(model_args_list, args.precision, args.device)
dataloader = mm.get_dataloader(model_args_list)
converter = Converter(model, dataloader)
converter.convert(args.convert, args.save_dir, args.convert_filename,
args.device, args.precision,
args.max_batch_size,
args.max_workspace_size)
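# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original script).
# The module passed via --model-module must expose init_model() and
# get_dataloader() with the signatures used in __main__ above. A minimal,
# hypothetical example module could look like the following; the tiny linear
# model and random dataloader are placeholders, not the Jasper implementation:
#
#   # my_model_module.py (hypothetical name)
#   import torch
#
#   def init_model(model_args_list, precision, device):
#       model = torch.nn.Linear(16, 4).to(device)
#       if precision == 'fp16':
#           model = model.half()
#       return model
#
#   def get_dataloader(model_args_list):
#       return torch.utils.data.DataLoader(torch.randn(8, 16), batch_size=1)
#
# It could then be converted with, for example:
#   python converter.py --model-module my_model_module --convert onnx \
#       --save-dir /tmp/converted --precision fp32
# ---------------------------------------------------------------------------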
|
PyTorch/Classification/GPUNet/triton/175ms/runner | runner | start_NVIDIA-DGX-1-(1x-V100-32GB) | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Evaluate Runner
python3 -m "triton.175ms.runner.__main__" \
--config-path "triton/175ms/runner/config_NVIDIA-DGX-1-(1x-V100-32GB).yaml" \
--device 0 |
PyTorch/SpeechSynthesis/Tacotron2 | Tacotron2 | README | # Tacotron 2 And WaveGlow v1.10 For PyTorch
This repository provides a script and recipe to train Tacotron 2 and WaveGlow
v1.6 models to achieve state of the art accuracy, and is tested and maintained by NVIDIA.
## Table of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Shared parameters](#shared-parameters)
* [Shared audio/STFT parameters](#shared-audiostft-parameters)
* [Tacotron 2 parameters](#tacotron-2-parameters)
* [WaveGlow parameters](#waveglow-parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Inference process](#inference-process)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 40GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-40gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Training curves](#training-curves)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 40GB)](#training-performance-nvidia-dgx-a100-8x-a100-40gb)
        * [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb)
* [Expected training time](#expected-training-time)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 40GB)](#inference-performance-nvidia-dgx-a100-1x-a100-40gb)
* [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
* [Inference performance: NVIDIA T4](#inference-performance-nvidia-t4)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
This text-to-speech (TTS) system is a combination of two neural network
models:
* a modified Tacotron 2 model from the [Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions](https://arxiv.org/abs/1712.05884)
paper
* a flow-based neural network model from the [WaveGlow: A Flow-based Generative Network for Speech Synthesis](https://arxiv.org/abs/1811.00002) paper
The Tacotron 2 and WaveGlow models form a text-to-speech system that enables
users to synthesize natural sounding speech from raw transcripts without
any additional information such as patterns and/or rhythms of speech.
Our implementation of Tacotron 2 models differs from the model described in the
paper. Our implementation uses Dropout instead of Zoneout to regularize the
LSTM layers. Also, the original text-to-speech system proposed in the paper
uses the [WaveNet](https://arxiv.org/abs/1609.03499) model to synthesize
waveforms. In our implementation, we use the WaveGlow model for this purpose.
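For intuition, the sketch below shows the two-stage inference flow at a high level: Tacotron 2 turns a character sequence into a mel-spectrogram, and WaveGlow turns that mel-spectrogram into a waveform. It is a simplified illustration rather than the repository's `inference.py`; the `load_tacotron2`, `load_waveglow`, and `text_to_sequence` helpers stand in for the repository's checkpoint-loading and text-processing utilities, and the `infer` call signatures are simplified.
```python
import torch

# Hypothetical helpers standing in for the repository's utilities.
tacotron2 = load_tacotron2("tacotron2_checkpoint.pt").eval().cuda()
waveglow = load_waveglow("waveglow_checkpoint.pt").eval().cuda()

sequence = torch.as_tensor(text_to_sequence("Hello world."), device="cuda").unsqueeze(0)
lengths = torch.tensor([sequence.size(1)], device="cuda")

with torch.no_grad():
    # Stage 1: Tacotron 2 predicts a mel-spectrogram from the character sequence.
    mel, *_ = tacotron2.infer(sequence, lengths)
    # Stage 2: WaveGlow generates the audio waveform conditioned on the mel-spectrogram.
    audio = waveglow.infer(mel)
```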
Both models are based on implementations of NVIDIA GitHub repositories
[Tacotron 2](https://github.com/NVIDIA/tacotron2) and
[WaveGlow](https://github.com/NVIDIA/waveglow), and are trained on a publicly
available [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/).
The Tacotron 2 and WaveGlow model enables you to efficiently synthesize high
quality speech from text.
Both models are trained with mixed precision using Tensor Cores on Volta,
Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can
get results 2.0x faster for Tacotron 2 and 3.1x faster for WaveGlow than
training without Tensor Cores, while experiencing the benefits of mixed
precision training. The models are tested against each NGC monthly
container release to ensure consistent accuracy and performance over time.
### Model architecture
The Tacotron 2 model is a recurrent sequence-to-sequence model with attention that
predicts mel-spectrograms from text. The encoder (blue blocks in the figure
below) transforms the whole text into a fixed-size hidden feature
representation. This feature representation is then consumed by the
autoregressive decoder (orange blocks) that produces one spectrogram frame at
a time. In our implementation, the autoregressive WaveNet (green block) is
replaced by the flow-based generative WaveGlow.

Figure 1. Architecture of the Tacotron 2 model. Taken from the
[Tacotron 2](https://arxiv.org/abs/1712.05884) paper.
The WaveGlow model is a flow-based generative model that generates audio
samples from Gaussian distribution using mel-spectrogram conditioning (Figure
2). During training, the model learns to transform the dataset distribution
into spherical Gaussian distribution through a series of flows. One step of a
flow consists of an invertible convolution, followed by a modified WaveNet
architecture that serves as an affine coupling layer. During inference, the
network is inverted and audio samples are generated from the Gaussian
distribution. Our implementation uses 512 residual channels in the coupling layer.
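To illustrate the affine coupling idea, the minimal sketch below splits the channels of an input in half, predicts a scale and shift for one half from the other, and applies an invertible affine transform. This is a generic flow building block written for illustration only; it is not the WaveGlow implementation, which additionally uses invertible 1x1 convolutions and a WaveNet-like network to predict the affine parameters.
```python
import torch
import torch.nn as nn

class ToyAffineCoupling(nn.Module):
    """Minimal affine coupling step; invertible by construction."""

    def __init__(self, channels, hidden=64):
        super().__init__()
        # Predicts a log-scale and a shift for the second half from the first half.
        self.net = nn.Sequential(
            nn.Conv1d(channels // 2, hidden, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv1d(hidden, channels, kernel_size=3, padding=1),
        )

    def forward(self, x):
        xa, xb = x.chunk(2, dim=1)
        log_s, t = self.net(xa).chunk(2, dim=1)
        yb = xb * torch.exp(log_s) + t                   # forward (training) direction
        return torch.cat([xa, yb], dim=1), log_s.sum()   # log-det term for the flow loss

    def inverse(self, y):
        ya, yb = y.chunk(2, dim=1)
        log_s, t = self.net(ya).chunk(2, dim=1)
        xb = (yb - t) * torch.exp(-log_s)                # exact inverse used at synthesis time
        return torch.cat([ya, xb], dim=1)
```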

Figure 2. Architecture of the WaveGlow model. Taken from the
[WaveGlow](https://arxiv.org/abs/1811.00002) paper.
### Default configuration
Both models support multi-GPU and mixed precision training with dynamic loss
scaling (see Apex code
[here](https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py)),
as well as mixed precision inference. To speed up Tacotron 2 training,
reference mel-spectrograms are generated during a preprocessing step and read
directly from disk during training, instead of being generated during training.
The following features were implemented in this model:
* data-parallel multi-GPU training
* dynamic loss scaling with backoff for Tensor Cores (mixed precision)
training.
### Feature support matrix
The following features are supported by this model.
| Feature | Tacotron 2 | WaveGlow |
| :-----------------------|------------:|--------------:|
|[AMP](https://nvidia.github.io/apex/amp.html) | Yes | Yes |
|[Apex DistributedDataParallel](https://nvidia.github.io/apex/parallel.html) | Yes | Yes |
#### Features
AMP - a tool that enables Tensor Core-accelerated training. For more information,
refer to [Enabling mixed precision](#enabling-mixed-precision).
Apex DistributedDataParallel - a module wrapper that enables easy multiprocess
distributed data parallel training, similar to `torch.nn.parallel.DistributedDataParallel`.
`DistributedDataParallel` is optimized for use with NCCL. It achieves high
performance by overlapping communication with computation during `backward()`
and bucketing smaller gradient transfers to reduce the total number of transfers
required.
### Mixed precision training
*Mixed precision* is the combined use of different numerical precisions in a
computational method. [Mixed precision](https://arxiv.org/abs/1710.03740)
training offers significant computational speedup by performing operations in
half-precision format, while storing minimal information in single-precision
to retain as much information as possible in critical parts of the network.
Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores)
in Volta, and following with both the Turing and Ampere architectures,
significant training speedups are
experienced by switching to mixed precision -- up to 3x overall speedup on
the most arithmetically intense model architectures. Using mixed precision
training requires two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
The ability to train deep learning networks with lower precision was
introduced in the Pascal architecture and first supported in [CUDA 8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep Learning SDK.
For information about:
* How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740)
paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)
documentation.
* Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/)
blog.
* APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
Mixed precision is enabled in PyTorch by using the Automatic Mixed Precision
(AMP) library from [APEX](https://github.com/NVIDIA/apex) that casts variables
to half-precision upon retrieval, while storing variables in single-precision
format. Furthermore, to preserve small gradient magnitudes in backpropagation,
a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling)
step must be included when applying gradients. In PyTorch, loss scaling can be
easily applied by using the `scale_loss()` method provided by AMP. The scaling value
to be used can be [dynamic](https://nvidia.github.io/apex/fp16_utils.html#apex.fp16_utils.DynamicLossScaler) or fixed.
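As a piece of intuition for what `scale_loss()` does, the snippet below shows static loss scaling written out by hand, assuming `model`, `criterion`, `optimizer`, `inputs`, and `targets` already exist. AMP performs the equivalent bookkeeping automatically and can adjust the scale dynamically.
```python
loss_scale = 1024.0  # static scale chosen only for illustration

optimizer.zero_grad()
loss = criterion(model(inputs), targets)

# Scale the loss so that small gradient values survive the FP16 backward pass.
(loss * loss_scale).backward()

# Unscale the gradients before the optimizer consumes them.
for param in model.parameters():
    if param.grad is not None:
        param.grad.div_(loss_scale)

optimizer.step()
```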
By default, the `train_tacotron2.sh` and `train_waveglow.sh` scripts will
launch mixed precision training with Tensor Cores. You can change this
behaviour by removing the `--amp` flag from the `train.py` script.
To enable mixed precision, the following steps were performed in the Tacotron 2 and
WaveGlow models:
* Import AMP from APEX:
```python
from apex import amp
amp.lists.functional_overrides.FP32_FUNCS.remove('softmax')
amp.lists.functional_overrides.FP16_FUNCS.append('softmax')
```
* Initialize AMP:
```python
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
```
* If running on multi-GPU, wrap the model with `DistributedDataParallel`:
```python
from apex.parallel import DistributedDataParallel as DDP
model = DDP(model)
```
* Scale loss before backpropagation (assuming loss is stored in a variable
called `losses`):
* Default backpropagate for FP32:
```python
losses.backward()
```
* Scale loss and backpropagate with AMP:
```python
with amp.scale_loss(losses, optimizer) as scaled_losses:
scaled_losses.backward()
```
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
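Although TF32 is on by default on Ampere GPUs, it can be controlled explicitly with the standard PyTorch switches shown below (available in PyTorch 1.7 and later). These are generic framework flags, not options specific to this repository.
```python
import torch

# Allow TF32 for matrix multiplications and cuDNN convolutions (the default on Ampere).
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

# Set both flags to False to force full FP32 math instead.
```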
## Setup
The following section lists the requirements in order to start training the
Tacotron 2 and WaveGlow models.
### Requirements
This repository contains Dockerfile which extends the PyTorch NGC container
and encapsulates some dependencies. Aside from these dependencies, ensure you
have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- [PyTorch 20.06-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch)
or newer
- Supported GPUs:
- [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the
following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning
Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
- [Running PyTorch](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/running.html#running)
For those unable to use the PyTorch NGC container, to set up the required
environment or create your own container, see the versioned
[NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed precision with Tensor Cores or using FP32,
perform the following steps using the default parameters of the Tacotron 2
and WaveGlow model on the [LJ Speech](https://keithito.com/LJ-Speech-Dataset/)
dataset.
1. Clone the repository.
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2
```
2. Download and preprocess the dataset.
Use the `./scripts/prepare_dataset.sh` download script to automatically
download and preprocess the training, validation and test datasets. To run
this script, issue:
```bash
bash scripts/prepare_dataset.sh
```
Data is downloaded to the `./LJSpeech-1.1` directory (on the host). The
`./LJSpeech-1.1` directory is mounted to the `/workspace/tacotron2/LJSpeech-1.1`
location in the NGC container.
3. Build the Tacotron 2 and WaveGlow PyTorch NGC container.
```bash
bash scripts/docker/build.sh
```
4. Start an interactive session in the NGC container to run training/inference.
After you build the container image, you can start an interactive CLI session with:
```bash
bash scripts/docker/interactive.sh
```
The `interactive.sh` script requires that the location on the dataset is specified.
For example, `LJSpeech-1.1`. To preprocess the datasets for Tacotron 2 training, use
the `./scripts/prepare_mels.sh` script:
```bash
bash scripts/prepare_mels.sh
```
The preprocessed mel-spectrograms are stored in the `./LJSpeech-1.1/mels` directory.
5. Start training.
To start Tacotron 2 training, run:
```bash
bash scripts/train_tacotron2.sh
```
To start WaveGlow training, run:
```bash
bash scripts/train_waveglow.sh
```
6. Start validation/evaluation.
Ensure your loss values are comparable to those listed in the table in the
[Results](#results) section. For both models, the loss values are stored in the `./output/nvlog.json` log file.
After you have trained the Tacotron 2 and WaveGlow models, you should get
audio results similar to the
samples in the `./audio` folder. For details about generating audio, see the
[Inference process](#inference-process) section below.
The training scripts automatically run the validation after each training
epoch. The results from the validation are printed to the standard output
(`stdout`) and saved to the log files.
7. Start inference.
After you have trained the Tacotron 2 and WaveGlow models, you can perform
inference using the respective checkpoints that are passed as `--tacotron2`
and `--waveglow` arguments. Tacotron2 and WaveGlow checkpoints can also be downloaded from NGC:
https://ngc.nvidia.com/catalog/models/nvidia:tacotron2pyt_fp16/files?version=3
https://ngc.nvidia.com/catalog/models/nvidia:waveglow256pyt_fp16/files?version=2
To run inference issue:
```bash
python inference.py --tacotron2 <Tacotron2_checkpoint> --waveglow <WaveGlow_checkpoint> --wn-channels 256 -o output/ -i phrases/phrase.txt --fp16
```
The speech is generated from lines of text in the file that is passed with
`-i` argument. The number of lines determines inference batch size. To run
inference in mixed precision, use the `--fp16` flag. The output audio will
be stored in the path specified by the `-o` argument.
You can also run inference on the CPU with TorchScript by adding the `--cpu` flag:
```bash
export CUDA_VISIBLE_DEVICES=
```
```bash
python inference.py --tacotron2 <Tacotron2_checkpoint> --waveglow <WaveGlow_checkpoint> --wn-channels 256 --cpu -o output/ -i phrases/phrase.txt
```
## Advanced
The following sections provide greater details of the dataset, running
training and inference, and the training results.
### Scripts and sample code
The sample code for Tacotron 2 and WaveGlow has scripts specific to a
particular model, located in directories `./tacotron2` and `./waveglow`, as well as scripts common to both
models, located in the `./common` directory. The model-specific scripts are as follows:
* `<model_name>/model.py` - the model architecture, definition of forward and
inference functions
* `<model_name>/arg_parser.py` - argument parser for parameters specific to a
given model
* `<model_name>/data_function.py` - data loading functions
* `<model_name>/loss_function.py` - loss function for the model
The common scripts contain layer definitions common to both models
(`common/layers.py`), some utility scripts (`common/utils.py`) and scripts
for audio processing (`common/audio_processing.py` and `common/stft.py`). In
the root directory `./` of this repository, the `./train.py` script is used for
training while inference can be executed with the `./inference.py` script. The
scripts `./models.py`, `./data_functions.py` and `./loss_functions.py` call
the respective scripts in the `<model_name>` directory, depending on what
model is trained using the `train.py` script.
### Parameters
In this section, we list the most important hyperparameters and command-line arguments,
together with their default values that are used to train Tacotron 2 and
WaveGlow models.
#### Shared parameters
* `--epochs` - number of epochs (Tacotron 2: 1501, WaveGlow: 1001)
* `--learning-rate` - learning rate (Tacotron 2: 1e-3, WaveGlow: 1e-4)
* `--batch-size` - batch size (Tacotron 2 FP16/FP32: 104/48, WaveGlow FP16/FP32: 10/4)
* `--amp` - use mixed precision training
* `--cpu` - use CPU with TorchScript for inference
#### Shared audio/STFT parameters
* `--sampling-rate` - sampling rate in Hz of input and output audio (22050)
* `--filter-length` - size of the FFT used to compute the STFT (1024)
* `--hop-length` - hop length for FFT, i.e., sample stride between consecutive FFTs (256)
* `--win-length` - window size for FFT (1024)
* `--mel-fmin` - lowest frequency in Hz (0.0)
* `--mel-fmax` - highest frequency in Hz (8000.0); see the sketch below for how these parameters fit together
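As a rough illustration of how these parameters combine, the snippet below computes a mel-spectrogram with librosa using the values listed above. It is for illustration only; the repository computes mel-spectrograms with its own STFT code in `common/stft.py` and `common/layers.py`, and the number of mel bands (80) is the Tacotron 2 default rather than a flag from this list.
```python
import librosa
import numpy as np

audio, sr = librosa.load("example.wav", sr=22050)   # --sampling-rate

mel = librosa.feature.melspectrogram(
    y=audio,
    sr=sr,
    n_fft=1024,       # --filter-length
    hop_length=256,   # --hop-length
    win_length=1024,  # --win-length
    n_mels=80,        # Tacotron 2 default number of mel bands
    fmin=0.0,         # --mel-fmin
    fmax=8000.0,      # --mel-fmax
)
log_mel = np.log(np.clip(mel, 1e-5, None))  # simple dynamic-range compression
```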
#### Tacotron 2 parameters
* `--anneal-steps` - epochs at which to anneal the learning rate (500 1000 1500)
* `--anneal-factor` - factor by which to anneal the learning rate (FP16/FP32: 0.3/0.1)
#### WaveGlow parameters
* `--segment-length` - segment length of input audio processed by the neural network (8000)
* `--wn-channels` - number of residual channels in the coupling layer networks (512)
### Command-line options
To see the full list of available options and their descriptions, use the `-h`
or `--help` command line option, for example:
```bash
python train.py --help
```
The following example output is printed when running the sample:
```bash
Batch: 7/260 epoch 0
:::NVLOGv0.2.2 Tacotron2_PyT 1560936205.667271376 (/workspace/tacotron2/dllogger/logger.py:251) train_iter_start: 7
:::NVLOGv0.2.2 Tacotron2_PyT 1560936207.209611416 (/workspace/tacotron2/dllogger/logger.py:251) train_iteration_loss: 5.415428161621094
:::NVLOGv0.2.2 Tacotron2_PyT 1560936208.705905914 (/workspace/tacotron2/dllogger/logger.py:251) train_iter_stop: 7
:::NVLOGv0.2.2 Tacotron2_PyT 1560936208.706479311 (/workspace/tacotron2/dllogger/logger.py:251) train_iter_items/sec: 8924.00136085362
:::NVLOGv0.2.2 Tacotron2_PyT 1560936208.706998110 (/workspace/tacotron2/dllogger/logger.py:251) iter_time: 3.0393316745758057
Batch: 8/260 epoch 0
:::NVLOGv0.2.2 Tacotron2_PyT 1560936208.711485624 (/workspace/tacotron2/dllogger/logger.py:251) train_iter_start: 8
:::NVLOGv0.2.2 Tacotron2_PyT 1560936210.236668825 (/workspace/tacotron2/dllogger/logger.py:251) train_iteration_loss: 5.516331672668457
```
### Getting the data
The Tacotron 2 and WaveGlow models were trained on the LJSpeech-1.1 dataset.
This repository contains the `./scripts/prepare_dataset.sh` script which will automatically download and extract the whole dataset. By default, data will be extracted to the `./LJSpeech-1.1` directory. The dataset directory contains a `README` file, a `wavs` directory with all audio samples, and a file `metadata.csv` that contains audio file names and the corresponding transcripts.
#### Dataset guidelines
The LJSpeech dataset has 13,100 clips that amount to about 24 hours of speech. Since the original dataset keeps all transcripts in a single `metadata.csv` file, this repository provides file lists in the `./filelists` directory that determine the training and validation subsets: `ljs_audio_text_train_filelist.txt` is the subset used as the training dataset and `ljs_audio_text_val_filelist.txt` is the subset used as the validation dataset.
#### Multi-dataset
To use datasets different than the default LJSpeech dataset:
1. Prepare a directory with all audio files and pass it to the `--dataset-path` command-line option.
2. Add two text files containing file lists: one for the training subset (`--training-files`) and one for the validation subset (`--validation-files`).
The structure of the filelists should be as follows:
```bash
`<audio file path>|<transcript>`
```
The `<audio file path>` is the relative path to the path provided by the `--dataset-path` option.
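The snippet below shows how one such entry resolves to an audio path and transcript; the file name and transcript are made up for the example.
```python
import os

dataset_path = "/path/to/my_dataset"   # value passed to --dataset-path

# One filelist entry: "<audio file path>|<transcript>"
line = "wavs/clip_0001.wav|An example transcript for the corresponding audio clip."

audio_rel_path, transcript = line.rstrip("\n").split("|", 1)
audio_path = os.path.join(dataset_path, audio_rel_path)
```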
### Training process
The Tacotron2 and WaveGlow models are trained separately and independently.
Both models obtain mel-spectrograms from short time Fourier transform (STFT)
during training. These mel-spectrograms are used for loss computation in case
of Tacotron 2 and as conditioning input to the network in case of WaveGlow.
The training loss is averaged over an entire training epoch, whereas the
validation loss is averaged over the validation dataset. Performance is
reported in total output mel-spectrograms per second for the Tacotron 2 model and
in total output samples per second for the WaveGlow model. Both measures are
recorded as `train_iter_items/sec` (after each iteration) and
`train_epoch_items/sec` (averaged over epoch) in the output log file `./output/nvlog.json`. The result is
averaged over an entire training epoch and summed over all GPUs that were
included in the training.
Even though the training script uses all available GPUs, you can change
this behavior by setting the `CUDA_VISIBLE_DEVICES` variable in your
environment or by setting the `NV_GPU` variable at the Docker container launch
([see section "GPU isolation"](https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolation)).
### Inference process
You can run inference using the `./inference.py` script. This script takes
text as input and runs Tacotron 2 and then WaveGlow inference to produce an
audio file. It requires pre-trained checkpoints from Tacotron 2 and WaveGlow
models and input text as a text file, with one phrase per line.
To run inference, issue:
```bash
python inference.py --tacotron2 <Tacotron2_checkpoint> --waveglow <WaveGlow_checkpoint> --wn-channels 256 -o output/ --include-warmup -i phrases/phrase.txt --fp16
```
Here, `Tacotron2_checkpoint` and `WaveGlow_checkpoint` are pre-trained
checkpoints for the respective models, and `phrases/phrase.txt` contains input
phrases. The number of text lines determines the inference batch size. Audio
will be saved in the output folder. The audio files [audio_fp16](./audio/audio_fp16.wav)
and [audio_fp32](./audio/audio_fp32.wav) were generated using checkpoints from
mixed precision and FP32 training, respectively.
You can find all the available options by calling `python inference.py --help`.
You can also run inference on the CPU with TorchScript by adding the `--cpu` flag:
```bash
export CUDA_VISIBLE_DEVICES=
```
```bash
python inference.py --tacotron2 <Tacotron2_checkpoint> --waveglow <WaveGlow_checkpoint> --wn-channels 256 --cpu -o output/ -i phrases/phrase.txt
```
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model
performance in training and inference mode.
#### Training performance benchmark
To benchmark the training performance on a specific batch size, run:
**Tacotron 2**
* For 1 GPU
* FP16
```bash
python train.py -m Tacotron2 -o <output_dir> -lr 1e-3 --epochs 10 -bs <batch_size> --weight-decay 1e-6 --grad-clip-thresh 1.0 --cudnn-enabled --log-file nvlog.json --load-mel-from-disk --training-files=filelists/ljs_mel_text_train_subset_2500_filelist.txt --validation-files=filelists/ljs_mel_text_val_filelist.txt --dataset-path <dataset-path> --amp
```
* TF32 (or FP32 if TF32 not enabled)
```bash
python train.py -m Tacotron2 -o <output_dir> -lr 1e-3 --epochs 10 -bs <batch_size> --weight-decay 1e-6 --grad-clip-thresh 1.0 --cudnn-enabled --log-file nvlog.json --load-mel-from-disk --training-files=filelists/ljs_mel_text_train_subset_2500_filelist.txt --validation-files=filelists/ljs_mel_text_val_filelist.txt --dataset-path <dataset-path>
```
* For multiple GPUs
* FP16
```bash
python -m multiproc train.py -m Tacotron2 -o <output_dir> -lr 1e-3 --epochs 10 -bs <batch_size> --weight-decay 1e-6 --grad-clip-thresh 1.0 --cudnn-enabled --log-file nvlog.json --load-mel-from-disk --training-files=filelists/ljs_mel_text_train_subset_2500_filelist.txt --validation-files=filelists/ljs_mel_text_val_filelist.txt --dataset-path <dataset-path> --amp
```
* TF32 (or FP32 if TF32 not enabled)
```bash
python -m multiproc train.py -m Tacotron2 -o <output_dir> -lr 1e-3 --epochs 10 -bs <batch_size> --weight-decay 1e-6 --grad-clip-thresh 1.0 --cudnn-enabled --log-file nvlog.json --load-mel-from-disk --training-files=filelists/ljs_mel_text_train_subset_2500_filelist.txt --validation-files=filelists/ljs_mel_text_val_filelist.txt --dataset-path <dataset-path>
```
**WaveGlow**
* For 1 GPU
* FP16
```bash
python train.py -m WaveGlow -o <output_dir> -lr 1e-4 --epochs 10 -bs <batch_size> --segment-length 8000 --weight-decay 0 --grad-clip-thresh 65504.0 --cudnn-enabled --cudnn-benchmark --log-file nvlog.json --training-files filelists/ljs_audio_text_train_subset_1250_filelist.txt --dataset-path <dataset-path> --amp
```
* TF32 (or FP32 if TF32 not enabled)
```bash
python train.py -m WaveGlow -o <output_dir> -lr 1e-4 --epochs 10 -bs <batch_size> --segment-length 8000 --weight-decay 0 --grad-clip-thresh 3.4028234663852886e+38 --cudnn-enabled --cudnn-benchmark --log-file nvlog.json --training-files filelists/ljs_audio_text_train_subset_1250_filelist.txt --dataset-path <dataset-path>
```
* For multiple GPUs
* FP16
```bash
python -m multiproc train.py -m WaveGlow -o <output_dir> -lr 1e-4 --epochs 10 -bs <batch_size> --segment-length 8000 --weight-decay 0 --grad-clip-thresh 65504.0 --cudnn-enabled --cudnn-benchmark --log-file nvlog.json --training-files filelists/ljs_audio_text_train_subset_1250_filelist.txt --dataset-path <dataset-path> --amp
```
* TF32 (or FP32 if TF32 not enabled)
```bash
python -m multiproc train.py -m WaveGlow -o <output_dir> -lr 1e-4 --epochs 10 -bs <batch_size> --segment-length 8000 --weight-decay 0 --grad-clip-thresh 3.4028234663852886e+38 --cudnn-enabled --cudnn-benchmark --log-file nvlog.json --training-files filelists/ljs_audio_text_train_subset_1250_filelist.txt --dataset-path <dataset-path>
```
Each of these scripts runs for 10 epochs and for each epoch measures the
average number of items per second. The performance results can be read from
the `nvlog.json` files produced by the commands.
#### Inference performance benchmark
To benchmark the inference performance on a batch size=1, run:
* For FP16
```bash
python inference.py --tacotron2 <Tacotron2_checkpoint> --waveglow <WaveGlow_checkpoint> -o output/ --include-warmup -i phrases/phrase_1_64.txt --fp16 --log-file=output/nvlog_fp16.json
```
* For TF32 (or FP32 if TF32 not enabled)
```bash
python inference.py --tacotron2 <Tacotron2_checkpoint> --waveglow <WaveGlow_checkpoint> -o output/ --include-warmup -i phrases/phrase_1_64.txt --log-file=output/nvlog_fp32.json
```
The output log files will contain performance numbers for Tacotron 2 model
(number of output mel-spectrograms per second, reported as `tacotron2_items_per_sec`)
and for WaveGlow (number of output samples per second, reported as `waveglow_items_per_sec`).
The `inference.py` script will run a few warmup iterations before running the benchmark.
### Results
The following sections provide details on how we achieved our performance
and accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 40GB)
Our results were obtained by running the `./platform/DGXA100_{tacotron2,waveglow}_{AMP,TF32}_{1,4,8}NGPU_train.sh`
training script in the PyTorch-20.06-py3 NGC container on
NVIDIA DGX A100 (8x A100 40GB) GPUs.
All of the results were produced using the `train.py` script as described in the
[Training process](#training-process) section of this document. For each model,
the loss is taken from a sample run.
| Loss (Model/Epoch) | 1 | 250 | 500 | 750 | 1000 |
| :----------------: | ------: | ------: | ------: | ------: | ------: |
| Tacotron 2 FP16 | 3.82| 0.56| 0.42| 0.38| 0.35|
| Tacotron 2 TF32 | 3.50| 0.54| 0.41| 0.37| 0.35|
| WaveGlow FP16 | -3.31| -5.72| -5.87 | -5.94| -5.99|
| WaveGlow TF32 | -4.46| -5.93| -5.98| | |

Figure 4. Tacotron 2 FP16 loss - batch size 128 (sample run)

Figure 5. Tacotron 2 TF32 loss - batch size 128 (sample run)

Figure 6. WaveGlow FP16 loss - batch size 10 (sample run)

Figure 7. WaveGlow TF32 loss - batch size 4 (sample run)
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `./platform/DGX1_{tacotron2,waveglow}_{AMP,TF32}_{1,4,8}NGPU_train.sh`
training script in the PyTorch-20.06-py3 NGC container on
NVIDIA DGX-1 with 8x V100 16G GPUs.
All of the results were produced using the `train.py` script as described in the
[Training process](#training-process) section of this document.
| Loss (Model/Epoch) | 1 | 250 | 500 | 750 | 1000 |
| :----------------: | ------: | ------: | ------: | ------: | ------: |
| Tacotron 2 FP16 | 13.0732 | 0.5736 | 0.4408 | 0.3923 | 0.3735 |
| Tacotron 2 FP32 | 8.5776 | 0.4807 | 0.3875 | 0.3421 | 0.3308 |
| WaveGlow FP16 | -2.2054 | -5.7602 | -5.901 | -5.9706 | -6.0258 |
| WaveGlow FP32 | -3.0327 | -5.858 | -6.0056 | -6.0613 | -6.1087 |

Figure 4. Tacotron 2 FP16 loss - batch size 104 (mean and std over 16 runs)

Figure 5. Tacotron 2 FP32 loss - batch size 48 (mean and std over 16 runs)

Figure 6. WaveGlow FP16 loss - batch size 10 (mean and std over 16 runs)

Figure 7. WaveGlow FP32 loss - batch size 4 (mean and std over 16 runs)
#### Training curves

Figure 3. Tacotron 2 and WaveGlow training loss.
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 40GB)
Our results were obtained by running the `./platform/DGXA100_{tacotron2,waveglow}_{AMP,TF32}_{1,4,8}NGPU_train.sh`
training script in the PyTorch-20.06-py3 NGC container on
NVIDIA DGX A100 (8x A100 40GB) GPUs. Performance numbers (in output mel-spectrograms per second for
Tacotron 2 and output samples per second for WaveGlow)
were averaged over an entire training epoch.
This table shows the results for Tacotron 2:
|Number of GPUs|Batch size per GPU|Number of mels used with mixed precision|Number of mels used with TF32|Speed-up with mixed precision|Multi-GPU weak scaling with mixed precision|Multi-GPU weak scaling with TF32|
|---:|---:|---:|---:|---:|---:|---:|
|1| 128| 26,484| 31,499| 0.84| 1.00| 1.00|
|4| 128| 107,482| 124,591| 0.86| 4.06| 3.96|
|8| 128| 209,186| 250,556| 0.83| 7.90| 7.95|
The following table shows the results for WaveGlow:
|Number of GPUs|Batch size per GPU|Number of samples used with mixed precision|Number of samples used with TF32|Speed-up with mixed precision|Multi-GPU weak scaling with mixed precision|Multi-GPU weak scaling with TF32|
|---:|---:|---:|---:|---:|---:|---:|
|1| 10@FP16, 4@TF32 | 149,479| 67,581| 2.21| 1.00| 1.00|
|4| 10@FP16, 4@TF32 | 532,363| 233,846| 2.28| 3.56| 3.46|
|8| 10@FP16, 4@TF32 | 905,043| 383,043| 2.36| 6.05| 5.67|
##### Expected training time
The following table shows the expected training time for convergence for Tacotron 2 (1501 epochs):
|Number of GPUs|Batch size per GPU|Time to train with mixed precision (Hrs)|Time to train with TF32 (Hrs)|Speed-up with mixed precision|
|---:|---:|---:|---:|---:|
|1| 128| 112| 94| 0.84|
|4| 128| 29| 25| 0.87|
|8| 128| 16| 14| 0.84|
The following table shows the expected training time for convergence for WaveGlow (1001 epochs):
|Number of GPUs|Batch size per GPU|Time to train with mixed precision (Hrs)|Time to train with TF32 (Hrs)|Speed-up with mixed precision|
|---:|---:|---:|---:|---:|
|1| 10@FP16, 4@TF32 | 188| 416| 2.21|
|4| 10@FP16, 4@TF32 | 54| 122| 2.27|
|8| 10@FP16, 4@TF32 | 33| 75| 2.29|
##### Training performance: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `./platform/DGX1_{tacotron2,waveglow}_{AMP,TF32}_{1,4,8}NGPU_train.sh`
training script in the PyTorch-20.06-py3 NGC container on NVIDIA DGX-1 with
8x V100 16G GPUs. Performance numbers (in output mel-spectrograms per second for
Tacotron 2 and output samples per second for WaveGlow) were averaged over
an entire training epoch.
This table shows the results for Tacotron 2:
|Number of GPUs|Batch size per GPU|Number of mels used with mixed precision|Number of mels used with FP32|Speed-up with mixed precision|Multi-GPU weak scaling with mixed precision|Multi-GPU weak scaling with FP32|
|---:|---:|---:|---:|---:|---:|---:|
|1|104@FP16, 48@FP32| 15,891| 9,174| 1.73| 1.00| 1.00|
|4|104@FP16, 48@FP32| 53,417| 32,035| 1.67| 3.36| 3.49|
|8|104@FP16, 48@FP32| 115,032| 58,703| 1.96| 7.24| 6.40|
The following table shows the results for WaveGlow:
|Number of GPUs|Batch size per GPU|Number of samples used with mixed precision|Number of samples used with FP32|Speed-up with mixed precision|Multi-GPU weak scaling with mixed precision|Multi-GPU weak scaling with FP32|
|---:|---:|---:|---:|---:|---:|---:|
|1| 10@FP16, 4@FP32 | 105,873| 33,761| 3.14| 1.00| 1.00|
|4| 10@FP16, 4@FP32 | 364,471| 118,254| 3.08| 3.44| 3.50|
|8| 10@FP16, 4@FP32 | 690,909| 222,794| 3.10| 6.53| 6.60|
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Expected training time
The following table shows the expected training time for convergence for Tacotron 2 (1501 epochs):
|Number of GPUs|Batch size per GPU|Time to train with mixed precision (Hrs)|Time to train with FP32 (Hrs)|Speed-up with mixed precision|
|---:|---:|---:|---:|---:|
|1| 104@FP16, 48@FP32| 181| 333| 1.84|
|4| 104@FP16, 48@FP32| 53| 88| 1.66|
|8| 104@FP16, 48@FP32| 31| 48| 1.56|
The following table shows the expected training time for convergence for WaveGlow (1001 epochs):
|Number of GPUs|Batch size per GPU|Time to train with mixed precision (Hrs)|Time to train with FP32 (Hrs)|Speed-up with mixed precision|
|---:|---:|---:|---:|---:|
|1| 10@FP16, 4@FP32 | 249| 793| 3.18|
|4| 10@FP16, 4@FP32 | 78| 233| 3.00|
|8| 10@FP16, 4@FP32 | 48| 127| 2.98|
#### Inference performance results
The following tables show inference statistics for the Tacotron2 and WaveGlow
text-to-speech system, gathered from 1000 inference runs, on 1x A100, 1x V100 and 1x T4,
respectively. Latency is measured from the start of Tacotron 2 inference to
the end of WaveGlow inference. The tables include average latency, latency standard
deviation, and latency confidence intervals. Throughput is measured
as the number of generated audio samples per second. RTF is the real-time factor
which tells how many seconds of speech are generated in 1 second of compute.
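For example, in the first row of the NVIDIA A100 table below, an average of 6.99 seconds of audio is generated in 0.80 seconds of compute, giving an RTF of 6.99 / 0.80, or approximately 8.74.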
##### Inference performance: NVIDIA DGX A100 (1x A100 40GB)
Our results were obtained by running the `inference-script-name.sh` inferencing
benchmarking script in the PyTorch-20.06-py3 NGC container on NVIDIA DGX A100 (1x A100 40GB) GPU.
|Batch size|Input length|Precision|WN channels|Avg latency (s)|Latency std (s)|Latency confidence interval 50% (s)|Latency confidence interval 90% (s)|Latency confidence interval 95% (s)|Latency confidence interval 99% (s)|Throughput (samples/sec)|Speed-up with mixed precision|Avg mels generated (81 mels=1 sec of speech)|Avg audio length (s)|Avg RTF|
|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|
|1| 128| FP16| 256| 0.80| 0.02| 0.80| 0.83| 0.84| 0.86| 192,086| 1.08| 602| 6.99| 8.74|
|4| 128| FP16| 256| 1.05| 0.03| 1.05| 1.09| 1.10| 1.13| 602,856| 1.20| 619| 7.19| 6.85|
|1| 128| FP32| 256| 0.87| 0.02| 0.87| 0.90| 0.91| 0.93| 177,210| 1.00| 601| 6.98| 8.02|
|4| 128| FP32| 256| 1.27| 0.03| 1.26| 1.31| 1.32| 1.35| 500,458| 1.00| 620| 7.20| 5.67|
|1| 128| FP16| 512| 0.87| 0.02| 0.87| 0.90| 0.92| 0.94| 176,135| 1.12| 601| 6.98| 8.02|
|4| 128| FP16| 512| 1.37| 0.03| 1.36| 1.42| 1.43| 1.45| 462,691| 1.32| 619| 7.19| 5.25|
|1| 128| FP32| 512| 0.98| 0.03| 0.98| 1.02| 1.03| 1.07| 156,586| 1.00| 602| 6.99| 7.13|
|4| 128| FP32| 512| 1.81| 0.05| 1.79| 1.86| 1.90| 1.93| 351,465| 1.00| 620| 7.20| 3.98|
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
|Batch size|Input length|Precision|WN channels|Avg latency (s)|Latency std (s)|Latency confidence interval 50% (s)|Latency confidence interval 90% (s)|Latency confidence interval 95% (s)|Latency confidence interval 99% (s)|Throughput (samples/sec)|Speed-up with mixed precision|Avg mels generated (81 mels=1 sec of speech)|Avg audio length (s)|Avg RTF|
|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|
|1| 128| FP16| 256| 1.14| 0.07| 1.12| 1.20| 1.33| 1.40| 136,069| 1.58| 602| 6.99| 6.13|
|4| 128| FP16| 256| 1.52| 0.05| 1.52| 1.58| 1.61| 1.65| 416,688| 1.72| 619| 7.19| 4.73|
|1| 128| FP32| 256| 1.79| 0.06| 1.78| 1.86| 1.89| 1.99| 86,175| 1.00| 602| 6.99| 3.91|
|4| 128| FP32| 256| 2.61| 0.07| 2.61| 2.71| 2.74| 2.78| 242,656| 1.00| 619| 7.19| 2.75|
|1| 128| FP16| 512| 1.25| 0.08| 1.23| 1.32| 1.44| 1.50| 124,057| 1.90| 602| 6.99| 5.59|
|4| 128| FP16| 512| 2.11| 0.06| 2.10| 2.19| 2.22| 2.29| 300,505| 2.37| 620| 7.20| 3.41|
|1| 128| FP32| 512| 2.36| 0.08| 2.35| 2.46| 2.54| 2.61| 65,239| 1.00| 601| 6.98| 2.96|
|4| 128| FP32| 512| 5.00| 0.14| 4.96| 5.18| 5.26| 5.42| 126,810| 1.00| 618| 7.18| 1.44|
##### Inference performance: NVIDIA T4
|Batch size|Input length|Precision|WN channels|Avg latency (s)|Latency std (s)|Latency confidence interval 50% (s)|Latency confidence interval 90% (s)|Latency confidence interval 95% (s)|Latency confidence interval 99% (s)|Throughput (samples/sec)|Speed-up with mixed precision|Avg mels generated (81 mels=1 sec of speech)|Avg audio length (s)|Avg RTF|
|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|
|1| 128| FP16| 256| 1.23| 0.05| 1.22| 1.29| 1.33| 1.42| 125,397| 2.46| 602| 6.99| 5.68|
|4| 128| FP16| 256| 2.85| 0.08| 2.84| 2.96| 2.99| 3.07| 222,672| 1.90| 620| 7.20| 2.53|
|1| 128| FP32| 256| 3.03| 0.10| 3.02| 3.14| 3.19| 3.32| 50,900| 1.00| 602| 6.99| 2.31|
|4| 128| FP32| 256| 5.41| 0.15| 5.38| 5.61| 5.66| 5.85| 117,325| 1.00| 620| 7.20| 1.33|
|1| 128| FP16| 512| 1.75| 0.08| 1.73| 1.87| 1.91| 1.98| 88,319| 2.79| 602| 6.99| 4.00|
|4| 128| FP16| 512| 4.59| 0.13| 4.57| 4.77| 4.83| 4.94| 138,226| 2.84| 620| 7.20| 1.57|
|1| 128| FP32| 512| 4.87| 0.14| 4.86| 5.03| 5.13| 5.27| 31,630| 1.00| 602| 6.99| 1.44|
|4| 128| FP32| 512| 13.02| 0.37| 12.96| 13.53| 13.67| 14.13| 48,749| 1.00| 620| 7.20| 0.55|
Our results were obtained by running the `./run_latency_tests.sh` script in
the PyTorch-20.06-py3 NGC container. Please note that to reproduce the results,
you need to provide pretrained checkpoints for Tacotron 2 and WaveGlow. Please
edit the script to provide your checkpoint filenames.
To compare with inference performance on the CPU with TorchScript, benchmark CPU inference using the `./run_latency_tests_cpu.sh` script and collect the performance numbers for batch sizes 1 and 4. Intel's optimizations for PyTorch on CPU are included; set `export OMP_NUM_THREADS=<num physical cores>` according to the number of physical cores of your CPU. For reference, see: https://software.intel.com/content/www/us/en/develop/articles/maximize-tensorflow-performance-on-cpu-considerations-and-recommendations-for-inference.html
## Release notes
### Changelog
June 2020
* Updated performance tables to include A100 results
March 2020
* Added Tacotron 2 and WaveGlow inference using TensorRT Inference Server with custom TensorRT backend in `trtis_cpp`
* Added Conversational AI demo script in `notebooks/conversationalai`
* Fixed loading CUDA RNG state in `load_checkpoint()` function in `train.py`
* Fixed FP16 export to TensorRT in `trt/README.md`
January 2020
* Updated batch sizes and performance results for Tacotron 2.
December 2019
* Added export and inference scripts for TensorRT. See [Tacotron2 TensorRT README](trt/README.md).
November 2019
* Implemented training resume from checkpoint
* Added notebook for running Tacotron 2 and WaveGlow in TRTIS.
October 2019
* Tacotron 2 inference with torch.jit.script
September 2019
* Introduced inference statistics
August 2019
* Fixed inference results
* Fixed initialization of Batch Normalization
July 2019
* Changed measurement units for Tacotron 2 training and inference performance
benchmarks from input tokens per second to output mel-spectrograms per second
* Introduced batched inference
* Included warmup in the inference script
June 2019
* AMP support
* Data preprocessing for Tacotron 2 training
* Fixed dropouts on LSTMCells
March 2019
* Initial release
### Known issues
There are no known issues in this release.
|
PyTorch/LanguageModeling/Transformer-XL/pytorch/scripts | scripts | inference_benchmark | #!/bin/bash
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CHECKPOINT=${CHECKPOINT:-"LM-TFM/checkpoint_best.pt"}
MODEL=${MODEL:-"base"}
GPU=${GPU:-"v100"}
BATCH_SIZES=(1 2 4 8 16 32)
TYPES=("pytorch" "torchscript")
# "empty" MATH corresponds to fp32
MATHS=("" "--fp16")
MATHS_FULL=("fp32" "fp16")
for (( i = 0; i < ${#TYPES[@]}; i++ )); do
for (( j = 0; j < ${#BATCH_SIZES[@]}; j++ )); do
for (( k = 0; k < ${#MATHS[@]}; k++ )); do
echo type: ${TYPES[i]} batch size: ${BATCH_SIZES[j]} math: ${MATHS[k]}
DIR="LM-TFM/inference/${GPU}_${BATCH_SIZES[j]}_${MATHS_FULL[k]}_${TYPES[i]}"
mkdir -p "${DIR}"
bash run_wt103_"${MODEL}".sh eval 1 \
--work_dir "${DIR}" \
--model "${CHECKPOINT}" \
--type "${TYPES[i]}" \
--batch_size "${BATCH_SIZES[j]}" \
--log_interval 1 \
--no_env \
"${MATHS[k]}" \
--save_data \
"${@:1}"
done
done
done
|
PyTorch/Forecasting/TFT/triton/runner/maintainer | maintainer | __init__ | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .container import Container
from .docker.maintainer import DockerMaintainer
from .maintainer import Maintainer
|
TensorFlow/Detection/SSD/models/research/object_detection/dataset_tools | dataset_tools | oid_tfrecord_creation_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for oid_tfrecord_creation.py."""
import pandas as pd
import tensorflow as tf
from object_detection.dataset_tools import oid_tfrecord_creation
def create_test_data():
data = {
'ImageID': ['i1', 'i1', 'i1', 'i1', 'i1', 'i2', 'i2'],
'LabelName': ['a', 'a', 'b', 'b', 'c', 'b', 'c'],
'YMin': [0.3, 0.6, 0.8, 0.1, None, 0.0, 0.0],
'XMin': [0.1, 0.3, 0.7, 0.0, None, 0.1, 0.1],
'XMax': [0.2, 0.3, 0.8, 0.5, None, 0.9, 0.9],
'YMax': [0.3, 0.6, 1, 0.8, None, 0.8, 0.8],
'IsOccluded': [0, 1, 1, 0, None, 0, 0],
'IsTruncated': [0, 0, 0, 1, None, 0, 0],
'IsGroupOf': [0, 0, 0, 0, None, 0, 1],
'IsDepiction': [1, 0, 0, 0, None, 0, 0],
'ConfidenceImageLabel': [None, None, None, None, 0, None, None],
}
df = pd.DataFrame(data=data)
label_map = {'a': 0, 'b': 1, 'c': 2}
return label_map, df
class TfExampleFromAnnotationsDataFrameTests(tf.test.TestCase):
def test_simple(self):
label_map, df = create_test_data()
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i1'], label_map, 'encoded_image_test')
self.assertProtoEquals(
"""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i1.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.3, 0.6, 0.8, 0.1] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.3, 0.7, 0.0] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.3, 0.6, 1.0, 0.8] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.2, 0.3, 0.8, 0.5] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [0, 0, 1, 1] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["a", "a", "b", "b"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i1" } } }
feature {
key: "image/object/depiction"
value { int64_list { value: [1, 0, 0, 0] } } }
feature {
key: "image/object/group_of"
value { int64_list { value: [0, 0, 0, 0] } } }
feature {
key: "image/object/occluded"
value { int64_list { value: [0, 1, 1, 0] } } }
feature {
key: "image/object/truncated"
value { int64_list { value: [0, 0, 0, 1] } } }
feature {
key: "image/class/label"
value { int64_list { value: [2] } } }
feature {
key: "image/class/text"
value { bytes_list { value: ["c"] } } } }
""", tf_example)
def test_no_attributes(self):
label_map, df = create_test_data()
del df['IsDepiction']
del df['IsGroupOf']
del df['IsOccluded']
del df['IsTruncated']
del df['ConfidenceImageLabel']
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i2'], label_map, 'encoded_image_test')
self.assertProtoEquals("""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i2.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.0, 0.0] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.1] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.8, 0.8] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.9, 0.9] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [1, 2] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["b", "c"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i2" } } } }
""", tf_example)
def test_label_filtering(self):
label_map, df = create_test_data()
label_map = {'a': 0}
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i1'], label_map, 'encoded_image_test')
self.assertProtoEquals(
"""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i1.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.3, 0.6] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.3] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.3, 0.6] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.2, 0.3] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["a", "a"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i1" } } }
feature {
key: "image/object/depiction"
value { int64_list { value: [1, 0] } } }
feature {
key: "image/object/group_of"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/object/occluded"
value { int64_list { value: [0, 1] } } }
feature {
key: "image/object/truncated"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/class/label"
value { int64_list { } } }
feature {
key: "image/class/text"
value { bytes_list { } } } }
""", tf_example)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/resnet50 | resnet50 | bottleneck_block | import tensorflow as tf
from mrcnn_tf2.model.models.resnet50 import Conv2DBlock
class BottleneckBlock(tf.keras.layers.Layer):
def __init__(self, filters, strides, expansion=1, shortcut='conv2d', trainable=True, *args, **kwargs):
super().__init__(trainable=trainable, *args, **kwargs)
if shortcut == 'conv2d':
self.shortcut = Conv2DBlock(
filters=filters * expansion,
kernel_size=1,
strides=strides,
use_batch_norm=True,
use_relu=False, # Applied at the end after addition with bottleneck
name='shortcut'
)
elif shortcut == 'avg_pool':
self.shortcut = tf.keras.layers.AveragePooling2D(
pool_size=1,
strides=strides,
name='shortcut'
)
else:
self.shortcut = tf.keras.layers.Layer(name='shortcut') # identity
self.conv2d_1 = Conv2DBlock(
filters=filters,
kernel_size=1,
strides=1,
use_batch_norm=True,
use_relu=True
)
self.conv2d_2 = Conv2DBlock(
filters=filters,
kernel_size=3,
strides=strides,
use_batch_norm=True,
use_relu=True
)
self.conv2d_3 = Conv2DBlock(
filters=filters * expansion,
kernel_size=1,
strides=1,
use_batch_norm=True,
use_relu=False # Applied at the end after addition with shortcut
)
self.add = tf.keras.layers.Add()
self.relu = tf.keras.layers.ReLU()
def call(self, inputs, training=None, **kwargs):
shortcut = self.shortcut(inputs)
bottleneck = self.conv2d_1(inputs, training=training)
bottleneck = self.conv2d_2(bottleneck, training=training)
bottleneck = self.conv2d_3(bottleneck, training=training)
net = self.add([bottleneck, shortcut])
net = self.relu(net)
return net
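# Usage sketch (illustrative only; assumes this module and its Conv2DBlock dependency
# are importable in the current environment):
#   block = BottleneckBlock(filters=64, strides=1, expansion=4, shortcut='conv2d')
#   outputs = block(tf.random.uniform([2, 56, 56, 256]))  # -> shape [2, 56, 56, 256]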
|
TensorFlow/Detection/SSD/models/research/object_detection/g3doc | g3doc | instance_segmentation | ## Run an Instance Segmentation Model
For some applications it isn't enough to localize an object with a
simple bounding box. For instance, you might want to segment an object's region
once it is detected. This class of problems is called **instance segmentation**.
<p align="center">
<img src="img/kites_with_segment_overlay.png" width=676 height=450>
</p>
### Materializing data for instance segmentation {#materializing-instance-seg}
Instance segmentation is an extension of object detection, where a binary mask
(i.e. object vs. background) is associated with every bounding box. This allows
for more fine-grained information about the extent of the object within the box.
To train an instance segmentation model, a groundtruth mask must be supplied for
every groundtruth bounding box. In addition to the proto fields listed in the
section titled [Using your own dataset](using_your_own_dataset.md), one must
also supply `image/object/mask`, which can either be a repeated list of
single-channel encoded PNG strings, or a single dense 3D binary tensor where
masks corresponding to each object are stacked along the first dimension. Each
is described in more detail below.
#### PNG Instance Segmentation Masks
Instance segmentation masks can be supplied as serialized PNG images.
```shell
image/object/mask = ["\x89PNG\r\n\x1A\n\x00\x00\x00\rIHDR\...", ...]
```
These masks are whole-image masks, one for each object instance. The spatial
dimensions of each mask must agree with the image. Each mask has only a single
channel, and the pixel values are either 0 (background) or 1 (object mask).
**PNG masks are the preferred parameterization since they offer considerable
space savings compared to dense numerical masks.**
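As an aside (not part of the original tooling), the sketch below shows one way to produce such single-channel PNG strings from binary NumPy masks; it assumes Pillow and NumPy are available in your environment:
```python
import io
import numpy as np
from PIL import Image
def encode_mask_to_png(mask):
    """Serializes a [height, width] binary mask (values 0/1) to single-channel PNG bytes."""
    image = Image.fromarray(mask.astype(np.uint8), mode='L')
    with io.BytesIO() as buffer:
        image.save(buffer, format='PNG')
        return buffer.getvalue()
# One encoded string per groundtruth box, for example:
# feature['image/object/mask'] = [encode_mask_to_png(m) for m in instance_masks]
```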
#### Dense Numerical Instance Segmentation Masks
Masks can also be specified via a dense numerical tensor.
```shell
image/object/mask = [0.0, 0.0, 1.0, 1.0, 0.0, ...]
```
For an image with dimensions `H` x `W` and `num_boxes` groundtruth boxes, the
mask corresponds to a [`num_boxes`, `H`, `W`] float32 tensor, flattened into a
single vector of shape `num_boxes` * `H` * `W`. In TensorFlow, examples are read
in row-major format, so the elements are organized as:
```shell
... mask 0 row 0 ... mask 0 row 1 ... // ... mask 0 row H-1 ... mask 1 row 0 ...
```
where each row has W contiguous binary values.
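As an illustration (a sketch assuming the masks are held in a NumPy array), the flattening can be done as follows:
```python
import numpy as np
num_boxes, height, width = 2, 3, 4
masks = np.zeros((num_boxes, height, width), dtype=np.float32)
masks[0, 0, 2:] = 1.0  # mark part of row 0 of mask 0 as foreground
# NumPy reshapes in row-major (C) order by default, which matches the layout above.
flat = masks.reshape(-1)  # shape: [num_boxes * height * width]
# image/object/mask would then hold flat.tolist()
```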
To see example tf-records with mask labels, see the examples under the
[Preparing Inputs](preparing_inputs.md) section.
### Pre-existing config files
We provide four instance segmentation config files that you can use to train
your own models:
1. <a href="https://github.com/tensorflow/models/blob/master/research/object_detection/samples/configs/mask_rcnn_inception_resnet_v2_atrous_coco.config" target=_blank>mask_rcnn_inception_resnet_v2_atrous_coco</a>
1. <a href="https://github.com/tensorflow/models/blob/master/research/object_detection/samples/configs/mask_rcnn_resnet101_atrous_coco.config" target=_blank>mask_rcnn_resnet101_atrous_coco</a>
1. <a href="https://github.com/tensorflow/models/blob/master/research/object_detection/samples/configs/mask_rcnn_resnet50_atrous_coco.config" target=_blank>mask_rcnn_resnet50_atrous_coco</a>
1. <a href="https://github.com/tensorflow/models/blob/master/research/object_detection/samples/configs/mask_rcnn_inception_v2_coco.config" target=_blank>mask_rcnn_inception_v2_coco</a>
For more details see the [detection model zoo](detection_model_zoo.md).
### Updating a Faster R-CNN config file
Currently, the only supported instance segmentation model is [Mask
R-CNN](https://arxiv.org/abs/1703.06870), which requires Faster R-CNN as the
backbone object detector.
Once you have a baseline Faster R-CNN pipeline configuration, you can make the
following modifications in order to convert it into a Mask R-CNN model.
1. Within `train_input_reader` and `eval_input_reader`, set
`load_instance_masks` to `True`. If using PNG masks, set `mask_type` to
   `PNG_MASKS`, otherwise you can leave it as the default `NUMERICAL_MASKS`.
1. Within the `faster_rcnn` config, use a `MaskRCNNBoxPredictor` as the
`second_stage_box_predictor`.
1. Within the `MaskRCNNBoxPredictor` message, set `predict_instance_masks` to
`True`. You must also define `conv_hyperparams`.
1. Within the `faster_rcnn` message, set `number_of_stages` to `3`.
1. Add instance segmentation metrics to the set of metrics:
`'coco_mask_metrics'`.
1. Update the `input_path`s to point at your data.
Please refer to the section on [Running the pets dataset](running_pets.md) for
additional details.
> Note: The mask prediction branch consists of a sequence of convolution layers.
> You can set the number of convolution layers and their depth as follows:
>
> 1. Within the `MaskRCNNBoxPredictor` message, set the
> `mask_prediction_conv_depth` to your value of interest. The default value
> is 256. If you set it to `0` (recommended), the depth is computed
> automatically based on the number of classes in the dataset.
> 1. Within the `MaskRCNNBoxPredictor` message, set the
> `mask_prediction_num_conv_layers` to your value of interest. The default
> value is 2.
|
PyTorch/Forecasting/TFT/triton/runner | runner | config_NVIDIA-DGX-1-(1x-V100-32GB) | checkpoints:
- name: electricity_bin
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/tft_base_pyt_ckpt_ds-electricity/versions/22.11.0_amp/zip
- name: traffic_bin
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/tft_base_pyt_ckpt_ds-traffic/versions/22.11.0_amp/zip
configurations:
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: electricity_bin
dataset: electricity_bin
device: gpu
export_format: onnx
export_precision: fp32
format: trt
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: traffic_bin
dataset: traffic_bin
device: gpu
export_format: onnx
export_precision: fp32
format: trt
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: electricity_bin
dataset: electricity_bin
device: gpu
export_format: ts-trace
export_precision: fp32
format: ts-trace
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: traffic_bin
dataset: traffic_bin
device: gpu
export_format: ts-trace
export_precision: fp32
format: ts-trace
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
container_version: '22.11'
datasets:
- name: electricity_bin
- name: traffic_bin
datasets_dir: datasets
framework: PyTorch
model_name: TFT
triton_container_image: null
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
TensorFlow2/Classification/ConvNets/config/efficientnet_v2 | efficientnet_v2 | s_cfg | import tensorflow as tf
# NOTE: this confile file can further be overridden by user-defined params provided at the command line
config = dict(
path_to_impl='model.efficientnet_model_v2',
#data-related model params
num_classes=1000, # must be the same as data.num_classes
input_channels= 3,
rescale_input= 1, # binary
mean_rgb=(0.485 * 255, 0.456 * 255, 0.406 * 255), # used when rescale_input=True
std_rgb=(0.229 * 255, 0.224 * 255, 0.225 * 255), # used when rescale_input=True
    dtype=tf.float32,  # used for input image normalization/casting: tf.float32, tf.float16, or tf.bfloat16
# GUIDE
# width depth resolution dropout
# efficientnet_v2-s 1.0 1.0 300 0.2
width_coefficient= 1.0,
depth_coefficient= 1.0,
dropout_rate= 0.2, # used in the cls head
# image resolution must be set in tr/eval/predict configs below
drop_connect_rate= 0.2, # used in residual for stochastic depth
    conv_dropout=None,  # dropout before the SE block; not actually used
stem_base_filters= 24, # effnetv2
top_base_filters= 1280,
activation= 'swish', # same as silu
depth_divisor= 8,
min_depth=8,
# use_se= True, # No longer global: blocks may or may not have it.
batch_norm= 'syncbn',
bn_momentum= 0.99, # google uses 0.9
bn_epsilon= 1e-3,
weight_init= 'fan_out', # google uses untruncated
# NEW
# gn_groups=8, # group normalization
# local_pooling=0, # as opposed global pooling for SE
# headbias=None, # bias for cls head
blocks= (
# (input_filters, output_filters, kernel_size, num_repeat,expand_ratio, strides, se_ratio)
# pylint: disable=bad-whitespace
dict(input_filters=24, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=1, strides=(1, 1), se_ratio=None,id_skip=True,fused_conv=True,conv_type=None),
dict(input_filters=24, output_filters=48, kernel_size=3, num_repeat=4, expand_ratio=4, strides=(2, 2), se_ratio=None,id_skip=True,fused_conv=True,conv_type=None),
dict(input_filters=48, output_filters=64, kernel_size=3, num_repeat=4, expand_ratio=4, strides=(2, 2), se_ratio=None,id_skip=True,fused_conv=True,conv_type=None),
dict(input_filters=64, output_filters=128, kernel_size=3, num_repeat=6, expand_ratio=4, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=128, output_filters=160, kernel_size=3, num_repeat=9, expand_ratio=6, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=160, output_filters=256, kernel_size=3, num_repeat=15, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
# pylint: enable=bad-whitespace
),
) |
TensorFlow2/LanguageModeling/BERT | BERT | optimization | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
import tensorflow_addons.optimizers as tfa_optimizers
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Applies a warmup schedule on a given learning rate decay schedule."""
def __init__(
self,
initial_learning_rate,
decay_schedule_fn,
warmup_steps,
power=1.0,
name=None):
super(WarmUp, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.warmup_steps = warmup_steps
self.power = power
self.decay_schedule_fn = decay_schedule_fn
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or 'WarmUp') as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
global_step_float = tf.cast(step, tf.float32)
warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
warmup_percent_done = global_step_float / warmup_steps_float
warmup_learning_rate = (
self.initial_learning_rate *
tf.math.pow(warmup_percent_done, self.power))
return tf.cond(global_step_float < warmup_steps_float,
lambda: warmup_learning_rate,
lambda: self.decay_schedule_fn(step),
name=name)
def get_config(self):
return {
'initial_learning_rate': self.initial_learning_rate,
'decay_schedule_fn': self.decay_schedule_fn,
'warmup_steps': self.warmup_steps,
'power': self.power,
'name': self.name
}
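# Usage sketch (mirrors what create_optimizer below does): wrap a decay schedule so the
# learning rate ramps up during the first `warmup_steps` steps and then follows the decay.
#   decay = tf.keras.optimizers.schedules.PolynomialDecay(
#       initial_learning_rate=1e-4, decay_steps=10000, end_learning_rate=0.0)
#   lr_schedule = WarmUp(initial_learning_rate=1e-4, decay_schedule_fn=decay, warmup_steps=1000)
#   optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)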
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, optimizer_type="adam"):
"""Creates an optimizer with learning rate schedule."""
# Implements linear decay of the learning rate.
if optimizer_type == "adam":
power = 1.0
decayed_learning_rate_at_crossover_point = init_lr * (
(1.0 - float(num_warmup_steps) / float(num_train_steps)) ** power)
else:
power = 0.5
decayed_learning_rate_at_crossover_point = init_lr
init_lr = init_lr * (init_lr / decayed_learning_rate_at_crossover_point)
print('decayed_learning_rate_at_crossover_point = %e, adjusted_init_lr = %e' % (decayed_learning_rate_at_crossover_point, init_lr))
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=init_lr,
decay_steps=num_train_steps,
end_learning_rate=0.0,
power=power)
if num_warmup_steps:
learning_rate_fn = WarmUp(initial_learning_rate=init_lr,
decay_schedule_fn=learning_rate_fn,
warmup_steps=num_warmup_steps)
if optimizer_type == 'adam':
optimizer = AdamWeightDecay(
learning_rate=learning_rate_fn,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
else:
    skip_list = ['None']  # dummy pattern so exclude_from_layer_adaptation does not fall back to exclude_from_weight_decay when the arg is None
optimizer = tfa_optimizers.LAMB(
learning_rate=learning_rate_fn,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'],
exclude_from_layer_adaptation=skip_list)
return optimizer
class AdamWeightDecay(tf.keras.optimizers.Adam):
"""Adam enables L2 weight decay and clip_by_global_norm on gradients.
Just adding the square of the weights to the loss function is *not* the
correct way of using L2 regularization/weight decay with Adam, since that will
interact with the m and v parameters in strange ways.
  Instead we want to decay the weights in a manner that doesn't interact with
the m/v parameters. This is equivalent to adding the square of the weights to
the loss with plain (non-momentum) SGD.
"""
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
weight_decay_rate=0.0,
include_in_weight_decay=None,
exclude_from_weight_decay=None,
name='AdamWeightDecay',
**kwargs):
super(AdamWeightDecay, self).__init__(learning_rate, beta_1, beta_2,
epsilon, amsgrad, name, **kwargs)
self.weight_decay_rate = weight_decay_rate
self._include_in_weight_decay = include_in_weight_decay
self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
def from_config(cls, config):
"""Creates an optimizer from its config with WarmUp custom object."""
custom_objects = {'WarmUp': WarmUp}
return super(AdamWeightDecay, cls).from_config(
config, custom_objects=custom_objects)
def _prepare_local(self, var_device, var_dtype, apply_state):
super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype,
apply_state)
apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
self.weight_decay_rate, name='adam_weight_decay_rate')
def _decay_weights_op(self, var, learning_rate, apply_state):
do_decay = self._do_use_weight_decay(var.name)
if do_decay:
return var.assign_sub(
learning_rate * var *
apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'],
use_locking=self._use_locking)
return tf.no_op()
def _get_lr(self, var_device, var_dtype, apply_state):
"""Retrieves the learning rate with the given state."""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
apply_state = apply_state or {}
coefficients = apply_state.get((var_device, var_dtype))
if coefficients is None:
coefficients = self._fallback_apply_state(var_device, var_dtype)
apply_state[(var_device, var_dtype)] = coefficients
return coefficients['lr_t'], dict(apply_state=apply_state)
def _resource_apply_dense(self, grad, var, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
return super(AdamWeightDecay,
self)._resource_apply_dense(grad, var, **kwargs)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
return super(AdamWeightDecay,
self)._resource_apply_sparse(grad, var, indices, **kwargs)
def get_config(self):
config = super(AdamWeightDecay, self).get_config()
config.update({
'weight_decay_rate': self.weight_decay_rate,
})
return config
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(r, param_name) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
# Inspired from https://github.com/OpenNMT/OpenNMT-tf/blob/master/opennmt/optimizers/utils.py
class GradientAccumulator():
def __init__(self):
self._gradients = []
self._accum_steps = None
def zero(self, dtype):
return tf.Variable(
tf.constant(0, dtype=dtype),
trainable=False,
synchronization=tf.VariableSynchronization.ON_READ,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
@property
def step(self):
if self._accum_steps is None:
self._accum_steps = self.zero(tf.int64)
return self._accum_steps.value()
@property
def gradients(self):
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients")
return list(gradient.value() if gradient is not None else None for gradient in self._gradients)
def reset(self):
if not self._gradients:
return
self._accum_steps.assign(0)
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros(tf.shape(gradient), dtype=gradient.dtype))
def add_gradients(self, grads):
if not self._gradients:
_ = self.step
self._gradients.extend([
tf.Variable(
tf.zeros_like(g),
trainable=False,
synchronization=tf.VariableSynchronization.ON_READ
) if g is not None else None
for g in grads
])
if len(grads) != len(self._gradients):
      raise ValueError("Expected %d gradients, but got %d" % (
len(self._gradients), len(grads)))
for accum_grad, grad in zip(self._gradients, grads):
if accum_grad is not None:
accum_grad.assign_add(grad)
self._accum_steps.assign_add(1)
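# Usage sketch (illustrative): accumulate gradients over several micro-batches before a
# single optimizer update; `compute_loss` is a hypothetical helper.
#   accumulator = GradientAccumulator()
#   for micro_batch in micro_batches:
#       with tf.GradientTape() as tape:
#           loss = compute_loss(model, micro_batch)
#       accumulator.add_gradients(tape.gradient(loss, model.trainable_variables))
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()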
|
PyTorch/SpeechSynthesis/HiFiGAN/common/text | text | datestime | import re
_ampm_re = re.compile(
r'([0-9]|0[0-9]|1[0-9]|2[0-3]):?([0-5][0-9])?\s*([AaPp][Mm]\b)')
def _expand_ampm(m):
matches = list(m.groups(0))
txt = matches[0]
txt = txt if int(matches[1]) == 0 else txt + ' ' + matches[1]
if matches[2][0].lower() == 'a':
txt += ' a.m.'
elif matches[2][0].lower() == 'p':
txt += ' p.m.'
return txt
def normalize_datestime(text):
text = re.sub(_ampm_re, _expand_ampm, text)
#text = re.sub(r"([0-9]|0[0-9]|1[0-9]|2[0-3]):([0-5][0-9])?", r"\1 \2", text)
return text
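# Examples (illustrative):
#   normalize_datestime("Dinner at 7:00PM")    -> "Dinner at 7 p.m."
#   normalize_datestime("Meet me at 10:30 am") -> "Meet me at 10 30 a.m."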
|
TensorFlow2/Classification/ConvNets | ConvNets | main | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
warnings.simplefilter("ignore")
import tensorflow as tf
import horovod.tensorflow as hvd
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
import dllogger as DLLogger
from utils import hvd_utils
from copy import deepcopy
from importlib import import_module
from config.defaults import base_config
from config.defaults import Config
from utils.setup import set_flags
from runtime import Runner
from utils.cmdline_helper import parse_cmdline
def get_module_path(sys_path):
    """Converts a filesystem path to a Python module into the dotted format expected by import_module.
Ex: config/model/hparams.py -> config.model.hparams
Args:
sys_path (string): module path in sys format
Returns:
string: new format
"""
no_ext = sys_path.split('.')[0]
return no_ext.replace('/','.')
if __name__ == "__main__":
# get command line args
FLAGS = parse_cmdline()
config = Config(FLAGS.__dict__)
# get model hyperparameters from the user-provided model config
model_config = import_module(get_module_path(FLAGS.cfg))
model_config = Config(model_config.config)
    # override model hyperparameters with those provided by the user on the command line
model_config.override(FLAGS.mparams)
config.mparams = model_config
# make sure number of classes in the model config is consistent with data loader config
config.num_classes = config.mparams.num_classes
#========== horovod initialization
hvd.init()
#========== set up env variables, tf flags, and seeds
set_flags(config)
#========== set up the loggers and log dir
backends = []
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
# Prepare Model Dir
os.makedirs(config.model_dir, exist_ok=True)
# Setup dlLogger
backends+=[
JSONStreamBackend(verbosity=Verbosity.VERBOSE, filename=config.log_filename),
StdOutBackend(verbosity=Verbosity.DEFAULT)
]
DLLogger.init(backends=backends)
DLLogger.log(data=vars(config), step='PARAMETER')
DLLogger.metadata('avg_exp_per_second_training', {'unit': 'samples/s'})
DLLogger.metadata('avg_exp_per_second_training_per_GPU', {'unit': 'samples/s'})
DLLogger.metadata('avg_exp_per_second_eval', {'unit': 'samples/s'})
DLLogger.metadata('avg_exp_per_second_eval_per_GPU', {'unit': 'samples/s'})
DLLogger.metadata('latency_pct', {'unit': 'ms'})
DLLogger.metadata('latency_90pct', {'unit': 'ms'})
DLLogger.metadata('latency_95pct', {'unit': 'ms'})
DLLogger.metadata('latency_99pct', {'unit': 'ms'})
DLLogger.metadata('eval_loss', {'unit': None})
DLLogger.metadata('eval_accuracy_top_1', {'unit': None})
DLLogger.metadata('eval_accuracy_top_5', {'unit': None})
DLLogger.metadata('training_loss', {'unit': None})
DLLogger.metadata('training_accuracy_top_1', {'unit': None})
DLLogger.metadata('training_accuracy_top_5', {'unit': None})
#========== initialize the runner
runner = Runner(config, DLLogger)
#========== determine the operation mode of the runner (tr,eval,predict)
if config.mode in ["train", "train_and_eval", "training_benchmark"]:
runner.train()
if config.mode in ['eval', 'evaluate', 'inference_benchmark']:
if config.mode == 'inference_benchmark' and hvd_utils.is_using_hvd():
raise NotImplementedError("Only single GPU inference is implemented.")
elif hvd_utils.is_using_hvd():
raise NotImplementedError("Only single GPU evaluation is implemented.")
else:
runner.evaluate()
if config.mode == 'predict':
if config.predict_img_dir is None:
raise ValueError("No data to predict on.")
if not os.path.isdir(config.predict_img_dir):
raise ValueError("Provide directory with images to infer!")
if hvd_utils.is_using_hvd():
raise NotImplementedError("Only single GPU inference is implemented.")
elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:
runner.predict(config.predict_img_dir, config.predict_ckpt)
|
PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text | text | numbers | """ from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
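# Examples (illustrative):
#   normalize_numbers("$3.50")        -> "three dollars, fifty cents"
#   normalize_numbers("in 1999")      -> "in nineteen ninety-nine"
#   normalize_numbers("the 2nd time") -> "the second time"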
|
TensorFlow2/Detection/Efficientdet/dataset | dataset | label_map_util | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
from absl import logging
from six.moves import range
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def get_max_label_map_index(label_map):
"""Get maximum index in label map.
Args:
label_map: a StringIntLabelMapProto
Returns:
an integer
"""
return max([item.id for item in label_map.item])
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Given label map proto returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
'keypoints': (optional) a dictionary of keypoint string 'label' to integer
'id'.
We only allow class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field as
category name. If False or if the display_name field does not exist, uses
'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info(
'Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
category = {'id': item.id, 'name': name}
if item.keypoints:
keypoints = {}
list_of_keypoint_ids = []
for kv in item.keypoints:
if kv.id in list_of_keypoint_ids:
raise ValueError('Duplicate keypoint ids are not allowed. '
'Found {} more than once'.format(kv.id))
keypoints[kv.label] = kv.id
list_of_keypoint_ids.append(kv.id)
category['keypoints'] = keypoints
categories.append(category)
return categories
def create_class_agnostic_category_index():
"""Creates a category index with a single `object` class."""
return {1: {'id': 1, 'name': 'object'}}
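# Example (illustrative): without a label map, a default category list is generated.
#   convert_label_map_to_categories(None, max_num_classes=2)
#   -> [{'id': 1, 'name': 'category_1'}, {'id': 2, 'name': 'category_2'}]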
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf | tf | triton_ensemble_wrapper | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tritonclient.utils
import tritonclient.http
import numpy as np
import tensorflow as tf
import deployment.tf.constants as c
class RecsysTritonEnsemble:
def __init__(self, model_name, num_tables, verbose, categorical_sizes, fused_embedding=True):
self.model_name = model_name
self.triton_client = tritonclient.http.InferenceServerClient(url="localhost:8000", verbose=verbose)
if not self.triton_client.is_server_live():
raise ValueError('Triton server is not live!')
print('triton model repo: ', self.triton_client.get_model_repository_index())
def __call__(self, inputs, sigmoid=False, training=False):
numerical_features, cat_features = list(inputs.values())
batch_size = cat_features[0].shape[0]
cat_features = tf.concat(cat_features, axis=1).numpy().astype(np.int32)
numerical_features = numerical_features.numpy().astype(np.float32)
inputs = [
tritonclient.http.InferInput("categorical_features",
cat_features.shape,
tritonclient.utils.np_to_triton_dtype(np.int32)),
tritonclient.http.InferInput("numerical_features",
numerical_features.shape,
tritonclient.utils.np_to_triton_dtype(np.float32)),
]
inputs[0].set_data_from_numpy(cat_features)
inputs[1].set_data_from_numpy(numerical_features)
outputs = [tritonclient.http.InferRequestedOutput(c.ens_output_name)]
response = self.triton_client.infer(self.model_name, inputs, outputs=outputs)
result_np = response.as_numpy(c.ens_output_name)
result_np = result_np.reshape([batch_size])
return result_np
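# Usage sketch (illustrative; the model name and input tensors are hypothetical, and a
# Triton server with the deployed ensemble must already be reachable on localhost:8000):
#   ensemble = RecsysTritonEnsemble('dlrm_ensemble', num_tables=26, verbose=False,
#                                   categorical_sizes=None)
#   # __call__ unpacks the dict values in order: numerical features first, then the
#   # list of categorical feature tensors.
#   scores = ensemble({'numerical': numerical_batch, 'categorical': cat_feature_list})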
|
TensorFlow2/Recommendation/WideAndDeep/tests/feature_specs | feature_specs | no_multihot | channel_spec:
label:
- clicked
map: []
multihot_categorical: []
numerical:
- document_id_document_id_promo_sim_categories
- document_id_document_id_promo_sim_topics
- document_id_document_id_promo_sim_entities
- document_id_promo_ctr
- publisher_id_promo_ctr
- source_id_promo_ctr
- document_id_promo_count
- publish_time_days_since_published
- ad_id_ctr
- advertiser_id_ctr
- campaign_id_ctr
- ad_id_count
- publish_time_promo_days_since_published
onehot_categorical:
- ad_id
- document_id
- platform
- document_id_promo
- campaign_id
- advertiser_id
- source_id
- geo_location
- geo_location_country
- geo_location_state
- publisher_id
- source_id_promo
- publisher_id_promo
feature_spec:
ad_id:
cardinality: 250000
ad_id_count: {}
ad_id_ctr: {}
advertiser_id:
cardinality: 2500
advertiser_id_ctr: {}
campaign_id:
cardinality: 5000
campaign_id_ctr: {}
clicked: {}
document_id:
cardinality: 300000
document_id_document_id_promo_sim_categories: {}
document_id_document_id_promo_sim_entities: {}
document_id_document_id_promo_sim_topics: {}
document_id_promo:
cardinality: 100000
document_id_promo_count: {}
document_id_promo_ctr: {}
geo_location:
cardinality: 2500
geo_location_country:
cardinality: 300
geo_location_state:
cardinality: 2000
platform:
cardinality: 4
publish_time_days_since_published: {}
publish_time_promo_days_since_published: {}
publisher_id:
cardinality: 1000
publisher_id_promo:
cardinality: 1000
publisher_id_promo_ctr: {}
source_id:
cardinality: 4000
source_id_promo:
cardinality: 4000
source_id_promo_ctr: {}
metadata: {}
source_spec:
test:
- features:
- clicked
- ad_id
- document_id
- platform
- document_id_promo
- campaign_id
- advertiser_id
- source_id
- geo_location
- geo_location_country
- geo_location_state
- publisher_id
- source_id_promo
- publisher_id_promo
- document_id_document_id_promo_sim_categories
- document_id_document_id_promo_sim_topics
- document_id_document_id_promo_sim_entities
- document_id_promo_ctr
- publisher_id_promo_ctr
- source_id_promo_ctr
- document_id_promo_count
- publish_time_days_since_published
- ad_id_ctr
- advertiser_id_ctr
- campaign_id_ctr
- ad_id_count
- publish_time_promo_days_since_published
files:
- valid.csv
type: csv
train:
- features:
- clicked
- ad_id
- document_id
- platform
- document_id_promo
- campaign_id
- advertiser_id
- source_id
- geo_location
- geo_location_country
- geo_location_state
- publisher_id
- source_id_promo
- publisher_id_promo
- document_id_document_id_promo_sim_categories
- document_id_document_id_promo_sim_topics
- document_id_document_id_promo_sim_entities
- document_id_promo_ctr
- publisher_id_promo_ctr
- source_id_promo_ctr
- document_id_promo_count
- publish_time_days_since_published
- ad_id_ctr
- advertiser_id_ctr
- campaign_id_ctr
- ad_id_count
- publish_time_promo_days_since_published
files:
- train.csv
type: csv
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks | networks | bert_classifier_test | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BERT trainer network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling import networks
from official.nlp.modeling.networks import bert_classifier
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class BertClassifierTest(keras_parameterized.TestCase):
def test_bert_trainer(self):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
sequence_length = 512
test_network = networks.TransformerEncoder(
vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length)
# Create a BERT trainer with the created network.
num_classes = 3
bert_trainer_model = bert_classifier.BertClassifier(
test_network,
num_classes=num_classes)
# Create a set of 2-dimensional inputs (the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
# Invoke the trainer model on the inputs. This causes the layer to be built.
cls_outs = bert_trainer_model([word_ids, mask, type_ids])
# Validate that the outputs are of the expected shape.
expected_classification_shape = [None, num_classes]
self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list())
def test_bert_trainer_tensor_call(self):
"""Validate that the Keras object can be invoked."""
# Build a transformer network to use within the BERT trainer. (Here, we use
# a short sequence_length for convenience.)
test_network = networks.TransformerEncoder(
vocab_size=100, num_layers=2, sequence_length=2)
# Create a BERT trainer with the created network.
bert_trainer_model = bert_classifier.BertClassifier(
test_network, num_classes=2)
# Create a set of 2-dimensional data tensors to feed into the model.
word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)
type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
# Invoke the trainer model on the tensors. In Eager mode, this does the
# actual calculation. (We can't validate the outputs, since the network is
# too complex: this simply ensures we're not hitting runtime errors.)
_ = bert_trainer_model([word_ids, mask, type_ids])
def test_serialize_deserialize(self):
"""Validate that the BERT trainer can be serialized and deserialized."""
# Build a transformer network to use within the BERT trainer. (Here, we use
# a short sequence_length for convenience.)
test_network = networks.TransformerEncoder(
vocab_size=100, num_layers=2, sequence_length=5)
# Create a BERT trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mismatches.)
bert_trainer_model = bert_classifier.BertClassifier(
test_network, num_classes=4, initializer='zeros', output='predictions')
# Create another BERT trainer via serialization and deserialization.
config = bert_trainer_model.get_config()
new_bert_trainer_model = bert_classifier.BertClassifier.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_bert_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(bert_trainer_model.get_config(),
new_bert_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()
|
PyTorch/LanguageModeling/BERT/distillation | distillation | distillation_config_heads | {"distillation": true,
"distillation_config": {"use_attention_scores": false,
"use_hidden_states": false,
"use_value_states": false,
"use_embedding_states": false,
"use_pred_states": true,
"attention_loss": "kld",
"hidden_state_loss": "cosine",
"embedding_state_loss": "cosine",
"value_state_loss": "kld",
"student_teacher_layer_mapping": "last_layer"}
}
|
PyTorch/LanguageModeling/BART/utils | utils | gpu_affinity | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def set_socket_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity)
def set_single_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity[:1])
def set_single_unique_affinity(gpu_id, nproc_per_node):
devices = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
affinities = []
assigned = []
for socket_affinity in socket_affinities:
for core in socket_affinity:
if core not in assigned:
affinities.append([core])
assigned.append(core)
break
os.sched_setaffinity(0, affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, mode):
    devices = [device(i) for i in range(nproc_per_node)]
    socket_affinities = [dev.getCpuAffinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
cores_per_device = len(socket_affinity) // devices_per_group
for group_id, device_id in enumerate(device_ids):
if device_id == gpu_id:
if mode == 'interleaved':
affinity = list(socket_affinity[group_id::devices_per_group])
elif mode == 'continuous':
affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device])
else:
raise RuntimeError('Unknown set_socket_unique_affinity mode')
# reintroduce siblings
affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
os.sched_setaffinity(0, affinity)
def get_thread_siblings_list():
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(map(int, res[0]))
thread_siblings_list.append(pair)
return thread_siblings_list
def set_affinity(gpu_id, nproc_per_node, mode='socket'):
if mode == 'socket':
set_socket_affinity(gpu_id)
elif mode == 'single':
set_single_affinity(gpu_id)
elif mode == 'single_unique':
set_single_unique_affinity(gpu_id, nproc_per_node)
elif mode == 'socket_unique_interleaved':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved')
elif mode == 'socket_unique_continuous':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous')
else:
raise RuntimeError('Unknown affinity mode')
affinity = os.sched_getaffinity(0)
return affinity
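# Usage sketch (illustrative; assumes one worker process per GPU and a launcher that
# exports LOCAL_RANK for each process, e.g. torch.distributed.launch):
#   gpu_id = int(os.getenv('LOCAL_RANK', '0'))
#   affinity = set_affinity(gpu_id, nproc_per_node=deviceGetCount(), mode='socket_unique_interleaved')
#   print('process {} affinity: {}'.format(gpu_id, sorted(affinity)))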
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | faster_rcnn_inception_v2_feature_extractor | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception V2 Faster R-CNN implementation.
See "Rethinking the Inception Architecture for Computer Vision"
https://arxiv.org/abs/1512.00567
"""
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from nets import inception_v2
slim = tf.contrib.slim
def _batch_norm_arg_scope(list_ops,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
batch_norm_scale=False,
train_batch_norm=False):
"""Slim arg scope for InceptionV2 batch norm."""
if use_batch_norm:
batch_norm_params = {
'is_training': train_batch_norm,
'scale': batch_norm_scale,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon
}
normalizer_fn = slim.batch_norm
else:
normalizer_fn = None
batch_norm_params = None
return slim.arg_scope(list_ops,
normalizer_fn=normalizer_fn,
normalizer_params=batch_norm_params)
class FasterRCNNInceptionV2FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN Inception V2 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0,
depth_multiplier=1.0,
min_depth=16):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._depth_multiplier = depth_multiplier
self._min_depth = min_depth
super(FasterRCNNInceptionV2FeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN Inception V2 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
activations: A dictionary mapping feature extractor tensor names to
tensors
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
with tf.variable_scope('InceptionV2',
reuse=self._reuse_weights) as scope:
with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
batch_norm_scale=True,
train_batch_norm=self._train_batch_norm):
_, activations = inception_v2.inception_v2_base(
preprocessed_inputs,
final_endpoint='Mixed_4e',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
return activations['Mixed_4e'], activations
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name (unused).
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
net = proposal_feature_maps
depth = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
data_format = 'NHWC'
concat_dim = 3 if data_format == 'NHWC' else 1
with tf.variable_scope('InceptionV2', reuse=self._reuse_weights):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
padding='SAME',
data_format=data_format):
with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
batch_norm_scale=True,
train_batch_norm=self._train_batch_norm):
with tf.variable_scope('Mixed_5a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
scope='MaxPool_1a_3x3')
net = tf.concat([branch_0, branch_1, branch_2], concat_dim)
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1],
scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3],
concat_dim)
with tf.variable_scope('Mixed_5c'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1],
scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
proposal_classifier_features = tf.concat(
[branch_0, branch_1, branch_2, branch_3], concat_dim)
return proposal_classifier_features
|
TensorFlow2/Segmentation/nnUNet/data_preprocessing | data_preprocessing | configs | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
task = {
"01": "Task01_BrainTumour",
"02": "Task02_Heart",
"03": "Task03_Liver",
"04": "Task04_Hippocampus",
"05": "Task05_Prostate",
"06": "Task06_Lung",
"07": "Task07_Pancreas",
"08": "Task08_HepaticVessel",
"09": "Task09_Spleen",
"10": "Task10_Colon",
}
patch_size = {
"01_3d_tf2": [128, 128, 128],
"02_3d_tf2": [80, 192, 160],
"03_3d_tf2": [128, 128, 128],
"04_3d_tf2": [40, 56, 40],
"05_3d_tf2": [20, 320, 256],
"06_3d_tf2": [80, 192, 160],
"07_3d_tf2": [40, 224, 224],
"08_3d_tf2": [64, 192, 192],
"09_3d_tf2": [64, 192, 160],
"10_3d_tf2": [56, 192, 160],
"01_2d_tf2": [192, 160],
"02_2d_tf2": [320, 256],
"03_2d_tf2": [512, 512],
"04_2d_tf2": [56, 40],
"05_2d_tf2": [320, 320],
"06_2d_tf2": [512, 512],
"07_2d_tf2": [512, 512],
"08_2d_tf2": [512, 512],
"09_2d_tf2": [512, 512],
"10_2d_tf2": [512, 512],
}
spacings = {
"01_3d_tf2": [1.0, 1.0, 1.0],
"02_3d_tf2": [1.37, 1.25, 1.25],
"03_3d_tf2": [1, 0.7676, 0.7676],
"04_3d_tf2": [1.0, 1.0, 1.0],
"05_3d_tf2": [3.6, 0.62, 0.62],
"06_3d_tf2": [1.24, 0.79, 0.79],
"07_3d_tf2": [2.5, 0.8, 0.8],
"08_3d_tf2": [1.5, 0.8, 0.8],
"09_3d_tf2": [1.6, 0.79, 0.79],
"10_3d_tf2": [3, 0.78, 0.78],
"11_3d_tf2": [5, 0.741, 0.741],
"01_2d_tf2": [1.0, 1.0],
"02_2d_tf2": [1.25, 1.25],
"03_2d_tf2": [0.7676, 0.7676],
"04_2d_tf2": [1.0, 1.0],
"05_2d_tf2": [0.62, 0.62],
"06_2d_tf2": [0.79, 0.79],
"07_2d_tf2": [0.8, 0.8],
"08_2d_tf2": [0.8, 0.8],
"09_2d_tf2": [0.79, 0.79],
"10_2d_tf2": [0.78, 0.78],
}
ct_min = {
"03": -17,
"06": -1024,
"07": -96,
"08": -3,
"09": -41,
"10": -30,
"11": -958,
}
ct_max = {
"03": 201,
"06": 325,
"07": 215,
"08": 243,
"09": 176,
"10": 165.82,
"11": 93,
}
ct_mean = {"03": 99.4, "06": -158.58, "07": 77.9, "08": 104.37, "09": 99.29, "10": 62.18, "11": -547.7}
ct_std = {"03": 39.36, "06": 324.7, "07": 75.4, "08": 52.62, "09": 39.47, "10": 32.65, "11": 281.08}
|
PyTorch/Translation/GNMT/scripts/tests | tests | train_bench | #!/bin/bash
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
set -e
DATASET_DIR='data/wmt16_de_en'
REPO_DIR='/workspace/gnmt'
REFERENCE_FILE=$REPO_DIR/scripts/tests/reference_training_performance
MATH=$1
if [[ ${MATH} != "fp16" && ${MATH} != "fp32" && ${MATH} != "tf32" ]]; then
echo "Unsupported option for MATH, use either 'fp16' or 'fp32' or 'tf32'"
exit 1
fi
PERF_TOLERANCE=0.9
GPU_NAME=`nvidia-smi --query-gpu=gpu_name --format=csv,noheader |uniq`
echo 'GPU_NAME:' ${GPU_NAME}
GPU_COUNT=`nvidia-smi --query-gpu=gpu_name --format=csv,noheader |wc -l`
echo 'GPU_COUNT:' ${GPU_COUNT}
if [[ ${GPU_COUNT} -eq 1 || ${GPU_COUNT} -eq 2 || ${GPU_COUNT} -eq 4 || ${GPU_COUNT} -eq 8 ]]; then
GLOBAL_BATCH_SIZE=1024
elif [ ${GPU_COUNT} -eq 16 ]; then
GLOBAL_BATCH_SIZE=2048
else
echo "Unsupported number of GPUs"
exit 1
fi
REFERENCE_PERF=`grep "${MATH},${GPU_COUNT},${GPU_NAME}" \
${REFERENCE_FILE} | \cut -f 4 -d ','`
if [ -z "${REFERENCE_PERF}" ]; then
echo "WARNING: COULD NOT FIND REFERENCE PERFORMANCE FOR EXECUTED CONFIG"
TARGET_PERF=''
else
PERF_THRESHOLD=$(awk 'BEGIN {print ('${REFERENCE_PERF}' * '${PERF_TOLERANCE}')}')
TARGET_PERF='--target-perf '${PERF_THRESHOLD}
fi
cd $REPO_DIR
python3 -m torch.distributed.launch --nproc_per_node=${GPU_COUNT} train.py \
--dataset-dir $DATASET_DIR \
--seed 2 \
--epochs 1 \
--remain-steps 1.0 \
--no-eval \
--train-max-size $((128 * ${GPU_COUNT} * 300)) \
--math ${MATH} \
--train-global-batch-size ${GLOBAL_BATCH_SIZE} \
${TARGET_PERF}
|
TensorFlow/Recommendation/NCF | NCF | README | # Neural Collaborative Filtering (NCF) for TensorFlow
This repository provides a script and recipe to train the Neural Collaborative Filtering (NCF) model to achieve state-of-the-art
accuracy, and is tested and maintained by NVIDIA.
The NCF model for TensorFlow1 is no longer maintained and will soon become unavailable. Please consider the DLRM and Wide & Deep models in TensorFlow2 as substitutes for your requirements.
## Table of Contents
* [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default Configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Setup](#setup)
* [Requirements](#requirements)
* [Quick Start Guide](#quick-start-guide)
* [Advanced](#advanced)
* [Command Line Arguments](#command-line-arguments)
* [Getting the Data](#getting-the-data)
* [Multi-dataset](#multi-dataset)
* [Training Process](#training-process)
* [Evaluation Process](#evaluation-process)
* [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Performance Benchmark](#performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 40GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-40gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 32GB)](#training-accuracy-nvidia-dgx-1-8x-v100-32gb)
* [Training Performance Results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 40GB)](#training-performance-nvidia-dgx-a100-8x-a100-40gb)
* [Training performance: NVIDIA DGX-1 (8x V100 32GB)](#training-performance-nvidia-dgx-1-8x-v100-32gb)
* [Inference Performance Results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 40GB)](#inference-performance-nvidia-dgx-a100-1x-a100-40gb)
* [Inference performance: NVIDIA DGX-1 (8x V100 32GB)](#inference-performance-nvidia-dgx-1-8x-v100-32gb)
* [Release Notes](#release-notes)
* [Changelog](#changelog)
* [Known Issues](#known-issues)
* [Multi-GPU Scaling Efficiency](#multi-gpu-scaling-efficiency)
* [Scaling beyond 8 GPUs](#scaling-beyond-8-gpus)
* [Preprocessing Out-of-Memory with 16GB GPUs](#preprocessing-out-of-memory-with-16gb-gpus)
## Model overview
The Neural Collaborative Filtering (NCF) model is a neural network that provides collaborative filtering based on
implicit feedback. Specifically, it provides product recommendations based on user and item interactions. The training
data for this model should contain a sequence of (user ID, item ID) pairs indicating that the specified user has
interacted with an item, for example, by giving a rating or clicking. NCF was first described by
Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu and Tat-Seng Chua in the [Neural Collaborative Filtering
paper](https://arxiv.org/abs/1708.05031).
The implementation in this repository focuses on the NeuMF instantiation of the NCF architecture. We modified it to use
Dropout in the fully connected layers. This reduces overfitting and increases the final accuracy. Training the other two
instantiations of NCF (GMF and MLP) is not supported.
The original paper evaluates the model on the ml-1m dataset.
Conversely, we evaluate on the ml-20m dataset, which provides a more practical production scenario.
However, using the ml-1m dataset is also supported.
This model takes advantage of the mixed precision Tensor Cores found on Volta, Turing, and the NVIDIA Ampere GPU architectures
demonstrating the reduction in
training time possible by leveraging Tensor Cores. In the single GPU configuration, training times can be improved close
to 1.6x through the usage of Tensor Cores.
This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
NCF-TF takes in a sequence of (user ID, item ID) pairs as inputs, then feeds them separately into a matrix
factorization step (where the embeddings are multiplied) and into a multilayer perceptron (MLP) network.
The outputs of the matrix factorization and the MLP network are then combined and fed into a single dense layer which
predicts whether the input user is likely to interact with the input item. The architecture of the MLP network is shown
below.
<p align="center">
<img width="70%" src="./img/ncf_diagram.png" />
<br>
Figure 1. The architecture of a Neural Collaborative Filtering model. Taken from the <a href="https://arxiv.org/abs/1708.05031">Neural Collaborative Filtering paper</a>.
</p>
### Default Configuration
This implementation has the following features:
- model-parallel multi-gpu training with Horovod
- mixed precision training with TF-AMP (TensorFlow-Automatic Mixed Precision), which enables mixed precision training
without any changes to the code-base by performing automatic graph rewrites and loss scaling controlled by an
environmental variable
- fast negative sample generation and data preprocessing with CuPy
- Before each training epoch, the training data is augmented with randomly generated negative samples. A “shortcut” is
enabled by default where the script does not verify that the randomly generated samples are actually negative samples.
We have found that this shortcut has a low impact on model accuracy while considerably improving the speed and memory
footprint of the data augmentation stage of training.
- Note: The negative samples generated for the test set are always verified regardless of the shortcut being enabled or
not.
### Feature support matrix
| Feature | NCF-TF |
|-----------------------|--------------------------|
|Horovod | Yes |
|Automatic mixed precision (AMP) | Yes |
#### Features
*Horovod*
Horovod is a distributed training framework for TensorFlow, Keras, PyTorch and MXNet. The goal of Horovod is to make distributed deep learning fast and easy to use. For more information about how to get started with Horovod, see the [Horovod: Official repository](https://github.com/horovod/horovod).
*Multi-GPU training with Horovod*
Our model uses Horovod to implement efficient multi-GPU training with NCCL. For details, see example sources in this repository or see the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage).
*Automatic Mixed Precision (AMP)*
Computation graphs can be modified by TensorFlow on runtime to support mixed precision training. Detailed explanation of mixed precision can be found in the next section.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta, Turing, and NVIDIA Ampere GPU architectures automatically. The TensorFlow framework code makes all necessary model changes internally.
In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling.
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide.
#### Enabling mixed precision
Mixed precision is enabled in TensorFlow by using the Automatic Mixed Precision (TF-AMP) extension which casts variables to half-precision upon retrieval, while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In TensorFlow, loss scaling can be applied statically by using simple multiplication of loss by a constant value or automatically, by TF-AMP. Automatic mixed precision makes all the adjustments internally in TensorFlow, providing two benefits over manual operations. First, programmers need not modify network model code, reducing development and maintenance effort. Second, using AMP maintains forward and backward compatibility with all the APIs for defining and running TensorFlow models.
To enable mixed precision, you can simply add the values to the environmental variables inside your training script:
- Enable TF-AMP graph rewrite:
```
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
```
- Enable Automated Mixed Precision:
```
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
```
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
## Setup
The following section lists the requirements that you need to meet in order to start training NCF-TF.
### Requirements
This repository contains Dockerfile which extends the TensorFlow NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- TensorFlow 20.07-py3+ NGC container
- Supported GPUs:
- [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
For those unable to use the TensorFlow NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default
parameters of the NCF model on the ml-20m dataset.
1. Clone the repository.
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/TensorFlow/Recommendation/NCF
```
2. Build the NCF TensorFlow NGC container.
After Docker is correctly set up, you can build the NCF image with:
```bash
docker build . -t nvidia_ncf
```
3. Launch the NCF TensorFlow Docker container.
```bash
mkdir data
docker run --runtime=nvidia -it --rm --ipc=host -v ${PWD}/data:/data nvidia_ncf bash
```
This will launch the container and mount the `./data` directory as a volume to the `/data` directory inside the container.
Any datasets and experiment results (logs, checkpoints etc.) saved to `/data` will be accessible in the `./data` directory
on the host.
4. Download and preprocess the dataset.
**ml-20m**
Preprocessing consists of downloading the data, filtering out users that have less than 20 ratings (by default), sorting
the data and dropping the duplicates. No data augmentation techniques are used in the preprocessing stage.
To download and preprocess the ml-20m dataset, run:
```bash
./prepare_dataset.sh
```
**ml-1m**
To download and preprocess the ml-1m dataset, run:
```bash
./prepare_dataset.sh ml-1m
```
This will store the preprocessed training and evaluation data in the `/data` directory, so that it can be later used to
train the model (by passing the appropriate `--data` argument to the `ncf.py` script).
5. Start the training.
After the Docker container is launched, the training with the default hyper-parameters can be started with:
```bash
mpirun -np 4 --allow-run-as-root python ncf.py --amp --data /data/cache/ml-20m --checkpoint-dir /data/checkpoints/
```
After the training is complete, the model parameters that provide the best evaluation accuracy are saved to the
directory passed to the `--checkpoint-dir` argument. By default, this will be in the `/data/checkpoints/` directory.
6. Perform a validation/evaluation.
To run evaluation on a specific checkpoint, simply run the following command:
```bash
python ncf.py --data /data/cache/ml-20m --mode test --load-checkpoint-path /data/checkpoints/model.ckpt
```
Note: TensorFlow checkpoints consist of three files each with a `*.ckpt` prefix.
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Command Line Arguments
To see the full list of available options and their descriptions, use the `-h` or `--help` command line option, for
example:
```bash
python ncf.py --help
```
Aside from options to set hyperparameters, the relevant options to control the behavior of the script are:
```
--data DATA path to test and training data files
-e EPOCHS, --epochs EPOCHS
number of epochs to train for
-b BATCH_SIZE, --batch-size BATCH_SIZE
number of examples for each iteration
--valid-users-per-batch VALID_USERS_PER_BATCH
Number of users tested in each evaluation batch
-n NEGATIVE_SAMPLES, --negative-samples NEGATIVE_SAMPLES
number of negative examples per interaction
-k TOPK, --topk TOPK rank for test examples to be considered a hit
--amp enable half-precision computations using automatic
mixed precision (only available in supported
containers)
--xla enable TensorFlow XLA (Accelerated Linear Algebra)
--valid-negative VALID_NEGATIVE
Number of negative samples for each positive test
example
--loss-scale LOSS_SCALE
Loss scale value to use when manually enabling mixed precision training
--checkpoint-dir CHECKPOINT_DIR
Path to store the result checkpoint file for training, or to read from for evaluation
--mode {train,test} Passing "test" will only run a single evaluation,
otherwise full training will be performed
--no-neg-trick do not use negative sample generation shortcut to
speed up preprocessing (will increase GPU memory
consumption)
--eval-after EVAL_AFTER
Perform evaluations only after this many epochs
--verbose Log the performance and accuracy after every epoch
```
### Getting the Data
For each user, the test dataset is generated by removing one movie the user has
interacted with. For each removed movie, the data is augmented with a large
number of movies (corresponding to the `--valid-negative` option) that the user
has not interacted with.
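The leave-one-out evaluation split can be pictured with the simplified sketch below. This is an illustration only, not the code used by `prepare_dataset.sh`; the function name, the `n_items` argument, and the choice of which interaction to hold out are assumptions made for the example.
```python
import numpy as np

def leave_one_out_split(interactions, n_items, valid_negative=99, seed=0):
    """Hold out one interacted item per user and pair it with `valid_negative`
    verified negatives. `interactions` is a DataFrame with `userId`/`movieId`
    columns; `n_items` is the total number of items in the catalog."""
    rng = np.random.default_rng(seed)
    test_set = []
    for user, group in interactions.groupby("userId"):
        seen = set(group["movieId"])
        positive = group["movieId"].iloc[-1]  # the held-out positive item
        negatives = set()
        while len(negatives) < valid_negative:
            candidate = int(rng.integers(n_items))
            if candidate not in seen:
                negatives.add(candidate)
        test_set.append((user, positive, sorted(negatives)))
    return test_set
```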
The repository contains the `prepare_dataset.sh` that will preprocess the training and test datasets.
By default, the data will be downloaded to the `/data` directory.
#### Multi-dataset
This implementation is tuned for the ml-20m and ml-1m datasets. Using other
datasets might require tuning some hyperparameters (for example, learning rate,
beta1, beta2).
If you'd like to use your custom dataset, you can do so by adding support for
it in the `prepare_dataset.sh` and `download_dataset.sh` scripts. The required
format of the data is a CSV file which should follow the pattern outlined
below:
```
userId, movieId
1,2
1,10
...
```
The CSV file may contain additional columns with extra features such as ratings
and timestamps, but only the `userId` and `movieId` columns are required.
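As an illustration, a raw interaction log can be reduced to the required two-column format with a few lines of pandas. The input file name and its extra columns below are hypothetical; only the `userId` and `movieId` column names match the expected format.
```python
import pandas as pd

# Hypothetical raw log that also contains rating and timestamp columns.
ratings = pd.read_csv("raw_interactions.csv")

# Keep only the two required columns and drop duplicate (user, item) pairs.
interactions = (
    ratings[["userId", "movieId"]]
    .drop_duplicates()
    .sort_values(["userId", "movieId"])
)
interactions.to_csv("custom_dataset.csv", index=False)
```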
The performance of the model depends on the dataset size. Generally, the model
should scale better for datasets containing more data points. For a smaller
dataset, you might experience slower performance as fixed cost operations that
do not scale with input size will have a larger impact. Furthermore, it will be
difficult for the model to converge.
### Training Process
The training can be launched with the `ncf.py` script. This script will train the
NCF model for a number of epochs specified by the `--epochs` argument, which has
a default value of 30.
During training, the script will begin logging after the number of epochs
specified by the `--eval-after` option. After that the script will output a line like the one below:
```
DLL 2020-07-03 10:58:43.371321 - (26,) train_time : 9.889576196670532 eval_time : 0.674187183380127 hr@10 : 0.9526329850606168 ndcg : 0.7448715819572108
```
The evaluation metrics are: HR (hit rate), and NDCG (normalized discounted
cumulative gain). In the evaluation set, each user will be assigned one item
that they have actually interacted with, and a number (by default 99) of items
that they have not interacted with. For each user, the evaluation process will
rank each of the items assigned to that user based on the user’s likeliness to
interact with the items. The hit rate measures the percentage of users for
which the item that they have interacted with is ranked within the top `k` items,
where `k` is a number (by default 10) specified by the `-k` option. NDCG has a
similar meaning, except the rank of the positive item is taken into account.
Typically, HR is used as the primary evaluation metric.
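For reference, both metrics can be computed for a single user as in the sketch below. This is a simplified illustration rather than the code used by `ncf.py`.
```python
import numpy as np

def hr_ndcg_single_user(positive_score, negative_scores, k=10):
    """HR@k and NDCG@k for one user whose single positive item is scored
    against a list of sampled negative items."""
    # Rank of the positive item among all candidates (1 = highest score).
    rank = 1 + int(np.sum(np.asarray(negative_scores) > positive_score))
    hit = 1.0 if rank <= k else 0.0
    # With a single relevant item the ideal DCG is 1, so NDCG is 1/log2(rank + 1).
    ndcg = 1.0 / np.log2(rank + 1) if rank <= k else 0.0
    return hit, ndcg

# Example: one positive item scored against 99 sampled negatives.
hit, ndcg = hr_ndcg_single_user(0.8, np.random.uniform(0, 1, size=99), k=10)
```
The values reported by the training script are averages of these per-user metrics over all test users.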
Additionally, the model parameters that give the best accuracy in validation
will be stored at the directory pointed to by the `--checkpoint-dir` argument.
Multiple GPUs can be used for training through Horovod. The number of GPUs can
be controlled by the `-np` parameter passed to `mpirun`.
### Evaluation Process
The evaluation process can be run by the `ncf.py` script as well. By passing the
`--mode=test` argument, the script will run evaluation once using the TensorFlow
checkpoint specified by the `--checkpoint-dir` argument.
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model
performance in training and inference modes.
#### Performance Benchmark
To benchmark the training and inference performance, run:
```
mpirun -np 1 --allow-run-as-root python ncf.py --data /data/cache/ml-20m
```
By default, the `ncf.py` script outputs metrics describing the following:
* Training speed and throughput
* Evaluation speed and throughput
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
All throughput numbers are reported in millions of samples per second while time-to-train numbers are in seconds.
#### Training accuracy results
For all the sections below, our results were obtained by running:
```bash
mpirun -np <number_of_GPUs> --allow-run-as-root python ncf.py [--amp] --data /data/cache/ml-20m
```
in the TensorFlow-1 20.07 NGC container.
##### Training accuracy: NVIDIA DGX A100 (8x A100 40GB)
| GPUs | Batch size / GPU | Accuracy - TF32 | Accuracy - mixed precision | Time to train - TF32 [s] | Time to train - mixed precision [s] | Time to train speedup (TF32 to mixed precision)
|-------:|-----------------:|-------------:|-----------:|----------------:|--------------:|---------------:|
| 1 | 1,048,576 | 0.9588 | 0.9589 | 59.4 | 53.1 | 1.12 |
| 4 | 262,144 | 0.9588 | 0.9590 | 22.8 | 21.5 | 1.06 |
| 8 | 131,072 | 0.9587 | 0.9589 | 19.8 | 20.2 | 0.98 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 32GB)
| GPUs | Batch size / GPU | Accuracy - FP32 | Accuracy - mixed precision | Time to train - FP32 | Time to train - mixed precision | Time to train speedup (FP32 to mixed precision)
|-------:|-----------------:|----------------:|--------------:|-------------:|-----------:|---------------:|
| 1 | 1,048,576 | 0.9583 | 0.9589 | 120.9 | 91.6 | 1.32 |
| 4 | 262,144 | 0.9589 | 0.9583 | 43.7 | 31.8 | 1.37 |
| 8 | 131,072 | 0.9590 | 0.9588 | 26.2 | 21.9 | 1.20 |
### Training Performance Results
For all the sections below, our results were obtained by running:
```bash
mpirun -np <number_of_GPUs> --allow-run-as-root python ncf.py [--amp] --data /data/cache/ml-20m
```
in the TensorFlow-1 20.07 NGC container.
##### Training performance: NVIDIA DGX A100 (8x A100 40GB)
| GPUs | Batch size / GPU | Throughput - TF32 | Throughput - mixed precision | Throughput speedup (TF32 - mixed precision) | Strong scaling - TF32 | Strong scaling - mixed precision
|-------:|-----------------:|-------------------:|-----------------:|---------------------:|---:|---:|
| 1 | 1,048,576 | 20.18 | 22.84 | 1.132 | 1 | 1 |
| 4 | 262,144 | 60.34 | 62.70 | 1.039 | 2.99 | 2.75 |
| 8 | 131,072 | 89.88 | 80.86 | 0.900 | 4.45 | 3.54 |
##### Training performance: NVIDIA DGX-1 (8x V100 32GB)
| GPUs | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 - mixed precision) | Strong scaling - FP32 | Strong scaling - mixed precision
|-------:|-----------------:|-------------------:|-----------------:|---------------------:|---:|---:|
| 1 | 1,048,576 | 9.73 | 15.21 | 1.563 | 1 | 1 |
| 4 | 262,144 | 30.31 | 39.47 | 1.302 | 3.11 | 2.60 |
| 8 | 131,072 | 50.91 | 59.13 | 1.161 | 5.23 | 3.89 |
### Inference Performance Results
Our results were obtained by running the `inference.py` script in the TensorFlow-1 20.07 NGC container.
Throughput is reported in millions of samples per second while latency is reported in seconds.
##### Inference performance: NVIDIA DGX A100 (1x A100 40GB)
TF32
| Batch size | Throughput Avg | Latency Avg | Latency 90% | Latency 95% | Latency 99% |
|-------------:|-----------------:|--------------:|--------------:|--------------:|---------------:|
| 1,024 | 1.67 | 0.0006 | 0.0006 | 0.0007 | 0.0007 |
| 4,096 | 6.02 | 0.0007 | 0.0007 | 0.0007 | 0.0007 |
| 16,384 | 19.01 | 0.0009 | 0.0009 | 0.0009 | 0.0009 |
| 65,536 | 34.91 | 0.0019 | 0.0019 | 0.0019 | 0.0019 |
| 262,144 | 44.72 | 0.0059 | 0.0063 | 0.0063 | 0.0066 |
| 1,048,576 | 47.22 | 0.0222 | 0.0230 | 0.0232 | 0.0237 |
FP16
| Batch size | Throughput Avg | Latency Avg | Latency 90% | Latency 95% | Latency 99% |
|-------------:|-----------------:|--------------:|--------------:|--------------:|---------------:|
| 1,024 | 1.34 | 0.0008 | 0.0008 | 0.0008 | 0.0008 |
| 4,096 | 5.23 | 0.0008 | 0.0008 | 0.0008 | 0.0008 |
| 16,384 | 17.61 | 0.0009 | 0.0009 | 0.0010 | 0.0010 |
| 65,536 | 38.63 | 0.0017 | 0.0017 | 0.0018 | 0.0018 |
| 262,144 | 55.36 | 0.0047 | 0.0049 | 0.0050 | 0.0051 |
| 1,048,576 | 59.48 | 0.0176 | 0.0178 | 0.0179 | 0.0184 |
##### Inference performance: NVIDIA DGX-1 (8x V100 32GB)
FP32
| Batch size | Throughput Avg | Latency Avg | Latency 90% | Latency 95% | Latency 99% |
|-------------:|-----------------:|--------------:|--------------:|--------------:|---------------:|
| 1,024 | 0.79 | 0.0013 | 0.0015 | 0.0015 | 0.0016 |
| 4,096 | 2.88 | 0.0014 | 0.0016 | 0.0016 | 0.0017 |
| 16,384 | 8.38 | 0.0020 | 0.0021 | 0.0021 | 0.0024 |
| 65,536 | 16.77 | 0.0039 | 0.0041 | 0.0041 | 0.0041 |
| 262,144 | 22.53 | 0.0116 | 0.0118 | 0.0119 | 0.0122 |
| 1,048,576 | 25.14 | 0.0417 | 0.0425 | 0.0431 | 0.0440 |
FP16
| Batch size | Throughput Avg | Latency Avg | Latency 90% | Latency 95% | Latency 99% |
|-----------:|-----------------:|--------------:|--------------:|-------------:|---------------:|
| 1,024 | 0.69 | 0.0015 | 0.0017 | 0.0017 | 0.0018 |
| 4,096 | 2.64 | 0.0016 | 0.0017 | 0.0017 | 0.0018 |
| 16,384 | 8.84 | 0.0019 | 0.0020 | 0.0020 | 0.0021 |
| 65,536 | 21.43 | 0.0031 | 0.0032 | 0.0032 | 0.0032 |
| 262,144 | 33.61 | 0.0078 | 0.0080 | 0.0081 | 0.0083 |
| 1,048,576 | 38.83 | 0.0270 | 0.0276 | 0.0277 | 0.0286 |
## Release Notes
### Changelog
April 2023
- Ceased maintenance of this model in TensorFlow1
June 2020
- Updated performance tables to include A100 results
March 2019
- Initial Release
### Known Issues
#### AMP speedup for Ampere
In this model the TF32 precision can in some cases be as fast as the FP16 precision on Ampere GPUs.
This is because TF32 also uses Tensor Cores and doesn't need any additional logic
such as maintaining FP32 master weights and casts.
However, please note that NCF is, by modern recommender standards, a very small model.
Larger models should still see significant benefits of using FP16 math.
#### Multi-GPU Scaling Efficiency
Currently, this model does not exhibit good scaling efficiency when scaling to
4 and 8 GPUs. Since we could not find hyper-parameters that could hit the
target accuracy for batch size of over 1 million samples, we elected to use a
strong scaling strategy which generally has worse scaling efficiency compared
to a more common weak scaling strategy. Additionally, we believe that the small
dataset size does not facilitate great scaling. However, the training scripts
allow the use of custom datasets provided they are in the correct format.
#### Scaling beyond 8 GPUs
Neural Collaborative Filtering (NCF) is a relatively lightweight model that
trains quickly with this relatively smaller dataset, ml-20m. Because of the
smaller dataset, the high ratio of communication to computation makes it
difficult to efficiently use more than 8 GPUs. Typically, this is not an issue
because when using 8 GPUs with FP16 precision the training is sufficiently
fast. However, if you would like to scale the training to 16 GPUs and beyond,
you might try modifying the model so that the communication to computation
ratio facilitates better scaling. This could be done, for example, by finding
hyper-parameters that enable using a larger global batch size.
#### Preprocessing Out-of-Memory with 16GB GPUs
When running on GPUs with 16GB of memory, ensure the `--no-neg-trick` flag is
**not** set. Otherwise, the data augmentation stage of training will consume
too much GPU memory, causing TensorFlow to raise an out-of-memory error.
When this flag is not set, the negative-sample generation shortcut is used: the
script does not verify that the randomly generated samples are actually negative
(verification still occurs for the negative samples generated for the test set).
The data structures needed for that verification therefore do not have to be kept
in GPU memory during training, which reduces memory consumption.
|
PyTorch/Classification/GPUNet/triton/175ms | 175ms | README | # Deploying the GPUNet model on Triton Inference Server
This folder contains instructions for deployment to run inference
on Triton Inference Server as well as a detailed performance analysis.
The purpose of this document is to help you achieve
the best inference performance.
## Table of contents
- [Solution overview](#solution-overview)
- [Introduction](#introduction)
- [Deployment process](#deployment-process)
- [Setup](#setup)
- [Quick Start Guide](#quick-start-guide)
- [Performance](#performance)
- [Offline scenario](#offline-scenario)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16)
- [Online scenario](#online-scenario)
- [Online: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#online-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16)
- [Online: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#online-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16)
- [Advanced](#advanced)
- [Step by step deployment process](#step-by-step-deployment-process)
- [Latency explanation](#latency-explanation)
- [Release notes](#release-notes)
- [Changelog](#changelog)
- [Known issues](#known-issues)
## Solution overview
### Introduction
The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server)
provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs.
The server provides an inference service via an HTTP or gRPC endpoint,
allowing remote clients to request inferencing for any number of GPU
or CPU models being managed by the server.
This README provides step-by-step deployment instructions for models generated
during training (as described in the [model README](../readme.md)).
Additionally, this README provides the corresponding deployment scripts that
ensure optimal GPU utilization during inferencing on Triton Inference Server.
### Deployment process
The deployment process consists of two steps:
1. Conversion.
The purpose of conversion is to find the best performing model
format supported by Triton Inference Server.
Triton Inference Server uses a number of runtime backends such as
[TensorRT](https://developer.nvidia.com/tensorrt),
[LibTorch](https://github.com/triton-inference-server/pytorch_backend) and
[ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend)
to support various model types. Refer to the
[Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton)
for a list of available backends.
2. Configuration.
Model configuration on Triton Inference Server, which generates
necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md).
After deployment, Triton Inference Server is used for evaluation of the converted model in two steps:
1. Correctness tests.
Produce results which are tested against given correctness thresholds.
2. Performance tests.
Produce latency and throughput results for offline (static batching)
and online (dynamic batching) scenarios.
All steps are executed by the provided runner script. Refer to the [Quick Start Guide](#quick-start-guide).
## Setup
Ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [NVIDIA PyTorch NGC container 21.12](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch)
* [NVIDIA Triton Inference Server NGC container 21.12](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver)
* [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html)
* [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU
## Quick Start Guide
Running the following scripts will build and launch the container with all required dependencies for native PyTorch as well as Triton Inference Server. This is necessary for running inference and can also be used for data download, processing, and training of the model.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/Classification/GPUNet
```
2. Prepare dataset.
See the [Quick Start Guide](../../README.md#prepare-the-dataset)
3. Build and run a container that extends NGC PyTorch with the Triton client libraries and necessary dependencies.
```
./triton/scripts/docker/build.sh
./triton/scripts/docker/interactive.sh /path/to/imagenet/val/
```
4. Execute the runner script (note that the run scripts are prepared per NVIDIA GPU).
```
NVIDIA DGX-1 (1x V100 32GB): ./triton/175ms/runner/start_NVIDIA-DGX-1-\(1x-V100-32GB\).sh
NVIDIA DGX A100 (1x A100 80GB): ./triton/175ms/runner/start_NVIDIA-DGX-A100-\(1x-A100-80GB\).sh
```
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect
the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to
[NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Offline scenario
The offline scenario assumes the client and server are located on the same host. The tests use:
- tensors passed through shared memory between the client and server; the Perf Analyzer flag `shared-memory=system` is used
- a single request sent from the client to the server with a static batch size
#### Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 450.00 | 0.05 | 0.22 | 0.08 | 0.18 | 1.68 | 0.01 | 0.00 | 2.21 | 2.24 | 2.26 | 2.30 | 2.22 |
| 2 | 1 | 720.00 | 0.05 | 0.22 | 0.08 | 0.27 | 2.14 | 0.01 | 0.00 | 2.77 | 2.80 | 2.83 | 2.88 | 2.77 |
| 4 | 1 | 1008.00 | 0.05 | 0.23 | 0.08 | 0.46 | 3.14 | 0.01 | 0.00 | 3.96 | 4.01 | 4.03 | 4.09 | 3.96 |
| 8 | 1 | 1320.00 | 0.05 | 0.20 | 0.05 | 0.80 | 4.94 | 0.01 | 0.00 | 6.07 | 6.12 | 6.14 | 6.16 | 6.05 |
| 16 | 1 | 1536.00 | 0.05 | 0.22 | 0.08 | 1.44 | 8.54 | 0.01 | 0.00 | 10.33 | 10.38 | 10.38 | 10.41 | 10.33 |
| 32 | 1 | 1664.00 | 0.05 | 0.22 | 0.08 | 2.76 | 15.88 | 0.02 | 0.00 | 19.01 | 19.05 | 19.07 | 19.14 | 19.01 |
| 64 | 1 | 1728.00 | 0.05 | 0.28 | 0.08 | 5.79 | 29.59 | 0.03 | 0.00 | 35.84 | 35.94 | 36.02 | 36.11 | 35.83 |
</details>
#### Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 679.00 | 0.02 | 0.06 | 0.02 | 0.12 | 1.25 | 0.00 | 0.00 | 1.48 | 1.49 | 1.49 | 1.53 | 1.47 |
| 2 | 1 | 1164.00 | 0.02 | 0.06 | 0.02 | 0.17 | 1.44 | 0.00 | 0.00 | 1.72 | 1.74 | 1.75 | 1.79 | 1.72 |
| 4 | 1 | 1736.00 | 0.03 | 0.07 | 0.02 | 0.28 | 1.89 | 0.00 | 0.00 | 2.29 | 2.38 | 2.40 | 2.43 | 2.30 |
| 8 | 1 | 2320.00 | 0.03 | 0.08 | 0.02 | 0.52 | 2.79 | 0.00 | 0.00 | 3.45 | 3.49 | 3.50 | 3.51 | 3.44 |
| 16 | 1 | 2640.00 | 0.03 | 0.11 | 0.02 | 1.25 | 4.59 | 0.01 | 0.00 | 6.01 | 6.11 | 6.13 | 6.24 | 6.02 |
| 32 | 1 | 2880.00 | 0.03 | 0.19 | 0.03 | 2.77 | 8.04 | 0.02 | 0.00 | 11.08 | 11.14 | 11.19 | 11.20 | 11.09 |
| 64 | 1 | 3072.00 | 0.03 | 0.20 | 0.03 | 5.49 | 14.76 | 0.03 | 0.00 | 20.50 | 20.59 | 20.64 | 21.48 | 20.54 |
</details>
### Online scenario
The online scenario assumes the client and server are located on different hosts. The tests use:
- tensors passed over HTTP from the client to the server
- concurrent requests sent from the client to the server; the final batch is created on the server side
#### Online: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_online_2/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 8 | 463.00 | 0.12 | 1.16 | 10.98 | 0.26 | 4.68 | 0.01 | 0.00 | 17.12 | 18.64 | 19.30 | 20.15 | 17.21 |
| 1 | 16 | 670.00 | 0.16 | 2.76 | 13.21 | 0.76 | 6.62 | 0.02 | 0.00 | 25.05 | 26.14 | 26.40 | 27.35 | 23.53 |
| 1 | 24 | 838.00 | 0.19 | 4.46 | 15.00 | 1.26 | 7.33 | 0.02 | 0.00 | 30.21 | 30.91 | 31.23 | 33.42 | 28.26 |
| 1 | 32 | 906.00 | 0.22 | 6.16 | 16.01 | 2.55 | 9.59 | 0.03 | 0.00 | 36.47 | 39.98 | 40.78 | 46.44 | 34.57 |
| 1 | 40 | 949.00 | 0.21 | 9.12 | 18.35 | 3.13 | 10.57 | 0.04 | 0.00 | 43.66 | 50.86 | 51.64 | 56.49 | 41.41 |
| 1 | 48 | 1053.00 | 0.22 | 8.29 | 21.50 | 3.56 | 10.74 | 0.04 | 0.00 | 46.24 | 51.10 | 53.02 | 54.33 | 44.37 |
| 1 | 56 | 1037.00 | 0.26 | 13.87 | 21.57 | 4.19 | 12.51 | 0.05 | 0.00 | 54.20 | 68.42 | 71.18 | 76.82 | 52.45 |
| 1 | 64 | 1146.00 | 0.24 | 10.20 | 25.70 | 4.64 | 13.37 | 0.06 | 0.00 | 55.59 | 65.23 | 66.42 | 74.42 | 54.21 |
| 1 | 72 | 1117.00 | 0.26 | 14.73 | 24.72 | 6.95 | 15.89 | 0.08 | 0.00 | 70.17 | 78.56 | 79.46 | 80.07 | 62.63 |
| 1 | 80 | 1068.00 | 0.27 | 20.57 | 24.80 | 9.11 | 17.19 | 0.08 | 0.00 | 83.16 | 94.21 | 95.05 | 104.18 | 72.03 |
| 1 | 88 | 1202.00 | 0.27 | 17.69 | 28.71 | 7.16 | 17.24 | 0.08 | 0.00 | 72.00 | 89.29 | 97.32 | 112.86 | 71.15 |
| 1 | 96 | 1222.00 | 0.27 | 19.24 | 29.13 | 8.20 | 18.05 | 0.09 | 0.00 | 79.70 | 94.74 | 99.06 | 112.32 | 74.97 |
| 1 | 104 | 1223.00 | 0.31 | 17.00 | 33.40 | 9.15 | 20.36 | 0.09 | 0.00 | 85.34 | 100.78 | 111.91 | 116.65 | 80.32 |
| 1 | 112 | 1284.72 | 0.30 | 17.92 | 35.11 | 10.01 | 21.42 | 0.10 | 0.00 | 84.71 | 110.02 | 114.83 | 120.93 | 84.86 |
| 1 | 120 | 1205.00 | 0.32 | 20.18 | 36.48 | 12.68 | 24.79 | 0.12 | 0.00 | 101.85 | 120.79 | 122.94 | 124.10 | 94.58 |
| 1 | 128 | 1358.00 | 0.36 | 19.33 | 40.48 | 9.30 | 21.78 | 0.12 | 0.00 | 91.34 | 111.79 | 117.33 | 119.85 | 91.37 |
| 1 | 136 | 1311.00 | 0.30 | 19.90 | 40.81 | 10.97 | 24.24 | 0.13 | 0.00 | 97.12 | 121.21 | 122.16 | 138.63 | 96.36 |
| 1 | 144 | 1316.00 | 0.33 | 21.60 | 40.88 | 13.16 | 28.39 | 0.16 | 0.00 | 113.62 | 131.17 | 136.02 | 138.50 | 104.53 |
| 1 | 152 | 1344.00 | 0.32 | 21.58 | 46.75 | 12.27 | 25.42 | 0.13 | 0.00 | 107.65 | 128.42 | 130.97 | 157.26 | 106.47 |
| 1 | 160 | 1346.00 | 0.32 | 27.56 | 40.14 | 14.34 | 31.42 | 0.16 | 0.00 | 131.24 | 145.92 | 146.20 | 146.55 | 113.94 |
| 1 | 168 | 1394.00 | 0.33 | 22.48 | 52.94 | 11.70 | 26.70 | 0.14 | 0.00 | 116.36 | 139.53 | 144.13 | 147.28 | 114.27 |
| 1 | 176 | 1283.00 | 0.46 | 19.78 | 52.26 | 16.86 | 32.78 | 0.17 | 0.00 | 120.30 | 162.20 | 162.90 | 164.82 | 122.30 |
| 1 | 184 | 1282.00 | 0.35 | 33.05 | 47.57 | 15.81 | 30.73 | 0.16 | 0.00 | 132.25 | 162.44 | 166.19 | 177.57 | 127.67 |
| 1 | 192 | 1384.62 | 0.42 | 18.01 | 62.42 | 14.52 | 29.11 | 0.15 | 0.00 | 127.38 | 153.04 | 158.47 | 175.99 | 124.64 |
| 1 | 200 | 1363.00 | 0.43 | 23.70 | 59.96 | 16.24 | 33.37 | 0.16 | 0.00 | 131.84 | 166.32 | 180.64 | 200.00 | 133.87 |
| 1 | 208 | 1401.00 | 0.49 | 23.61 | 64.76 | 15.95 | 31.32 | 0.17 | 0.00 | 138.53 | 157.89 | 160.36 | 199.73 | 136.30 |
| 1 | 216 | 1412.00 | 0.33 | 33.12 | 61.57 | 17.23 | 32.44 | 0.17 | 0.00 | 147.33 | 167.41 | 174.59 | 188.53 | 144.86 |
| 1 | 224 | 1386.00 | 0.39 | 30.63 | 67.18 | 15.08 | 31.89 | 0.16 | 0.00 | 145.52 | 176.12 | 183.03 | 212.18 | 145.33 |
| 1 | 232 | 1410.00 | 0.44 | 27.54 | 73.21 | 17.18 | 32.10 | 0.17 | 0.00 | 150.59 | 183.86 | 194.89 | 214.27 | 150.65 |
| 1 | 240 | 1428.00 | 0.47 | 32.68 | 71.93 | 16.27 | 30.38 | 0.17 | 0.00 | 152.66 | 178.22 | 181.79 | 182.79 | 151.91 |
| 1 | 248 | 1404.00 | 0.46 | 35.46 | 75.09 | 16.28 | 32.17 | 0.16 | 0.00 | 159.62 | 206.98 | 216.56 | 218.05 | 159.63 |
| 1 | 256 | 1356.00 | 0.72 | 20.03 | 99.56 | 20.05 | 33.35 | 0.17 | 0.00 | 193.38 | 201.67 | 202.06 | 202.46 | 173.88 |
</details>
#### Online: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_online_2/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 8 | 814.00 | 0.11 | 0.74 | 6.07 | 0.23 | 2.63 | 0.01 | 0.00 | 9.63 | 11.29 | 11.77 | 12.12 | 9.78 |
| 1 | 16 | 1119.00 | 0.15 | 2.98 | 6.25 | 0.97 | 3.88 | 0.02 | 0.00 | 13.15 | 19.89 | 20.16 | 20.80 | 14.26 |
| 1 | 24 | 1272.00 | 0.17 | 5.14 | 7.02 | 1.44 | 4.74 | 0.04 | 0.00 | 17.71 | 28.49 | 28.75 | 30.49 | 18.55 |
| 1 | 32 | 1561.00 | 0.18 | 4.72 | 9.26 | 1.40 | 4.82 | 0.03 | 0.00 | 21.40 | 24.38 | 24.97 | 27.41 | 20.41 |
| 1 | 40 | 1595.00 | 0.13 | 7.56 | 9.58 | 1.90 | 5.50 | 0.05 | 0.00 | 27.03 | 32.66 | 33.99 | 37.28 | 24.72 |
| 1 | 48 | 1790.00 | 0.15 | 7.37 | 10.12 | 2.40 | 6.31 | 0.05 | 0.00 | 27.54 | 34.28 | 37.34 | 39.77 | 26.40 |
| 1 | 56 | 1904.00 | 0.17 | 8.47 | 11.34 | 2.50 | 6.59 | 0.05 | 0.00 | 30.16 | 36.48 | 38.73 | 46.14 | 29.12 |
| 1 | 64 | 1948.00 | 0.16 | 10.50 | 10.93 | 3.09 | 7.57 | 0.06 | 0.00 | 34.87 | 39.38 | 41.02 | 43.31 | 32.30 |
| 1 | 72 | 1921.00 | 0.22 | 10.51 | 13.95 | 3.97 | 7.84 | 0.07 | 0.00 | 38.30 | 47.12 | 47.94 | 52.26 | 36.56 |
| 1 | 80 | 1992.00 | 0.15 | 12.90 | 13.38 | 4.49 | 8.48 | 0.08 | 0.00 | 41.92 | 48.74 | 50.64 | 53.21 | 39.48 |
| 1 | 88 | 2015.00 | 0.20 | 13.38 | 14.43 | 5.49 | 9.14 | 0.09 | 0.00 | 44.11 | 61.05 | 64.80 | 72.96 | 42.72 |
| 1 | 96 | 2155.00 | 0.26 | 13.70 | 14.67 | 5.15 | 9.82 | 0.09 | 0.00 | 44.64 | 57.22 | 61.18 | 63.20 | 43.69 |
| 1 | 104 | 2222.78 | 0.22 | 12.69 | 16.85 | 5.58 | 10.42 | 0.10 | 0.00 | 47.78 | 58.21 | 62.93 | 70.73 | 45.85 |
| 1 | 112 | 2229.00 | 0.20 | 17.51 | 14.78 | 5.73 | 10.63 | 0.10 | 0.00 | 51.75 | 59.36 | 62.31 | 69.00 | 48.95 |
| 1 | 120 | 2323.68 | 0.21 | 16.15 | 17.07 | 5.93 | 10.73 | 0.10 | 0.00 | 52.25 | 61.82 | 64.02 | 67.84 | 50.20 |
| 1 | 128 | 2302.00 | 0.18 | 16.02 | 18.20 | 7.42 | 12.04 | 0.13 | 0.00 | 58.55 | 71.09 | 72.66 | 75.42 | 54.00 |
| 1 | 136 | 2403.00 | 0.21 | 16.19 | 20.46 | 6.30 | 11.58 | 0.10 | 0.00 | 56.70 | 68.51 | 70.63 | 76.49 | 54.85 |
| 1 | 144 | 2340.00 | 0.16 | 21.65 | 18.10 | 7.29 | 12.24 | 0.12 | 0.00 | 63.71 | 76.17 | 77.64 | 81.72 | 59.57 |
| 1 | 152 | 2365.00 | 0.36 | 12.02 | 25.57 | 9.20 | 16.00 | 0.17 | 0.00 | 72.96 | 74.10 | 74.33 | 75.89 | 63.32 |
| 1 | 160 | 2389.00 | 0.16 | 20.48 | 21.98 | 8.46 | 13.62 | 0.14 | 0.00 | 67.97 | 84.11 | 86.19 | 87.81 | 64.83 |
| 1 | 168 | 2508.49 | 0.18 | 14.93 | 27.00 | 8.85 | 14.51 | 0.14 | 0.00 | 66.48 | 81.76 | 86.21 | 90.01 | 65.61 |
| 1 | 176 | 2448.00 | 0.20 | 16.68 | 28.23 | 8.67 | 14.30 | 0.14 | 0.00 | 71.33 | 84.01 | 84.99 | 92.62 | 68.22 |
| 1 | 184 | 2502.00 | 0.20 | 14.14 | 31.41 | 8.82 | 15.39 | 0.15 | 0.00 | 72.39 | 93.16 | 97.28 | 101.08 | 70.11 |
| 1 | 192 | 2494.00 | 0.15 | 18.25 | 30.75 | 9.50 | 15.34 | 0.18 | 0.00 | 76.37 | 90.09 | 100.47 | 121.06 | 74.17 |
| 1 | 200 | 2516.00 | 0.14 | 20.15 | 28.80 | 9.70 | 16.65 | 0.16 | 0.00 | 78.82 | 92.92 | 96.43 | 106.64 | 75.60 |
| 1 | 208 | 2665.00 | 0.18 | 16.11 | 34.07 | 8.78 | 14.85 | 0.15 | 0.00 | 74.73 | 89.39 | 96.32 | 102.39 | 74.14 |
| 1 | 216 | 2624.00 | 0.13 | 23.95 | 29.77 | 8.81 | 16.23 | 0.16 | 0.00 | 80.98 | 94.58 | 100.95 | 107.04 | 79.05 |
| 1 | 224 | 2671.33 | 0.15 | 22.46 | 33.23 | 9.14 | 16.35 | 0.16 | 0.00 | 83.92 | 98.92 | 103.15 | 110.30 | 81.49 |
| 1 | 232 | 2675.00 | 0.24 | 17.74 | 38.86 | 10.33 | 16.62 | 0.16 | 0.00 | 86.56 | 103.55 | 106.26 | 109.90 | 83.94 |
| 1 | 240 | 2725.00 | 0.16 | 19.73 | 38.43 | 9.74 | 16.69 | 0.17 | 0.00 | 85.56 | 102.57 | 105.98 | 110.88 | 84.92 |
| 1 | 248 | 2822.00 | 0.29 | 14.35 | 44.63 | 8.61 | 16.73 | 0.14 | 0.00 | 84.43 | 104.72 | 105.87 | 122.66 | 84.77 |
| 1 | 256 | 2852.00 | 0.28 | 16.62 | 44.71 | 8.40 | 16.07 | 0.14 | 0.00 | 89.04 | 102.50 | 106.28 | 113.40 | 86.23 |
</details>
## Advanced
| Inference runtime | Mnemonic used in scripts |
|-------------------|--------------------------|
| [TorchScript Tracing](https://pytorch.org/docs/stable/jit.html) | `ts-trace` |
| [TorchScript Scripting](https://pytorch.org/docs/stable/jit.html) | `ts-script` |
| [ONNX](https://onnx.ai) | `onnx` |
| [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` |
### Step by step deployment process
The commands described below can be used for exporting, converting, and profiling the model.
#### Clone Repository
IMPORTANT: This step is executed on the host computer.
<details>
<summary>Clone Repository Command</summary>
```shell
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/Classification/GPUNet
```
</details>
#### Start Triton Inference Server
Set up the environment on the host computer and start Triton Inference Server.
<details>
<summary>Setup Environment and Start Triton Inference Server Command</summary>
```shell
source ./triton/scripts/setup_environment.sh
./triton/scripts/docker/triton_inference_server.sh
```
</details>
#### Prepare Dataset
Please use the dataset downloaded as described in the [Main QSG](../../README.md#prepare-the-dataset)
#### Prepare Checkpoint
Please download a checkpoint from [here](https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_2_pyt_ckpt/versions/21.12.0_amp/zip)
and place it in `runner_workspace/checkpoints/1.75ms/`. Note that the `1.75ms` subdirectory may not be created yet.
#### Setup Container
Build and run a container that extends the NGC PyTorch container with the Triton Inference Server client libraries and dependencies.
<details>
<summary>Setup Container Command</summary>
Build container:
```shell
./triton/scripts/docker/build.sh
```
Run container in interactive mode:
```shell
./triton/scripts/docker/interactive.sh /path/to/imagenet/val/
```
Set up the environment in order to share artifacts between steps and with Triton Inference Server:
```shell
source ./triton/scripts/setup_environment.sh
```
</details>
#### Prepare configuration
You can use the environment variables to set the parameters of your inference configuration.
Example values of some key variables in one configuration:
<details>
<summary>Export Variables</summary>
```shell
export FORMAT="onnx"
export PRECISION="fp16"
export EXPORT_FORMAT="onnx"
export EXPORT_PRECISION="fp16"
export BACKEND_ACCELERATOR="trt"
export NUMBER_OF_MODEL_INSTANCES="2"
export TENSORRT_CAPTURE_CUDA_GRAPH="0"
export CHECKPOINT="1.75ms"
export CHECKPOINT_DIR=${CHECKPOINTS_DIR}/${CHECKPOINT}
```
</details>
#### Export Model
Export the model from the Python source to the desired format (e.g. SavedModel or TorchScript).
<details>
<summary>Export Model Command</summary>
```shell
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit none \
\
--config /workspace/gpunet/configs/batch1/GV100/1.75ms.json \
--checkpoint ${CHECKPOINT_DIR}/1.75ms.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet False \
--batch-size 1
```
</details>
#### Convert Model
Convert the model from training to inference format (e.g. TensorRT).
<details>
<summary>Convert Model Command</summary>
```shell
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size 64 \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
```
</details>
#### Deploy Model
Configure the model on Triton Inference Server.
Generate the configuration from your model repository.
<details>
<summary>Deploy Model Command</summary>
```shell
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size 64 \
--batching dynamic \
--preferred-batch-sizes 64 \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
```
</details>
#### Triton Performance Offline Test
We want to maximize throughput. This scenario assumes that the data is already available
for inference, or that it arrives quickly enough to saturate the maximum batch size.
Triton Inference Server supports offline scenarios with static batching.
Static batching allows inference requests to be served as they are received.
The largest throughput improvements come from increasing the batch size, because
larger batches use the GPU more efficiently.
<details>
<summary>Triton Performance Offline Test Command</summary>
```shell
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
```
</details>
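For convenience, the resulting CSV can be post-processed to find the best-performing batch size. The sketch below is illustrative only and assumes the file contains `Batch` and `Inferences/Second` columns; the exact column names depend on the perf_analyzer version, so adjust them to match your results file.
```python
import pandas as pd

# Load the offline results and report the batch size with the highest throughput.
# The column names are assumptions; adjust them to the actual CSV header.
results = pd.read_csv("triton_performance_offline.csv")
best = results.loc[results["Inferences/Second"].idxmax()]
print(f"Best throughput: {best['Inferences/Second']:.1f} infer/sec at batch size {int(best['Batch'])}")
```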
#### Triton Performance Online Test
We want to maximize throughput within latency budget constraints.
Dynamic batching is a feature of Triton Inference Server that allows
inference requests to be combined by the server, so that a batch is
created dynamically, resulting in a reduced average latency.
<details>
<summary>Triton Performance Online Test</summary>
```shell
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
```
</details>
### Latency explanation
A typical Triton Inference Server pipeline can be broken down into the following steps:
1. The client serializes the inference request into a message and sends it to
the server (Client Send).
2. The message travels over the network from the client to the server (Network).
3. The message arrives at the server and is deserialized (Server Receive).
4. The request is placed on the queue (Server Queue).
5. The request is removed from the queue and computed (Server Compute).
6. The completed request is serialized in a message and sent back to
the client (Server Send).
7. The completed message then travels over the network from the server
to the client (Network).
8. The completed message is deserialized by the client and processed as
a completed inference request (Client Receive).
Generally, for local clients, steps 1-4 and 6-8 occupy only a small fraction of time
compared to step 5. In distributed systems and online processing, where the client and
server are connected through a network, the send and receive steps might have a
significant impact on overall processing performance. To analyze possible bottlenecks,
detailed charts are presented for the online scenario cases.
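As an illustration only, the per-component latencies reported by perf_analyzer can be summed to approximate the end-to-end latency. The column names below are assumptions based on typical perf_analyzer CSV output (values reported in microseconds) and may differ in your results file.
```python
import pandas as pd

# Approximate the end-to-end latency as the sum of the reported pipeline components.
# The component names are assumptions; only columns present in the CSV are used.
df = pd.read_csv("triton_performance_online.csv")
components = [
    "Client Send", "Network+Server Send/Recv", "Server Queue",
    "Server Compute Input", "Server Compute Infer", "Server Compute Output",
    "Client Recv",
]
present = [c for c in components if c in df.columns]
df["end_to_end_latency_us"] = df[present].sum(axis=1)
print(df[present + ["end_to_end_latency_us"]].head())
```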
## Release Notes
We’re constantly refining and improving our performance on AI
and HPC workloads even on the same hardware with frequent updates
to our software stack. For our latest performance data refer
to these pages for
[AI](https://developer.nvidia.com/deep-learning-performance-training-inference)
and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.
### Changelog
May 2022
- Initial release
### Known issues
- There are no known issues with this model. |
PyTorch/LanguageModeling/BERT/triton/runner | runner | runner_proxy | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Type
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .executor import Executor
from .finalizer import Finalizer
from .maintainer import Maintainer
from .pipeline import Pipeline
from .preparer import Preparer
from .runner import Runner
class RunnerProxy:
"""
Runner proxy to configure original runner
"""
maintainer_cls: Type[Maintainer] = None
executor_cls: Type[Executor] = None
preparer_cls: Type[Preparer] = None
finalizer_cls: Type[Finalizer] = None
def __init__(self, config: Config, pipeline: Pipeline, devices: List[str]):
"""
RunnerProxy constructor
Args:
config: Config object
pipeline: Pipeline to evaluate
devices: List of devices to use for tests
"""
self._runner = Runner(
config=config,
pipeline=pipeline,
devices=devices,
maintainer_cls=self.maintainer_cls,
executor_cls=self.executor_cls,
preparer_cls=self.preparer_cls,
finalizer_cls=self.finalizer_cls,
)
def start(self) -> None:
"""
Runner interface
"""
self._runner.start()
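# Illustrative sketch only (not part of the original module): a concrete runner
# binds framework-specific classes on the proxy before instantiating it; the
# class and variable names below are placeholders.
#
#   class ExampleRunnerProxy(RunnerProxy):
#       maintainer_cls = Maintainer  # replace with a concrete Maintainer subclass
#       executor_cls = Executor      # replace with a concrete Executor subclass
#       preparer_cls = Preparer      # replace with a concrete Preparer subclass
#       finalizer_cls = Finalizer    # replace with a concrete Finalizer subclass
#
#   ExampleRunnerProxy(config=config, pipeline=pipeline, devices=["0"]).start()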
|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_inference_runner | triton_inference_runner | base | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
LOGGER = logging.getLogger("triton_inference_runner.base")
class BaseRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
response_wait_time: Optional[float] = None,
):
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = int(self.DEFAULT_MAX_RESP_WAIT_S if response_wait_time is None else response_wait_time)
self._response_wait_t_ms = self._response_wait_t * 1000 * 1000
self._max_wait_time = max(self._response_wait_t, self.DEFAULT_MAX_FINISH_WAIT_S)
self._server_url = server_url
def _verify_triton_state(self, triton_client):
errors = []
if not triton_client.is_server_live():
errors.append(f"Triton server {self._server_url} is not live")
elif not triton_client.is_server_ready():
errors.append(f"Triton server {self._server_url} is not ready")
elif not triton_client.is_model_ready(self._model_name, self._model_version):
errors.append(f"Model {self._model_name}:{self._model_version} is not ready")
return errors
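# Hypothetical usage sketch (not part of the original file): a subclass would
# typically create a tritonclient client for self._server_url inside one of its
# methods and fail fast when the server or model is not ready, e.g.:
#
#   import tritonclient.http as triton_http
#   client = triton_http.InferenceServerClient(url=self._server_url, verbose=self._verbose)
#   errors = self._verify_triton_state(client)
#   if errors:
#       raise RuntimeError("Triton error(s): " + "; ".join(errors))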
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trtis | trtis | custom | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CustomContext.hpp"
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Weffc++"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
#include "src/backends/custom/custom.h"
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#include <iostream>
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
CustomContext* parseContext(
void * customContext)
{
return reinterpret_cast<CustomContext*>(customContext);
}
}
/******************************************************************************
* BACKEND FUNCTIONS **********************************************************
*****************************************************************************/
int CustomInitialize(
const CustomInitializeData* data, void** customContext)
{
return CustomContext::create(
data, reinterpret_cast<CustomContext**>(customContext));
}
int CustomFinalize(void* const customContext)
{
CustomContext * const context = parseContext(customContext);
delete context;
return CustomContext::ErrorCode::SUCCESS;
}
const char* CustomErrorString(
void* const customContext, const int errcode)
{
CustomContext * const context = parseContext(customContext);
return context->errorToString(errcode);
}
int CustomExecute(
void* const customContext,
const uint32_t numPayloads,
CustomPayload* const payloads,
CustomGetNextInputFn_t input_fn,
CustomGetOutputFn_t output_fn)
{
CustomContext * const context = parseContext(customContext);
return context->execute(numPayloads, payloads, input_fn, output_fn);
}
|
TensorFlow/Classification/ConvNets/resnext101-32x4d/training | training | DGX2_RNxt101-32x4d_AMP_250E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=resnext101-32x4d \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=250 --mixup=0.2 \
--batch_size=128 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=6.103515625e-05 \
--amp --static_loss_scale 128 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
|
CUDA-Optimized/FastSpeech | FastSpeech | requirements | librosa >= 0.7.0
tensorboardX
matplotlib
fire
unidecode
pandas
inflect
torchvision
opencv-python
pyyaml
tqdm
data
pycuda |
TensorFlow2/Segmentation/nnUNet | nnUNet | download | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from subprocess import call
from data_preprocessing.configs import task
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--task", type=str, required=True, help="Task to download")
parser.add_argument("--results", type=str, default="/data", help="Directory for data storage")
if __name__ == "__main__":
args = parser.parse_args()
tar_file = task[args.task] + ".tar"
file_path = os.path.join(args.results, tar_file)
call(f"aws s3 cp s3://msd-for-monai-eu/{tar_file} --no-sign-request {args.results}", shell=True)
call(f"tar -xf {file_path} -C {args.results}", shell=True)
call(f"rm -rf {file_path}", shell=True)
|
PyTorch/Translation/Transformer/examples | examples | .gitignore | */*
!*/*.sh
!*/*.md
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/preproc | preproc | parquet_to_binary | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import os
from joblib import Parallel, delayed
import glob
import argparse
import tqdm
import subprocess
def process_file(f, dst):
label = '_c0'
dense_columns = [f'_c{i}' for i in range(1, 14)]
categorical_columns = [f'_c{i}' for i in range(14, 40)]
all_columns_sorted = [f'_c{i}' for i in range(0, 40)]
data = pd.read_parquet(f)
data = data[all_columns_sorted]
data[label] = data[label].astype(np.int32)
data[dense_columns] = data[dense_columns].astype(np.float32)
data[categorical_columns] = data[categorical_columns].astype(np.int32)
data = data.to_records(index=False)
data = data.tobytes()
dst_file = dst + '/' + f.split('/')[-1] + '.bin'
with open(dst_file, 'wb') as dst_fd:
dst_fd.write(data)
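# Each record written above is a fixed-size packed struct: one int32 label,
# 13 float32 dense features and 26 int32 categorical features (160 bytes total).
# Read-back sketch for verification only (the field names below are placeholders):
#
#   record_dtype = np.dtype([("label", np.int32)]
#                           + [(f"dense_{i}", np.float32) for i in range(13)]
#                           + [(f"cat_{i}", np.int32) for i in range(26)])
#   records = np.fromfile("train_data.bin", dtype=record_dtype)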
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_dir', type=str)
parser.add_argument('--intermediate_dir', type=str)
parser.add_argument('--dst_dir', type=str)
parser.add_argument('--parallel_jobs', default=40, type=int)
args = parser.parse_args()
print('Processing train files...')
train_src_files = glob.glob(args.src_dir + '/train/*.parquet')
train_intermediate_dir = os.path.join(args.intermediate_dir, 'train')
os.makedirs(train_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, train_intermediate_dir) for f in tqdm.tqdm(train_src_files))
print('Train files conversion done')
print('Processing test files...')
test_src_files = glob.glob(args.src_dir + '/test/*.parquet')
test_intermediate_dir = os.path.join(args.intermediate_dir, 'test')
os.makedirs(test_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, test_intermediate_dir) for f in tqdm.tqdm(test_src_files))
print('Test files conversion done')
print('Processing validation files...')
valid_src_files = glob.glob(args.src_dir + '/validation/*.parquet')
valid_intermediate_dir = os.path.join(args.intermediate_dir, 'validation')
os.makedirs(valid_intermediate_dir, exist_ok=True)
Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, valid_intermediate_dir) for f in tqdm.tqdm(valid_src_files))
print('Validation files conversion done')
os.makedirs(args.dst_dir, exist_ok=True)
print('Concatenating train files')
os.system(f'cat {train_intermediate_dir}/*.bin > {args.dst_dir}/train_data.bin')
print('Concatenating test files')
os.system(f'cat {test_intermediate_dir}/*.bin > {args.dst_dir}/test_data.bin')
print('Concatenating validation files')
os.system(f'cat {valid_intermediate_dir}/*.bin > {args.dst_dir}/validation_data.bin')
print('Done')
if __name__ == '__main__':
main()
|
PaddlePaddle/LanguageModeling/BERT/utils | utils | __init__ | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | dcgan | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DCGAN generator and discriminator from https://arxiv.org/abs/1511.06434."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import log
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
slim = tf.contrib.slim
def _validate_image_inputs(inputs):
inputs.get_shape().assert_has_rank(4)
inputs.get_shape()[1:3].assert_is_fully_defined()
if inputs.get_shape()[1] != inputs.get_shape()[2]:
raise ValueError('Input tensor does not have equal width and height: ',
inputs.get_shape()[1:3])
width = inputs.get_shape().as_list()[1]
if log(width, 2) != int(log(width, 2)):
raise ValueError('Input tensor `width` is not a power of 2: ', width)
# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN
# setups need the gradient of gradient FusedBatchNormGrad.
def discriminator(inputs,
depth=64,
is_training=True,
reuse=None,
scope='Discriminator',
fused_batch_norm=False):
"""Discriminator network for DCGAN.
Construct discriminator network from inputs to the final endpoint.
Args:
inputs: A tensor of size [batch_size, height, width, channels]. Must be
floating point.
depth: Number of channels in first convolution layer.
is_training: Whether the network is for training or not.
reuse: Whether or not the network variables should be reused. `scope`
must be given to be reused.
scope: Optional variable_scope.
fused_batch_norm: If `True`, use a faster, fused implementation of
batch norm.
Returns:
logits: The pre-softmax activations, a tensor of size [batch_size, 1]
end_points: a dictionary from components of the network to their activation.
Raises:
ValueError: If the input image shape is not 4-dimensional, if the spatial
dimensions aren't defined at graph construction time, if the spatial
dimensions aren't square, or if the spatial dimensions aren't a power of
two.
"""
normalizer_fn = slim.batch_norm
normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': fused_batch_norm,
}
_validate_image_inputs(inputs)
inp_shape = inputs.get_shape().as_list()[1]
end_points = {}
with tf.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d],
stride=2,
kernel_size=4,
activation_fn=tf.nn.leaky_relu):
net = inputs
for i in xrange(int(log(inp_shape, 2))):
scope = 'conv%i' % (i + 1)
current_depth = depth * 2**i
normalizer_fn_ = None if i == 0 else normalizer_fn
net = slim.conv2d(
net, current_depth, normalizer_fn=normalizer_fn_, scope=scope)
end_points[scope] = net
logits = slim.conv2d(net, 1, kernel_size=1, stride=1, padding='VALID',
normalizer_fn=None, activation_fn=None)
logits = tf.reshape(logits, [-1, 1])
end_points['logits'] = logits
return logits, end_points
# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN
# setups need the gradient of gradient FusedBatchNormGrad.
def generator(inputs,
depth=64,
final_size=32,
num_outputs=3,
is_training=True,
reuse=None,
scope='Generator',
fused_batch_norm=False):
"""Generator network for DCGAN.
Construct generator network from inputs to the final endpoint.
Args:
inputs: A tensor with any size N. [batch_size, N]
depth: Number of channels in last deconvolution layer.
final_size: The shape of the final output.
num_outputs: Number of output features. For images, this is the number of
channels.
is_training: whether is training or not.
reuse: Whether or not the network variables should be reused. `scope`
must be given to be reused.
scope: Optional variable_scope.
fused_batch_norm: If `True`, use a faster, fused implementation of
batch norm.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, 32, 32, channels]
end_points: a dictionary from components of the network to their activation.
Raises:
ValueError: If `inputs` is not 2-dimensional.
ValueError: If `final_size` isn't a power of 2 or is less than 8.
"""
normalizer_fn = slim.batch_norm
normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': fused_batch_norm,
}
inputs.get_shape().assert_has_rank(2)
if log(final_size, 2) != int(log(final_size, 2)):
raise ValueError('`final_size` (%i) must be a power of 2.' % final_size)
if final_size < 8:
raise ValueError('`final_size` (%i) must be at least 8.' % final_size)
end_points = {}
num_layers = int(log(final_size, 2)) - 1
with tf.variable_scope(scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d_transpose],
normalizer_fn=normalizer_fn,
stride=2,
kernel_size=4):
net = tf.expand_dims(tf.expand_dims(inputs, 1), 1)
# First upscaling is different because it takes the input vector.
current_depth = depth * 2 ** (num_layers - 1)
scope = 'deconv1'
net = slim.conv2d_transpose(
net, current_depth, stride=1, padding='VALID', scope=scope)
end_points[scope] = net
for i in xrange(2, num_layers):
scope = 'deconv%i' % (i)
current_depth = depth * 2 ** (num_layers - i)
net = slim.conv2d_transpose(net, current_depth, scope=scope)
end_points[scope] = net
# Last layer has different normalizer and activation.
scope = 'deconv%i' % (num_layers)
net = slim.conv2d_transpose(
net, depth, normalizer_fn=None, activation_fn=None, scope=scope)
end_points[scope] = net
# Convert to proper channels.
scope = 'logits'
logits = slim.conv2d(
net,
num_outputs,
normalizer_fn=None,
activation_fn=None,
kernel_size=1,
stride=1,
padding='VALID',
scope=scope)
end_points[scope] = logits
logits.get_shape().assert_has_rank(4)
logits.get_shape().assert_is_compatible_with(
[None, final_size, final_size, num_outputs])
return logits, end_points
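# Illustrative usage sketch only (TF1-style graph mode; tensor names and the
# noise dimension are placeholders):
#
#   noise = tf.placeholder(tf.float32, shape=[None, 100])
#   fake_images, _ = generator(noise, final_size=32, num_outputs=3)
#   fake_logits, _ = discriminator(fake_images)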
|
PyTorch/LanguageModeling/BERT/data | data | TextSharding | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from itertools import islice
import multiprocessing
import statistics
class Sharding:
def __init__(self, input_files, output_name_prefix, n_training_shards, n_test_shards, fraction_test_set):
assert len(input_files) > 0, 'The input file list must contain at least one file.'
assert n_training_shards > 0, 'There must be at least one output shard.'
assert n_test_shards > 0, 'There must be at least one output shard.'
self.n_training_shards = n_training_shards
self.n_test_shards = n_test_shards
self.fraction_test_set = fraction_test_set
self.input_files = input_files
self.output_name_prefix = output_name_prefix
self.output_training_identifier = '_training'
self.output_test_identifier = '_test'
self.output_file_extension = '.txt'
self.articles = {} # key: integer identifier, value: list of articles
self.sentences = {} # key: integer identifier, value: list of sentences
self.output_training_files = {} # key: filename, value: list of articles to go into file
self.output_test_files = {} # key: filename, value: list of articles to go into file
self.init_output_files()
# Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines)
def load_articles(self):
print('Start: Loading Articles')
global_article_count = 0
for input_file in self.input_files:
print('input file:', input_file)
with open(input_file, mode='r', newline='\n') as f:
for i, line in enumerate(f):
if line.strip():
self.articles[global_article_count] = line.rstrip()
global_article_count += 1
print('End: Loading Articles: There are', len(self.articles), 'articles.')
def segment_articles_into_sentences(self, segmenter):
print('Start: Sentence Segmentation')
if len(self.articles) == 0:
self.load_articles()
assert len(self.articles) != 0, 'Please check that input files are present and contain data.'
# TODO: WIP: multiprocessing (create independent ranges and spawn processes)
use_multiprocessing = 'serial'
def chunks(data, size=len(self.articles)):
it = iter(data)
for i in range(0, len(data), size):
yield {k: data[k] for k in islice(it, size)}
if use_multiprocessing == 'manager':
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
n_processes = 7 # in addition to the main process, total = n_proc+1
def work(articles, return_dict):
sentences = {}
for i, article in enumerate(articles):
sentences[i] = segmenter.segment_string(articles[article])
if i % 5000 == 0:
print('Segmenting article', i)
return_dict.update(sentences)
for item in chunks(self.articles, len(self.articles)):
p = multiprocessing.Process(target=work, args=(item, return_dict))
# Busy wait
while len(jobs) >= n_processes:
pass
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
elif use_multiprocessing == 'queue':
work_queue = multiprocessing.Queue()
jobs = []
for item in chunks(self.articles, len(self.articles)):
pass
else: # serial option
for i, article in enumerate(self.articles):
self.sentences[i] = segmenter.segment_string(self.articles[article])
if i % 5000 == 0:
print('Segmenting article', i)
print('End: Sentence Segmentation')
def init_output_files(self):
print('Start: Init Output Files')
assert len(self.output_training_files) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.'
assert len(self.output_test_files) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.'
for i in range(self.n_training_shards):
name = self.output_name_prefix + self.output_training_identifier + '_' + str(i) + self.output_file_extension
self.output_training_files[name] = []
for i in range(self.n_test_shards):
name = self.output_name_prefix + self.output_test_identifier + '_' + str(i) + self.output_file_extension
self.output_test_files[name] = []
print('End: Init Output Files')
def get_sentences_per_shard(self, shard):
result = 0
for article_id in shard:
result += len(self.sentences[article_id])
return result
def distribute_articles_over_shards(self):
print('Start: Distribute Articles Over Shards')
assert len(self.articles) >= self.n_training_shards + self.n_test_shards, 'There are fewer articles than shards. Please add more data or reduce the number of shards requested.'
# Create dictionary with - key: sentence count per article, value: article id number
sentence_counts = defaultdict(lambda: [])
max_sentences = 0
total_sentences = 0
for article_id in self.sentences:
current_length = len(self.sentences[article_id])
sentence_counts[current_length].append(article_id)
max_sentences = max(max_sentences, current_length)
total_sentences += current_length
n_sentences_assigned_to_training = int((1 - self.fraction_test_set) * total_sentences)
nominal_sentences_per_training_shard = n_sentences_assigned_to_training // self.n_training_shards
nominal_sentences_per_test_shard = (total_sentences - n_sentences_assigned_to_training) // self.n_test_shards
consumed_article_set = set({})
unused_article_set = set(self.articles.keys())
# Make first pass and add one article worth of lines per file
for file in self.output_training_files:
current_article_id = sentence_counts[max_sentences][-1]
sentence_counts[max_sentences].pop(-1)
self.output_training_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
if len(self.sentences[current_article_id]) > nominal_sentences_per_training_shard:
nominal_sentences_per_training_shard = len(self.sentences[current_article_id])
print('Warning: A single article contains more than the nominal number of sentences per training shard.')
for file in self.output_test_files:
current_article_id = sentence_counts[max_sentences][-1]
sentence_counts[max_sentences].pop(-1)
self.output_test_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
if len(self.sentences[current_article_id]) > nominal_sentences_per_test_shard:
nominal_sentences_per_test_shard = len(self.sentences[current_article_id])
print('Warning: A single article contains more than the nominal number of sentences per test shard.')
training_counts = []
test_counts = []
for shard in self.output_training_files:
training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard]))
for shard in self.output_test_files:
test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard]))
training_median = statistics.median(training_counts)
test_median = statistics.median(test_counts)
# Make subsequent passes over files to find articles to add without going over limit
history_remaining = []
n_history_remaining = 4
while len(consumed_article_set) < len(self.articles):
for fidx, file in enumerate(self.output_training_files):
nominal_next_article_size = min(nominal_sentences_per_training_shard - training_counts[fidx], max_sentences)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0:
nominal_next_article_size -= 1
if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or training_counts[fidx] > training_median:
continue # skip adding to this file, will come back later if no file can accept unused articles
current_article_id = sentence_counts[nominal_next_article_size][-1]
sentence_counts[nominal_next_article_size].pop(-1)
self.output_training_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
for fidx, file in enumerate(self.output_test_files):
nominal_next_article_size = min(nominal_sentences_per_test_shard - test_counts[fidx], max_sentences)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0:
nominal_next_article_size -= 1
if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or test_counts[fidx] > test_median:
continue # skip adding to this file, will come back later if no file can accept unused articles
current_article_id = sentence_counts[nominal_next_article_size][-1]
sentence_counts[nominal_next_article_size].pop(-1)
self.output_test_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed
if len(history_remaining) == n_history_remaining:
history_remaining.pop(0)
history_remaining.append(len(unused_article_set))
history_same = True
for i in range(1, len(history_remaining)):
history_same = history_same and (history_remaining[i-1] == history_remaining[i])
if history_same:
nominal_sentences_per_training_shard += 1
# nominal_sentences_per_test_shard += 1
training_counts = []
test_counts = []
for shard in self.output_training_files:
training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard]))
for shard in self.output_test_files:
test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard]))
training_median = statistics.median(training_counts)
test_median = statistics.median(test_counts)
print('Distributing data over shards:', len(unused_article_set), 'articles remaining.')
if len(unused_article_set) != 0:
print('Warning: Some articles did not make it into output files.')
for shard in self.output_training_files:
print('Training shard:', self.get_sentences_per_shard(self.output_training_files[shard]))
for shard in self.output_test_files:
print('Test shard:', self.get_sentences_per_shard(self.output_test_files[shard]))
print('End: Distribute Articles Over Shards')
def write_shards_to_disk(self):
print('Start: Write Shards to Disk')
for shard in self.output_training_files:
self.write_single_shard(shard, self.output_training_files[shard])
for shard in self.output_test_files:
self.write_single_shard(shard, self.output_test_files[shard])
print('End: Write Shards to Disk')
def write_single_shard(self, shard_name, shard):
with open(shard_name, mode='w', newline='\n') as f:
for article_id in shard:
for line in self.sentences[article_id]:
f.write(line + '\n')
f.write('\n') # Line break between articles
import nltk
nltk.download('punkt')
class NLTKSegmenter:
def __init__(self):
pass
def segment_string(self, article):
return nltk.tokenize.sent_tokenize(article)
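# Illustrative end-to-end usage (not part of the original file); the input file
# name, output prefix and shard counts are placeholders:
#
#   segmenter = NLTKSegmenter()
#   sharding = Sharding(['corpus.txt'], 'shards/corpus', n_training_shards=256,
#                       n_test_shards=1, fraction_test_set=0.1)
#   sharding.load_articles()
#   sharding.segment_articles_into_sentences(segmenter)
#   sharding.distribute_articles_over_shards()
#   sharding.write_shards_to_disk()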
|
TensorFlow2/LanguageModeling/BERT/official/modeling | modeling | model_training_utils | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A light weight utilities to train NLP models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
from absl import logging
import tensorflow as tf
from horovod.tensorflow.compression import Compression
from dllogger import Verbosity
from optimization import GradientAccumulator
from official.utils.misc import distribution_utils
from official.utils.misc import tpu_lib
_SUMMARY_TXT = 'training_summary.txt'
_MIN_SUMMARY_STEPS = 10
def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):
"""Saves model to with provided checkpoint prefix."""
checkpoint_path = os.path.join(model_dir, checkpoint_prefix)
saved_path = checkpoint.save(checkpoint_path)
logging.info('Saving model as TF checkpoint: %s', saved_path)
return
def _get_input_iterator(input_fn, strategy):
"""Returns distributed dataset iterator."""
# When training with TPU pods, datasets need to be cloned across
# workers. Since Dataset instance cannot be cloned in eager mode, we instead
# pass callable that returns a dataset.
if not callable(input_fn):
raise ValueError('`input_fn` should be a closure that returns a dataset.')
if strategy is None:
input_data = input_fn()
iterator = iter(input_data)
else:
iterator = iter(
strategy.experimental_distribute_datasets_from_function(input_fn))
return iterator
def _float_metric_value(metric):
"""Gets the value of a float-value keras metric."""
return metric.result().numpy().astype(float)
def steps_to_run(current_step, steps_per_epoch, steps_per_loop):
"""Calculates steps to run on device."""
if steps_per_loop <= 0:
raise ValueError('steps_per_loop should be positive integer.')
if steps_per_loop == 1:
return steps_per_loop
remainder_in_epoch = current_step % steps_per_epoch
if remainder_in_epoch != 0:
return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)
else:
return steps_per_loop
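# Worked example: with current_step=95, steps_per_epoch=100 and steps_per_loop=10,
# the remainder within the epoch is 95, so the function returns min(100 - 95, 10) = 5,
# which keeps the host training loop aligned with the epoch boundary.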
def write_txt_summary(training_summary, summary_dir):
"""Writes a summary text file to record stats."""
summary_path = os.path.join(summary_dir, _SUMMARY_TXT)
with tf.io.gfile.GFile(summary_path, 'wb') as f:
logging.info('Training Summary: \n%s', str(training_summary))
f.write(json.dumps(training_summary, indent=4))
def run_customized_training_loop(
# pylint: disable=invalid-name
_sentinel=None,
# pylint: enable=invalid-name
strategy=None,
model_fn=None,
loss_fn=None,
model_dir=None,
train_input_fn=None,
steps_per_epoch=None,
num_accumulative_step=1,
steps_per_loop=1,
epochs=1,
eval_input_fn=None,
eval_steps=None,
metric_fn=None,
init_checkpoint=None,
custom_callbacks=None,
run_eagerly=False,
hvd=None,
sub_model_export_name=None,
params=None):
"""Run BERT pretrain model training using low-level API.
Arguments:
_sentinel: Used to prevent positional parameters. Internal, do not use.
strategy: Distribution strategy on which to run low level training loop.
model_fn: Function that returns a tuple (model, sub_model). Caller of this
function should add optimizer to the `model` via calling
`model.compile()` API or manually setting `model.optimizer` attribute.
Second element of the returned tuple(sub_model) is an optional sub model
to be used for initial checkpoint -- if provided.
loss_fn: Function with signature func(labels, logits) and returns a loss
tensor.
model_dir: Model directory used during training for restoring/saving model
weights.
train_input_fn: Function that returns a tf.data.Dataset used for training.
steps_per_epoch: Number of steps to run per epoch. At the end of each
epoch, model checkpoint will be saved and evaluation will be conducted
if evaluation dataset is provided.
steps_per_loop: Number of steps per graph-mode loop. In order to reduce
communication in eager context, training logs are printed every
steps_per_loop.
epochs: Number of epochs to train.
eval_input_fn: Function that returns evaluation dataset. If none,
evaluation is skipped.
eval_steps: Number of steps to run evaluation. Required if `eval_input_fn`
is not none.
metric_fn: A metrics function that returns a Keras Metric object to record
evaluation result using evaluation dataset or with training dataset
after every epoch.
init_checkpoint: Optional checkpoint to load to `sub_model` returned by
`model_fn`.
custom_callbacks: A list of Keras Callbacks objects to run during
training. More specifically, `on_batch_begin()`, `on_batch_end()`,
methods are invoked during training.
run_eagerly: Whether to run model training in pure eager execution. This
should be disabled for TPUStrategy.
sub_model_export_name: If not None, will export `sub_model` returned by
`model_fn` into checkpoint files. The name of intermediate checkpoint
file is {sub_model_export_name}_step_{step}.ckpt and the last
checkpoint's name is {sub_model_export_name}.ckpt;
if None, `sub_model` will not be exported as checkpoint.
Returns:
Trained model.
Raises:
ValueError: (1) When model returned by `model_fn` does not have optimizer
attribute or when required parameters are set to none. (2) eval args are
not specified correctly. (3) metric_fn must be a callable if specified.
(4) sub_model_checkpoint_name is specified, but `sub_model` returned
by `model_fn` is None.
"""
if _sentinel is not None:
raise ValueError('only call `run_customized_training_loop()` '
'with named arguments.')
required_arguments = [
model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn
]
if [arg for arg in required_arguments if arg is None]:
raise ValueError('`model_fn`, `loss_fn`, `model_dir`, '
'`steps_per_loop` and `steps_per_epoch` are required '
'parameters.')
if steps_per_loop > steps_per_epoch:
logging.error(
'steps_per_loop: %d is specified to be greater than '
' steps_per_epoch: %d, we will use steps_per_epoch as'
' steps_per_loop.', steps_per_loop, steps_per_epoch)
steps_per_loop = steps_per_epoch
assert tf.executing_eagerly()
if run_eagerly:
if steps_per_loop > 1:
raise ValueError(
'steps_per_loop is used for performance optimization. When you want '
'to run eagerly, you cannot leverage graph mode loop.')
if isinstance(strategy, tf.distribute.experimental.TPUStrategy):
raise ValueError(
'TPUStrategy should not run eagerly as it heavily relies on graph'
' optimization for the distributed system.')
if eval_input_fn and (eval_steps is None or metric_fn is None):
raise ValueError(
'`eval_step` and `metric_fn` are required when `eval_input_fn ` '
'is not none.')
if metric_fn and not callable(metric_fn):
raise ValueError(
'if `metric_fn` is specified, metric_fn must be a callable.')
total_training_steps = steps_per_epoch * epochs
# To reduce unnecessary send/receive input pipeline operation, we place input
# pipeline ops in worker task.
train_iterator = _get_input_iterator(train_input_fn, strategy)
with distribution_utils.get_strategy_scope(strategy):
# To correctly place the model weights on accelerators,
# model and optimizer should be created in scope.
model, sub_model = model_fn()
first_batch = True
if not hasattr(model, 'optimizer'):
raise ValueError('User should set optimizer attribute to model '
'inside `model_fn`.')
if sub_model_export_name and sub_model is None:
raise ValueError('sub_model_export_name is specified as %s, but '
'sub_model is None.' % sub_model_export_name)
optimizer = model.optimizer
use_float16 = isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer)
if init_checkpoint:
logging.info(
'Checkpoint file %s found and restoring from '
'initial checkpoint for core model.', init_checkpoint)
checkpoint = tf.train.Checkpoint(model=sub_model)
checkpoint.restore(init_checkpoint).assert_existing_objects_matched()
logging.info('Loading from checkpoint file completed')
train_loss_metric = tf.keras.metrics.Mean(
'training_loss', dtype=tf.float32)
eval_metrics = [metric_fn()] if metric_fn else []
# If evaluation is required, make a copy of metric as it will be used by
# both train and evaluation.
train_metrics = [
metric.__class__.from_config(metric.get_config())
for metric in eval_metrics
]
# Create summary writers
if not hvd or hvd.rank() == 0:
summary_dir = os.path.join(model_dir, 'summaries')
eval_summary_writer = tf.summary.create_file_writer(
os.path.join(summary_dir, 'eval'))
if steps_per_loop >= _MIN_SUMMARY_STEPS:
# Only writes summary when the stats are collected sufficiently over
# enough steps.
train_summary_writer = tf.summary.create_file_writer(
os.path.join(summary_dir, 'train'))
else:
train_summary_writer = None
else:
eval_summary_writer = None
train_summary_writer = None
eval_input_fn = None
# Collects training variables.
training_vars = model.trainable_variables
accum_gradients = GradientAccumulator()
def _replicated_step(inputs, first_batch=False):
"""Replicated training step."""
inputs, labels = inputs
with tf.GradientTape() as tape:
model_outputs = model(inputs, training=True)
loss = loss_fn(labels, model_outputs)
if use_float16:
scaled_loss = optimizer.get_scaled_loss(loss)
if hvd:
tape = hvd.DistributedGradientTape(tape, sparse_as_dense=True, compression=Compression.fp16 if use_float16 else Compression.none)
if use_float16:
scaled_grads = tape.gradient(scaled_loss, training_vars)
grads = optimizer.get_unscaled_gradients(scaled_grads)
else:
grads = tape.gradient(loss, training_vars)
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
optimizer.apply_gradients(zip(grads, training_vars))
if hvd and first_batch:
hvd.broadcast_variables(model.variables, 0)
hvd.broadcast_variables(optimizer.variables(), 0)
# For reporting, the metric takes the mean of losses.
train_loss_metric.update_state(loss)
for metric in train_metrics:
metric.update_state(labels, model_outputs)
def forward(inputs):
inputs, labels = inputs
with tf.GradientTape() as tape:
model_outputs = model(inputs, training=True)
loss = loss_fn(labels, model_outputs)
if use_float16:
scaled_loss = optimizer.get_scaled_loss(loss)
if use_float16:
scaled_grads = tape.gradient(scaled_loss, training_vars)
grads = optimizer.get_unscaled_gradients(scaled_grads)
else:
grads = tape.gradient(loss, training_vars)
# For reporting, the metric takes the mean of losses.
train_loss_metric.update_state(loss)
for metric in train_metrics:
metric.update_state(labels, model_outputs)
accum_gradients.add_gradients(grads)
def step(num_grad_accumulates):
gradients = accum_gradients.gradients
if hvd:
gradients = [None if g is None else hvd.allreduce(g / tf.cast(num_grad_accumulates, g.dtype), compression=Compression.fp16 if use_float16 else Compression.none) for g in gradients]
else:
gradients = [None if g is None else g / tf.cast(num_grad_accumulates, g.dtype) for g in gradients]
(gradients, _) = tf.clip_by_global_norm(gradients, clip_norm=1.0)
optimizer.apply_gradients(zip(gradients, training_vars))
accum_gradients.reset()
@tf.function
def train_steps_strategy(iterator, steps, num_grad_accumulates):
"""Performs distributed training steps in a loop.
Args:
iterator: the distributed iterator of training datasets.
steps: an tf.int32 integer tensor to specify number of steps to run
inside host training loop.
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
if not isinstance(steps, tf.Tensor):
raise ValueError('steps should be a Tensor. Python objects may cause '
'retracing.')
if num_grad_accumulates != 1:
for _ in tf.range(steps*num_grad_accumulates):
strategy.experimental_run_v2(forward, args=(next(iterator),))
if _ == 0 or (_ + 1) % num_grad_accumulates == 0:
strategy.experimental_run_v2(step, args=(num_grad_accumulates,))
else:
for _ in tf.range(steps):
strategy.experimental_run_v2(_replicated_step, args=(next(iterator),))
@tf.function
def train_steps(iterator, steps, num_grad_accumulates, first_batch):
if not isinstance(steps, tf.Tensor):
raise ValueError('steps should be a Tensor. Python objects may cause '
'retracing.')
if num_grad_accumulates != 1:
for _ in tf.range(steps*num_grad_accumulates):
forward(next(iterator))
if _ == 0 or (_ + 1) % num_grad_accumulates == 0:
step(num_grad_accumulates)
if hvd and _ == 0 and first_batch:
hvd.broadcast_variables(model.variables, 0)
hvd.broadcast_variables(optimizer.variables(), 0)
else:
for _ in tf.range(steps):
_replicated_step(next(iterator), (first_batch and _ == 0))
def train_single_step_strategy(iterator, num_grad_accumulates):
"""Performs a distributed training step.
Args:
iterator: the distributed iterator of training datasets.
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
if num_grad_accumulates != 1:
for _ in tf.range(num_grad_accumulates):
strategy.experimental_run_v2(forward, args=(next(iterator),))
if _ == 0 or (_ + 1) % num_grad_accumulates == 0:
strategy.experimental_run_v2(step, args=(num_grad_accumulates,))
else:
strategy.experimental_run_v2(_replicated_step, args=(next(iterator),))
def train_single_step(iterator, num_grad_accumulates, first_batch):
"""Performs a distributed training step.
Args:
iterator: the distributed iterator of training datasets.
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
if num_grad_accumulates != 1:
for _ in tf.range(num_grad_accumulates):
forward(next(iterator))
if _ == 0 or (_ + 1) % num_grad_accumulates == 0:
step(num_grad_accumulates)
if hvd and _ == 0 and first_batch:
hvd.broadcast_variables(model.variables, 0)
hvd.broadcast_variables(optimizer.variables(), 0)
else:
_replicated_step(next(iterator), first_batch)
def test_step(iterator):
"""Calculates evaluation metrics on distributed devices."""
def _test_step_fn(inputs):
"""Replicated accuracy calculation."""
inputs, labels = inputs
model_outputs = model(inputs, training=False)
for metric in eval_metrics:
metric.update_state(labels, model_outputs)
if strategy:
strategy.experimental_run_v2(_test_step_fn, args=(next(iterator),))
else:
_test_step_fn(next(iterator))
if not run_eagerly:
train_single_step = tf.function(train_single_step)
test_step = tf.function(test_step)
def _run_evaluation(current_training_step, test_iterator):
"""Runs validation steps and aggregate metrics."""
for _ in range(eval_steps):
test_step(test_iterator)
with eval_summary_writer.as_default():
for metric in eval_metrics + model.metrics:
metric_value = _float_metric_value(metric)
logging.info('Step: [%d] Validation %s = %f', current_training_step,
metric.name, metric_value)
tf.summary.scalar(
metric.name, metric_value, step=current_training_step)
eval_summary_writer.flush()
def _run_callbacks_on_batch_begin(batch):
"""Runs custom callbacks at the start of every step."""
if not custom_callbacks:
return
for callback in custom_callbacks:
callback.on_batch_begin(batch)
def _run_callbacks_on_batch_end(batch):
"""Runs custom callbacks at the end of every step."""
if not custom_callbacks:
return
for callback in custom_callbacks:
callback.on_batch_end(batch)
# Training loop starts here.
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
sub_model_checkpoint = tf.train.Checkpoint(
model=sub_model) if sub_model_export_name else None
latest_checkpoint_file = tf.train.latest_checkpoint(model_dir)
if latest_checkpoint_file:
logging.info(
'Checkpoint file %s found and restoring from '
'checkpoint', latest_checkpoint_file)
checkpoint.restore(latest_checkpoint_file)
logging.info('Loading from checkpoint file completed')
current_step = optimizer.iterations.numpy()
checkpoint_name = 'ctl_step_{step}.ckpt'
manager = tf.train.CheckpointManager(checkpoint, model_dir, max_to_keep=3)
FLAGS = params['FLAGS']
steps_from_save = 0
start_time = time.time()
total_wo = 0
total_steps_wo = 0
perf_wo = 0
perf_wo_n = 0
first_steps = current_step
total_running_steps = total_training_steps - first_steps
global_batch_size = FLAGS.train_batch_size * num_accumulative_step
if hvd:
global_batch_size *= hvd.size()
while current_step < total_training_steps:
# Training loss/metrics are averaged over the steps inside the micro
# training loop. We reset their values before each round.
t0 = time.time()
train_loss_metric.reset_states()
for metric in train_metrics + model.metrics:
metric.reset_states()
_run_callbacks_on_batch_begin(current_step)
# Runs several steps in the host while loop.
steps = steps_to_run(current_step, steps_per_epoch, steps_per_loop)
t0_wo = time.time()
if steps == 1:
# TODO(zongweiz): merge with train_steps once tf.while_loop
# GPU performance bugs are fixed.
if strategy:
train_single_step_strategy(train_iterator, num_accumulative_step)
else:
train_single_step(train_iterator, num_accumulative_step, first_batch)
else:
# Converts steps to a Tensor to avoid tf.function retracing.
if strategy:
train_steps_strategy(train_iterator,
tf.convert_to_tensor(steps, dtype=tf.int32), num_accumulative_step)
else:
train_steps(train_iterator,
tf.convert_to_tensor(steps, dtype=tf.int32), num_accumulative_step, first_batch)
elapse_wo = time.time() - t0_wo
first_batch = False
_run_callbacks_on_batch_end(current_step)
current_step += steps
train_loss = _float_metric_value(train_loss_metric)
elapse_time = time.time() - t0
# Updates training logging.
training_status = 'Train Step: %d/%d / loss = %s / time = %.3f sec' % (
current_step, total_training_steps, train_loss, elapse_time)
steps_from_save += steps
if (not hvd or hvd.rank() == 0) and steps_from_save >= FLAGS.save_checkpoint_steps:
save_path = manager.save()
logging.info('Saved checkpoint to {}'.format(save_path))
steps_from_save = 0
if train_summary_writer:
with train_summary_writer.as_default():
tf.summary.scalar(
train_loss_metric.name, train_loss, step=current_step)
for metric in train_metrics + model.metrics:
metric_value = _float_metric_value(metric)
training_status += ' %s = %f' % (metric.name, metric_value)
tf.summary.scalar(metric.name, metric_value, step=current_step)
train_summary_writer.flush()
if not hvd or hvd.rank() == 0:
if use_float16:
logging.info('Step: %d Lr %g Loss scale %g' % (current_step, optimizer._optimizer._decayed_lr('float32'), optimizer.loss_scale))
logging.info(training_status)
logging.info('Perf %.2f' % (steps * global_batch_size / elapse_wo))
if current_step > first_steps + steps * 2:
total_wo += elapse_wo
total_steps_wo += steps
perf_wo += steps * global_batch_size / elapse_wo
perf_wo_n += 1
# Saves model checkpoints and run validation steps at every epoch end.
if current_step % steps_per_epoch == 0:
# To avoid repeated model saving, we do not save after the last
# step of training.
if current_step < total_training_steps and (not hvd or hvd.rank() == 0):
manager.save()
if sub_model_export_name:
_save_checkpoint(
sub_model_checkpoint, model_dir,
'%s_step_%d.ckpt' % (sub_model_export_name, current_step))
if eval_input_fn:
logging.info('Running evaluation after step: %s.', current_step)
_run_evaluation(current_step,
_get_input_iterator(eval_input_fn, strategy))
# Re-initialize evaluation metric.
for metric in eval_metrics + model.metrics:
metric.reset_states()
total_time = time.time() - start_time
if not hvd or hvd.rank() == 0:
_save_checkpoint(checkpoint, model_dir,
checkpoint_name.format(step=current_step))
if sub_model_export_name:
_save_checkpoint(sub_model_checkpoint, model_dir,
'%s.ckpt' % sub_model_export_name)
if eval_input_fn:
logging.info('Running final evaluation after training is complete.')
_run_evaluation(current_step,
_get_input_iterator(eval_input_fn, strategy))
training_summary = {
'total_training_steps': total_training_steps,
'train_loss': _float_metric_value(train_loss_metric),
}
if eval_metrics:
# TODO(hongkuny): Cleans up summary reporting in text.
training_summary['last_train_metrics'] = _float_metric_value(
train_metrics[0])
training_summary['eval_metrics'] = _float_metric_value(eval_metrics[0])
write_txt_summary(training_summary, summary_dir)
dllogging = params['dllogging'] if 'dllogging' in params else None
total_sentences = total_running_steps * global_batch_size
total_sentences_wo = total_steps_wo * global_batch_size
logging.info("-----------------------------")
logging.info(" Batch size = %d", FLAGS.train_batch_size)
logging.info(" Num steps = %d", total_training_steps)
logging.info(" LR = %g", FLAGS.learning_rate)
if hvd:
logging.info("Multi-GPU training with TF Horovod")
logging.info("hvd.size() = %d", hvd.size())
logging.info("Total Training Time = %0.2f for Sequences = %d", total_time, total_sentences)
if total_time != 0:
logging.info("Throughput Average (sequences/sec) with overhead = %0.2f", total_sentences/total_time)
if perf_wo_n != 0:
logging.info("Throughput Average (sequences/sec) = %0.2f", perf_wo/perf_wo_n)
logging.info("-----------------------------")
if dllogging and perf_wo_n != 0:
dllogging.logger.log(step=(), data={"throughput_train": perf_wo/perf_wo_n}, verbosity=Verbosity.DEFAULT)
dllogging.logger.log(step=(), data={"total_loss": training_summary['train_loss']}, verbosity=Verbosity.DEFAULT)
return model
|
PyTorch/Translation/Transformer/fairseq/modules | modules | beamable_mm | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
class BeamableMM(nn.Module):
"""This module provides an optimized MM for beam decoding with attention.
It leverages the fact that the source-side of the input is replicated beam
times and the target-side of the input is of width one. This layer speeds up
inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)}
with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}.
"""
def __init__(self, beam_size=None):
super(BeamableMM, self).__init__()
self.beam_size = beam_size
def forward(self, input1, input2):
if (
not self.training and # test mode
self.beam_size is not None and # beam size is set
input1.dim() == 3 and # only support batched input
input1.size(1) == 1 # single time step update
):
bsz, beam = input1.size(0), self.beam_size
# bsz x 1 x nhu --> bsz/beam x beam x nhu
input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)
# bsz x sz2 x nhu --> bsz/beam x sz2 x nhu
input2 = input2.unfold(0, beam, beam)[:, :, :, 0]
# use non batched operation if bsz = beam
if input1.size(0) == 1:
output = torch.mm(input1[0, :, :], input2[0, :, :])
else:
output = input1.bmm(input2)
return output.view(bsz, 1, -1)
else:
return input1.bmm(input2)
def set_beam_size(self, beam_size):
self.beam_size = beam_size
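# A minimal usage sketch (the shapes and beam size below are illustrative, not
# taken from the Transformer decoder). The source-side tensor is replicated
# `beam` times along the batch dimension, as it is during beam decoding, so the
# folded matmul produces the same result as a plain bmm.
def _beamable_mm_example():
    bsz_per_beam, beam, src_len, nhu = 2, 2, 5, 8
    bsz = bsz_per_beam * beam
    mm = BeamableMM(beam_size=beam).eval()  # eval() enables the folded path
    queries = torch.rand(bsz, 1, nhu)       # one decoding step per hypothesis
    keys_t = torch.rand(bsz_per_beam, nhu, src_len).repeat_interleave(beam, dim=0)
    out = mm(queries, keys_t)
    assert out.shape == (bsz, 1, src_len)
    assert torch.allclose(out, queries.bmm(keys_t), atol=1e-6)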
|
PyTorch/Detection/SSD/ssd | ssd | data | # Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from torch.utils.data import DataLoader
from ssd.utils import dboxes300_coco, COCODetection
from ssd.utils import SSDTransformer
from pycocotools.coco import COCO
#DALI import
from ssd.coco_pipeline import COCOPipeline, DALICOCOIterator
def get_train_loader(args, local_seed):
train_annotate = os.path.join(args.data, "annotations/instances_train2017.json")
train_coco_root = os.path.join(args.data, "train2017")
train_pipe = COCOPipeline(batch_size=args.batch_size,
file_root=train_coco_root,
annotations_file=train_annotate,
default_boxes=dboxes300_coco(),
device_id=args.local_rank,
num_shards=args.N_gpu,
output_fp16=args.amp,
output_nhwc=False,
pad_output=False,
num_threads=args.num_workers, seed=local_seed)
train_pipe.build()
test_run = train_pipe.schedule_run(), train_pipe.share_outputs(), train_pipe.release_outputs()
train_loader = DALICOCOIterator(train_pipe, 118287 / args.N_gpu)
return train_loader
def get_val_dataset(args):
dboxes = dboxes300_coco()
val_trans = SSDTransformer(dboxes, (300, 300), val=True)
val_annotate = os.path.join(args.data, "annotations/instances_val2017.json")
val_coco_root = os.path.join(args.data, "val2017")
val_coco = COCODetection(val_coco_root, val_annotate, val_trans)
return val_coco
def get_val_dataloader(dataset, args):
if args.distributed:
val_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
val_sampler = None
val_dataloader = DataLoader(dataset,
batch_size=args.eval_batch_size,
shuffle=False, # Note: distributed sampler is shuffled :(
sampler=val_sampler,
num_workers=args.num_workers)
return val_dataloader
def get_coco_ground_truth(args):
val_annotate = os.path.join(args.data, "annotations/instances_val2017.json")
cocoGt = COCO(annotation_file=val_annotate, use_ext=True)
return cocoGt
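# A minimal sketch of wiring the validation helpers together. The Namespace
# attribute values are illustrative placeholders, not the defaults used by the
# training scripts in this repository.
def _val_pipeline_example():
    from argparse import Namespace
    args = Namespace(data='/coco', eval_batch_size=32, num_workers=4,
                     distributed=False)
    dataset = get_val_dataset(args)
    dataloader = get_val_dataloader(dataset, args)
    cocoGt = get_coco_ground_truth(args)
    return dataloader, cocoGt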
|
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/training/AMP | AMP | convergence_8xV100-32G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v2/s_cfg.py \
--mode train_and_eval \
--use_amp \
--use_xla \
--model_dir ./output/ \
--data_dir /data/ \
--log_steps 500 \
--save_checkpoint_freq 10 \
--n_stages 4 \
--max_epochs 350 \
--train_batch_size 128 \
--train_img_size 300 \
--base_img_size 128 \
--lr_decay cosine \
--lr_init 0.005 \
--weight_decay .000005 \
--opt_epsilon 0.001 \
--moving_average_decay 0.9999 \
--eval_img_size 384 \
--eval_batch_size 100 \
--augmenter_name randaugment \
--raug_num_layers 2 \
--raug_magnitude 15 \
--cutmix_alpha 0 \
--mixup_alpha 0 \
--defer_img_mixing |
PyTorch/Translation/GNMT/seq2seq/inference | inference | translator | # Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import subprocess
import time
import torch
import torch.distributed as dist
import seq2seq.data.config as config
import seq2seq.utils as utils
from seq2seq.inference.beam_search import SequenceGenerator
def gather_predictions(preds):
world_size = utils.get_world_size()
if world_size > 1:
all_preds = [preds.new(preds.size(0), preds.size(1)) for i in range(world_size)]
dist.all_gather(all_preds, preds)
preds = torch.cat(all_preds)
return preds
def run_sacrebleu(test_path, reference_path):
"""
Executes sacrebleu and returns the BLEU score.
:param test_path: path to the test file
:param reference_path: path to the reference file
"""
sacrebleu_params = '--score-only -lc --tokenize intl'
logging.info(f'Running sacrebleu (parameters: {sacrebleu_params})')
sacrebleu = subprocess.run([f'sacrebleu --input {test_path} \
{reference_path} {sacrebleu_params}'],
stdout=subprocess.PIPE, shell=True)
test_bleu = round(float(sacrebleu.stdout.strip()), 2)
return test_bleu
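# A minimal usage sketch for run_sacrebleu (the file names are hypothetical
# placeholders): both files are expected to contain one detokenized sentence
# per line, and the `sacrebleu` command must be available on PATH.
def _run_sacrebleu_example():
    bleu = run_sacrebleu('model_output.detok.en', 'newstest2014.ref.en')
    logging.info(f'BLEU = {bleu:.2f}')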
class Translator:
"""
Translator runs validation on the test dataset, executes inference, and
optionally computes the BLEU score using sacrebleu.
"""
def __init__(self,
model,
tokenizer,
loader=None,
beam_size=5,
len_norm_factor=0.6,
len_norm_const=5.0,
cov_penalty_factor=0.1,
max_seq_len=50,
print_freq=1,
reference=None,
):
self.model = model
self.tokenizer = tokenizer
self.loader = loader
self.insert_target_start = [config.BOS]
self.insert_src_start = [config.BOS]
self.insert_src_end = [config.EOS]
self.batch_first = model.batch_first
self.beam_size = beam_size
self.print_freq = print_freq
self.reference = reference
self.distributed = (utils.get_world_size() > 1)
self.generator = SequenceGenerator(
model=self.model,
beam_size=beam_size,
max_seq_len=max_seq_len,
len_norm_factor=len_norm_factor,
len_norm_const=len_norm_const,
cov_penalty_factor=cov_penalty_factor)
def run(self, calc_bleu=True, epoch=None, iteration=None, eval_path=None,
summary=False, warmup=0, reference_path=None):
"""
Runs translation on test dataset.
:param calc_bleu: if True compares results with reference and computes
BLEU score
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param eval_path: path to the file for saving results
:param summary: if True prints summary
:param reference_path: path to the file with reference translation
"""
if reference_path is None:
reference_path = self.reference
device = next(self.model.parameters()).device
test_bleu = torch.tensor([0.], device=device)
rank = utils.get_rank()
logging.info(f'Running evaluation on test set')
self.model.eval()
output, eval_stats = self.evaluate(self.loader, epoch, iteration,
warmup, summary)
output = output[:len(self.loader.dataset)]
output = self.loader.dataset.unsort(output)
if rank == 0 and eval_path:
with open(eval_path, 'w') as eval_file:
lines = [line + '\n' for line in output]
eval_file.writelines(lines)
if calc_bleu:
test_bleu[0] = run_sacrebleu(eval_path, reference_path)
if summary:
logging.info(f'BLEU on test dataset: {test_bleu[0]:.2f}')
utils.barrier()
logging.info(f'Finished evaluation on test set')
if self.distributed:
dist.broadcast(test_bleu, 0)
if calc_bleu:
eval_stats['bleu'] = test_bleu[0].item()
else:
eval_stats['bleu'] = None
return output, eval_stats
def evaluate(self, loader, epoch=0, iteration=0, warmup=0, summary=False):
"""
Runs evaluation on test dataset.
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param summary: if True prints summary
"""
device = next(self.model.parameters()).device
batch_time = utils.AverageMeter(warmup, keep=True)
tot_tok_per_sec = utils.AverageMeter(warmup, keep=True)
iterations = utils.AverageMeter()
enc_seq_len = utils.AverageMeter()
dec_seq_len = utils.AverageMeter()
stats = {}
batch_size = loader.batch_size
global_batch_size = batch_size * utils.get_world_size()
beam_size = self.beam_size
bos = [self.insert_target_start] * (batch_size * beam_size)
bos = torch.tensor(bos, dtype=torch.int64, device=device)
if self.batch_first:
bos = bos.view(-1, 1)
else:
bos = bos.view(1, -1)
if beam_size == 1:
generator = self.generator.greedy_search
else:
generator = self.generator.beam_search
output = []
for i, (src, indices) in enumerate(loader):
if device.type == 'cuda':
torch.cuda.synchronize()
translate_timer = time.time()
src, src_length = src
stats['total_enc_len'] = int(src_length.sum())
src = src.to(device)
src_length = src_length.to(device)
with torch.no_grad():
context = self.model.encode(src, src_length)
context = [context, src_length, None]
preds, lengths, counter = generator(batch_size, bos, context)
stats['total_dec_len'] = lengths.sum().item()
stats['iters'] = counter
indices = torch.tensor(indices).to(preds)
preds = preds.scatter(0, indices.unsqueeze(1).expand_as(preds), preds)
preds = gather_predictions(preds).cpu()
if self.tokenizer:
for pred in preds:
pred = pred.tolist()
detok = self.tokenizer.detokenize(pred)
output.append(detok)
if device.type == 'cuda':
torch.cuda.synchronize()
elapsed = time.time() - translate_timer
batch_time.update(elapsed, batch_size)
total_tokens = stats['total_dec_len'] + stats['total_enc_len']
ttps = total_tokens / elapsed
tot_tok_per_sec.update(ttps, elapsed)
iterations.update(stats['iters'])
enc_seq_len.update(stats['total_enc_len'] / batch_size, batch_size)
dec_seq_len.update(stats['total_dec_len'] / batch_size, batch_size)
if i % self.print_freq == self.print_freq - 1:
log = []
log += f'TEST '
if epoch is not None:
log += f'[{epoch}]'
if iteration is not None:
log += f'[{iteration}]'
log += f'[{i}/{len(loader)}]\t'
log += f'Time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
log += f'Decoder iters {iterations.val:.1f} ({iterations.avg:.1f})\t'
log += f'Tok/s {tot_tok_per_sec.val:.0f} ({tot_tok_per_sec.avg:.0f})'
log = ''.join(log)
logging.info(log)
tot_tok_per_sec.reduce('sum')
enc_seq_len.reduce('mean')
dec_seq_len.reduce('mean')
batch_time.reduce('mean')
iterations.reduce('sum')
if summary and utils.get_rank() == 0:
time_per_sentence = (batch_time.avg / global_batch_size)
log = []
log += f'TEST SUMMARY:\n'
log += f'Lines translated: {len(loader.dataset)}\t'
log += f'Avg total tokens/s: {tot_tok_per_sec.avg:.0f}\n'
log += f'Avg time per batch: {batch_time.avg:.3f} s\t'
log += f'Avg time per sentence: {1000*time_per_sentence:.3f} ms\n'
log += f'Avg encoder seq len: {enc_seq_len.avg:.2f}\t'
log += f'Avg decoder seq len: {dec_seq_len.avg:.2f}\t'
log += f'Total decoder iterations: {int(iterations.sum)}'
log = ''.join(log)
logging.info(log)
eval_stats = {}
eval_stats['tokens_per_sec'] = tot_tok_per_sec.avg
eval_stats['runtimes'] = batch_time.vals
eval_stats['throughputs'] = tot_tok_per_sec.vals
return output, eval_stats
|
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit | deployment_toolkit | extensions | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import re
from pathlib import Path
from typing import List
LOGGER = logging.getLogger(__name__)
class ExtensionManager:
def __init__(self, name: str):
self._name = name
self._registry = {}
def register_extension(self, extension: str, clazz):
already_registered_class = self._registry.get(extension, None)
if already_registered_class and already_registered_class.__module__ != clazz.__module__:
raise RuntimeError(
f"Conflicting extension {self._name}/{extension}; "
f"{already_registered_class.__module__}.{already_registered_class.__name} "
f"and "
f"{clazz.__module__}.{clazz.__name__}"
)
elif already_registered_class is None:
clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
self._registry[extension] = clazz
def get(self, extension):
if extension not in self._registry:
raise RuntimeError(f"Missing extension {self._name}/{extension}")
return self._registry[extension]
@property
def supported_extensions(self):
return list(self._registry)
@staticmethod
def scan_for_extensions(extension_dirs: List[Path]):
register_pattern = r".*\.register_extension\(.*"
for extension_dir in extension_dirs:
for python_path in extension_dir.rglob("*.py"):
if not python_path.is_file():
continue
payload = python_path.read_text()
if re.findall(register_pattern, payload):
import_path = python_path.relative_to(toolkit_root_dir.parent)
package = import_path.parent.as_posix().replace(os.sep, ".")
package_with_module = f"{package}.{import_path.stem}"
spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
my_module = importlib.util.module_from_spec(spec)
my_module.__package__ = package
try:
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
except ModuleNotFoundError as e:
LOGGER.error(
f"Could not load extensions from {import_path} due to missing python packages; {e}"
)
runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
converters = ExtensionManager("converters")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
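# A minimal sketch of the registration contract that scan_for_extensions looks
# for; the class below is a hypothetical stand-in, not one of the real runner
# implementations shipped with the toolkit.
def _register_extension_example():
    class _ExampleRunner:
        pass

    runners.register_extension("example", _ExampleRunner)
    assert runners.get("example") is _ExampleRunner
    assert "example" in runners.supported_extensions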
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | context_manager_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.utils.context_manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from object_detection.utils import context_manager
class ContextManagerTest(tf.test.TestCase):
def test_identity_context_manager(self):
with context_manager.IdentityContextManager() as identity_context:
self.assertIsNone(identity_context)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow/Classification/ConvNets/model/layers | layers | normalization | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import tensorflow as tf
from tensorflow.python.training import moving_averages
__all__ = ['batch_norm']
def batch_norm(
inputs,
decay=0.999,
epsilon=0.001,
scale=False,
center=True,
is_training=True,
data_format='NHWC',
param_initializers=None
):
"""Adds a Batch Normalization layer."""
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if param_initializers is not None:
for key, initializer in param_initializers.items():
if key not in ['beta', 'gamma', 'moving_mean', 'moving_variance']:
raise ValueError("Unknown key received: `%s`" % key)
if inspect.isclass(initializer):
initializer = initializer()
param_initializers[key] = initializer
if initializer.__class__.__module__ != 'tensorflow.python.ops.init_ops':
raise ValueError("The object `%s` is not a Tensor initializer" % str(initializer))
input_shape = inputs.get_shape()
input_rank = input_shape.ndims
input_channels = input_shape[1]
if input_rank == 2:
if data_format == 'NCHW':
new_shape = [-1, input_channels, 1, 1]
else:
new_shape = [-1, 1, 1, input_channels]
inputs = tf.reshape(inputs, new_shape)
net = tf.contrib.layers.batch_norm(
inputs,
decay=decay,
scale=scale,
epsilon=epsilon,
is_training=is_training,
trainable=is_training,
fused=True,
data_format=data_format,
center=center,
param_initializers=param_initializers
)
if input_rank == 2:
net = tf.reshape(net, [-1, input_channels])
return net |
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/scripts | scripts | benchmark | #! /bin/bash
NUM_GPUS=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
[ $NUM_GPUS -eq 16 ] && WORKER_NUMS=(1 8 16) || WORKER_NUMS=(1 8)
DATASETS=(electricity volatility traffic favorita)
rm -r /tmp/benchmark_results
for DATASET in ${DATASETS[@]}
do
for NGPU in ${WORKER_NUMS[@]}
do
for BATCH_SIZE in 512 1024 1536 2048 2560
do
for USE_AMP in --use_amp ""
do
for AFFINITY in "--affinity disabled" "--affinity single" "--affinity socket_unique_interleaved"
do
EXP_NAME="TFT_benchmark_${DATASET}_BS_${BATCH_SIZE}_${NGPU}GPU${USE_AMP}_${AFFINITY}"
python -m torch.distributed.run --nproc_per_node=${NGPU} train.py \
--dataset ${DATASET} \
--data_path /data/processed/${DATASET}_bin \
--batch_size=${BATCH_SIZE} \
--lr 5e-4 \
--epochs 1 \
--sample 100000 5000 \
--seed 1 \
${USE_AMP} \
${AFFINITY} \
--clip_grad 0.1 \
--results /tmp/benchmark_results/${EXP_NAME}
done
done
done
done
done
for P in `ls /tmp/benchmark_results/`;
do
echo ${P}
tail -n 1 /tmp/benchmark_results/${P}/dllogger.json
done
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/bermuda | bermuda | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. |
PyTorch/Recommendation/DLRM/dlrm/nn | nn | mlps | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Sequence, List, Iterable
import apex.mlp
import torch
from torch import nn
class AmpMlpFunction(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(*args, **kwargs):
return apex.mlp.MlpFunction.forward(*args, **kwargs)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(*args, **kwargs):
return apex.mlp.MlpFunction.backward(*args, **kwargs)
mlp_function = AmpMlpFunction.apply
class AmpMlp(apex.mlp.MLP):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
return mlp_function(self.bias, self.activation, input, *self.weights, *self.biases)
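# A minimal usage sketch, assuming apex is built with its C++ MLP extension and
# a CUDA device is available; the layer sizes here are illustrative only. Under
# torch.cuda.amp.autocast, the custom_fwd/custom_bwd decorators above cast the
# MLP inputs to half precision.
def _amp_mlp_example():
    mlp = AmpMlp([16, 64, 8]).cuda()
    with torch.cuda.amp.autocast():
        out = mlp(torch.rand(4, 16, device='cuda'))
    assert out.shape == (4, 8)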
class AbstractMlp(nn.Module):
"""
MLP interface used for configuration-agnostic checkpointing (`dlrm.utils.checkpointing`)
and for easy swapping of MLP implementations
"""
@property
def weights(self) -> List[torch.Tensor]:
"""
Getter for all MLP layers weights (without biases)
"""
raise NotImplementedError()
@property
def biases(self) -> List[torch.Tensor]:
"""
Getter for all MLP layers biases
"""
raise NotImplementedError()
def forward(self, mlp_input: torch.Tensor) -> torch.Tensor:
raise NotImplementedError()
def load_state(self, weights: Iterable[torch.Tensor], biases: Iterable[torch.Tensor]):
for new_weight, weight, new_bias, bias in zip(weights, self.weights, biases, self.biases):
weight.data = new_weight.data
weight.data.requires_grad_()
bias.data = new_bias.data
bias.data.requires_grad_()
class TorchMlp(AbstractMlp):
def __init__(self, input_dim: int, sizes: Sequence[int]):
super().__init__()
layers = []
for output_dims in sizes:
layers.append(nn.Linear(input_dim, output_dims))
layers.append(nn.ReLU(inplace=True))
input_dim = output_dims
self.layers = nn.Sequential(*layers)
self._initialize_weights()
def _initialize_weights(self):
for module in self.modules():
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight.data, 0., math.sqrt(2. / (module.in_features + module.out_features)))
nn.init.normal_(module.bias.data, 0., math.sqrt(1. / module.out_features))
@property
def weights(self):
return [layer.weight for layer in self.layers if isinstance(layer, nn.Linear)]
@property
def biases(self):
return [layer.bias for layer in self.layers if isinstance(layer, nn.Linear)]
def forward(self, mlp_input: torch.Tensor) -> torch.Tensor:
"""
Args:
mlp_input (Tensor): with shape [batch_size, num_features]
Returns:
Tensor: Mlp output in shape [batch_size, num_output_features]
"""
return self.layers(mlp_input)
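# A minimal usage sketch for TorchMlp (the dimensions are illustrative and do
# not correspond to any DLRM configuration shipped with this repository).
def _torch_mlp_example():
    mlp = TorchMlp(input_dim=16, sizes=[64, 32, 8])
    out = mlp(torch.rand(4, 16))
    assert out.shape == (4, 8)
    assert len(mlp.weights) == 3 and len(mlp.biases) == 3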
class CppMlp(AbstractMlp):
def __init__(self, input_dim: int, sizes: Sequence[int]):
super().__init__()
self.mlp = AmpMlp([input_dim] + list(sizes))
@property
def weights(self):
return self.mlp.weights
@property
def biases(self):
return self.mlp.biases
def forward(self, mlp_input: torch.Tensor) -> torch.Tensor:
"""
Args:
mlp_input (Tensor): with shape [batch_size, num_features]
Returns:
Tensor: Mlp output in shape [batch_size, num_output_features]
"""
return self.mlp(mlp_input)
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | feature_map_generators | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to generate a list of feature maps based on image features.
Provides several feature map generators that can be used to build object
detection feature extractors.
Object detection feature extractors usually are built by stacking two components
- A base feature extractor such as Inception V3 and a feature map generator.
Feature map generators build on the base feature extractors and produce a list
of final feature maps.
"""
import collections
import functools
import tensorflow as tf
from object_detection.utils import ops
slim = tf.contrib.slim
def get_depth_fn(depth_multiplier, min_depth):
"""Builds a callable to compute depth (output channels) of conv filters.
Args:
depth_multiplier: a multiplier for the nominal depth.
min_depth: a lower bound on the depth of filters.
Returns:
A callable that takes in a nominal depth and returns the depth to use.
"""
def multiply_depth(depth):
new_depth = int(depth * depth_multiplier)
return max(new_depth, min_depth)
return multiply_depth
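# A minimal sketch of the returned callable; the multiplier and depths are
# illustrative values, not taken from any model config in this repository.
def _depth_fn_example():
  depth_fn = get_depth_fn(depth_multiplier=0.5, min_depth=16)
  assert depth_fn(64) == 32  # int(64 * 0.5)
  assert depth_fn(16) == 16  # int(16 * 0.5) = 8, clamped up to min_depth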
class KerasMultiResolutionFeatureMaps(tf.keras.Model):
"""Generates multi resolution feature maps from input image features.
A Keras model that generates multi-scale feature maps for detection as in the
SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.
More specifically, when called on inputs it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
By default convolution kernel size is set to 3, and it can be customized
by caller.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
When this feature generator object is called on input image_features:
Args:
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
def __init__(self,
feature_map_layout,
depth_multiplier,
min_depth,
insert_1x1_conv,
is_training,
conv_hyperparams,
freeze_batchnorm,
name=None):
"""Constructor.
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
If 'from_layer' is specified, the specified feature map is directly used
as a box predictor layer, and the layer_depth is directly inferred from
the feature map (instead of using the provided 'layer_depth' parameter).
In this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution
operations. Note that the current implementation only supports
generating new layers using convolutions of stride 2 (resulting in a
spatial resolution reduction by a factor of 2), and will be extended to
a more flexible design. Convolution kernel size is set to 3 by default,
and can be customized by the 'conv_kernel_size' parameter (similarly,
'conv_kernel_size' should be set to -1 if 'from_layer' is specified).
The created convolution operation will be a normal 2D convolution by
default, and a depthwise convolution followed by 1x1 convolution if
'use_depthwise' is set to True.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1
convolution should be inserted before shrinking the feature map.
is_training: Indicates whether the feature generator is in training mode.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(KerasMultiResolutionFeatureMaps, self).__init__(name=name)
self.feature_map_layout = feature_map_layout
self.convolutions = []
depth_fn = get_depth_fn(depth_multiplier, min_depth)
base_from_layer = ''
use_explicit_padding = False
if 'use_explicit_padding' in feature_map_layout:
use_explicit_padding = feature_map_layout['use_explicit_padding']
use_depthwise = False
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, from_layer in enumerate(feature_map_layout['from_layer']):
net = []
layer_depth = feature_map_layout['layer_depth'][index]
conv_kernel_size = 3
if 'conv_kernel_size' in feature_map_layout:
conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
if from_layer:
base_from_layer = from_layer
else:
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth / 2))
net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth / 2),
[1, 1],
padding='SAME',
strides=1,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
base_from_layer, index, conv_kernel_size, conv_kernel_size,
depth_fn(layer_depth))
stride = 2
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
# We define this function here while capturing the value of
# conv_kernel_size, to avoid holding a reference to the loop variable
# conv_kernel_size inside of a lambda function
def fixed_padding(features, kernel_size=conv_kernel_size):
return ops.fixed_padding(features, kernel_size)
net.append(tf.keras.layers.Lambda(fixed_padding))
# TODO(rathodv): Add some utilities to simplify the creation of
# Depthwise & non-depthwise convolutions w/ normalization & activations
if use_depthwise:
net.append(tf.keras.layers.DepthwiseConv2D(
[conv_kernel_size, conv_kernel_size],
depth_multiplier=1,
padding=padding,
strides=stride,
name=layer_name + '_depthwise_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_depthwise_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name + '_depthwise'))
net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth), [1, 1],
padding='SAME',
strides=1,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
else:
net.append(tf.keras.layers.Conv2D(
depth_fn(layer_depth),
[conv_kernel_size, conv_kernel_size],
padding=padding,
strides=stride,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
# Until certain bugs are fixed in checkpointable lists,
# this net must be appended only once it's been filled with layers
self.convolutions.append(net)
def call(self, image_features):
"""Generate the multi-resolution feature maps.
Executed when calling the `.__call__` method on input.
Args:
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
feature_maps = []
feature_map_keys = []
for index, from_layer in enumerate(self.feature_map_layout['from_layer']):
if from_layer:
feature_map = image_features[from_layer]
feature_map_keys.append(from_layer)
else:
feature_map = feature_maps[-1]
for layer in self.convolutions[index]:
feature_map = layer(feature_map)
layer_name = self.convolutions[index][-1].name
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
def multi_resolution_feature_maps(feature_map_layout, depth_multiplier,
min_depth, insert_1x1_conv, image_features,
pool_residual=False):
"""Generates multi resolution feature maps from input image features.
Generates multi-scale feature maps for detection as in the SSD papers by
Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.
More specifically, it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
By default convolution kernel size is set to 3, and it can be customized
by caller.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
If 'from_layer' is specified, the specified feature map is directly used
as a box predictor layer, and the layer_depth is directly inferred from the
feature map (instead of using the provided 'layer_depth' parameter). In
this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution operations.
Note that the current implementation only supports generating new layers
using convolutions of stride 2 (resulting in a spatial resolution
reduction by a factor of 2), and will be extended to a more flexible
design. Convolution kernel size is set to 3 by default, and can be
customized by the 'conv_kernel_size' parameter (similarly, 'conv_kernel_size'
should be set to -1 if 'from_layer' is specified). The created convolution
operation will be a normal 2D convolution by default, and a depthwise
convolution followed by 1x1 convolution if 'use_depthwise' is set to True.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
should be inserted before shrinking the feature map.
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
pool_residual: Whether to add an average pooling layer followed by a
residual connection between subsequent feature maps when the channel
depth match. For example, with option 'layer_depth': [-1, 512, 256, 256],
a pooling and residual layer is added between the third and fourth feature
map. This option is better used with Weight Shared Convolution Box
Predictor when all feature maps have the same channel depth to encourage
more consistent features across multi-scale feature maps.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
ValueError: if the number entries in 'from_layer' and
'layer_depth' do not match.
ValueError: if the generated layer does not have the same resolution
as specified.
"""
depth_fn = get_depth_fn(depth_multiplier, min_depth)
feature_map_keys = []
feature_maps = []
base_from_layer = ''
use_explicit_padding = False
if 'use_explicit_padding' in feature_map_layout:
use_explicit_padding = feature_map_layout['use_explicit_padding']
use_depthwise = False
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, from_layer in enumerate(feature_map_layout['from_layer']):
layer_depth = feature_map_layout['layer_depth'][index]
conv_kernel_size = 3
if 'conv_kernel_size' in feature_map_layout:
conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
if from_layer:
feature_map = image_features[from_layer]
base_from_layer = from_layer
feature_map_keys.append(from_layer)
else:
pre_layer = feature_maps[-1]
pre_layer_depth = pre_layer.get_shape().as_list()[3]
intermediate_layer = pre_layer
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth / 2))
intermediate_layer = slim.conv2d(
pre_layer,
depth_fn(layer_depth / 2), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
base_from_layer, index, conv_kernel_size, conv_kernel_size,
depth_fn(layer_depth))
stride = 2
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
intermediate_layer = ops.fixed_padding(
intermediate_layer, conv_kernel_size)
if use_depthwise:
feature_map = slim.separable_conv2d(
intermediate_layer,
None, [conv_kernel_size, conv_kernel_size],
depth_multiplier=1,
padding=padding,
stride=stride,
scope=layer_name + '_depthwise')
feature_map = slim.conv2d(
feature_map,
depth_fn(layer_depth), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
if pool_residual and pre_layer_depth == depth_fn(layer_depth):
feature_map += slim.avg_pool2d(
pre_layer, [3, 3],
padding='SAME',
stride=2,
scope=layer_name + '_pool')
else:
feature_map = slim.conv2d(
intermediate_layer,
depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size],
padding=padding,
stride=stride,
scope=layer_name)
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
def fpn_top_down_feature_maps(image_features,
depth,
use_depthwise=False,
use_explicit_padding=False,
scope=None):
"""Generates `top-down` feature maps for Feature Pyramid Networks.
See https://arxiv.org/abs/1612.03144 for details.
Args:
image_features: list of tuples of (tensor_name, image_feature_tensor).
Spatial resolutions of successive tensors must reduce exactly by a factor
of 2.
depth: depth of output feature maps.
use_depthwise: whether to use depthwise separable conv instead of regular
conv.
use_explicit_padding: whether to use explicit padding.
scope: A scope name to wrap this op under.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
with tf.name_scope(scope, 'top_down'):
num_levels = len(image_features)
output_feature_maps_list = []
output_feature_map_keys = []
padding = 'VALID' if use_explicit_padding else 'SAME'
kernel_size = 3
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d], padding=padding, stride=1):
top_down = slim.conv2d(
image_features[-1][1],
depth, [1, 1], activation_fn=None, normalizer_fn=None,
scope='projection_%d' % num_levels)
output_feature_maps_list.append(top_down)
output_feature_map_keys.append(
'top_down_%s' % image_features[-1][0])
for level in reversed(range(num_levels - 1)):
top_down = ops.nearest_neighbor_upsampling(top_down, 2)
residual = slim.conv2d(
image_features[level][1], depth, [1, 1],
activation_fn=None, normalizer_fn=None,
scope='projection_%d' % (level + 1))
if use_explicit_padding:
# slice top_down to the same shape as residual
residual_shape = tf.shape(residual)
top_down = top_down[:, :residual_shape[1], :residual_shape[2], :]
top_down += residual
if use_depthwise:
conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1)
else:
conv_op = slim.conv2d
if use_explicit_padding:
top_down = ops.fixed_padding(top_down, kernel_size)
output_feature_maps_list.append(conv_op(
top_down,
depth, [kernel_size, kernel_size],
scope='smoothing_%d' % (level + 1)))
output_feature_map_keys.append('top_down_%s' % image_features[level][0])
return collections.OrderedDict(reversed(
list(zip(output_feature_map_keys, output_feature_maps_list))))
def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers,
image_features, replace_pool_with_conv=False):
"""Generates pooling pyramid feature maps.
The pooling pyramid feature map generator is motivated by
multi_resolution_feature_maps. The main difference is that it is simpler and
reduces the number of free parameters.
More specifically:
- Instead of using convolutions to shrink the feature map, it uses max
pooling, therefore totally gets rid of the parameters in convolution.
- By pooling feature from larger map up to a single cell, it generates
features in the same feature space.
- Instead of independently making box predictions from individual maps, it
shares the same classifier across different feature maps, therefore reduces
the "mis-calibration" across different scales.
See go/ppn-detection for more details.
Args:
base_feature_map_depth: Depth of the base feature before the max pooling.
num_layers: Number of layers used to make predictions. They are pooled
from the base feature.
image_features: A dictionary of handles to activation tensors from the
feature extractor.
replace_pool_with_conv: Whether or not to replace pooling operations with
convolutions in the PPN. Default is False.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
ValueError: image_features does not contain exactly one entry
"""
if len(image_features) != 1:
raise ValueError('image_features should be a dictionary of length 1.')
image_features = image_features[list(image_features.keys())[0]]
feature_map_keys = []
feature_maps = []
feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth
if base_feature_map_depth > 0:
image_features = slim.conv2d(
image_features,
base_feature_map_depth,
[1, 1], # kernel size
padding='SAME', stride=1, scope=feature_map_key)
# Add a 1x1 max-pooling node (a no op node) immediately after the conv2d for
# TPU v1 compatibility. Without the following dummy op, TPU runtime
# compiler will combine the convolution with one max-pooling below into a
# single cycle, so getting the conv2d feature becomes impossible.
image_features = slim.max_pool2d(
image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(image_features)
feature_map = image_features
if replace_pool_with_conv:
with slim.arg_scope([slim.conv2d], padding='SAME', stride=2):
for i in range(num_layers - 1):
feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i,
base_feature_map_depth)
feature_map = slim.conv2d(
feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(feature_map)
else:
with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2):
for i in range(num_layers - 1):
feature_map_key = 'MaxPool2d_%d_2x2' % i
feature_map = slim.max_pool2d(
feature_map, [2, 2], padding='SAME', scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
|
PyTorch/Classification/ConvNets/resnet50v1.5/training/FP32 | FP32 | DGX2V_resnet50_FP32_90E | python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision FP32 --mode convergence --platform DGX2V /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
|
PyTorch/Segmentation/MaskRCNN/pytorch/notebooks | notebooks | README | ## Jupyter demo notebooks
This folder contains demo notebooks for the MaskRCNN model.
1 - pytorch_MaskRCNN_pyt_train_and_inference.ipynb: end-to-end training and inference demo.
The most convenient way to make use of this notebook is via a Docker container, which provides a self-contained, isolated, and reproducible environment for all experiments. The steps to follow are:
First, clone the repository:
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/Segmentation/MaskRCNN
```
Next, build the NVIDIA Mask R-CNN container:
```
cd pytorch
docker build --rm -t nvidia_joc_maskrcnn_pt .
```
Then launch the container with:
```
PATH_TO_COCO='/path/to/coco-2014'
MOUNT_LOCATION='/datasets/data'
NAME='nvidia_maskrcnn'
docker run -it --runtime=nvidia -p 8888:8888 -v $PATH_TO_COCO:/$MOUNT_LOCATION --rm --name=$NAME --shm-size=10g --ulimit memlock=-1 --ulimit stack=67108864 --ipc=host nvidia_joc_maskrcnn_pt
```
where `/path/to/coco-2014` is the path on the host machine where the data was/is to be downloaded.
Within the docker interactive bash session, start Jupyter with
`jupyter notebook --ip 0.0.0.0 --port 8888`
Then open the Jupyter interface on your host machine at http://localhost:8888. Within the container, this notebook is located at /workspace/object_detection/notebooks.
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda | cuda | rpn_generate_proposals | /**
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <ATen/ATen.h>
#include <THC/THC.h>
#include <vector>
namespace rpn {
/**
* Generate boxes associated to topN pre-NMS scores
*/
std::vector<at::Tensor> GeneratePreNMSUprightBoxes(
const int num_images,
const int A,
const int H,
const int W,
at::Tensor& sorted_indices, // topK sorted pre_nms_topn indices
at::Tensor& sorted_scores, // topK sorted pre_nms_topn scores [N, A, H, W]
at::Tensor& bbox_deltas, // [N, A*4, H, W] (full, unsorted / sliced)
at::Tensor& anchors, // input (full, unsorted, unsliced)
at::Tensor& image_shapes, // (h, w) of images
const int pre_nms_nboxes,
const int rpn_min_size,
const float bbox_xform_clip_default,
const bool correct_transform_coords,
const bool is_channels_last);
} // namespace rpn
|
PyTorch/LanguageModeling/BART/bart/configuration | configuration | configuration_utils | # coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import logging
import os
from typing import Any, Dict, Tuple
from utils.file_utils import CONFIG_NAME, cached_path, hf_bucket_url, is_remote_url
logger = logging.getLogger(__name__)
class PretrainedConfig(object):
r""" Base class for all configuration classes.
Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving
configurations.
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights.
It only affects the model's configuration.
Class attributes (overridden by derived classes)
- **model_type** (:obj:`str`): An identifier for the model type, serialized into the JSON file, and used to
recreate the correct object in :class:`~transformers.AutoConfig`.
Args:
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all hidden-states.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should returns all attentions.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
add_cross_attention (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models that can be used as decoder models within the `:class:~transformers.EncoderDecoderModel` class, which consists of all models in ``AUTO_MODELS_FOR_CAUSAL_LM``.
tie_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder and decoder model to have the exact same parameter names.
prune_heads (:obj:`Dict[int, List[int]]`, `optional`, defaults to :obj:`{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list
of heads to prune in said layer.
For instance ``{1: [0, 2], 2: [2, 3]}`` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer
2.
xla_device (:obj:`bool`, `optional`):
A flag to indicate if TPUs are available or not.
chunk_size_feed_forward (:obj:`int`, `optional`, defaults to :obj:`0`):
The chunk size of all feed forward layers in the residual attention blocks.
A chunk size of :obj:`0` means that the feed forward layer is not chunked.
A chunk size of n means that the feed forward layer processes :obj:`n` < sequence_length embeddings at a time.
For more information on feed forward chunking, see `How does Feed Forward Chunking work? <../glossary.html#feed-forward-chunking>`__ .
Parameters for sequence generation
- **max_length** (:obj:`int`, `optional`, defaults to 20) -- Maximum length that will be used by
default in the :obj:`generate` method of the model.
- **min_length** (:obj:`int`, `optional`, defaults to 10) -- Minimum length that will be used by
default in the :obj:`generate` method of the model.
- **do_sample** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default in
the :obj:`generate` method of the model. Whether or not to use sampling; use greedy decoding otherwise.
- **early_stopping** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by
default in the :obj:`generate` method of the model. Whether to stop the beam search when at least
``num_beams`` sentences are finished per batch or not.
- **num_beams** (:obj:`int`, `optional`, defaults to 1) -- Number of beams for beam search that will be
used by default in the :obj:`generate` method of the model. 1 means no beam search.
- **temperature** (:obj:`float`, `optional`, defaults to 1) -- The value used to module the next token
probabilities that will be used by default in the :obj:`generate` method of the model. Must be strictly
positive.
- **top_k** (:obj:`int`, `optional`, defaults to 50) -- Number of highest probability vocabulary tokens to
keep for top-k-filtering that will be used by default in the :obj:`generate` method of the model.
- **top_p** (:obj:`float`, `optional`, defaults to 1) -- Value that will be used by default in the
:obj:`generate` method of the model for ``top_p``. If set to float < 1, only the most probable tokens
with probabilities that add up to ``top_p`` or higher are kept for generation.
- **repetition_penalty** (:obj:`float`, `optional`, defaults to 1) -- Parameter for repetition penalty
that will be used by default in the :obj:`generate` method of the model. 1.0 means no penalty.
- **length_penalty** (:obj:`float`, `optional`, defaults to 1) -- Exponential penalty to the length that
will be used by default in the :obj:`generate` method of the model.
- **no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by default
in the :obj:`generate` method of the model for ``no_repeat_ngram_size``. If set to int > 0, all ngrams of
that size can only occur once.
- **bad_words_ids** (:obj:`List[int]`, `optional`) -- List of token ids that are not allowed to be
generated that will be used by default in the :obj:`generate` method of the model. In order to get the
tokens of the words that should not appear in the generated text, use
:obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
- **num_return_sequences** (:obj:`int`, `optional`, defaults to 1) -- Number of independently computed
returned sequences for each element in the batch that will be used by default in the :obj:`generate`
method of the model.
Parameters for fine-tuning tasks
- **architectures** (:obj:`List[str]`, `optional`) -- Model architectures that can be used with the
model pretrained weights.
- **finetuning_task** (:obj:`str`, `optional`) -- Name of the task used to fine-tune the model. This can be
used when converting from an original (TensorFlow or PyTorch) checkpoint.
- **id2label** (:obj:`Dict[int, str]`, `optional`) -- A map from index (for instance prediction index, or target
index) to label.
- **label2id** (:obj:`Dict[str, int]`, `optional`) -- A map from label to index for the model.
- **num_labels** (:obj:`int`, `optional`) -- Number of labels to use in the last layer added to the model,
typically for a classification task.
- **task_specific_params** (:obj:`Dict[str, Any]`, `optional`) -- Additional keyword arguments to store for
the current task.
Parameters linked to the tokenizer
- **prefix** (:obj:`str`, `optional`) -- A specific prompt that should be added at the beginning of each
text before calling the model.
- **bos_token_id** (:obj:`int`, `optional`) -- The id of the `beginning-of-stream` token.
- **pad_token_id** (:obj:`int`, `optional`) -- The id of the `padding` token.
- **eos_token_id** (:obj:`int`, `optional`) -- The id of the `end-of-stream` token.
- **decoder_start_token_id** (:obj:`int`, `optional`) -- If an encoder-decoder model starts decoding with
a different token than `bos`, the id of that token.
PyTorch specific parameters
- **torchscript** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should be
used with TorchScript.
TensorFlow specific parameters
- **use_bfloat16** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should
use BFloat16 scalars (only used by some TensorFlow models).
"""
model_type: str = ""
def __init__(self, **kwargs):
# Attributes with defaults
self.return_dict = kwargs.pop("return_dict", False)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.use_cache = kwargs.pop("use_cache", True) # Not used by all models
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
# `is_decoder` is used in encoder-decoder models to differentiate the encoder from the decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# TPU arguments
self.xla_device = kwargs.pop("xla_device", None)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error("Can't set {} with value {} for {}".format(key, value, self))
raise err
@property
def use_return_dict(self) -> bool:
"""
:obj:`bool`: Whether or not to return :class:`~transformers.file_utils.ModelOutput` instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
:obj:`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
self.id2label = {i: "LABEL_{}".format(i) for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
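# For illustration (not part of the original implementation): setting
# `config.num_labels = 3` through this setter yields
#   id2label == {0: "LABEL_0", 1: "LABEL_1", 2: "LABEL_2"}
#   label2id == {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}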
def save_pretrained(self, save_directory: str):
"""
Save a configuration object to the directory ``save_directory``, so that it can be re-loaded using the
:func:`~transformers.PretrainedConfig.from_pretrained` class method.
Args:
save_directory (:obj:`str`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
"""
if os.path.isfile(save_directory):
raise AssertionError("Provided path ({}) should be a directory, not a file".format(save_directory))
os.makedirs(save_directory, exist_ok=True)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info("Configuration saved in {}".format(output_config_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs) -> "PretrainedConfig":
r"""
Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pretrained model
configuration.
Args:
pretrained_model_name_or_path (:obj:`str`):
This can be either:
- the `shortcut name` of a pretrained model configuration to load from cache or download, e.g.,
``bert-base-uncased``.
- the `identifier name` of a pretrained model configuration that was uploaded to our S3 by any user,
e.g., ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a configuration file saved using the
:func:`~transformers.PretrainedConfig.save_pretrained` method, e.g., ``./my_model_directory/``.
- a path or url to a saved configuration JSON `file`, e.g.,
``./my_model_directory/configuration.json``.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the configuration files and override the cached versions
if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete an incompletely received file. Attempts to resume the download if such a file
exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`.
The proxies are used on each request.
return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`False`, then this function returns just the final configuration object.
If :obj:`True`, then this function returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs`
is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e.,
the part of ``kwargs`` which has not been used to update ``config`` and is otherwise ignored.
kwargs (:obj:`Dict[str, Any]`, `optional`):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is
controlled by the ``return_unused_kwargs`` keyword parameter.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from this pretrained model.
Examples::
# We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
config = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True,
foo=False, return_unused_kwargs=True)
assert config.output_attentions == True
assert unused_kwargs == {'foo': False}
"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
From a ``pretrained_model_name_or_path``, resolve to a dictionary of parameters, to be used
for instantiating a :class:`~transformers.PretrainedConfig` using ``from_dict``.
Parameters:
pretrained_model_name_or_path (:obj:`str`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
:obj:`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
config_dict = cls._dict_from_json_file(resolved_config_file)
except EnvironmentError:
msg = (
f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n"
)
raise EnvironmentError(msg)
except json.JSONDecodeError:
msg = (
"Couldn't reach server at '{}' to download configuration file or "
"configuration file is not a valid JSON file. "
"Please check network or file content here: {}.".format(config_file, resolved_config_file)
)
raise EnvironmentError(msg)
if resolved_config_file == config_file:
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading configuration file {} from cache at {}".format(config_file, resolved_config_file))
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from a Python dictionary of parameters.
Args:
config_dict (:obj:`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
:func:`~transformers.PretrainedConfig.get_config_dict` method.
kwargs (:obj:`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from those parameters.
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info("Model config %s", str(config))
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: str) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from the path to a JSON file of parameters.
Args:
json_file (:obj:`str`):
Path to the JSON file containing the parameters.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: str):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "{} {}".format(self.__class__.__name__, self.to_json_string())
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from config which correspond to the default
config attributes for better readability and serializes to a Python
dictionary.
Returns:
:obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if key not in default_config_dict or value != default_config_dict[key]:
serializable_config_dict[key] = value
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
:obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON string.
Returns:
:obj:`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: str, use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (:obj:`str`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from ``config_dict``.
Args:
config_dict (:obj:`Dict[str, Any]`): Dictionary of attributes that shall be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
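# For illustration (not part of the original implementation):
#   config.update({"output_attentions": True, "num_beams": 4})
#   assert config.output_attentions is True and config.num_beams == 4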
|
PyTorch/Recommendation/DLRM/triton | triton | deployer | #!/usr/bin/python
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import sys
import torch
import numpy as np
from dlrm.data.datasets import SyntheticDataset
from dlrm.model.distributed import DistributedDlrm
from dlrm.utils.checkpointing.distributed import make_distributed_checkpoint_loader
from dlrm.utils.distributed import get_gpu_batch_sizes, get_device_mapping, is_main_process
from triton import deployer_lib
sys.path.append('../')
def get_model_args(model_args):
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=1, type=int)
parser.add_argument("--fp16", action="store_true", default=False)
parser.add_argument("--dump_perf_data", type=str, default=None)
parser.add_argument("--model_checkpoint", type=str, default=None)
parser.add_argument("--num_numerical_features", type=int, default=13)
parser.add_argument("--embedding_dim", type=int, default=128)
parser.add_argument("--embedding_type", type=str, default="joint", choices=["joint", "multi_table"])
parser.add_argument("--top_mlp_sizes", type=int, nargs="+",
default=[1024, 1024, 512, 256, 1])
parser.add_argument("--bottom_mlp_sizes", type=int, nargs="+",
default=[512, 256, 128])
parser.add_argument("--interaction_op", type=str, default="dot",
choices=["dot", "cat"])
parser.add_argument("--cpu", default=False, action="store_true")
parser.add_argument("--dataset", type=str, required=True)
return parser.parse_args(model_args)
def initialize_model(args, categorical_sizes, device_mapping):
''' return model, ready to trace '''
device = "cuda:0" if not args.cpu else "cpu"
model_config = {
'top_mlp_sizes': args.top_mlp_sizes,
'bottom_mlp_sizes': args.bottom_mlp_sizes,
'embedding_dim': args.embedding_dim,
'interaction_op': args.interaction_op,
'categorical_feature_sizes': categorical_sizes,
'num_numerical_features': args.num_numerical_features,
'embedding_type': args.embedding_type,
'hash_indices': False,
'use_cpp_mlp': False,
'fp16': args.fp16,
'device': device,
}
model = DistributedDlrm.from_dict(model_config)
model.to(device)
if args.model_checkpoint:
checkpoint_loader = make_distributed_checkpoint_loader(device_mapping=device_mapping, rank=0)
checkpoint_loader.load_checkpoint(model, args.model_checkpoint)
model.to(device)
if args.fp16:
model = model.half()
return model
def get_dataloader(args, categorical_sizes):
dataset_test = SyntheticDataset(num_entries=2000,
batch_size=args.batch_size,
numerical_features=args.num_numerical_features,
categorical_feature_sizes=categorical_sizes,
device="cpu" if args.cpu else "cuda:0")
class RemoveOutput:
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, idx):
value = self.dataset[idx]
if args.fp16:
value = (value[0].half(), value[1].long(), value[2])
else:
value = (value[0], value[1].long(), value[2])
return value[:-1]
def __len__(self):
return len(self.dataset)
test_loader = torch.utils.data.DataLoader(RemoveOutput(dataset_test),
batch_size=None,
num_workers=0,
pin_memory=False)
return test_loader
def main():
# deploys and returns removed deployer arguments
deployer, model_args = deployer_lib.create_deployer(sys.argv[1:],
get_model_args)
with open(os.path.join(model_args.dataset, "model_size.json")) as f:
categorical_sizes = list(json.load(f).values())
categorical_sizes = [s + 1 for s in categorical_sizes]
categorical_sizes = np.array(categorical_sizes)
device_mapping = get_device_mapping(categorical_sizes, num_gpus=1)
categorical_sizes = categorical_sizes[device_mapping['embedding'][0]].tolist()
model = initialize_model(model_args, categorical_sizes, device_mapping)
dataloader = get_dataloader(model_args, categorical_sizes)
if model_args.dump_perf_data:
input_0, input_1 = next(iter(dataloader))
if model_args.fp16:
input_0 = input_0.half()
os.makedirs(model_args.dump_perf_data, exist_ok=True)
input_0.detach().cpu().numpy()[0].tofile(os.path.join(model_args.dump_perf_data, "input__0"))
input_1.detach().cpu().numpy()[0].tofile(os.path.join(model_args.dump_perf_data, "input__1"))
deployer.deploy(dataloader, model)
if __name__=='__main__':
main()
|
PyTorch/SpeechSynthesis/FastPitch/common/text/zh | zh | mandarin_text_processing | import re
import numpy as np
from .chinese import split_text, is_chinese, chinese_text_to_symbols
from ..text_processing import TextProcessing
class MandarinTextProcessing(TextProcessing):
def __init__(self, symbol_set, cleaner_names, p_arpabet=0.0,
handle_arpabet='word', handle_arpabet_ambiguous='ignore',
expand_currency=True):
super().__init__(symbol_set, cleaner_names, p_arpabet, handle_arpabet,
handle_arpabet_ambiguous, expand_currency)
def sequence_to_text(self, sequence):
result = ''
tmp = ''
for symbol_id in sequence:
if symbol_id in self.id_to_symbol:
s = self.id_to_symbol[symbol_id]
# Enclose ARPAbet and mandarin phonemes back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
elif len(s) > 1 and s[0] == '#' and s[1].isdigit(): # mandarin tone
tmp += s[1] + '} '
result += tmp
tmp = ''
elif len(s) > 1 and s[0] == '#' and (s[1].isalpha() or s[1] == '^'): # mandarin phoneme
if tmp == '':
tmp += ' {' + s[1:] + ' '
else:
tmp += s[1:] + ' '
elif len(s) > 1 and s[0] == '#': # chinese punctuation
s = s[1]
result += s
else:
result += s
return result.replace('}{', ' ').replace(' ', ' ')
def chinese_symbols_to_sequence(self, symbols):
return self.symbols_to_sequence(['#' + s for s in symbols])
def encode_text(self, text, return_all=False):
# split the text into English and Chinese segments
segments = [segment for segment in split_text(text) if segment != ""]
text_encoded = []
text_clean = ""
text_arpabet = ""
for segment in segments:
if is_chinese(segment[0]): # process the Chinese segment
chinese_symbols, segment_arpabet = chinese_text_to_symbols(segment)
segment_encoded = self.chinese_symbols_to_sequence(chinese_symbols)
segment_clean = segment
segment_encoded = segment_encoded
else: # process the English segment
segment_encoded, segment_clean, segment_arpabet = \
super().encode_text(segment, return_all=True)
text_encoded += segment_encoded
text_clean += segment_clean
text_arpabet += segment_arpabet
if return_all:
return text_encoded, text_clean, text_arpabet
return text_encoded |
PyTorch/SpeechRecognition/QuartzNet/platform | platform | DGX2_QuartzNet_FP32_8GPU | #!/bin/bash
set -a
: ${NUM_GPUS:=8}
: ${GPU_BATCH_SIZE:=36}
: ${GRAD_ACCUMULATION:=4}
: ${AMP:=false}
bash scripts/train.sh "$@"
|
PyTorch/Recommendation/DLRM/dlrm/data | data | defaults | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
CATEGORICAL_CHANNEL = "categorical"
NUMERICAL_CHANNEL = "numerical"
LABEL_CHANNEL = "label"
SPLIT_BINARY = "split_binary"
TRAIN_MAPPING = "train"
TEST_MAPPING = "test"
TYPE_SELECTOR = "type"
FEATURES_SELECTOR = "features"
FILES_SELECTOR = "files"
DTYPE_SELECTOR = "dtype"
CARDINALITY_SELECTOR = "cardinality"
def get_categorical_feature_type(size: int):
"""This function works both when max value and cardinality is passed.
Consistency by the user is required"""
types = (np.int8, np.int16, np.int32)
for numpy_type in types:
if size < np.iinfo(numpy_type).max:
return numpy_type
raise RuntimeError(f"Categorical feature of size {size} is too big for defined types")
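# Illustrative usage (sizes chosen for this example only):
#   get_categorical_feature_type(100)    -> np.int8  (100 < np.iinfo(np.int8).max == 127)
#   get_categorical_feature_type(30000)  -> np.int16
#   get_categorical_feature_type(100000) -> np.int32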
|
PyTorch/LanguageModeling/BERT/triton/dist6l/runner | runner | config_NVIDIA-A30 | checkpoints:
- name: dist-6l-qa
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/bert_pyt_ckpt_distilled_6l_768d_qa_squad11_amp/versions/20.12.0/zip
configurations:
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: none
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: none
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: none
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: none
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
- 8
- 16
batch_sizes: 1 8 16
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: ts-trace
export_precision: fp16
format: ts-trace
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
container_version: '21.10'
datasets:
- name: data
datasets_dir: datasets
framework: PyTorch
model_name: BERT
triton_container_image: null
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
PyTorch/Recommendation/DLRM/tests/feature_specs | feature_specs | 13_num_26_cat | channel_spec:
categorical:
- cat_0.bin
- cat_1.bin
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
- cat_6.bin
- cat_7.bin
- cat_8.bin
- cat_9.bin
- cat_10.bin
- cat_11.bin
- cat_12.bin
- cat_13.bin
- cat_14.bin
- cat_15.bin
- cat_16.bin
- cat_17.bin
- cat_18.bin
- cat_19.bin
- cat_20.bin
- cat_21.bin
- cat_22.bin
- cat_23.bin
- cat_24.bin
- cat_25.bin
label:
- label
numerical: &id001
- num_0
- num_1
- num_2
- num_3
- num_4
- num_5
- num_6
- num_7
- num_8
- num_9
- num_10
- num_11
- num_12
feature_spec:
cat_0.bin:
cardinality: 100000
dtype: int32
cat_1.bin:
cardinality: 100001
dtype: int32
cat_10.bin:
cardinality: 100010
dtype: int32
cat_11.bin:
cardinality: 100011
dtype: int32
cat_12.bin:
cardinality: 100012
dtype: int32
cat_13.bin:
cardinality: 100013
dtype: int32
cat_14.bin:
cardinality: 100014
dtype: int32
cat_15.bin:
cardinality: 100015
dtype: int32
cat_16.bin:
cardinality: 100016
dtype: int32
cat_17.bin:
cardinality: 100017
dtype: int32
cat_18.bin:
cardinality: 100018
dtype: int32
cat_19.bin:
cardinality: 100019
dtype: int32
cat_2.bin:
cardinality: 100002
dtype: int32
cat_20.bin:
cardinality: 100020
dtype: int32
cat_21.bin:
cardinality: 100021
dtype: int32
cat_22.bin:
cardinality: 100022
dtype: int32
cat_23.bin:
cardinality: 100023
dtype: int32
cat_24.bin:
cardinality: 100024
dtype: int32
cat_25.bin:
cardinality: 100025
dtype: int32
cat_3.bin:
cardinality: 100003
dtype: int32
cat_4.bin:
cardinality: 100004
dtype: int32
cat_5.bin:
cardinality: 100005
dtype: int32
cat_6.bin:
cardinality: 100006
dtype: int32
cat_7.bin:
cardinality: 100007
dtype: int32
cat_8.bin:
cardinality: 100008
dtype: int32
cat_9.bin:
cardinality: 100009
dtype: int32
label:
dtype: bool
num_0:
dtype: float16
num_1:
dtype: float16
num_10:
dtype: float16
num_11:
dtype: float16
num_12:
dtype: float16
num_2:
dtype: float16
num_3:
dtype: float16
num_4:
dtype: float16
num_5:
dtype: float16
num_6:
dtype: float16
num_7:
dtype: float16
num_8:
dtype: float16
num_9:
dtype: float16
metadata: {}
source_spec:
test:
- features: *id001
files:
- test/numerical.bin
type: split_binary
- features:
- label
files:
- test/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- test/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- test/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- test/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- test/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- test/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- test/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- test/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- test/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- test/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- test/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- test/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- test/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- test/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- test/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- test/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- test/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- test/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- test/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- test/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- test/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- test/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- test/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- test/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- test/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- test/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- test/cat_25.bin
type: split_binary
train:
- features: *id001
files:
- train/numerical.bin
type: split_binary
- features:
- label
files:
- train/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- train/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- train/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- train/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- train/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- train/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- train/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- train/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- train/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- train/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- train/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- train/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- train/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- train/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- train/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- train/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- train/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- train/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- train/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- train/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- train/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- train/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- train/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- train/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- train/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- train/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- train/cat_25.bin
type: split_binary
|
Kaldi/SpeechRecognition | SpeechRecognition | README | # Kaldi ASR Integration With Triton
This repository provides a Kaldi ASR custom backend for the NVIDIA Triton Inference Server (formerly TensorRT Inference Server). It can be used to demonstrate high-performance online inference on Kaldi ASR models. This includes handling the gRPC communication between Triton and clients, and the dynamic batching of inference requests. This repository is tested and maintained by NVIDIA.
## Table Of Contents
- [Table Of Contents](#table-of-contents)
- [Solution overview](#solution-overview)
* [Reference model](#reference-model)
* [Default configuration](#default-configuration)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Parameters](#parameters)
* [Model path](#model-path)
* [Model configuration](#model-configuration)
* [Inference engine configuration](#inference-engine-configuration)
* [Inference process](#inference-process)
* [Client command-line parameters](#client-command-line-parameters)
* [Input/Output](#inputoutput)
* [Input](#input)
* [Output](#output)
* [Using a custom Kaldi ASR model](#using-a-custom-kaldi-asr-model)
- [Performance](#performance)
* [Metrics](#metrics)
* [Results](#results)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Solution overview
This repository provides a wrapper around the online GPU-accelerated ASR pipeline from the paper [GPU-Accelerated Viterbi Exact Lattice Decoder for Batched Online and Offline Speech Recognition](https://arxiv.org/abs/1910.10032). That work includes a high-performance implementation of a GPU HMM Decoder, a low-latency Neural Net driver, fast Feature Extraction for preprocessing, and new ASR pipelines tailored for GPUs. These different modules have been integrated into the Kaldi ASR framework.
This repository contains a Triton custom backend for the Kaldi ASR framework. This custom backend calls the high-performance online GPU pipeline from the Kaldi ASR framework. This Triton integration provides ease of use for Kaldi ASR inference: a gRPC streaming server, dynamic sequence batching, and multi-instance support. A client connects to the gRPC server, streams audio by sending chunks to the server, and gets back the inferred text as an answer (see [Input/Output](#inputoutput)). More information about Triton can be found [here](https://docs.nvidia.com/deeplearning/sdk/tensorrt-inference-server-guide/docs/).
This Triton integration is meant to be used with the LibriSpeech model for demonstration purposes. We include a pre-trained version of this model to allow you to easily test this work (see [Quick Start Guide](#quick-start-guide)). Both the Triton integration and the underlying Kaldi ASR online GPU pipeline are a work in progress and will support more functionalities in the future. Support for a custom Kaldi model is experimental (see [Using a custom Kaldi ASR model](#using-a-custom-kaldi-asr-model)).
### Reference model
A reference model is used by all test scripts and benchmarks presented in this repository to illustrate this solution. We are using the Kaldi ASR `LibriSpeech` recipe, available [here](https://github.com/kaldi-asr/kaldi/blob/master/egs/librispeech/s5). It was trained by NVIDIA and is delivered as a pre-trained model.
### Default configuration
Details about parameters can be found in the [Parameters](#parameters) section.
* `model path`: Configured to use the pretrained LibriSpeech model.
* `use_tensor_cores`: 1
* `main_q_capacity`: 30000
* `aux_q_capacity`: 400000
* `beam`: 10
* `num_channels`: 4000
* `lattice_beam`: 7
* `max_active`: 10000
* `frame_subsampling_factor`: 3
* `acoustic_scale`: 1.0
* `num_worker_threads`: 40
* `max_batch_size`: 400
* `instance_group.count`: 1
## Setup
### Requirements
This repository contains Dockerfiles which extends the Kaldi and Triton NVIDIA GPU Cloud (NGC) containers and encapsulates some dependencies. Aside from these dependencies, ensure you have [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) installed.
For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry)
## Quick Start Guide
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/Kaldi/SpeechRecognition
```
2. Build the client and server containers.
`scripts/docker/build.sh`
3. Download and set up the pre-trained model and eval dataset.
`scripts/docker/launch_download.sh`
The model and dataset are downloaded in the `data/` folder.
4. Start the server.
`scripts/docker/launch_server.sh`
Once you see the line `Starting Metrics Service at 0.0.0.0:8002`, the server is ready to be used. You can then start the client.
Currently, multi-GPU is not supported. By default GPU 0 is used. You can use a specific GPU by using `NVIDIA_VISIBLE_DEVICES`:
`NVIDIA_VISIBLE_DEVICES=<GPUID> scripts/docker/launch_server.sh`
5. Start the client.
The following command will stream 1000 parallel streams to the server. The `-p` option prints the inferred `TEXT` sent back from the server.
`scripts/docker/launch_client.sh -p`
## Advanced
### Parameters
The configuration is done through the `config.pbtxt` file available in the `model-repo/kaldi_online/` directory. It allows you to specify the following:
#### Model path
The following parameters can be modified if you want to use your own Kaldi model.
* `mfcc_filename`
* `ivector_filename`
* `nnet3_rxfilename`
* `fst_rxfilename`
* `word_syms_rxfilename`
#### Model configuration
The model configuration parameters are passed to the model and have an impact on both accuracy and performance. They are standard Kaldi ASR parameters, so you can reuse the values that are currently being used in your CPU Kaldi ASR pipeline.
* `beam`
* `lattice_beam`
* `max_active`
* `frame_subsampling_factor`
* `acoustic_scale`
#### Inference engine configuration
The inference engine configuration parameters configure the inference engine. They impact performance, but not accuracy.
* `max_batch_size`: The size of one execution batch on the GPU. This parameter should be set as large as necessary to saturate the GPU, but not bigger. Larger batches will lead to a higher throughput, smaller batches to lower latency.
* `num_worker_threads`: The number of CPU threads for the postprocessing CPU tasks, such as lattice determinization and text generation from the lattice.
* `input.WAV_DATA.dims`: The maximum number of samples per chunk. The value must be a multiple of `frame_subsampling_factor * chunks_per_frame`.
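As a rough illustration of the multiple-of constraint above, the following sketch (plain Python, not part of this repository) rounds a desired chunk length down to a valid value; the 16 kHz sample rate and the `chunks_per_frame` value used in the example call are assumptions made for this illustration only.
```
def largest_valid_chunk(target_samples, frame_subsampling_factor, chunks_per_frame):
    # Round the desired chunk length (in samples) down to the required multiple.
    multiple = frame_subsampling_factor * chunks_per_frame
    return (target_samples // multiple) * multiple

# Assuming 16 kHz audio, 510 ms corresponds to 8160 samples; with
# frame_subsampling_factor=3 and a hypothetical chunks_per_frame=160:
# largest_valid_chunk(8160, 3, 160) == 8160
```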
### Inference process
Inference is done by simulating concurrent users. Each user is assigned one utterance from the LibriSpeech dataset. It streams that utterance by cutting it into chunks and gets the final `TEXT` output once the final chunk has been sent. The `-c` parameter sets the number of active users being simulated in parallel.
### Client command-line parameters
The client can be configured through a set of parameters that define its behavior. To see the full list of available options and their descriptions, use the `-h` command-line option. The parameters are:
```
-v
-i <Number of iterations on the dataset>
-c <Number of parallel audio channels>
-a <Path to the scp dataset file>
-l <Maximum number of samples per chunk. Must correspond to the server config>
-u <URL for inference service and its gRPC port>
-o : Only feed each channel at realtime speed. Simulates online clients.
-p : Print text outputs
-b : Print partial (best path) text outputs
```
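For example, assuming `scripts/docker/launch_client.sh` forwards its arguments to the client, `scripts/docker/launch_client.sh -p -c 1000 -o` would stream 1000 parallel channels at realtime speed and print the inferred text outputs.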
### Input/Output
The API is currently experimental.
#### Input
The server expects chunks of audio, each containing up to `input.WAV_DATA.dims` samples. By default, this corresponds to 510ms of audio per chunk. The last chunk can send a partial chunk smaller than this maximum value.
The chunk is made of a float array set in the input `WAV_DATA`, with the input `WAV_DATA_DIM` containing the number of samples contained in that chunk. Flags can be set to declare a chunk as a first chunk or last chunk for a sequence. Finally, each chunk from a given sequence is associated with a `CorrelationID`. Every chunk belonging to the same sequence must be given the same `CorrelationID`.
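The sketch below (plain Python/NumPy, not the provided client) illustrates how a client could split a waveform into such chunks and attach the metadata described above; the dictionary keys mirror the field names in this section and the flag names are illustrative only.
```
import numpy as np

def make_chunks(waveform, correlation_id, max_samples):
    # Split a waveform into chunks of at most `max_samples` samples and tag
    # each chunk with the sequence metadata described above.
    chunks = []
    for start in range(0, len(waveform), max_samples):
        wav_data = np.asarray(waveform[start:start + max_samples], dtype=np.float32)
        chunks.append({
            "CorrelationID": correlation_id,              # same value for the whole sequence
            "WAV_DATA": wav_data,                         # float samples, last chunk may be partial
            "WAV_DATA_DIM": len(wav_data),                # number of valid samples in this chunk
            "START": start == 0,                          # first-chunk flag
            "END": start + max_samples >= len(waveform),  # last-chunk flag
        })
    return chunks
```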
#### Output
Once the server receives the final chunk for a sequence (with the `END` flag set), it will generate the output associated with that sequence and send it back to the client. The end-of-sequence procedure is as follows:
1. Process the last chunk.
2. Flush and process the Neural Net context.
3. Generate the full lattice for the sequence.
4. Determinize the lattice.
5. Find the best path in the lattice.
6. Generate the text output for that best path.
7. Send the text back to the client.
Even if only the best path is used, we are still generating a full lattice for benchmarking purposes. Partial results (generated after each timestep) are currently not available but will be added in a future release.
### Using a custom Kaldi ASR model
Support for Kaldi ASR models that are different from the provided LibriSpeech model is experimental. However, it is possible to modify the [Model Path](#model-path) section of the config file `model-repo/kaldi_online/config.pbtxt` to set up your own model.
## Performance
### Metrics
Throughput is measured using the RTFX metric, defined as `RTFX = (number of seconds of audio inferred) / (compute time in seconds)`. It is the inverse of the RTF (Real Time Factor) metric, i.e. `RTFX = 1/RTF`.
Latency is defined as the delay between the availability of the last chunk of audio and the reception of the inferred text. More precisely, it is defined as follows:
1. *Client:* Last audio chunk available
2. ***t0** <- Current time*
3. *Client:* Send last audio chunk
4. *Server:* Compute inference of last chunk
5. *Server:* Generate the raw lattice for the full utterance
6. *Server:* Determinize the raw lattice
7. *Client:* Receive lattice output
8. *Client:* Call callback with output
9. ***t1** <- Current time*
The latency is defined as `latency = t1 - t0`.
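For example, a small post-processing script (a sketch, not part of this repository) could compute RTFX and the latency percentiles reported below from per-utterance measurements:
```
import numpy as np

def rtfx(audio_seconds, compute_seconds):
    # RTFX = seconds of audio inferred / compute time in seconds = 1 / RTF
    return audio_seconds / compute_seconds

def latency_stats(latencies_seconds):
    lat = np.asarray(latencies_seconds)
    return {
        "90%": np.percentile(lat, 90),
        "95%": np.percentile(lat, 95),
        "99%": np.percentile(lat, 99),
        "Avg": lat.mean(),
    }
```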
### Results
Our results were obtained by:
1. Building and starting the server as described in [Quick Start Guide](#quick-start-guide).
2. Running `scripts/run_inference_all_a100.sh`, `scripts/run_inference_all_v100.sh` and `scripts/run_inference_all_t4.sh`
| GPU | Realtime I/O | Number of parallel audio channels | Latency 90% (s) | Latency 95% (s) | Latency 99% (s) | Latency avg (s) |
| ----- | ------------ | --------------------------------- | ----------- | ----- | ----- | ----- |
| A100 | Yes | 2000 | 0.11 | 0.12 | 0.14 | 0.09 |
| V100 | Yes | 2000 | 0.42 | 0.50 | 0.61 | 0.23 |
| V100 | Yes | 1000 | 0.09 | 0.09 | 0.11 | 0.07 |
| T4 | Yes | 600 | 0.17 | 0.18 | 0.22 | 0.14 |
| T4 | Yes | 400 | 0.12 | 0.13 | 0.15 | 0.10 |
## Release notes
### Changelog
January 2020
* Initial release
April 2020
* Printing WER accuracy in Triton client
* Using the latest Kaldi GPU ASR pipeline, extended support for features (ivectors, fbanks)
July 2021
* Significantly improve latency and throughput for the backend
* Update Triton to v2.10.0
### Known issues
* No multi-GPU support for the Triton integration
|
PyTorch/Recommendation/NCF | NCF | test_cases | #!/bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -x
for test_name in more_pos less_pos less_user less_item more_user more_item other_names;
do
CACHED_DATADIR='/data/cache/ml-20m'
NEW_DIR=${CACHED_DATADIR}/${test_name}
echo "Trying to run on modified dataset: $test_name"
python -m torch.distributed.launch --nproc_per_node=1 --use_env ncf.py --data ${NEW_DIR} --epochs 1
echo "Model runs on modified dataset: $test_name"
done
for test_sample in '0' '10' '200';
do
CACHED_DATADIR='/data/cache/ml-20m'
NEW_DIR=${CACHED_DATADIR}/sample_${test_sample}
echo "Trying to run on dataset with test sampling: $test_sample"
python -m torch.distributed.launch --nproc_per_node=1 --use_env ncf.py --data ${NEW_DIR} --epochs 1
echo "Model runs on dataset with test sampling: $test_sample"
done
for online_sample in '0' '1' '10';
do
CACHED_DATADIR='/data/cache/ml-20m'
echo "Trying to run with train sampling: $online_sample"
python -m torch.distributed.launch --nproc_per_node=1 --use_env ncf.py --data ${CACHED_DATADIR} --epochs 1 -n ${online_sample}
echo "Model runs with train sampling: $online_sample"
done |
TensorFlow/LanguageModeling/BERT/triton/scripts | scripts | run_client | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
batch_size=${1:-"8"}
seq_length=${2:-"384"}
doc_stride=${3:-"128"}
triton_version_name=${4:-"1"}
triton_model_name=${5:-"bert"}
BERT_DIR=${6:-"data/download/nvidia_pretrained/bert_tf_pretraining_large_lamb"}
bash scripts/docker/launch.sh \
"python triton/run_squad_triton_client.py \
--triton_model_name=$triton_model_name \
--triton_model_version=$triton_version_name \
--vocab_file=$BERT_DIR/vocab.txt \
--predict_batch_size=$batch_size \
--max_seq_length=${seq_length} \
--doc_stride=${doc_stride} \
--output_dir=/results \
${@:7}" |
TensorFlow/Detection/SSD/models/research/object_detection/dataset_tools | dataset_tools | create_coco_tf_record | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw COCO dataset to TFRecord for object_detection.
Please note that this tool creates sharded output files.
Example usage:
python create_coco_tf_record.py --logtostderr \
--train_image_dir="${TRAIN_IMAGE_DIR}" \
--val_image_dir="${VAL_IMAGE_DIR}" \
--test_image_dir="${TEST_IMAGE_DIR}" \
--train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
--val_annotations_file="${VAL_ANNOTATIONS_FILE}" \
--testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \
--output_dir="${OUTPUT_DIR}"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import json
import os
import contextlib2
import numpy as np
import PIL.Image
from pycocotools import mask
import tensorflow as tf
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
flags = tf.app.flags
tf.flags.DEFINE_boolean('include_masks', False,
'Whether to include instance segmentations masks '
'(PNG encoded) in the result. default: False.')
tf.flags.DEFINE_string('train_image_dir', '',
'Training image directory.')
tf.flags.DEFINE_string('val_image_dir', '',
'Validation image directory.')
tf.flags.DEFINE_string('test_image_dir', '',
'Test image directory.')
tf.flags.DEFINE_string('train_annotations_file', '',
'Training annotations JSON file.')
tf.flags.DEFINE_string('val_annotations_file', '',
'Validation annotations JSON file.')
tf.flags.DEFINE_string('testdev_annotations_file', '',
'Test-dev annotations JSON file.')
tf.flags.DEFINE_string('output_dir', '/tmp/', 'Output data directory.')
FLAGS = flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
def create_tf_example(image,
annotations_list,
image_dir,
category_index,
include_masks=False):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys:
[u'license', u'file_name', u'coco_url', u'height', u'width',
u'date_captured', u'flickr_url', u'id']
annotations_list:
list of dicts with keys:
[u'segmentation', u'area', u'iscrowd', u'image_id',
u'bbox', u'category_id', u'id']
Notice that bounding box coordinates in the official COCO dataset are
given as [x, y, width, height] tuples using absolute coordinates where
x, y represent the top-left (0-indexed) corner. This function converts
to the format expected by the Tensorflow Object Detection API (which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative
to image size).
image_dir: directory containing the image files.
category_index: a dict containing COCO category information keyed
by the 'id' field of each category. See the
label_map_util.create_category_index function.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
Returns:
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin = []
xmax = []
ymin = []
ymax = []
is_crowd = []
category_names = []
category_ids = []
area = []
encoded_mask_png = []
num_annotations_skipped = 0
for object_annotations in annotations_list:
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
is_crowd.append(object_annotations['iscrowd'])
category_id = int(object_annotations['category_id'])
category_ids.append(category_id)
category_names.append(category_index[category_id]['name'].encode('utf8'))
area.append(object_annotations['area'])
if include_masks:
run_len_encoding = mask.frPyObjects(object_annotations['segmentation'],
image_height, image_width)
binary_mask = mask.decode(run_len_encoding)
if not object_annotations['iscrowd']:
binary_mask = np.amax(binary_mask, axis=2)
pil_image = PIL.Image.fromarray(binary_mask)
output_io = io.BytesIO()
pil_image.save(output_io, format='PNG')
encoded_mask_png.append(output_io.getvalue())
feature_dict = {
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/filename':
dataset_util.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(encoded_jpg),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymax),
'image/object/class/text':
dataset_util.bytes_list_feature(category_names),
'image/object/is_crowd':
dataset_util.int64_list_feature(is_crowd),
'image/object/area':
dataset_util.float_list_feature(area),
}
if include_masks:
feature_dict['image/object/mask'] = (
dataset_util.bytes_list_feature(encoded_mask_png))
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return key, example, num_annotations_skipped
def _create_tf_record_from_coco_annotations(
annotations_file, image_dir, output_path, include_masks, num_shards):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
annotations_file: JSON file containing bounding box annotations.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
num_shards: number of output file shards.
"""
with contextlib2.ExitStack() as tf_record_close_stack, \
tf.gfile.GFile(annotations_file, 'r') as fid:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, output_path, num_shards)
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
category_index = label_map_util.create_category_index(
groundtruth_data['categories'])
annotations_index = {}
if 'annotations' in groundtruth_data:
tf.logging.info(
'Found groundtruth annotations. Building annotations index.')
for annotation in groundtruth_data['annotations']:
image_id = annotation['image_id']
if image_id not in annotations_index:
annotations_index[image_id] = []
annotations_index[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image['id']
if image_id not in annotations_index:
missing_annotation_count += 1
annotations_index[image_id] = []
tf.logging.info('%d images are missing annotations.',
missing_annotation_count)
total_num_annotations_skipped = 0
for idx, image in enumerate(images):
if idx % 100 == 0:
tf.logging.info('On image %d of %d', idx, len(images))
annotations_list = annotations_index[image['id']]
_, tf_example, num_annotations_skipped = create_tf_example(
image, annotations_list, image_dir, category_index, include_masks)
total_num_annotations_skipped += num_annotations_skipped
shard_idx = idx % num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
tf.logging.info('Finished writing, skipped %d annotations.',
total_num_annotations_skipped)
def main(_):
assert FLAGS.train_image_dir, '`train_image_dir` missing.'
assert FLAGS.val_image_dir, '`val_image_dir` missing.'
assert FLAGS.test_image_dir, '`test_image_dir` missing.'
assert FLAGS.train_annotations_file, '`train_annotations_file` missing.'
assert FLAGS.val_annotations_file, '`val_annotations_file` missing.'
assert FLAGS.testdev_annotations_file, '`testdev_annotations_file` missing.'
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
train_output_path = os.path.join(FLAGS.output_dir, 'coco_train.record')
val_output_path = os.path.join(FLAGS.output_dir, 'coco_val.record')
testdev_output_path = os.path.join(FLAGS.output_dir, 'coco_testdev.record')
_create_tf_record_from_coco_annotations(
FLAGS.train_annotations_file,
FLAGS.train_image_dir,
train_output_path,
FLAGS.include_masks,
num_shards=100)
_create_tf_record_from_coco_annotations(
FLAGS.val_annotations_file,
FLAGS.val_image_dir,
val_output_path,
FLAGS.include_masks,
num_shards=10)
_create_tf_record_from_coco_annotations(
FLAGS.testdev_annotations_file,
FLAGS.test_image_dir,
testdev_output_path,
FLAGS.include_masks,
num_shards=100)
if __name__ == '__main__':
tf.app.run()
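# A hedged usage sketch: the flag names below are the ones consumed in main()
# above; the script filename and all paths are placeholders, not taken from the
# original repository.
#
#   python create_coco_tf_record.py \
#     --train_image_dir=/path/to/coco/train2017 \
#     --val_image_dir=/path/to/coco/val2017 \
#     --test_image_dir=/path/to/coco/test2017 \
#     --train_annotations_file=/path/to/annotations/instances_train2017.json \
#     --val_annotations_file=/path/to/annotations/instances_val2017.json \
#     --testdev_annotations_file=/path/to/annotations/image_info_test-dev2017.json \
#     --include_masks=True \
#     --output_dir=/path/to/output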
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | shape_utils | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
import tensorflow as tf
from object_detection.utils import static_shape
def _is_tensor(t):
"""Returns a boolean indicating whether the input is a tensor.
Args:
t: the input to be tested.
Returns:
a boolean that indicates whether t is a tensor.
"""
return isinstance(t, (tf.Tensor, tf.SparseTensor, tf.Variable))
def _set_dim_0(t, d0):
"""Sets the 0-th dimension of the input tensor.
Args:
t: the input tensor, assuming the rank is at least 1.
d0: an integer indicating the 0-th dimension of the input tensor.
Returns:
the tensor t with the 0-th dimension set.
"""
t_shape = t.get_shape().as_list()
t_shape[0] = d0
t.set_shape(t_shape)
return t
def pad_tensor(t, length):
"""Pads the input tensor with 0s along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after padding, assuming length <= t.shape[0].
Returns:
padded_t: the padded tensor, whose first dimension is length. If the length
is an integer, the first dimension of padded_t is set to length
statically.
"""
t_rank = tf.rank(t)
t_shape = tf.shape(t)
t_d0 = t_shape[0]
pad_d0 = tf.expand_dims(length - t_d0, 0)
pad_shape = tf.cond(
tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
lambda: tf.expand_dims(length - t_d0, 0))
padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
if not _is_tensor(length):
padded_t = _set_dim_0(padded_t, length)
return padded_t
def clip_tensor(t, length):
"""Clips the input tensor along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after clipping, assuming length <= t.shape[0].
Returns:
clipped_t: the clipped tensor, whose first dimension is length. If the
length is an integer, the first dimension of clipped_t is set to length
statically.
"""
clipped_t = tf.gather(t, tf.range(length))
if not _is_tensor(length):
clipped_t = _set_dim_0(clipped_t, length)
return clipped_t
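# A minimal sketch of the two helpers above (illustrative values; assumes an
# active TF1 graph/session context, which is not part of this module):
#
#   t = tf.constant([[1, 2], [3, 4]])       # shape [2, 2]
#   padded = pad_tensor(t, 4)               # shape [4, 2], two zero rows appended
#   clipped = clip_tensor(padded, 3)        # shape [3, 2], keeps the first 3 rows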
def pad_or_clip_tensor(t, length):
"""Pad or clip the input tensor along the first dimension.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after processing.
Returns:
processed_t: the processed tensor, whose first dimension is length. If the
length is an integer, the first dimension of the processed tensor is set
to length statically.
"""
return pad_or_clip_nd(t, [length] + t.shape.as_list()[1:])
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor,
begin=tf.zeros(len(clip_size), dtype=tf.int32),
size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[
tf.zeros(len(trailing_paddings), dtype=tf.int32),
trailing_paddings
],
axis=1)
padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
def static_or_dynamic_map_fn(fn, elems, dtype=None,
parallel_iterations=32, back_prop=True):
"""Runs map_fn as a (static) for loop when possible.
This function rewrites the map_fn as an explicit unstack input -> for loop
over function calls -> stack result combination. This allows our graphs to
be acyclic when the batch size is static.
For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.
Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
with the default tf.map_fn function as it does not accept nested inputs (only
Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
Tensor or list of Tensors.
TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same structure as elems. Its output must have the
same structure as elems.
elems: A tensor or list of tensors, each of which will
be unpacked along their first dimension. The sequence of the
resulting slices will be applied to fn.
dtype: (optional) The output type(s) of fn. If fn returns a structure of
Tensors differing from the structure of elems, then dtype is not optional
and must have the same structure as the output of fn.
parallel_iterations: (optional) number of batch items to process in
parallel. This flag is only used if the native tf.map_fn is used
and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
back_prop: (optional) True enables support for back propagation.
This flag is only used if the native tf.map_fn is used.
Returns:
A tensor or sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
Raises:
ValueError: if `elems` is not a Tensor or a list of Tensors
ValueError: if `fn` does not return a Tensor or list of Tensors
"""
if isinstance(elems, list):
for elem in elems:
if not isinstance(elem, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elem_shapes = [elem.shape.as_list() for elem in elems]
# Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail
# to all be the same size along the batch dimension.
for elem_shape in elem_shapes:
if (not elem_shape or not elem_shape[0]
or elem_shape[0] != elem_shapes[0][0]):
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
else:
if not isinstance(elems, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elems_shape = elems.shape.as_list()
if not elems_shape or not elems_shape[0]:
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
outputs = [fn(arg) for arg in tf.unstack(elems)]
# Stack `outputs`, which is a list of Tensors or list of lists of Tensors
if all([isinstance(output, tf.Tensor) for output in outputs]):
return tf.stack(outputs)
else:
if all([isinstance(output, list) for output in outputs]):
if all([all(
[isinstance(entry, tf.Tensor) for entry in output_list])
for output_list in outputs]):
return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
raise ValueError('`fn` should return a Tensor or a list of Tensors.')
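# A hedged usage sketch: with a statically shaped input the call below unrolls
# into a plain Python loop over unstacked rows instead of a tf.while_loop
# (tensor values are illustrative only):
#
#   boxes = tf.constant([[0.0, 0.0, 0.5, 0.5], [0.2, 0.2, 0.9, 0.9]])
#   areas = static_or_dynamic_map_fn(
#       lambda box: (box[2] - box[0]) * (box[3] - box[1]), boxes)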
def check_min_image_dim(min_dim, image_tensor):
"""Checks that the image width/height are greater than some number.
This function is used to check that the width and height of an image are above
a certain value. If the image shape is static, this function will perform the
check at graph construction time. Otherwise, if the image shape varies, an
Assertion control dependency will be added to the graph.
Args:
min_dim: The minimum number of pixels along the width and height of the
image.
image_tensor: The image tensor to check size for.
Returns:
If `image_tensor` has dynamic size, return `image_tensor` with an Assert
control dependency. Otherwise returns image_tensor.
Raises:
ValueError: if `image_tensor`'s width or height is smaller than `min_dim`.
"""
image_shape = image_tensor.get_shape()
image_height = static_shape.get_height(image_shape)
image_width = static_shape.get_width(image_shape)
if image_height is None or image_width is None:
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
['image size must be >= {} in both height and width.'.format(min_dim)])
with tf.control_dependencies([shape_assert]):
return tf.identity(image_tensor)
if image_height < min_dim or image_width < min_dim:
raise ValueError(
'image size must be >= %d in both height and width; image dim = %d,%d' %
(min_dim, image_height, image_width))
return image_tensor
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static or a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
else: return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
"""Asserts that shape_a and shape_b are the same along the 0th-dimension.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static or a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
if shape_a[0] != shape_b[0]:
raise ValueError('Unequal first dimension {}, {}'.format(
shape_a[0], shape_b[0]))
else: return tf.no_op()
else:
return tf.assert_equal(shape_a[0], shape_b[0])
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
"""Asserts the input box tensor is normalized.
Args:
boxes: a tensor of shape [N, 4] where N is the number of boxes.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1.
Returns:
a tf.Assert op which fails when the input box tensor is not normalized.
Raises:
ValueError: When the input box tensor is not normalized.
"""
box_minimum = tf.reduce_min(boxes)
box_maximum = tf.reduce_max(boxes)
return tf.Assert(
tf.logical_and(
tf.less_equal(box_maximum, maximum_normalized_coordinate),
tf.greater_equal(box_minimum, 0)),
[boxes])
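# A hedged example of how the shape assertions above are typically combined
# with combined_static_and_dynamic_shape (tensor names are hypothetical):
#
#   shape_a = combined_static_and_dynamic_shape(predictions)
#   shape_b = combined_static_and_dynamic_shape(targets)
#   with tf.control_dependencies([assert_shape_equal(shape_a, shape_b)]):
#       loss = tf.reduce_mean(tf.square(predictions - targets))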
|
PyTorch/SpeechRecognition/QuartzNet/common | common | features | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
import librosa
import torch
import torch.nn as nn
class BaseFeatures(nn.Module):
"""Base class for GPU accelerated audio preprocessing."""
__constants__ = ["pad_align", "pad_to_max_duration", "max_len"]
def __init__(self, pad_align, pad_to_max_duration, max_duration,
sample_rate, window_size, window_stride, spec_augment=None,
cutout_augment=None):
super(BaseFeatures, self).__init__()
self.pad_align = pad_align
self.pad_to_max_duration = pad_to_max_duration
self.win_length = int(sample_rate * window_size) # frame size
self.hop_length = int(sample_rate * window_stride)
# Calculate maximum sequence length (# frames)
if pad_to_max_duration:
self.max_len = 1 + math.ceil(
(max_duration * sample_rate - self.win_length) / self.hop_length
)
if spec_augment is not None:
self.spec_augment = SpecAugment(**spec_augment)
else:
self.spec_augment = None
if cutout_augment is not None:
self.cutout_augment = CutoutAugment(**cutout_augment)
else:
self.cutout_augment = None
@torch.no_grad()
def calculate_features(self, audio, audio_lens):
return audio, audio_lens
def __call__(self, audio, audio_lens):
dtype = audio.dtype
audio = audio.float()
feat, feat_lens = self.calculate_features(audio, audio_lens)
feat = self.apply_padding(feat)
if self.cutout_augment is not None:
feat = self.cutout_augment(feat)
if self.spec_augment is not None:
feat = self.spec_augment(feat)
feat = feat.to(dtype)
return feat, feat_lens
def apply_padding(self, x):
if self.pad_to_max_duration:
x_size = max(x.size(-1), self.max_len)
else:
x_size = x.size(-1)
if self.pad_align > 0:
pad_amt = x_size % self.pad_align
else:
pad_amt = 0
padded_len = x_size + (self.pad_align - pad_amt if pad_amt > 0 else 0)
return nn.functional.pad(x, (0, padded_len - x.size(-1)))
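# Worked example of apply_padding (numbers assumed for illustration): with
# pad_align=8 and a feature tensor whose last dimension is 250, pad_amt is
# 250 % 8 = 2, so 8 - 2 = 6 zero frames are appended and the padded length
# becomes 256, a multiple of pad_align.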
class SpecAugment(nn.Module):
"""Spec augment. refer to https://arxiv.org/abs/1904.08779
"""
def __init__(self, freq_masks=0, min_freq=0, max_freq=10, time_masks=0,
min_time=0, max_time=10):
super(SpecAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.freq_masks = freq_masks
self.min_freq = min_freq
self.max_freq = max_freq
self.time_masks = time_masks
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for _ in range(self.freq_masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
f0 = torch.randint(0, max(1, sh[1] - w), size=(1,))
mask[idx, f0:f0+w] = 1
for _ in range(self.time_masks):
w = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
t0 = torch.randint(0, max(1, sh[2] - w), size=(1,))
mask[idx, :, t0:t0+w] = 1
return x.masked_fill(mask, 0)
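# A hedged usage sketch (shapes and mask counts are illustrative): masks are
# drawn independently per example over a [batch, n_filters, n_frames] batch.
#
#   specaug = SpecAugment(freq_masks=2, max_freq=20, time_masks=2, max_time=75)
#   feats = torch.randn(4, 64, 300)
#   masked = specaug(feats)     # same shape, with random freq/time bands zeroed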
class CutoutAugment(nn.Module):
"""Cutout. refer to https://arxiv.org/pdf/1708.04552.pdf
"""
def __init__(self, masks=0, min_freq=20, max_freq=20, min_time=5, max_time=5):
super(CutoutAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.masks = masks
self.min_freq = min_freq
self.max_freq = max_freq
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for i in range(self.masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
h = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
f0 = int(random.uniform(0, sh[1] - w))
t0 = int(random.uniform(0, sh[2] - h))
mask[idx, f0:f0+w, t0:t0+h] = 1
return x.masked_fill(mask, 0)
@torch.jit.script
def normalize_batch(x, seq_len, normalize_type: str):
# print ("normalize_batch: x, seq_len, shapes: ", x.shape, seq_len, seq_len.shape)
if normalize_type == "per_feature":
x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
for i in range(x.shape[0]):
x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1)
x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1)
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)
elif normalize_type == "all_features":
x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
for i in range(x.shape[0]):
x_mean[i] = x[i, :, :int(seq_len[i])].mean()
x_std[i] = x[i, :, :int(seq_len[i])].std()
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)
else:
return x
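# A minimal sketch of per-feature normalization (values illustrative): each
# example is normalized with statistics computed only over its valid frames.
#
#   feats = torch.randn(2, 64, 200)              # [batch, n_filters, n_frames]
#   lens = torch.tensor([200, 150])
#   normed = normalize_batch(feats, lens, normalize_type="per_feature")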
@torch.jit.script
def splice_frames(x, frame_splicing: int):
""" Stacks frames together across feature dim
input is batch_size, feature_dim, num_frames
output is batch_size, feature_dim*frame_splicing, num_frames
"""
seq = [x]
# TORCHSCRIPT: JIT doesn't like range(start, stop)
for n in range(frame_splicing - 1):
seq.append(torch.cat([x[:, :, :n + 1], x[:, :, n + 1:]], dim=2))
return torch.cat(seq, dim=1)
class FilterbankFeatures(BaseFeatures):
# For JIT, https://pytorch.org/docs/stable/jit.html#python-defined-constants
__constants__ = ["dither", "preemph", "n_fft", "hop_length", "win_length",
"log", "frame_splicing", "normalize"]
# torchscript: "center" removed due to a bug
def __init__(self, spec_augment=None, cutout_augment=None,
sample_rate=8000, window_size=0.02, window_stride=0.01,
window="hamming", normalize="per_feature", n_fft=None,
preemph=0.97, n_filt=64, lowfreq=0, highfreq=None, log=True,
dither=1e-5, pad_align=8, pad_to_max_duration=False,
max_duration=float('inf'), frame_splicing=1):
super(FilterbankFeatures, self).__init__(
pad_align=pad_align, pad_to_max_duration=pad_to_max_duration,
max_duration=max_duration, sample_rate=sample_rate,
window_size=window_size, window_stride=window_stride,
spec_augment=spec_augment, cutout_augment=cutout_augment)
torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}
self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))
self.normalize = normalize
self.log = log
#TORCHSCRIPT: Check whether or not we need this
self.dither = dither
self.frame_splicing = frame_splicing
self.n_filt = n_filt
self.preemph = preemph
highfreq = highfreq or sample_rate / 2
window_fn = torch_windows.get(window, None)
window_tensor = window_fn(self.win_length,
periodic=False) if window_fn else None
filterbanks = torch.tensor(
librosa.filters.mel(sr=sample_rate, n_fft=self.n_fft, n_mels=n_filt,
fmin=lowfreq, fmax=highfreq),
dtype=torch.float).unsqueeze(0)
# torchscript
self.register_buffer("fb", filterbanks)
self.register_buffer("window", window_tensor)
def get_seq_len(self, seq_len):
return torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to(
dtype=torch.int)
# TORCHSCRIPT: center removed due to bug
def stft(self, x):
spec = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,
win_length=self.win_length,
window=self.window.to(dtype=torch.float),
return_complex=True)
return torch.view_as_real(spec)
@torch.no_grad()
def calculate_features(self, x, seq_len):
dtype = x.dtype
seq_len = self.get_seq_len(seq_len)
# dither
if self.dither > 0:
x += self.dither * torch.randn_like(x)
# do preemphasis
if self.preemph is not None:
x = torch.cat(
(x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]), dim=1)
x = self.stft(x)
# get power spectrum
x = x.pow(2).sum(-1)
# dot with filterbank energies
x = torch.matmul(self.fb.to(x.dtype), x)
# log features if required
if self.log:
x = torch.log(x + 1e-20)
# frame splicing if required
if self.frame_splicing > 1:
raise ValueError('Frame splicing not supported')
# normalize if required
x = normalize_batch(x, seq_len, normalize_type=self.normalize)
# mask to zero any values beyond seq_len in batch,
# pad to multiple of `pad_align` (for efficiency)
max_len = x.size(-1)
mask = torch.arange(max_len, dtype=seq_len.dtype, device=x.device)
mask = mask.expand(x.size(0), max_len) >= seq_len.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1), 0)
# TORCHSCRIPT: Is this del important? It breaks scripting
# del mask
return x.to(dtype), seq_len
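# A hedged end-to-end sketch (sample rate, lengths and shapes are assumed, not
# part of the original module):
#
#   featurizer = FilterbankFeatures(sample_rate=16000, window_size=0.02,
#                                   window_stride=0.01, n_filt=64)
#   audio = torch.randn(2, 16000)                # two 1-second waveforms
#   audio_lens = torch.tensor([16000, 12000])
#   feats, feat_lens = featurizer(audio, audio_lens)   # [2, 64, T], frame counts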
|
PyTorch/SpeechSynthesis/FastPitch/fastpitch | fastpitch | attention | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class Invertible1x1ConvLUS(torch.nn.Module):
def __init__(self, c):
super(Invertible1x1ConvLUS, self).__init__()
# Sample a random orthonormal matrix to initialize weights
W, _ = torch.linalg.qr(torch.randn(c, c))
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1*W[:, 0]
p, lower, upper = torch.lu_unpack(*torch.lu(W))
self.register_buffer('p', p)
# diagonals of lower will always be 1s anyway
lower = torch.tril(lower, -1)
lower_diag = torch.diag(torch.eye(c, c))
self.register_buffer('lower_diag', lower_diag)
self.lower = nn.Parameter(lower)
self.upper_diag = nn.Parameter(torch.diag(upper))
self.upper = nn.Parameter(torch.triu(upper, 1))
def forward(self, z, reverse=False):
U = torch.triu(self.upper, 1) + torch.diag(self.upper_diag)
L = torch.tril(self.lower, -1) + torch.diag(self.lower_diag)
W = torch.mm(self.p, torch.mm(L, U))
if reverse:
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse[..., None]
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
W = W[..., None]
z = F.conv1d(z, W, bias=None, stride=1, padding=0)
log_det_W = torch.sum(torch.log(torch.abs(self.upper_diag)))
return z, log_det_W
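# A hedged usage sketch (channel count and shapes assumed): the forward pass
# returns the transformed sequence plus the log-determinant term used by
# flow-based objectives; reverse=True approximately inverts it.
#
#   inv_conv = Invertible1x1ConvLUS(80)
#   z = torch.randn(2, 80, 100)                  # [batch, channels, time]
#   z_out, log_det_W = inv_conv(z)
#   z_rec = inv_conv(z_out, reverse=True)        # close to the original z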
class ConvAttention(torch.nn.Module):
def __init__(self, n_mel_channels=80, n_speaker_dim=128,
n_text_channels=512, n_att_channels=80, temperature=1.0,
n_mel_convs=2, align_query_enc_type='3xconv',
use_query_proj=True):
super(ConvAttention, self).__init__()
self.temperature = temperature
self.att_scaling_factor = np.sqrt(n_att_channels)
self.softmax = torch.nn.Softmax(dim=3)
self.log_softmax = torch.nn.LogSoftmax(dim=3)
self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
self.attn_proj = torch.nn.Conv2d(n_att_channels, 1, kernel_size=1)
self.align_query_enc_type = align_query_enc_type
self.use_query_proj = bool(use_query_proj)
self.key_proj = nn.Sequential(
ConvNorm(n_text_channels,
n_text_channels * 2,
kernel_size=3,
bias=True,
w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_text_channels * 2,
n_att_channels,
kernel_size=1,
bias=True))
self.align_query_enc_type = align_query_enc_type
if align_query_enc_type == "inv_conv":
self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
elif align_query_enc_type == "3xconv":
self.query_proj = nn.Sequential(
ConvNorm(n_mel_channels,
n_mel_channels * 2,
kernel_size=3,
bias=True,
w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_mel_channels * 2,
n_mel_channels,
kernel_size=1,
bias=True),
torch.nn.ReLU(),
ConvNorm(n_mel_channels,
n_att_channels,
kernel_size=1,
bias=True))
else:
raise ValueError("Unknown query encoder type specified")
def run_padded_sequence(self, sorted_idx, unsort_idx, lens, padded_data,
recurrent_model):
"""Sorts input data by previded ordering (and un-ordering) and runs the
packed data through the recurrent model
Args:
sorted_idx (torch.tensor): 1D sorting index
unsort_idx (torch.tensor): 1D unsorting index (inverse of sorted_idx)
lens: lengths of input data (sorted in descending order)
padded_data (torch.tensor): input sequences (padded)
recurrent_model (nn.Module): recurrent model to run data through
Returns:
hidden_vectors (torch.tensor): outputs of the RNN, in the original,
unsorted, ordering
"""
# sort the data by decreasing length using provided index
# we assume batch index is in dim=1
padded_data = padded_data[:, sorted_idx]
padded_data = nn.utils.rnn.pack_padded_sequence(padded_data, lens)
hidden_vectors = recurrent_model(padded_data)[0]
hidden_vectors, _ = nn.utils.rnn.pad_packed_sequence(hidden_vectors)
# unsort the results at dim=1 and return
hidden_vectors = hidden_vectors[:, unsort_idx]
return hidden_vectors
def encode_query(self, query, query_lens):
query = query.permute(2, 0, 1) # seq_len, batch, feature dim
lens, ids = torch.sort(query_lens, descending=True)
original_ids = [0] * lens.size(0)
for i in range(len(ids)):
original_ids[ids[i]] = i
query_encoded = self.run_padded_sequence(ids, original_ids, lens,
query, self.query_lstm)
query_encoded = query_encoded.permute(1, 2, 0)
return query_encoded
def forward(self, queries, keys, query_lens, mask=None, key_lens=None,
keys_encoded=None, attn_prior=None):
"""Attention mechanism for flowtron parallel
Unlike in Flowtron, we have no restrictions such as causality etc,
since we only need this during training.
Args:
queries (torch.tensor): B x C x T1 tensor
(probably going to be mel data)
keys (torch.tensor): B x C2 x T2 tensor (text data)
query_lens: lengths for sorting the queries in descending order
mask (torch.tensor): uint8 binary mask for variable length entries
(should be in the T2 domain)
Output:
attn (torch.tensor): B x 1 x T1 x T2 attention mask.
Final dim T2 should sum to 1
"""
keys_enc = self.key_proj(keys) # B x n_attn_dims x T2
# Beware can only do this since query_dim = attn_dim = n_mel_channels
if self.use_query_proj:
if self.align_query_enc_type == "inv_conv":
queries_enc, log_det_W = self.query_proj(queries)
elif self.align_query_enc_type == "3xconv":
queries_enc = self.query_proj(queries)
log_det_W = 0.0
else:
queries_enc, log_det_W = self.query_proj(queries)
else:
queries_enc, log_det_W = queries, 0.0
# different ways of computing attn,
# one is isotropic gaussians (per phoneme)
# Simplistic Gaussian Isotropic Attention
# B x n_attn_dims x T1 x T2
attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2
# compute log likelihood from a gaussian
attn = -0.0005 * attn.sum(1, keepdim=True)
if attn_prior is not None:
attn = self.log_softmax(attn) + torch.log(attn_prior[:, None]+1e-8)
attn_logprob = attn.clone()
if mask is not None:
attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2),
-float("inf"))
attn = self.softmax(attn) # Softmax along T2
return attn, attn_logprob
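# A hedged usage sketch (batch size and sequence lengths are illustrative):
# queries are mel frames, keys are encoded text, and the result is a soft
# alignment of every mel frame over the text tokens.
#
#   attention = ConvAttention(n_mel_channels=80, n_text_channels=512)
#   mels = torch.randn(2, 80, 200)               # B x n_mel_channels x T1
#   text = torch.randn(2, 512, 50)               # B x n_text_channels x T2
#   query_lens = torch.tensor([200, 180])
#   attn, attn_logprob = attention(mels, text, query_lens)   # B x 1 x T1 x T2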
|
TensorFlow/Classification/ConvNets/resnet50v1.5/training | training | DGX2_RN50_FP32_90E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 16 python3 main.py --arch=resnet50 \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \
--batch_size=128 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=3.0517578125e-05 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing | preprocessing | base_preprocessing | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from abc import ABC, abstractmethod
from typing import Optional
from syngen.utils.types import MetaData
from syngen.configuration import SynGenDatasetFeatureSpec
logger = logging.getLogger(__name__)
log = logger
class BasePreprocessing(ABC):
"""Base class for all preprocessing transforms.
Args:
source_path: path to the raw dataset
destination_path: path to store the dataset in SynGen format
download: tries to automatically download the dataset if True
"""
def __init__(
self,
source_path: str,
destination_path: Optional[str] = None,
download: bool = False,
**kwargs,
):
self.source_path = source_path
self.destination_path = destination_path or os.path.join(source_path, 'syngen_preprocessed')
if download:
self.download()
assert self._check_files()
def _prepare_feature_list(self, tabular_data, cat_columns, cont_columns):
feature_list = [
{
MetaData.NAME: feat_name,
MetaData.DTYPE: str(tabular_data[feat_name].dtype),
MetaData.FEATURE_TYPE: MetaData.CONTINUOUS,
}
for feat_name in cont_columns
]
feature_list.extend([
{
MetaData.NAME: feat_name,
MetaData.DTYPE: str(tabular_data[feat_name].dtype),
MetaData.FEATURE_TYPE: MetaData.CATEGORICAL,
}
for feat_name in cat_columns
])
return feature_list
@abstractmethod
def transform(self, gpu=False, use_cache=False) -> SynGenDatasetFeatureSpec:
raise NotImplementedError()
@abstractmethod
def download(self):
raise NotImplementedError()
@abstractmethod
def _check_files(self) -> bool:
raise NotImplementedError()
@classmethod
def add_cli_args(cls, parser):
return parser
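# A minimal subclass sketch (the dataset name, file check and column lists are
# hypothetical; only the abstract interface above is taken from this module):
#
#   class MyTabularPreprocessing(BasePreprocessing):
#       def download(self):
#           raise NotImplementedError("manual download assumed")
#
#       def _check_files(self) -> bool:
#           return os.path.exists(os.path.join(self.source_path, "data.csv"))
#
#       def transform(self, gpu=False, use_cache=False) -> SynGenDatasetFeatureSpec:
#           ...  # build the tabular/structural data and return a SynGenDatasetFeatureSpec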
|
TensorFlow2/Recommendation/WideAndDeep/data/outbrain | outbrain | features | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import glob
from data.feature_spec import CARDINALITY_SELECTOR, MAX_HOTNESS_SELECTOR, TYPE_SELECTOR, FEATURES_SELECTOR, \
FILES_SELECTOR, FeatureSpec
from data.outbrain.defaults import TEST_MAPPING, TRAIN_MAPPING, PARQUET_TYPE, MULTIHOT_CHANNEL, ONEHOT_CHANNEL, \
LABEL_CHANNEL, NUMERICAL_CHANNEL, MAP_FEATURE_CHANNEL
import os
DISPLAY_ID_COLUMN = "display_id"
NUMERIC_COLUMNS = [
"document_id_document_id_promo_sim_categories",
"document_id_document_id_promo_sim_topics",
"document_id_document_id_promo_sim_entities",
"document_id_promo_ctr",
"publisher_id_promo_ctr",
"source_id_promo_ctr",
"document_id_promo_count",
"publish_time_days_since_published",
"ad_id_ctr",
"advertiser_id_ctr",
"campaign_id_ctr",
"ad_id_count",
"publish_time_promo_days_since_published",
]
ONEHOT_COLUMNS = [
"ad_id",
"document_id",
"platform",
"document_id_promo",
"campaign_id",
"advertiser_id",
"source_id",
"geo_location",
"geo_location_country",
"geo_location_state",
"publisher_id",
"source_id_promo",
"publisher_id_promo",
]
# Multihot columns with their hotness
MULTIHOT_COLUMNS = {
"topic_id_list": 3,
"entity_id_list": 3,
"category_id_list": 3
}
CATEGORICAL_COLUMNS = ONEHOT_COLUMNS + list(MULTIHOT_COLUMNS.keys())
HASH_BUCKET_SIZES = {
"document_id": 300000,
"ad_id": 250000,
"document_id_promo": 100000,
"source_id_promo": 4000,
"source_id": 4000,
"geo_location": 2500,
"advertiser_id": 2500,
"geo_location_state": 2000,
"publisher_id_promo": 1000,
"publisher_id": 1000,
"geo_location_country": 300,
"platform": 4,
"campaign_id": 5000,
"topic_id_list": 350,
"entity_id_list": 10000,
"category_id_list": 100,
}
EMBEDDING_DIMENSIONS = {
"document_id": 128,
"ad_id": 128,
"document_id_promo": 128,
"source_id_promo": 64,
"source_id": 64,
"geo_location": 64,
"advertiser_id": 64,
"geo_location_state": 64,
"publisher_id_promo": 64,
"publisher_id": 64,
"geo_location_country": 64,
"platform": 19,
"campaign_id": 128,
"topic_id_list": 64,
"entity_id_list": 64,
"category_id_list": 64,
}
LABEL_NAME = "clicked"
def get_features_keys():
return CATEGORICAL_COLUMNS + NUMERIC_COLUMNS + [DISPLAY_ID_COLUMN]
def get_outbrain_feature_spec(base_directory):
multihot_dict = {feature_name: {CARDINALITY_SELECTOR:HASH_BUCKET_SIZES[feature_name],
MAX_HOTNESS_SELECTOR: hotness}
for feature_name, hotness in MULTIHOT_COLUMNS.items()}
onehot_dict = {feature_name: {CARDINALITY_SELECTOR:HASH_BUCKET_SIZES[feature_name]}
for feature_name in ONEHOT_COLUMNS}
numeric_dict = {feature_name: {} for feature_name in NUMERIC_COLUMNS}
feature_dict = {**multihot_dict, **onehot_dict, **numeric_dict, DISPLAY_ID_COLUMN:{}, LABEL_NAME:{}}
# these patterns come from partially our code (output_train_folder and output_valid_folder in utils/setup.py)
# and partially from how nvtabular works (saving as sorted *.parquet in a chosen folder)
train_data_pattern=f"{base_directory}/train/*.parquet"
valid_data_pattern=f"{base_directory}/valid/*.parquet"
absolute_train_paths = sorted(glob.glob(train_data_pattern))
absolute_valid_paths = sorted(glob.glob(valid_data_pattern))
train_paths = [os.path.relpath(p, base_directory) for p in absolute_train_paths]
valid_paths = [os.path.relpath(p, base_directory) for p in absolute_valid_paths]
source_spec = {}
for mapping_name, paths in zip((TRAIN_MAPPING, TEST_MAPPING),(train_paths, valid_paths)):
all_features = [LABEL_NAME] + ONEHOT_COLUMNS + list(MULTIHOT_COLUMNS.keys()) + NUMERIC_COLUMNS
if mapping_name == TEST_MAPPING:
all_features = all_features + [DISPLAY_ID_COLUMN]
source_spec[mapping_name] = []
source_spec[mapping_name].append({TYPE_SELECTOR: PARQUET_TYPE,
FEATURES_SELECTOR: all_features,
FILES_SELECTOR: paths})
channel_spec = {MULTIHOT_CHANNEL: list(MULTIHOT_COLUMNS.keys()),
ONEHOT_CHANNEL: ONEHOT_COLUMNS,
LABEL_CHANNEL: [LABEL_NAME],
NUMERICAL_CHANNEL: NUMERIC_COLUMNS,
MAP_FEATURE_CHANNEL: [DISPLAY_ID_COLUMN]}
return FeatureSpec(feature_spec=feature_dict, source_spec=source_spec, channel_spec=channel_spec, metadata={})
|
PyTorch/SpeechSynthesis/Tacotron2/platform | platform | DGXA100_tacotron2_AMP_4NGPU_train | mkdir -p output
python -m multiproc train.py -m Tacotron2 -o output/ --amp -lr 1e-3 --epochs 1501 -bs 128 --weight-decay 1e-6 --grad-clip-thresh 1.0 --cudnn-enabled --load-mel-from-disk --training-files=filelists/ljs_mel_text_train_filelist.txt --validation-files=filelists/ljs_mel_text_val_filelist.txt --log-file nvlog.json --anneal-steps 500 1000 1500 --anneal-factor 0.3
|
TensorFlow/Classification/ConvNets/model | model | resnet | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import tensorflow as tf
from utils import hvd_wrapper as hvd
import dllogger
from model import layers
from model import blocks
from utils import var_storage
from utils.data_utils import normalized_inputs
from utils.learning_rate import learning_rate_scheduler
from utils.optimizers import FixedLossScalerOptimizer
__all__ = [
'ResnetModel',
]
class ResnetModel(object):
"""Resnet cnn network configuration."""
def __init__(
self,
model_name,
n_classes,
layers_count,
layers_depth,
expansions,
compute_format='NCHW',
input_format='NHWC',
weight_init='fan_out',
dtype=tf.float32,
use_dali=False,
use_cpu=False,
cardinality=1,
use_se=False,
se_ratio=1,
):
self.model_hparams = tf.contrib.training.HParams(
n_classes=n_classes,
compute_format=compute_format,
input_format=input_format,
dtype=dtype,
layers_count=layers_count,
layers_depth=layers_depth,
expansions=expansions,
model_name=model_name,
use_dali=use_dali,
use_cpu=use_cpu,
cardinality=cardinality,
use_se=use_se,
se_ratio=se_ratio
)
self.batch_norm_hparams = tf.contrib.training.HParams(
decay=0.9,
epsilon=1e-5,
scale=True,
center=True,
param_initializers={
'beta': tf.constant_initializer(0.0),
'gamma': tf.constant_initializer(1.0),
'moving_mean': tf.constant_initializer(0.0),
'moving_variance': tf.constant_initializer(1.0)
},
)
self.conv2d_hparams = tf.contrib.training.HParams(
kernel_initializer=tf.compat.v1.variance_scaling_initializer(
scale=2.0, distribution='truncated_normal', mode=weight_init
),
bias_initializer=tf.constant_initializer(0.0)
)
self.dense_hparams = tf.contrib.training.HParams(
kernel_initializer=tf.compat.v1.variance_scaling_initializer(
scale=2.0, distribution='truncated_normal', mode=weight_init
),
bias_initializer=tf.constant_initializer(0.0)
)
if hvd.rank() == 0:
print("Model HParams:")
print("Name", model_name)
print("Number of classes", n_classes)
print("Compute_format", compute_format)
print("Input_format", input_format)
print("dtype", str(dtype))
def __call__(self, features, labels, mode, params):
if mode == tf.estimator.ModeKeys.TRAIN:
mandatory_params = [
"batch_size", "lr_init", "num_gpus", "steps_per_epoch", "momentum", "weight_decay", "loss_scale",
"label_smoothing"
]
for p in mandatory_params:
if p not in params:
raise RuntimeError("Parameter {} is missing.".format(p))
if mode == tf.estimator.ModeKeys.TRAIN and not self.model_hparams.use_dali:
with tf.device('/cpu:0'):
# Stage inputs on the host
cpu_prefetch_op, (features, labels) = self._stage([features, labels])
if not self.model_hparams.use_cpu:
with tf.device('/gpu:0'):
# Stage inputs to the device
gpu_prefetch_op, (features, labels) = self._stage([features, labels])
main_device = "/gpu:0" if not self.model_hparams.use_cpu else "/cpu:0"
with tf.device(main_device):
if features.dtype != self.model_hparams.dtype:
features = tf.cast(features, self.model_hparams.dtype)
# Subtract mean per channel
# and enforce values between [-1, 1]
if not self.model_hparams.use_dali:
features = normalized_inputs(features)
mixup = 0
eta = 0
if mode == tf.estimator.ModeKeys.TRAIN:
eta = params['label_smoothing']
mixup = params['mixup']
if mode != tf.estimator.ModeKeys.PREDICT:
n_cls = self.model_hparams.n_classes
one_hot_smoothed_labels = tf.one_hot(labels, n_cls,
on_value=1 - eta + eta / n_cls, off_value=eta / n_cls)
if mixup != 0:
print("Using mixup training with beta=", params['mixup'])
beta_distribution = tf.distributions.Beta(params['mixup'], params['mixup'])
feature_coefficients = beta_distribution.sample(sample_shape=[params['batch_size'], 1, 1, 1])
reversed_feature_coefficients = tf.subtract(
tf.ones(shape=feature_coefficients.shape), feature_coefficients
)
rotated_features = tf.reverse(features, axis=[0])
features = feature_coefficients * features + reversed_feature_coefficients * rotated_features
label_coefficients = tf.squeeze(feature_coefficients, axis=[2, 3])
rotated_labels = tf.reverse(one_hot_smoothed_labels, axis=[0])
reversed_label_coefficients = tf.subtract(
tf.ones(shape=label_coefficients.shape), label_coefficients
)
one_hot_smoothed_labels = label_coefficients * one_hot_smoothed_labels + reversed_label_coefficients * rotated_labels
# Update Global Step
global_step = tf.train.get_or_create_global_step()
tf.identity(global_step, name="global_step_ref")
tf.identity(features, name="features_ref")
if mode == tf.estimator.ModeKeys.TRAIN:
tf.identity(labels, name="labels_ref")
probs, logits = self.build_model(
features,
training=mode == tf.estimator.ModeKeys.TRAIN,
reuse=False,
use_final_conv=params['use_final_conv']
)
if params['use_final_conv']:
logits = tf.squeeze(logits, axis=[-2, -1])
y_preds = tf.argmax(logits, axis=1, output_type=tf.int32)
# Check the output dtype, shall be FP32 in training
assert (probs.dtype == tf.float32)
assert (logits.dtype == tf.float32)
assert (y_preds.dtype == tf.int32)
tf.identity(logits, name="logits_ref")
tf.identity(probs, name="probs_ref")
tf.identity(y_preds, name="y_preds_ref")
#if mode == tf.estimator.ModeKeys.TRAIN:
#
# assert (len(tf.trainable_variables()) == 161)
#
#else:
#
# assert (len(tf.trainable_variables()) == 0)
if mode == tf.estimator.ModeKeys.TRAIN and params['quantize']:
dllogger.log(data={"QUANTIZATION AWARE TRAINING ENABLED": True}, step=tuple())
if params['symmetric']:
dllogger.log(data={"MODE": "USING SYMMETRIC MODE"}, step=tuple())
tf.contrib.quantize.experimental_create_training_graph(
tf.get_default_graph(),
symmetric=True,
use_qdq=params['use_qdq'],
quant_delay=params['quant_delay']
)
else:
dllogger.log(data={"MODE": "USING ASSYMETRIC MODE"}, step=tuple())
tf.contrib.quantize.create_training_graph(
tf.get_default_graph(), quant_delay=params['quant_delay'], use_qdq=params['use_qdq']
)
# Fix for restoring variables during fine-tuning of Resnet
if 'finetune_checkpoint' in params.keys():
train_vars = tf.trainable_variables()
train_var_dict = {}
for var in train_vars:
train_var_dict[var.op.name] = var
dllogger.log(data={"Restoring variables from checkpoint": params['finetune_checkpoint']}, step=tuple())
tf.train.init_from_checkpoint(params['finetune_checkpoint'], train_var_dict)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'classes': y_preds, 'probabilities': probs}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={'predict': tf.estimator.export.PredictOutput(predictions)}
)
else:
with tf.device(main_device):
if mode == tf.estimator.ModeKeys.TRAIN:
acc_top1 = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)
acc_top5 = tf.nn.in_top_k(predictions=logits, targets=labels, k=5)
else:
acc_top1, acc_top1_update_op = tf.metrics.mean(
tf.nn.in_top_k(predictions=logits, targets=labels, k=1)
)
acc_top5, acc_top5_update_op = tf.metrics.mean(
tf.nn.in_top_k(predictions=logits, targets=labels, k=5)
)
tf.identity(acc_top1, name="acc_top1_ref")
tf.identity(acc_top5, name="acc_top5_ref")
predictions = {
'classes': y_preds,
'probabilities': probs,
'accuracy_top1': acc_top1,
'accuracy_top5': acc_top5
}
cross_entropy = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=one_hot_smoothed_labels)
assert (cross_entropy.dtype == tf.float32)
tf.identity(cross_entropy, name='cross_entropy_loss_ref')
def loss_filter_fn(name):
"""we don't need to compute L2 loss for BN and bias (eq. to add a cste)"""
return all(
[
tensor_name not in name.lower()
# for tensor_name in ["batchnorm", "batch_norm", "batch_normalization", "bias"]
for tensor_name in ["batchnorm", "batch_norm", "batch_normalization"]
]
)
filtered_params = [tf.cast(v, tf.float32) for v in tf.trainable_variables() if loss_filter_fn(v.name)]
if len(filtered_params) != 0:
l2_loss_per_vars = [tf.nn.l2_loss(v) for v in filtered_params]
l2_loss = tf.multiply(tf.add_n(l2_loss_per_vars), params["weight_decay"])
else:
l2_loss = tf.zeros(shape=(), dtype=tf.float32)
assert (l2_loss.dtype == tf.float32)
tf.identity(l2_loss, name='l2_loss_ref')
total_loss = tf.add(cross_entropy, l2_loss, name="total_loss")
assert (total_loss.dtype == tf.float32)
tf.identity(total_loss, name='total_loss_ref')
tf.summary.scalar('cross_entropy', cross_entropy)
tf.summary.scalar('l2_loss', l2_loss)
tf.summary.scalar('total_loss', total_loss)
if mode == tf.estimator.ModeKeys.TRAIN:
with tf.device("/cpu:0"):
learning_rate = learning_rate_scheduler(
lr_init=params["lr_init"],
lr_warmup_epochs=params["lr_warmup_epochs"],
global_step=global_step,
batch_size=params["batch_size"],
num_batches_per_epoch=params["steps_per_epoch"],
num_decay_steps=params["num_decay_steps"],
num_gpus=params["num_gpus"],
use_cosine_lr=params["use_cosine_lr"]
)
tf.identity(learning_rate, name='learning_rate_ref')
tf.summary.scalar('learning_rate', learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=params["momentum"])
if params["apply_loss_scaling"]:
optimizer = FixedLossScalerOptimizer(optimizer, scale=params["loss_scale"])
if hvd.size() > 1:
optimizer = hvd.hvd_global_object.DistributedOptimizer(optimizer)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if mode != tf.estimator.ModeKeys.TRAIN:
update_ops += [acc_top1_update_op, acc_top5_update_op]
deterministic = True
gate_gradients = (tf.compat.v1.train.Optimizer.GATE_OP if deterministic else tf.compat.v1.train.Optimizer.GATE_NONE)
backprop_op = optimizer.minimize(total_loss, gate_gradients=gate_gradients, global_step=global_step)
if self.model_hparams.use_dali:
train_ops = tf.group(backprop_op, update_ops, name='train_ops')
elif self.model_hparams.use_cpu:
train_ops = tf.group(
backprop_op, cpu_prefetch_op, update_ops, name='train_ops'
)
else:
train_ops = tf.group(
backprop_op, cpu_prefetch_op, gpu_prefetch_op, update_ops, name='train_ops'
)
return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=train_ops)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metrics = {
"top1_accuracy": (acc_top1, acc_top1_update_op),
"top5_accuracy": (acc_top5, acc_top5_update_op)
}
return tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions, loss=total_loss, eval_metric_ops=eval_metrics
)
else:
raise NotImplementedError('Unknown mode {}'.format(mode))
@staticmethod
def _stage(tensors):
"""Stages the given tensors in a StagingArea for asynchronous put/get.
"""
stage_area = tf.contrib.staging.StagingArea(
dtypes=[tensor.dtype for tensor in tensors], shapes=[tensor.get_shape() for tensor in tensors]
)
put_op = stage_area.put(tensors)
get_tensors = stage_area.get()
tf.add_to_collection('STAGING_AREA_PUTS', put_op)
return put_op, get_tensors
def build_model(self, inputs, training=True, reuse=False, use_final_conv=False):
with var_storage.model_variable_scope(
self.model_hparams.model_name, reuse=reuse, dtype=self.model_hparams.dtype
):
with tf.variable_scope("input_reshape"):
if self.model_hparams.input_format == 'NHWC' and self.model_hparams.compute_format == 'NCHW':
# Reshape inputs: NHWC => NCHW
inputs = tf.transpose(inputs, [0, 3, 1, 2])
elif self.model_hparams.input_format == 'NCHW' and self.model_hparams.compute_format == 'NHWC':
# Reshape inputs: NCHW => NHWC
inputs = tf.transpose(inputs, [0, 2, 3, 1])
if self.model_hparams.dtype != inputs.dtype:
inputs = tf.cast(inputs, self.model_hparams.dtype)
net = blocks.conv2d_block(
inputs,
n_channels=64,
kernel_size=(7, 7),
strides=(2, 2),
mode='SAME',
use_batch_norm=True,
activation='relu',
is_training=training,
data_format=self.model_hparams.compute_format,
conv2d_hparams=self.conv2d_hparams,
batch_norm_hparams=self.batch_norm_hparams,
name='conv2d'
)
net = layers.max_pooling2d(
net,
pool_size=(3, 3),
strides=(2, 2),
padding='SAME',
data_format=self.model_hparams.compute_format,
name="max_pooling2d",
)
model_bottlenecks = self.model_hparams.layers_depth
for block_id, block_bottleneck in enumerate(model_bottlenecks):
for layer_id in range(self.model_hparams.layers_count[block_id]):
stride = 2 if (layer_id == 0 and block_id != 0) else 1
net = blocks.bottleneck_block(
inputs=net,
depth=block_bottleneck * self.model_hparams.expansions,
depth_bottleneck=block_bottleneck,
cardinality=self.model_hparams.cardinality,
stride=stride,
training=training,
data_format=self.model_hparams.compute_format,
conv2d_hparams=self.conv2d_hparams,
batch_norm_hparams=self.batch_norm_hparams,
block_name="btlnck_block_%d_%d" % (block_id, layer_id),
use_se=self.model_hparams.use_se,
ratio=self.model_hparams.se_ratio
)
with tf.variable_scope("output"):
net = layers.reduce_mean(
net, keepdims=False, data_format=self.model_hparams.compute_format, name='spatial_mean'
)
if use_final_conv:
logits = layers.conv2d(
net,
n_channels=self.model_hparams.n_classes,
kernel_size=(1, 1),
strides=(1, 1),
padding='SAME',
data_format=self.model_hparams.compute_format,
dilation_rate=(1, 1),
use_bias=True,
kernel_initializer=self.dense_hparams.kernel_initializer,
bias_initializer=self.dense_hparams.bias_initializer,
trainable=training,
name='dense'
)
else:
logits = layers.dense(
inputs=net,
units=self.model_hparams.n_classes,
use_bias=True,
trainable=training,
kernel_initializer=self.dense_hparams.kernel_initializer,
bias_initializer=self.dense_hparams.bias_initializer
)
if logits.dtype != tf.float32:
logits = tf.cast(logits, tf.float32)
axis = 3 if self.model_hparams.compute_format=="NHWC" and use_final_conv else 1
probs = layers.softmax(logits, name="softmax", axis=axis)
return probs, logits
model_architectures = {
'resnet50': {
'layers': [3, 4, 6, 3],
'widths': [64, 128, 256, 512],
'expansions': 4,
},
'resnext101-32x4d': {
'layers': [3, 4, 23, 3],
'widths': [128, 256, 512, 1024],
'expansions': 2,
'cardinality': 32,
},
'se-resnext101-32x4d': {
'cardinality': 32,
'layers': [3, 4, 23, 3],
'widths': [128, 256, 512, 1024],
'expansions': 2,
'use_se': True,
'se_ratio': 16,
},
}
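# A hedged construction sketch: wiring one of the presets above into the
# ResnetModel constructor (the class count and format values are illustrative):
#
#   arch = model_architectures['resnet50']
#   model = ResnetModel(
#       model_name='resnet50',
#       n_classes=1001,
#       layers_count=arch['layers'],
#       layers_depth=arch['widths'],
#       expansions=arch['expansions'],
#       compute_format='NCHW',
#       dtype=tf.float32,
#   )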
|