relative_path (stringclasses, 812 values) | section (stringclasses, 339 values) | filename (stringlengths 2 to 61) | text (stringlengths 6 to 1.76M) |
---|---|---|---|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/model_analyzer | model_analyzer | model_analyzer_config | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .exceptions import ModelAnalyzerException
class ModelAnalyzerConfig:
"""
A config class to set arguments to the Model Analyzer.
An argument set to None will use the default.
"""
model_analyzer_args = [
"config-file",
]
input_to_options = [
"config-file",
]
def __init__(self):
# Args will be a dict with the string representation as key
self._args = {k: None for k in self.model_analyzer_args}
self._options = {
"-f": "config.yaml",
}
self._input_to_options = {
"config-file": "-f",
}
def to_cli_string(self):
"""
Utility function to convert a config into a
string of CLI arguments for the Model Analyzer.
Returns
-------
str
the command consisting of all set arguments to
the model analyzer.
e.g. '--model-repository=/models --verbose=True'
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
return " ".join(args)
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into model_analyzer
"""
return list(cls.model_analyzer_args) + list(cls.input_to_options)
def __getitem__(self, key):
"""
Gets an argument's value in the config
Parameters
----------
key : str
The name of the argument to the model analyzer
Returns
-------
The value that the argument is set to in this config
"""
if key in self._args:
return self._args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
else:
raise ModelAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key, value):
"""
Sets an argument's value in the config
after checking whether it is defined/supported.
Parameters
----------
key : str
The name of the argument to the model analyzer
value : (any)
The value to which the argument is being set
Raises
------
ModelAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
else:
raise ModelAnalyzerException(f"The argument '{key}' to the Model Analyzer is not supported.")
|
PyTorch/SpeechSynthesis/HiFiGAN/fastpitch | fastpitch | model_jit | # *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from typing import Optional
import torch
from torch import nn as nn
from fastpitch.model import TemporalPredictor
from fastpitch.transformer_jit import FFTransformer
def regulate_len(durations, enc_out, pace: float = 1.0,
mel_max_len: Optional[int] = None):
"""If target=None, then predicted durations are applied"""
reps = torch.round(durations.float() / pace).long()
dec_lens = reps.sum(dim=1)
max_len = dec_lens.max()
bsz, _, hid = enc_out.size()
reps_padded = torch.cat([reps, (max_len - dec_lens)[:, None]], dim=1)
pad_vec = torch.zeros(bsz, 1, hid, dtype=enc_out.dtype,
device=enc_out.device)
enc_rep = torch.cat([enc_out, pad_vec], dim=1)
enc_rep = torch.repeat_interleave(
enc_rep.view(-1, hid), reps_padded.view(-1), dim=0
).view(bsz, -1, hid)
if mel_max_len is not None:
enc_rep = enc_rep[:, :mel_max_len]
dec_lens = torch.clamp_max(dec_lens, mel_max_len)
return enc_rep, dec_lens
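# Worked example (explanatory comment only, not in the original source): with
# durations [[2, 1, 3]] and pace 1.0, reps is [[2, 1, 3]] and dec_lens is [6];
# each encoder frame is repeated along the time axis, so enc_rep has shape
# [1, 6, hid]. Shorter sequences in a batch are padded with the zero vector
# up to max_len.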
class FastPitchJIT(nn.Module):
__constants__ = ['energy_conditioning']
def __init__(self, n_mel_channels, n_symbols, padding_idx,
symbols_embedding_dim, in_fft_n_layers, in_fft_n_heads,
in_fft_d_head,
in_fft_conv1d_kernel_size, in_fft_conv1d_filter_size,
in_fft_output_size,
p_in_fft_dropout, p_in_fft_dropatt, p_in_fft_dropemb,
out_fft_n_layers, out_fft_n_heads, out_fft_d_head,
out_fft_conv1d_kernel_size, out_fft_conv1d_filter_size,
out_fft_output_size,
p_out_fft_dropout, p_out_fft_dropatt, p_out_fft_dropemb,
dur_predictor_kernel_size, dur_predictor_filter_size,
p_dur_predictor_dropout, dur_predictor_n_layers,
pitch_predictor_kernel_size, pitch_predictor_filter_size,
p_pitch_predictor_dropout, pitch_predictor_n_layers,
pitch_embedding_kernel_size,
energy_conditioning,
energy_predictor_kernel_size, energy_predictor_filter_size,
p_energy_predictor_dropout, energy_predictor_n_layers,
energy_embedding_kernel_size,
n_speakers, speaker_emb_weight, pitch_conditioning_formants=1):
super(FastPitchJIT, self).__init__()
self.encoder = FFTransformer(
n_layer=in_fft_n_layers, n_head=in_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=in_fft_d_head,
d_inner=in_fft_conv1d_filter_size,
kernel_size=in_fft_conv1d_kernel_size,
dropout=p_in_fft_dropout,
dropatt=p_in_fft_dropatt,
dropemb=p_in_fft_dropemb,
embed_input=True,
d_embed=symbols_embedding_dim,
n_embed=n_symbols,
padding_idx=padding_idx)
if n_speakers > 1:
self.speaker_emb = nn.Embedding(n_speakers, symbols_embedding_dim)
else:
self.speaker_emb = None
self.speaker_emb_weight = speaker_emb_weight
self.duration_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=dur_predictor_filter_size,
kernel_size=dur_predictor_kernel_size,
dropout=p_dur_predictor_dropout, n_layers=dur_predictor_n_layers
)
self.decoder = FFTransformer(
n_layer=out_fft_n_layers, n_head=out_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=out_fft_d_head,
d_inner=out_fft_conv1d_filter_size,
kernel_size=out_fft_conv1d_kernel_size,
dropout=p_out_fft_dropout,
dropatt=p_out_fft_dropatt,
dropemb=p_out_fft_dropemb,
embed_input=False,
d_embed=symbols_embedding_dim
)
self.pitch_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=pitch_predictor_filter_size,
kernel_size=pitch_predictor_kernel_size,
dropout=p_pitch_predictor_dropout, n_layers=pitch_predictor_n_layers,
n_predictions=pitch_conditioning_formants
)
self.pitch_emb = nn.Conv1d(
pitch_conditioning_formants, symbols_embedding_dim,
kernel_size=pitch_embedding_kernel_size,
padding=int((pitch_embedding_kernel_size - 1) / 2))
# Store values precomputed for training data within the model
self.register_buffer('pitch_mean', torch.zeros(1))
self.register_buffer('pitch_std', torch.zeros(1))
self.energy_conditioning = energy_conditioning
if energy_conditioning:
self.energy_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=energy_predictor_filter_size,
kernel_size=energy_predictor_kernel_size,
dropout=p_energy_predictor_dropout,
n_layers=energy_predictor_n_layers,
n_predictions=1
)
self.energy_emb = nn.Conv1d(
1, symbols_embedding_dim,
kernel_size=energy_embedding_kernel_size,
padding=int((energy_embedding_kernel_size - 1) / 2))
self.proj = nn.Linear(out_fft_output_size, n_mel_channels, bias=True)
# skip self.attention (used only in training)
def infer(self, inputs, pace: float = 1.0,
dur_tgt: Optional[torch.Tensor] = None,
pitch_tgt: Optional[torch.Tensor] = None,
energy_tgt: Optional[torch.Tensor] = None,
speaker: int = 0):
if self.speaker_emb is None:
spk_emb = None
else:
speaker = (torch.ones(inputs.size(0)).long().to(inputs.device)
* speaker)
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
spk_emb.mul_(self.speaker_emb_weight)
# Input FFT
enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb)
# Predict durations
log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1)
dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, 100.0)
# Pitch over chars
pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1)
if pitch_tgt is None:
pitch_emb = self.pitch_emb(pitch_pred).transpose(1, 2)
else:
pitch_emb = self.pitch_emb(pitch_tgt).transpose(1, 2)
enc_out = enc_out + pitch_emb
# Predict energy
if self.energy_conditioning:
if energy_tgt is None:
energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1)
energy_emb = self.energy_emb(energy_pred.unsqueeze(1)).transpose(1, 2)
else:
energy_pred = None
energy_emb = self.energy_emb(energy_tgt).transpose(1, 2)
enc_out = enc_out + energy_emb
else:
energy_pred = None
len_regulated, dec_lens = regulate_len(
dur_pred if dur_tgt is None else dur_tgt,
enc_out, pace, mel_max_len=None)
dec_out, dec_mask = self.decoder(len_regulated, dec_lens)
mel_out = self.proj(dec_out)
# mel_lens = dec_mask.squeeze(2).sum(axis=1).long()
mel_out = mel_out.permute(0, 2, 1) # For inference.py
return mel_out, dec_lens, dur_pred, pitch_pred, energy_pred
|
PyTorch/SpeechRecognition/QuartzNet | QuartzNet | requirements | inflect==5.3.0
ipdb
librosa==0.9.0
pandas==1.5.2
pyyaml>=5.4
soundfile
sox==1.4.1
tqdm==4.53.0
git+https://github.com/NVIDIA/[email protected]#egg=dllogger
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | visualization_utils_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.visualization_utils."""
import logging
import os
import numpy as np
import PIL.Image as Image
import tensorflow as tf
from object_detection.core import standard_fields as fields
from object_detection.utils import visualization_utils
_TESTDATA_PATH = 'object_detection/test_images'
class VisualizationUtilsTest(tf.test.TestCase):
def create_colorful_test_image(self):
"""This function creates an image that can be used to test vis functions.
It makes an image composed of four colored rectangles.
Returns:
colorful test numpy array image.
"""
ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)
ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)
ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)
imr = np.concatenate((ch255, ch128, ch128), axis=2)
img = np.concatenate((ch255, ch255, ch0), axis=2)
imb = np.concatenate((ch255, ch0, ch255), axis=2)
imw = np.concatenate((ch128, ch128, ch128), axis=2)
imu = np.concatenate((imr, img), axis=1)
imd = np.concatenate((imb, imw), axis=1)
image = np.concatenate((imu, imd), axis=0)
return image
def create_test_image_with_five_channels(self):
return np.full([100, 200, 5], 255, dtype=np.uint8)
def create_test_grayscale_image(self):
return np.full([100, 200, 1], 255, dtype=np.uint8)
def test_draw_bounding_box_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
ymin = 0.25
ymax = 0.75
xmin = 0.4
xmax = 0.6
visualization_utils.draw_bounding_box_on_image(test_image, ymin, xmin, ymax,
xmax)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_box_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
ymin = 0.25
ymax = 0.75
xmin = 0.4
xmax = 0.6
visualization_utils.draw_bounding_box_on_image_array(
test_image, ymin, xmin, ymax, xmax)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
boxes = np.array([[0.25, 0.75, 0.4, 0.6],
[0.1, 0.1, 0.9, 0.9]])
visualization_utils.draw_bounding_boxes_on_image(test_image, boxes)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
boxes = np.array([[0.25, 0.75, 0.4, 0.6],
[0.1, 0.1, 0.9, 0.9]])
visualization_utils.draw_bounding_boxes_on_image_array(test_image, boxes)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_bounding_boxes_on_image_tensors(self):
"""Tests that bounding box utility produces reasonable results."""
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
fname = os.path.join(_TESTDATA_PATH, 'image1.jpg')
image_np = np.array(Image.open(fname))
images_np = np.stack((image_np, image_np), axis=0)
original_image_shape = [[636, 512], [636, 512]]
with tf.Graph().as_default():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant(original_image_shape, dtype=tf.int32)
boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.6, 0.9]],
[[0.25, 0.25, 0.75, 0.75], [0.1, 0.3, 0.6, 1.0]]])
classes = tf.constant([[1, 1], [1, 2]], dtype=tf.int64)
scores = tf.constant([[0.8, 0.1], [0.6, 0.5]])
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
min_score_thresh=0.2))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
# Write output images for visualization.
images_with_boxes_np = sess.run(images_with_boxes)
self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
self.assertEqual(
tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
logging.info('Writing output image %d to %s', i, output_file)
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
def test_draw_bounding_boxes_on_image_tensors_with_additional_channels(self):
"""Tests the case where input image tensor has more than 3 channels."""
category_index = {1: {'id': 1, 'name': 'dog'}}
image_np = self.create_test_image_with_five_channels()
images_np = np.stack((image_np, image_np), axis=0)
with tf.Graph().as_default():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
scores = tf.constant(0, dtype=tf.float32, shape=[2, 0])
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
min_score_thresh=0.2))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
final_images_np = sess.run(images_with_boxes)
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
def test_draw_bounding_boxes_on_image_tensors_grayscale(self):
"""Tests the case where input image tensor has one channel."""
category_index = {1: {'id': 1, 'name': 'dog'}}
image_np = self.create_test_grayscale_image()
images_np = np.stack((image_np, image_np), axis=0)
with tf.Graph().as_default():
images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
image_shape = tf.constant([[100, 200], [100, 200]], dtype=tf.int32)
boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
scores = tf.constant(0, dtype=tf.float32, shape=[2, 0])
images_with_boxes = (
visualization_utils.draw_bounding_boxes_on_image_tensors(
images_tensor,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=image_shape,
true_image_shape=image_shape,
min_score_thresh=0.2))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
final_images_np = sess.run(images_with_boxes)
self.assertEqual((2, 100, 200, 3), final_images_np.shape)
def test_draw_keypoints_on_image(self):
test_image = self.create_colorful_test_image()
test_image = Image.fromarray(test_image)
width_original, height_original = test_image.size
keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]
visualization_utils.draw_keypoints_on_image(test_image, keypoints)
width_final, height_final = test_image.size
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_keypoints_on_image_array(self):
test_image = self.create_colorful_test_image()
width_original = test_image.shape[0]
height_original = test_image.shape[1]
keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]
visualization_utils.draw_keypoints_on_image_array(test_image, keypoints)
width_final = test_image.shape[0]
height_final = test_image.shape[1]
self.assertEqual(width_original, width_final)
self.assertEqual(height_original, height_final)
def test_draw_mask_on_image_array(self):
test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
mask = np.asarray([[0, 1],
[1, 1]], dtype=np.uint8)
expected_result = np.asarray([[[0, 0, 0], [0, 0, 127]],
[[0, 0, 127], [0, 0, 127]]], dtype=np.uint8)
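    # Explanatory comment (not in the original test): with alpha=0.5, blending
    # pure blue (0, 0, 255) over a black image yields (0, 0, 127) at every
    # pixel where the mask is 1, which is what expected_result encodes.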
visualization_utils.draw_mask_on_image_array(test_image, mask,
color='Blue', alpha=.5)
self.assertAllEqual(test_image, expected_result)
def test_add_cdf_image_summary(self):
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
visualization_utils.add_cdf_image_summary(values, 'PositiveAnchorLoss')
cdf_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
with self.test_session():
cdf_image_summary.eval()
def test_add_hist_image_summary(self):
values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
bins = [0.01 * i for i in range(101)]
visualization_utils.add_hist_image_summary(values, bins,
'ScoresDistribution')
hist_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
with self.test_session():
hist_image_summary.eval()
def test_eval_metric_ops(self):
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
max_examples_to_draw = 4
metric_op_base = 'Detections_Left_Groundtruth_Right'
eval_metric_ops = visualization_utils.VisualizeSingleFrameDetections(
category_index,
max_examples_to_draw=max_examples_to_draw,
summary_name_prefix=metric_op_base)
original_image = tf.placeholder(tf.uint8, [4, None, None, 3])
original_image_spatial_shape = tf.placeholder(tf.int32, [4, 2])
true_image_shape = tf.placeholder(tf.int32, [4, 3])
detection_boxes = tf.random_uniform([4, 20, 4],
minval=0.0,
maxval=1.0,
dtype=tf.float32)
detection_classes = tf.random_uniform([4, 20],
minval=1,
maxval=3,
dtype=tf.int64)
detection_scores = tf.random_uniform([4, 20],
minval=0.,
maxval=1.,
dtype=tf.float32)
groundtruth_boxes = tf.random_uniform([4, 8, 4],
minval=0.0,
maxval=1.0,
dtype=tf.float32)
groundtruth_classes = tf.random_uniform([4, 8],
minval=1,
maxval=3,
dtype=tf.int64)
eval_dict = {
fields.DetectionResultFields.detection_boxes:
detection_boxes,
fields.DetectionResultFields.detection_classes:
detection_classes,
fields.DetectionResultFields.detection_scores:
detection_scores,
fields.InputDataFields.original_image:
original_image,
fields.InputDataFields.original_image_spatial_shape: (
original_image_spatial_shape),
fields.InputDataFields.true_image_shape: (true_image_shape),
fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
fields.InputDataFields.groundtruth_classes:
groundtruth_classes
}
metric_ops = eval_metric_ops.get_estimator_eval_metric_ops(eval_dict)
_, update_op = metric_ops[list(metric_ops.keys())[0]]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
value_ops = {}
for key, (value_op, _) in metric_ops.items():
value_ops[key] = value_op
# First run enough update steps to surpass `max_examples_to_draw`.
for i in range(max_examples_to_draw):
# Use a unique image shape on each eval image.
sess.run(
update_op,
feed_dict={
original_image:
np.random.randint(
low=0,
high=256,
size=(4, 6 + i, 7 + i, 3),
dtype=np.uint8),
original_image_spatial_shape: [[6 + i, 7 + i], [6 + i, 7 + i],
[6 + i, 7 + i], [6 + i, 7 + i]],
true_image_shape: [[6 + i, 7 + i, 3], [6 + i, 7 + i, 3],
[6 + i, 7 + i, 3], [6 + i, 7 + i, 3]]
})
value_ops_out = sess.run(value_ops)
for key, value_op in value_ops_out.items():
self.assertNotEqual('', value_op)
# Now run fewer update steps than `max_examples_to_draw`. A single value
# op will be the empty string, since not enough image summaries can be
# produced.
for i in range(max_examples_to_draw - 1):
# Use a unique image shape on each eval image.
sess.run(
update_op,
feed_dict={
original_image:
np.random.randint(
low=0,
high=256,
size=(4, 6 + i, 7 + i, 3),
dtype=np.uint8),
original_image_spatial_shape: [[6 + i, 7 + i], [6 + i, 7 + i],
[6 + i, 7 + i], [6 + i, 7 + i]],
true_image_shape: [[6 + i, 7 + i, 3], [6 + i, 7 + i, 3],
[6 + i, 7 + i, 3], [6 + i, 7 + i, 3]]
})
value_ops_out = sess.run(value_ops)
self.assertEqual(
'',
value_ops_out[metric_op_base + '/' + str(max_examples_to_draw - 1)])
if __name__ == '__main__':
tf.test.main()
|
TensorFlow/LanguageModeling/BERT | BERT | CONTRIBUTING | # How to Contribute
BERT needs to maintain permanent compatibility with the pre-trained model files,
so we do not plan to make any major changes to this library (other than what was
promised in the README). However, we can accept small patches related to
refactoring and documentation. To submit contributions, there are just a few
small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
## Community Guidelines
This project follows
[Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
|
TensorFlow/LanguageModeling/BERT/scripts | scripts | run_squad | #!/usr/bin/env bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "Container nvidia build = " $NVIDIA_BUILD_ID
batch_size=${1:-"8"}
learning_rate=${2:-"5e-6"}
precision=${3:-"fp16"}
use_xla=${4:-"true"}
num_gpu=${5:-"8"}
seq_length=${6:-"384"}
doc_stride=${7:-"128"}
bert_model=${8:-"large"}
if [ "$bert_model" = "large" ] ; then
export BERT_DIR=data/download/nvidia_pretrained/bert_tf_pretraining_large_lamb
else
export BERT_DIR=data/download/google_pretrained_weights/uncased_L-12_H-768_A-12
fi
squad_version=${9:-"1.1"}
export SQUAD_DIR=data/download/squad/v${squad_version}
if [ "$squad_version" = "1.1" ] ; then
version_2_with_negative="False"
else
version_2_with_negative="True"
fi
init_checkpoint=${10:-"$BERT_DIR/model.ckpt"}
epochs=${11:-"2.0"}
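# Example invocation (illustrative values only; every argument is positional and optional):
#   bash scripts/run_squad.sh 8 5e-6 fp16 true 8 384 128 large 1.1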
echo "Squad directory set as " $SQUAD_DIR " BERT directory set as " $BERT_DIR
use_fp16=""
if [ "$precision" = "fp16" ] ; then
echo "fp16 activated!"
use_fp16="--amp"
else
echo "fp32/tf32 activated!"
use_fp16="--noamp"
fi
if [ "$use_xla" = "true" ] ; then
use_xla_tag="--use_xla"
echo "XLA activated"
else
use_xla_tag="--nouse_xla"
fi
if [ $num_gpu -gt 1 ] ; then
mpi_command="mpirun -np $num_gpu -H localhost:$num_gpu \
--allow-run-as-root -bind-to none -map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH -mca pml ob1 -mca btl ^openib"
else
mpi_command=""
fi
export GBS=$(expr $batch_size \* $num_gpu)
printf -v TAG "tf_bert_finetuning_squad_%s_%s_gbs%d" "$bert_model" "$precision" $GBS
DATESTAMP=`date +'%y%m%d%H%M%S'`
#Edit to save logs & checkpoints in a different directory
RESULTS_DIR=/results/${TAG}_${DATESTAMP}
LOGFILE=$RESULTS_DIR/$TAG.$DATESTAMP.log
mkdir -m 777 -p $RESULTS_DIR
printf "Saving checkpoints to %s\n" "$RESULTS_DIR"
printf "Logs written to %s\n" "$LOGFILE"
#Check if all necessary files are available before training
for DIR_or_file in $SQUAD_DIR $RESULTS_DIR $BERT_DIR/bert_config.json $BERT_DIR/vocab.txt; do
if [ ! -d "$DIR_or_file" ] && [ ! -f "$DIR_or_file" ]; then
echo "Error! $DIR_or_file directory missing. Please mount correctly"
exit -1
fi
done
$mpi_command python run_squad.py \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint=$init_checkpoint \
--do_train=True \
--train_file=$SQUAD_DIR/train-v${squad_version}.json \
--do_predict=True \
--predict_file=$SQUAD_DIR/dev-v${squad_version}.json \
--eval_script=$SQUAD_DIR/evaluate-v${squad_version}.py \
--train_batch_size=$batch_size \
--learning_rate=$learning_rate \
--num_train_epochs=$epochs \
--max_seq_length=$seq_length \
--doc_stride=$doc_stride \
--save_checkpoints_steps 1000 \
--output_dir=$RESULTS_DIR \
--horovod "$use_fp16" \
$use_xla_tag --version_2_with_negative=${version_2_with_negative} |& tee $LOGFILE
|
TensorFlow/Recommendation/WideAndDeep/scripts | scripts | DGX1_benchmark_training_amp_8gpu | #!/bin/bash
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
set -e
mpiexec --allow-run-as-root --bind-to socket -np 8 \
python -m trainer.task \
--benchmark_warmup_steps 500 \
--benchmark_steps 1000 \
--gpu \
--benchmark \
--amp \
--hvd
|
PyTorch/Segmentation/MaskRCNN/pytorch/demo | demo | webcam | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import time
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
parser.add_argument(
"--config-file",
default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.7,
help="Minimum score for the prediction to be shown",
)
parser.add_argument(
"--min-image-size",
type=int,
default=224,
help="Smallest size of the image to feed to the model. "
"Model was trained with 800, which gives best results",
)
parser.add_argument(
"--show-mask-heatmaps",
dest="show_mask_heatmaps",
help="Show a heatmap probability for the top masks-per-dim masks",
action="store_true",
)
parser.add_argument(
"--masks-per-dim",
type=int,
default=2,
help="Number of heatmaps per dimension to show",
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# load config from file and command-line arguments
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=args.confidence_threshold,
show_mask_heatmaps=args.show_mask_heatmaps,
masks_per_dim=args.masks_per_dim,
min_image_size=args.min_image_size,
)
cam = cv2.VideoCapture(0)
while True:
start_time = time.time()
ret_val, img = cam.read()
composite = coco_demo.run_on_opencv_image(img)
print("Time: {:.2f} s / img".format(time.time() - start_time))
cv2.imshow("COCO detections", composite)
if cv2.waitKey(1) == 27:
break # esc to quit
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
TensorFlow/Detection/SSD/models/research/slim/nets/nasnet | nasnet | pnasnet_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.pnasnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets.nasnet import pnasnet
slim = tf.contrib.slim
class PNASNetTest(tf.test.TestCase):
def testBuildLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildNonExistingLayerLargeModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random_uniform((5, 331, 331, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
pnasnet.build_pnasnet_large(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildNonExistingLayerMobileModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random_uniform((5, 224, 224, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
pnasnet.build_pnasnet_mobile(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildPreLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 4320])
def testBuildPreLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
net, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1080])
def testAllEndPointsShapesLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 42, 42, 540],
'Cell_0': [batch_size, 42, 42, 1080],
'Cell_1': [batch_size, 42, 42, 1080],
'Cell_2': [batch_size, 42, 42, 1080],
'Cell_3': [batch_size, 42, 42, 1080],
'Cell_4': [batch_size, 21, 21, 2160],
'Cell_5': [batch_size, 21, 21, 2160],
'Cell_6': [batch_size, 21, 21, 2160],
'Cell_7': [batch_size, 21, 21, 2160],
'Cell_8': [batch_size, 11, 11, 4320],
'Cell_9': [batch_size, 11, 11, 4320],
'Cell_10': [batch_size, 11, 11, 4320],
'Cell_11': [batch_size, 11, 11, 4320],
'global_pool': [batch_size, 4320],
# Logits and predictions
'AuxLogits': [batch_size, 1000],
'Predictions': [batch_size, 1000],
'Logits': [batch_size, 1000],
}
self.assertEqual(len(end_points), 17)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testAllEndPointsShapesMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
endpoints_shapes = {
'Stem': [batch_size, 28, 28, 135],
'Cell_0': [batch_size, 28, 28, 270],
'Cell_1': [batch_size, 28, 28, 270],
'Cell_2': [batch_size, 28, 28, 270],
'Cell_3': [batch_size, 14, 14, 540],
'Cell_4': [batch_size, 14, 14, 540],
'Cell_5': [batch_size, 14, 14, 540],
'Cell_6': [batch_size, 7, 7, 1080],
'Cell_7': [batch_size, 7, 7, 1080],
'Cell_8': [batch_size, 7, 7, 1080],
'global_pool': [batch_size, 1080],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
}
self.assertEqual(len(end_points), 14)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testNoAuxHeadMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testOverrideHParamsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 540, 42, 42])
def testOverrideHParamsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
self.assertListEqual(end_points['Stem'].shape.as_list(),
[batch_size, 135, 28, 28])
def testUseBoundedActivationMobileModel(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
for use_bounded_activation in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_bounded_activation', use_bounded_activation)
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, _ = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
for node in tf.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'):
self.assertEqual(node.op == 'Relu6', use_bounded_activation)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/LanguageModeling/BART/utils | utils | distributed_utils | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from contextlib import contextmanager
import torch
import math
import pynvml
pynvml.nvmlInit()
def init_distributed(cuda):
"""
Initializes distributed backend.
:param cuda: (bool) if True initializes nccl backend, if False initializes
gloo backend
"""
world_size = int(os.environ.get('WORLD_SIZE', 1))
distributed = (world_size > 1)
if distributed:
backend = 'nccl' if cuda else 'gloo'
torch.distributed.init_process_group(backend=backend,
init_method='env://')
assert torch.distributed.is_initialized()
return distributed
def barrier():
"""
Call torch.distributed.barrier() if distritubed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
"""
Gets total number of distributed workers or returns one if distributed is
not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
def get_device_count():
"""
Gets total number of devices per node
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
nproc_per_node = torch.cuda.device_count()
else:
nproc_per_node = 1
return nproc_per_node
def all_reduce_item(value, op='sum'):
"""
All-reduces single scalar value if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == 'sum' or op == 'mean':
dop = torch.distributed.ReduceOp.SUM
elif op == 'min':
dop = torch.distributed.ReduceOp.MIN
elif op == 'max':
dop = torch.distributed.ReduceOp.MAX
elif op == 'product':
dop = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError('Unsupported reduce op')
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device('cuda')
elif backend == torch.distributed.Backend.GLOO:
device = torch.device('cpu')
else:
raise RuntimeError('Unsupported distributed backend')
tensor = torch.tensor(value, device=device)
torch.distributed.all_reduce(tensor, dop)
if op == 'mean':
tensor /= get_world_size()
ret = tensor.item()
else:
if torch.is_tensor(value):
ret = value.item()
else:
ret = value
return ret
@contextmanager
def sync_workers():
"""
Yields distributed rank and synchronizes all workers on exit.
"""
rank = get_rank()
yield rank
barrier()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
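        # Illustrative example (comment only): if nvml returned the single mask
        # 0b1011, reversing the bit string puts core 0 first and this method
        # returns [0, 1, 3].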
return [i for i, e in enumerate(affinity_list) if e != 0]
def set_affinity(gpu_id=None):
if gpu_id is None:
gpu_id = int(os.getenv('LOCAL_RANK', 0))
dev = device(gpu_id)
os.sched_setaffinity(0, dev.getCpuAffinity())
# return the set of logical core ids this process is now pinned to
return os.sched_getaffinity(0)
|
PyTorch/Recommendation/DLRM/dlrm/nn | nn | interactions | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from dlrm.cuda_ext import dotBasedInteract
def padding_size(n: int) -> int:
nearest_multiple = ((n - 1) // 8 + 1) * 8
return nearest_multiple - n
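# Example (explanatory comment only): padding_size(27) == 5, because the next
# multiple of 8 is 32; padding_size(32) == 0.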
class Interaction:
@property
def num_interactions(self) -> int:
raise NotImplementedError()
def interact(self, bottom_output, bottom_mlp_output):
"""
:param bottom_output: [batch_size, 1 + #embeddings, embedding_dim]
:param bottom_mlp_output
:return:
"""
raise NotImplementedError()
class DotInteraction(Interaction):
def __init__(self, embedding_num: int, embedding_dim: int):
"""
Interactions are among the outputs of all the embedding tables and the bottom MLP: in total
(num_embedding_tables + 1) vectors of size embedding_dim. The ``dot`` interaction computes the dot product
between every pair of these vectors. The output of the interaction has shape [batch_size, num_interactions].
"""
self._num_interaction_inputs = embedding_num + 1
self._embedding_dim = embedding_dim
self._tril_indices = torch.tensor([[i for i in range(self._num_interaction_inputs)
for _ in range(i)],
[j for i in range(self._num_interaction_inputs)
for j in range(i)]]).cuda()
# THIS IS NOT A REGULAR TRIANGULAR LOWER MATRIX! THE MAIN DIAGONAL IS NOT INCLUDED
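        # Illustrative example (comment only): for 3 interaction inputs the
        # index pairs above are (1, 0), (2, 0) and (2, 1), i.e. strictly below
        # the main diagonal.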
@property
def _raw_num_interactions(self) -> int:
return (self._num_interaction_inputs * (self._num_interaction_inputs - 1)) // 2 + self._embedding_dim
@property
def num_interactions(self) -> int:
n = self._raw_num_interactions
return n + padding_size(n)
def interact(self, bottom_output, bottom_mlp_output):
"""
:param bottom_output: [batch_size, 1 + #embeddings, embedding_dim]
:param bottom_mlp_output
:return:
"""
batch_size = bottom_output.size()[0]
interaction = torch.bmm(bottom_output, torch.transpose(bottom_output, 1, 2))
interaction_flat = interaction[:, self._tril_indices[0], self._tril_indices[1]]
# concatenate dense features and interactions
padding_dim = padding_size(self._raw_num_interactions)
zeros_padding = torch.zeros(batch_size, padding_dim, dtype=bottom_output.dtype, device=bottom_output.device)
interaction_output = torch.cat(
(bottom_mlp_output, interaction_flat, zeros_padding), dim=1)
return interaction_output
class CudaDotInteraction(Interaction):
def __init__(self, dot_interaction: DotInteraction):
self._dot_interaction = dot_interaction
@property
def num_interactions(self):
return self._dot_interaction.num_interactions
def interact(self, bottom_output, bottom_mlp_output):
"""
:param bottom_output: [batch_size, 1 + #embeddings, embedding_dim]
:param bottom_mlp_output
:return:
"""
return dotBasedInteract(bottom_output, bottom_mlp_output)
class CatInteraction(Interaction):
def __init__(self, embedding_num: int, embedding_dim: int):
"""
Interactions are among the outputs of all the embedding tables and the bottom MLP: in total
(num_embedding_tables + 1) vectors of size embedding_dim. The ``cat`` interaction concatenates all the vectors
together. The output of the interaction has shape [batch_size, num_interactions].
"""
self._num_interaction_inputs = embedding_num + 1
self._embedding_dim = embedding_dim
@property
def num_interactions(self) -> int:
return self._num_interaction_inputs * self._embedding_dim
def interact(self, bottom_output, bottom_mlp_output):
"""
:param bottom_output: [batch_size, 1 + #embeddings, embedding_dim]
:param bottom_mlp_output
:return:
"""
return bottom_output.view(-1, self.num_interactions)
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/model_analyzer | model_analyzer | model_analyzer_config | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .exceptions import ModelAnalyzerException
class ModelAnalyzerConfig:
"""
A config class to set arguments to the Model Analyzer.
An argument set to None will use the default.
"""
model_analyzer_args = [
"config-file",
]
input_to_options = [
"config-file",
]
def __init__(self):
# Args will be a dict with the string representation as key
self._args = {k: None for k in self.model_analyzer_args}
self._options = {
"-f": "config.yaml",
}
self._input_to_options = {
"config-file": "-f",
}
def to_cli_string(self):
"""
Utility function to convert a config into a
string of CLI arguments for the Model Analyzer.
Returns
-------
str
the command consisting of all set arguments to
the model analyzer.
e.g. '--model-repository=/models --verbose=True'
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
return " ".join(args)
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into model_analyzer
"""
return list(cls.model_analyzer_args) + list(cls.input_to_options)
def __getitem__(self, key):
"""
Gets an argument's value in the config
Parameters
----------
key : str
The name of the argument to the model analyzer
Returns
-------
The value that the argument is set to in this config
"""
if key in self._args:
return self._args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
else:
raise ModelAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key, value):
"""
Sets an argument's value in the config
after checking whether it is defined/supported.
Parameters
----------
key : str
The name of the argument to the model analyzer
value : (any)
The value to which the argument is being set
Raises
------
ModelAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
else:
raise ModelAnalyzerException(f"The argument '{key}' to the Model Analyzer is not supported.")
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/test | test | CharacterMappingReader_test | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CharacterMappingReader.hpp"
#include "UnitTest.hpp"
#include "characterMapping.h"
#include <fstream>
using namespace tts;
/******************************************************************************
* UNIT TEST ******************************************************************
*****************************************************************************/
TEST(LoadFromFile)
{
{
std::ofstream fout("test.txt");
fout << "# ignored header" << std::endl;
fout << "0 long" << std::endl;
fout << "1 s" << std::endl;
fout << "2 " << std::endl;
fout << "3 " << std::endl;
}
CharacterMapping mapping = CharacterMappingReader::loadFromFile("test.txt");
EXPECT_EQ(mapping.get("long"), 0);
EXPECT_EQ(mapping.get("s"), 1);
EXPECT_EQ(mapping.get(" "), 2);
EXPECT_EQ(mapping.get(" "), 3);
}
|
TensorFlow2/LanguageModeling/BERT/official/utils/flags | flags | flags_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from absl import flags
import tensorflow as tf
from official.utils.flags import core as flags_core # pylint: disable=g-bad-import-order
def define_flags():
flags_core.define_base(clean=True, num_gpu=False, stop_threshold=True,
hooks=True, train_epochs=True,
epochs_between_evals=True)
flags_core.define_performance(
num_parallel_calls=True, inter_op=True, intra_op=True,
dynamic_loss_scale=True, loss_scale=True, synthetic_data=True,
dtype=True)
flags_core.define_image()
flags_core.define_benchmark()
class BaseTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(BaseTester, cls).setUpClass()
define_flags()
def test_default_setting(self):
"""Test to ensure fields exist and defaults can be set.
"""
defaults = dict(
data_dir="dfgasf",
model_dir="dfsdkjgbs",
train_epochs=534,
epochs_between_evals=15,
batch_size=256,
hooks=["LoggingTensorHook"],
num_parallel_calls=18,
inter_op_parallelism_threads=5,
intra_op_parallelism_threads=10,
data_format="channels_first"
)
flags_core.set_defaults(**defaults)
flags_core.parse_flags()
for key, value in defaults.items():
assert flags.FLAGS.get_flag_value(name=key, default=None) == value
def test_benchmark_setting(self):
defaults = dict(
hooks=["LoggingMetricHook"],
benchmark_log_dir="/tmp/12345",
gcp_project="project_abc",
)
flags_core.set_defaults(**defaults)
flags_core.parse_flags()
for key, value in defaults.items():
assert flags.FLAGS.get_flag_value(name=key, default=None) == value
def test_booleans(self):
"""Test to ensure boolean flags trigger as expected.
"""
flags_core.parse_flags([__file__, "--use_synthetic_data"])
assert flags.FLAGS.use_synthetic_data
def test_parse_dtype_info(self):
flags_core.parse_flags([__file__, "--dtype", "fp16"])
self.assertEqual(flags_core.get_tf_dtype(flags.FLAGS), tf.float16)
self.assertEqual(flags_core.get_loss_scale(flags.FLAGS,
default_for_fp16=2), 2)
flags_core.parse_flags(
[__file__, "--dtype", "fp16", "--loss_scale", "5"])
self.assertEqual(flags_core.get_loss_scale(flags.FLAGS,
default_for_fp16=2), 5)
flags_core.parse_flags(
[__file__, "--dtype", "fp16", "--loss_scale", "dynamic"])
self.assertEqual(flags_core.get_loss_scale(flags.FLAGS,
default_for_fp16=2), "dynamic")
flags_core.parse_flags([__file__, "--dtype", "fp32"])
self.assertEqual(flags_core.get_tf_dtype(flags.FLAGS), tf.float32)
self.assertEqual(flags_core.get_loss_scale(flags.FLAGS,
default_for_fp16=2), 1)
flags_core.parse_flags([__file__, "--dtype", "fp32", "--loss_scale", "5"])
self.assertEqual(flags_core.get_loss_scale(flags.FLAGS,
default_for_fp16=2), 5)
with self.assertRaises(SystemExit):
flags_core.parse_flags([__file__, "--dtype", "int8"])
with self.assertRaises(SystemExit):
flags_core.parse_flags([__file__, "--dtype", "fp16",
"--loss_scale", "abc"])
def test_get_nondefault_flags_as_str(self):
defaults = dict(
clean=True,
data_dir="abc",
hooks=["LoggingTensorHook"],
stop_threshold=1.5,
use_synthetic_data=False
)
flags_core.set_defaults(**defaults)
flags_core.parse_flags()
expected_flags = ""
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
flags.FLAGS.clean = False
expected_flags += "--noclean"
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
flags.FLAGS.data_dir = "xyz"
expected_flags += " --data_dir=xyz"
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
flags.FLAGS.hooks = ["aaa", "bbb", "ccc"]
expected_flags += " --hooks=aaa,bbb,ccc"
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
flags.FLAGS.stop_threshold = 3.
expected_flags += " --stop_threshold=3.0"
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
flags.FLAGS.use_synthetic_data = True
expected_flags += " --use_synthetic_data"
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
    # Assert that explicitly setting a flag to its default value does not cause it
# to appear in the string
flags.FLAGS.use_synthetic_data = False
expected_flags = expected_flags[:-len(" --use_synthetic_data")]
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
if __name__ == "__main__":
unittest.main()
|
PyTorch/Detection/Efficientdet/scripts/D0 | D0 | train-benchmark_FP32_V100-32G | #!/bin/bash
function get_dataloader_workers {
gpus=$(nvidia-smi -i 0 --query-gpu=count --format=csv,noheader)
core=$(nproc --all)
workers=$((core/gpus-2))
workers=$((workers>16?16:workers))
echo ${workers}
}
WORKERS=$(get_dataloader_workers)
./distributed_train.sh ${NUM_PROC:-8} /workspace/object_detection/datasets/coco --model efficientdet_d0 -b 30 --lr 0.9 --opt fusedmomentum --warmup-epochs 50 --lr-noise 0.4 0.9 --output /model --worker ${WORKERS} --fill-color mean --model-ema --model-ema-decay 0.999 --eval-after 200 --epochs 5 --resume --smoothing 0.0 --pretrained-backbone-path /backbone_checkpoints/jocbackbone_statedict_B0.pth --memory-format nchw --sync-bn --fused-focal-loss --seed 12711 --benchmark-steps 500 --benchmark |
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads | heads | keras_class_head_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.heads.class_head."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_class_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
class ConvolutionalKerasClassPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.ConvolutionalClassHead(
is_training=True,
num_class_slots=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=False)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature,)
self.assertAllEqual([64, 323, 20],
class_predictions.get_shape().as_list())
# TODO(kaftan): Remove conditional after CMLE moves to TF 1.10
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/Recommendation/WideAndDeep/triton/scripts | scripts | setup_environment | #!/usr/bin/env bash
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKDIR="${WORKDIR:=$(pwd)}"
export DATASETS_DIR=${WORKDIR}/datasets
export WORKSPACE_DIR=${WORKDIR}/runner_workspace
export CHECKPOINTS_DIR=${WORKSPACE_DIR}/checkpoints
export MODEL_REPOSITORY_PATH=${WORKSPACE_DIR}/model_store
export SHARED_DIR=${WORKSPACE_DIR}/shared_dir
echo "Preparing directories"
mkdir -p ${WORKSPACE_DIR}
mkdir -p ${DATASETS_DIR}
mkdir -p ${CHECKPOINTS_DIR}
mkdir -p ${MODEL_REPOSITORY_PATH}
mkdir -p ${SHARED_DIR}
echo "Setting up environment"
export MODEL_NAME=WidenDeep
export ENSEMBLE_MODEL_NAME= |
PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit | deployment_toolkit | extensions | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import re
from pathlib import Path
from typing import List
LOGGER = logging.getLogger(__name__)
class ExtensionManager:
def __init__(self, name: str):
self._name = name
self._registry = {}
def register_extension(self, extension: str, clazz):
already_registered_class = self._registry.get(extension, None)
if already_registered_class and already_registered_class.__module__ != clazz.__module__:
raise RuntimeError(
f"Conflicting extension {self._name}/{extension}; "
                f"{already_registered_class.__module__}.{already_registered_class.__name__} "
f"and "
f"{clazz.__module__}.{clazz.__name__}"
)
elif already_registered_class is None:
clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
self._registry[extension] = clazz
def get(self, extension):
if extension not in self._registry:
raise RuntimeError(f"Missing extension {self._name}/{extension}")
return self._registry[extension]
@property
def supported_extensions(self):
return list(self._registry)
@staticmethod
def scan_for_extensions(extension_dirs: List[Path]):
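        # Scan the given directories for python files that call register_extension
        # and import them, so that extensions register themselves on load.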
register_pattern = r".*\.register_extension\(.*"
for extension_dir in extension_dirs:
for python_path in extension_dir.rglob("*.py"):
if not python_path.is_file():
continue
payload = python_path.read_text()
if re.findall(register_pattern, payload):
import_path = python_path.relative_to(toolkit_root_dir.parent)
package = import_path.parent.as_posix().replace(os.sep, ".")
package_with_module = f"{package}.{import_path.stem}"
spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
my_module = importlib.util.module_from_spec(spec)
my_module.__package__ = package
try:
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
except ModuleNotFoundError as e:
LOGGER.error(
f"Could not load extensions from {import_path} due to missing python packages; {e}"
)
runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
converters = ExtensionManager("converters")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
|
TensorFlow/Translation/GNMT | GNMT | benchmark_hooks | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import tensorflow as tf
__all__ = ['BenchmarkHook']
class BenchmarkHook(tf.train.SessionRunHook):
latencies = ['avg', 50, 90, 95, 99, 100]
def __init__(self, global_batch_size, warmup_steps=10):
self.warmup_steps = warmup_steps
self.global_batch_size = global_batch_size
self.iter_times = []
def before_run(self, run_context):
self.t0 = time.time()
def after_run(self, run_context, run_values):
batch_time = time.time() - self.t0
self.iter_times.append(batch_time)
def get_average_speed_and_latencies(self):
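        # Drop warmup iterations before averaging; when only a few iterations were
        # recorded, fall back to a smaller warmup so that some samples remain.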
if len(self.iter_times) > self.warmup_steps + 5:
warmup_steps = self.warmup_steps
elif len(self.iter_times) > 15:
warmup_steps = 10
elif len(self.iter_times) > 10:
warmup_steps = 5
elif len(self.iter_times) > 4:
warmup_steps = 2
elif len(self.iter_times) > 1:
warmup_steps = 1
else:
warmup_steps = 0
times = self.iter_times[warmup_steps:]
avg_time = np.mean(times)
speed = self.global_batch_size / avg_time
latencies = {}
for lat in self.latencies:
if lat == 'avg':
val = avg_time
else:
val = np.percentile(times, lat)
latencies[str(lat)] = val
return speed, latencies
|
TensorFlow2/LanguageModeling/ELECTRA/data | data | dataPrep | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import BookscorpusTextFormatting
import Downloader
import TextSharding
import WikicorpusTextFormatting
import argparse
import itertools
import multiprocessing
import os
import pprint
import subprocess
def main(args):
working_dir = os.environ['DATA_PREP_WORKING_DIR']
print('Working Directory:', working_dir)
print('Action:', args.action)
print('Dataset Name:', args.dataset)
if args.input_files:
args.input_files = args.input_files.split(',')
hdf5_tfrecord_folder_prefix = "_lower_case_" + str(args.do_lower_case) + "_seq_len_" + str(args.max_seq_length) \
+ "_random_seed_" + str(args.random_seed)
directory_structure = {
'download' : working_dir + '/download', # Downloaded and decompressed
'extracted' : working_dir +'/extracted', # Extracted from whatever the initial format is (e.g., wikiextractor)
'formatted' : working_dir + '/formatted_one_article_per_line', # This is the level where all sources should look the same
'sharded' : working_dir + '/sharded_' + "training_shards_" + str(args.n_training_shards) + "_test_shards_" + str(args.n_test_shards) + "_fraction_" + str(args.fraction_test_set),
'tfrecord' : working_dir + '/tfrecord'+ hdf5_tfrecord_folder_prefix,
'hdf5': working_dir + '/hdf5' + hdf5_tfrecord_folder_prefix
}
print('\nDirectory Structure:')
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(directory_structure)
print('')
if args.action == 'download':
if not os.path.exists(directory_structure['download']):
os.makedirs(directory_structure['download'])
downloader = Downloader.Downloader(args.dataset, directory_structure['download'])
downloader.download()
elif args.action == 'text_formatting':
assert args.dataset != 'google_pretrained_weights' and args.dataset != 'nvidia_pretrained_weights' and args.dataset != 'squad' and args.dataset != 'mrpc', 'Cannot perform text_formatting on pretrained weights'
if not os.path.exists(directory_structure['extracted']):
os.makedirs(directory_structure['extracted'])
if not os.path.exists(directory_structure['formatted']):
os.makedirs(directory_structure['formatted'])
if args.dataset == 'bookscorpus':
books_path = directory_structure['download'] + '/bookscorpus'
#books_path = directory_structure['download']
output_filename = directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt'
books_formatter = BookscorpusTextFormatting.BookscorpusTextFormatting(books_path, output_filename, recursive=True)
books_formatter.merge()
elif args.dataset == 'wikicorpus_en':
if args.skip_wikiextractor == 0:
path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_en.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
print('WikiExtractor Command:', wikiextractor_command)
wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True)
#wikiextractor_process.communicate()
wiki_path = directory_structure['extracted'] + '/wikicorpus_en'
output_filename = directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt'
wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
wiki_formatter.merge()
elif args.dataset == 'wikicorpus_zh':
raise NotImplementedError(
'wikicorpus_zh not fully supported at this time. The simplified/tradition Chinese data needs to be '
'translated and properly segmented still, and should work once this step is added.')
# if args.skip_wikiextractor == 0:
# path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
# wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_zh.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
# print('WikiExtractor Command:', wikiextractor_command)
# wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True)
# #wikiextractor_process.communicate()
#
# wiki_path = directory_structure['extracted'] + '/wikicorpus_zh'
# output_filename = directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt'
# wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
# wiki_formatter.merge()
#
# assert os.stat(output_filename).st_size > 0, 'File glob did not pick up extracted wiki files from WikiExtractor.'
elif args.action == 'sharding':
# Note: books+wiki requires user to provide list of input_files (comma-separated with no spaces)
if args.dataset == 'bookscorpus' or 'wikicorpus' in args.dataset or 'books_wiki' in args.dataset:
if args.input_files is None:
if args.dataset == 'bookscorpus':
args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt']
elif args.dataset == 'wikicorpus_en':
args.input_files = [directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']
elif args.dataset == 'wikicorpus_zh':
args.input_files = [directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt']
elif args.dataset == 'books_wiki_en_corpus':
args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt', directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']
output_file_prefix = directory_structure['sharded'] + '/' + args.dataset + '/' + args.dataset
if not os.path.exists(directory_structure['sharded']):
os.makedirs(directory_structure['sharded'])
if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset):
os.makedirs(directory_structure['sharded'] + '/' + args.dataset)
# Segmentation is here because all datasets look the same in one article/book/whatever per line format, and
# it seemed unnecessarily complicated to add an additional preprocessing step to call just for this.
# Different languages (e.g., Chinese simplified/traditional) may require translation and
# other packages to be called from here -- just add a conditional branch for those extra steps
segmenter = TextSharding.NLTKSegmenter()
sharding = TextSharding.Sharding(args.input_files, output_file_prefix, args.n_training_shards, args.n_test_shards, args.fraction_test_set)
sharding.load_articles()
sharding.segment_articles_into_sentences(segmenter)
sharding.distribute_articles_over_shards()
sharding.write_shards_to_disk()
for _dir in ['train', 'test']:
if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset + '/' + _dir):
os.makedirs(directory_structure['sharded'] + '/' + args.dataset + '/' + _dir)
absolute_dir = directory_structure['sharded'] + '/' + args.dataset
command = 'mv ' + absolute_dir + '/*' + _dir + '*.txt' + ' ' + absolute_dir + '/' + _dir
mv_process = subprocess.Popen(command, shell=True)
mv_process.wait()
else:
assert False, 'Unsupported dataset for sharding'
elif args.action == 'create_tfrecord_files':
if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset):
os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset)
if args.vocab_file is None:
args.vocab_file = os.path.join(working_dir, "vocab.txt")
for _dir in ['train', 'test']:
electra_preprocessing_command = 'python /workspace/electra/build_pretraining_dataset.py'
electra_preprocessing_command += ' --corpus-dir=' + directory_structure['sharded'] + '/' + args.dataset + '/' + _dir
electra_preprocessing_command += ' --output-dir=' + directory_structure['tfrecord'] + '/' + args.dataset + '/' + _dir
electra_preprocessing_command += ' --vocab-file=' + args.vocab_file
electra_preprocessing_command += ' --do-lower-case' if args.do_lower_case else ' --no-lower-case'
electra_preprocessing_command += ' --max-seq-length=' + str(args.max_seq_length)
electra_preprocessing_command += ' --num-processes=8'
electra_preprocessing_command += ' --num-out-files=' + str(args.n_training_shards) if _dir == 'train' \
else ' --num-out-files=' + str(args.n_test_shards)
electra_preprocessing_process = subprocess.Popen(electra_preprocessing_command, shell=True)
electra_preprocessing_process.wait()
elif args.action == 'create_hdf5_files':
raise NotImplementedError
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Preprocessing Application for Everything BERT-related'
)
parser.add_argument(
'--action',
type=str,
help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords',
choices={
            'download', # Download and verify md5/sha sums
'text_formatting', # Convert into a file that contains one article/book per line
'sharding', # Convert previous formatted text into shards containing one sentence per line
'create_tfrecord_files', # Turn each shard into a TFrecord with masking and next sentence prediction info
'create_hdf5_files' # Turn each shard into a HDF5 file with masking and next sentence prediction info
}
)
parser.add_argument(
'--dataset',
type=str,
help='Specify the dataset to perform --action on',
choices={
'bookscorpus',
'wikicorpus_en',
'wikicorpus_zh',
'books_wiki_en_corpus',
'google_pretrained_weights',
'nvidia_pretrained_weights',
'mrpc',
'squad',
'all'
}
)
parser.add_argument(
'--input_files',
type=str,
help='Specify the input files in a comma-separated list (no spaces)'
)
parser.add_argument(
'--n_training_shards',
type=int,
help='Specify the number of training shards to generate',
default=2048
)
parser.add_argument(
'--n_test_shards',
type=int,
help='Specify the number of test shards to generate',
default=2048
)
parser.add_argument(
'--fraction_test_set',
type=float,
help='Specify the fraction (0..1) of the data to withhold for the test data split (based on number of sequences)',
default=0.1
)
parser.add_argument(
'--segmentation_method',
type=str,
help='Specify your choice of sentence segmentation',
choices={
'nltk'
},
default='nltk'
)
parser.add_argument(
'--n_processes',
type=int,
help='Specify the max number of processes to allow at one time',
default=4
)
parser.add_argument(
'--random_seed',
type=int,
help='Specify the base seed to use for any random number generation',
default=12345
)
parser.add_argument(
'--dupe_factor',
type=int,
help='Specify the duplication factor',
default=5
)
parser.add_argument(
'--masked_lm_prob',
type=float,
help='Specify the probability for masked lm',
default=0.15
)
parser.add_argument(
'--max_seq_length',
type=int,
help='Specify the maximum sequence length',
default=512
)
parser.add_argument(
'--do_lower_case',
type=int,
help='Specify whether it is cased (0) or uncased (1) (any number greater than 0 will be treated as uncased)',
default=0
)
parser.add_argument(
'--vocab_file',
type=str,
        help='Specify absolute path to vocab file to use'
)
parser.add_argument(
'--skip_wikiextractor',
type=int,
help='Specify whether to skip wikiextractor step 0=False, 1=True',
default=0
)
parser.add_argument(
'--interactive_json_config_generator',
type=str,
help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords'
)
args = parser.parse_args()
main(args)
|
TensorFlow/Classification/ConvNets/runtime | runtime | runner_utils | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import math
import tensorflow as tf
__all__ = ['count_steps', 'list_filenames_in_dataset', 'parse_tfrecords_dataset']
def count_steps(iter_unit, num_samples, num_iter, global_batch_size):
num_samples, num_iter = map(float, (num_samples, num_iter))
if iter_unit not in ["batch", "epoch"]:
raise ValueError("Invalid `iter_unit` value: %s" % iter_unit)
if iter_unit == 'epoch':
num_steps = (num_samples // global_batch_size) * num_iter
num_epochs = num_iter
num_decay_steps = num_steps
else:
num_steps = num_iter
num_epochs = math.ceil(num_steps / (num_samples // global_batch_size))
num_decay_steps = 90 * num_samples // global_batch_size
return num_steps, num_epochs, num_decay_steps
def list_filenames_in_dataset(data_dir, mode, count=True):
if mode not in ["train", 'validation']:
raise ValueError("Unknown mode received: %s" % mode)
filename_pattern = os.path.join(data_dir, '%s-*' % mode)
file_list = sorted(tf.compat.v1.gfile.Glob(filename_pattern))
num_samples = 0
if count:
def count_records(tf_record_filename):
count = 0
for _ in tf.compat.v1.io.tf_record_iterator(tf_record_filename):
count += 1
return count
n_files = len(file_list)
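        # Estimate the total number of samples: assume every file except the last
        # one contains the same number of records as the first file.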
num_samples = (count_records(file_list[0]) * (n_files - 1) + count_records(file_list[-1]))
return file_list, num_samples
def parse_tfrecords_dataset(data_dir, mode, iter_unit, num_iter, global_batch_size):
if data_dir is not None:
filenames, num_samples = list_filenames_in_dataset(data_dir=data_dir, mode=mode)
else:
num_samples = 256000
filenames = []
num_steps, num_epochs, num_decay_steps = count_steps(
iter_unit=iter_unit, num_samples=num_samples, num_iter=num_iter, global_batch_size=global_batch_size
)
return filenames, num_samples, num_steps, num_epochs, num_decay_steps
def parse_inference_input(to_predict):
filenames = []
image_formats = ['.jpg', '.jpeg', '.JPEG', '.JPG', '.png', '.PNG']
if os.path.isdir(to_predict):
filenames = [f for f in os.listdir(to_predict)
if os.path.isfile(os.path.join(to_predict, f))
and os.path.splitext(f)[1] in image_formats]
elif os.path.isfile(to_predict):
filenames.append(to_predict)
return filenames
def parse_dali_idx_dataset(data_idx_dir, mode):
if data_idx_dir is not None:
filenames, _ = list_filenames_in_dataset(data_dir=data_idx_dir, mode=mode, count=False)
return filenames
|
PyTorch/SpeechSynthesis/Tacotron2/platform | platform | DGX1_waveglow_FP32_8NGPU_train | mkdir -p output
python -m multiproc train.py -m WaveGlow -o output/ -lr 1e-4 --epochs 1001 -bs 4 --segment-length 8000 --weight-decay 0 --grad-clip-thresh 3.4028234663852886e+38 --cudnn-benchmark --cudnn-enabled --log-file nvlog.json
|
PyTorch/Classification/ConvNets/triton/deployment_toolkit/library | library | utils | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Callable, Dict, List
import networkx as nx
from ..core import ShapeSpec
def infer_precision(
nx_graph: nx.Graph,
input_names: List[str],
output_names: List[str],
get_node_dtype_fn: Callable,
):
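    # Gather node dtypes across the graph, ignore integer/boolean nodes, and
    # return the most common remaining dtype as the model's inferred precision.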
node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes]
node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]]
dtypes_counter = Counter(node_dtypes)
return dtypes_counter.most_common()[0][0]
def get_shapes_with_dynamic_axes(dataloader, batch_size_dim=0):
def _set_dynamic_shapes(t, shapes):
for k, v in t.items():
shape = list(v.shape)
for dim, s in enumerate(shape):
if shapes[k][dim] != -1 and shapes[k][dim] != s:
shapes[k][dim] = -1
## get all shapes from input and output tensors
input_shapes = {}
output_shapes = {}
for batch in dataloader:
_, x, y = batch
for k, v in x.items():
input_shapes[k] = list(v.shape)
for k, v in y.items():
output_shapes[k] = list(v.shape)
break
# based on max <max_num_iters> iterations, check which
# dimensions differ to determine dynamic_axes
max_num_iters = 100
for idx, batch in enumerate(dataloader):
if idx >= max_num_iters:
break
_, x, y = batch
_set_dynamic_shapes(x, input_shapes)
_set_dynamic_shapes(y, output_shapes)
return input_shapes, output_shapes
def get_dynamic_axes(dataloader, batch_size_dim=0):
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim)
all_shapes = {**input_shapes, **output_shapes}
dynamic_axes = {}
for k, shape in all_shapes.items():
for idx, s in enumerate(shape):
if s == -1:
dynamic_axes[k] = {idx: k + "_" + str(idx)}
for k, v in all_shapes.items():
if k in dynamic_axes:
dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)})
else:
dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)}
return dynamic_axes
def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]:
def init_counters_and_shapes(x, counters, min_shapes, max_shapes):
for k, v in x.items():
counters[k] = Counter()
min_shapes[k] = [float("inf")] * v.ndim
max_shapes[k] = [float("-inf")] * v.ndim
counters = {}
min_shapes: Dict[str, tuple] = {}
max_shapes: Dict[str, tuple] = {}
for idx, batch in enumerate(dataloader):
ids, x, y = batch
if idx == 0:
init_counters_and_shapes(x, counters, min_shapes, max_shapes)
for k, v in x.items():
shape = v.shape
counters[k][shape] += 1
min_shapes[k] = tuple([min(a, b) for a, b in zip(min_shapes[k], shape)])
max_shapes[k] = tuple([max(a, b) for a, b in zip(max_shapes[k], shape)])
opt_shapes: Dict[str, tuple] = {}
for k, v in counters.items():
opt_shapes[k] = v.most_common(1)[0][0]
shapes = {}
for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes
shapes[k] = ShapeSpec(
min=(1,) + min_shapes[k][1:],
max=(max_batch_size,) + max_shapes[k][1:],
opt=(max_batch_size,) + opt_shapes[k][1:],
)
return shapes
|
PyTorch/Classification/GPUNet/triton/085ms | 085ms | README | # Deploying the GPUNet model on Triton Inference Server
This folder contains instructions for deployment to run inference
on Triton Inference Server as well as a detailed performance analysis.
The purpose of this document is to help you with achieving
the best inference performance.
## Table of contents
- [Solution overview](#solution-overview)
- [Introduction](#introduction)
- [Deployment process](#deployment-process)
- [Setup](#setup)
- [Quick Start Guide](#quick-start-guide)
- [Performance](#performance)
- [Offline scenario](#offline-scenario)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16)
- [Online scenario](#online-scenario)
- [Online: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#online-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16)
- [Online: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#online-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16)
- [Advanced](#advanced)
- [Step by step deployment process](#step-by-step-deployment-process)
- [Latency explanation](#latency-explanation)
- [Release notes](#release-notes)
- [Changelog](#changelog)
- [Known issues](#known-issues)
## Solution overview
### Introduction
The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server)
provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs.
The server provides an inference service via an HTTP or gRPC endpoint,
allowing remote clients to request inferencing for any number of GPU
or CPU models being managed by the server.
This README provides step-by-step deployment instructions for models generated
during training (as described in the [model README](../readme.md)).
Additionally, this README provides the corresponding deployment scripts that
ensure optimal GPU utilization during inferencing on Triton Inference Server.
### Deployment process
The deployment process consists of two steps:
1. Conversion.
The purpose of conversion is to find the best performing model
format supported by Triton Inference Server.
Triton Inference Server uses a number of runtime backends such as
[TensorRT](https://developer.nvidia.com/tensorrt),
[LibTorch](https://github.com/triton-inference-server/pytorch_backend) and
[ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend)
to support various model types. Refer to the
[Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton)
for a list of available backends.
2. Configuration.
Model configuration on Triton Inference Server, which generates
necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md).
After deployment, Triton Inference Server is used to evaluate the converted model in two steps:
1. Correctness tests.
Produce results which are tested against given correctness thresholds.
2. Performance tests.
Produce latency and throughput results for offline (static batching)
and online (dynamic batching) scenarios.
All steps are executed by the provided runner script. Refer to the [Quick Start Guide](#quick-start-guide) for details.
## Setup
Ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [NVIDIA PyTorch NGC container 21.12](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch)
* [NVIDIA Triton Inference Server NGC container 21.12](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver)
* [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html)
* [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU
## Quick Start Guide
Running the following scripts will build and launch the container with all required dependencies for native PyTorch as well as Triton Inference Server. This is necessary for running inference and can also be used for data download, processing, and training of the model.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd PyTorch/Classification/GPUNet
```
2. Prepare dataset.
See the [Quick Start Guide](../../README.md#prepare-the-dataset)
3. Build and run a container that extends NGC PyTorch with the Triton client libraries and necessary dependencies.
```
./triton/scripts/docker/build.sh
./triton/scripts/docker/interactive.sh /path/to/imagenet/val/
```
4. Execute the runner script (note that the run scripts are prepared per NVIDIA GPU model).
```
NVIDIA DGX-1 (1x V100 32GB): ./triton/085ms/runner/start_NVIDIA-DGX-1-\(1x-V100-32GB\).sh
NVIDIA DGX A100 (1x A100 80GB): ./triton/085ms/runner/start_NVIDIA-DGX-A100-\(1x-A100-80GB\).sh
```
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect
the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to
[NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Offline scenario
The offline scenario assumes the client and server are located on the same host. The tests use:
 - tensors are passed through shared memory between client and server, using the Perf Analyzer flag `shared-memory=system`
 - a single request is sent from the client to the server with a static batch size
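For reference, a single offline measurement point can also be reproduced directly with Perf Analyzer. The command below is a minimal sketch with example values for the batch size and server address; the full sweep reported here is driven by `triton/run_performance_on_triton.py` (see [Step by step deployment process](#step-by-step-deployment-process)):
```shell
perf_analyzer \
    -m ${MODEL_NAME} \
    -u localhost:8000 \
    --shared-memory=system \
    -b 8 \
    --concurrency-range 1
```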
#### Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 783.00 | 0.05 | 0.21 | 0.07 | 0.12 | 0.82 | 0.01 | 0.00 | 1.28 | 1.31 | 1.34 | 1.38 | 1.27 |
| 2 | 1 | 1330.00 | 0.04 | 0.19 | 0.07 | 0.21 | 0.98 | 0.01 | 0.00 | 1.50 | 1.52 | 1.53 | 1.58 | 1.50 |
| 4 | 1 | 2069.93 | 0.05 | 0.22 | 0.08 | 0.30 | 1.26 | 0.01 | 0.00 | 1.93 | 1.97 | 1.99 | 2.05 | 1.92 |
| 8 | 1 | 2824.00 | 0.05 | 0.23 | 0.08 | 0.49 | 1.97 | 0.01 | 0.00 | 2.82 | 2.85 | 2.86 | 2.90 | 2.82 |
| 16 | 1 | 3680.00 | 0.05 | 0.23 | 0.08 | 0.86 | 3.11 | 0.01 | 0.00 | 4.34 | 4.38 | 4.39 | 4.43 | 4.34 |
| 32 | 1 | 4256.00 | 0.05 | 0.20 | 0.05 | 1.62 | 5.56 | 0.01 | 0.00 | 7.50 | 7.58 | 7.60 | 7.62 | 7.50 |
| 64 | 1 | 4672.00 | 0.05 | 0.23 | 0.08 | 3.09 | 10.22 | 0.02 | 0.00 | 13.69 | 13.74 | 13.76 | 13.80 | 13.69 |
</details>
#### Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 1240.00 | 0.02 | 0.08 | 0.02 | 0.09 | 0.59 | 0.00 | 0.00 | 0.80 | 0.81 | 0.81 | 0.82 | 0.80 |
| 2 | 1 | 2154.00 | 0.02 | 0.07 | 0.02 | 0.15 | 0.66 | 0.00 | 0.00 | 0.93 | 0.94 | 0.94 | 0.96 | 0.92 |
| 4 | 1 | 3704.00 | 0.02 | 0.07 | 0.02 | 0.18 | 0.78 | 0.00 | 0.00 | 1.07 | 1.09 | 1.12 | 1.26 | 1.08 |
| 8 | 1 | 5512.00 | 0.02 | 0.07 | 0.02 | 0.30 | 1.03 | 0.00 | 0.00 | 1.45 | 1.46 | 1.46 | 1.53 | 1.45 |
| 16 | 1 | 6896.00 | 0.02 | 0.07 | 0.02 | 0.60 | 1.60 | 0.01 | 0.00 | 2.30 | 2.36 | 2.41 | 2.54 | 2.32 |
| 32 | 1 | 7040.00 | 0.02 | 0.09 | 0.03 | 1.60 | 2.79 | 0.02 | 0.00 | 4.51 | 4.62 | 4.84 | 5.07 | 4.53 |
| 64 | 1 | 7296.00 | 0.02 | 0.09 | 0.03 | 3.44 | 5.14 | 0.03 | 0.00 | 8.74 | 8.82 | 8.83 | 8.88 | 8.75 |
</details>
### Online scenario
The online scenario assumes the client and server are located on different hosts. The tests use:
 - tensors are passed through HTTP from the client to the server
 - concurrent requests are sent from the client to the server, and the final batch is created on the server side
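For reference, an online sweep can be approximated directly with Perf Analyzer. The command below is a minimal sketch with example values; the results reported here are produced by `triton/run_performance_on_triton.py` with the concurrency levels listed in [Step by step deployment process](#step-by-step-deployment-process):
```shell
perf_analyzer \
    -m ${MODEL_NAME} \
    -u localhost:8000 \
    -i http \
    -b 1 \
    --concurrency-range 8:256:8
```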
#### Online: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_online_2/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 8 | 896.00 | 0.10 | 0.86 | 5.35 | 0.19 | 2.38 | 0.01 | 0.00 | 8.78 | 10.04 | 10.18 | 10.36 | 8.88 |
| 1 | 16 | 1411.00 | 0.10 | 1.53 | 6.27 | 0.50 | 2.88 | 0.02 | 0.00 | 11.49 | 12.83 | 13.28 | 14.12 | 11.30 |
| 1 | 24 | 1755.00 | 0.11 | 2.55 | 6.82 | 0.82 | 3.27 | 0.02 | 0.00 | 13.70 | 16.42 | 16.90 | 17.96 | 13.59 |
| 1 | 32 | 1970.00 | 0.11 | 3.53 | 7.59 | 1.15 | 3.74 | 0.03 | 0.00 | 16.04 | 20.75 | 21.35 | 22.24 | 16.15 |
| 1 | 40 | 2137.86 | 0.12 | 4.78 | 7.88 | 1.51 | 4.14 | 0.03 | 0.00 | 19.01 | 23.00 | 23.40 | 24.30 | 18.46 |
| 1 | 48 | 2392.00 | 0.14 | 4.49 | 9.02 | 1.84 | 4.33 | 0.04 | 0.00 | 20.45 | 23.80 | 25.32 | 26.69 | 19.85 |
| 1 | 56 | 2437.00 | 0.14 | 5.97 | 9.70 | 2.20 | 4.66 | 0.04 | 0.00 | 22.75 | 29.53 | 31.37 | 34.17 | 22.70 |
| 1 | 64 | 2645.00 | 0.15 | 5.06 | 11.37 | 2.48 | 4.81 | 0.04 | 0.00 | 24.24 | 28.32 | 29.55 | 35.58 | 23.91 |
| 1 | 72 | 2722.00 | 0.15 | 6.80 | 11.37 | 2.78 | 5.08 | 0.05 | 0.00 | 26.70 | 33.58 | 34.66 | 36.85 | 26.24 |
| 1 | 80 | 2762.00 | 0.16 | 7.52 | 12.04 | 3.35 | 5.67 | 0.06 | 0.00 | 29.54 | 35.28 | 36.80 | 40.63 | 28.80 |
| 1 | 88 | 2844.16 | 0.16 | 8.53 | 12.05 | 3.76 | 5.91 | 0.06 | 0.00 | 30.37 | 40.36 | 42.01 | 43.28 | 30.48 |
| 1 | 96 | 2877.00 | 0.19 | 10.35 | 12.48 | 3.91 | 5.96 | 0.07 | 0.00 | 33.47 | 43.26 | 45.26 | 47.61 | 32.95 |
| 1 | 104 | 2918.00 | 0.20 | 11.17 | 12.95 | 4.27 | 6.48 | 0.07 | 0.00 | 36.44 | 43.56 | 45.20 | 50.37 | 35.14 |
| 1 | 112 | 2977.02 | 0.18 | 10.92 | 14.21 | 4.77 | 6.76 | 0.08 | 0.00 | 37.34 | 46.95 | 49.44 | 51.85 | 36.92 |
| 1 | 120 | 3196.00 | 0.20 | 8.79 | 16.46 | 4.82 | 6.85 | 0.08 | 0.00 | 38.52 | 45.54 | 48.42 | 50.26 | 37.20 |
| 1 | 128 | 3118.00 | 0.21 | 11.73 | 15.55 | 5.31 | 7.13 | 0.09 | 0.00 | 40.85 | 51.22 | 53.00 | 55.95 | 40.02 |
| 1 | 136 | 3167.00 | 0.22 | 12.38 | 16.21 | 5.43 | 7.62 | 0.09 | 0.00 | 43.41 | 54.40 | 56.76 | 60.66 | 41.95 |
| 1 | 144 | 3273.00 | 0.24 | 10.17 | 19.19 | 5.83 | 7.79 | 0.10 | 0.00 | 42.21 | 57.11 | 61.41 | 68.18 | 43.32 |
| 1 | 152 | 3283.00 | 0.21 | 14.04 | 17.10 | 6.13 | 7.93 | 0.10 | 0.00 | 47.00 | 56.20 | 58.60 | 62.74 | 45.52 |
| 1 | 160 | 3269.00 | 0.22 | 13.18 | 19.16 | 6.89 | 8.30 | 0.12 | 0.00 | 48.07 | 59.74 | 64.15 | 70.49 | 47.87 |
| 1 | 168 | 3247.00 | 0.22 | 15.60 | 18.64 | 7.14 | 8.53 | 0.12 | 0.00 | 52.14 | 63.73 | 67.51 | 71.19 | 50.25 |
| 1 | 176 | 3468.00 | 0.26 | 11.81 | 21.98 | 7.03 | 8.75 | 0.12 | 0.00 | 50.83 | 64.12 | 66.47 | 68.26 | 49.95 |
| 1 | 184 | 3297.00 | 0.26 | 13.90 | 21.98 | 8.16 | 9.74 | 0.14 | 0.00 | 55.11 | 68.09 | 70.53 | 76.24 | 54.18 |
| 1 | 192 | 3376.00 | 0.21 | 18.13 | 21.16 | 7.15 | 8.54 | 0.12 | 0.00 | 56.58 | 70.31 | 72.58 | 76.07 | 55.31 |
| 1 | 200 | 3307.00 | 0.23 | 20.58 | 20.21 | 8.05 | 9.56 | 0.14 | 0.00 | 59.98 | 70.56 | 72.69 | 83.93 | 58.77 |
| 1 | 208 | 3489.00 | 0.34 | 12.51 | 26.31 | 8.35 | 9.85 | 0.13 | 0.00 | 57.90 | 71.05 | 73.68 | 82.01 | 57.50 |
| 1 | 216 | 3384.00 | 0.27 | 19.95 | 23.24 | 8.31 | 9.95 | 0.13 | 0.00 | 63.55 | 76.54 | 81.43 | 85.78 | 61.85 |
| 1 | 224 | 3627.00 | 0.40 | 11.76 | 29.08 | 8.62 | 10.10 | 0.14 | 0.00 | 59.27 | 74.63 | 77.66 | 84.30 | 60.10 |
| 1 | 232 | 3539.00 | 0.29 | 17.64 | 27.73 | 8.11 | 9.53 | 0.13 | 0.00 | 65.13 | 77.27 | 79.50 | 84.71 | 63.43 |
| 1 | 240 | 3654.35 | 0.41 | 13.24 | 30.22 | 8.77 | 10.27 | 0.15 | 0.00 | 63.00 | 76.36 | 77.15 | 85.25 | 63.04 |
| 1 | 248 | 3528.00 | 0.39 | 17.15 | 29.76 | 8.99 | 10.86 | 0.14 | 0.00 | 69.04 | 84.26 | 89.40 | 93.28 | 67.29 |
| 1 | 256 | 3670.00 | 0.41 | 15.51 | 31.64 | 9.13 | 10.71 | 0.15 | 0.00 | 68.76 | 81.96 | 85.84 | 96.11 | 67.54 |
</details>
#### Online: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_online_2/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 8 | 1390.00 | 0.06 | 0.53 | 3.46 | 0.18 | 1.50 | 0.01 | 0.00 | 5.62 | 6.44 | 6.51 | 6.76 | 5.73 |
| 1 | 16 | 2053.00 | 0.07 | 1.36 | 4.03 | 0.52 | 1.77 | 0.01 | 0.00 | 7.68 | 9.83 | 10.48 | 10.94 | 7.76 |
| 1 | 24 | 2427.00 | 0.08 | 2.72 | 4.25 | 0.83 | 1.96 | 0.02 | 0.00 | 10.19 | 13.06 | 14.62 | 15.84 | 9.86 |
| 1 | 32 | 2756.00 | 0.07 | 3.85 | 4.44 | 1.04 | 2.14 | 0.03 | 0.00 | 12.83 | 15.00 | 15.61 | 16.31 | 11.57 |
| 1 | 40 | 3260.00 | 0.10 | 3.33 | 5.11 | 1.31 | 2.32 | 0.04 | 0.00 | 12.20 | 15.97 | 16.93 | 18.58 | 12.20 |
| 1 | 48 | 3225.00 | 0.08 | 5.10 | 5.37 | 1.80 | 2.38 | 0.04 | 0.00 | 15.72 | 19.04 | 19.68 | 20.52 | 14.78 |
| 1 | 56 | 3621.00 | 0.09 | 5.13 | 5.68 | 1.79 | 2.58 | 0.05 | 0.00 | 16.25 | 19.91 | 20.70 | 22.67 | 15.33 |
| 1 | 64 | 3773.00 | 0.09 | 6.06 | 5.97 | 1.96 | 2.69 | 0.06 | 0.00 | 17.54 | 21.53 | 22.66 | 24.17 | 16.83 |
| 1 | 72 | 3882.00 | 0.08 | 6.52 | 6.56 | 2.24 | 2.89 | 0.06 | 0.00 | 19.67 | 23.71 | 24.35 | 25.93 | 18.37 |
| 1 | 80 | 3915.08 | 0.10 | 6.86 | 7.28 | 2.99 | 2.98 | 0.08 | 0.00 | 20.97 | 26.24 | 27.34 | 29.06 | 20.27 |
| 1 | 88 | 4185.00 | 0.08 | 7.07 | 7.44 | 3.03 | 3.19 | 0.08 | 0.00 | 21.56 | 27.19 | 28.56 | 30.53 | 20.89 |
| 1 | 96 | 4272.00 | 0.09 | 8.49 | 7.49 | 2.90 | 3.17 | 0.08 | 0.00 | 23.48 | 28.97 | 30.30 | 33.49 | 22.22 |
| 1 | 104 | 4458.54 | 0.08 | 8.21 | 8.26 | 3.12 | 3.26 | 0.09 | 0.00 | 23.85 | 29.61 | 31.51 | 33.47 | 23.03 |
| 1 | 112 | 4509.00 | 0.08 | 9.67 | 8.36 | 3.11 | 3.20 | 0.08 | 0.00 | 25.14 | 31.42 | 33.62 | 36.90 | 24.51 |
| 1 | 120 | 4820.00 | 0.10 | 7.31 | 9.57 | 3.66 | 3.75 | 0.11 | 0.00 | 25.25 | 30.72 | 32.59 | 35.84 | 24.49 |
| 1 | 128 | 4757.00 | 0.08 | 8.82 | 9.27 | 4.11 | 4.00 | 0.12 | 0.00 | 26.85 | 35.22 | 36.51 | 37.96 | 26.40 |
| 1 | 136 | 5263.00 | 0.10 | 7.77 | 9.78 | 3.24 | 4.18 | 0.12 | 0.00 | 26.05 | 31.36 | 33.10 | 34.59 | 25.19 |
| 1 | 144 | 5287.00 | 0.08 | 8.92 | 9.49 | 3.86 | 4.32 | 0.13 | 0.00 | 27.34 | 33.56 | 34.74 | 36.51 | 26.80 |
| 1 | 152 | 5420.00 | 0.08 | 8.56 | 10.56 | 3.93 | 4.45 | 0.14 | 0.00 | 28.50 | 34.48 | 36.21 | 40.40 | 27.71 |
| 1 | 160 | 5507.00 | 0.09 | 8.38 | 11.39 | 4.04 | 4.58 | 0.14 | 0.00 | 29.60 | 35.95 | 37.01 | 42.02 | 28.61 |
| 1 | 168 | 5471.00 | 0.10 | 9.22 | 11.55 | 4.52 | 4.63 | 0.14 | 0.00 | 30.51 | 38.58 | 41.25 | 43.11 | 30.16 |
| 1 | 176 | 5693.00 | 0.09 | 9.92 | 11.22 | 4.38 | 4.68 | 0.14 | 0.00 | 31.22 | 38.24 | 39.42 | 43.07 | 30.44 |
| 1 | 184 | 5698.00 | 0.10 | 8.64 | 13.26 | 4.63 | 4.90 | 0.15 | 0.00 | 32.38 | 39.84 | 41.38 | 43.17 | 31.68 |
| 1 | 192 | 5591.00 | 0.09 | 11.84 | 12.04 | 4.66 | 4.95 | 0.15 | 0.00 | 35.15 | 42.57 | 44.21 | 59.20 | 33.74 |
| 1 | 200 | 5973.00 | 0.12 | 7.94 | 14.59 | 4.95 | 5.19 | 0.16 | 0.00 | 33.52 | 40.18 | 42.13 | 44.74 | 32.94 |
| 1 | 208 | 5981.00 | 0.09 | 9.48 | 14.28 | 4.98 | 5.00 | 0.16 | 0.00 | 34.69 | 40.97 | 42.65 | 46.89 | 33.99 |
| 1 | 216 | 5901.00 | 0.10 | 12.20 | 12.71 | 5.48 | 5.40 | 0.17 | 0.00 | 37.42 | 44.25 | 46.57 | 49.53 | 36.07 |
| 1 | 224 | 6061.00 | 0.11 | 10.02 | 15.27 | 5.35 | 5.28 | 0.17 | 0.00 | 36.23 | 44.87 | 46.34 | 50.26 | 36.19 |
| 1 | 232 | 6030.00 | 0.09 | 11.08 | 15.65 | 5.25 | 5.52 | 0.17 | 0.00 | 38.51 | 44.80 | 48.30 | 51.88 | 37.76 |
| 1 | 240 | 6253.00 | 0.12 | 9.52 | 17.03 | 5.03 | 5.54 | 0.17 | 0.00 | 37.63 | 45.81 | 48.29 | 52.75 | 37.41 |
| 1 | 248 | 6363.00 | 0.09 | 11.33 | 15.20 | 5.29 | 6.02 | 0.17 | 0.00 | 38.93 | 46.29 | 48.27 | 51.93 | 38.11 |
| 1 | 256 | 6614.00 | 0.10 | 11.09 | 16.08 | 4.89 | 5.68 | 0.16 | 0.00 | 38.74 | 46.94 | 48.30 | 50.82 | 38.00 |
</details>
## Advanced
| Inference runtime | Mnemonic used in scripts |
|-------------------|--------------------------|
| [TorchScript Tracing](https://pytorch.org/docs/stable/jit.html) | `ts-trace` |
| [TorchScript Scripting](https://pytorch.org/docs/stable/jit.html) | `ts-script` |
| [ONNX](https://onnx.ai) | `onnx` |
| [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` |
### Step by step deployment process
The commands described below can be used for exporting, converting, and profiling the model.
#### Clone Repository
IMPORTANT: This step is executed on the host computer.
<details>
<summary>Clone Repository Command</summary>
```shell
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd PyTorch/Classification/GPUNet
```
</details>
#### Start Triton Inference Server
Setup the environment in the host computer and start Triton Inference Server.
<details>
<summary>Setup Environment and Start Triton Inference Server Command</summary>
```shell
source ./triton/scripts/setup_environment.sh
./triton/scripts/docker/triton_inference_server.sh
```
</details>
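Once the server container is running, you can optionally verify that it is ready to accept requests. This assumes the default HTTP endpoint on port 8000 is exposed; a `200` response code means the server is up:
```shell
curl -s -o /dev/null -w "%{http_code}\n" localhost:8000/v2/health/ready
```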
#### Prepare Dataset.
Please prepare the dataset as described in the [Main QSG](../../README.md#prepare-the-dataset).
#### Prepare Checkpoint
Please download a checkpoint from [here](https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_1_pyt_ckpt/versions/21.12.0_amp/zip)
and place it in `runner_workspace/checkpoints/0.85ms/`. Note that the `0.85ms` subdirectory may not be created yet.
#### Setup Container
Build and run a container that extends the NGC PyTorch container with the Triton Inference Server client libraries and dependencies.
<details>
<summary>Setup Container Command</summary>
Build container:
```shell
./triton/scripts/docker/build.sh
```
Run container in interactive mode:
```shell
./triton/scripts/docker/interactive.sh /path/to/imagenet/val/
```
Setup environment in order to share artifacts in steps and with Triton Inference Server:
```shell
source ./triton/scripts/setup_environment.sh
```
</details>
#### Prepare configuration
You can use the environment variables to set the parameters of your inference configuration.
Example values of some key variables in one configuration:
<details>
<summary>Export Variables</summary>
```shell
export FORMAT="onnx"
export PRECISION="fp16"
export EXPORT_FORMAT="onnx"
export EXPORT_PRECISION="fp16"
export BACKEND_ACCELERATOR="trt"
export NUMBER_OF_MODEL_INSTANCES="2"
export TENSORRT_CAPTURE_CUDA_GRAPH="0"
export CHECKPOINT="0.85ms"
export CHECKPOINT_DIR=${CHECKPOINTS_DIR}/${CHECKPOINT}
```
</details>
#### Export Model
Export the model from Python source to the desired format (e.g., ONNX or TorchScript).
<details>
<summary>Export Model Command</summary>
```shell
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit none \
\
--config /workspace/gpunet/configs/batch1/GV100/0.85ms.json \
--checkpoint ${CHECKPOINT_DIR}/0.85ms.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet False \
--batch-size 1
```
</details>
#### Convert Model
Convert the model from training to inference format (e.g. TensorRT).
<details>
<summary>Convert Model Command</summary>
```shell
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size 64 \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
```
</details>
#### Deploy Model
Configure the model on Triton Inference Server.
Generate the configuration from your model repository.
<details>
<summary>Deploy Model Command</summary>
```shell
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size 64 \
--batching dynamic \
--preferred-batch-sizes 64 \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
```
</details>
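As an optional sanity check, you can confirm that the model was loaded by querying the Triton HTTP endpoint (assuming the default port 8000):
```shell
# Returns 200 when the model is loaded and ready
curl -s -o /dev/null -w "%{http_code}\n" localhost:8000/v2/models/${MODEL_NAME}/ready
# Lists all models known to the server together with their state
curl -s -X POST localhost:8000/v2/repository/index
```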
#### Triton Performance Offline Test
We want to maximize throughput. It assumes you have your data available
for inference or that your data saturates the maximum batch size quickly.
Triton Inference Server supports offline scenarios with static batching.
Static batching allows inference requests to be served
as they are received. The largest improvements to throughput come
from increasing the batch size due to efficiency gains in the GPU with larger
batches.
<details>
<summary>Triton Performance Offline Test Command</summary>
```shell
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
```
</details>
#### Triton Performance Online Test
We want to maximize throughput within latency budget constraints.
Dynamic batching is a feature of Triton Inference Server that allows
inference requests to be combined by the server, so that a batch is
created dynamically, resulting in a reduced average latency.
<details>
<summary>Triton Performance Online Test</summary>
```shell
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
```
</details>
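For the online results, a common follow-up is to pick the highest-throughput concurrency whose tail latency still fits a budget. As in the offline example, the column names below are assumptions; replace them with the headers actually present in your CSV.
```python
# Pick the best configuration under an assumed 10 ms p99 latency budget.
# The "p99" and "throughput" column names are assumptions about the CSV schema.
import csv

BUDGET_US = 10_000  # 10 ms expressed in microseconds (assumed unit)

with open("triton_performance_online.csv") as f:
    rows = list(csv.DictReader(f))

def pick(row, keyword):
    for name, value in row.items():
        if keyword in name.lower():
            return float(value)
    return None

within_budget = [r for r in rows if (pick(r, "p99") or 0.0) <= BUDGET_US]
best = max(within_budget, key=lambda r: pick(r, "throughput") or 0.0, default=None)
print("best configuration within budget:", best)
```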
### Latency explanation
A typical Triton Inference Server pipeline can be broken down into the following steps:
1. The client serializes the inference request into a message and sends it to
the server (Client Send).
2. The message travels over the network from the client to the server (Network).
3. The message arrives at the server and is deserialized (Server Receive).
4. The request is placed on the queue (Server Queue).
5. The request is removed from the queue and computed (Server Compute).
6. The completed request is serialized in a message and sent back to
the client (Server Send).
7. The completed message then travels over the network from the server
to the client (Network).
8. The completed message is deserialized by the client and processed as
a completed inference request (Client Receive).
Generally, for local clients, steps 1-4 and 6-8 occupy only a small fraction
of the total time compared to step 5. In distributed systems and online processing,
where the client and server are connected through a network, the send and receive steps
might have a noticeable impact on overall processing performance. To help analyze
possible bottlenecks, detailed charts are presented for the online scenario cases.
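The breakdown above composes additively: the latency a client observes is roughly the sum of the per-stage components reported by `perf_analyzer`. The numbers in the sketch below are purely illustrative, not measurements.
```python
# Illustration of how per-stage latencies add up to the observed end-to-end latency.
# All values are made-up microsecond figures used only to show the composition.
stages_us = {
    "client send": 120,
    "network (request)": 300,
    "server receive + queue": 180,
    "server compute": 850,
    "server send": 60,
    "network (response)": 300,
    "client receive": 110,
}
end_to_end_us = sum(stages_us.values())
print(f"approximate end-to-end latency: {end_to_end_us} us ({end_to_end_us / 1000:.2f} ms)")
```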
## Release Notes
We’re constantly refining and improving our performance on AI
and HPC workloads, even on the same hardware, with frequent updates
to our software stack. For our latest performance data, refer
to these pages for
[AI](https://developer.nvidia.com/deep-learning-performance-training-inference)
and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.
### Changelog
May 2022
- Initial release
### Known issues
- There are no known issues with this model. |
TensorFlow/Detection/SSD/models/research/object_detection/meta_architectures | meta_architectures | rfcn_meta_arch_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.meta_architectures.rfcn_meta_arch."""
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch_test_lib
from object_detection.meta_architectures import rfcn_meta_arch
class RFCNMetaArchTest(
faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase):
def _get_second_stage_box_predictor_text_proto(self):
box_predictor_text_proto = """
rfcn_box_predictor {
conv_hyperparams {
op: CONV
activation: NONE
regularizer {
l2_regularizer {
weight: 0.0005
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
"""
return box_predictor_text_proto
def _get_model(self, box_predictor, **common_kwargs):
return rfcn_meta_arch.RFCNMetaArch(
second_stage_rfcn_box_predictor=box_predictor, **common_kwargs)
def _get_box_classifier_features_shape(self,
image_size,
batch_size,
max_num_proposals,
initial_crop_size,
maxpool_stride,
num_features):
return (batch_size, image_size, image_size, num_features)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ProjectionPlugin | taco2ProjectionPlugin | taco2ProjectionLayerPluginCreator | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "taco2ProjectionLayerPluginCreator.h"
#include "taco2ProjectionLayerPlugin.h"
#include <stdexcept>
#include <vector>
using namespace nvinfer1;
namespace nvinfer1
{
namespace plugin
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const char* const HIDDEN_INPUT_LENGTH_STR = "HiddenInputLength";
constexpr const char* const CONTEXT_INPUT_LENGTH_STR = "ContextInputLength";
constexpr const char* const CHANNEL_DIMENSION_STR = "ChannelDimension";
constexpr const char* const GATE_DIMENSION_STR = "GateDimension";
constexpr const char* const CHANNEL_WEIGHTS_STR = "ChannelWeights";
constexpr const char* const GATE_WEIGHTS_STR = "GateWeights";
constexpr const char* const CHANNEL_BIAS_STR = "ChannelBias";
constexpr const char* const GATE_BIAS_STR = "GateBias";
} // namespace
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
PluginFieldCollection* Taco2ProjectionLayerPluginCreator::getFields()
{
static PluginFieldCollection* pluginPtr = nullptr;
static const std::vector<PluginField> fields{{HIDDEN_INPUT_LENGTH_STR, nullptr, PluginFieldType::kINT32, 0},
{CONTEXT_INPUT_LENGTH_STR, nullptr, PluginFieldType::kINT32, 0},
{CHANNEL_DIMENSION_STR, nullptr, PluginFieldType::kINT32, 0},
{GATE_DIMENSION_STR, nullptr, PluginFieldType::kINT32, 0},
{CHANNEL_WEIGHTS_STR, nullptr, PluginFieldType::kFLOAT32, 0},
{GATE_WEIGHTS_STR, nullptr, PluginFieldType::kFLOAT32, 0},
{CHANNEL_BIAS_STR, nullptr, PluginFieldType::kFLOAT32, 0},
{GATE_BIAS_STR, nullptr, PluginFieldType::kFLOAT32, 0}};
if (!pluginPtr)
{
pluginPtr
= static_cast<PluginFieldCollection*>(malloc(sizeof(*pluginPtr) + fields.size() * sizeof(PluginField)));
pluginPtr->nbFields = static_cast<int>(fields.size());
pluginPtr->fields = fields.data();
}
return pluginPtr;
}
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
Taco2ProjectionLayerPluginCreator::Taco2ProjectionLayerPluginCreator()
: mNamespace()
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
const char* Taco2ProjectionLayerPluginCreator::getPluginName() const
{
return Taco2ProjectionLayerPlugin::getName();
}
const char* Taco2ProjectionLayerPluginCreator::getPluginVersion() const
{
return Taco2ProjectionLayerPlugin::getVersion();
}
const PluginFieldCollection* Taco2ProjectionLayerPluginCreator::getFieldNames()
{
return getFields();
}
IPluginV2* Taco2ProjectionLayerPluginCreator::createPlugin(const char* const /*name*/, const PluginFieldCollection* fc)
{
int hiddenInputLength = 0;
int contextInputLength = 0;
int numChannelDimension = 0;
int numGateDimension = 0;
Weights channelWeights{DataType::kFLOAT, nullptr, 0};
Weights gateWeights{DataType::kFLOAT, nullptr, 0};
Weights channelBias{DataType::kFLOAT, nullptr, 0};
Weights gateBias{DataType::kFLOAT, nullptr, 0};
for (int i = 0; i < fc->nbFields; ++i)
{
const std::string name(fc->fields[i].name);
if (name == HIDDEN_INPUT_LENGTH_STR)
{
hiddenInputLength = static_cast<const int32_t*>(fc->fields[i].data)[0];
}
else if (name == CONTEXT_INPUT_LENGTH_STR)
{
contextInputLength = static_cast<const int32_t*>(fc->fields[i].data)[0];
}
else if (name == CHANNEL_DIMENSION_STR)
{
numChannelDimension = static_cast<const int32_t*>(fc->fields[i].data)[0];
}
else if (name == GATE_DIMENSION_STR)
{
numGateDimension = static_cast<const int32_t*>(fc->fields[i].data)[0];
}
else if (name == CHANNEL_WEIGHTS_STR)
{
channelWeights.values = fc->fields[i].data;
channelWeights.count = fc->fields[i].length;
}
else if (name == GATE_WEIGHTS_STR)
{
gateWeights.values = fc->fields[i].data;
gateWeights.count = fc->fields[i].length;
}
else if (name == CHANNEL_BIAS_STR)
{
channelBias.values = fc->fields[i].data;
channelBias.count = fc->fields[i].length;
}
else if (name == GATE_BIAS_STR)
{
gateBias.values = fc->fields[i].data;
gateBias.count = fc->fields[i].length;
}
else
{
throw std::runtime_error("Unknown plugin field: '" + name + "'");
}
}
return new Taco2ProjectionLayerPlugin(channelWeights, gateWeights, channelBias, gateBias, hiddenInputLength,
contextInputLength, numChannelDimension, numGateDimension);
}
IPluginV2* Taco2ProjectionLayerPluginCreator::deserializePlugin(
const char* const /* layerName */, const void* const serialData, size_t const serialLength)
{
return new Taco2ProjectionLayerPlugin(Taco2ProjectionLayerPlugin::deserialize(serialData, serialLength));
}
void Taco2ProjectionLayerPluginCreator::setPluginNamespace(const char* pluginNamespace)
{
mNamespace = pluginNamespace;
}
const char* Taco2ProjectionLayerPluginCreator::getPluginNamespace() const
{
return mNamespace.c_str();
}
} // namespace plugin
} // namespace nvinfer1
|
PyTorch/Classification/ConvNets/image_classification | image_classification | dataloaders | # Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import torch
import numpy as np
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from PIL import Image
from functools import partial
from torchvision.transforms.functional import InterpolationMode
from image_classification.autoaugment import AutoaugmentImageNetPolicy
DATA_BACKEND_CHOICES = ["pytorch", "synthetic"]
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
DATA_BACKEND_CHOICES.append("dali-gpu")
DATA_BACKEND_CHOICES.append("dali-cpu")
except ImportError:
print(
"Please install DALI from https://www.github.com/NVIDIA/DALI to run this example."
)
def load_jpeg_from_file(path, cuda=True):
img_transforms = transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
)
img = img_transforms(Image.open(path))
with torch.no_grad():
# mean and std are not multiplied by 255 as they are in training script
# torch dataloader reads data into bytes whereas loading directly
# through PIL creates a tensor with floats in [0,1] range
mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
if cuda:
mean = mean.cuda()
std = std.cuda()
img = img.cuda()
img = img.float()
input = img.unsqueeze(0).sub_(mean).div_(std)
return input
class HybridTrainPipe(Pipeline):
def __init__(
self,
batch_size,
num_threads,
device_id,
data_dir,
interpolation,
crop,
dali_cpu=False,
):
super(HybridTrainPipe, self).__init__(
batch_size, num_threads, device_id, seed=12 + device_id
)
interpolation = {
"bicubic": types.INTERP_CUBIC,
"bilinear": types.INTERP_LINEAR,
"triangular": types.INTERP_TRIANGULAR,
}[interpolation]
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
rank = 0
world_size = 1
self.input = ops.FileReader(
file_root=data_dir,
shard_id=rank,
num_shards=world_size,
random_shuffle=True,
pad_last_batch=True,
)
if dali_cpu:
dali_device = "cpu"
self.decode = ops.ImageDecoder(device=dali_device, output_type=types.RGB)
else:
dali_device = "gpu"
# This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet
# without additional reallocations
self.decode = ops.ImageDecoder(
device="mixed",
output_type=types.RGB,
device_memory_padding=211025920,
host_memory_padding=140544512,
)
self.res = ops.RandomResizedCrop(
device=dali_device,
size=[crop, crop],
interp_type=interpolation,
random_aspect_ratio=[0.75, 4.0 / 3.0],
random_area=[0.08, 1.0],
num_attempts=100,
antialias=False,
)
self.cmnp = ops.CropMirrorNormalize(
device="gpu",
dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
)
self.coin = ops.CoinFlip(probability=0.5)
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror=rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(
self, batch_size, num_threads, device_id, data_dir, interpolation, crop, size
):
super(HybridValPipe, self).__init__(
batch_size, num_threads, device_id, seed=12 + device_id
)
interpolation = {
"bicubic": types.INTERP_CUBIC,
"bilinear": types.INTERP_LINEAR,
"triangular": types.INTERP_TRIANGULAR,
}[interpolation]
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
rank = 0
world_size = 1
self.input = ops.FileReader(
file_root=data_dir,
shard_id=rank,
num_shards=world_size,
random_shuffle=False,
pad_last_batch=True,
)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.res = ops.Resize(
device="gpu",
resize_shorter=size,
interp_type=interpolation,
antialias=False,
)
self.cmnp = ops.CropMirrorNormalize(
device="gpu",
dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
)
def define_graph(self):
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.labels]
class DALIWrapper(object):
def gen_wrapper(dalipipeline, num_classes, one_hot, memory_format):
for data in dalipipeline:
input = data[0]["data"].contiguous(memory_format=memory_format)
target = torch.reshape(data[0]["label"], [-1]).cuda().long()
if one_hot:
target = expand(num_classes, torch.float, target)
yield input, target
dalipipeline.reset()
def __init__(self, dalipipeline, num_classes, one_hot, memory_format):
self.dalipipeline = dalipipeline
self.num_classes = num_classes
self.one_hot = one_hot
self.memory_format = memory_format
def __iter__(self):
return DALIWrapper.gen_wrapper(
self.dalipipeline, self.num_classes, self.one_hot, self.memory_format
)
def get_dali_train_loader(dali_cpu=False):
def gdtl(
data_path,
image_size,
batch_size,
num_classes,
one_hot,
interpolation="bilinear",
augmentation=None,
start_epoch=0,
workers=5,
_worker_init_fn=None,
memory_format=torch.contiguous_format,
**kwargs,
):
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
rank = 0
world_size = 1
traindir = os.path.join(data_path, "train")
if augmentation is not None:
raise NotImplementedError(
f"Augmentation {augmentation} for dali loader is not supported"
)
pipe = HybridTrainPipe(
batch_size=batch_size,
num_threads=workers,
device_id=rank % torch.cuda.device_count(),
data_dir=traindir,
interpolation=interpolation,
crop=image_size,
dali_cpu=dali_cpu,
)
pipe.build()
train_loader = DALIClassificationIterator(
pipe, reader_name="Reader", fill_last_batch=False
)
return (
DALIWrapper(train_loader, num_classes, one_hot, memory_format),
int(pipe.epoch_size("Reader") / (world_size * batch_size)),
)
return gdtl
def get_dali_val_loader():
def gdvl(
data_path,
image_size,
batch_size,
num_classes,
one_hot,
interpolation="bilinear",
crop_padding=32,
workers=5,
_worker_init_fn=None,
memory_format=torch.contiguous_format,
**kwargs,
):
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
rank = 0
world_size = 1
valdir = os.path.join(data_path, "val")
pipe = HybridValPipe(
batch_size=batch_size,
num_threads=workers,
device_id=rank % torch.cuda.device_count(),
data_dir=valdir,
interpolation=interpolation,
crop=image_size,
size=image_size + crop_padding,
)
pipe.build()
val_loader = DALIClassificationIterator(
pipe, reader_name="Reader", fill_last_batch=False
)
return (
DALIWrapper(val_loader, num_classes, one_hot, memory_format),
int(pipe.epoch_size("Reader") / (world_size * batch_size)),
)
return gdvl
def fast_collate(memory_format, batch):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8).contiguous(
memory_format=memory_format
)
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if nump_array.ndim < 3:
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array.copy())
return tensor, targets
def expand(num_classes, dtype, tensor):
e = torch.zeros(
tensor.size(0), num_classes, dtype=dtype, device=torch.device("cuda")
)
e = e.scatter(1, tensor.unsqueeze(1), 1.0)
return e
class PrefetchedWrapper(object):
def prefetched_loader(loader, num_classes, one_hot):
mean = (
torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255])
.cuda()
.view(1, 3, 1, 1)
)
std = (
torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255])
.cuda()
.view(1, 3, 1, 1)
)
stream = torch.cuda.Stream()
first = True
for next_input, next_target in loader:
with torch.cuda.stream(stream):
next_input = next_input.cuda(non_blocking=True)
next_target = next_target.cuda(non_blocking=True)
next_input = next_input.float()
if one_hot:
next_target = expand(num_classes, torch.float, next_target)
next_input = next_input.sub_(mean).div_(std)
if not first:
yield input, target
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __init__(self, dataloader, start_epoch, num_classes, one_hot):
self.dataloader = dataloader
self.epoch = start_epoch
self.one_hot = one_hot
self.num_classes = num_classes
def __iter__(self):
if self.dataloader.sampler is not None and isinstance(
self.dataloader.sampler, torch.utils.data.distributed.DistributedSampler
):
self.dataloader.sampler.set_epoch(self.epoch)
self.epoch += 1
return PrefetchedWrapper.prefetched_loader(
self.dataloader, self.num_classes, self.one_hot
)
def __len__(self):
return len(self.dataloader)
def get_pytorch_train_loader(
data_path,
image_size,
batch_size,
num_classes,
one_hot,
interpolation="bilinear",
augmentation=None,
start_epoch=0,
workers=5,
_worker_init_fn=None,
prefetch_factor=2,
memory_format=torch.contiguous_format,
):
interpolation = {
"bicubic": InterpolationMode.BICUBIC,
"bilinear": InterpolationMode.BILINEAR,
}[interpolation]
traindir = os.path.join(data_path, "train")
transforms_list = [
transforms.RandomResizedCrop(image_size, interpolation=interpolation),
transforms.RandomHorizontalFlip(),
]
if augmentation == "autoaugment":
transforms_list.append(AutoaugmentImageNetPolicy())
train_dataset = datasets.ImageFolder(traindir, transforms.Compose(transforms_list))
if torch.distributed.is_initialized():
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, shuffle=True
)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=batch_size,
shuffle=(train_sampler is None),
num_workers=workers,
worker_init_fn=_worker_init_fn,
pin_memory=True,
collate_fn=partial(fast_collate, memory_format),
drop_last=True,
persistent_workers=True,
prefetch_factor=prefetch_factor,
)
return (
PrefetchedWrapper(train_loader, start_epoch, num_classes, one_hot),
len(train_loader),
)
def get_pytorch_val_loader(
data_path,
image_size,
batch_size,
num_classes,
one_hot,
interpolation="bilinear",
workers=5,
_worker_init_fn=None,
crop_padding=32,
memory_format=torch.contiguous_format,
prefetch_factor=2,
):
interpolation = {
"bicubic": InterpolationMode.BICUBIC,
"bilinear": InterpolationMode.BILINEAR,
}[interpolation]
valdir = os.path.join(data_path, "val")
val_dataset = datasets.ImageFolder(
valdir,
transforms.Compose(
[
transforms.Resize(
image_size + crop_padding, interpolation=interpolation
),
transforms.CenterCrop(image_size),
]
),
)
if torch.distributed.is_initialized():
val_sampler = torch.utils.data.distributed.DistributedSampler(
val_dataset, shuffle=False
)
else:
val_sampler = None
val_loader = torch.utils.data.DataLoader(
val_dataset,
sampler=val_sampler,
batch_size=batch_size,
shuffle=(val_sampler is None),
num_workers=workers,
worker_init_fn=_worker_init_fn,
pin_memory=True,
collate_fn=partial(fast_collate, memory_format),
drop_last=False,
persistent_workers=True,
prefetch_factor=prefetch_factor,
)
return PrefetchedWrapper(val_loader, 0, num_classes, one_hot), len(val_loader)
class SynteticDataLoader(object):
def __init__(
self,
batch_size,
num_classes,
num_channels,
height,
width,
one_hot,
memory_format=torch.contiguous_format,
):
input_data = (
torch.randn(batch_size, num_channels, height, width)
.contiguous(memory_format=memory_format)
.cuda()
.normal_(0, 1.0)
)
if one_hot:
input_target = torch.empty(batch_size, num_classes).cuda()
input_target[:, 0] = 1.0
else:
input_target = torch.randint(0, num_classes, (batch_size,))
input_target = input_target.cuda()
self.input_data = input_data
self.input_target = input_target
def __iter__(self):
while True:
yield self.input_data, self.input_target
def get_synthetic_loader(
data_path,
image_size,
batch_size,
num_classes,
one_hot,
interpolation=None,
augmentation=None,
start_epoch=0,
workers=None,
_worker_init_fn=None,
memory_format=torch.contiguous_format,
**kwargs,
):
return (
SynteticDataLoader(
batch_size,
num_classes,
3,
image_size,
image_size,
one_hot,
memory_format=memory_format,
),
-1,
)
|
TensorFlow/Detection/SSD/models/research/object_detection/models/keras_applications | keras_applications | mobilenet_v2 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper around the MobileNet v2 models for Keras, for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from object_detection.core import freezable_batch_norm
from object_detection.utils import ops
# pylint: disable=invalid-name
# This method copied from the slim mobilenet base network code (same license)
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
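# Examples of the rounding behavior (illustrative values derived from the rule above):
#   _make_divisible(32 * 0.75, 8) == 24   # 24 is already a multiple of 8
#   _make_divisible(10, 8) == 16          # rounding 10 down to 8 would drop it by more than 10%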
class _LayersOverride(object):
"""Alternative Keras layers interface for the Keras MobileNetV2."""
def __init__(self,
batchnorm_training,
default_batchnorm_momentum=0.999,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None):
"""Alternative tf.keras.layers interface, for use by the Keras MobileNetV2.
It is used by the Keras applications kwargs injection API to
modify the Mobilenet v2 Keras application with changes required by
the Object Detection API.
These injected interfaces make the following changes to the network:
- Applies the Object Detection hyperparameter configuration
- Supports FreezableBatchNorms
- Adds support for a min number of filters for each layer
- Makes the `alpha` parameter affect the final convolution block even if it
is less than 1.0
- Adds support for explicit padding of convolutions
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default mobilenet_v2 layer builders.
use_explicit_padding: If True, use 'valid' padding for convolutions,
but explicitly pre-pads inputs so that the output dimensions are the
same as if 'same' padding were used. Off by default.
alpha: The width multiplier referenced in the MobileNetV2 paper. It
modifies the number of filters in each convolutional layer.
min_depth: Minimum number of filters in the convolutional layers.
"""
self._alpha = alpha
self._batchnorm_training = batchnorm_training
self._default_batchnorm_momentum = default_batchnorm_momentum
self._conv_hyperparams = conv_hyperparams
self._use_explicit_padding = use_explicit_padding
self._min_depth = min_depth
self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5)
self.initializer = tf.truncated_normal_initializer(stddev=0.09)
def _FixedPaddingLayer(self, kernel_size):
return tf.keras.layers.Lambda(lambda x: ops.fixed_padding(x, kernel_size))
def Conv2D(self, filters, **kwargs):
"""Builds a Conv2D layer according to the current Object Detection config.
Overrides the Keras MobileNetV2 application's convolutions with ones that
follow the spec specified by the Object Detection hyperparameters.
Args:
filters: The number of filters to use for the convolution.
**kwargs: Keyword args specified by the Keras application for
constructing the convolution.
Returns:
A one-arg callable that will either directly apply a Keras Conv2D layer to
the input argument, or that will first pad the input then apply a Conv2D
layer.
"""
# Make sure 'alpha' is always applied to the last convolution block's size
# (This overrides the Keras application's functionality)
if kwargs.get('name') == 'Conv_1' and self._alpha < 1.0:
filters = _make_divisible(1280 * self._alpha, 8)
# Apply the minimum depth to the convolution layers
if (self._min_depth and (filters < self._min_depth)
and not kwargs.get('name').endswith('expand')):
filters = self._min_depth
if self._conv_hyperparams:
kwargs = self._conv_hyperparams.params(**kwargs)
else:
kwargs['kernel_regularizer'] = self.regularizer
kwargs['kernel_initializer'] = self.initializer
kwargs['padding'] = 'same'
kernel_size = kwargs.get('kernel_size')
if self._use_explicit_padding and kernel_size > 1:
kwargs['padding'] = 'valid'
def padded_conv(features):
padded_features = self._FixedPaddingLayer(kernel_size)(features)
return tf.keras.layers.Conv2D(filters, **kwargs)(padded_features)
return padded_conv
else:
return tf.keras.layers.Conv2D(filters, **kwargs)
def DepthwiseConv2D(self, **kwargs):
"""Builds a DepthwiseConv2D according to the Object Detection config.
Overrides the Keras MobileNetV2 application's convolutions with ones that
follow the spec specified by the Object Detection hyperparameters.
Args:
**kwargs: Keyword args specified by the Keras application for
constructing the convolution.
Returns:
A one-arg callable that will either directly apply a Keras DepthwiseConv2D
layer to the input argument, or that will first pad the input then apply
the depthwise convolution.
"""
if self._conv_hyperparams:
kwargs = self._conv_hyperparams.params(**kwargs)
else:
kwargs['depthwise_initializer'] = self.initializer
kwargs['padding'] = 'same'
kernel_size = kwargs.get('kernel_size')
if self._use_explicit_padding and kernel_size > 1:
kwargs['padding'] = 'valid'
def padded_depthwise_conv(features):
padded_features = self._FixedPaddingLayer(kernel_size)(features)
return tf.keras.layers.DepthwiseConv2D(**kwargs)(padded_features)
return padded_depthwise_conv
else:
return tf.keras.layers.DepthwiseConv2D(**kwargs)
def BatchNormalization(self, **kwargs):
"""Builds a normalization layer.
Overrides the Keras application batch norm with the norm specified by the
Object Detection configuration.
Args:
**kwargs: Only the name is used, all other params ignored.
Required for matching `layers.BatchNormalization` calls in the Keras
application.
Returns:
A normalization layer specified by the Object Detection hyperparameter
configurations.
"""
name = kwargs.get('name')
if self._conv_hyperparams:
return self._conv_hyperparams.build_batch_norm(
training=self._batchnorm_training,
name=name)
else:
return freezable_batch_norm.FreezableBatchNorm(
training=self._batchnorm_training,
epsilon=1e-3,
momentum=self._default_batchnorm_momentum,
name=name)
def Input(self, shape):
"""Builds an Input layer.
Overrides the Keras application Input layer with one that uses a
tf.placeholder_with_default instead of a tf.placeholder. This is necessary
to ensure the application works when run on a TPU.
Args:
shape: The shape for the input layer to use. (Does not include a dimension
for the batch size).
Returns:
An input layer for the specified shape that internally uses a
placeholder_with_default.
"""
default_size = 224
default_batch_size = 1
shape = list(shape)
default_shape = [default_size if dim is None else dim for dim in shape]
input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape)
placeholder_with_default = tf.placeholder_with_default(
input=input_tensor, shape=[None] + shape)
return tf.keras.layers.Input(tensor=placeholder_with_default)
# pylint: disable=unused-argument
def ReLU(self, *args, **kwargs):
"""Builds an activation layer.
Overrides the Keras application ReLU with the activation specified by the
Object Detection configuration.
Args:
*args: Ignored, required to match the `tf.keras.ReLU` interface
**kwargs: Only the name is used,
required to match `tf.keras.ReLU` interface
Returns:
An activation layer specified by the Object Detection hyperparameter
configurations.
"""
name = kwargs.get('name')
if self._conv_hyperparams:
return self._conv_hyperparams.build_activation_layer(name=name)
else:
return tf.keras.layers.Lambda(tf.nn.relu6, name=name)
# pylint: enable=unused-argument
# pylint: disable=unused-argument
def ZeroPadding2D(self, **kwargs):
"""Replaces explicit padding in the Keras application with a no-op.
Args:
**kwargs: Ignored, required to match the Keras applications usage.
Returns:
A no-op identity lambda.
"""
return lambda x: x
# pylint: enable=unused-argument
# Forward all non-overridden methods to the keras layers
def __getattr__(self, item):
return getattr(tf.keras.layers, item)
def mobilenet_v2(batchnorm_training,
default_batchnorm_momentum=0.9997,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None,
**kwargs):
"""Instantiates the MobileNetV2 architecture, modified for object detection.
This wraps the MobileNetV2 tensorflow Keras application, but uses the
Keras application's kwargs-based monkey-patching API to override the Keras
architecture with the following changes:
- Changes the default batchnorm momentum to 0.9997
- Applies the Object Detection hyperparameter configuration
- Supports FreezableBatchNorms
- Adds support for a min number of filters for each layer
- Makes the `alpha` parameter affect the final convolution block even if it
is less than 1.0
- Adds support for explicit padding of convolutions
- Makes the Input layer use a tf.placeholder_with_default instead of a
tf.placeholder, to work on TPUs.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default mobilenet_v2 layer builders.
use_explicit_padding: If True, use 'valid' padding for convolutions,
but explicitly pre-pads inputs so that the output dimensions are the
same as if 'same' padding were used. Off by default.
alpha: The width multiplier referenced in the MobileNetV2 paper. It
modifies the number of filters in each convolutional layer.
min_depth: Minimum number of filters in the convolutional layers.
**kwargs: Keyword arguments forwarded directly to the
`tf.keras.applications.MobilenetV2` method that constructs the Keras
model.
Returns:
A Keras model instance.
"""
layers_override = _LayersOverride(
batchnorm_training,
default_batchnorm_momentum=default_batchnorm_momentum,
conv_hyperparams=conv_hyperparams,
use_explicit_padding=use_explicit_padding,
min_depth=min_depth,
alpha=alpha)
return tf.keras.applications.MobileNetV2(alpha=alpha,
layers=layers_override,
**kwargs)
# pylint: enable=invalid-name
|
PaddlePaddle/LanguageModeling/BERT/bert_configs | bert_configs | bert-large-uncased | {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"type_vocab_size": 2,
"vocab_size": 30522
}
|
TensorFlow/Detection/SSD/models/research/object_detection | object_detection | export_tflite_ssd_graph_lib_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.export_tflite_ssd_graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.framework import types_pb2
from object_detection import export_tflite_ssd_graph_lib
from object_detection import exporter
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import graph_rewriter_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import post_processing_pb2
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top
class FakeModel(model.DetectionModel):
def __init__(self, add_detection_masks=False):
self._add_detection_masks = add_detection_masks
def preprocess(self, inputs):
pass
def predict(self, preprocessed_inputs, true_image_shapes):
features = tf.contrib.slim.conv2d(preprocessed_inputs, 3, 1)
with tf.control_dependencies([features]):
prediction_tensors = {
'box_encodings':
tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]],
tf.float32),
'class_predictions_with_background':
tf.constant([[[0.7, 0.6], [0.9, 0.0]]], tf.float32),
}
with tf.control_dependencies(
[tf.convert_to_tensor(features.get_shape().as_list()[1:3])]):
prediction_tensors['anchors'] = tf.constant(
[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32)
return prediction_tensors
def postprocess(self, prediction_tensors, true_image_shapes):
pass
def restore_map(self, checkpoint_path, from_detection_checkpoint):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
class ExportTfliteGraphTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self,
checkpoint_path,
use_moving_averages,
quantize=False,
num_channels=3):
g = tf.Graph()
with g.as_default():
mock_model = FakeModel()
inputs = tf.placeholder(tf.float32, shape=[1, 10, 10, num_channels])
mock_model.predict(inputs, true_image_shapes=None)
if use_moving_averages:
tf.train.ExponentialMovingAverage(0.0).apply()
tf.train.get_or_create_global_step()
if quantize:
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
graph_rewriter_config.quantization.delay = 500000
graph_rewriter_fn = graph_rewriter_builder.build(
graph_rewriter_config, is_training=False)
graph_rewriter_fn()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
saver.save(sess, checkpoint_path)
def _assert_quant_vars_exists(self, tflite_graph_file):
with tf.gfile.Open(tflite_graph_file) as f:
graph_string = f.read()
print(graph_string)
self.assertTrue('quant' in graph_string)
def _import_graph_and_run_inference(self, tflite_graph_file, num_channels=3):
"""Imports a tflite graph, runs single inference and returns outputs."""
graph = tf.Graph()
with graph.as_default():
graph_def = tf.GraphDef()
with tf.gfile.Open(tflite_graph_file) as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
input_tensor = graph.get_tensor_by_name('normalized_input_image_tensor:0')
box_encodings = graph.get_tensor_by_name('raw_outputs/box_encodings:0')
class_predictions = graph.get_tensor_by_name(
'raw_outputs/class_predictions:0')
with self.test_session(graph) as sess:
[box_encodings_np, class_predictions_np] = sess.run(
[box_encodings, class_predictions],
feed_dict={input_tensor: np.random.rand(1, 10, 10, num_channels)})
return box_encodings_np, class_predictions_np
def _export_graph(self, pipeline_config, num_channels=3):
"""Exports a tflite graph."""
output_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(output_dir, 'model.ckpt')
tflite_graph_file = os.path.join(output_dir, 'tflite_graph.pb')
quantize = pipeline_config.HasField('graph_rewriter')
self._save_checkpoint_from_mock_model(
trained_checkpoint_prefix,
use_moving_averages=pipeline_config.eval_config.use_moving_averages,
quantize=quantize,
num_channels=num_channels)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
with tf.Graph().as_default():
export_tflite_ssd_graph_lib.export_tflite_graph(
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_dir=output_dir,
add_postprocessing_op=False,
max_detections=10,
max_classes_per_detection=1)
return tflite_graph_file
def _export_graph_with_postprocessing_op(self,
pipeline_config,
num_channels=3):
"""Exports a tflite graph with custom postprocessing op."""
output_dir = self.get_temp_dir()
trained_checkpoint_prefix = os.path.join(output_dir, 'model.ckpt')
tflite_graph_file = os.path.join(output_dir, 'tflite_graph.pb')
quantize = pipeline_config.HasField('graph_rewriter')
self._save_checkpoint_from_mock_model(
trained_checkpoint_prefix,
use_moving_averages=pipeline_config.eval_config.use_moving_averages,
quantize=quantize,
num_channels=num_channels)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
with tf.Graph().as_default():
export_tflite_ssd_graph_lib.export_tflite_graph(
pipeline_config=pipeline_config,
trained_checkpoint_prefix=trained_checkpoint_prefix,
output_dir=output_dir,
add_postprocessing_op=True,
max_detections=10,
max_classes_per_detection=1)
return tflite_graph_file
def test_export_tflite_graph_with_moving_averages(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = True
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
(box_encodings_np, class_predictions_np
) = self._import_graph_and_run_inference(tflite_graph_file)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]])
def test_export_tflite_graph_without_moving_averages(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
(box_encodings_np, class_predictions_np
) = self._import_graph_and_run_inference(tflite_graph_file)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]])
def test_export_tflite_graph_grayscale(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
(pipeline_config.model.ssd.image_resizer.fixed_shape_resizer
).convert_to_grayscale = True
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config, num_channels=1)
self.assertTrue(os.path.exists(tflite_graph_file))
(box_encodings_np,
class_predictions_np) = self._import_graph_and_run_inference(
tflite_graph_file, num_channels=1)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]])
def test_export_tflite_graph_with_quantization(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.graph_rewriter.quantization.delay = 500000
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
self._assert_quant_vars_exists(tflite_graph_file)
(box_encodings_np, class_predictions_np
) = self._import_graph_and_run_inference(tflite_graph_file)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]])
def test_export_tflite_graph_with_softmax_score_conversion(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.post_processing.score_converter = (
post_processing_pb2.PostProcessing.SOFTMAX)
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
(box_encodings_np, class_predictions_np
) = self._import_graph_and_run_inference(tflite_graph_file)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np,
[[[0.524979, 0.475021], [0.710949, 0.28905]]])
def test_export_tflite_graph_with_sigmoid_score_conversion(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.post_processing.score_converter = (
post_processing_pb2.PostProcessing.SIGMOID)
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph(pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
(box_encodings_np, class_predictions_np
) = self._import_graph_and_run_inference(tflite_graph_file)
self.assertAllClose(box_encodings_np,
[[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]])
self.assertAllClose(class_predictions_np,
[[[0.668188, 0.645656], [0.710949, 0.5]]])
def test_export_tflite_graph_with_postprocessing_op(self):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
pipeline_config.model.ssd.post_processing.score_converter = (
post_processing_pb2.PostProcessing.SIGMOID)
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.num_classes = 2
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0
tflite_graph_file = self._export_graph_with_postprocessing_op(
pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
graph = tf.Graph()
with graph.as_default():
graph_def = tf.GraphDef()
with tf.gfile.Open(tflite_graph_file) as f:
graph_def.ParseFromString(f.read())
all_op_names = [node.name for node in graph_def.node]
self.assertTrue('TFLite_Detection_PostProcess' in all_op_names)
for node in graph_def.node:
if node.name == 'TFLite_Detection_PostProcess':
self.assertTrue(node.attr['_output_quantized'].b is True)
self.assertTrue(
node.attr['_support_output_type_float_in_quantized_op'].b is True)
self.assertTrue(node.attr['y_scale'].f == 10.0)
self.assertTrue(node.attr['x_scale'].f == 10.0)
self.assertTrue(node.attr['h_scale'].f == 5.0)
self.assertTrue(node.attr['w_scale'].f == 5.0)
self.assertTrue(node.attr['num_classes'].i == 2)
self.assertTrue(
all([
t == types_pb2.DT_FLOAT
for t in node.attr['_output_types'].list.type
]))
@mock.patch.object(exporter, 'rewrite_nn_resize_op')
def test_export_with_nn_resize_op_not_called_without_fpn(self, mock_get):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
tflite_graph_file = self._export_graph_with_postprocessing_op(
pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
mock_get.assert_not_called()
@mock.patch.object(exporter, 'rewrite_nn_resize_op')
def test_export_with_nn_resize_op_called_with_fpn(self, mock_get):
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10
pipeline_config.model.ssd.feature_extractor.fpn.min_level = 3
pipeline_config.model.ssd.feature_extractor.fpn.max_level = 7
tflite_graph_file = self._export_graph_with_postprocessing_op(
pipeline_config)
self.assertTrue(os.path.exists(tflite_graph_file))
mock_get.assert_called_once()
if __name__ == '__main__':
tf.test.main()
|
PyTorch/SpeechSynthesis/Tacotron2/platform | platform | DGXA100_waveglow_AMP_1NGPU_train | mkdir -p output
python train.py -m WaveGlow -o output/ --amp -lr 1e-4 --epochs 1001 -bs 10 --segment-length 8000 --weight-decay 0 --grad-clip-thresh 65504.0 --cudnn-benchmark --cudnn-enabled --log-file nvlog.json
|
TensorFlow/Detection/SSD/models/research/object_detection/matchers | matchers | bipartite_matcher | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bipartite matcher implementation."""
import tensorflow as tf
from tensorflow.contrib.image.python.ops import image_ops
from object_detection.core import matcher
class GreedyBipartiteMatcher(matcher.Matcher):
"""Wraps a Tensorflow greedy bipartite matcher."""
def __init__(self, use_matmul_gather=False):
"""Constructs a Matcher.
Args:
use_matmul_gather: Force constructed match objects to use matrix
multiplication based gather instead of standard tf.gather.
(Default: False).
"""
super(GreedyBipartiteMatcher, self).__init__(
use_matmul_gather=use_matmul_gather)
def _match(self, similarity_matrix, valid_rows):
"""Bipartite matches a collection rows and columns. A greedy bi-partite.
TODO(rathodv): Add num_valid_columns options to match only that many columns
with all the rows.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher values mean more similar.
valid_rows: A boolean tensor of shape [N] indicating the rows that are
valid.
Returns:
match_results: int32 tensor of shape [M] with match_results[i]=-1
meaning that column i is not matched and otherwise that it is matched to
row match_results[i].
"""
valid_row_sim_matrix = tf.gather(similarity_matrix,
tf.squeeze(tf.where(valid_rows), axis=-1))
invalid_row_sim_matrix = tf.gather(
similarity_matrix,
tf.squeeze(tf.where(tf.logical_not(valid_rows)), axis=-1))
similarity_matrix = tf.concat(
[valid_row_sim_matrix, invalid_row_sim_matrix], axis=0)
# Convert similarity matrix to distance matrix as tf.image.bipartite tries
# to find minimum distance matches.
distance_matrix = -1 * similarity_matrix
num_valid_rows = tf.reduce_sum(tf.to_float(valid_rows))
_, match_results = image_ops.bipartite_match(
distance_matrix, num_valid_rows=num_valid_rows)
match_results = tf.reshape(match_results, [-1])
match_results = tf.cast(match_results, tf.int32)
return match_results
|
PyTorch/Recommendation/DLRM/dlrm/cuda_src/dot_based_interact_ampere | dot_based_interact_ampere | pytorch_ops | #include <torch/extension.h>
torch::Tensor dotBasedInteractFwdTorch(torch::Tensor input,
torch::Tensor bottom_mlp_output);
std::vector<torch::Tensor> dotBasedInteractBwdTorch(torch::Tensor input,
torch::Tensor upstreamGrad);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("dotBasedInteractFwd", &dotBasedInteractFwdTorch, "", py::arg("input"),
py::arg("bottom_mlp_output"));
m.def("dotBasedInteractBwd", &dotBasedInteractBwdTorch, "", py::arg("input"),
py::arg("upstreamGrad"));
}
|
TensorFlow/Segmentation/UNet_Industrial/scripts | scripts | UNet_AMP_4GPU_XLA | #!/usr/bin/env bash
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches UNet training in FP32-AMP on 4 GPUs using 16 batch size (4 per GPU)
# Usage ./UNet_AMP_4GPU_XLA.sh <path to result repository> <path to dataset> <dagm classID (1-10)>
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export TF_CPP_MIN_LOG_LEVEL=3
mpirun \
-np 4 \
-H localhost:4 \
-bind-to none \
-map-by slot \
-x NCCL_DEBUG=VERSION \
-x LD_LIBRARY_PATH \
-x PATH \
-mca pml ob1 -mca btl ^openib \
--allow-run-as-root \
python "${BASEDIR}/../main.py" \
--unet_variant='tinyUNet' \
--activation_fn='relu' \
--exec_mode='train_and_evaluate' \
--iter_unit='batch' \
--num_iter=2500 \
--batch_size=4 \
--warmup_step=10 \
--results_dir="${1}" \
--data_dir="${2}" \
--dataset_name='DAGM2007' \
--dataset_classID="${3}" \
--data_format='NCHW' \
--use_auto_loss_scaling \
--amp \
--xla \
--learning_rate=1e-4 \
--learning_rate_decay_factor=0.8 \
--learning_rate_decay_steps=500 \
--rmsprop_decay=0.9 \
--rmsprop_momentum=0.8 \
--loss_fn_name='adaptive_loss' \
--weight_decay=1e-5 \
--weight_init_method='he_uniform' \
--augment_data \
--display_every=250 \
--debug_verbosity=0
|
PyTorch/LanguageModeling/BART/scripts | scripts | run_pretraining_phase1 | #!/usr/bin/env bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
echo "Container nvidia build = " $NVIDIA_BUILD_ID
train_batch_size_phase1=${1:-200}
train_batch_size_phase2=${2:-32}
learning_rate_phase1=${3:-"5e-3"}
learning_rate_phase2=${4:-"4e-3"}
precision=${5:-"bf16"}
use_preln=${6:-"true"}
num_gpus=${7:-8}
warmup_steps_phase1=${8:-"2166"}
warmup_steps_phase2=${9:-"200"}
train_steps_phase1=${10:-95040}
train_steps_phase2=${11:-7560}
save_checkpoints_steps=${12:-100}
num_accumulation_steps_phase1=${13:-40}
num_accumulation_steps_phase2=${14:-120}
config_path=${15:-"configs/config.json"}
DATA_DIR=${DATA_DIR:-data}
RESULTS_DIR=${RESULTS_DIR:-results}
RESULTS_DIR_PHASE1=${RESULTS_DIR}/phase_1
mkdir -m 777 -p $RESULTS_DIR_PHASE1
DATESTAMP=`date +'%y%m%d%H%M%S'`
LOGFILE=$RESULTS_DIR_PHASE1/$DATESTAMP.log
printf "Logs written to %s\n" "$LOGFILE"
SOURCE_LEN=128
if [ "$precision" = "fp16" ] ; then
echo "fp16 activated!"
USE_FP16="--fp16"
elif [ "$precision" = "bf16" ] ; then
echo "bf16 activated!"
USE_FP16="--bf16"
else
echo "fp32/tf32 activated!"
USE_FP16=""
fi
if [ "$use_preln" = "true" ] ; then
echo "Trained with PreLN"
USE_FP16="--pre_ln $USE_FP16"
else
echo "Trained with PostLN"
fi
export TOKENIZERS_PARALLELISM=true;
python -m torch.distributed.launch --nproc_per_node=${num_gpus} pretrain.py \
--data_dir=${DATA_DIR}/pretrain_lddl_${SOURCE_LEN} \
--config_path=${config_path} \
--output_dir=${RESULTS_DIR_PHASE1} \
--num_workers 4 \
--learning_rate=${learning_rate_phase1} \
${USE_FP16} \
--do_train \
--train_batch_size=${train_batch_size_phase1} --gradient_accumulation_steps=${num_accumulation_steps_phase1} \
--max_steps=${train_steps_phase1} --warmup_steps=${warmup_steps_phase1} \
--max_source_length=${SOURCE_LEN} \
--lr_scheduler polynomial \
--label_smoothing 0 \
--weight_decay 0.1 \
--dropout 0.1 --attention_dropout 0.1 --gradient_clip_val=0.1 \
--resume_from_checkpoint=True \
--save_checkpoint_steps=${save_checkpoints_steps} --log_freq=${save_checkpoints_steps} \
--allreduce_post_accumulation_half_precision \
--seed $RANDOM --lamb |& tee -a ${LOGFILE}
|
TensorFlow/Detection/SSD/models/research/slim/datasets | datasets | mnist | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the MNIST dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/research/slim/datasets/download_and_convert_mnist.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from datasets import dataset_utils
slim = tf.contrib.slim
_FILE_PATTERN = 'mnist_%s.tfrecord'
_SPLITS_TO_SIZES = {'train': 60000, 'test': 10000}
_NUM_CLASSES = 10
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [28 x 28 x 1] grayscale image.',
'label': 'A single integer between 0 and 9',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading MNIST.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),
'image/class/label': tf.FixedLenFeature(
[1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(shape=[28, 28, 1], channels=1),
'label': slim.tfexample_decoder.Tensor('image/class/label', shape=[]),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
num_classes=_NUM_CLASSES,
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
labels_to_names=labels_to_names)
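# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The dataset directory below is a placeholder, and the slim
# DatasetDataProvider API from tf.contrib.slim is assumed to be available.
if __name__ == '__main__':
  example_dataset = get_split('train', '/tmp/mnist')  # placeholder directory
  example_provider = slim.dataset_data_provider.DatasetDataProvider(
      example_dataset, shuffle=True)
  example_image, example_label = example_provider.get(['image', 'label'])
  print(example_image.shape, example_label.dtype)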
|
PyTorch/LanguageModeling/Transformer-XL/pytorch | pytorch | wt103_large | # Base
wt103: &wt103
dataset: wt103
data: ../data/wikitext-103/
train: &train
<<: *wt103
cuda: true
n_layer: 18
d_model: 1024
n_head: 16
d_head: 64
d_inner: 4096
dropout: 0.2
dropatt: 0.2
optim: jitlamb
lr: 0.01
eta_min: 0.0001
roll: true
warmup_step: 16000
max_step: 100000
tgt_len: 384
mem_len: 384
init_std: 0.005
eval_tgt_len: 128
batch_size: 128
multi_gpu: ddp
log_interval: 100
eval_interval: 5000
vocab: word
adaptive: true
div_val: 4
train_multinode: &train_multinode
<<: *wt103
<<: *train
lr: 0.02
max_step: 25000
batch_size: 512
eval_batch_size: 128
eval_interval: 1000
eval: &eval
<<: *wt103
cuda: true
tgt_len: 128
mem_len: 1600
clamp_len: 1000
same_length: true
split: test
default:
train:
<<: *train
eval:
<<: *eval
manual_eval:
train:
<<: *train
eval:
<<: *eval
manual_config: '{"n_token": 267735, "n_layer": 18, "n_head": 16, "d_model": 1024, "d_head": 64, "d_inner": 4096, "dropout": 0.2, "dropatt": 0.2, "dtype": null, "tie_weight": true, "d_embed": 1024, "div_val": 4, "tie_projs": [false, true, true, true], "pre_lnorm": false, "tgt_len": 384, "ext_len": 0, "mem_len": 384, "cutoffs": [19997, 39997, 199997], "same_length": false, "attn_type": 0, "clamp_len": -1, "sample_softmax": -1}'
# Full training configs for NVIDIA DGX-1 (8x NVIDIA V100 16GB GPU)
dgx1_8gpu_fp16: &dgx1_8gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 4
eval:
<<: *eval
fp16: true
dgx1_8gpu_fp32: &dgx1_8gpu_fp32
train:
<<: *train
batch_chunk: 8
eval:
<<: *eval
dgx1_4gpu_fp16: &dgx1_4gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 8
eval:
<<: *eval
fp16: true
dgx1_4gpu_fp32: &dgx1_4gpu_fp32
train:
<<: *train
batch_chunk: 16
eval:
<<: *eval
dgx1_2gpu_fp16: &dgx1_2gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 16
eval:
<<: *eval
fp16: true
dgx1_2gpu_fp32: &dgx1_2gpu_fp32
train:
<<: *train
batch_chunk: 32
eval:
<<: *eval
dgx1_1gpu_fp16: &dgx1_1gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 32
eval:
<<: *eval
fp16: true
dgx1_1gpu_fp32: &dgx1_1gpu_fp32
train:
<<: *train
batch_chunk: 64
swap_mem: true
eval:
<<: *eval
# Full training configs for NVIDIA DGX-2H (16x NVIDIA V100 32GB GPU)
dgx2_16gpu_fp16: &dgx2_16gpu_fp16
train:
<<: *train
fp16: true
eval:
<<: *eval
fp16: true
dgx2_16gpu_fp32: &dgx2_16gpu_fp32
train:
<<: *train
eval:
<<: *eval
dgx2_8gpu_fp16: &dgx2_8gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 2
eval:
<<: *eval
fp16: true
dgx2_8gpu_fp32: &dgx2_8gpu_fp32
train:
<<: *train
batch_chunk: 2
eval:
<<: *eval
dgx2_4gpu_fp16: &dgx2_4gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 4
eval:
<<: *eval
fp16: true
dgx2_4gpu_fp32: &dgx2_4gpu_fp32
train:
<<: *train
batch_chunk: 4
eval:
<<: *eval
dgx2_2gpu_fp16: &dgx2_2gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 8
eval:
<<: *eval
fp16: true
dgx2_2gpu_fp32: &dgx2_2gpu_fp32
train:
<<: *train
batch_chunk: 8
eval:
<<: *eval
dgx2_1gpu_fp16: &dgx2_1gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 16
eval:
<<: *eval
fp16: true
dgx2_1gpu_fp32: &dgx2_1gpu_fp32
train:
<<: *train
batch_chunk: 16
eval:
<<: *eval
# Full training configs for NVIDIA DGX A100 (8x NVIDIA A100 40GB GPU)
dgxa100_8gpu_fp16: &dgxa100_8gpu_fp16
train:
<<: *train
fp16: true
eval:
<<: *eval
fp16: true
dgxa100_8gpu_tf32: &dgxa100_8gpu_tf32
train:
<<: *train
batch_chunk: 2
eval:
<<: *eval
dgxa100_4gpu_fp16: &dgxa100_4gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 2
eval:
<<: *eval
fp16: true
dgxa100_4gpu_tf32: &dgxa100_4gpu_tf32
train:
<<: *train
batch_chunk: 4
eval:
<<: *eval
dgxa100_2gpu_fp16: &dgxa100_2gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 4
eval:
<<: *eval
fp16: true
dgxa100_2gpu_tf32: &dgxa100_2gpu_tf32
train:
<<: *train
batch_chunk: 8
eval:
<<: *eval
dgxa100_1gpu_fp16: &dgxa100_1gpu_fp16
train:
<<: *train
fp16: true
batch_chunk: 8
eval:
<<: *eval
fp16: true
dgxa100_1gpu_tf32: &dgxa100_1gpu_tf32
train:
<<: *train
batch_chunk: 16
eval:
<<: *eval
# Full training configs for multi-node NVIDIA DGX-2 (16x NVIDIA V100 32GB GPU)
8dgx2_16gpu_fp16: &8dgx2_16gpu_fp16
train:
<<: *train_multinode
fp16: true
eval:
<<: *eval
fp16: true
batch_size: 128
8dgx2_16gpu_fp32: &8dgx2_16gpu_fp32
train:
<<: *train_multinode
eval:
<<: *eval
batch_size: 128
4dgx2_16gpu_fp16: &4dgx2_16gpu_fp16
train:
<<: *train_multinode
fp16: true
eval:
<<: *eval
fp16: true
batch_size: 64
4dgx2_16gpu_fp32: &4dgx2_16gpu_fp32
train:
<<: *train_multinode
eval:
<<: *eval
batch_size: 64
2dgx2_16gpu_fp16: &2dgx2_16gpu_fp16
train:
<<: *train_multinode
fp16: true
eval:
<<: *eval
fp16: true
batch_size: 32
2dgx2_16gpu_fp32: &2dgx2_16gpu_fp32
train:
<<: *train_multinode
batch_chunk: 2
eval:
<<: *eval
batch_size: 32
1dgx2_16gpu_fp16: &1dgx2_16gpu_fp16
train:
<<: *train_multinode
fp16: true
batch_chunk: 2
eval:
<<: *eval
fp16: true
batch_size: 16
1dgx2_16gpu_fp32: &1dgx2_16gpu_fp32
train:
<<: *train_multinode
batch_chunk: 4
eval:
<<: *eval
batch_size: 16
# Training benchmarks
trainbench: &trainbench
train:
<<: *train
log_interval: 1
max_step: 500
max_step_scheduler: 100000
trainbench_multinode: &trainbench_multinode
train:
<<: *train_multinode
log_interval: 1
max_step: 500
max_step_scheduler: 25000
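# --- Illustrative usage sketch (added for clarity; not part of the original
# config). A hedged Python example of resolving one of the named
# configurations above with PyYAML; the file name and the chosen key are
# assumptions made for illustration only, and the repository's own training
# scripts perform their own config handling.
#
#   import yaml
#   with open('wt103_large.yaml') as f:   # hypothetical file name
#       configs = yaml.safe_load(f)       # merge keys (<<:) are resolved here
#   train_args = configs['dgxa100_8gpu_fp16']['train']
#   print(train_args['batch_size'], train_args['lr'], train_args['fp16'])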
|
PyTorch/SpeechSynthesis/HiFiGAN/fastpitch | fastpitch | alignment | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numba import jit, prange
@jit(nopython=True)
def mas(attn_map, width=1):
# assumes mel x text
opt = np.zeros_like(attn_map)
attn_map = np.log(attn_map)
attn_map[0, 1:] = -np.inf
log_p = np.zeros_like(attn_map)
log_p[0, :] = attn_map[0, :]
prev_ind = np.zeros_like(attn_map, dtype=np.int64)
for i in range(1, attn_map.shape[0]):
for j in range(attn_map.shape[1]): # for each text dim
prev_j = np.arange(max(0, j-width), j+1)
prev_log = np.array([log_p[i-1, prev_idx] for prev_idx in prev_j])
ind = np.argmax(prev_log)
log_p[i, j] = attn_map[i, j] + prev_log[ind]
prev_ind[i, j] = prev_j[ind]
# now backtrack
curr_text_idx = attn_map.shape[1]-1
for i in range(attn_map.shape[0]-1, -1, -1):
opt[i, curr_text_idx] = 1
curr_text_idx = prev_ind[i, curr_text_idx]
opt[0, curr_text_idx] = 1
return opt
@jit(nopython=True)
def mas_width1(attn_map):
"""mas with hardcoded width=1"""
# assumes mel x text
opt = np.zeros_like(attn_map)
attn_map = np.log(attn_map)
attn_map[0, 1:] = -np.inf
log_p = np.zeros_like(attn_map)
log_p[0, :] = attn_map[0, :]
prev_ind = np.zeros_like(attn_map, dtype=np.int64)
for i in range(1, attn_map.shape[0]):
for j in range(attn_map.shape[1]): # for each text dim
prev_log = log_p[i-1, j]
prev_j = j
if j-1 >= 0 and log_p[i-1, j-1] >= log_p[i-1, j]:
prev_log = log_p[i-1, j-1]
prev_j = j-1
log_p[i, j] = attn_map[i, j] + prev_log
prev_ind[i, j] = prev_j
# now backtrack
curr_text_idx = attn_map.shape[1]-1
for i in range(attn_map.shape[0]-1, -1, -1):
opt[i, curr_text_idx] = 1
curr_text_idx = prev_ind[i, curr_text_idx]
opt[0, curr_text_idx] = 1
return opt
@jit(nopython=True, parallel=True)
def b_mas(b_attn_map, in_lens, out_lens, width=1):
assert width == 1
attn_out = np.zeros_like(b_attn_map)
for b in prange(b_attn_map.shape[0]):
out = mas_width1(b_attn_map[b, 0, :out_lens[b], :in_lens[b]])
attn_out[b, 0, :out_lens[b], :in_lens[b]] = out
return attn_out
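# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The shapes, lengths and random attention values below are invented
# purely to show how the batched monotonic alignment search above is called.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    # [batch, 1, mel_frames, text_tokens] soft attention, strictly positive.
    soft_attn = (rng.random((2, 1, 20, 7)) + 1e-6).astype(np.float32)
    soft_attn /= soft_attn.sum(axis=3, keepdims=True)
    text_lens = np.array([7, 5], dtype=np.int64)
    mel_lens = np.array([20, 16], dtype=np.int64)
    hard_attn = b_mas(soft_attn, text_lens, mel_lens, width=1)
    # Every valid mel frame is assigned exactly one text token.
    print(hard_attn.shape, int(hard_attn[0, 0, :mel_lens[0]].sum()))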
|
TensorFlow/Detection/SSD/models/research/object_detection | object_detection | inputs_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.tflearn.inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from object_detection import inputs
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import test_case
FLAGS = tf.flags.FLAGS
def _get_configs_for_model(model_name):
"""Returns configurations for model."""
fname = os.path.join(tf.resource_loader.get_data_files_path(),
'samples/configs/' + model_name + '.config')
label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
'data/pet_label_map.pbtxt')
data_path = os.path.join(tf.resource_loader.get_data_files_path(),
'test_data/pets_examples.record')
configs = config_util.get_configs_from_pipeline_file(fname)
override_dict = {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path
}
return config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
def _make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.data.Iterator`.
"""
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
class InputsTest(test_case.TestCase, parameterized.TestCase):
def test_faster_rcnn_resnet50_train_input(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([1, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([1],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
@parameterized.parameters(
{'eval_batch_size': 1},
{'eval_batch_size': 8}
)
def test_faster_rcnn_resnet50_eval_input(self, eval_batch_size=1):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_ssd_inceptionV2_train_input(self):
"""Tests the training input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
batch_size = configs['train_config'].batch_size
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[batch_size],
labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.num_groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[batch_size, 100],
labels[
fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
@parameterized.parameters(
{'eval_batch_size': 1},
{'eval_batch_size': 8}
)
def test_ssd_inceptionV2_eval_input(self, eval_batch_size=1):
"""Tests the eval input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[
fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_predict_input(self):
"""Tests the predict input function."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'],
predict_input_config=configs['eval_input_configs'][0])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
self.assertEqual([1, 300, 300, 3], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_predict_input_with_additional_channels(self):
"""Tests the predict input function with additional channels."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['eval_input_configs'][0].num_additional_channels = 2
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'],
predict_input_config=configs['eval_input_configs'][0])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
# RGB + 2 additional channels = 5 channels.
self.assertEqual([1, 300, 300, 5], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_error_with_bad_train_config(self):
"""Tests that a TypeError is raised with improper train config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['eval_config'], # Expecting `TrainConfig`.
train_input_config=configs['train_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_input_config(self):
"""Tests that a TypeError is raised with improper train input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_model_config(self):
"""Tests that a TypeError is raised with improper train model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['train_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_eval_config(self):
"""Tests that a TypeError is raised with improper eval config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['train_config'], # Expecting `EvalConfig`.
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_input_config(self):
"""Tests that a TypeError is raised with improper eval input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_model_config(self):
"""Tests that a TypeError is raised with improper eval model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['eval_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
eval_input_fn()
def test_output_equal_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
test_string = 'hello world'
feed_dict = {string_placeholder: test_string}
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
self.assertEqual(test_string, out_string)
def test_output_is_integer_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
empty_string = ''
feed_dict = {string_placeholder: empty_string}
tf.set_random_seed(0)
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
# Test whether out_string is a string which represents an integer.
int(out_string) # throws an error if out_string is not castable to int.
self.assertEqual(out_string, '2798129067578209328')
class DataAugmentationFnTest(test_case.TestCase):
def test_apply_image_and_box_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],
[[10, 10, 20, 20]]
)
def test_apply_image_and_box_augmentation_with_scores(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1.0], np.float32)),
fields.InputDataFields.groundtruth_weights:
tf.constant(np.array([0.8], np.float32)),
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],
[[10, 10, 20, 20]]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_classes],
[1.0]
)
self.assertAllClose(
augmented_tensor_dict_out[
fields.InputDataFields.groundtruth_weights],
[0.8]
)
def test_include_masks_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
})
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.zeros([2, 10, 10], np.uint8))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3])
self.assertAllEqual(augmented_tensor_dict_out[
fields.InputDataFields.groundtruth_instance_masks].shape, [2, 20, 20])
def test_include_keypoints_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],
[[10, 10, 20, 20]]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_keypoints],
[[[10, 20], [10, 10]]]
)
def _fake_model_preprocessor_fn(image):
return (image, tf.expand_dims(tf.shape(image)[1:], axis=0))
def _fake_image_resizer_fn(image, mask):
return (image, mask, tf.shape(image))
class DataTransformationFnTest(test_case.TestCase):
def test_combine_additional_channels_if_present(self):
image = np.random.rand(4, 4, 3).astype(np.float32)
additional_channels = np.random.rand(4, 4, 2).astype(np.float32)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(image),
fields.InputDataFields.image_additional_channels:
tf.constant(additional_channels),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 1], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=1)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(transformed_inputs[fields.InputDataFields.image].dtype,
tf.float32)
self.assertAllEqual(transformed_inputs[fields.InputDataFields.image].shape,
[4, 4, 5])
self.assertAllClose(transformed_inputs[fields.InputDataFields.image],
np.concatenate((image, additional_channels), axis=2))
def test_returns_correct_class_label_encodings(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[0, 0, 1], [1, 0, 0]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_confidences],
[[0, 0, 1], [1, 0, 0]])
def test_returns_correct_merged_boxes(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
merge_multiple_boxes=True)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_boxes],
[[.5, .5, 1., 1.]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[1, 0, 1]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_confidences],
[[1, 0, 1]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.num_groundtruth_boxes],
1)
def test_returns_correct_groundtruth_confidences_when_input_present(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32)),
fields.InputDataFields.groundtruth_confidences:
tf.constant(np.array([1.0, -1.0], np.float32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[0, 0, 1], [1, 0, 0]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_confidences],
[[0, 0, 1], [-1, 0, 0]])
def test_returns_resized_masks(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(2, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32)),
fields.InputDataFields.original_image_spatial_shape:
tf.constant(np.array([4, 4], np.int32))
}
def fake_image_resizer_fn(image, masks=None):
resized_image = tf.image.resize_images(image, [8, 8])
results = [resized_image]
if masks is not None:
resized_masks = tf.transpose(
tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),
[2, 0, 1])
results.append(resized_masks)
results.append(tf.shape(resized_image))
return results
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=fake_image_resizer_fn,
num_classes=num_classes,
retain_original_image=True)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(transformed_inputs[
fields.InputDataFields.original_image].dtype, tf.uint8)
self.assertAllEqual(transformed_inputs[
fields.InputDataFields.original_image_spatial_shape], [4, 4])
self.assertAllEqual(transformed_inputs[
fields.InputDataFields.original_image].shape, [8, 8, 3])
self.assertAllEqual(transformed_inputs[
fields.InputDataFields.groundtruth_instance_masks].shape, [2, 8, 8])
def test_applies_model_preprocess_fn_to_image_tensor(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def fake_model_preprocessor_fn(image):
return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(transformed_inputs[fields.InputDataFields.image],
np_image / 255.)
self.assertAllClose(transformed_inputs[fields.InputDataFields.
true_image_shape],
[4, 4, 3])
def test_applies_data_augmentation_fn_to_tensor_dict(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def add_one_data_augmentation_fn(tensor_dict):
return {key: value + 1 for key, value in tensor_dict.items()}
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_one_data_augmentation_fn)
with self.test_session() as sess:
augmented_tensor_dict = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
np_image + 1)
self.assertAllEqual(
augmented_tensor_dict[fields.InputDataFields.groundtruth_classes],
[[0, 0, 0, 1], [0, 1, 0, 0]])
def test_applies_data_augmentation_fn_before_model_preprocess_fn(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def mul_two_model_preprocessor_fn(image):
return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0))
def add_five_to_image_data_augmentation_fn(tensor_dict):
tensor_dict[fields.InputDataFields.image] += 5
return tensor_dict
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=mul_two_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_five_to_image_data_augmentation_fn)
with self.test_session() as sess:
augmented_tensor_dict = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
(np_image + 5) * 2)
class PadInputDataToStaticShapesFnTest(test_case.TestCase):
def test_pad_images_boxes_and_classes(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 3]),
fields.InputDataFields.groundtruth_boxes:
tf.placeholder(tf.float32, [None, 4]),
fields.InputDataFields.groundtruth_classes:
tf.placeholder(tf.int32, [None, 3]),
fields.InputDataFields.true_image_shape:
tf.placeholder(tf.int32, [3]),
fields.InputDataFields.original_image_spatial_shape:
tf.placeholder(tf.int32, [2])
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.true_image_shape]
.shape.as_list(), [3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.original_image_spatial_shape]
.shape.as_list(), [2])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]
.shape.as_list(), [3, 4])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_classes]
.shape.as_list(), [3, 3])
def test_clip_boxes_and_classes(self):
input_tensor_dict = {
fields.InputDataFields.groundtruth_boxes:
tf.placeholder(tf.float32, [None, 4]),
fields.InputDataFields.groundtruth_classes:
tf.placeholder(tf.int32, [None, 3]),
fields.InputDataFields.num_groundtruth_boxes:
tf.placeholder(tf.int32, [])
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]
.shape.as_list(), [3, 4])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_classes]
.shape.as_list(), [3, 3])
with self.test_session() as sess:
out_tensor_dict = sess.run(
padded_tensor_dict,
feed_dict={
input_tensor_dict[fields.InputDataFields.groundtruth_boxes]:
np.random.rand(5, 4),
input_tensor_dict[fields.InputDataFields.groundtruth_classes]:
np.random.rand(2, 3),
input_tensor_dict[fields.InputDataFields.num_groundtruth_boxes]:
5,
})
self.assertAllEqual(
out_tensor_dict[fields.InputDataFields.groundtruth_boxes].shape, [3, 4])
self.assertAllEqual(
out_tensor_dict[fields.InputDataFields.groundtruth_classes].shape,
[3, 3])
self.assertEqual(
out_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
3)
def test_do_not_pad_dynamic_images(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 3]),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[None, None])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[None, None, 3])
def test_images_and_additional_channels(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 3]),
fields.InputDataFields.image_additional_channels:
tf.placeholder(tf.float32, [None, None, 2]),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 5])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image_additional_channels]
.shape.as_list(), [5, 6, 2])
def test_gray_images(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 1]),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 1])
def test_gray_images_and_additional_channels(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.placeholder(tf.float32, [None, None, 1]),
fields.InputDataFields.image_additional_channels:
tf.placeholder(tf.float32, [None, None, 2]),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image_additional_channels]
.shape.as_list(), [5, 6, 2])
def test_keypoints(self):
input_tensor_dict = {
fields.InputDataFields.groundtruth_keypoints:
tf.placeholder(tf.float32, [None, 16, 4]),
fields.InputDataFields.groundtruth_keypoint_visibilities:
tf.placeholder(tf.bool, [None, 16]),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_keypoints]
.shape.as_list(), [3, 16, 4])
self.assertAllEqual(
padded_tensor_dict[
fields.InputDataFields.groundtruth_keypoint_visibilities]
.shape.as_list(), [3, 16])
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit | deployment_toolkit | core | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
def __str__(self):
return self.value
class BackendAccelerator(Parameter):
NONE = "none"
AMP = "amp"
TRT = "trt"
class ExportPrecision(Parameter):
FP16 = "fp16"
FP32 = "fp32"
class Precision(Parameter):
INT8 = "int8"
FP16 = "fp16"
FP32 = "fp32"
class DeviceKind(Parameter):
CPU = "cpu"
GPU = "gpu"
class ModelInputType(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
PYT = "pyt"
class Format(Parameter):
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
ONNX = "onnx"
TORCHSCRIPT = "torchscript"
TRT = "trt"
FASTERTRANSFORMER = "fastertransformer"
# deprecated, backward compatibility only
TS_TRACE = "ts-trace"
TS_SCRIPT = "ts-script"
class ExportFormat(Parameter):
TF_SAVEDMODEL = "tf-savedmodel"
TORCHSCRIPT = "torchscript"
ONNX = "onnx"
# deprecated, backward compatibility only
TS_TRACE = "ts-trace"
TS_SCRIPT = "ts-script"
class TorchJit(Parameter):
NONE = "none"
TRACE = "trace"
SCRIPT = "script"
class Model(NamedTuple):
handle: object
# TODO: precision should be removed
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
        Loads and processes a model from a file based on the given set of args.
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
        self.update(ids=ids, y_pred=y_pred, x=x, y_real=y_real)
        return self.metrics
@abc.abstractmethod
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
pass
@property
@abc.abstractmethod
def metrics(self) -> Dict[str, Any]:
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
class MeasurementMode(Enum):
"""
Available measurement stabilization modes
"""
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
class PerformanceTool(Enum):
"""
Available performance evaluation tools
"""
MODEL_ANALYZER = "model_analyzer"
PERF_ANALYZER = "perf_analyzer"
class EvaluationMode(Enum):
"""
Available evaluation modes
"""
OFFLINE = "offline"
ONLINE = "online"
class OfflineMode(Enum):
"""
Available offline mode for memory
"""
SYSTEM = "system"
CUDA = "cuda"
|
TensorFlow2/Detection/Efficientdet/object_detection | object_detection | __init__ | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
TensorFlow/Classification/ConvNets/utils | utils | dali_index | #!/bin/bash
SRC_DIR=${1}
DST_DIR=${2}
echo "Creating training file indexes"
mkdir -p ${DST_DIR}
for file in ${SRC_DIR}/train-*; do
BASENAME=$(basename $file)
DST_NAME=$DST_DIR/$BASENAME
echo "Creating index $DST_NAME for $file"
tfrecord2idx $file $DST_NAME
done
echo "Creating validation file indexes"
for file in ${SRC_DIR}/validation-*; do
BASENAME=$(basename $file)
DST_NAME=$DST_DIR/$BASENAME
echo "Creating index $DST_NAME for $file"
tfrecord2idx $file $DST_NAME
done
|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/model_analyzer | model_analyzer | exceptions | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ModelAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
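# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The error message below is invented for illustration.
if __name__ == "__main__":
    try:
        raise ModelAnalyzerException("profiling failed: no measurements collected")
    except ModelAnalyzerException as error:
        print(error.message)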
|
TensorFlow/Segmentation/UNet_Medical/examples | examples | unet_TRAIN_BENCHMARK_8GPU | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches a U-Net training benchmark in FP32 on 8 GPUs. Usage:
# bash unet_TRAIN_BENCHMARK_FP32_8GPU.sh <path to dataset> <path to results directory> <batch size>
horovodrun -np 8 python main.py --data_dir $1 --model_dir $2 --batch_size $3 --exec_mode train --augment --benchmark --warmup_steps 200 --max_steps 1000 --xla |
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | maskGenerator | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "maskGenerator.h"
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int BLOCK_SIZE = 512;
}
/******************************************************************************
* KERNELS ********************************************************************
*****************************************************************************/
__global__ void generateMaskKernel(const int32_t* const lengthsDevice, const int maskLength, float* const maskDevice)
{
const int length = lengthsDevice[blockIdx.x];
for (int i = threadIdx.x; i < maskLength; i += blockDim.x)
{
maskDevice[maskLength * blockIdx.x + i] = static_cast<float>(i < length);
}
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
namespace tts
{
void MaskGenerator::generate(const int32_t* const lengthsDevice, const int maskLength, const int batchSize,
float* const maskDevice, cudaStream_t stream)
{
const dim3 grid(batchSize);
const dim3 block(BLOCK_SIZE);
generateMaskKernel<<<grid, block, 0, stream>>>(lengthsDevice, maskLength, maskDevice);
}
} // namespace tts
|
PyTorch/SpeechSynthesis/FastPitch/triton/scripts | scripts | setup_environment | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKDIR="${WORKDIR:=$(pwd)}"
export WORKSPACE_DIR=${WORKDIR}/workspace
export DATASETS_DIR=${WORKSPACE_DIR}/datasets_dir
export CHECKPOINT_DIR=${WORKSPACE_DIR}/checkpoint_dir
export MODEL_REPOSITORY_PATH=${WORKSPACE_DIR}/model_store
export SHARED_DIR=${WORKSPACE_DIR}/shared_dir
echo "Preparing directories"
mkdir -p ${WORKSPACE_DIR}
mkdir -p ${DATASETS_DIR}
mkdir -p ${CHECKPOINT_DIR}
mkdir -p ${MODEL_REPOSITORY_PATH}
mkdir -p ${SHARED_DIR}
echo "Setting up environment"
export MODEL_NAME="FastPitch"
export TRITON_LOAD_MODEL_METHOD=explicit
export TRITON_INSTANCES=1
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling/roi_heads | roi_heads | roi_heads | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .box_head.box_head import build_roi_box_head
from .mask_head.mask_head import build_roi_mask_head
class CombinedROIHeads(torch.nn.ModuleDict):
"""
Combines a set of individual heads (for box prediction or masks) into a single
head.
"""
def __init__(self, cfg, heads):
super(CombinedROIHeads, self).__init__(heads)
self.cfg = cfg.clone()
if cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
self.mask.feature_extractor = self.box.feature_extractor
def forward(self, features, proposals, targets=None):
losses = {}
# TODO rename x to roi_box_features, if it doesn't increase memory consumption
x, detections, loss_box = self.box(features, proposals, targets)
losses.update(loss_box)
if self.cfg.MODEL.MASK_ON:
mask_features = features
# optimization: during training, if we share the feature extractor between
# the box and the mask heads, then we can reuse the features already computed
if (
self.training
and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR
):
mask_features = x
# During training, self.box() will return the unaltered proposals as "detections"
# this makes the API consistent during training and testing
x, detections, loss_mask = self.mask(mask_features, detections, targets)
losses.update(loss_mask)
return x, detections, losses
def build_roi_heads(cfg):
# individually create the heads, that will be combined together
# afterwards
roi_heads = []
if not cfg.MODEL.RPN_ONLY:
roi_heads.append(("box", build_roi_box_head(cfg)))
if cfg.MODEL.MASK_ON:
roi_heads.append(("mask", build_roi_mask_head(cfg)))
# combine individual heads in a single module
if roi_heads:
roi_heads = CombinedROIHeads(cfg, roi_heads)
return roi_heads
|
TensorFlow/Detection/SSD/models/research/slim/nets/mobilenet | mobilenet | conv_blocks | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolution blocks for mobilenet."""
import contextlib
import functools
import tensorflow as tf
slim = tf.contrib.slim
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
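# Illustrative example (sketch): with a 3x3 kernel and rate=1 the effective kernel
# size is 3, so one row/column of zeros is added on each side, i.e.
# _fixed_padding(x, [3, 3]) maps [batch, H, W, C] -> [batch, H + 2, W + 2, C].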
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
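# Illustrative examples (sketch): the helper rounds a channel count to the nearest
# multiple of `divisor` without dropping below 90% of the requested value:
#   _make_divisible(33, 8) -> 32
#   _make_divisible(10, 8) -> 16   (8 would be < 0.9 * 10, so the result is bumped up)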
def _split_divisible(num, num_ways, divisible_by=8):
"""Evenly splits num, num_ways so each piece is a multiple of divisible_by."""
assert num % divisible_by == 0
assert num / num_ways >= divisible_by
# Note: want to round down, we adjust each split to match the total.
base = num // num_ways // divisible_by * divisible_by
result = []
accumulated = 0
for i in range(num_ways):
r = base
while accumulated + r < num * (i + 1) / num_ways:
r += divisible_by
result.append(r)
accumulated += r
assert accumulated == num
return result
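# Illustrative example (sketch): splitting 32 filters three ways with divisible_by=8
# keeps every part a multiple of 8 while still summing to the total:
#   _split_divisible(32, 3, divisible_by=8) -> [16, 8, 8]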
@contextlib.contextmanager
def _v1_compatible_scope_naming(scope):
if scope is None: # Create uniquely named separable blocks.
with tf.variable_scope(None, default_name='separable') as s, \
tf.name_scope(s.original_name_scope):
yield ''
else:
# We use scope_depthwise, scope_pointwise for compatibility with V1 ckpts,
# which provide numbered scopes.
scope += '_'
yield scope
@slim.add_arg_scope
def split_separable_conv2d(input_tensor,
num_outputs,
scope=None,
normalizer_fn=None,
stride=1,
rate=1,
endpoints=None,
use_explicit_padding=False):
"""Separable mobilenet V1 style convolution.
Depthwise convolution, with default non-linearity,
followed by a 1x1 pointwise convolution. This is similar to
slim.separable_conv2d, but differs in that it applies batch
normalization and non-linearity to the depthwise convolution. This matches
the basic building block of the MobileNet paper
(https://arxiv.org/abs/1704.04861)
Args:
input_tensor: input
num_outputs: number of outputs
scope: optional name of the scope. Note if provided it will use
scope_depthwise for depthwise, and scope_pointwise for pointwise.
normalizer_fn: which normalizer function to use for depthwise/pointwise
stride: stride
rate: output rate (also known as dilation rate)
endpoints: optional, if provided, will export additional tensors to it.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
Returns:
output tensor
"""
with _v1_compatible_scope_naming(scope) as scope:
dw_scope = scope + 'depthwise'
endpoints = endpoints if endpoints is not None else {}
kernel_size = [3, 3]
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
input_tensor = _fixed_padding(input_tensor, kernel_size, rate)
net = slim.separable_conv2d(
input_tensor,
None,
kernel_size,
depth_multiplier=1,
stride=stride,
rate=rate,
normalizer_fn=normalizer_fn,
padding=padding,
scope=dw_scope)
endpoints[dw_scope] = net
pw_scope = scope + 'pointwise'
net = slim.conv2d(
net,
num_outputs, [1, 1],
stride=1,
normalizer_fn=normalizer_fn,
scope=pw_scope)
endpoints[pw_scope] = net
return net
def expand_input_by_factor(n, divisible_by=8):
return lambda num_inputs, **_: _make_divisible(num_inputs * n, divisible_by)
@slim.add_arg_scope
def expanded_conv(input_tensor,
num_outputs,
expansion_size=expand_input_by_factor(6),
stride=1,
rate=1,
kernel_size=(3, 3),
residual=True,
normalizer_fn=None,
project_activation_fn=tf.identity,
split_projection=1,
split_expansion=1,
expansion_transform=None,
depthwise_location='expansion',
depthwise_channel_multiplier=1,
endpoints=None,
use_explicit_padding=False,
padding='SAME',
scope=None):
"""Depthwise Convolution Block with expansion.
Builds a composite convolution that has the following structure
expansion (1x1) -> depthwise (kernel_size) -> projection (1x1)
Args:
input_tensor: input
num_outputs: number of outputs in the final layer.
expansion_size: the size of expansion, could be a constant or a callable.
If the latter, it will be provided 'num_inputs' as an input. For forward
compatibility it should accept arbitrary keyword arguments.
Default will expand the input by factor of 6.
stride: depthwise stride
rate: depthwise rate
kernel_size: depthwise kernel
residual: whether to include residual connection between input
and output.
normalizer_fn: batchnorm or otherwise
project_activation_fn: activation function for the project layer
split_projection: how many ways to split projection operator
(that is conv expansion->bottleneck)
split_expansion: how many ways to split expansion op
(that is conv bottleneck->expansion) ops will keep depth divisible
by this value.
expansion_transform: Optional function that takes expansion
as a single input and returns output.
depthwise_location: where to put the depthwise convolution; supported
values are None, 'input', 'output', 'expansion'
depthwise_channel_multiplier: depthwise channel multiplier:
each input will be replicated (with different filters)
that many times. So if the input had c channels,
the output will have c x depthwise_channel_multiplier.
endpoints: An optional dictionary into which intermediate endpoints are
placed. The keys "expansion_output", "depthwise_output",
"projection_output" and "expansion_transform" are always populated, even
if the corresponding functions are not invoked.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
padding: Padding type to use if `use_explicit_padding` is not set.
scope: optional scope.
Returns:
Tensor of depth num_outputs
Raises:
TypeError: on invalid depthwise_location or incompatible padding arguments.
"""
with tf.variable_scope(scope, default_name='expanded_conv') as s, \
tf.name_scope(s.original_name_scope):
prev_depth = input_tensor.get_shape().as_list()[3]
if depthwise_location not in [None, 'input', 'output', 'expansion']:
raise TypeError('%r is unknown value for depthwise_location' %
depthwise_location)
if use_explicit_padding:
if padding != 'SAME':
raise TypeError('`use_explicit_padding` should only be used with '
'"SAME" padding.')
padding = 'VALID'
depthwise_func = functools.partial(
slim.separable_conv2d,
num_outputs=None,
kernel_size=kernel_size,
depth_multiplier=depthwise_channel_multiplier,
stride=stride,
rate=rate,
normalizer_fn=normalizer_fn,
padding=padding,
scope='depthwise')
# b1 -> b2 * r -> b2
# i -> (o * r) (bottleneck) -> o
input_tensor = tf.identity(input_tensor, 'input')
net = input_tensor
if depthwise_location == 'input':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net, activation_fn=None)
if callable(expansion_size):
inner_size = expansion_size(num_inputs=prev_depth)
else:
inner_size = expansion_size
if inner_size > net.shape[3]:
net = split_conv(
net,
inner_size,
num_ways=split_expansion,
scope='expand',
stride=1,
normalizer_fn=normalizer_fn)
net = tf.identity(net, 'expansion_output')
if endpoints is not None:
endpoints['expansion_output'] = net
if depthwise_location == 'expansion':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if expansion_transform:
net = expansion_transform(expansion_tensor=net, input_tensor=input_tensor)
# Note in contrast with expansion, we always have
# projection to produce the desired output size.
net = split_conv(
net,
num_outputs,
num_ways=split_projection,
stride=1,
scope='project',
normalizer_fn=normalizer_fn,
activation_fn=project_activation_fn)
if endpoints is not None:
endpoints['projection_output'] = net
if depthwise_location == 'output':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net, activation_fn=None)
if callable(residual): # custom residual
net = residual(input_tensor=input_tensor, output_tensor=net)
elif (residual and
# stride check enforces that we don't add residuals when spatial
# dimensions are None
stride == 1 and
# Depth matches
net.get_shape().as_list()[3] ==
input_tensor.get_shape().as_list()[3]):
net += input_tensor
return tf.identity(net, name='output')
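# Example usage (an illustrative sketch with assumed shapes): a MobileNetV2-style
# inverted residual block that expands a 32-channel input by 6x, applies a stride-2
# 3x3 depthwise convolution and projects down to 16 channels. `images` is a
# hypothetical placeholder tensor, not defined in this file.
#
#   images = tf.placeholder(tf.float32, [1, 224, 224, 32])
#   net = expanded_conv(images, num_outputs=16,
#                       expansion_size=expand_input_by_factor(6),
#                       stride=2, normalizer_fn=slim.batch_norm)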
def split_conv(input_tensor,
num_outputs,
num_ways,
scope,
divisible_by=8,
**kwargs):
"""Creates a split convolution.
Split convolution splits the input and output into
'num_ways' blocks of approximately the same size each,
and only connects the $i$-th input block to the $i$-th output block.
Args:
input_tensor: input tensor
num_outputs: number of output filters
num_ways: num blocks to split by.
scope: scope for all the operators.
divisible_by: make sure that every part is divisible by this.
**kwargs: will be passed directly into conv2d operator
Returns:
tensor
"""
b = input_tensor.get_shape().as_list()[3]
if num_ways == 1 or min(b // num_ways,
num_outputs // num_ways) < divisible_by:
# Don't do any splitting if we end up with less than 8 filters
# on either side.
return slim.conv2d(input_tensor, num_outputs, [1, 1], scope=scope, **kwargs)
outs = []
input_splits = _split_divisible(b, num_ways, divisible_by=divisible_by)
output_splits = _split_divisible(
num_outputs, num_ways, divisible_by=divisible_by)
inputs = tf.split(input_tensor, input_splits, axis=3, name='split_' + scope)
base = scope
for i, (input_tensor, out_size) in enumerate(zip(inputs, output_splits)):
scope = base + '_part_%d' % (i,)
n = slim.conv2d(input_tensor, out_size, [1, 1], scope=scope, **kwargs)
n = tf.identity(n, scope + '_output')
outs.append(n)
return tf.concat(outs, 3, name=scope + '_concat')
|
PyTorch/Classification/ConvNets/resnet50v1.5/training/AMP | AMP | DGX1V_resnet50_AMP_250E | python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGX1V /imagenet --workspace ${1:-./} --raport-file raport.json
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/deployment/export | export | ts-script | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
config:
type: ts-script
|
PyTorch/LanguageModeling/BART/scripts | scripts | run_summarization | #!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
DATA_DIR=${1:-data/cnn_dm/}
CKPT_PATH=${2:-data/nvidia_pretrained/bart_large}
CONFIG_PATH=${3:-"configs/config.json"}
NUM_GPU=${4:-8}
LR=${5:-1.25e-4}
BS=${6:-40}
ACCUM=${7:-1}
PREC=${8:-"bf16"}
TRAIN_STEPS=${9:-2000}
WARMUP_STEPS=${10:-50}
MAX_SOURCE_LEN=${11:-1024}
MAX_TARGET_LEN=${12:-142}
EVAL_BEAMS=${13:-4}
EVAL_BS=${14:-128}
PRED_BS=${15:-64}
PRELN=${16:-true}
if [ "$PREC" = "fp16" ] ; then
echo "fp16 activated!"
USE_FP16="--fp16"
elif [ "$PREC" = "bf16" ] ; then
echo "bf16 activated!"
USE_FP16="--bf16"
else
echo "fp32/tf32 activated!"
USE_FP16=""
fi
if [ "$PRELN" = "true" ] ; then
echo "Trained with PreLN"
USE_FP16="--pre_ln $USE_FP16"
else
echo "Trained with PostLN"
fi
printf -v TAG "bart_pyt"
DATESTAMP=`date +'%y%m%d%H%M%S'`
RESULTS_DIR=${RESULTS_DIR:-results/${TAG}_${DATESTAMP}}
mkdir -p ${RESULTS_DIR}
export TOKENIZERS_PARALLELISM="true"
python -m torch.distributed.launch --nproc_per_node=${NUM_GPU:-8} finetune.py \
--data_dir=${DATA_DIR} \
--config_path=${CONFIG_PATH} \
--output_dir=${RESULTS_DIR} \
--gpus ${NUM_GPU} \
--learning_rate=${LR:-1e-4} \
${USE_FP16} \
--do_train \
--n_val -1 \
--train_batch_size=${BS} --gradient_accumulation_steps=${ACCUM} \
--eval_batch_size=${EVAL_BS} \
--max_steps ${TRAIN_STEPS} --warmup_steps ${WARMUP_STEPS} \
--max_source_length=${MAX_SOURCE_LEN} --max_target_length=${MAX_TARGET_LEN} \
--val_max_target_length=${MAX_TARGET_LEN} --eval_max_gen_length=${MAX_TARGET_LEN} \
--sortish_sampler \
--lr_scheduler polynomial \
--label_smoothing 0.1 \
--weight_decay 0.1 \
--dropout 0.1 --attention_dropout 0.1 --gradient_clip_val=0.1 \
--eval_beams 0 --freeze_embeds \
--seed ${SEED:-42} \
--resume_from_checkpoint=${CKPT_PATH} --load_model_weights_only \
${@:17} |& tee ${RESULTS_DIR}/joblog.log
echo "completed training! Begin test" |& tee -a ${RESULTS_DIR}/joblog.log
INIT_CKPT=$(ls ${RESULTS_DIR}/final_step.ckpt | sort -n | tail -1)
python -m torch.distributed.launch --nproc_per_node=${NUM_GPU:-8} run_eval.py \
--task summarization \
--bs ${PRED_BS} --max_source_length=${MAX_SOURCE_LEN} --max_target_length=${MAX_TARGET_LEN} \
--eval_max_gen_length=${MAX_TARGET_LEN} --eval_beams=${EVAL_BEAMS} ${USE_FP16} \
${INIT_CKPT} ${CONFIG_PATH} ${DATA_DIR} ${RESULTS_DIR} |& tee -a ${RESULTS_DIR}/joblog.log
|
TensorFlow2/Segmentation/UNet_Medical | UNet_Medical | requirements | Pillow
tf2onnx
munch |
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt | tft_pyt | data_utils | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import pickle
import enum
import datetime
from collections import namedtuple, OrderedDict
import sklearn.preprocessing
from sklearn.impute import SimpleImputer
import pandas as pd
import numpy as np
from bisect import bisect
import torch
from torch.utils.data import Dataset,IterableDataset,DataLoader
class DataTypes(enum.IntEnum):
"""Defines numerical types of each column."""
CONTINUOUS = 0
CATEGORICAL = 1
DATE = 2
STR = 3
class InputTypes(enum.IntEnum):
"""Defines input types of each column."""
TARGET = 0
OBSERVED = 1
KNOWN = 2
STATIC = 3
ID = 4 # Single column used as an entity identifier
TIME = 5 # Single column exclusively used as a time index
FeatureSpec = namedtuple('FeatureSpec', ['name', 'feature_type', 'feature_embed_type'])
DTYPE_MAP = {
DataTypes.CONTINUOUS : np.float32,
DataTypes.CATEGORICAL : np.int64,
DataTypes.DATE:'datetime64[ns]',
DataTypes.STR: str
}
FEAT_ORDER = [
(InputTypes.STATIC, DataTypes.CATEGORICAL),
(InputTypes.STATIC, DataTypes.CONTINUOUS),
(InputTypes.KNOWN, DataTypes.CATEGORICAL),
(InputTypes.KNOWN, DataTypes.CONTINUOUS),
(InputTypes.OBSERVED, DataTypes.CATEGORICAL),
(InputTypes.OBSERVED, DataTypes.CONTINUOUS),
(InputTypes.TARGET, DataTypes.CONTINUOUS),
(InputTypes.ID, DataTypes.CATEGORICAL)
]
FEAT_NAMES = ['s_cat' , 's_cont' , 'k_cat' , 'k_cont' , 'o_cat' , 'o_cont' , 'target', 'id']
DEFAULT_ID_COL = 'id'
class TFTBinaryDataset(Dataset):
def __init__(self, path, config):
super(TFTBinaryDataset).__init__()
self.features = [x for x in config.features if x.feature_embed_type != DataTypes.DATE]
self.example_length = config.example_length
self.stride = config.dataset_stride
self.grouped = pickle.load(open(path, 'rb'))
self.grouped = [x for x in self.grouped if x.shape[0] >= self.example_length]
self._cum_examples_in_group = np.cumsum([(g.shape[0] - self.example_length + 1)//self.stride for g in self.grouped])
self.feature_type_col_map = [[i for i,f in enumerate(self.features) if (f.feature_type, f.feature_embed_type) == x] for x in FEAT_ORDER]
# The list comprehension below is an elaborate way of rearranging data into correct order,
# simultaneously doing casting to proper types. Probably can be written neater
self.grouped = [
[
arr[:, idxs].view(dtype=np.float32).astype(DTYPE_MAP[t[1]])
for t, idxs in zip(FEAT_ORDER, self.feature_type_col_map)
]
for arr in self.grouped
]
def __len__(self):
return self._cum_examples_in_group[-1] if len(self._cum_examples_in_group) else 0
def __getitem__(self, idx):
g_idx = bisect(self._cum_examples_in_group, idx)
e_idx = idx - self._cum_examples_in_group[g_idx-1] if g_idx else idx
group = self.grouped[g_idx]
tensors = [
torch.from_numpy(feat[e_idx * self.stride:e_idx*self.stride + self.example_length])
if feat.size else torch.empty(0)
for feat in group
]
return OrderedDict(zip(FEAT_NAMES, tensors))
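# Example usage (an illustrative sketch): the binary dataset plugs directly into a
# standard DataLoader. `config` is assumed to expose `features`, `example_length`
# and `dataset_stride`, as used above; the path below is hypothetical.
#
#   train_set = TFTBinaryDataset('/data/processed/train.bin', config)
#   loader = DataLoader(train_set, batch_size=64, shuffle=True, num_workers=2)
#   batch = next(iter(loader))  # OrderedDict keyed by FEAT_NAMES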
class TFTDataset(Dataset):
def __init__(self, path, config):
super(TFTDataset).__init__()
self.features = config.features
self.data = pd.read_csv(path, index_col=0)
self.example_length = config.example_length
self.stride = config.dataset_stride
# name field is a column name.
# there can be multiple entries with the same name because one column can be interpreted in many ways
time_col_name = next(x.name for x in self.features if x.feature_type==InputTypes.TIME)
id_col_name = next(x.name for x in self.features if x.feature_type==InputTypes.ID)
if not id_col_name in self.data.columns:
id_col_name = DEFAULT_ID_COL
self.features = [x for x in self.features if x.feature_type!=InputTypes.ID]
self.features.append(FeatureSpec(DEFAULT_ID_COL, InputTypes.ID, DataTypes.CATEGORICAL))
col_dtypes = {v.name:DTYPE_MAP[v.feature_embed_type] for v in self.features}
self.data.sort_values(time_col_name,inplace=True)
self.data = self.data[set(x.name for x in self.features)] #leave only relevant columns
self.data = self.data.astype(col_dtypes)
self.data = self.data.groupby(id_col_name).filter(lambda group: len(group) >= self.example_length)
self.grouped = list(self.data.groupby(id_col_name))
self._cum_examples_in_group = np.cumsum([(len(g[1]) - self.example_length + 1)//self.stride for g in self.grouped])
def __len__(self):
return self._cum_examples_in_group[-1]
def __getitem__(self, idx):
g_idx = len([x for x in self._cum_examples_in_group if x <= idx])
e_idx = idx - self._cum_examples_in_group[g_idx-1] if g_idx else idx
group = self.grouped[g_idx][1]
sliced = group.iloc[e_idx * self.stride:e_idx*self.stride + self.example_length]
# We need to be sure that tensors are returned in the correct order
tensors = tuple([] for _ in range(8))
for v in self.features:
if v.feature_type == InputTypes.STATIC and v.feature_embed_type == DataTypes.CATEGORICAL:
tensors[0].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.STATIC and v.feature_embed_type == DataTypes.CONTINUOUS:
tensors[1].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.KNOWN and v.feature_embed_type == DataTypes.CATEGORICAL:
tensors[2].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.KNOWN and v.feature_embed_type == DataTypes.CONTINUOUS:
tensors[3].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.OBSERVED and v.feature_embed_type == DataTypes.CATEGORICAL:
tensors[4].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.OBSERVED and v.feature_embed_type == DataTypes.CONTINUOUS:
tensors[5].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.TARGET:
tensors[6].append(torch.from_numpy(sliced[v.name].to_numpy()))
elif v.feature_type == InputTypes.ID:
tensors[7].append(torch.from_numpy(sliced[v.name].to_numpy()))
tensors = [torch.stack(x, dim=-1) if x else torch.empty(0) for x in tensors]
return OrderedDict(zip(FEAT_NAMES, tensors))
def get_dataset_splits(df, config):
if hasattr(config, 'relative_split') and config.relative_split:
forecast_len = config.example_length - config.encoder_length
# The valid split is shifted from the train split by number of the forecast steps to the future.
# The test split is shifted by the number of the forecast steps from the valid split
train = []
valid = []
test = []
for _, group in df.groupby(DEFAULT_ID_COL):
index = group[config.time_ids]
_train = group.loc[index < config.valid_boundary]
_valid = group.iloc[(len(_train) - config.encoder_length):(len(_train) + forecast_len)]
_test = group.iloc[(len(_train) - config.encoder_length + forecast_len):(len(_train) + 2*forecast_len)]
train.append(_train)
valid.append(_valid)
test.append(_test)
train = pd.concat(train, axis=0)
valid = pd.concat(valid, axis=0)
test = pd.concat(test, axis=0)
else:
index = df[config.time_ids]
train = df.loc[(index >= config.train_range[0]) & (index < config.train_range[1])]
valid = df.loc[(index >= config.valid_range[0]) & (index < config.valid_range[1])]
test = df.loc[(index >= config.test_range[0]) & (index < config.test_range[1])]
return train, valid, test
def flatten_ids(df, config):
if config.missing_id_strategy == 'drop':
if hasattr(config, 'combine_ids') and config.combine_ids:
index = np.logical_or.reduce([df[c].isna() for c in config.combine_ids])
else:
id_col = next(x.name for x in config.features if x.feature_type == InputTypes.ID)
index = df[id_col].isna()
index = index[index == True].index # Extract indices of nans
df.drop(index, inplace=True)
if not (hasattr(config, 'combine_ids') and config.combine_ids):
id_col = next(x.name for x in config.features if x.feature_type == InputTypes.ID)
ids = df[id_col].apply(str)
df.drop(id_col, axis=1, inplace=True)
encoder = sklearn.preprocessing.LabelEncoder().fit(ids.values)
df[DEFAULT_ID_COL] = encoder.transform(ids)
encoders = OrderedDict({id_col: encoder})
else:
encoders = {c:sklearn.preprocessing.LabelEncoder().fit(df[c].values) for c in config.combine_ids}
encoders = OrderedDict(encoders)
lens = [len(v.classes_) for v in encoders.values()]
clens = np.roll(np.cumprod(lens), 1)
clens[0] = 1
# this takes a looooooot of time. Probably it would be better to create 2 dummy columns
df[DEFAULT_ID_COL] = df.apply(lambda row: sum([encoders[c].transform([row[c]])[0]*clens[i] for i,c in enumerate(encoders.keys())]), axis=1)
df.drop(config.combine_ids, axis=1, inplace=True)
return DEFAULT_ID_COL, encoders
def impute(df, config):
#XXX This ensures that our scaling will have the same mean. We still need to check the variance
if not hasattr(config, 'missing_data_label'):
return df, None
else:
imp = SimpleImputer(missing_values=config.missing_data_label, strategy='mean')
mask = df.applymap(lambda x: True if x == config.missing_data_label else False)
data = df.values
col_mask = (data == config.missing_data_label).all(axis=0)
data[:,~col_mask] = imp.fit_transform(data)
return data, mask
def normalize_reals(train, valid, test, config, id_col=DEFAULT_ID_COL):
tgt_cols = [x.name for x in config.features if x.feature_type == InputTypes.TARGET]
real_cols = list(set(v.name for v in config.features if v.feature_embed_type == DataTypes.CONTINUOUS).difference(set(tgt_cols)))
real_scalers = {}
tgt_scalers = {}
def apply_scalers(df, name=None):
if name is None:
name = df.name
mask = df.applymap(lambda x: True if x == config.missing_data_label else False) if hasattr(config, 'missing_data_label') else None
df[real_cols] = real_scalers[name].transform(df[real_cols])
if mask is not None and mask.values.any():
# mark values that were missing in the raw data with a large sentinel after scaling
df[real_cols] = df[real_cols].mask(mask[real_cols], 10**9)
df[tgt_cols] = tgt_scalers[name].transform(df[tgt_cols])
return df
if config.scale_per_id:
for identifier, sliced in train.groupby(id_col):
data = sliced[real_cols]
data, _ = impute(data, config)
real_scalers[identifier] = sklearn.preprocessing.StandardScaler().fit(data)
# XXX We should probably remove examples that contain NaN as a target
target = sliced[tgt_cols]
tgt_scalers[identifier] = sklearn.preprocessing.StandardScaler().fit(target)
train = train.groupby(id_col).apply(apply_scalers)
# For valid and testing leave only timeseries previously present in train subset
# XXX for proper data science we should consider encoding unseen timeseries as a special case, not throwing them away
valid = valid.loc[valid[id_col].isin(real_scalers.keys())]
valid = valid.groupby(id_col).apply(apply_scalers)
test = test.loc[test[id_col].isin(real_scalers.keys())]
test = test.groupby(id_col).apply(apply_scalers)
else:
data, _ = impute(train[real_cols], config)
real_scalers[''] = sklearn.preprocessing.StandardScaler().fit(data)
tgt_scalers[''] = sklearn.preprocessing.StandardScaler().fit(train[tgt_cols])
train = apply_scalers(train, name='')
valid = apply_scalers(valid, name='')
test = apply_scalers(test, name='')
return train, valid, test, real_scalers, tgt_scalers
def encode_categoricals(train, valid, test, config):
cat_encodings = {}
cat_cols = list(set(v.name for v in config.features if v.feature_embed_type == DataTypes.CATEGORICAL and v.feature_type != InputTypes.ID))
num_classes = [] #XXX Maybe we should modify config based on this value? Or send a warning?
# For Tensor Core performance reasons we might want num_classes[i] to be divisible by 8
# Train categorical encoders
for c in cat_cols:
if config.missing_cat_data_strategy == 'special_token':
#XXX this will probably require some data augmentation
unique = train[c].unique()
# categories not seen during training are mapped to the special '<UNK>' token
valid.loc[~valid[c].isin(unique), c] = '<UNK>'
test.loc[~test[c].isin(unique), c] = '<UNK>'
if config.missing_cat_data_strategy == 'encode_all' or \
config.missing_cat_data_strategy == 'special_token':
srs = pd.concat([train[c], valid[c], test[c]]).apply(str)
cat_encodings[c] = sklearn.preprocessing.LabelEncoder().fit(srs.values)
elif config.missing_cat_data_strategy == 'drop':
# TODO: implement this. In addition to dropping rows this has to split specific time series in chunks
# to prevent data from having temporal gaps
pass
num_classes.append(srs.nunique())
print('Categorical variables encodings lens: ', num_classes)
for split in [train, valid, test]:
for c in cat_cols:
srs = split[c].apply(str)
split[c] = srs
split.loc[:,c] = cat_encodings[c].transform(srs)
return cat_encodings
def preprocess(src_path, dst_path, config):
df = pd.read_csv(src_path, index_col=0)
for c in config.features:
if c.feature_embed_type == DataTypes.DATE:
df[c.name] = pd.to_datetime(df[c.name])
# Leave only columns relevant to preprocessing
relevant_columns = list(set([f.name for f in config.features] + [config.time_ids]))
df = df[relevant_columns]
id_col, id_encoders = flatten_ids(df, config)
df = df.reindex(sorted(df.columns), axis=1)
train, valid, test = get_dataset_splits(df, config)
# Length filter the data (all timeseries shorter than example len will be dropped)
#for df in [train, valid, test]:
# df.groupby(id_col).filter(lambda x: len(x) >= config.example_length)
train = pd.concat([x[1] for x in train.groupby(id_col) if len(x[1]) >= config.example_length])
valid = pd.concat([x[1] for x in valid.groupby(id_col) if len(x[1]) >= config.example_length])
test = pd.concat([x[1] for x in test.groupby(id_col) if len(x[1]) >= config.example_length])
train, valid, test, real_scalers, tgt_scalers = normalize_reals(train, valid, test, config, id_col)
cat_encodings = encode_categoricals(train, valid, test, config)
os.makedirs(dst_path, exist_ok=True)
train.to_csv(os.path.join(dst_path, 'train.csv'))
valid.to_csv(os.path.join(dst_path, 'valid.csv'))
test.to_csv(os.path.join(dst_path, 'test.csv'))
# Save relevant columns in binary form for faster dataloading
# IMPORTANT: We always expect id to be a single column identifying the complete timeseries
# We also expect a copy of id in form of static categorical input!!!
col_names = [id_col] + [x.name for x in config.features if x.feature_embed_type != DataTypes.DATE and x.feature_type != InputTypes.ID]
grouped_train = [x[1][col_names].values.astype(np.float32).view(dtype=np.int32) for x in train.groupby(id_col)]
grouped_valid = [x[1][col_names].values.astype(np.float32).view(dtype=np.int32) for x in valid.groupby(id_col)]
grouped_test = [x[1][col_names].values.astype(np.float32).view(dtype=np.int32) for x in test.groupby(id_col)]
pickle.dump(grouped_train, open(os.path.join(dst_path, 'train.bin'), 'wb'))
pickle.dump(grouped_valid, open(os.path.join(dst_path, 'valid.bin'), 'wb'))
pickle.dump(grouped_test, open(os.path.join(dst_path, 'test.bin'), 'wb'))
with open(os.path.join(dst_path, 'real_scalers.bin'), 'wb') as f:
pickle.dump(real_scalers, f)
with open(os.path.join(dst_path, 'tgt_scalers.bin'), 'wb') as f:
pickle.dump(tgt_scalers, f)
with open(os.path.join(dst_path, 'cat_encodings.bin'), 'wb') as f:
pickle.dump(cat_encodings, f)
with open(os.path.join(dst_path, 'id_encoders.bin'), 'wb') as f:
pickle.dump(id_encoders, f)
def sample_data(dataset, num_samples):
if num_samples < 0:
return dataset
else:
return torch.utils.data.Subset(dataset, np.random.choice(np.arange(len(dataset)), size=num_samples, replace=False))
def standarize_electricity(path):
"""Code taken from https://github.com/google-research/google-research/blob/master/tft/script_download_data.py"""
df = pd.read_csv(os.path.join(path, 'LD2011_2014.txt'), index_col=0, sep=';', decimal=',')
df.index = pd.to_datetime(df.index)
df.sort_index(inplace=True)
# Used to determine the start and end dates of a series
output = df.resample('1h').mean().replace(0., np.nan)
earliest_time = output.index.min()
df_list = []
for label in output:
print('Processing {}'.format(label))
srs = output[label]
start_date = min(srs.fillna(method='ffill').dropna().index)
end_date = max(srs.fillna(method='bfill').dropna().index)
active_range = (srs.index >= start_date) & (srs.index <= end_date)
srs = srs[active_range].fillna(0.)
tmp = pd.DataFrame({'power_usage': srs})
date = tmp.index
tmp['t'] = (date - earliest_time).seconds / 60 / 60 + (
date - earliest_time).days * 24
tmp['days_from_start'] = (date - earliest_time).days
tmp['categorical_id'] = label
tmp['date'] = date
tmp['id'] = label
tmp['hour'] = date.hour
tmp['day'] = date.day
tmp['day_of_week'] = date.dayofweek
tmp['month'] = date.month
df_list.append(tmp)
output = pd.concat(df_list, axis=0, join='outer').reset_index(drop=True)
output['categorical_id'] = output['id'].copy()
output['hours_from_start'] = output['t']
output['categorical_day_of_week'] = output['day_of_week'].copy()
output['categorical_hour'] = output['hour'].copy()
output.to_csv(os.path.join(path, 'standarized.csv'))
def standarize_volatility(path):
df = pd.read_csv(os.path.join(path, 'oxfordmanrealizedvolatilityindices.csv'), index_col=0) # no explicit index
# Adds additional date/day fields
idx = [str(s).split('+')[0] for s in df.index
] # ignore timezones, we don't need them
dates = pd.to_datetime(idx)
df['date'] = dates
df['days_from_start'] = (dates - pd.datetime(2000, 1, 3)).days
df['day_of_week'] = dates.dayofweek
df['day_of_month'] = dates.day
df['week_of_year'] = dates.weekofyear
df['month'] = dates.month
df['year'] = dates.year
df['categorical_id'] = df['Symbol'].copy()
# Processes log volatility
vol = df['rv5_ss'].copy()
vol.loc[vol == 0.] = np.nan
df['log_vol'] = np.log(vol)
# Adds static information
symbol_region_mapping = {
'.AEX': 'EMEA',
'.AORD': 'APAC',
'.BFX': 'EMEA',
'.BSESN': 'APAC',
'.BVLG': 'EMEA',
'.BVSP': 'AMER',
'.DJI': 'AMER',
'.FCHI': 'EMEA',
'.FTMIB': 'EMEA',
'.FTSE': 'EMEA',
'.GDAXI': 'EMEA',
'.GSPTSE': 'AMER',
'.HSI': 'APAC',
'.IBEX': 'EMEA',
'.IXIC': 'AMER',
'.KS11': 'APAC',
'.KSE': 'APAC',
'.MXX': 'AMER',
'.N225': 'APAC ',
'.NSEI': 'APAC',
'.OMXC20': 'EMEA',
'.OMXHPI': 'EMEA',
'.OMXSPI': 'EMEA',
'.OSEAX': 'EMEA',
'.RUT': 'EMEA',
'.SMSI': 'EMEA',
'.SPX': 'AMER',
'.SSEC': 'APAC',
'.SSMI': 'EMEA',
'.STI': 'APAC',
'.STOXX50E': 'EMEA'
}
df['Region'] = df['Symbol'].apply(lambda k: symbol_region_mapping[k])
# Performs final processing
output_df_list = []
for grp in df.groupby('Symbol'):
sliced = grp[1].copy()
sliced.sort_values('days_from_start', inplace=True)
# Impute log volatility values
sliced['log_vol'].fillna(method='ffill', inplace=True)
sliced.dropna()
output_df_list.append(sliced)
df = pd.concat(output_df_list, axis=0)
df.to_csv(os.path.join(path, 'standarized.csv'))
def standarize_traffic(path):
def process_list(s, variable_type=int, delimiter=None):
"""Parses a line in the PEMS format to a list."""
if delimiter is None:
l = [
variable_type(i) for i in s.replace('[', '').replace(']', '').split()
]
else:
l = [
variable_type(i)
for i in s.replace('[', '').replace(']', '').split(delimiter)
]
return l
def read_single_list(filename):
"""Returns single list from a file in the PEMS-custom format."""
with open(os.path.join(path, filename), 'r') as dat:
l = process_list(dat.readlines()[0])
return l
def read_matrix(filename):
"""Returns a matrix from a file in the PEMS-custom format."""
array_list = []
with open(os.path.join(path, filename), 'r') as dat:
lines = dat.readlines()
for i, line in enumerate(lines):
if (i + 1) % 50 == 0:
print('Completed {} of {} rows for {}'.format(i + 1, len(lines),
filename))
array = [
process_list(row_split, variable_type=float, delimiter=None)
for row_split in process_list(
line, variable_type=str, delimiter=';')
]
array_list.append(array)
return array_list
shuffle_order = np.array(read_single_list('randperm')) - 1 # index from 0
train_dayofweek = read_single_list('PEMS_trainlabels')
train_tensor = read_matrix('PEMS_train')
test_dayofweek = read_single_list('PEMS_testlabels')
test_tensor = read_matrix('PEMS_test')
# Invert the shuffle-order permutation
print('Shuffling')
inverse_mapping = {
new_location: previous_location
for previous_location, new_location in enumerate(shuffle_order)
}
reverse_shuffle_order = np.array([
inverse_mapping[new_location]
for new_location, _ in enumerate(shuffle_order)
])
# Group and reorder based on permutation matrix
print('Reordering')
day_of_week = np.array(train_dayofweek + test_dayofweek)
combined_tensor = np.array(train_tensor + test_tensor)
day_of_week = day_of_week[reverse_shuffle_order]
combined_tensor = combined_tensor[reverse_shuffle_order]
# Put everything back into a dataframe
print('Parsing as dataframe')
labels = ['traj_{}'.format(i) for i in read_single_list('stations_list')]
hourly_list = []
for day, day_matrix in enumerate(combined_tensor):
# Hourly data
hourly = pd.DataFrame(day_matrix.T, columns=labels)
hourly['hour_on_day'] = [int(i / 6) for i in hourly.index
] # sampled at 10 min intervals
if hourly['hour_on_day'].max() > 23 or hourly['hour_on_day'].min() < 0:
raise ValueError('Invalid hour! {}-{}'.format(
hourly['hour_on_day'].min(), hourly['hour_on_day'].max()))
hourly = hourly.groupby('hour_on_day', as_index=True).mean()[labels]
hourly['sensor_day'] = day
hourly['time_on_day'] = hourly.index
hourly['day_of_week'] = day_of_week[day]
hourly_list.append(hourly)
hourly_frame = pd.concat(hourly_list, axis=0, ignore_index=True, sort=False)
# Flatten such that each entity uses one row in the dataframe
store_columns = [c for c in hourly_frame.columns if 'traj' in c]
other_columns = [c for c in hourly_frame.columns if 'traj' not in c]
flat_df = pd.DataFrame(columns=['values', 'prev_values', 'next_values'] +
other_columns + ['id'])
for store in store_columns:
print('Processing {}'.format(store))
sliced = hourly_frame[[store] + other_columns].copy()
sliced.columns = ['values'] + other_columns
sliced['id'] = int(store.replace('traj_', ''))
# Sort by Sensor-date-time
key = sliced['id'].apply(str) \
+ sliced['sensor_day'].apply(lambda x: '_{:03d}'.format(x)) \
+ sliced['time_on_day'].apply(lambda x: '_{:03d}'.format(x))
sliced = sliced.set_index(key).sort_index()
sliced['values'] = sliced['values'].fillna(method='ffill')
sliced['prev_values'] = sliced['values'].shift(1)
sliced['next_values'] = sliced['values'].shift(-1)
flat_df = flat_df.append(sliced.dropna(), ignore_index=True, sort=False)
# Filter to match range used by other academic papers
index = flat_df['sensor_day']
flat_df = flat_df[index < 173].copy()
# Creating columns for categorical inputs
flat_df['categorical_id'] = flat_df['id'].copy()
flat_df['hours_from_start'] = flat_df['time_on_day'] \
+ flat_df['sensor_day']*24.
flat_df['categorical_day_of_week'] = flat_df['day_of_week'].copy()
flat_df['categorical_time_on_day'] = flat_df['time_on_day'].copy()
flat_df.to_csv(os.path.join(path, 'standarized.csv'))
# XXX needs rework
def standarize_favorita(data_folder):
import gc
# Extract only a subset of data to save/process for efficiency
start_date = pd.datetime(2015, 1, 1)
end_date = pd.datetime(2016, 6, 1)
print('Regenerating data...')
# load temporal data
temporal = pd.read_csv(os.path.join(data_folder, 'train.csv'), index_col=0)
store_info = pd.read_csv(os.path.join(data_folder, 'stores.csv'), index_col=0)
oil = pd.read_csv(
os.path.join(data_folder, 'oil.csv'), index_col=0).iloc[:, 0]
holidays = pd.read_csv(os.path.join(data_folder, 'holidays_events.csv'))
items = pd.read_csv(os.path.join(data_folder, 'items.csv'), index_col=0)
transactions = pd.read_csv(os.path.join(data_folder, 'transactions.csv'))
# Restrict the temporal data to the selected date range
temporal['date'] = pd.to_datetime(temporal['date'])
# Filter dates to reduce storage space requirements
if start_date is not None:
temporal = temporal[(temporal['date'] >= start_date)]
if end_date is not None:
temporal = temporal[(temporal['date'] < end_date)]
dates = temporal['date'].unique()
# Add trajectory identifier
temporal['traj_id'] = temporal['store_nbr'].apply(
str) + '_' + temporal['item_nbr'].apply(str)
temporal['unique_id'] = temporal['traj_id'] + '_' + temporal['date'].apply(
str)
# Remove all IDs with negative returns
print('Removing returns data')
min_returns = temporal['unit_sales'].groupby(temporal['traj_id']).min()
valid_ids = set(min_returns[min_returns >= 0].index)
selector = temporal['traj_id'].apply(lambda traj_id: traj_id in valid_ids)
new_temporal = temporal[selector].copy()
del temporal
gc.collect()
temporal = new_temporal
temporal['open'] = 1
# Resampling
print('Resampling to regular grid')
resampled_dfs = []
for traj_id, raw_sub_df in temporal.groupby('traj_id'):
print('Resampling', traj_id)
sub_df = raw_sub_df.set_index('date', drop=True).copy()
sub_df = sub_df.resample('1d').last()
sub_df['date'] = sub_df.index
sub_df[['store_nbr', 'item_nbr', 'onpromotion']] \
= sub_df[['store_nbr', 'item_nbr', 'onpromotion']].fillna(method='ffill')
sub_df['open'] = sub_df['open'].fillna(
0) # flag where sales data is unknown
sub_df['log_sales'] = np.log(sub_df['unit_sales'])
resampled_dfs.append(sub_df.reset_index(drop=True))
new_temporal = pd.concat(resampled_dfs, axis=0)
del temporal
gc.collect()
temporal = new_temporal
print('Adding oil')
oil.name = 'oil'
oil.index = pd.to_datetime(oil.index)
#XXX the lines below match the value of the oil on a given date with the rest of the timeseries.
# Missing values in the oil series are copied from the preceding index. Then the oil series is joined with
# temporal. There are some dates present in temporal which aren't present in oil, for which
# the oil value is substituted with -1. WHY?!
#TODO: check how many nans there are after the first step. Previously the oil series was extended by the dates
# present in the dates variable with nan values, which were then forward filled.
# This behavior is no longer supported by pandas, so we changed to the DataFrame.isin method.
# This leaves us with more nans after the first step than previously. To achieve the previous behavior
# we would have to join the series before filling nans.
temporal = temporal.join(
#oil.loc[oil.index.isin(dates)].fillna(method='ffill'), on='date', how='left')
oil.loc[oil.index.isin(dates)], on='date', how='left')
temporal['oil'] = temporal['oil'].fillna(method='ffill')
temporal['oil'] = temporal['oil'].fillna(-1)
print('Adding store info')
temporal = temporal.join(store_info, on='store_nbr', how='left')
print('Adding item info')
temporal = temporal.join(items, on='item_nbr', how='left')
transactions['date'] = pd.to_datetime(transactions['date'])
temporal = temporal.merge(
transactions,
left_on=['date', 'store_nbr'],
right_on=['date', 'store_nbr'],
how='left')
temporal['transactions'] = temporal['transactions'].fillna(-1)
# Additional date info
temporal['day_of_week'] = pd.to_datetime(temporal['date'].values).dayofweek
temporal['day_of_month'] = pd.to_datetime(temporal['date'].values).day
temporal['month'] = pd.to_datetime(temporal['date'].values).month
# Add holiday info
print('Adding holidays')
holiday_subset = holidays[holidays['transferred'].apply(
lambda x: not x)].copy()
holiday_subset.columns = [
s if s != 'type' else 'holiday_type' for s in holiday_subset.columns
]
holiday_subset['date'] = pd.to_datetime(holiday_subset['date'])
local_holidays = holiday_subset[holiday_subset['locale'] == 'Local']
regional_holidays = holiday_subset[holiday_subset['locale'] == 'Regional']
national_holidays = holiday_subset[holiday_subset['locale'] == 'National']
temporal['national_hol'] = temporal.merge(
national_holidays, left_on=['date'], right_on=['date'],
how='left')['description'].fillna('')
temporal['regional_hol'] = temporal.merge(
regional_holidays,
left_on=['state', 'date'],
right_on=['locale_name', 'date'],
how='left')['description'].fillna('')
temporal['local_hol'] = temporal.merge(
local_holidays,
left_on=['city', 'date'],
right_on=['locale_name', 'date'],
how='left')['description'].fillna('')
temporal.sort_values('unique_id', inplace=True)
# Transform date to integer index
start_date = pd.to_datetime(min(temporal['date']))
dates = temporal['date'].apply(pd.to_datetime)
temporal['days_from_start'] = (dates - start_date).dt.days
temporal['categorical_id'] = temporal['traj_id'].copy()
print('Saving processed file to {}'.format(os.path.join(data_folder, 'standarized.csv')))
temporal.to_csv(os.path.join(data_folder, 'standarized.csv'))
|
TensorFlow/Detection/SSD/models/research/object_detection/g3doc | g3doc | exporting_models | # Exporting a trained model for inference
After your model has been trained, you should export it to a Tensorflow
graph proto. A checkpoint will typically consist of three files:
* model.ckpt-${CHECKPOINT_NUMBER}.data-00000-of-00001
* model.ckpt-${CHECKPOINT_NUMBER}.index
* model.ckpt-${CHECKPOINT_NUMBER}.meta
After you've identified a candidate checkpoint to export, run the following
command from tensorflow/models/research:
``` bash
# From tensorflow/models/research/
INPUT_TYPE=image_tensor
PIPELINE_CONFIG_PATH={path to pipeline config file}
TRAINED_CKPT_PREFIX={path to model.ckpt}
EXPORT_DIR={path to folder that will be used for export}
python object_detection/export_inference_graph.py \
--input_type=${INPUT_TYPE} \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
--trained_checkpoint_prefix=${TRAINED_CKPT_PREFIX} \
--output_directory=${EXPORT_DIR}
```
NOTE: We are configuring our exported model to ingest 4-D image tensors. We can
also configure the exported model to take encoded images or serialized
`tf.Example`s.
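For example, to export a model that ingests serialized `tf.Example` protos instead of
raw image tensors, set a different input type (the other values accepted by
`--input_type` are `encoded_image_string_tensor` and `tf_example`):
``` bash
# From tensorflow/models/research/
INPUT_TYPE=tf_example
python object_detection/export_inference_graph.py \
--input_type=${INPUT_TYPE} \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
--trained_checkpoint_prefix=${TRAINED_CKPT_PREFIX} \
--output_directory=${EXPORT_DIR}
```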
After export, you should see the directory ${EXPORT_DIR} containing the following:
* saved_model/, a directory containing the saved model format of the exported model
* frozen_inference_graph.pb, the frozen graph format of the exported model
* model.ckpt.*, the model checkpoints used for exporting
* checkpoint, a file specifying to restore included checkpoint files
* pipeline.config, pipeline config file for the exported model
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/utils | utils | checkpointing | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import os
def get_variable_path(checkpoint_path, name):
tokens = name.split('/')
tokens = [t for t in tokens if 'model_parallel' not in t and 'data_parallel' not in t]
name = '_'.join(tokens)
name = name.replace(':', '_')
filename = name + '.npy'
return os.path.join(checkpoint_path, filename)
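# Illustrative example (sketch): parallelism scope tokens are dropped, the remaining
# tokens are joined with underscores and ':' is replaced, e.g.
#   get_variable_path('/ckpt', 'model/data_parallel/dense_1/kernel:0')
#   returns '/ckpt/model_dense_1_kernel_0.npy'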
|
PyTorch/SpeechSynthesis/FastPitch/common/text/unidecoder | unidecoder | homoglyphs | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
#
# Copyright (c) 2015 Rob Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt
#
homoglyphs = {
' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'],
'!': ['ǃ', 'ⵑ', '!'],
'$': ['$'],
'%': ['%'],
'&': ['ꝸ', '&'],
"'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'],
'"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'],
'(': ['❨', '❲', '〔', '﴾', '(', '['],
')': ['❩', '❳', '〕', '﴿', ')', ']'],
'*': ['٭', '⁎', '∗', '*', '𐌟'],
'+': ['᛭', '➕', '+', '𐊛'],
',': ['¸', '؍', '٫', '‚', 'ꓹ', ','],
'-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'],
'.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'],
'/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'],
'2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'],
'3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'],
'4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'],
'5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'],
'6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'],
'7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'],
'8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'],
'9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'],
':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'],
';': [';', ';'],
'<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'],
'=': ['᐀', '⹀', '゠', '꓿', '='],
'>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'],
'?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 'ꛫ', '?'],
'@': ['@'],
'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'],
'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'],
'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'],
'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'],
'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'],
'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'],
'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'],
'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'],
'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'],
'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'],
'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'],
'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'],
'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'],
'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'],
'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'],
'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'],
'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'],
'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'],
'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'],
'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'],
'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'],
'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'],
'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'],
'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'],
'\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'],
'^': ['˄', 'ˆ'],
'_': ['ߺ', '﹍', '﹎', '﹏', '_'],
'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', '𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'],
'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'],
'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'],
'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'],
'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'],
'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'],
'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'],
'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'],
'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'],
'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'],
'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'],
'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'],
'm': ['m'],
'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'],
'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'],
'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'],
'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'],
'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'],
's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'],
't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'],
'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'],
'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'],
'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'],
'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'],
'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'],
'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', '𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'],
'{': ['❴', '{', '𝄔'],
'}': ['❵', '}'],
'~': ['˜', '῀', '⁓', '∼'],
}
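# Minimal usage sketch (not part of the original mapping): invert the table so every
# confusable character maps back to its ASCII equivalent, then normalize a string.
#   replacements = {alt: ascii_char for ascii_char, alts in homoglyphs.items() for alt in alts}
#   normalized = ''.join(replacements.get(c, c) for c in text)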
|
TensorFlow/Recommendation/WideAndDeep/utils/hooks | hooks | benchmark_hooks | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dllogger
import tensorflow as tf
import time
from .training_hooks import MeanAccumulator
__all__ = ['BenchmarkLoggingHook']
class BenchmarkLoggingHook(tf.train.SessionRunHook):
def __init__(self, global_batch_size, warmup_steps=100):
self.warmup_steps = warmup_steps
self.global_batch_size = global_batch_size
self.current_step = 0
self.t0 = None
self.mean_throughput = MeanAccumulator()
def before_run(self, run_context):
self.t0 = time.time()
def after_run(self, run_context, run_values):
batch_time = time.time() - self.t0
samplesps = self.global_batch_size / batch_time
if self.current_step >= self.warmup_steps:
self.mean_throughput.consume(samplesps)
dllogger.log(data={"samplesps": samplesps}, step=(0, self.current_step))
self.current_step += 1
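# Usage sketch (assumed Estimator setup; names are illustrative): attach the hook so
# per-step throughput is logged via dllogger once the warmup steps have passed.
#   hook = BenchmarkLoggingHook(global_batch_size=global_batch_size, warmup_steps=100)
#   estimator.train(input_fn=train_input_fn, hooks=[hook])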
|
PyTorch/Recommendation/NCF | NCF | convert | # Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import pandas as pd
from load import implicit_load
from feature_spec import FeatureSpec
from neumf_constants import USER_CHANNEL_NAME, ITEM_CHANNEL_NAME, LABEL_CHANNEL_NAME, TEST_SAMPLES_PER_SERIES
import torch
import os
import tqdm
TEST_1 = 'test_data_1.pt'
TEST_0 = 'test_data_0.pt'
TRAIN_1 = 'train_data_1.pt'
TRAIN_0 = 'train_data_0.pt'
USER_COLUMN = 'user_id'
ITEM_COLUMN = 'item_id'
def parse_args():
parser = ArgumentParser()
parser.add_argument('--path', type=str, default='/data/ml-20m/ratings.csv',
help='Path to reviews CSV file from MovieLens')
parser.add_argument('--output', type=str, default='/data',
help='Output directory for train and test files')
parser.add_argument('--valid_negative', type=int, default=100,
help='Number of negative samples for each positive test example')
parser.add_argument('--seed', '-s', type=int, default=1,
help='Manually set random seed for torch')
return parser.parse_args()
class _TestNegSampler:
def __init__(self, train_ratings, nb_neg):
self.nb_neg = nb_neg
self.nb_users = int(train_ratings[:, 0].max()) + 1
self.nb_items = int(train_ratings[:, 1].max()) + 1
# compute unique ids so (user, item) pairs can be stored in a hash set for fast lookup
ids = (train_ratings[:, 0] * self.nb_items) + train_ratings[:, 1]
self.set = set(ids)
def generate(self, batch_size=128 * 1024):
users = torch.arange(0, self.nb_users).reshape([1, -1]).repeat([self.nb_neg, 1]).transpose(0, 1).reshape(-1)
items = [-1] * len(users)
random_items = torch.LongTensor(batch_size).random_(0, self.nb_items).tolist()
print('Generating validation negatives...')
for idx, u in enumerate(tqdm.tqdm(users.tolist())):
if not random_items:
random_items = torch.LongTensor(batch_size).random_(0, self.nb_items).tolist()
j = random_items.pop()
while u * self.nb_items + j in self.set:
if not random_items:
random_items = torch.LongTensor(batch_size).random_(0, self.nb_items).tolist()
j = random_items.pop()
items[idx] = j
items = torch.LongTensor(items)
return items
def save_feature_spec(user_cardinality, item_cardinality, dtypes, test_negative_samples, output_path,
user_feature_name='user',
item_feature_name='item',
label_feature_name='label'):
feature_spec = {
user_feature_name: {
'dtype': dtypes[user_feature_name],
'cardinality': int(user_cardinality)
},
item_feature_name: {
'dtype': dtypes[item_feature_name],
'cardinality': int(item_cardinality)
},
label_feature_name: {
'dtype': dtypes[label_feature_name],
}
}
metadata = {
TEST_SAMPLES_PER_SERIES: test_negative_samples + 1
}
train_mapping = [
{
'type': 'torch_tensor',
'features': [
user_feature_name,
item_feature_name
],
'files': [TRAIN_0]
},
{
'type': 'torch_tensor',
'features': [
label_feature_name
],
'files': [TRAIN_1]
}
]
test_mapping = [
{
'type': 'torch_tensor',
'features': [
user_feature_name,
item_feature_name
],
'files': [TEST_0],
},
{
'type': 'torch_tensor',
'features': [
label_feature_name
],
'files': [TEST_1],
}
]
channel_spec = {
USER_CHANNEL_NAME: [user_feature_name],
ITEM_CHANNEL_NAME: [item_feature_name],
LABEL_CHANNEL_NAME: [label_feature_name]
}
source_spec = {'train': train_mapping, 'test': test_mapping}
feature_spec = FeatureSpec(feature_spec=feature_spec, metadata=metadata, source_spec=source_spec,
channel_spec=channel_spec, base_directory="")
feature_spec.to_yaml(output_path=output_path)
def main():
args = parse_args()
if args.seed is not None:
torch.manual_seed(args.seed)
print("Loading raw data from {}".format(args.path))
df = implicit_load(args.path, sort=False)
print("Mapping original user and item IDs to new sequential IDs")
df[USER_COLUMN] = pd.factorize(df[USER_COLUMN])[0]
df[ITEM_COLUMN] = pd.factorize(df[ITEM_COLUMN])[0]
user_cardinality = df[USER_COLUMN].max() + 1
item_cardinality = df[ITEM_COLUMN].max() + 1
# Need to sort before popping to get last item
df.sort_values(by='timestamp', inplace=True)
# clean up data
del df['rating'], df['timestamp']
df = df.drop_duplicates() # assuming it keeps order
# Test set is the last interaction for a given user
grouped_sorted = df.groupby(USER_COLUMN, group_keys=False)
test_data = grouped_sorted.tail(1).sort_values(by=USER_COLUMN)
# Train set is all interactions but the last one
train_data = grouped_sorted.apply(lambda x: x.iloc[:-1])
sampler = _TestNegSampler(train_data.values, args.valid_negative)
test_negs = sampler.generate().cuda()
test_negs = test_negs.reshape(-1, args.valid_negative)
# Reshape train set into user,item,label tabular and save
train_ratings = torch.from_numpy(train_data.values).cuda()
train_labels = torch.ones_like(train_ratings[:, 0:1], dtype=torch.float32)
torch.save(train_ratings, os.path.join(args.output, TRAIN_0))
torch.save(train_labels, os.path.join(args.output, TRAIN_1))
# Reshape test set into user,item,label tabular and save
# All users have the same number of items, items for a given user appear consecutively
test_ratings = torch.from_numpy(test_data.values).cuda()
test_users_pos = test_ratings[:, 0:1] # slicing instead of indexing to keep dimensions
test_items_pos = test_ratings[:, 1:2]
test_users = test_users_pos.repeat_interleave(args.valid_negative + 1, dim=0)
test_items = torch.cat((test_items_pos.reshape(-1, 1), test_negs), dim=1).reshape(-1, 1)
positive_labels = torch.ones_like(test_users_pos, dtype=torch.float32)
negative_labels = torch.zeros_like(test_users_pos, dtype=torch.float32).repeat(1, args.valid_negative)
test_labels = torch.cat((positive_labels, negative_labels), dim=1).reshape(-1, 1)
dtypes = {'user': str(test_users.dtype), 'item': str(test_items.dtype), 'label': str(test_labels.dtype)}
test_tensor = torch.cat((test_users, test_items), dim=1)
torch.save(test_tensor, os.path.join(args.output, TEST_0))
torch.save(test_labels, os.path.join(args.output, TEST_1))
save_feature_spec(user_cardinality=user_cardinality, item_cardinality=item_cardinality, dtypes=dtypes,
test_negative_samples=args.valid_negative, output_path=args.output + '/feature_spec.yaml')
if __name__ == '__main__':
main()
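# Example invocation (paths are illustrative; defaults are defined in parse_args):
#   python convert.py --path /data/ml-20m/ratings.csv --output /data --valid_negative 100 --seed 1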
|
TensorFlow/Classification/ConvNets/triton/scripts/docker | docker | build | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
docker build -t resnet50 . -f Dockerfile.inference
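# The resulting image can then be started with, for example:
#   docker run --gpus all --rm -it resnet50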
|
TensorFlow/Segmentation/VNet/hooks | hooks | train_hook | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import dllogger as DLLogger
class TrainHook(tf.estimator.SessionRunHook):
def __init__(self, log_every, logger):
self._log_every = log_every
self._step = 0
self._logger = logger
def before_run(self, run_context):
run_args = tf.train.SessionRunArgs(
fetches=[
'vnet/loss/total_loss_ref:0',
]
)
return run_args
def after_run(self,
run_context,
run_values):
if self._step % self._log_every == 0:
self._logger.log(step=(self._step,), data={'total_loss': str(run_values.results[0])})
self._step += 1
def end(self, session):
self._logger.flush()
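# Usage sketch (assumed setup): pass the hook to an Estimator together with an
# initialized dllogger backend, so the total loss is emitted every `log_every` steps.
#   hooks = [TrainHook(log_every=100, logger=DLLogger)]
#   estimator.train(input_fn=train_input_fn, hooks=hooks)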
|
PyTorch/LanguageModeling/BERT/triton/runner | runner | requirements | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
tqdm>=4.44.1
docker==5.0.0
colorama==0.4.4
pytz==2021.1
coloredlogs==15.0.1
py-cpuinfo==8.0.0
psutil==5.8.0
retrying>=1.3.3 |
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/perf_analyzer | perf_analyzer | __init__ | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .perf_analyzer import PerfAnalyzer # noqa: F401
from .perf_config import PerfAnalyzerConfig # noqa: F401
|
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules | modules | fairseq_dropout | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Optional
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class FairseqDropout(nn.Module):
def __init__(self, p, module_name=None):
super().__init__()
self.p = p
self.module_name = module_name
self.apply_during_inference = False
def forward(self, x, inplace: bool = False):
if self.p > 0 and (self.training or self.apply_during_inference):
return F.dropout(x, p=self.p, training=True, inplace=inplace)
else:
return x
def make_generation_fast_(
self,
name: str,
retain_dropout: bool = False,
retain_dropout_modules: Optional[List[str]] = None,
**kwargs
):
if retain_dropout:
if retain_dropout_modules is not None and self.module_name is None:
logger.warning(
"Cannot enable dropout during inference for module {} "
"because module_name was not set".format(name)
)
elif (
retain_dropout_modules is None # if None, apply to all modules
or self.module_name in retain_dropout_modules
):
logger.info(
"Enabling dropout during inference for module: {}".format(name)
)
self.apply_during_inference = True
else:
logger.info("Disabling dropout for module: {}".format(name))
|
TensorFlow/LanguageModeling/BERT | BERT | tokenization_test | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import tokenization
import six
import tensorflow as tf
class TokenizationTest(tf.test.TestCase):
def test_full_tokenizer(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing", ","
]
with tempfile.NamedTemporaryFile(mode="w", delete=False) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
vocab_file = vocab_writer.name
tokenizer = tokenization.FullTokenizer(vocab_file)
os.unlink(vocab_file)
tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertAllEqual(
tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def test_chinese(self):
tokenizer = tokenization.BasicTokenizer()
self.assertAllEqual(
tokenizer.tokenize(u"ah\u535A\u63A8zz"),
[u"ah", u"\u535A", u"\u63A8", u"zz"])
def test_basic_tokenizer_lower(self):
tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["hello", "!", "how", "are", "you", "?"])
self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = tokenization.BasicTokenizer(do_lower_case=False)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["HeLLo", "!", "how", "Are", "yoU", "?"])
def test_wordpiece_tokenizer(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing"
]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)
self.assertAllEqual(tokenizer.tokenize(""), [])
self.assertAllEqual(
tokenizer.tokenize("unwanted running"),
["un", "##want", "##ed", "runn", "##ing"])
self.assertAllEqual(
tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_convert_tokens_to_ids(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing"
]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
self.assertAllEqual(
tokenization.convert_tokens_to_ids(
vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
def test_is_whitespace(self):
self.assertTrue(tokenization._is_whitespace(u" "))
self.assertTrue(tokenization._is_whitespace(u"\t"))
self.assertTrue(tokenization._is_whitespace(u"\r"))
self.assertTrue(tokenization._is_whitespace(u"\n"))
self.assertTrue(tokenization._is_whitespace(u"\u00A0"))
self.assertFalse(tokenization._is_whitespace(u"A"))
self.assertFalse(tokenization._is_whitespace(u"-"))
def test_is_control(self):
self.assertTrue(tokenization._is_control(u"\u0005"))
self.assertFalse(tokenization._is_control(u"A"))
self.assertFalse(tokenization._is_control(u" "))
self.assertFalse(tokenization._is_control(u"\t"))
self.assertFalse(tokenization._is_control(u"\r"))
self.assertFalse(tokenization._is_control(u"\U0001F4A9"))
def test_is_punctuation(self):
self.assertTrue(tokenization._is_punctuation(u"-"))
self.assertTrue(tokenization._is_punctuation(u"$"))
self.assertTrue(tokenization._is_punctuation(u"`"))
self.assertTrue(tokenization._is_punctuation(u"."))
self.assertFalse(tokenization._is_punctuation(u"A"))
self.assertFalse(tokenization._is_punctuation(u" "))
if __name__ == "__main__":
tf.test.main()
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit | deployment_toolkit | report | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
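# Usage sketch (illustrative data): sort, format and print a small result set.
#   results = sort_results([{"batch_size": 8, "latency_ms": 1.9}, {"batch_size": 1, "latency_ms": 0.7}])
#   show_results(format_data(results))
#   save_results("report.csv", results, formatted=True)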
|
TensorFlow/LanguageModeling/Transformer-XL/tf | tf | data_utils | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from functools import partial
from collections import Counter, OrderedDict
import pickle
import json
import multiprocessing as mp
import numpy as np
from absl import flags
import tensorflow as tf
from vocabulary import Vocab
from tensorflow.gfile import Exists as exists
from tensorflow.gfile import MakeDirs as makedirs
from tensorflow.gfile import Glob as glob
def _preprocess(shard, train, vocab, save_dir, cutoffs, bin_sizes, bsz, tgt_len,
num_core_per_host, num_shuffle):
file_names = []
num_batch = 0
path = train[shard]
data_shard = vocab.encode_file(path, ordered=False, add_double_eos=True)
for shuffle in range(num_shuffle):
basename = "train-{:03d}-{:02d}".format(shard, shuffle)
print("Processing shard {} shuffle {}".format(shard, shuffle))
np.random.shuffle(data_shard)
file_name, num_batch_shuffle = create_ordered_tfrecords(
save_dir, basename, np.concatenate(data_shard), bsz, tgt_len,
num_core_per_host, cutoffs, bin_sizes)
file_names.append(file_name)
num_batch += num_batch_shuffle
return file_names, num_batch
class Corpus(object):
def __init__(self, path, dataset, *args, **kwargs):
self.dataset = dataset
self.vocab = Vocab(*args, **kwargs)
if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
self.vocab.count_file(os.path.join(path, "train.txt"))
self.vocab.count_file(os.path.join(path, "valid.txt"))
self.vocab.count_file(os.path.join(path, "test.txt"))
elif self.dataset == "wt103":
self.vocab.count_file(os.path.join(path, "train.txt"))
elif self.dataset == "lm1b":
train_path_pattern = os.path.join(
path, "1-billion-word-language-modeling-benchmark-r13output",
"training-monolingual.tokenized.shuffled", "news.en-*")
train_paths = glob(train_path_pattern)
# the vocab will load from file when build_vocab() is called
# for train_path in sorted(train_paths):
# self.vocab.count_file(train_path, verbose=True)
self.vocab.build_vocab()
if self.dataset in ["ptb", "wt2", "wt103"]:
self.train = self.vocab.encode_file(
os.path.join(path, "train.txt"), ordered=True)
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"), ordered=True)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"), ordered=True)
elif self.dataset in ["enwik8", "text8"]:
self.train = self.vocab.encode_file(
os.path.join(path, "train.txt"), ordered=True, add_eos=False)
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"), ordered=True, add_eos=False)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"), ordered=True, add_eos=False)
elif self.dataset == "lm1b":
self.train = train_paths
valid_path = os.path.join(path, "valid.txt")
test_path = valid_path
self.valid = self.vocab.encode_file(
valid_path, ordered=True, add_double_eos=True)
self.test = self.vocab.encode_file(
test_path, ordered=True, add_double_eos=True)
if self.dataset == "wt103":
self.cutoffs = [0, 19997, 39997, 199997] + [len(self.vocab)]
elif self.dataset == "lm1b":
self.cutoffs = [0, 59997, 99997, 639997] + [len(self.vocab)]
else:
self.cutoffs = []
def convert_to_tfrecords(self, split, save_dir, bsz, tgt_len,
num_core_per_host, **kwargs):
FLAGS = kwargs.get('FLAGS')
file_names = []
record_name = "record_info-{}.bsz-{}.tlen-{}.json".format(
split, bsz, tgt_len)
record_info_path = os.path.join(save_dir, record_name)
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
data = getattr(self, split)
bin_sizes = get_bin_sizes(
data, bsz // num_core_per_host, tgt_len, self.cutoffs)
file_name, num_batch = create_ordered_tfrecords(
save_dir, split, data, bsz, tgt_len, num_core_per_host,
self.cutoffs, bin_sizes,
num_passes=FLAGS.num_passes if split == 'train' else 1)
file_names.append(file_name)
elif self.dataset == "lm1b":
bin_sizes = get_bin_sizes(
self.valid, bsz // num_core_per_host, tgt_len, self.cutoffs)
if split == "train":
np.random.seed(123456)
num_batch = 0
if FLAGS.num_procs > 1:
_preprocess_wrapper = partial(_preprocess,
train=self.train, vocab=self.vocab, save_dir=save_dir,
cutoffs=self.cutoffs, bin_sizes=bin_sizes, bsz=bsz,
tgt_len=tgt_len, num_core_per_host=num_core_per_host,
num_shuffle=FLAGS.num_shuffle)
pool = mp.Pool(processes=FLAGS.num_procs)
results = pool.map(_preprocess_wrapper, range(len(self.train)))
for res in results:
file_names.extend(res[0])
num_batch += res[1]
else:
for shard, path in enumerate(self.train):
data_shard = self.vocab.encode_file(path, ordered=False,
add_double_eos=True)
num_shuffle = FLAGS.num_shuffle
for shuffle in range(num_shuffle):
print("Processing shard {} shuffle {}".format(shard, shuffle))
basename = "train-{:03d}-{:02d}".format(shard, shuffle)
np.random.shuffle(data_shard)
file_name, num_batch_ = create_ordered_tfrecords(
save_dir, basename, np.concatenate(data_shard), bsz, tgt_len,
num_core_per_host,
self.cutoffs, bin_sizes)
file_names.append(file_name)
num_batch += num_batch_
else:
file_name, num_batch = create_ordered_tfrecords(
save_dir, split, getattr(self, split), bsz, tgt_len,
num_core_per_host,
self.cutoffs, bin_sizes)
file_names.append(file_name)
with open(record_info_path, "w") as fp:
record_info = {
"filenames": file_names,
"bin_sizes": bin_sizes,
"num_batch": num_batch
}
json.dump(record_info, fp)
def get_bin_sizes(data, batch_size, tgt_len, cutoffs, std_mult=[2.5, 2.5, 2.5]):
"""
Note: the `batch_size` here should be per-core batch size
"""
bin_sizes = []
def _nearest_to_eight(x):
y = x - x % 8
return y + 8 if x % 8 >= 4 else max(8, y)
if cutoffs:
num_batch = len(data) // batch_size // tgt_len
data = data[:batch_size * num_batch * tgt_len]
data = data.reshape(batch_size, num_batch, tgt_len)
tot = batch_size * tgt_len
for b, (left, right) in enumerate(zip(cutoffs[1:-1], cutoffs[2:])):
mask = (data >= left) * (data < right)
percents = mask.astype(np.float64).sum(2).sum(0) / tot
mean = np.mean(percents)
std = np.std(percents)
bin_size = int(math.ceil(tgt_len * batch_size * (mean + std_mult[b] * std)))
bin_size = _nearest_to_eight(bin_size)
bin_sizes.append(bin_size)
return bin_sizes
def _int64_feature(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(values):
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def batchify(data, batch_size, num_passes):
"""
if num_passes > 1
Here, we use multiple randomly shifted copies.
"""
if num_passes > 1:
data_len = len(data)
double_data = np.concatenate([data, data])
data_list = []
for i in range(num_passes):
start = np.random.randint(0, data_len)
data_list.append(double_data[start:start+data_len])
data = np.concatenate(data_list)
num_step = len(data) // batch_size
data = data[:batch_size * num_step]
data = data.reshape(batch_size, num_step)
return data
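# Example (single pass): batchify(np.arange(10), batch_size=2, num_passes=1)
# uses all 10 tokens and returns a (2, 5) array of two contiguous streams.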
def create_ordered_tfrecords(save_dir, basename, data, batch_size, tgt_len,
num_core_per_host, cutoffs=[], bin_sizes=[],
num_passes=1):
file_name = "{}.bsz-{}.tlen-{}.tfrecords".format(
basename, batch_size, tgt_len)
save_path = os.path.join(save_dir, file_name)
record_writer = tf.python_io.TFRecordWriter(save_path)
batched_data = batchify(data, batch_size, num_passes)
num_batch = 0
for t in range(0, batched_data.shape[1] - 1, tgt_len):
cur_tgt_len = min(batched_data.shape[1] - 1 - t, tgt_len)
if num_batch % 500 == 0:
print(" processing batch {}".format(num_batch))
for idx in range(batch_size):
inputs = batched_data[idx, t:t + cur_tgt_len]
labels = batched_data[idx, t + 1:t + cur_tgt_len + 1]
# features dict
feature = {
"inputs": _int64_feature(inputs),
"labels": _int64_feature(labels),
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
record_writer.write(example.SerializeToString())
num_batch += 1
record_writer.close()
print("Done writing {}. batches: {}".format(file_name, num_batch))
return file_name, num_batch
def get_lm_corpus(data_dir, dataset):
fn = os.path.join(data_dir, "cache.pkl")
if exists(fn):
print("Loading cached dataset...")
with open(fn, "rb") as fp:
corpus = pickle.load(fp)
else:
print("Producing dataset...")
kwargs = {}
if dataset in ["wt103", "wt2"]:
kwargs["special"] = ["<eos>"]
kwargs["lower_case"] = False
elif dataset == "ptb":
kwargs["special"] = ["<eos>"]
kwargs["lower_case"] = True
elif dataset == "lm1b":
kwargs["special"] = []
kwargs["lower_case"] = False
kwargs["vocab_file"] = os.path.join(data_dir, "1b_word_vocab.txt")
elif dataset in ["enwik8", "text8"]:
pass
corpus = Corpus(data_dir, dataset, **kwargs)
print("Saving dataset...")
with open(fn, "wb") as fp:
pickle.dump(corpus, fp, protocol=2)
corpus_info = {
"vocab_size" : len(corpus.vocab),
"cutoffs" : corpus.cutoffs,
"dataset" : corpus.dataset
}
with open(os.path.join(data_dir, "corpus-info.json"), "w") as fp:
json.dump(corpus_info, fp)
return corpus
def main(unused_argv):
del unused_argv # Unused
corpus = get_lm_corpus(FLAGS.data_dir, FLAGS.dataset)
save_dir = os.path.join(FLAGS.data_dir, "tfrecords")
if not exists(save_dir):
makedirs(save_dir)
# test mode
if FLAGS.eval_batch_size > 0:
corpus.convert_to_tfrecords("test", save_dir, FLAGS.eval_batch_size,
FLAGS.tgt_len, FLAGS.num_core_per_host,
FLAGS=FLAGS)
return
for split, batch_size in zip(
["train", "valid"],
[FLAGS.train_batch_size // FLAGS.batch_chunk, FLAGS.valid_batch_size]):
if batch_size <= 0: continue
print("Converting {} set...".format(split))
corpus.convert_to_tfrecords(split, save_dir, batch_size, FLAGS.tgt_len,
FLAGS.num_core_per_host, FLAGS=FLAGS)
def load_record_info(record_info_dir, split, per_host_bsz, tgt_len,
num_core_per_host):
record_name = "record_info-{}.bsz-{}.tlen-{}.json".format(
split, per_host_bsz, tgt_len)
record_info_path = os.path.join(record_info_dir, record_name)
with open(record_info_path, "r") as fp:
record_info = json.load(fp)
return record_info
def get_input_fn(record_info_dir, split, per_host_bsz, tgt_len,
num_core_per_host, num_hosts=1):
"""Creates input function."""
record_info = load_record_info(record_info_dir, split, per_host_bsz, tgt_len,
num_core_per_host)
file_names = record_info["filenames"]
bin_sizes = record_info["bin_sizes"]
num_batch = record_info["num_batch"]
tf.logging.info("[{}] File names {}".format(split, file_names))
def input_fn(params):
# per-core batch size
per_core_bsz = params["batch_size"] // num_core_per_host
# data_dir could be a remote path, e.g., a google storage url
data_dir = params["data_dir"]
def parser(record):
# preprocess "inp_perm" and "tgt_perm"
def _process_perm_feature(example, prefix):
for b in range(len(bin_sizes)):
cnt = example.pop("{}_cnt_{}".format(prefix, b))[0]
tup = example.pop("{}_tup_{}".format(prefix, b))
tup = tf.reshape(
tf.sparse_tensor_to_dense(tup),
shape=[cnt, 2])
# tf.float32
perm = tf.sparse_to_dense(
sparse_indices=tup,
output_shape=[tgt_len, bin_sizes[b]],
sparse_values=1.0,
default_value=0.0)
example["{}_perm_{}".format(prefix, b)] = perm
# whether to allow the last batch to have a potentially shorter length
record_spec = {
"inputs": tf.VarLenFeature(tf.int64),
"labels": tf.VarLenFeature(tf.int64),
}
# retrieve serialized example
example = tf.parse_single_example(
serialized=record,
features=record_spec)
# cast int64 into int32
# cast sparse to dense
for key in list(example.keys()):
val = example[key]
if tf.keras.backend.is_sparse(val):
val = tf.sparse.to_dense(val)
if val.dtype == tf.int64:
val = tf.to_int32(val)
example[key] = val
return example["inputs"], example["labels"]
file_paths = []
for file_name in file_names:
file_path = os.path.join(data_dir, file_name)
file_paths.append(file_path)
if split == "train":
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
if len(file_paths) > 1:
dataset = dataset.shuffle(len(file_paths)).repeat()
dataset = tf.data.TFRecordDataset(dataset)
elif num_hosts > 1:
host_id = params["context"].current_host
# drop the remaining batches
num_batch_per_host = num_batch // num_hosts
my_start_sample_id = (host_id * num_batch_per_host * num_core_per_host *
per_core_bsz)
my_sample_num = num_batch_per_host * num_core_per_host * per_core_bsz
dataset = tf.data.TFRecordDataset(dataset).skip(
my_start_sample_id).take(my_sample_num)
else:
dataset = tf.data.TFRecordDataset(dataset)
if num_core_per_host > 1:
import horovod.tensorflow as hvd
dataset = dataset.shard(hvd.size(), hvd.rank())
dataset = dataset.map(parser).cache().repeat()
dataset = dataset.batch(per_core_bsz, drop_remainder=True)
dataset = dataset.prefetch(num_core_per_host * per_core_bsz)
else:
# do not shuffle, repeat or cache in evaluation
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
dataset = tf.data.TFRecordDataset(dataset)
dataset = dataset.map(parser)
dataset = dataset.batch(per_core_bsz, drop_remainder=True)
return dataset
if split == "train" and num_hosts > 1:
record_info["num_batch"] = num_batch // num_hosts
return input_fn, record_info
def get_corpus_info(corpus_info_path):
with open(corpus_info_path, "r") as fp:
corpus_info = json.load(fp)
return corpus_info
if __name__ == "__main__":
FLAGS = flags.FLAGS
flags.DEFINE_string("data_dir", None,
help="Location of the data corpus")
flags.DEFINE_enum("dataset", "wt103",
["ptb", "wt2", "wt103", "lm1b", "enwik8", "text8"],
help="Dataset name.")
flags.DEFINE_integer("train_batch_size", 256,
help="train batch size each host")
flags.DEFINE_integer("valid_batch_size", 256,
help="valid batch size each host")
flags.DEFINE_integer("eval_batch_size", 16,
help="If > 0, enter test mode and process test set only."
"Otherwise, process train and dev sets only.")
flags.DEFINE_integer("tgt_len", 70,
help="number of tokens to predict")
flags.DEFINE_integer("max_batch", -1,
help="run in debug mode")
flags.DEFINE_integer("num_core_per_host", 8,
help="number of GPUs per host")
flags.DEFINE_bool("debug", default=False,
help="Process only the first batch without shuffle for lm1b.")
flags.DEFINE_integer("num_procs", 1,
help="number of processes")
flags.DEFINE_integer("num_passes", 10,
help="number of passes")
flags.DEFINE_integer("num_shuffle", 4,
help="number of shuffles for lm1b")
flags.DEFINE_integer("batch_chunk", 1,
help="number of accumulation steps")
tf.app.run(main)
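# Example invocation (illustrative paths; the flags are defined above):
#   python data_utils.py --data_dir=data/wikitext-103 --dataset=wt103 \
#       --tgt_len=192 --num_passes=10 --eval_batch_size=0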
|
TensorFlow2/LanguageModeling/BERT/official/utils/flags | flags | _device | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flags for managing compute devices. Currently only contains TPU flags."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from official.utils.flags._conventions import help_wrap
def require_cloud_storage(flag_names):
"""Register a validator to check directory flags.
Args:
flag_names: An iterable of strings containing the names of flags to be
checked.
"""
msg = "TPU requires GCS path for {}".format(", ".join(flag_names))
@flags.multi_flags_validator(["tpu"] + flag_names, message=msg)
def _path_check(flag_values): # pylint: disable=missing-docstring
if flag_values["tpu"] is None:
return True
valid_flags = True
for key in flag_names:
if not flag_values[key].startswith("gs://"):
tf.compat.v1.logging.error("{} must be a GCS path.".format(key))
valid_flags = False
return valid_flags
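# Usage sketch (assumed flag names): enforce GCS paths for directory flags whenever --tpu is set.
#   require_cloud_storage(["data_dir", "model_dir"])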
def define_device(tpu=True):
"""Register device specific flags.
Args:
tpu: Create flags to specify TPU operation.
Returns:
A list of flags for core.py to marks as key flags.
"""
key_flags = []
if tpu:
flags.DEFINE_string(
name="tpu", default=None,
help=help_wrap(
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a "
"grpc://ip.address.of.tpu:8470 url. Passing `local` will use the"
"CPU of the local instance instead. (Good for debugging.)"))
key_flags.append("tpu")
flags.DEFINE_string(
name="tpu_zone", default=None,
help=help_wrap(
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE "
"project from metadata."))
flags.DEFINE_string(
name="tpu_gcp_project", default=None,
help=help_wrap(
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE "
"project from metadata."))
flags.DEFINE_integer(name="num_tpu_shards", default=8,
help=help_wrap("Number of shards (TPU chips)."))
return key_flags
|
TensorFlow/Detection/SSD/models/research/slim | slim | train_image_classifier | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic training script that trains a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from preprocessing import preprocessing_factory
slim = tf.contrib.slim
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', '/tmp/tfmodel/',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 1,
'Number of model clones to deploy. Note: for '
'historical reasons the loss from all clones is averaged '
'and learning rate decay happens per clone epoch.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 4,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 600,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'rmsprop',
'The name of the optimizer, one of "adadelta", "adagrad", "adam", '
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
tf.app.flags.DEFINE_integer(
'quantize_delay', -1,
'Number of steps to start quantized training. Set to -1 to disable '
'quantized training.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays. Note: this flag counts '
'epochs per clone but aggregates per sync replicas. So 1.0 means that '
'each clone will go over a full epoch individually, but all replicas '
'together will go over it once.')
tf.app.flags.DEFINE_bool(
'sync_replicas', False,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
'The Number of gradients to collect before updating params.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average. '
'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'imagenet', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
'The maximum number of training steps.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring '
'from a checkpoint.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train. '
'By default, None trains all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
'When restoring a checkpoint, ignore variables that are missing from it.')
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
ValueError: if FLAGS.learning_rate_decay_type is not recognized.
"""
# Note: when num_clones is > 1, this will actually have each clone go
# over each epoch FLAGS.num_epochs_per_decay times. This is different
# behavior from sync replicas and is expected to produce different results.
decay_steps = int(num_samples_per_epoch * FLAGS.num_epochs_per_decay /
FLAGS.batch_size)
if FLAGS.sync_replicas:
decay_steps /= FLAGS.replicas_to_aggregate
if FLAGS.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
raise ValueError('learning_rate_decay_type [%s] was not recognized' %
FLAGS.learning_rate_decay_type)
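# Example (illustrative numbers): with 1,281,167 training samples, batch_size=32 and
# num_epochs_per_decay=2.0, decay_steps = int(1281167 * 2.0 / 32) = 80072 before any
# sync-replica adjustment.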
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
return optimizer
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
# Warn the user if a checkpoint exists in the train_dir. Then we'll be
# ignoring the checkpoint anyway.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
break
else:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=FLAGS.ignore_missing_vars)
def _get_variables_to_train():
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
#######################
# Config model_deploy #
#######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# Create global_step
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
######################
# Select the network #
######################
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=True)
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=True)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20 * FLAGS.batch_size,
common_queue_min=10 * FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
train_image_size = FLAGS.train_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, train_image_size, train_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
labels = slim.one_hot_encoding(
labels, dataset.num_classes - FLAGS.labels_offset)
batch_queue = slim.prefetch_queue.prefetch_queue(
[images, labels], capacity=2 * deploy_config.num_clones)
####################
# Define the model #
####################
def clone_fn(batch_queue):
"""Allows data parallelism by creating multiple clones of network_fn."""
images, labels = batch_queue.dequeue()
logits, end_points = network_fn(images)
#############################
# Specify the loss function #
#############################
if 'AuxLogits' in end_points:
slim.losses.softmax_cross_entropy(
end_points['AuxLogits'], labels,
label_smoothing=FLAGS.label_smoothing, weights=0.4,
scope='aux_loss')
slim.losses.softmax_cross_entropy(
logits, labels, label_smoothing=FLAGS.label_smoothing, weights=1.0)
return end_points
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
first_clone_scope = deploy_config.clone_scope(0)
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by network_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
# Add summaries for end_points.
end_points = clones[0].outputs
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.summary.histogram('activations/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity/' + end_point,
tf.nn.zero_fraction(x)))
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
#################################
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
if FLAGS.quantize_delay >= 0:
tf.contrib.quantize.create_training_graph(
quant_delay=FLAGS.quantize_delay)
#########################################
# Configure the optimization procedure. #
#########################################
with tf.device(deploy_config.optimizer_device()):
learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
optimizer = _configure_optimizer(learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
if FLAGS.sync_replicas:
# If sync_replicas is enabled, the averaging will be done in the chief
# queue runner.
optimizer = tf.train.SyncReplicasOptimizer(
opt=optimizer,
replicas_to_aggregate=FLAGS.replicas_to_aggregate,
total_num_replicas=FLAGS.worker_replicas,
variable_averages=variable_averages,
variables_to_average=moving_average_variables)
elif FLAGS.moving_average_decay:
# Update ops executed locally by trainer.
update_ops.append(variable_averages.apply(moving_average_variables))
# Variables to train.
variables_to_train = _get_variables_to_train()
    # Compute the total loss and the summed gradients across all clones.
total_loss, clones_gradients = model_deploy.optimize_clones(
clones,
optimizer,
var_list=variables_to_train)
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
# Kicks off the training. #
###########################
slim.learning.train(
train_tensor,
logdir=FLAGS.train_dir,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
init_fn=_get_init_fn(),
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
log_every_n_steps=FLAGS.log_every_n_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
sync_optimizer=optimizer if FLAGS.sync_replicas else None)
if __name__ == '__main__':
tf.app.run()
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B0/training/TF32 | TF32 | convergence_8xA100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v1/b0_cfg.py \
--mode train_and_eval \
--use_xla \
--model_dir ./output \
--data_dir /data \
--log_steps 100 \
--max_epochs 500 \
--save_checkpoint_freq 5 \
--train_batch_size 512 \
--eval_batch_size 512 \
--augmenter_name autoaugment \
--lr_decay cosine \
--memory_limit 81000 \
--defer_img_mixing \
--moving_average_decay 0.9999 \
--lr_init 0.005
|
TensorFlow/LanguageModeling/BERT/scripts/docker | docker | build | #!/bin/bash
docker pull nvcr.io/nvidia/tritonserver:20.09-py3
docker build . --rm -t bert
|
TensorFlow2/Detection/Efficientdet/utils | utils | train_lib | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training related libraries."""
import re
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
from model import iou_utils
from model import anchors
from model import efficientdet_keras
from utils.util_keras import get_mixed_precision_policy
class FocalLoss(tf.keras.losses.Loss):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
"""
def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):
"""Initialize focal loss.
Args:
alpha: A float32 scalar multiplying alpha to the loss from positive
examples and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
**kwargs: other params.
"""
super().__init__(**kwargs)
self.alpha = alpha
self.gamma = gamma
self.label_smoothing = label_smoothing
@tf.autograph.experimental.do_not_convert
def call(self, y, y_pred):
"""Compute focal loss for y and y_pred.
Args:
y: A tuple of (normalizer, y_true), where y_true is the target class.
y_pred: A float32 tensor [batch, height_in, width_in, num_predictions].
Returns:
the focal loss.
"""
normalizer, y_true = y
alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype)
gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype)
# compute focal loss multipliers before label smoothing, such that it will
# not blow up the loss.
pred_prob = tf.sigmoid(y_pred)
p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
modulating_factor = (1.0 - p_t)**gamma
# apply label smoothing for cross_entropy for each entry.
y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
# compute the final loss and return
return alpha_factor * modulating_factor * ce / normalizer
class StableFocalLoss(tf.keras.losses.Loss):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Below are comments/derivations for computing modulator.
  For brevity, let x = logits, z = targets, r = gamma, and p_t = sigmoid(x)
  for positive samples and 1 - sigmoid(x) for negative examples.
  The modulator, defined as (1 - p_t)^r, is a critical part of the focal loss
  computation. For r > 0, it puts more weight on hard examples and less
  weight on easier ones. However, if it is directly computed as (1 - p_t)^r,
  its back-propagation is not stable when r < 1. The implementation here
  resolves the issue.
For positive samples (labels being 1),
(1 - p_t)^r
= (1 - sigmoid(x))^r
= (1 - (1 / (1 + exp(-x))))^r
= (exp(-x) / (1 + exp(-x)))^r
= exp(log((exp(-x) / (1 + exp(-x)))^r))
= exp(r * log(exp(-x)) - r * log(1 + exp(-x)))
= exp(- r * x - r * log(1 + exp(-x)))
For negative samples (labels being 0),
(1 - p_t)^r
= (sigmoid(x))^r
= (1 / (1 + exp(-x)))^r
= exp(log((1 / (1 + exp(-x)))^r))
= exp(-r * log(1 + exp(-x)))
Therefore one unified form for positive (z = 1) and negative (z = 0)
samples is: (1 - p_t)^r = exp(-r * z * x - r * log(1 + exp(-x))).
"""
def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):
"""Initialize focal loss.
Args:
alpha: A float32 scalar multiplying alpha to the loss from positive
examples and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
**kwargs: other params.
"""
super().__init__(**kwargs)
self.alpha = alpha
self.gamma = gamma
self.label_smoothing = label_smoothing
@tf.autograph.experimental.do_not_convert
def call(self, y, y_pred):
"""Compute focal loss for y and y_pred.
Args:
y: A tuple of (normalizer, y_true), where y_true is the target class.
y_pred: A float32 tensor [batch, height_in, width_in, num_predictions].
Returns:
the focal loss.
"""
normalizer, y_true = y
alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype)
gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype)
positive_label_mask = tf.equal(y_true, 1.0)
negative_pred = -1.0 * y_pred
modulator = tf.exp(gamma * y_true * negative_pred - gamma * tf.math.log1p(tf.exp(negative_pred)))
# apply label smoothing for cross_entropy for each entry.
y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
loss = modulator * ce
weighted_loss = tf.where(positive_label_mask, alpha * loss, (1.0 - alpha) * loss)
weighted_loss /= normalizer
return weighted_loss
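# Added numerical sanity check (a sketch, not used by training): the stable
# modulator computed in call() should agree with the naive (1 - p_t)^gamma for
# both positive (z = 1) and negative (z = 0) labels.
def _check_stable_modulator(gamma=1.5):
  x = tf.constant([-5.0, -0.5, 0.0, 0.5, 5.0])  # logits
  for z in (0.0, 1.0):
    p = tf.sigmoid(x)
    p_t = z * p + (1.0 - z) * (1.0 - p)
    naive = (1.0 - p_t) ** gamma
    # Same form as in StableFocalLoss.call(): exp(-r*z*x - r*log(1 + exp(-x))).
    stable = tf.exp(-gamma * z * x - gamma * tf.math.log1p(tf.exp(-x)))
    tf.debugging.assert_near(naive, stable, atol=1e-5)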
class BoxLoss(tf.keras.losses.Loss):
"""L2 box regression loss."""
def __init__(self, delta=0.1, **kwargs):
"""Initialize box loss.
Args:
delta: `float`, the point where the huber loss function changes from a
quadratic to linear. It is typically around the mean value of regression
target. For instances, the regression targets of 512x512 input with 6
anchors on P3-P7 pyramid is about [0.1, 0.1, 0.2, 0.2].
**kwargs: other params.
"""
super().__init__(**kwargs)
self.huber = tf.keras.losses.Huber(
delta, reduction=tf.keras.losses.Reduction.NONE)
@tf.autograph.experimental.do_not_convert
def call(self, y_true, box_outputs):
num_positives, box_targets = y_true
normalizer = num_positives * 4.0
mask = tf.cast(box_targets != 0.0, tf.float32)
box_targets = tf.expand_dims(box_targets, axis=-1)
box_outputs = tf.expand_dims(box_outputs, axis=-1)
box_loss = self.huber(box_targets, box_outputs) * mask
box_loss = tf.reduce_sum(box_loss)
box_loss /= normalizer
return box_loss
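# Added illustration (not used by training): the Huber loss above is quadratic
# for |error| <= delta and linear beyond it, which is why delta is chosen near
# the typical magnitude of the regression targets (~0.1).
def _huber_delta_example(delta=0.1):
  huber = tf.keras.losses.Huber(delta, reduction=tf.keras.losses.Reduction.NONE)
  errors = tf.constant([[0.05], [0.1], [0.5]])
  # Per-element loss: 0.5 * e^2 inside delta, delta * (|e| - 0.5 * delta) outside.
  return huber(tf.zeros_like(errors), errors)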
class BoxIouLoss(tf.keras.losses.Loss):
"""Box iou loss."""
def __init__(self, iou_loss_type, min_level, max_level, num_scales,
aspect_ratios, anchor_scale, image_size, **kwargs):
super().__init__(**kwargs)
self.iou_loss_type = iou_loss_type
self.input_anchors = anchors.Anchors(min_level, max_level, num_scales,
aspect_ratios, anchor_scale,
image_size)
@tf.autograph.experimental.do_not_convert
def call(self, y_true, box_outputs):
anchor_boxes = tf.tile(
self.input_anchors.boxes,
[box_outputs.shape[0] // self.input_anchors.boxes.shape[0], 1])
num_positives, box_targets = y_true
mask = tf.cast(box_targets != 0.0, box_targets.dtype)
box_outputs = anchors.decode_box_outputs(box_outputs, anchor_boxes) * mask
box_targets = anchors.decode_box_outputs(box_targets, anchor_boxes) * mask
normalizer = num_positives * 4.0
box_iou_loss = iou_utils.iou_loss(box_outputs, box_targets,
self.iou_loss_type)
box_iou_loss = tf.reduce_sum(box_iou_loss) / normalizer
return box_iou_loss
class EfficientDetNetTrain(efficientdet_keras.EfficientDetNet):
"""A customized trainer for EfficientDet.
see https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit
"""
def _freeze_vars(self):
if self.config.var_freeze_expr:
return [
v for v in self.trainable_variables
if not re.match(self.config.var_freeze_expr, v.name)
]
return self.trainable_variables
def _reg_l2_loss(self, weight_decay, regex=r'.*(kernel|weight):0$'):
"""Return regularization l2 loss loss."""
var_match = re.compile(regex)
return weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in self._freeze_vars()
if var_match.match(v.name)
])
def _detection_loss(self, cls_outputs, box_outputs, labels, loss_vals):
"""Computes total detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
cls_outputs: an OrderDict with keys representing levels and values
representing logits in [batch_size, height, width, num_anchors].
box_outputs: an OrderDict with keys representing levels and values
representing box regression targets in [batch_size, height, width,
num_anchors * 4].
labels: the dictionary that returned from dataloader that includes
groundtruth targets.
loss_vals: A dict of loss values.
Returns:
      total_loss: a float scalar tensor representing the total loss reduced over
        class and box losses from all levels.
      cls_loss: a float scalar tensor representing the total class loss.
      box_loss: a float scalar tensor representing the total box regression loss.
      box_iou_loss: a float scalar tensor representing the total box iou loss.
"""
# convert to float32 for loss computing.
cls_outputs = [tf.cast(i, tf.float32) for i in cls_outputs]
box_outputs = [tf.cast(i, tf.float32) for i in box_outputs]
# Sum all positives in a batch for normalization and avoid zero
# num_positives_sum, which would lead to inf loss during training
num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0
levels = range(len(cls_outputs))
cls_losses = []
box_losses = []
for level in levels:
# Onehot encoding for classification labels.
cls_targets_at_level = tf.one_hot(labels['cls_targets_%d' % (level + 3)],
self.config.num_classes)
if self.config.data_format == 'channels_first':
targets_shape = tf.shape(cls_targets_at_level)
bs = targets_shape[0]
width = targets_shape[2]
height = targets_shape[3]
cls_targets_at_level = tf.reshape(cls_targets_at_level,
[bs, -1, width, height])
else:
targets_shape = tf.shape(cls_targets_at_level)
bs = targets_shape[0]
width = targets_shape[1]
height = targets_shape[2]
cls_targets_at_level = tf.reshape(cls_targets_at_level,
[bs, width, height, -1])
box_targets_at_level = labels['box_targets_%d' % (level + 3)]
class_loss_layer = self.loss.get('class_loss', None)
if class_loss_layer:
cls_loss = class_loss_layer([num_positives_sum, cls_targets_at_level],
cls_outputs[level])
if self.config.data_format == 'channels_first':
cls_loss = tf.reshape(
cls_loss, [bs, -1, width, height, self.config.num_classes])
else:
cls_loss = tf.reshape(
cls_loss, [bs, width, height, -1, self.config.num_classes])
cls_loss *= tf.cast(
tf.expand_dims(
tf.not_equal(labels['cls_targets_%d' % (level + 3)], -2), -1),
tf.float32)
cls_losses.append(tf.reduce_sum(cls_loss))
if self.config.box_loss_weight and self.loss.get('box_loss', None):
box_loss_layer = self.loss['box_loss']
box_losses.append(
box_loss_layer([num_positives_sum, box_targets_at_level],
box_outputs[level]))
if self.config.iou_loss_type:
box_outputs = tf.concat([tf.reshape(v, [-1, 4]) for v in box_outputs],
axis=0)
box_targets = tf.concat([
tf.reshape(labels['box_targets_%d' % (level + 3)], [-1, 4])
for level in levels
],
axis=0)
box_iou_loss_layer = self.loss['box_iou_loss']
box_iou_loss = box_iou_loss_layer([num_positives_sum, box_targets],
box_outputs)
loss_vals['box_iou_loss'] = box_iou_loss
else:
box_iou_loss = 0
cls_loss = tf.add_n(cls_losses) if cls_losses else 0
box_loss = tf.add_n(box_losses) if box_losses else 0
total_loss = (
cls_loss + self.config.box_loss_weight * box_loss +
self.config.iou_loss_weight * box_iou_loss)
loss_vals['det_loss'] = total_loss
loss_vals['cls_loss'] = cls_loss
loss_vals['box_loss'] = box_loss
return total_loss
def train_step(self, data):
"""Train step.
Args:
data: Tuple of (images, labels). Image tensor with shape [batch_size,
        height, width, 3]. The height and width are fixed and equal. Input labels
in a dictionary. The labels include class targets and box targets which
are dense label maps. The labels are generated from get_input_fn
function in data/dataloader.py.
Returns:
A dict record loss info.
"""
images, labels = data
with tf.GradientTape() as tape:
if len(self.config.heads) == 2:
cls_outputs, box_outputs, seg_outputs = self(images, training=True)
elif 'object_detection' in self.config.heads:
cls_outputs, box_outputs = self(images, training=True)
elif 'segmentation' in self.config.heads:
seg_outputs, = self(images, training=True)
total_loss = 0
loss_vals = {}
if 'object_detection' in self.config.heads:
det_loss = self._detection_loss(cls_outputs, box_outputs, labels,
loss_vals)
total_loss += det_loss
if 'segmentation' in self.config.heads:
seg_loss_layer = self.loss['seg_loss']
seg_loss = seg_loss_layer(labels['image_masks'], seg_outputs)
total_loss += seg_loss
loss_vals['seg_loss'] = seg_loss
reg_l2_loss = self._reg_l2_loss(self.config.weight_decay)
loss_vals['reg_l2_loss'] = reg_l2_loss
total_loss += reg_l2_loss
if isinstance(self.optimizer,
tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = self.optimizer.get_scaled_loss(total_loss)
optimizer = self.optimizer._optimizer
else:
scaled_loss = total_loss
optimizer = self.optimizer
compress = get_mixed_precision_policy().compute_dtype == 'float16'
tape = hvd.DistributedGradientTape(tape, compression=hvd.Compression.fp16 \
if compress else hvd.Compression.none)
loss_vals['loss'] = total_loss
loss_vals['learning_rate'] = optimizer.learning_rate(optimizer.iterations)
trainable_vars = self._freeze_vars()
scaled_gradients = tape.gradient(scaled_loss, trainable_vars)
if isinstance(self.optimizer,
tf.keras.mixed_precision.LossScaleOptimizer):
gradients = self.optimizer.get_unscaled_gradients(scaled_gradients)
else:
gradients = scaled_gradients
if self.config.clip_gradients_norm > 0:
clip_norm = abs(self.config.clip_gradients_norm)
gradients = [
tf.clip_by_norm(g, clip_norm) if g is not None else None
for g in gradients
]
gradients, _ = tf.clip_by_global_norm(gradients, clip_norm)
loss_vals['gradient_norm'] = tf.linalg.global_norm(gradients)
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
return loss_vals
def test_step(self, data):
"""Test step.
Args:
data: Tuple of (images, labels). Image tensor with shape [batch_size,
        height, width, 3]. The height and width are fixed and equal. Input labels
in a dictionary. The labels include class targets and box targets which
are dense label maps. The labels are generated from get_input_fn
function in data/dataloader.py.
Returns:
A dict record loss info.
"""
images, labels = data
if len(self.config.heads) == 2:
cls_outputs, box_outputs, seg_outputs = self(images, training=False)
elif 'object_detection' in self.config.heads:
cls_outputs, box_outputs = self(images, training=False)
elif 'segmentation' in self.config.heads:
seg_outputs, = self(images, training=False)
reg_l2loss = self._reg_l2_loss(self.config.weight_decay)
total_loss = reg_l2loss
loss_vals = {}
if 'object_detection' in self.config.heads:
det_loss = self._detection_loss(cls_outputs, box_outputs, labels,
loss_vals)
total_loss += det_loss
if 'segmentation' in self.config.heads:
seg_loss_layer = self.loss['seg_loss']
seg_loss = seg_loss_layer(labels['image_masks'], seg_outputs)
total_loss += seg_loss
loss_vals['seg_loss'] = seg_loss
loss_vals['loss'] = total_loss
return loss_vals
|
PyTorch/SpeechSynthesis/Tacotron2/tacotron2 | tacotron2 | __init__ | from .entrypoints import nvidia_tacotron2, nvidia_tts_utils
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers | layers | masked_softmax | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based softmax layer with optional masking."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package='Text')
class MaskedSoftmax(tf.keras.layers.Layer):
"""Performs a softmax with optional masking on a tensor.
Attributes:
mask_expansion_axes: Any axes that should be padded on the mask tensor.
"""
def __init__(self, mask_expansion_axes=None, **kwargs):
self._mask_expansion_axes = mask_expansion_axes
super(MaskedSoftmax, self).__init__(**kwargs)
def call(self, inputs):
if isinstance(inputs, list) and len(inputs) == 2:
scores, mask = inputs
else:
scores, mask = (inputs, None)
if mask is not None:
if self._mask_expansion_axes is not None:
mask = tf.expand_dims(mask, axis=self._mask_expansion_axes)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(mask, scores.dtype)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
scores += adder
return tf.nn.softmax(scores)
def get_config(self):
config = {'mask_expansion_axes': self._mask_expansion_axes}
base_config = super(MaskedSoftmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
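# Added usage sketch (illustration only): applying the layer to a toy
# [batch, from_seq, to_seq] score tensor with a padding mask.
def _masked_softmax_example():
  scores = tf.constant([[[1.0, 2.0, 3.0]]])  # [1, 1, 3]
  mask = tf.constant([[[1.0, 1.0, 0.0]]])    # attend to the first two positions
  # The masked position receives a -10000.0 adder and thus ~0 probability.
  return MaskedSoftmax()([scores, mask])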
|
PyTorch/Translation/Transformer/examples/translation | translation | prepare-iwslt14 | #!/usr/bin/env bash
#
# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh
echo 'Cloning Moses github repository (for tokenization scripts)...'
git clone https://github.com/moses-smt/mosesdecoder.git
echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
git clone https://github.com/rsennrich/subword-nmt.git
SCRIPTS=mosesdecoder/scripts
TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
LC=$SCRIPTS/tokenizer/lowercase.perl
CLEAN=$SCRIPTS/training/clean-corpus-n.perl
BPEROOT=subword-nmt
BPE_TOKENS=10000
URL="https://wit3.fbk.eu/archive/2014-01/texts/de/en/de-en.tgz"
GZ=de-en.tgz
if [ ! -d "$SCRIPTS" ]; then
echo "Please set SCRIPTS variable correctly to point to Moses scripts."
exit
fi
src=de
tgt=en
lang=de-en
prep=iwslt14.tokenized.de-en
tmp=$prep/tmp
orig=orig
mkdir -p $orig $tmp $prep
echo "Downloading data from ${URL}..."
cd $orig
wget "$URL"
if [ -f $GZ ]; then
echo "Data successfully downloaded."
else
echo "Data not successfully downloaded."
exit
fi
tar zxvf $GZ
cd ..
echo "pre-processing train data..."
for l in $src $tgt; do
f=train.tags.$lang.$l
tok=train.tags.$lang.tok.$l
cat $orig/$lang/$f | \
grep -v '<url>' | \
grep -v '<talkid>' | \
grep -v '<keywords>' | \
sed -e 's/<title>//g' | \
sed -e 's/<\/title>//g' | \
sed -e 's/<description>//g' | \
sed -e 's/<\/description>//g' | \
perl $TOKENIZER -threads 8 -l $l > $tmp/$tok
echo ""
done
perl $CLEAN -ratio 1.5 $tmp/train.tags.$lang.tok $src $tgt $tmp/train.tags.$lang.clean 1 175
for l in $src $tgt; do
perl $LC < $tmp/train.tags.$lang.clean.$l > $tmp/train.tags.$lang.$l
done
echo "pre-processing valid/test data..."
for l in $src $tgt; do
for o in `ls $orig/$lang/IWSLT14.TED*.$l.xml`; do
fname=${o##*/}
f=$tmp/${fname%.*}
echo $o $f
grep '<seg id' $o | \
sed -e 's/<seg id="[0-9]*">\s*//g' | \
sed -e 's/\s*<\/seg>\s*//g' | \
sed -e "s/\’/\'/g" | \
perl $TOKENIZER -threads 8 -l $l | \
perl $LC > $f
echo ""
done
done
echo "creating train, valid, test..."
for l in $src $tgt; do
awk '{if (NR%23 == 0) print $0; }' $tmp/train.tags.de-en.$l > $tmp/valid.$l
awk '{if (NR%23 != 0) print $0; }' $tmp/train.tags.de-en.$l > $tmp/train.$l
cat $tmp/IWSLT14.TED.dev2010.de-en.$l \
$tmp/IWSLT14.TEDX.dev2012.de-en.$l \
$tmp/IWSLT14.TED.tst2010.de-en.$l \
$tmp/IWSLT14.TED.tst2011.de-en.$l \
$tmp/IWSLT14.TED.tst2012.de-en.$l \
> $tmp/test.$l
done
TRAIN=$tmp/train.en-de
BPE_CODE=$prep/code
rm -f $TRAIN
for l in $src $tgt; do
cat $tmp/train.$l >> $TRAIN
done
echo "learn_bpe.py on ${TRAIN}..."
python $BPEROOT/learn_bpe.py -s $BPE_TOKENS < $TRAIN > $BPE_CODE
for L in $src $tgt; do
for f in train.$L valid.$L test.$L; do
echo "apply_bpe.py to ${f}..."
python $BPEROOT/apply_bpe.py -c $BPE_CODE < $tmp/$f > $prep/$f
done
done
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling/backbone | backbone | fpn | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import torch
import torch.nn.functional as F
from torch import nn
class FPN(nn.Module):
"""
Module that adds FPN on top of a list of feature maps.
The feature maps are currently supposed to be in increasing depth
order, and must be consecutive
"""
def __init__(
self, in_channels_list, out_channels, conv_block, top_blocks=None, nhwc=False
):
"""
Arguments:
in_channels_list (list[int]): number of channels for each feature map that
will be fed
out_channels (int): number of channels of the FPN representation
top_blocks (nn.Module or None): if provided, an extra operation will
be performed on the output of the last (smallest resolution)
FPN output, and the result will extend the result list
nhwc (bool): specify is FPN is running at nhwc mode
"""
super(FPN, self).__init__()
self.inner_blocks = []
self.layer_blocks = []
for idx, in_channels in enumerate(in_channels_list, 1):
inner_block = "fpn_inner{}".format(idx)
layer_block = "fpn_layer{}".format(idx)
inner_block_module = conv_block(in_channels, out_channels, 1)
layer_block_module = conv_block(out_channels, out_channels, 3, 1)
self.add_module(inner_block, inner_block_module)
self.add_module(layer_block, layer_block_module)
self.inner_blocks.append(inner_block)
self.layer_blocks.append(layer_block)
self.top_blocks = top_blocks
def forward(self, x):
"""
Arguments:
x (list[Tensor]): feature maps for each feature level.
Returns:
results (tuple[Tensor]): feature maps after FPN layers.
                They are ordered from highest to lowest resolution.
"""
last_inner = getattr(self, self.inner_blocks[-1])(x[-1])
results = []
results.append(getattr(self, self.layer_blocks[-1])(last_inner))
for feature, inner_block, layer_block in zip(
x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1]
):
inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest")
inner_lateral = getattr(self, inner_block)(feature)
# TODO use size instead of scale to make it robust to different sizes
# inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:],
# mode='bilinear', align_corners=False)
last_inner = inner_lateral + inner_top_down
results.insert(0, getattr(self, layer_block)(last_inner))
if self.top_blocks is not None:
last_results = self.top_blocks(results[-1])
results.extend(last_results)
return tuple(results)
class LastLevelMaxPool(nn.Module):
def forward(self, x):
return [F.max_pool2d(x, 1, 2, 0)]
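# Added usage sketch (illustration only). The conv_block below is a stand-in
# for the repository's conv builder; it only needs to accept
# (in_channels, out_channels, kernel_size, stride) like the calls in __init__.
def _fpn_example():
    def conv_block(in_channels, out_channels, kernel_size, stride=1):
        return nn.Conv2d(in_channels, out_channels, kernel_size,
                         stride=stride, padding=kernel_size // 2)

    fpn = FPN(in_channels_list=[256, 512, 1024, 2048], out_channels=256,
              conv_block=conv_block, top_blocks=LastLevelMaxPool())
    # Backbone-like feature maps with strides 4, 8, 16, 32 for a 256x256 input.
    feats = [torch.randn(1, c, s, s)
             for c, s in zip([256, 512, 1024, 2048], [64, 32, 16, 8])]
    outputs = fpn(feats)  # 4 FPN levels plus one extra max-pooled level
    return [o.shape for o in outputs]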
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/transforms | transforms | transforms | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, image, target):
if isinstance(image, torch.Tensor):
image_size = image.shape[-2:]
image_size = (image_size[1], image_size[0])
else:
image_size = image.size
size = self.get_size(image_size)
image = F.resize(image, size)
if isinstance(image, torch.Tensor):
image_size = image.shape[-2:]
image_size = (image_size[1], image_size[0])
else:
image_size = image.size
target = target.resize(image_size)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
image = F.hflip(image)
target = target.transpose(0)
return image, target
class ToTensor(object):
def __call__(self, image, target):
if isinstance(image, torch.Tensor):
return F.convert_image_dtype(image, dtype=torch.float32), target
else:
return F.to_tensor(image), target
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, target):
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
|
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils | utils | adaptive_softmax | import torch
import torch.nn as nn
import torch.nn.functional as F
class AdaptiveLogSoftmax(nn.Module):
def __init__(self, in_features, n_classes, cutoffs, keep_order=False):
super(AdaptiveLogSoftmax, self).__init__()
cutoffs = list(cutoffs)
if (cutoffs != sorted(cutoffs)) \
or (min(cutoffs) <= 0) \
or (max(cutoffs) >= (n_classes - 1)) \
or (len(set(cutoffs)) != len(cutoffs)) \
or any([int(c) != c for c in cutoffs]):
raise ValueError("cutoffs should be a sequence of unique, positive "
"integers sorted in an increasing order, where "
"each value is between 1 and n_classes-1")
self.in_features = in_features
self.n_classes = n_classes
self.cutoffs = cutoffs + [n_classes]
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.in_features))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
self.keep_order = keep_order
def forward(self, hidden, target, weight, bias, keep_order=False):
if hidden.size(0) != target.size(0):
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
head_weight = torch.cat(
[weight[:self.shortlist_size], self.cluster_weight], dim=0)
head_bias = torch.cat(
[bias[:self.shortlist_size], self.cluster_bias], dim=0)
head_logit = F.linear(hidden, head_weight, bias=head_bias)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype,
device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, h_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < h_idx)
indices_i = mask_i.nonzero(as_tuple=False).squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i = weight[l_idx:h_idx]
bias_i = bias[l_idx:h_idx]
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = F.linear(hidden_i, weight_i, bias=bias_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                # Log-prob of the i-th tail cluster: its logit is appended
                # after the shortlist, i.e. at head column shortlist_size + i - 1.
                logprob_i = head_logprob_i[:, self.shortlist_size + i - 1] \
                    + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return nll
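# Added usage sketch (illustration only): the module does not own the output
# embedding; the caller passes the full vocabulary `weight`/`bias` and receives
# a per-token negative log-likelihood.
def _adaptive_softmax_example():
    n_classes, d_model = 1000, 64
    crit = AdaptiveLogSoftmax(d_model, n_classes, cutoffs=[100, 500])
    weight = torch.randn(n_classes, d_model)
    bias = torch.zeros(n_classes)
    hidden = torch.randn(8, d_model)
    # Targets chosen so that each cluster (head, tail 1, tail 2) is non-empty.
    target = torch.tensor([5, 50, 150, 300, 400, 600, 800, 999])
    nll = crit(hidden, target, weight, bias)  # shape [8]
    return nll.mean()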
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/optimizer | optimizer | Rprop | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: torch.optim.Rprop
lr: 0.01
etas: [0.5, 1.2]
step_sizes: [1e-06, 50]
|
PyTorch/Recommendation/NCF/qa | qa | generate_validation_curves | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import matplotlib.pyplot as plt
def get_curve(filename):
hrs = []
with open(filename, 'r') as opened:
for line in opened.readlines():
d = json.loads(line[len("DLLL "):])
try:
hrs.append(d["data"]["hr@10"])
except KeyError:
pass
return hrs
a100 = "runs/pytorch_ncf_A100-SXM4-40GBx{numgpus}gpus_{precision}_{num_run}.json"
v16 = "runs/pytorch_ncf_Tesla V100-SXM2-16GBx{numgpus}gpus_{precision}_{num_run}.json"
v32 = "runs/pytorch_ncf_Tesla V100-SXM2-32GBx{numgpus}gpus_{precision}_{num_run}.json"
dgx2 = "runs/pytorch_ncf_Tesla V100-SXM3-32GBx{numgpus}gpus_{precision}_{num_run}.json"
fp32 = "FP32"
amp = "Mixed (AMP)"
tf32 = "TF32"
def get_accs(arch, numgpu, prec):
data = [get_curve(arch.format(numgpus=numgpu, num_run=num_run, precision=prec)) for num_run in range(1, 21)]
return data[0]
def get_plots():
archs = [dgx2, a100]
titles = ["DGX2 32GB", "DGX A100 40GB"]
fullprecs = [fp32, tf32]
halfprecs = [amp, amp]
gpuranges = [(1, 8, 16), (1, 8)]
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(10, 5))
plt.subplots_adjust(hspace=0.5)
for x, prec in enumerate([fullprecs, halfprecs]):
for i, arch in enumerate(archs):
for numgpu in gpuranges[i]:
d = get_accs(arch, numgpu, prec[i])
axs[x].plot(range(len(d)), d, label=f"{titles[i]} x {numgpu} {prec[i]}")
axs[x].legend()
#plt.show()
plt.savefig("val_curves.png")
if __name__ == "__main__":
get_plots()
|
TensorFlow/Segmentation/UNet_Industrial | UNet_Industrial | download_and_preprocess_dagm2007 | #!/bin/bash
##############################################################################
# Copyright (c) Jonathan Dekhtiar - [email protected]
# All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
# Usage: ./download_and_preprocess_dagm2007.sh /path/to/dataset/directory/
if [[ ! "$BASH_VERSION" ]] ; then
echo "Please do not use sh to run this script ($0), just execute it directly" 1>&2
exit 1
fi
if [[ -z "$1" ]]
then
echo -e "Error: Argument is missing. No dataset directory received."
echo -e "Usage: '$0 /path/to/dataset/directory/'"
exit 1
fi
DATASET_DIR=$(realpath -s $1)
ZIP_FILES_DIR=${DATASET_DIR}/zip_files
RAW_IMAGES_DIR=${DATASET_DIR}/raw_images
PUBLIC_ZIP_FILES_DIR=${ZIP_FILES_DIR}/public
PUBLIC_RAW_IMAGES_DIR=${RAW_IMAGES_DIR}/public
if [[ ! -e ${PUBLIC_ZIP_FILES_DIR} ]]; then
echo "creating ${PUBLIC_ZIP_FILES_DIR} ..."
mkdir -p ${PUBLIC_ZIP_FILES_DIR}
fi
if [[ ! -e ${PUBLIC_RAW_IMAGES_DIR} ]]; then
echo "creating ${PUBLIC_RAW_IMAGES_DIR} ..."
mkdir -p ${PUBLIC_RAW_IMAGES_DIR}
fi
PRIVATE_ZIP_FILES_DIR=${ZIP_FILES_DIR}/private
PRIVATE_RAW_IMAGES_DIR=${RAW_IMAGES_DIR}/private
if [[ ! -e ${PRIVATE_ZIP_FILES_DIR} ]]; then
echo "creating ${PRIVATE_ZIP_FILES_DIR} ..."
mkdir -p ${PRIVATE_ZIP_FILES_DIR}
fi
if [[ ! -e ${PRIVATE_RAW_IMAGES_DIR} ]]; then
echo "creating ${PRIVATE_RAW_IMAGES_DIR} ..."
mkdir -p ${PRIVATE_RAW_IMAGES_DIR}
fi
echo -e "\n################################################"
echo -e "Processing Public Dataset"
echo -e "################################################\n"
sleep 2
BASE_PUBLIC_URL="https://resources.mpi-inf.mpg.de/conference/dagm/2007"
declare -a arr=(
"Class1.zip"
"Class1_def.zip"
"Class2.zip"
"Class2_def.zip"
"Class3.zip"
"Class3_def.zip"
"Class4.zip"
"Class4_def.zip"
"Class5.zip"
"Class5_def.zip"
"Class6.zip"
"Class6_def.zip"
)
for file in "${arr[@]}"
do
if [[ ! -e ${PUBLIC_ZIP_FILES_DIR}/${file} ]]; then
echo -e "Downloading File: $BASE_PUBLIC_URL/$file ..."
wget -N ${BASE_PUBLIC_URL}/${file} -O ${PUBLIC_ZIP_FILES_DIR}/${file}
fi
# Unzip without overwriting
unzip -n ${PUBLIC_ZIP_FILES_DIR}/${file} -d ${PUBLIC_RAW_IMAGES_DIR}
done
chmod -R 744 ${PUBLIC_ZIP_FILES_DIR}
chmod -R 744 ${PUBLIC_RAW_IMAGES_DIR}
echo -e "\n################################################"
echo -e "Processing Private Dataset"
echo -e "################################################\n"
sleep 2
declare -a arr=(
"Class1.zip"
"Class2.zip"
"Class3.zip"
"Class4.zip"
"Class5.zip"
"Class6.zip"
"Class7.zip"
"Class8.zip"
"Class9.zip"
"Class10.zip"
)
for file in "${arr[@]}"
do
if [[ ! -e ${PRIVATE_ZIP_FILES_DIR}/${file} ]]; then
echo -e "\nError! File not found: '${PRIVATE_ZIP_FILES_DIR}/${file}'"
echo -e "Instructions:"
echo -e " 1. Create an account on 'https://hci.iwr.uni-heidelberg.de/node/3616'"
echo -e " 2. Download the missing file(s) to: '${PRIVATE_ZIP_FILES_DIR}'\n"
exit 1
fi
# Unzip without overwriting
unzip -n ${PRIVATE_ZIP_FILES_DIR}/${file} -d ${PRIVATE_RAW_IMAGES_DIR}
done
chmod -R 744 ${PRIVATE_ZIP_FILES_DIR}
chmod -R 744 ${PRIVATE_RAW_IMAGES_DIR}
echo -e "\n################################################"
echo -e "Dataset Preprocessing Starting ..."
echo -e "################################################\n"
sleep 2
echo "Cleaning non useful files ..."
rm -rf ${DATASET_DIR}/**/Thumbs.db
rm -rf ${DATASET_DIR}/**/**/Thumbs.db
rm -rf ${DATASET_DIR}/**/**/**/Thumbs.db
rm -rf ${DATASET_DIR}/**/**/**/**/Thumbs.db
rm -rf ${DATASET_DIR}/**/**/**/**/**/Thumbs.db
echo "Generating file listing for private dataset ..."
python preprocess_dagm2007.py --data_dir=${PRIVATE_RAW_IMAGES_DIR}
echo -e "\n################################################"
echo -e "Dataset Processing Finished with success"
echo -e "################################################\n" |
PyTorch/Classification/GPUNet/triton/08ms-D/runner | runner | start_NVIDIA-DGX-A100-(1x-A100-80GB) | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Evaluate Runner
python3 -m "triton.08ms-D.runner.__main__" \
--config-path "triton/08ms-D/runner/config_NVIDIA-DGX-A100-(1x-A100-80GB).yaml" \
--device 0 |
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | anchor_generator_builder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generator_builder."""
import math
import tensorflow as tf
from google.protobuf import text_format
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.anchor_generators import multiple_grid_anchor_generator
from object_detection.anchor_generators import multiscale_grid_anchor_generator
from object_detection.builders import anchor_generator_builder
from object_detection.protos import anchor_generator_pb2
class AnchorGeneratorBuilderTest(tf.test.TestCase):
def assert_almost_list_equal(self, expected_list, actual_list, delta=None):
self.assertEqual(len(expected_list), len(actual_list))
for expected_item, actual_item in zip(expected_list, actual_list):
self.assertAlmostEqual(expected_item, actual_item, delta=delta)
def test_build_grid_anchor_generator_with_defaults(self):
anchor_generator_text_proto = """
grid_anchor_generator {
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
grid_anchor_generator.GridAnchorGenerator))
self.assertListEqual(anchor_generator_object._scales, [])
self.assertListEqual(anchor_generator_object._aspect_ratios, [])
with self.test_session() as sess:
base_anchor_size, anchor_offset, anchor_stride = sess.run(
[anchor_generator_object._base_anchor_size,
anchor_generator_object._anchor_offset,
anchor_generator_object._anchor_stride])
self.assertAllEqual(anchor_offset, [0, 0])
self.assertAllEqual(anchor_stride, [16, 16])
self.assertAllEqual(base_anchor_size, [256, 256])
def test_build_grid_anchor_generator_with_non_default_parameters(self):
anchor_generator_text_proto = """
grid_anchor_generator {
height: 128
width: 512
height_stride: 10
width_stride: 20
height_offset: 30
width_offset: 40
scales: [0.4, 2.2]
aspect_ratios: [0.3, 4.5]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
grid_anchor_generator.GridAnchorGenerator))
self.assert_almost_list_equal(anchor_generator_object._scales,
[0.4, 2.2])
self.assert_almost_list_equal(anchor_generator_object._aspect_ratios,
[0.3, 4.5])
with self.test_session() as sess:
base_anchor_size, anchor_offset, anchor_stride = sess.run(
[anchor_generator_object._base_anchor_size,
anchor_generator_object._anchor_offset,
anchor_generator_object._anchor_stride])
self.assertAllEqual(anchor_offset, [30, 40])
self.assertAllEqual(anchor_stride, [10, 20])
self.assertAllEqual(base_anchor_size, [128, 512])
def test_build_ssd_anchor_generator_with_defaults(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator))
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, 0.2, 0.2),
(0.35, 0.418),
(0.499, 0.570),
(0.649, 0.721),
(0.799, 0.871),
(0.949, 0.974)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
[(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
with self.test_session() as sess:
base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
self.assertAllClose(base_anchor_size, [1.0, 1.0])
def test_build_ssd_anchor_generator_with_custom_scales(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8]
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator))
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, math.sqrt(0.1 * 0.15)),
(0.15, math.sqrt(0.15 * 0.2)),
(0.2, math.sqrt(0.2 * 0.4)),
(0.4, math.sqrt(0.4 * 0.6)),
(0.6, math.sqrt(0.6 * 0.8)),
(0.8, math.sqrt(0.8 * 1.0))]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [0.5]
interpolated_scale_aspect_ratio: 0.5
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator))
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
6 * [(0.5, 0.5)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
def test_build_ssd_anchor_generator_without_reduced_boxes(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
aspect_ratios: [1.0]
reduce_boxes_in_lowest_layer: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator))
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.2, 0.264),
(0.35, 0.418),
(0.499, 0.570),
(0.649, 0.721),
(0.799, 0.871),
(0.949, 0.974)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
6 * [(1.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
with self.test_session() as sess:
base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
self.assertAllClose(base_anchor_size, [1.0, 1.0])
def test_build_ssd_anchor_generator_with_non_default_parameters(self):
anchor_generator_text_proto = """
ssd_anchor_generator {
num_layers: 2
min_scale: 0.3
max_scale: 0.8
aspect_ratios: [2.0]
height_stride: 16
height_stride: 32
width_stride: 20
width_stride: 30
height_offset: 8
height_offset: 16
width_offset: 0
width_offset: 10
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiple_grid_anchor_generator.
MultipleGridAnchorGenerator))
for actual_scales, expected_scales in zip(
list(anchor_generator_object._scales),
[(0.1, 0.3, 0.3), (0.8, 0.894)]):
self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)
for actual_aspect_ratio, expected_aspect_ratio in zip(
list(anchor_generator_object._aspect_ratios),
[(1.0, 2.0, 0.5), (2.0, 1.0)]):
self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)
for actual_strides, expected_strides in zip(
list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]):
self.assert_almost_list_equal(expected_strides, actual_strides)
for actual_offsets, expected_offsets in zip(
list(anchor_generator_object._anchor_offsets), [(8, 0), (16, 10)]):
self.assert_almost_list_equal(expected_offsets, actual_offsets)
with self.test_session() as sess:
base_anchor_size = sess.run(anchor_generator_object._base_anchor_size)
self.assertAllClose(base_anchor_size, [1.0, 1.0])
  def test_raise_value_error_on_empty_anchor_generator(self):
anchor_generator_text_proto = """
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
with self.assertRaises(ValueError):
anchor_generator_builder.build(anchor_generator_proto)
def test_build_multiscale_anchor_generator_custom_aspect_ratios(self):
anchor_generator_text_proto = """
multiscale_anchor_generator {
aspect_ratios: [1.0]
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiscale_grid_anchor_generator.
MultiscaleGridAnchorGenerator))
for level, anchor_grid_info in zip(
range(3, 8), anchor_generator_object._anchor_grid_info):
self.assertEqual(set(anchor_grid_info.keys()), set(['level', 'info']))
      self.assertEqual(level, anchor_grid_info['level'])
self.assertEqual(len(anchor_grid_info['info']), 4)
self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5])
      self.assertAllClose(anchor_grid_info['info'][1], [1.0])
self.assertAllClose(anchor_grid_info['info'][2],
[4.0 * 2**level, 4.0 * 2**level])
self.assertAllClose(anchor_grid_info['info'][3], [2**level, 2**level])
self.assertTrue(anchor_generator_object._normalize_coordinates)
def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates(
self):
anchor_generator_text_proto = """
multiscale_anchor_generator {
aspect_ratios: [1.0]
normalize_coordinates: false
}
"""
anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)
anchor_generator_object = anchor_generator_builder.build(
anchor_generator_proto)
self.assertTrue(isinstance(anchor_generator_object,
multiscale_grid_anchor_generator.
MultiscaleGridAnchorGenerator))
self.assertFalse(anchor_generator_object._normalize_coordinates)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Translation/Transformer/scripts | scripts | average_checkpoints | #!/usr/bin/env python3
import argparse
import collections
import torch
import os
import re
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
for f in inputs:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state['model']
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
'For checkpoint {}, expected list of params: {}, '
'but found: {}'.format(f, params_keys, model_params_keys)
)
for k in params_keys:
if k not in params_dict:
params_dict[k] = []
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
params_dict[k].append(p)
averaged_params = collections.OrderedDict()
# v should be a list of torch Tensor.
for k, v in params_dict.items():
summed_v = None
for x in v:
summed_v = summed_v + x if summed_v is not None else x
averaged_params[k] = summed_v / len(v)
new_state['model'] = averaged_params
return new_state
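# Added minimal illustration (not exercised by the CLI below): averaging two
# tiny on-disk "checkpoints" that share the same parameter names.
def _average_example():
    import tempfile
    paths = []
    for scale in (0.0, 2.0):
        f = tempfile.NamedTemporaryFile(suffix='.pt', delete=False)
        f.close()
        torch.save({'model': collections.OrderedDict(
            [('w', torch.full((2, 2), scale))])}, f.name)
        paths.append(f.name)
    averaged = average_checkpoints(paths)
    return averaged['model']['w']  # every entry equals 1.0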
def last_n_checkpoints(paths, n, update_based):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r'checkpoint_\d+_(\d+)\.pt')
else:
pt_regexp = re.compile(r'checkpoint(\d+)\.pt')
files = os.listdir(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
if len(entries) < n:
        raise Exception('Found {} checkpoint files but need at least {}'.format(
            len(entries), n))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
parser = argparse.ArgumentParser(
description='Tool to average the params of input checkpoints to '
'produce a new checkpoint',
)
parser.add_argument(
'--inputs',
required=True,
nargs='+',
help='Input checkpoint file paths.',
)
parser.add_argument(
'--output',
required=True,
metavar='FILE',
help='Write the new checkpoint containing the averaged weights to this '
'path.',
)
num_group = parser.add_mutually_exclusive_group()
num_group.add_argument(
'--num-epoch-checkpoints',
type=int,
help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, '
'and average last this many of them.',
)
num_group.add_argument(
'--num-update-checkpoints',
type=int,
help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, '
'and average last this many of them.',
)
args = parser.parse_args()
print(args)
num = None
is_update_based = False
if args.num_update_checkpoints is not None:
num = args.num_update_checkpoints
is_update_based = True
elif args.num_epoch_checkpoints is not None:
num = args.num_epoch_checkpoints
if num is not None:
args.inputs = last_n_checkpoints(args.inputs, num, is_update_based)
print('averaging checkpoints: ', args.inputs)
new_state = average_checkpoints(args.inputs)
torch.save(new_state, args.output)
print('Finished writing averaged checkpoint to {}.'.format(args.output))
if __name__ == '__main__':
main()
|
MxNet/Classification/RN50v1.5 | RN50v1.5 | fit | # Copyright 2017-2018 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" train fit utility """
import logging
import math
import glob
import os
import random
import sys
import time
import re
from itertools import starmap
import signal
import pickle
import dllogger
import horovod.mxnet as hvd
import mxnet as mx
import mxnet.contrib.amp as amp
import numpy as np
from mxnet import autograd as ag
from mxnet import gluon
import data
from benchmarking import BenchmarkingDataIter
from global_metrics import CompositeMeter, MaxMeter, MinMeter, AvgMeter, PercentileMeter
class PartitionSignalHandler():
    """Propagates SIGUSR1/SIGTERM across workers: a caught signal sets a flag
    tensor that sync() allreduces every `sync_freq` steps, so all ranks agree
    to stop training at the same step and the job can checkpoint gracefully."""
def __init__(self, sync_freq: int = 10):
self.step = 0
self.freq = sync_freq
self.t = mx.nd.array([0])
signal.signal(signal.SIGUSR1, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
def sync(self) -> bool:
if self.step % self.freq == 0:
new_sync = hvd.allreduce(self.t, average=False)
if new_sync[0] > 0:
self.t[0] = 1
self.step += 1
return self.should_end()
def should_end(self) -> bool:
return bool(self.t[0] > 0)
def _signal_handler(self, signum, frame):
print("Signal received")
self.t[0] = 1
def add_fit_args(parser):
def int_list(x):
return list(map(int, x.split(',')))
def float_list(x):
return list(map(float, x.split(',')))
train = parser.add_argument_group('Training')
train.add_argument('--mode', default='train_val', choices=('train_val', 'train', 'val', 'pred'),
help='mode')
train.add_argument('--seed', type=int, default=None,
help='random seed')
train.add_argument('--gpus', type=int_list, default=[0],
help='list of gpus to run, e.g. 0 or 0,2,5')
train.add_argument('--kv-store', type=str, default='device', choices=('device', 'horovod'),
help='key-value store type')
train.add_argument('--dtype', type=str, default='float16', choices=('float32', 'float16'),
help='precision')
train.add_argument('--amp', action='store_true',
help='If enabled, turn on AMP (Automatic Mixed Precision)')
train.add_argument('--batch-size', type=int, default=192,
help='the batch size')
train.add_argument('--num-epochs', type=int, default=90,
help='number of epochs')
train.add_argument('--run-epochs', type=int, default=-1,
help='number of epochs to run in single run')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-schedule', choices=('multistep', 'cosine'), default='cosine',
help='learning rate schedule')
train.add_argument('--lr-factor', type=float, default=0.256,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-steps', type=float_list, default=[],
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--warmup-epochs', type=int, default=5,
help='the epochs to ramp-up lr to scaled large-batch value')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.875,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=1 / 32768,
help='weight decay for sgd')
train.add_argument('--label-smoothing', type=float, default=0.1,
help='label smoothing factor')
train.add_argument('--mixup', type=float, default=0,
help='alpha parameter for mixup (if 0 then mixup is not applied)')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str, default='model',
help='model checkpoint prefix')
train.add_argument('--save-frequency', type=int, default=-1,
help='frequency of saving model in epochs (--model-prefix must be specified). '
'If -1 then save only best model. If 0 then do not save anything.')
train.add_argument('--begin-epoch', type=int, default=0,
help='start the model from an epoch')
train.add_argument('--load', help='checkpoint to load')
train.add_argument('--test-io', action='store_true',
help='test reading speed without training')
train.add_argument('--test-io-mode', default='train', choices=('train', 'val'),
help='data to test')
train.add_argument('--log', type=str, default='log.log',
help='file where to save the log from the experiment')
train.add_argument('--dllogger-log', type=str, default='dllogger_log.log',
help='file where to save the dllogger log from the experiment')
train.add_argument('--workspace', type=str, default='./',
help='path to directory where results will be stored')
train.add_argument('--logdir', type=str, default=None,
help="path to directory where logs will be stored")
train.add_argument('--no-metrics', action='store_true',
help='do not calculate evaluation metrics (for benchmarking)')
train.add_argument('--benchmark-iters', type=int, default=None,
help='run only benchmark-iters iterations from each epoch')
return train
def get_epoch_size(args, kv):
return math.ceil(args.num_examples / args.batch_size)
def get_lr_scheduler(args):
def multistep_schedule(x):
lr = args.lr * \
(args.lr_factor ** (len(list(filter(lambda step: step <= x, args.lr_steps)))))
warmup_coeff = min(1, x / args.warmup_epochs)
return warmup_coeff * lr
def cosine_schedule(x):
steps = args.lr_steps
if not steps or steps[0] > args.warmup_epochs:
steps = [args.warmup_epochs] + steps
elif not steps or steps[0] != 0:
steps = [0] + steps
if steps[-1] != args.num_epochs:
steps.append(args.num_epochs)
if x < args.warmup_epochs:
return args.lr * x / args.warmup_epochs
for i, (step, next_step) in enumerate(zip(steps, steps[1:])):
if next_step > x:
return args.lr * 0.5 * (1 + math.cos(math.pi * (x - step) / (next_step - step))) * (args.lr_factor ** i)
return 0
schedules = {
'multistep': multistep_schedule,
'cosine': cosine_schedule,
}
return schedules[args.lr_schedule]
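# Usage sketch: the returned callable maps a fractional epoch to a learning
# rate; model_fit below drives the optimizer with
#   trainer.set_learning_rate(lr_scheduler(epoch + i / epoch_size))
# before every training step.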
def load_model(args, model):
file = list(glob.glob(
f"{args.workspace}/{args.model_prefix}_*.params"))
if len(file) == 0:
return -1
file = [x for x in sorted(file) if "best.params" not in x]
if len(file) == 0:
return -1
file = file[-1]
    epoch = re.match(rf".*{args.model_prefix}_([0-9]*)\.params", file)
if epoch is None:
return -1
epoch = int(epoch.group(1))
model.load_parameters(file)
logging.info('Loaded model {}'.format(file))
return epoch
def save_checkpoint(net, epoch, top1, best_acc, model_prefix, workspace, save_frequency, kvstore, force_save=False):
if model_prefix is None or save_frequency == 0 or ('horovod' in kvstore and hvd.rank() != 0):
return
if (save_frequency > 0 and (epoch + 1) % save_frequency == 0) or force_save:
fname = '{}_{:04}.params'.format(model_prefix, epoch)
fname = os.path.join(workspace, fname)
net.save_parameters(fname)
logging.info('[Epoch {}] Saving checkpoint to {} with Accuracy: {:.4f}'.format(
epoch, fname, top1))
if top1 > best_acc:
fname = os.path.join(workspace, f'{model_prefix}_best.params')
net.save_parameters(fname)
logging.info('[Epoch {}] Saving checkpoint to {} with Accuracy: {:.4f}'.format(
epoch, fname, top1))
def model_pred(args, model, image):
from imagenet_classes import classes
output = model(image.reshape(-1, *image.shape)
)[0].softmax().as_in_context(mx.cpu())
top = output.argsort(is_ascend=False)[:10]
for i, ind in enumerate(top):
ind = int(ind.asscalar())
logging.info('{:2d}. {:5.2f}% -> {}'.format(i + 1,
output[ind].asscalar() * 100, classes[ind]))
def reduce_metrics(args, metrics, kvstore):
if 'horovod' not in kvstore or not metrics[0] or hvd.size() == 1:
return metrics
m = mx.ndarray.array(metrics[1], ctx=mx.gpu(args.gpus[0]))
reduced = hvd.allreduce(m)
values = reduced.as_in_context(mx.cpu()).asnumpy().tolist()
return (metrics[0], values)
def model_score(args, net, val_data, metric, kvstore):
if val_data is None:
logging.info('Omitting validation: no data')
return [], []
if not isinstance(metric, mx.metric.EvalMetric):
metric = mx.metric.create(metric)
metric.reset()
val_data.reset()
total_batch_size = val_data.batch_size * val_data._num_gpus * \
(hvd.size() if 'horovod' in kvstore else 1)
durations = []
tic = time.time()
outputs = []
for batches in val_data:
# synchronize to previous iteration
for o in outputs:
o.wait_to_read()
data = [b.data[0] for b in batches]
label = [b.label[0][:len(b.data[0]) - b.pad]
for b in batches if len(b.data[0]) != b.pad]
outputs = [net(X) for X, b in zip(data, batches)]
outputs = [o[:len(b.data[0]) - b.pad]
for o, b in zip(outputs, batches) if len(b.data[0]) != b.pad]
metric.update(label, outputs)
durations.append(time.time() - tic)
tic = time.time()
metric = reduce_metrics(args, metric.get_global(), kvstore)
durations = durations[min(len(durations) // 10, 100):]
duration_stats = {
'ips': total_batch_size / np.mean(durations),
'latency_avg': np.mean(durations),
}
return metric, duration_stats, durations
class ScalarMetric(mx.metric.Loss):
def update(self, _, scalar):
self.sum_metric += scalar
self.global_sum_metric += scalar
self.num_inst += 1
self.global_num_inst += 1
def label_smoothing(labels, classes, eta):
return labels.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
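# Worked example: with classes=1000 and eta=0.1 the smoothed one-hot target
# places 1 - 0.1 + 0.1/1000 = 0.9001 on the true class and 0.1/1000 = 0.0001
# on every other class.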
def model_fit(args, net, train_data, eval_metric, optimizer,
optimizer_params, lr_scheduler, eval_data, global_metrics, kvstore, kv,
begin_epoch, num_epoch, run_epoch, model_prefix):
if not isinstance(eval_metric, mx.metric.EvalMetric):
eval_metric = mx.metric.create(eval_metric)
loss_metric = ScalarMetric()
if 'horovod' in kvstore:
trainer = hvd.DistributedTrainer(
net.collect_params(), optimizer, optimizer_params)
else:
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params,
kvstore=kv, update_on_kvstore=False)
if args.amp:
amp.init_trainer(trainer)
partition_handler = PartitionSignalHandler(1)
sparse_label_loss = (args.label_smoothing == 0 and args.mixup == 0)
loss = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=sparse_label_loss)
loss.hybridize(static_shape=True, static_alloc=True)
local_batch_size = train_data.batch_size
total_batch_size = local_batch_size * train_data._num_gpus * \
(hvd.size() if 'horovod' in kvstore else 1)
durations = []
epoch_size = get_epoch_size(args, kv)
run_epoch = num_epoch if (run_epoch == -1) else (begin_epoch + run_epoch)
def transform_data(images, labels):
if args.mixup != 0:
coeffs = mx.nd.array(np.random.beta(args.mixup, args.mixup, size=images.shape[0])).as_in_context(
images.context)
image_coeffs = coeffs.astype(
images.dtype, copy=False).reshape(*coeffs.shape, 1, 1, 1)
ret_images = image_coeffs * images + \
(1 - image_coeffs) * images[::-1]
ret_labels = label_smoothing(
labels, args.num_classes, args.label_smoothing)
label_coeffs = coeffs.reshape(*coeffs.shape, 1)
ret_labels = label_coeffs * ret_labels + \
(1 - label_coeffs) * ret_labels[::-1]
else:
ret_images = images
if not sparse_label_loss:
ret_labels = label_smoothing(
labels, args.num_classes, args.label_smoothing)
else:
ret_labels = labels
return ret_images, ret_labels
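    # Note on transform_data above: with args.mixup > 0 each sample is blended
    # with its mirror in the reversed batch using a per-sample coefficient drawn
    # from Beta(args.mixup, args.mixup), and the smoothed one-hot labels are
    # blended with the same coefficient (standard mixup augmentation).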
i = -1
best_accuracy = -1
for epoch in range(begin_epoch, min(run_epoch, num_epoch)):
tic = time.time()
btic = time.time()
etic = time.time()
train_data.reset()
eval_metric.reset()
loss_metric.reset()
logging.info('Starting epoch {}'.format(epoch))
outputs = []
if not partition_handler.should_end():
for i, batches in enumerate(train_data):
# synchronize to previous iteration
# for o in outputs:
# o.wait_to_read()
trainer.set_learning_rate(lr_scheduler(epoch + i / epoch_size))
data = [b.data[0] for b in batches]
label = [b.label[0].as_in_context(
b.data[0].context) for b in batches]
orig_label = label
data, label = zip(*starmap(transform_data, zip(data, label)))
outputs = []
Ls = []
with ag.record():
for x, y in zip(data, label):
z = net(x)
L = loss(z, y)
# store the loss and do backward after we have done forward
# on all GPUs for better speed on multiple GPUs.
Ls.append(L)
outputs.append(z)
if args.amp:
with amp.scale_loss(Ls, trainer) as scaled_loss:
ag.backward(scaled_loss)
else:
ag.backward(Ls)
if 'horovod' in kvstore:
trainer.step(local_batch_size)
else:
trainer.step(total_batch_size)
loss_metric.update(..., np.mean(
[l.asnumpy() for l in Ls]).item())
if args.disp_batches and not (i + 1) % args.disp_batches:
dllogger_it_data = {
'train.loss': loss_metric.get()[1],
'train.ips': args.disp_batches * total_batch_size / (time.time() - btic),
'train.lr': trainer.learning_rate
}
dllogger.log((epoch, i), data=dllogger_it_data)
loss_metric.reset_local()
btic = time.time()
durations.append(time.time() - tic)
tic = time.time()
else:
break
durations = durations[min(len(durations) // 10, 100):]
dllogger_epoch_data = {
'train.loss': loss_metric.get_global()[1],
'train.ips': total_batch_size / np.mean(durations)
}
should_break = partition_handler.sync()
if args.mode == 'train_val':
logging.info('Validating epoch {}'.format(epoch))
score, duration_stats, _ = model_score(
args, net, eval_data, eval_metric, kvstore)
dllogger_epoch_data.update(
starmap(lambda key, val: (
'val.{}'.format(key), val), zip(*score))
)
dllogger_epoch_data.update(
starmap(lambda key, val: ('val.{}'.format(key), val),
duration_stats.items())
)
score = dict(zip(*score))
accuracy = score.get('accuracy', -1)
save_checkpoint(net, epoch, accuracy, best_accuracy,
model_prefix, args.workspace,
args.save_frequency if args.mode == "train_val" else -1,
kvstore, force_save=should_break)
best_accuracy = max(best_accuracy, accuracy)
global_metrics.update_dict(dllogger_epoch_data)
dllogger.log(step=(epoch,), data=dllogger_epoch_data)
def fit(args, model, data_loader):
"""
train a model
    args : parsed command-line arguments
    model : the neural network model
data_loader : function that returns the train and val data iterators
"""
start_time = time.time()
# select gpu for horovod process
if 'horovod' in args.kv_store:
args.gpus = [args.gpus[hvd.local_rank()]]
if args.amp:
amp.init()
if args.seed is not None:
logging.info('Setting seeds to {}'.format(args.seed))
random.seed(args.seed)
np.random.seed(args.seed)
mx.random.seed(args.seed)
# kvstore
if 'horovod' in args.kv_store:
kv = None
rank = hvd.rank()
num_workers = hvd.size()
else:
kv = mx.kvstore.create(args.kv_store)
rank = kv.rank
num_workers = kv.num_workers
if args.test_io:
train, val = data_loader(args, kv)
if args.test_io_mode == 'train':
data_iter = train
else:
data_iter = val
tic = time.time()
for i, batch in enumerate(data_iter):
if isinstance(batch, list):
for b in batch:
for j in b.data:
j.wait_to_read()
else:
for j in batch.data:
j.wait_to_read()
if (i + 1) % args.disp_batches == 0:
logging.info('Batch [{}]\tSpeed: {:.2f} samples/sec'.format(
i, args.disp_batches * args.batch_size / (time.time() - tic)))
tic = time.time()
return
start_epoch = load_model(args, model) + 1
if start_epoch == 0:
# all initializers should be specified in the model definition.
# if not, this will raise an error
model.initialize(mx.init.Initializer())
logging.info(f"starting epoch {start_epoch}")
# devices for training
devs = list(map(mx.gpu, args.gpus))
model.collect_params().reset_ctx(devs)
if args.mode == 'pred':
logging.info('Infering image {}'.format(args.data_pred))
model_pred(args, model, data.load_image(args, args.data_pred, devs[0]))
return
# learning rate
lr_scheduler = get_lr_scheduler(args)
optimizer_params = {
'learning_rate': 0,
'wd': args.wd,
'multi_precision': True,
}
# Only a limited number of optimizers have 'momentum' property
has_momentum = {'sgd', 'dcasgd', 'nag', 'signum', 'lbsgd'}
if args.optimizer in has_momentum:
optimizer_params['momentum'] = args.mom
    # evaluation metrics
if not args.no_metrics:
eval_metrics = ['accuracy']
eval_metrics.append(mx.metric.create(
'top_k_accuracy', top_k=5))
else:
eval_metrics = []
train, val = data_loader(args, kv)
train = BenchmarkingDataIter(train, args.benchmark_iters)
if val is not None:
val = BenchmarkingDataIter(val, args.benchmark_iters)
if 'horovod' in args.kv_store:
# Fetch and broadcast parameters
params = model.collect_params()
if params is not None:
hvd.broadcast_parameters(params, root_rank=0)
ctx = mx.gpu(hvd.local_rank())
tensor1 = mx.nd.zeros(shape=(1,), dtype='float32', ctx=ctx)
tensor2 = mx.nd.zeros(shape=(1,), dtype='float32', ctx=ctx)
tensor1, tensor2 = hvd.grouped_allreduce([tensor1,tensor2])
global_metrics = CompositeMeter()
if args.mode in ['train_val', 'train']:
global_metrics.register_metric('train.loss', MinMeter())
global_metrics.register_metric('train.ips', AvgMeter())
if args.mode in ['train_val', 'val']:
global_metrics.register_metric('val.accuracy', MaxMeter())
global_metrics.register_metric('val.top_k_accuracy_5', MaxMeter())
global_metrics.register_metric('val.ips', AvgMeter())
global_metrics.register_metric('val.latency_avg', AvgMeter())
if args.mode in ['val']:
global_metrics.register_metric('val.latency_50', PercentileMeter(50))
global_metrics.register_metric('val.latency_90', PercentileMeter(90))
global_metrics.register_metric('val.latency_95', PercentileMeter(95))
global_metrics.register_metric('val.latency_99', PercentileMeter(99))
global_metrics.register_metric('val.latency_100', PercentileMeter(100))
# run
if args.mode in ['train_val', 'train']:
model_fit(
args,
model,
train,
begin_epoch=start_epoch,
num_epoch=args.num_epochs,
run_epoch=args.run_epochs,
eval_data=val,
eval_metric=eval_metrics,
global_metrics=global_metrics,
kvstore=args.kv_store,
kv=kv,
optimizer=args.optimizer,
optimizer_params=optimizer_params,
lr_scheduler=lr_scheduler,
model_prefix=args.model_prefix,
)
elif args.mode == 'val':
for epoch in range(args.num_epochs): # loop for benchmarking
score, duration_stats, durations = model_score(
args, model, val, eval_metrics, args.kv_store)
dllogger_data = dict(starmap(lambda key, val: (
'val.{}'.format(key), val), zip(*score)))
dllogger_data.update(
starmap(lambda key, val: ('val.{}'.format(key), val),
duration_stats.items())
)
global_metrics.update_dict(dllogger_data)
for percentile in [50, 90, 95, 99, 100]:
metric_name = 'val.latency_{}'.format(percentile)
dllogger_data[metric_name] = np.percentile(
durations, percentile)
global_metrics.update_metric(metric_name, durations)
dllogger.log(step=(epoch,), data=dllogger_data)
else:
raise ValueError('Wrong mode')
mx.nd.waitall()
dllogger.log(tuple(), data=global_metrics.get())
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | IModelImporter | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_IMODELIMPORTER_H
#define TT2I_IMODELIMPORTER_H
#include "layerData.h"
#include <string>
#include <vector>
namespace tts
{
class IModelImporter
{
public:
/**
* @brief Virtual destructor.
*/
virtual ~IModelImporter() = default;
/**
* @brief Get the weights for the given layer.
*
* @param path The path of the layer.
*
* @return The weights.
*/
virtual const LayerData* getWeights(const std::vector<std::string>& path) = 0;
};
} // namespace tts
#endif
|
Tools/DGLPyTorch/SyntheticGraphGeneration/demos/advanced_examples | advanced_examples | big_graph_generation | #!/usr/bin/env python
# coding: utf-8
# Copyright 2023 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# # Big Graph Generation (MAG240m)
# ## Overview
#
# In this notebook, we walk through the complete process of generating a synthetic dataset based on the MAG240m dataset.
# We cover advanced SynGen features such as memory mapping and independent chunk generation.
# ## Prepare the dataset
# In[1]:
data_path = '/raid/ogbn_mag240m/'
preprocessed_path = '/raid/ogbn_mag240m_syngen'
# In[2]:
get_ipython().system('python -m syngen preprocess --source-path=$data_path --dataset=ogbn_mag240m --destination-path=$preprocessed_path --cpu --use-cache')
# In[3]:
get_ipython().system('cat $preprocessed_path/graph_metadata.json')
# ## Create Configurations Directory
# In[4]:
configs_dir = 'ogbn_mag240m_configs'
# In[5]:
get_ipython().system('mkdir -p $configs_dir')
# ## Prepare simple SynGen Configuration
# In[6]:
get_ipython().system('python -m syngen mimic-dataset --output-file=$configs_dir/simple.json --dataset-path $preprocessed_path --tab-gen uniform')
# In[7]:
get_ipython().system('cat $configs_dir/simple.json')
# In[8]:
get_ipython().system('python -m syngen synthesize --config-path $configs_dir/simple.json --save-path /raid/ogbn_mag240m_simple --verbose')
# In[9]:
get_ipython().system('du -ah /raid/ogbn_mag240m_simple')
# ## Prepare SynGen Configuration that stores fitted generators
# The generator fitting process takes a significant part of the entire generation time, so we can store the fitted generators for future experiments.
# In[10]:
generators_dump_dir = 'ogbn_mag240m_gens'
# In[10]:
get_ipython().system('python -m syngen mimic-dataset --gen-dump-path=$generators_dump_dir --output-file=$configs_dir/with_gen_dump.json --dataset-path $preprocessed_path --tab-gen uniform')
# In[11]:
get_ipython().system('cat $configs_dir/with_gen_dump.json')
# In[12]:
get_ipython().system('python -m syngen synthesize --config-path $configs_dir/with_gen_dump.json --save-path /raid/ogbn_mag240m_with_gen_dump --verbose')
# In[13]:
get_ipython().system('du -ah /raid/ogbn_mag240m_with_gen_dump')
# ## Prepare SynGen Configuration that scales the dataset
# In[11]:
import os
# In[12]:
scale_config_files = {}
for scale in [1, 2, 4]:
edges_scale = scale ** 3
out_file = f'{configs_dir}/scale_nodes_{scale}_edges_{edges_scale}.json'
scale_config_files[scale] = out_file
if os.path.exists(out_file):
continue
get_ipython().system('python -m syngen mimic-dataset --node-scale=$scale --edge-scale=$edges_scale --gen-dump-path=$generators_dump_dir --output-file=$out_file --dataset-path $preprocessed_path --tab-gen uniform')
# In[15]:
def generate_scale(scale):
config_file = scale_config_files[scale]
out_dir = f"/raid/scale_{scale}"
get_ipython().system('python -m syngen synthesize --config-path $config_file --save-path $out_dir --verbose')
get_ipython().system('du -ah $out_dir')
# In[17]:
generate_scale(2)
# ## Memory-mapped files for edge lists
# Instead of concatenating chunks after generation, SynGen supports memory-mapped files that allow multi-process writing into a single file. To enable this feature, specify the edge `structure_path` as an `.npy` file.
# In[13]:
import json
# In[14]:
memmap_scale_config_files = {}
for scale, config_file in scale_config_files.items():
with open(config_file, 'r') as f:
cfg = json.load(f)
for edge_info in cfg["edges"]:
edge_info['structure_path'] = edge_info['structure_path'].split('.')[0] + '.npy'
memmap_cfg_file = config_file[:-5] + "_memmap.json"
memmap_scale_config_files[scale] = memmap_cfg_file
if os.path.exists(memmap_cfg_file):
continue
with open(memmap_cfg_file, 'w') as f:
json.dump(cfg, f, indent=4)
# In[15]:
def generate_scale_memmap(scale):
config_file = memmap_scale_config_files[scale]
out_dir = f"/raid/scale_{scale}_memmap"
get_ipython().system('python -m syngen synthesize --config-path $config_file --save-path $out_dir --verbose')
get_ipython().system('du -ah $out_dir')
# In[16]:
generate_scale_memmap(1)
# In[17]:
generate_scale_memmap(2)
# In[18]:
generate_scale_memmap(4)
# In[ ]:
|
PyTorch/SpeechSynthesis/Tacotron2/tacotron2_common | tacotron2_common | stft | """
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from tacotron2_common.audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :].astype(np.float32))
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, size=filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose2d(
recombine_magnitude_phase.unsqueeze(-1),
Variable(self.inverse_basis.unsqueeze(-1), requires_grad=False),
stride=(self.hop_length,1),
padding=(0,0))
inverse_transform = inverse_transform.squeeze(-1)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
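# Usage sketch (tensor shapes are assumptions): round-tripping a batch of audio,
# where `audio` is a FloatTensor of shape [batch, num_samples].
#
#   stft = STFT(filter_length=1024, hop_length=256, win_length=1024)
#   magnitude, phase = stft.transform(audio)
#   reconstruction = stft.inverse(magnitude, phase)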
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | model_builder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.models.model_builder."""
from absl.testing import parameterized
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import model_builder
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.meta_architectures import rfcn_meta_arch
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2
from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas
from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas
from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn
from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn
from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor
from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor
from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor
from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor
from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor
from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor
from object_detection.predictors import convolutional_box_predictor
from object_detection.predictors import convolutional_keras_box_predictor
from object_detection.protos import model_pb2
FRCNN_RESNET_FEAT_MAPS = {
'faster_rcnn_resnet50':
frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor,
'faster_rcnn_resnet101':
frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor,
'faster_rcnn_resnet152':
frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor
}
SSD_RESNET_V1_FPN_FEAT_MAPS = {
'ssd_resnet50_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor,
'ssd_resnet101_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor,
'ssd_resnet152_v1_fpn':
ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor,
}
SSD_RESNET_V1_PPN_FEAT_MAPS = {
'ssd_resnet50_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor,
'ssd_resnet101_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor,
'ssd_resnet152_v1_ppn':
ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor
}
class ModelBuilderTest(tf.test.TestCase, parameterized.TestCase):
def create_model(self, model_config):
"""Builds a DetectionModel based on the model config.
Args:
model_config: A model.proto object containing the config for the desired
DetectionModel.
Returns:
DetectionModel based on the config.
"""
return model_builder.build(model_config, is_training=True)
def test_create_ssd_inception_v2_model_from_config(self):
model_text_proto = """
ssd {
feature_extractor {
type: 'ssd_inception_v2'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
override_base_feature_extractor_hyperparams: true
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = self.create_model(model_proto)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor,
SSDInceptionV2FeatureExtractor)
self.assertIsNone(model._expected_loss_weights_fn)
def test_create_ssd_inception_v3_model_from_config(self):
model_text_proto = """
ssd {
feature_extractor {
type: 'ssd_inception_v3'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
override_base_feature_extractor_hyperparams: true
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = self.create_model(model_proto)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor,
SSDInceptionV3FeatureExtractor)
def test_create_ssd_resnet_v1_fpn_model_from_config(self):
model_text_proto = """
ssd {
feature_extractor {
type: 'ssd_resnet50_v1_fpn'
fpn {
min_level: 3
max_level: 7
}
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
encode_background_as_zeros: true
anchor_generator {
multiscale_anchor_generator {
aspect_ratios: [1.0, 2.0, 0.5]
scales_per_octave: 2
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
weight_shared_convolutional_box_predictor {
depth: 32
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
}
}
}
num_layers_before_predictor: 1
}
}
normalize_loss_by_num_matches: true
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_sigmoid_focal {
alpha: 0.25
gamma: 2.0
}
}
localization_loss {
weighted_smooth_l1 {
delta: 0.1
}
}
classification_weight: 1.0
localization_weight: 1.0
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
for extractor_type, extractor_class in SSD_RESNET_V1_FPN_FEAT_MAPS.items():
model_proto.ssd.feature_extractor.type = extractor_type
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor, extractor_class)
def test_create_ssd_resnet_v1_ppn_model_from_config(self):
model_text_proto = """
ssd {
feature_extractor {
type: 'ssd_resnet_v1_50_ppn'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
mean_stddev_box_coder {
}
}
matcher {
bipartite_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
weight_shared_convolutional_box_predictor {
depth: 1024
class_prediction_bias_init: -4.6
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
variance_scaling_initializer {
}
}
}
num_layers_before_predictor: 2
kernel_size: 1
}
}
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_l2 {
}
}
classification_weight: 1.0
localization_weight: 1.0
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
for extractor_type, extractor_class in SSD_RESNET_V1_PPN_FEAT_MAPS.items():
model_proto.ssd.feature_extractor.type = extractor_type
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor, extractor_class)
def test_create_ssd_mobilenet_v1_model_from_config(self):
model_text_proto = """
ssd {
freeze_batchnorm: true
inplace_batchnorm_update: true
feature_extractor {
type: 'ssd_mobilenet_v1'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = self.create_model(model_proto)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor,
SSDMobileNetV1FeatureExtractor)
self.assertTrue(model._normalize_loc_loss_by_codesize)
self.assertTrue(model._freeze_batchnorm)
self.assertTrue(model._inplace_batchnorm_update)
def test_create_ssd_mobilenet_v1_fpn_model_from_config(self):
model_text_proto = """
ssd {
freeze_batchnorm: true
inplace_batchnorm_update: true
feature_extractor {
type: 'ssd_mobilenet_v1_fpn'
fpn {
min_level: 3
max_level: 7
}
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = self.create_model(model_proto)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor,
SSDMobileNetV1FpnFeatureExtractor)
self.assertTrue(model._normalize_loc_loss_by_codesize)
self.assertTrue(model._freeze_batchnorm)
self.assertTrue(model._inplace_batchnorm_update)
def test_create_ssd_mobilenet_v1_ppn_model_from_config(self):
model_text_proto = """
ssd {
freeze_batchnorm: true
inplace_batchnorm_update: true
feature_extractor {
type: 'ssd_mobilenet_v1_ppn'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = self.create_model(model_proto)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor,
SSDMobileNetV1PpnFeatureExtractor)
self.assertTrue(model._normalize_loc_loss_by_codesize)
self.assertTrue(model._freeze_batchnorm)
self.assertTrue(model._inplace_batchnorm_update)
def test_create_ssd_mobilenet_v2_model_from_config(self):
model_text_proto = """
ssd {
feature_extractor {
type: 'ssd_mobilenet_v2'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = self.create_model(model_proto)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor,
SSDMobileNetV2FeatureExtractor)
self.assertIsInstance(model._box_predictor,
convolutional_box_predictor.ConvolutionalBoxPredictor)
self.assertTrue(model._normalize_loc_loss_by_codesize)
def test_create_ssd_mobilenet_v2_keras_model_from_config(self):
model_text_proto = """
ssd {
feature_extractor {
type: 'ssd_mobilenet_v2_keras'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = self.create_model(model_proto)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor,
SSDMobileNetV2KerasFeatureExtractor)
self.assertIsInstance(
model._box_predictor,
convolutional_keras_box_predictor.ConvolutionalBoxPredictor)
self.assertTrue(model._normalize_loc_loss_by_codesize)
def test_create_ssd_mobilenet_v2_fpn_model_from_config(self):
model_text_proto = """
ssd {
freeze_batchnorm: true
inplace_batchnorm_update: true
feature_extractor {
type: 'ssd_mobilenet_v2_fpn'
fpn {
min_level: 3
max_level: 7
}
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = self.create_model(model_proto)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor,
SSDMobileNetV2FpnFeatureExtractor)
self.assertTrue(model._normalize_loc_loss_by_codesize)
self.assertTrue(model._freeze_batchnorm)
self.assertTrue(model._inplace_batchnorm_update)
def test_create_ssd_mobilenet_v2_fpnlite_model_from_config(self):
model_text_proto = """
ssd {
freeze_batchnorm: true
inplace_batchnorm_update: true
feature_extractor {
type: 'ssd_mobilenet_v2_fpn'
use_depthwise: true
fpn {
min_level: 3
max_level: 7
additional_layer_depth: 128
}
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
normalize_loc_loss_by_codesize: true
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = self.create_model(model_proto)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor,
SSDMobileNetV2FpnFeatureExtractor)
self.assertTrue(model._normalize_loc_loss_by_codesize)
self.assertTrue(model._freeze_batchnorm)
self.assertTrue(model._inplace_batchnorm_update)
def test_create_embedded_ssd_mobilenet_v1_model_from_config(self):
model_text_proto = """
ssd {
feature_extractor {
type: 'embedded_ssd_mobilenet_v1'
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
box_coder {
faster_rcnn_box_coder {
}
}
matcher {
argmax_matcher {
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
aspect_ratios: 1.0
}
}
image_resizer {
fixed_shape_resizer {
height: 256
width: 256
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
loss {
classification_loss {
weighted_softmax {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = self.create_model(model_proto)
self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch)
self.assertIsInstance(model._feature_extractor,
EmbeddedSSDMobileNetV1FeatureExtractor)
def test_create_faster_rcnn_resnet_v1_models_from_config(self):
model_text_proto = """
faster_rcnn {
inplace_batchnorm_update: false
num_classes: 3
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_resnet101'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.01
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
for extractor_type, extractor_class in FRCNN_RESNET_FEAT_MAPS.items():
model_proto.faster_rcnn.feature_extractor.type = extractor_type
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch)
self.assertIsInstance(model._feature_extractor, extractor_class)
@parameterized.parameters(
{'use_matmul_crop_and_resize': False},
{'use_matmul_crop_and_resize': True},
)
def test_create_faster_rcnn_resnet101_with_mask_prediction_enabled(
self, use_matmul_crop_and_resize):
model_text_proto = """
faster_rcnn {
num_classes: 3
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_resnet101'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
predict_instance_masks: true
}
}
second_stage_mask_prediction_loss_weight: 3.0
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.01
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model_proto.faster_rcnn.use_matmul_crop_and_resize = (
use_matmul_crop_and_resize)
model = model_builder.build(model_proto, is_training=True)
self.assertAlmostEqual(model._second_stage_mask_loss_weight, 3.0)
def test_create_faster_rcnn_nas_model_from_config(self):
model_text_proto = """
faster_rcnn {
num_classes: 3
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_nas'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
initial_crop_size: 17
maxpool_kernel_size: 1
maxpool_stride: 1
second_stage_box_predictor {
mask_rcnn_box_predictor {
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.01
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch)
self.assertIsInstance(
model._feature_extractor,
frcnn_nas.FasterRCNNNASFeatureExtractor)
def test_create_faster_rcnn_pnas_model_from_config(self):
model_text_proto = """
faster_rcnn {
num_classes: 3
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_pnas'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
initial_crop_size: 17
maxpool_kernel_size: 1
maxpool_stride: 1
second_stage_box_predictor {
mask_rcnn_box_predictor {
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.01
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch)
self.assertIsInstance(
model._feature_extractor,
frcnn_pnas.FasterRCNNPNASFeatureExtractor)
def test_create_faster_rcnn_inception_resnet_v2_model_from_config(self):
model_text_proto = """
faster_rcnn {
num_classes: 3
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_inception_resnet_v2'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
initial_crop_size: 17
maxpool_kernel_size: 1
maxpool_stride: 1
second_stage_box_predictor {
mask_rcnn_box_predictor {
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.01
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch)
self.assertIsInstance(
model._feature_extractor,
frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor)
def test_create_faster_rcnn_inception_v2_model_from_config(self):
model_text_proto = """
faster_rcnn {
num_classes: 3
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_inception_v2'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.01
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch)
self.assertIsInstance(model._feature_extractor,
frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor)
def test_create_faster_rcnn_model_from_config_with_example_miner(self):
model_text_proto = """
faster_rcnn {
num_classes: 3
feature_extractor {
type: 'faster_rcnn_inception_resnet_v2'
}
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
second_stage_box_predictor {
mask_rcnn_box_predictor {
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
hard_example_miner {
num_hard_examples: 10
iou_threshold: 0.99
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
model = model_builder.build(model_proto, is_training=True)
self.assertIsNotNone(model._hard_example_miner)
def test_create_rfcn_resnet_v1_model_from_config(self):
model_text_proto = """
faster_rcnn {
num_classes: 3
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_resnet101'
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
rfcn_box_predictor {
conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.01
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
}"""
model_proto = model_pb2.DetectionModel()
text_format.Merge(model_text_proto, model_proto)
for extractor_type, extractor_class in FRCNN_RESNET_FEAT_MAPS.items():
model_proto.faster_rcnn.feature_extractor.type = extractor_type
model = model_builder.build(model_proto, is_training=True)
self.assertIsInstance(model, rfcn_meta_arch.RFCNMetaArch)
self.assertIsInstance(model._feature_extractor, extractor_class)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Detection/Efficientdet/scripts/D0 | D0 | train_AMP_8xA100-80G | #!/bin/bash
function get_dataloader_workers {
gpus=$(nvidia-smi -i 0 --query-gpu=count --format=csv,noheader)
core=$(nproc --all)
workers=$((core/gpus-2))
workers=$((workers>16?16:workers))
echo ${workers}
}
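# Hypothetical example of the calculation above: with 8 GPUs and 128 CPU cores,
# workers = 128/8 - 2 = 14, which stays below the cap of 16, so 14 workers are used.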
WORKERS=$(get_dataloader_workers)
./distributed_train.sh 8 /workspace/object_detection/datasets/coco --model efficientdet_d0 -b 150 --lr 1.63 --amp --opt fusedmomentum --warmup-epochs 50 --lr-noise 0.4 0.9 --output /model --worker ${WORKERS} --fill-color mean --model-ema --model-ema-decay 0.999 --eval-after 200 --epochs 300 --resume --smoothing 0.0 --pretrained-backbone-path /backbone_checkpoints/jocbackbone_statedict_B0.pth --memory-format nchw --sync-bn --fused-focal-loss --seed 12711 |
TensorFlow/Recommendation/WideAndDeep/scripts | scripts | preproc | #!/bin/bash
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -ge 1 ]
then
PREBATCH_SIZE=$1
else
PREBATCH_SIZE=4096
fi
echo "Starting preprocessing 1/4..."
time python -m preproc.preproc1
echo -e "Preprocessing 1/4 done.\n"
echo "Starting preprocessing 2/4..."
time python -m preproc.preproc2
echo -e "Preprocessing 2/4 done.\n"
echo "Starting preprocessing 3/4..."
time python -m preproc.preproc3
echo -e "Preprocessing 3/4 done.\n"
echo "Starting preprocessing 4/4..."
time python -m preproc.preproc4 --prebatch_size ${PREBATCH_SIZE}
echo -e "Preprocessing 4/4 done.\n"
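# Example invocation (the script path is an assumption; without an argument the
# default prebatch size of 4096 is used):
#   bash scripts/preproc.sh 8192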
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | triton | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Framework, Paths
class Triton:
"""
Triton Inference Server helper class
"""
image = "nvcr.io/nvidia/tritonserver"
tag = "py3"
class LOAD_MODE:
"""
Loading mode available in Triton
"""
POLL = "poll"
EXPLICIT = "explicit"
@staticmethod
def container_image(container_version: str):
"""
Container image based on version
Args:
container_version: Version of container to be used
Returns:
Image name with tag
"""
return f"{Triton.image}:{container_version}-{Triton.tag}"
@staticmethod
def command(
framework: str,
repository_path: str,
strict_mode: bool = False,
poll_model: bool = False,
metrics: bool = False,
verbose: bool = False,
):
"""
Command to run Triton Inference Server inside container
Args:
framework: Framework used for model
repository_path: Path to model repository
strict_mode: Flag to use strict model config
poll_model: Poll model
metrics: Enable GPU metrics (disable for MIG)
verbose: Use verbose mode logging
        Returns:
            Command string used to start Triton Inference Server inside the container
        """
triton_command = f"tritonserver --model-store={repository_path}"
if poll_model:
triton_command += " --model-control-mode=poll --repository-poll-secs 5"
else:
triton_command += " --model-control-mode=explicit"
if not strict_mode:
triton_command += " --strict-model-config=false"
if not metrics:
triton_command += " --allow-metrics=false --allow-gpu-metrics=false"
if verbose:
triton_command += " --log-verbose 1"
if framework in (Framework.TensorFlow1, Framework.TensorFlow2):
version = 1 if framework == Framework.TensorFlow1 else 2
triton_command += f" --backend-config=tensorflow,version={version}"
return triton_command
@staticmethod
def library_path(framework: str):
"""
Obtain custom library path for framework
Args:
framework: Framework used for model
Returns:
Path to additional libraries needed by framework
"""
paths = {
Framework.PyTorch.name: "/opt/tritonserver/backends/pytorch",
Framework.TensorFlow1.name: "/opt/tritonserver/backends/tensorflow1",
Framework.TensorFlow2.name: "/opt/tritonserver/backends/tensorflow2",
}
return paths[framework]
@staticmethod
def custom_library_path_remote() -> str:
"""
Path to custom library mounted in Triton container
Returns:
Path to shared library with custom operations
"""
return f"{Paths.LIBRARIES_PATH}/libcustomops.so"
@staticmethod
def custom_library_path_local(libs_dir: pathlib.Path) -> pathlib.Path:
"""
Path to custom library in local path
Args:
libs_dir: path to libraries directory
Returns:
Path to shared library with custom operations
"""
return libs_dir / "libcustomops.so"
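# A minimal usage sketch of the helper above (the repository path is a
# placeholder, not a value taken from this repository):
#
#   cmd = Triton.command(framework=Framework.TensorFlow2, repository_path="/models",
#                        verbose=True)
#   # -> "tritonserver --model-store=/models --model-control-mode=explicit
#   #     --strict-model-config=false --allow-metrics=false --allow-gpu-metrics=false
#   #     --log-verbose 1 --backend-config=tensorflow,version=2"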
|
TensorFlow/Classification/ConvNets | ConvNets | export_frozen_graph | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from utils import hvd_wrapper as hvd
from model import resnet
tf.app.flags.DEFINE_string(
    'model_name', 'resnet50', 'The name of the architecture to save. It should match the name '
    'used to train the model.')
tf.app.flags.DEFINE_integer(
'image_size', 224,
'The image size to use, otherwise use the model default_image_size.')
tf.app.flags.DEFINE_integer(
'num_classes', 1001,
'The number of classes to predict.')
tf.app.flags.DEFINE_integer(
'batch_size', None,
'Batch size for the exported model. Defaulted to "None" so batch size can '
'be specified at model runtime.')
tf.app.flags.DEFINE_string('input_format', 'NCHW',
                           'The data format of the model input (e.g. NCHW or NHWC).')
tf.app.flags.DEFINE_string('compute_format', 'NCHW',
                           'The data format used by the layers in the model (e.g. NCHW or NHWC).')
tf.app.flags.DEFINE_string('checkpoint', '',
'The trained model checkpoint.')
tf.app.flags.DEFINE_string(
'output_file', '', 'Where to save the resulting file to.')
tf.app.flags.DEFINE_bool(
'quantize', False, 'whether to use quantized graph or not.')
tf.app.flags.DEFINE_bool(
'symmetric', False, 'Using symmetric quantization or not.')
tf.app.flags.DEFINE_bool(
'use_qdq', False, 'Use quantize and dequantize op instead of fake quant op')
tf.app.flags.DEFINE_bool(
    'use_final_conv', False, 'whether to use a final conv layer in the model or not.')
tf.app.flags.DEFINE_bool('write_text_graphdef', False,
'Whether to write a text version of graphdef.')
FLAGS = tf.app.flags.FLAGS
def main(_):
hvd.init()
if not FLAGS.output_file:
raise ValueError('You must supply the path to save to with --output_file')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default() as graph:
if FLAGS.input_format=='NCHW':
input_shape = [FLAGS.batch_size, 3, FLAGS.image_size, FLAGS.image_size]
else:
input_shape = [FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 3]
input_images = tf.placeholder(name='input', dtype=tf.float32, shape=input_shape)
resnet50_config = resnet.model_architectures[FLAGS.model_name]
network = resnet.ResnetModel(FLAGS.model_name,
FLAGS.num_classes,
resnet50_config['layers'],
resnet50_config['widths'],
resnet50_config['expansions'],
FLAGS.compute_format,
FLAGS.input_format)
probs, logits = network.build_model(
input_images,
training=False,
reuse=False,
use_final_conv=FLAGS.use_final_conv)
if FLAGS.quantize:
tf.contrib.quantize.experimental_create_eval_graph(symmetric=FLAGS.symmetric,
use_qdq=FLAGS.use_qdq)
# Define the saver and restore the checkpoint
saver = tf.train.Saver()
with tf.Session() as sess:
if FLAGS.checkpoint:
saver.restore(sess, FLAGS.checkpoint)
else:
sess.run(tf.global_variables_initializer())
graph_def = graph.as_graph_def()
frozen_graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, [probs.op.name])
# Write out the frozen graph
tf.io.write_graph(
frozen_graph_def,
os.path.dirname(FLAGS.output_file),
os.path.basename(FLAGS.output_file),
as_text=FLAGS.write_text_graphdef)
if __name__ == '__main__':
tf.app.run()
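# A hypothetical command line for this script (script name and paths are assumptions):
#   python export_frozen_graph.py --model_name=resnet50 --num_classes=1001 \
#       --checkpoint=/checkpoints/model.ckpt --output_file=/results/frozen_graph.pb \
#       --input_format=NCHW --compute_format=NCHW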
|
TensorFlow2/Detection/Efficientdet | Efficientdet | train | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import os
import time
from mpi4py import MPI
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import horovod.tensorflow.keras as hvd
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
import dllogger as DLLogger
from model import anchors, callback_builder, coco_metric, dataloader
from model import efficientdet_keras, label_util, optimizer_builder, postprocess
from utils import hparams_config, model_utils, setup, train_lib, util_keras
from utils.horovod_utils import is_main_process, get_world_size, get_rank
# Model specific parameters
flags.DEFINE_string('training_mode', 'traineval', '(train/train300/traineval)')
flags.DEFINE_string(
'training_file_pattern', None,
'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model name.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_integer('batch_size', 64, 'training local batch size')
flags.DEFINE_integer('eval_batch_size', 64, 'evaluation local batch size')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
'Number of examples in one epoch (coco default is 117266)')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_bool('benchmark', False, 'Train for a fixed number of steps for performance')
flags.DEFINE_integer('benchmark_steps', 100, 'Train for these many steps to benchmark training performance')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool('use_xla', True, 'Use XLA')
flags.DEFINE_bool('amp', True, 'Enable mixed precision training')
flags.DEFINE_bool('set_num_threads', True, 'Set inter-op and intra-op parallelism threads')
flags.DEFINE_string('log_filename', 'time_log.txt', 'Filename for dllogger logs')
flags.DEFINE_integer('log_steps', 1, 'Interval of steps between logging of batch level stats')
flags.DEFINE_bool('lr_tb', False, 'Log learning rate at each step to TB')
flags.DEFINE_bool('enable_map_parallelization', True, 'Parallelize stateless map transformations in dataloader')
flags.DEFINE_integer('checkpoint_period', 10, 'Save ema model weights after every X epochs for eval')
flags.DEFINE_string('pretrained_ckpt', None,
'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string('backbone_init', None,
'Initialize backbone weights from checkpoint in this directory.')
flags.DEFINE_string(
'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
' containing attributes to use as hyperparameters.')
flags.DEFINE_float('lr', None, 'Learning rate')
flags.DEFINE_float('warmup_value', 0.0001, 'Initial warmup value')
flags.DEFINE_float('warmup_epochs', None, 'Number of warmup epochs')
flags.DEFINE_integer('seed', None, 'Random seed')
flags.DEFINE_bool('debug', False, 'Enable debug mode')
flags.DEFINE_bool('time_history', True, 'Get time history')
flags.DEFINE_bool('validate', False, 'Get validation loss after each epoch')
flags.DEFINE_string('val_file_pattern', None,
'Glob for eval tfrecords, e.g. coco/val-*.tfrecord.')
flags.DEFINE_string(
'val_json_file', None,
'COCO validation JSON containing golden bounding boxes. If None, use the '
'ground truth from the dataloader. Ignored if testdev_dir is not None.')
flags.DEFINE_string('testdev_dir', None,
                    'COCO testdev dir. If not None, ignore val_json_file.')
flags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '
'evaluation.')
FLAGS = flags.FLAGS
def main(_):
# get e2e training time
begin = time.time()
logging.info("Training started at: {}".format(time.asctime()))
hvd.init()
# Parse and override hparams
config = hparams_config.get_detection_config(FLAGS.model_name)
config.override(FLAGS.hparams)
if FLAGS.num_epochs: # NOTE: remove this flag after updating all docs.
config.num_epochs = FLAGS.num_epochs
if FLAGS.lr:
config.learning_rate = FLAGS.lr
if FLAGS.warmup_value:
config.lr_warmup_init = FLAGS.warmup_value
if FLAGS.warmup_epochs:
config.lr_warmup_epoch = FLAGS.warmup_epochs
config.backbone_init = FLAGS.backbone_init
config.mixed_precision = FLAGS.amp
config.image_size = model_utils.parse_image_size(config.image_size)
# get eval config
eval_config = hparams_config.get_detection_config(FLAGS.model_name)
eval_config.override(FLAGS.hparams)
eval_config.val_json_file = FLAGS.val_json_file
eval_config.val_file_pattern = FLAGS.val_file_pattern
eval_config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
eval_config.drop_remainder = False # eval all examples w/o drop.
eval_config.image_size = model_utils.parse_image_size(eval_config['image_size'])
# setup
setup.set_flags(FLAGS, config, training=True)
if FLAGS.debug:
tf.config.experimental_run_functions_eagerly(True)
tf.debugging.set_log_device_placement(True)
tf.random.set_seed(111111)
logging.set_verbosity(logging.DEBUG)
# Check data path
if FLAGS.training_file_pattern is None or FLAGS.val_file_pattern is None or FLAGS.val_json_file is None:
raise RuntimeError('You must specify --training_file_pattern, --val_file_pattern and --val_json_file for training.')
steps_per_epoch = (FLAGS.num_examples_per_epoch + (FLAGS.batch_size * get_world_size()) - 1) // (FLAGS.batch_size * get_world_size())
  if FLAGS.benchmark:
    # For CI perf training runs, run for a fixed number of iterations per epoch
steps_per_epoch = FLAGS.benchmark_steps
params = dict(
config.as_dict(),
model_name=FLAGS.model_name,
model_dir=FLAGS.model_dir,
steps_per_epoch=steps_per_epoch,
checkpoint_period=FLAGS.checkpoint_period,
batch_size=FLAGS.batch_size,
num_shards=get_world_size(),
val_json_file=FLAGS.val_json_file,
testdev_dir=FLAGS.testdev_dir,
mode='train')
logging.info('Training params: {}'.format(params))
# make output dir if it does not exist
tf.io.gfile.makedirs(FLAGS.model_dir)
# dllogger setup
backends = []
if is_main_process():
log_path = os.path.join(FLAGS.model_dir, FLAGS.log_filename)
backends+=[
JSONStreamBackend(verbosity=Verbosity.VERBOSE, filename=log_path),
StdOutBackend(verbosity=Verbosity.DEFAULT)]
DLLogger.init(backends=backends)
DLLogger.metadata('avg_fps_training', {'unit': 'images/s'})
DLLogger.metadata('avg_fps_training_per_GPU', {'unit': 'images/s'})
DLLogger.metadata('avg_latency_training', {'unit': 's'})
DLLogger.metadata('training_loss', {'unit': None})
DLLogger.metadata('e2e_training_time', {'unit': 's'})
def get_dataset(is_training, params):
file_pattern = (
FLAGS.training_file_pattern
if is_training else FLAGS.val_file_pattern)
if not file_pattern:
raise ValueError('No matching files.')
return dataloader.InputReader(
file_pattern,
is_training=is_training,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=config.max_instances_per_image,
enable_map_parallelization=FLAGS.enable_map_parallelization)(
params)
num_samples = (FLAGS.eval_samples + get_world_size() - 1) // get_world_size()
num_samples = (num_samples + FLAGS.eval_batch_size - 1) // FLAGS.eval_batch_size
eval_config.num_samples = num_samples
def get_eval_dataset(eval_config):
dataset = dataloader.InputReader(
FLAGS.val_file_pattern,
is_training=False,
max_instances_per_image=eval_config.max_instances_per_image)(
eval_config, batch_size=FLAGS.eval_batch_size)
dataset = dataset.shard(get_world_size(), get_rank())
dataset = dataset.take(num_samples)
return dataset
eval_dataset = get_eval_dataset(eval_config)
# pick focal loss implementation
focal_loss = train_lib.StableFocalLoss(
params['alpha'],
params['gamma'],
label_smoothing=params['label_smoothing'],
reduction=tf.keras.losses.Reduction.NONE)
model = train_lib.EfficientDetNetTrain(params['model_name'], config)
model.build((None, *config.image_size, 3))
model.compile(
optimizer=optimizer_builder.get_optimizer(params),
loss={
'box_loss':
train_lib.BoxLoss(
params['delta'], reduction=tf.keras.losses.Reduction.NONE),
'box_iou_loss':
train_lib.BoxIouLoss(
params['iou_loss_type'],
params['min_level'],
params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'],
reduction=tf.keras.losses.Reduction.NONE),
'class_loss': focal_loss,
'seg_loss':
tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True,
reduction=tf.keras.losses.Reduction.NONE)
})
train_from_epoch = util_keras.restore_ckpt(model, params['model_dir'],
config.moving_average_decay, steps_per_epoch=steps_per_epoch)
print("training_mode: {}".format(FLAGS.training_mode))
callbacks = callback_builder.get_callbacks(params, FLAGS.training_mode, eval_config, eval_dataset,
DLLogger, FLAGS.time_history, FLAGS.log_steps, FLAGS.lr_tb, FLAGS.benchmark)
history = model.fit(
get_dataset(True, params=params),
epochs=params['num_epochs'],
steps_per_epoch=steps_per_epoch,
initial_epoch=train_from_epoch,
callbacks=callbacks,
verbose=1 if is_main_process() else 0,
validation_data=get_dataset(False, params=params) if FLAGS.validate else None,
validation_steps=(FLAGS.eval_samples // FLAGS.eval_batch_size) if FLAGS.validate else None)
if is_main_process():
model.save_weights(os.path.join(FLAGS.model_dir, 'ckpt-final'))
# log final stats
stats = {}
for callback in callbacks:
if isinstance(callback, callback_builder.TimeHistory):
if callback.epoch_runtime_log:
stats['avg_fps_training'] = callback.average_examples_per_second
stats['avg_fps_training_per_GPU'] = callback.average_examples_per_second / get_world_size()
stats['avg_latency_training'] = callback.average_time_per_iteration
if history and history.history:
train_hist = history.history
#Gets final loss from training.
stats['training_loss'] = float(hvd.allreduce(tf.constant(train_hist['loss'][-1], dtype=tf.float32), average=True))
if os.path.exists(os.path.join(FLAGS.model_dir,'ema_weights')):
ckpt_epoch = "%02d" % sorted(set([int(f.rsplit('.')[0].rsplit('-')[1])
for f in os.listdir(os.path.join(FLAGS.model_dir,'ema_weights'))
if 'emackpt' in f]), reverse=True)[0]
ckpt = os.path.join(FLAGS.model_dir, 'ema_weights', 'emackpt-' + str(ckpt_epoch))
util_keras.restore_ckpt(model, ckpt, eval_config.moving_average_decay,
steps_per_epoch=0, skip_mismatch=False, expect_partial=True)
if is_main_process():
model.save(os.path.join(FLAGS.model_dir, 'emackpt-final'))
else:
ckpt_epoch = 'final'
ckpt = os.path.join(FLAGS.model_dir, 'ckpt-' + ckpt_epoch)
if is_main_process():
model.save(os.path.join(FLAGS.model_dir, 'ckpt-' + ckpt_epoch))
# Start evaluation of final ema checkpoint
logging.set_verbosity(logging.WARNING)
@tf.function
def model_fn(images, labels):
cls_outputs, box_outputs = model(images, training=False)
detections = postprocess.generate_detections(eval_config, cls_outputs, box_outputs,
labels['image_scales'],
labels['source_ids'])
tf.numpy_function(evaluator.update_state,
[labels['groundtruth_data'],
postprocess.transform_detections(detections)], [])
  if not FLAGS.benchmark and (FLAGS.training_mode == 'train' or FLAGS.num_epochs < 200):
# Evaluator for AP calculation.
label_map = label_util.get_label_map(eval_config.label_map)
evaluator = coco_metric.EvaluationMetric(
filename=eval_config.val_json_file, label_map=label_map)
evaluator.reset_states()
# evaluate all images.
pbar = tf.keras.utils.Progbar(num_samples)
for i, (images, labels) in enumerate(eval_dataset):
model_fn(images, labels)
if is_main_process():
pbar.update(i)
# gather detections from all ranks
evaluator.gather()
if is_main_process():
# compute the final eval results.
metrics = evaluator.result()
metric_dict = {}
for i, name in enumerate(evaluator.metric_names):
metric_dict[name] = metrics[i]
if label_map:
for i, cid in enumerate(sorted(label_map.keys())):
name = 'AP_/%s' % label_map[cid]
metric_dict[name] = metrics[i + len(evaluator.metric_names)]
# csv format
csv_metrics = ['AP','AP50','AP75','APs','APm','APl']
csv_format = ",".join([str(ckpt_epoch)] + [str(round(metric_dict[key] * 100, 2)) for key in csv_metrics])
print(FLAGS.model_name, metric_dict, "csv format:", csv_format)
DLLogger.log(step=(), data={'epoch': ckpt_epoch,
'validation_accuracy_mAP': round(metric_dict['AP'] * 100, 2)})
DLLogger.flush()
MPI.COMM_WORLD.Barrier()
if is_main_process():
stats['e2e_training_time'] = time.time() - begin
DLLogger.log(step=(), data=stats)
DLLogger.flush()
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
app.run(main)
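# A hypothetical single-node launch of this script (file patterns and paths are
# placeholders; multi-GPU runs are typically wrapped with mpirun or horovodrun):
#   python train.py --model_name=efficientdet-d0 --model_dir=/tmp/efficientdet-d0 \
#       --training_file_pattern=/data/train-*.tfrecord \
#       --val_file_pattern=/data/val-*.tfrecord \
#       --val_json_file=/data/annotations/instances_val2017.json \
#       --batch_size=64 --num_epochs=300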
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner | runner | start_NVIDIA-T4 | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Install Docker
. /etc/os-release && \
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \
echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey| apt-key add - && \
curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \
apt-get update && \
apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2
# Install packages
pip install -r triton/runner/requirements.txt
# Evaluate Runner
python3 -m "triton.runner.__main__" \
--config-path "triton/runner/config_NVIDIA-T4.yaml" \
--device 0 |
TensorFlow2/Recommendation/DLRM_and_DCNv2/tests/feature_specs | feature_specs | wider_dtypes | channel_spec:
categorical:
- cat_0.bin
- cat_1.bin
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
- cat_6.bin
- cat_7.bin
- cat_8.bin
- cat_9.bin
- cat_10.bin
- cat_11.bin
- cat_12.bin
- cat_13.bin
- cat_14.bin
- cat_15.bin
- cat_16.bin
- cat_17.bin
- cat_18.bin
- cat_19.bin
- cat_20.bin
- cat_21.bin
- cat_22.bin
- cat_23.bin
- cat_24.bin
- cat_25.bin
label:
- label
numerical: &id001
- num_0
- num_1
- num_2
- num_3
- num_4
- num_5
- num_6
- num_7
- num_8
- num_9
- num_10
- num_11
- num_12
feature_spec:
cat_0.bin:
cardinality: 7912889
dtype: int32
cat_1.bin:
cardinality: 33823
dtype: int32
cat_10.bin:
cardinality: 582469
dtype: int32
cat_11.bin:
cardinality: 245828
dtype: int32
cat_12.bin:
cardinality: 11
dtype: int32
cat_13.bin:
cardinality: 2209
dtype: int32
cat_14.bin:
cardinality: 10667
dtype: int16
cat_15.bin:
cardinality: 104
dtype: int16
cat_16.bin:
cardinality: 4
dtype: int8
cat_17.bin:
cardinality: 968
dtype: int32
cat_18.bin:
cardinality: 15
dtype: int32
cat_19.bin:
cardinality: 8165896
dtype: int32
cat_2.bin:
cardinality: 17139
dtype: int16
cat_20.bin:
cardinality: 2675940
dtype: int32
cat_21.bin:
cardinality: 7156453
dtype: int32
cat_22.bin:
cardinality: 302516
dtype: int32
cat_23.bin:
cardinality: 12022
dtype: int16
cat_24.bin:
cardinality: 97
dtype: int32
cat_25.bin:
cardinality: 35
dtype: int8
cat_3.bin:
cardinality: 7339
dtype: int16
cat_4.bin:
cardinality: 20046
dtype: int16
cat_5.bin:
cardinality: 4
dtype: int8
cat_6.bin:
cardinality: 7105
dtype: int16
cat_7.bin:
cardinality: 1382
dtype: int32
cat_8.bin:
cardinality: 63
dtype: int8
cat_9.bin:
cardinality: 5554114
dtype: int32
label:
dtype: bool
num_0:
dtype: float16
num_1:
dtype: float16
num_10:
dtype: float16
num_11:
dtype: float16
num_12:
dtype: float16
num_2:
dtype: float16
num_3:
dtype: float16
num_4:
dtype: float16
num_5:
dtype: float16
num_6:
dtype: float16
num_7:
dtype: float16
num_8:
dtype: float16
num_9:
dtype: float16
metadata: {}
source_spec:
test:
- features: *id001
files:
- test/numerical.bin
type: split_binary
- features:
- label
files:
- test/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- test/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- test/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- test/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- test/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- test/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- test/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- test/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- test/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- test/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- test/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- test/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- test/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- test/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- test/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- test/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- test/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- test/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- test/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- test/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- test/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- test/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- test/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- test/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- test/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- test/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- test/cat_25.bin
type: split_binary
train:
- features: *id001
files:
- train/numerical.bin
type: split_binary
- features:
- label
files:
- train/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- train/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- train/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- train/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- train/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- train/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- train/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- train/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- train/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- train/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- train/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- train/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- train/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- train/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- train/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- train/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- train/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- train/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- train/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- train/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- train/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- train/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- train/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- train/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- train/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- train/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- train/cat_25.bin
type: split_binary
|
TensorFlow/Classification/ConvNets/resnext101-32x4d/training | training | DGXA100_RNxt101-32x4d_AMP_90E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=resnext101-32x4d \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \
--batch_size=256 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=6.103515625e-05 \
--amp --static_loss_scale 128 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit | deployment_toolkit | args | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from typing import Callable, Dict, List, Optional, Union
from .core import GET_ARGPARSER_FN_NAME, load_from_file
LOGGER = logging.getLogger(__name__)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict:
signature = inspect.signature(fn)
parameters_names = list(signature.parameters)
if isinstance(args, argparse.Namespace):
args = vars(args)
args = {k: v for k, v in args.items() if k in parameters_names}
return args
def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser:
parser.conflict_handler = "resolve"
signature = inspect.signature(fn)
for parameter in signature.parameters.values():
if parameter.name in ["self", "args", "kwargs"]:
continue
argument_kwargs = {}
if parameter.annotation != inspect.Parameter.empty:
is_optional = is_optional_generic(parameter.annotation)
if is_optional:
annotation = parameter.annotation.__args__[
0
] # Optional[cls] will be changed into Union[cls, None]
else:
annotation = parameter.annotation
is_list = is_list_generic(annotation)
is_dict = is_dict_generic(annotation)
if parameter.annotation == bool:
argument_kwargs["type"] = str2bool
argument_kwargs["choices"] = [0, 1]
elif is_list:
argument_kwargs["type"] = annotation.__args__[0] # List[cls] -> cls
elif is_dict:
raise RuntimeError(
f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}"
)
else:
argument_kwargs["type"] = annotation
if parameter.default != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["default"] = str2bool(parameter.default)
else:
argument_kwargs["default"] = parameter.default
else:
argument_kwargs["required"] = True
name = parameter.name.replace("_", "-")
LOGGER.debug(f"Adding argument {name} with {argument_kwargs}")
parser.add_argument(f"--{name}", **argument_kwargs)
return parser
class ArgParserGenerator:
def __init__(self, cls_or_fn, module_path: Optional[str] = None):
self._cls_or_fn = cls_or_fn
init_method_name = "__init__"
self._handle = (
cls_or_fn
if inspect.isfunction(cls_or_fn)
else getattr(cls_or_fn, init_method_name, None)
)
input_is_python_file = module_path and module_path.endswith(".py")
self._input_path = module_path if input_is_python_file else None
self._required_fn_name_for_signature_parsing = getattr(
cls_or_fn, "required_fn_name_for_signature_parsing", None
)
def update_argparser(self, parser):
name = self._handle.__name__
group_parser = parser.add_argument_group(name)
add_args_for_fn_signature(group_parser, fn=self._handle)
self._update_argparser(group_parser)
def get_args(self, args: argparse.Namespace):
filtered_args = filter_fn_args(args, fn=self._handle)
tmp_parser = argparse.ArgumentParser(allow_abbrev=False)
self._update_argparser(tmp_parser)
custom_names = [
p.dest.replace("-", "_")
for p in tmp_parser._actions
if not isinstance(p, argparse._HelpAction)
]
custom_params = {n: getattr(args, n) for n in custom_names}
filtered_args = {**filtered_args, **custom_params}
return filtered_args
def from_args(self, args: Union[argparse.Namespace, Dict]):
args = self.get_args(args)
LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})")
return self._cls_or_fn(**args)
def _update_argparser(self, parser):
label = "argparser_update"
if self._input_path:
update_argparser_handle = load_from_file(
self._input_path, label=label, target=GET_ARGPARSER_FN_NAME
)
if update_argparser_handle:
update_argparser_handle(parser)
elif self._required_fn_name_for_signature_parsing:
fn_handle = load_from_file(
self._input_path,
label=label,
target=self._required_fn_name_for_signature_parsing,
)
if fn_handle:
add_args_for_fn_signature(parser, fn_handle)
def is_optional_generic(type_):
from typing_inspect import is_optional_type
return is_optional_type(type_)
def is_list_generic(type_):
from typing_inspect import get_args, get_origin, is_generic_type
is_optional = is_optional_generic(type_)
if is_optional:
type_, _ = get_args(type_, evaluate=True)
return is_generic_type(type_) and get_origin(type_) in [list, List]
def is_dict_generic(type_):
from typing_inspect import get_args, get_origin, is_generic_type
is_optional = is_optional_generic(type_)
if is_optional:
type_, _ = get_args(type_, evaluate=True)
return is_generic_type(type_) and get_origin(type_) in [dict, Dict]
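# A minimal sketch of how ArgParserGenerator can be wired into a CLI; `load_model`
# below is a hypothetical function, not part of this module:
#
#   def load_model(checkpoint_dir: str, precision: str = "fp32"):
#       ...
#
#   parser = argparse.ArgumentParser()
#   ArgParserGenerator(load_model).update_argparser(parser)  # adds --checkpoint-dir, --precision
#   args = parser.parse_args()
#   model = ArgParserGenerator(load_model).from_args(args)   # calls load_model(**filtered_args)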
|
PyTorch/LanguageModeling/BERT/triton/large/runner | runner | config_NVIDIA-DGX-1-(1x-V100-32GB) | checkpoints:
- name: large-qa
url: https://api.ngc.nvidia.com/v2/models/nvidia/bert_pyt_ckpt_large_qa_squad11_amp/versions/19.09.0/zip
configurations:
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: none
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: none
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: none
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: none
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
- 8
- 16
batch_sizes: 1 8 16
capture_cuda_graph: 0
checkpoint_variant: large-qa
export_format: ts-trace
export_precision: fp16
format: ts-trace
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
container_version: '21.10'
datasets:
- name: data
datasets_dir: datasets
framework: PyTorch
model_name: BERT
triton_container_image: null
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
TensorFlow/Segmentation/UNet_Industrial/scripts/benchmarking | benchmarking | UNet_trainbench_AMP_8GPU | #!/usr/bin/env bash
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches UNet training benchmark in TF-AMP on 8 GPUs with a global batch size of 16 (2 per GPU)
# Usage ./UNet_trainbench_AMP_8GPU.sh <path to dataset> <dagm classID (1-10)>
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export TF_CPP_MIN_LOG_LEVEL=3
# Cleaning up for benchmark
RESULT_DIR="/tmp"
rm -rf "${RESULT_DIR}"
mpirun \
-np 8 \
-H localhost:8 \
-bind-to none \
-map-by slot \
-x NCCL_DEBUG=VERSION \
-x LD_LIBRARY_PATH \
-x PATH \
-mca pml ob1 -mca btl ^openib \
--allow-run-as-root \
python "${BASEDIR}/../../main.py" \
--unet_variant='tinyUNet' \
--activation_fn='relu' \
--exec_mode='training_benchmark' \
--iter_unit='batch' \
--num_iter=1500 \
--batch_size=2 \
--warmup_step=500 \
--results_dir="${RESULT_DIR}" \
--data_dir="${1}" \
--dataset_name='DAGM2007' \
--dataset_classID="${2}" \
--data_format='NCHW' \
--use_auto_loss_scaling \
--amp \
--xla \
--learning_rate=1e-4 \
--learning_rate_decay_factor=0.8 \
--learning_rate_decay_steps=500 \
--rmsprop_decay=0.9 \
--rmsprop_momentum=0.8 \
--loss_fn_name='adaptive_loss' \
--weight_decay=1e-5 \
--weight_init_method='he_uniform' \
--augment_data \
--display_every=250 \
--debug_verbosity=0
|
TensorFlow/Classification/ConvNets/dataprep | dataprep | process_bounding_boxes | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
This script is called as
process_bounding_boxes.py <dir> [synsets-file]
Where <dir> is a directory containing the downloaded and unpacked bounding box
data. If [synsets-file] is supplied, then only the bounding boxes whose
synsets are contained within this file are returned. Note that the
[synsets-file] file contains synset ids, one per line.
The script dumps out a CSV text file in which each line contains an entry.
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
The entry can be read as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
The bounding box for <JPEG file name> contains two points (xmin, ymin) and
(xmax, ymax) specifying the lower-left corner and upper-right corner of a
bounding box in *relative* coordinates.
The user supplies a directory where the XML files reside. The directory
structure in the directory <dir> is assumed to look like this:
<dir>/nXXXXXXXX/nXXXXXXXX_YYYY.xml
Each XML file contains a bounding box annotation. The script:
(1) Parses the XML file and extracts the filename, label and bounding box info.
(2) The bounding box is specified in the XML files as integer (xmin, ymin) and
(xmax, ymax) *relative* to image size displayed to the human annotator. The
size of the image displayed to the human annotator is stored in the XML file
as integer (height, width).
Note that the displayed size will differ from the actual size of the image
downloaded from image-net.org. To make the bounding box annotation usable,
we convert bounding box to floating point numbers relative to displayed
height and width of the image.
Note that each XML file might contain N bounding box annotations.
Note that the points are all clamped to the range [0.0, 1.0] because some
human annotations extend outside the range of the supplied image.
See details here: http://image-net.org/download-bboxes
(3) By default, the script outputs all valid bounding boxes. If a
[synsets-file] is supplied, only the subset of bounding boxes associated
with those synsets are outputted. Importantly, one can supply a list of
synsets in the ImageNet Challenge and output the list of bounding boxes
associated with the training images of the ILSVRC.
We use these bounding boxes to inform the random distortion of images
supplied to the network.
If you run this script successfully, you will see the following output
to stderr:
> Finished processing 544546 XML files.
> Skipped 0 XML files not in ImageNet Challenge.
> Skipped 0 bounding boxes not in ImageNet Challenge.
> Wrote 615299 bounding boxes from 544546 annotated images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import sys
import xml.etree.ElementTree as ET
class BoundingBox(object):
pass
def GetItem(name, root, index=0):
count = 0
for item in root.iter(name):
if count == index:
return item.text
count += 1
# Failed to find "index" occurrence of item.
return -1
def GetInt(name, root, index=0):
# In some XML annotation files, the point values are not integers, but floats.
# So we add a float function to avoid ValueError.
return int(float(GetItem(name, root, index)))
def FindNumberBoundingBoxes(root):
index = 0
while True:
if GetInt('xmin', root, index) == -1:
break
index += 1
return index
def ProcessXMLAnnotation(xml_file):
"""Process a single XML file containing a bounding box."""
# pylint: disable=broad-except
try:
tree = ET.parse(xml_file)
except Exception:
print('Failed to parse: ' + xml_file, file=sys.stderr)
return None
# pylint: enable=broad-except
root = tree.getroot()
num_boxes = FindNumberBoundingBoxes(root)
boxes = []
for index in range(num_boxes):
box = BoundingBox()
# Grab the 'index' annotation.
box.xmin = GetInt('xmin', root, index)
box.ymin = GetInt('ymin', root, index)
box.xmax = GetInt('xmax', root, index)
box.ymax = GetInt('ymax', root, index)
box.width = GetInt('width', root)
box.height = GetInt('height', root)
box.filename = GetItem('filename', root) + '.JPEG'
box.label = GetItem('name', root)
xmin = float(box.xmin) / float(box.width)
xmax = float(box.xmax) / float(box.width)
ymin = float(box.ymin) / float(box.height)
ymax = float(box.ymax) / float(box.height)
# Some images contain bounding box annotations that
# extend outside of the supplied image. See, e.g.
# n03127925/n03127925_147.xml
# Additionally, for some bounding boxes, the min > max
# or the box is entirely outside of the image.
min_x = min(xmin, xmax)
max_x = max(xmin, xmax)
box.xmin_scaled = min(max(min_x, 0.0), 1.0)
box.xmax_scaled = min(max(max_x, 0.0), 1.0)
min_y = min(ymin, ymax)
max_y = max(ymin, ymax)
box.ymin_scaled = min(max(min_y, 0.0), 1.0)
box.ymax_scaled = min(max(max_y, 0.0), 1.0)
boxes.append(box)
return boxes
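# Worked example (hypothetical numbers) of the scaling above: for a displayed size
# of width=500 and height=375, an annotated box (xmin=25, ymin=-10, xmax=480,
# ymax=370) becomes (25/500, max(-10/375, 0.0), 480/500, 370/375)
# = (0.05, 0.0, 0.96, 0.9867), i.e. scaled by the displayed size and clamped to [0.0, 1.0].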
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
print('Invalid usage\n'
'usage: process_bounding_boxes.py <dir> [synsets-file]',
file=sys.stderr)
sys.exit(-1)
xml_files = glob.glob(sys.argv[1] + '/*/*.xml')
print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]),
file=sys.stderr)
if len(sys.argv) == 3:
labels = set([l.strip() for l in open(sys.argv[2]).readlines()])
print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]),
file=sys.stderr)
else:
labels = None
skipped_boxes = 0
skipped_files = 0
saved_boxes = 0
saved_files = 0
for file_index, one_file in enumerate(xml_files):
# Example: <...>/n06470073/n00141669_6790.xml
label = os.path.basename(os.path.dirname(one_file))
# Determine if the annotation is from an ImageNet Challenge label.
if labels is not None and label not in labels:
skipped_files += 1
continue
bboxes = ProcessXMLAnnotation(one_file)
assert bboxes is not None, 'No bounding boxes found in ' + one_file
found_box = False
for bbox in bboxes:
if labels is not None:
if bbox.label != label:
# Note: There is a slight bug in the bounding box annotation data.
# Many of the dog labels have the human label 'Scottish_deerhound'
# instead of the synset ID 'n02092002' in the bbox.label field. As a
# simple hack to overcome this issue, we only exclude bbox labels
# *which are synset ID's* that do not match original synset label for
# the XML file.
if bbox.label in labels:
skipped_boxes += 1
continue
# Guard against improperly specified boxes.
if (bbox.xmin_scaled >= bbox.xmax_scaled or
bbox.ymin_scaled >= bbox.ymax_scaled):
skipped_boxes += 1
continue
# Note bbox.filename occasionally contains '%s' in the name. This is
# data set noise that is fixed by just using the basename of the XML file.
image_filename = os.path.splitext(os.path.basename(one_file))[0]
print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' %
(image_filename,
bbox.xmin_scaled, bbox.ymin_scaled,
bbox.xmax_scaled, bbox.ymax_scaled))
saved_boxes += 1
found_box = True
if found_box:
saved_files += 1
else:
skipped_files += 1
if not file_index % 5000:
print('--> processed %d of %d XML files.' %
(file_index + 1, len(xml_files)),
file=sys.stderr)
print('--> skipped %d boxes and %d XML files.' %
(skipped_boxes, skipped_files), file=sys.stderr)
print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr)
print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files,
file=sys.stderr)
print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes,
file=sys.stderr)
print('Wrote %d bounding boxes from %d annotated images.' %
(saved_boxes, saved_files),
file=sys.stderr)
print('Finished.', file=sys.stderr)
|