relative_path | section | filename | text
---|---|---|---|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit | deployment_toolkit | utils | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from enum import Enum
from typing import Any, Dict, Tuple
LOGGER = logging.getLogger(__name__)
class TritonClientProtocol(Enum):
"""Describe protocol with which client communicates with Triton"""
GRPC = "grpc"
HTTP = "http"
def parse_server_url(server_url: str) -> Tuple[TritonClientProtocol, str, int]:
DEFAULT_PORTS = {
TritonClientProtocol.HTTP: 8000,
TritonClientProtocol.GRPC: 8001,
}
# extract protocol
server_url_items = server_url.split("://")
if len(server_url_items) != 2:
raise ValueError("Prefix server_url with protocol ex.: grpc://127.0.0.1:8001")
requested_protocol, server_url = server_url_items
requested_protocol = TritonClientProtocol(requested_protocol.lower())
if requested_protocol not in DEFAULT_PORTS:
raise ValueError(f"Unsupported protocol: {requested_protocol}")
# extract host and port
default_port = DEFAULT_PORTS[requested_protocol]
server_url_items = server_url.split(":")
if len(server_url_items) == 1:
host, port = server_url, default_port
elif len(server_url_items) == 2:
host, port = server_url_items
port = int(port)
if port != default_port:
LOGGER.warning(
f"Current server URL is {server_url} while default {requested_protocol} port is {default_port}"
)
else:
raise ValueError(f"Could not parse {server_url}. Example of correct server URL: grpc://127.0.0.1:8001")
return requested_protocol, host, port
def log_dict(title: str, dict_: Dict[str, Any]):
LOGGER.info(title)
for key, value in dict_.items():
LOGGER.info(f"\t{key} = {value}")
|
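A minimal usage sketch for `parse_server_url` and `log_dict` from the module above. The import path is an assumption based on the directory layout and may need adjusting to how the toolkit is actually installed.

```python
# Hypothetical usage of the helpers defined above; the import path assumes the
# deployment_toolkit package is importable as laid out in the repository.
import logging

from deployment_toolkit.utils import TritonClientProtocol, log_dict, parse_server_url

logging.basicConfig(level=logging.INFO)

protocol, host, port = parse_server_url("grpc://127.0.0.1:8001")
assert protocol is TritonClientProtocol.GRPC
assert (host, port) == ("127.0.0.1", 8001)

# Omitting the port falls back to the protocol default (8000 for HTTP, 8001 for gRPC).
protocol, host, port = parse_server_url("http://localhost")
assert port == 8000

log_dict("Parsed server URL", {"protocol": protocol.value, "host": host, "port": port})
```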
PyTorch/SpeechSynthesis/FastPitch/scripts/mandarin_chinese | mandarin_chinese | inference | #!/usr/bin/env bash
set -a
bash scripts/download_models.sh waveglow
PYTHONIOENCODING=utf-8
: ${BATCH_SIZE:=20}
: ${FILELIST:="filelists/sf_test.tsv"}
: ${FASTPITCH:="output_sf/FastPitch_checkpoint_1000.pt"}
: ${OUTPUT_DIR:="output_sf/audio_sf_test_fastpitch1000ep_waveglow_denoise0.01"}
# Disable HiFi-GAN and enable WaveGlow
HIFIGAN=""
WAVEGLOW="pretrained_models/waveglow/nvidia_waveglow256pyt_fp16.pt"
bash scripts/inference_example.sh "$@"
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks | networks | classification_test | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.networks import classification
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class ClassificationTest(keras_parameterized.TestCase):
def test_network_creation(self):
"""Validate that the Keras object can be created."""
input_width = 512
num_classes = 10
test_object = classification.Classification(
input_width=input_width, num_classes=num_classes)
# Create a 2-dimensional input (the first dimension is implicit).
cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
output = test_object(cls_data)
# Validate that the outputs are of the expected shape.
expected_output_shape = [None, num_classes]
self.assertEqual(expected_output_shape, output.shape.as_list())
def test_network_invocation(self):
"""Validate that the Keras object can be invoked."""
input_width = 512
num_classes = 10
test_object = classification.Classification(
input_width=input_width, num_classes=num_classes, output='predictions')
# Create a 2-dimensional input (the first dimension is implicit).
cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
output = test_object(cls_data)
# Invoke the network as part of a Model.
model = tf.keras.Model(cls_data, output)
input_data = 10 * np.random.random_sample((3, input_width))
_ = model.predict(input_data)
def test_network_invocation_with_internal_logits(self):
"""Validate that the logit outputs are correct."""
input_width = 512
num_classes = 10
test_object = classification.Classification(
input_width=input_width, num_classes=num_classes, output='predictions')
# Create a 2-dimensional input (the first dimension is implicit).
cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
output = test_object(cls_data)
model = tf.keras.Model(cls_data, output)
logits_model = tf.keras.Model(test_object.inputs, test_object.logits)
batch_size = 3
input_data = 10 * np.random.random_sample((batch_size, input_width))
outputs = model.predict(input_data)
logits = logits_model.predict(input_data)
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_classes)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertEqual(expected_output_shape, logits.shape)
# Ensure that the logits, when softmaxed, create the outputs.
input_tensor = tf.keras.Input(expected_output_shape[1:])
output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor)
softmax_model = tf.keras.Model(input_tensor, output_tensor)
calculated_softmax = softmax_model.predict(logits)
self.assertAllClose(outputs, calculated_softmax)
def test_network_invocation_with_internal_and_external_logits(self):
"""Validate that the logit outputs are correct."""
input_width = 512
num_classes = 10
test_object = classification.Classification(
input_width=input_width, num_classes=num_classes, output='logits')
# Create a 2-dimensional input (the first dimension is implicit).
cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
output = test_object(cls_data)
model = tf.keras.Model(cls_data, output)
logits_model = tf.keras.Model(test_object.inputs, test_object.logits)
batch_size = 3
input_data = 10 * np.random.random_sample((batch_size, input_width))
outputs = model.predict(input_data)
logits = logits_model.predict(input_data)
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_classes)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertEqual(expected_output_shape, logits.shape)
self.assertAllClose(outputs, logits)
def test_network_invocation_with_logit_output(self):
"""Validate that the logit outputs are correct."""
input_width = 512
num_classes = 10
test_object = classification.Classification(
input_width=input_width, num_classes=num_classes, output='predictions')
logit_object = classification.Classification(
input_width=input_width, num_classes=num_classes, output='logits')
logit_object.set_weights(test_object.get_weights())
# Create a 2-dimensional input (the first dimension is implicit).
cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
output = test_object(cls_data)
logit_output = logit_object(cls_data)
model = tf.keras.Model(cls_data, output)
logits_model = tf.keras.Model(cls_data, logit_output)
batch_size = 3
input_data = 10 * np.random.random_sample((batch_size, input_width))
outputs = model.predict(input_data)
logits = logits_model.predict(input_data)
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_classes)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertEqual(expected_output_shape, logits.shape)
# Ensure that the logits, when softmaxed, create the outputs.
input_tensor = tf.keras.Input(expected_output_shape[1:])
output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor)
softmax_model = tf.keras.Model(input_tensor, output_tensor)
calculated_softmax = softmax_model.predict(logits)
self.assertAllClose(outputs, calculated_softmax)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
network = classification.Classification(
input_width=128,
num_classes=10,
initializer='zeros',
output='predictions')
# Create another network object from the first object's config.
new_network = classification.Classification.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
def test_unknown_output_type_fails(self):
with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
_ = classification.Classification(
input_width=128, num_classes=10, output='bad')
if __name__ == '__main__':
tf.test.main()
|
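The consistency checks in the tests above reduce to the relation `predictions = log_softmax(logits)`; a small standalone NumPy sketch of that relation (independent of the Keras test harness):

```python
# Illustration of the relation asserted by the tests above: the 'predictions'
# output of the classification head is the log-softmax of the 'logits' output.
import numpy as np


def log_softmax(x, axis=-1):
    # Numerically stable log-softmax: shift by the row maximum before exponentiating.
    shifted = x - x.max(axis=axis, keepdims=True)
    return shifted - np.log(np.exp(shifted).sum(axis=axis, keepdims=True))


logits = np.random.randn(3, 10)
predictions = log_softmax(logits)
# Exponentiating the predictions recovers a probability distribution per example.
assert np.allclose(np.exp(predictions).sum(axis=-1), 1.0)
```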
CUDA-Optimized/FastSpeech/fastspeech/trt/plugins/repeat | repeat | RepeatPlugin | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the NVIDIA CORPORATION nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "NvInfer.h"
#include "NvInferRuntimeCommon.h"
#include <iostream>
#include <cstring>
#include <assert.h>
using namespace std;
using namespace nvinfer1;
class RepeatPlugin: public IPluginV2IOExt {
public:
RepeatPlugin() = delete;
RepeatPlugin(int maxOutputLength) {
m.maxOutputLength = maxOutputLength;
}
RepeatPlugin(const void *buffer, size_t length) {
memcpy(&m, buffer, sizeof(m));
}
virtual size_t getSerializationSize() const override {
return sizeof(m);
}
virtual void serialize(void *buffer) const override {
memcpy(buffer, &m, sizeof(m));
}
nvinfer1::IPluginV2Ext* clone() const override {
return new RepeatPlugin(&m, sizeof(m));
}
int getNbOutputs() const override {
return 1;
}
nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* pInputDim, int nInputDim) override {
int t = m.maxOutputLength;
int w = pInputDim[0].d[1];
return nvinfer1::Dims2(t, w);
}
size_t getWorkspaceSize(int nBatch) const override {return 0;}
int enqueue(int nBatch, const void * const *inputs, void **outputs, void* workspace, cudaStream_t stream) override;
int initialize() override {return 0;}
void terminate() override {}
void destroy() override { delete this; }
void setPluginNamespace(const char* szNamespace) override {}
const char* getPluginNamespace() const override {return "";}
const char* getPluginType() const override {return "RepeatPlugin";}
const char* getPluginVersion() const override {return "0.0.1";}
void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) override
{
m.inputDim = in[0].dims;
m.dataType = in[0].type;
}
bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const override
{
bool condition = inOut[pos].format == TensorFormat::kLINEAR;
switch (pos) {
case 0: // input seq
condition &= ((inOut[pos].type == DataType::kFLOAT) // for seq in fp32
|| (inOut[pos].type == DataType::kHALF) // for seq in fp16
|| (inOut[pos].type == DataType::kINT32)); // for seq_mask
break;
case 1: // repeat count
condition &= ((inOut[pos].type == DataType::kFLOAT));
break;
case 2: // output seq
condition &= ((inOut[pos].type == inOut[0].type)); // the same type as the input
break;
}
return condition;
}
DataType getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const override
{
return inputTypes[0];
}
bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const override
{
return false;
}
bool canBroadcastInputAcrossBatch(int inputIndex) const override
{
return false;
}
private:
struct {
Dims inputDim;
DataType dataType;
int maxOutputLength;
} m;
};
class RepeatPluginCreator : public nvinfer1::IPluginCreator {
public:
nvinfer1::IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override {
return new RepeatPlugin(serialData, serialLength);
}
const char* getPluginName() const override {return "RepeatPlugin";}
const char* getPluginVersion() const override {return "0.0.1";}
void setPluginNamespace(const char* szNamespace) override {}
const char* getPluginNamespace() const override {return "";}
const nvinfer1::PluginFieldCollection* getFieldNames() override {
std::cout << __FUNCTION__ << std::endl;
return nullptr;
}
nvinfer1::IPluginV2* createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) override {
int maxOutputLength = 0;
for (int i = 0; i < fc->nbFields; i++) {
if (!strcmp(fc->fields[i].name, "maxOutputLength")) {
maxOutputLength = *(int *)fc->fields[i].data;
}
}
return new RepeatPlugin(maxOutputLength);
}
};
|
PyTorch/SpeechRecognition/QuartzNet/scripts | scripts | download_quartznet | #!/usr/bin/env bash
set -e
: ${LANGUAGE:=${1:-en}}
: ${MODEL_DIR:="pretrained_models/quartznet_${LANGUAGE}"}
case $LANGUAGE in
en)
MODEL="nvidia_quartznet_210504.pt"
MODEL_ZIP="quartznet_pyt_ckpt_amp_21.03.0.zip"
MODEL_URL="https://api.ngc.nvidia.com/v2/models/nvidia/quartznet_pyt_ckpt_amp/versions/21.03.0/zip"
;;
ca|de|es|fr|it|pl|ru|zh)
MODEL="stt_${LANGUAGE}_quartznet15x5.nemo"
MODEL_URL="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_${LANGUAGE}_quartznet15x5/versions/1.0.0rc1/zip"
MODEL_ZIP="stt_${LANGUAGE}_quartznet15x5_1.0.0rc1.zip"
;;
*)
echo "Unsupported language $LANGUAGE"
exit 1
;;
esac
mkdir -p "$MODEL_DIR"
if [ ! -f "${MODEL_DIR}/${MODEL_ZIP}" ]; then
echo "Downloading ${MODEL_ZIP} ..."
wget -O ${MODEL_DIR}/${MODEL_ZIP} ${MODEL_URL} \
|| { echo "ERROR: Failed to download ${MODEL_ZIP} from NGC"; exit 1; }
fi
if [ ! -f "${MODEL_DIR}/${MODEL}" ]; then
echo "Extracting ${MODEL} ..."
unzip -o ${MODEL_DIR}/${MODEL_ZIP} -d ${MODEL_DIR} \
|| { echo "ERROR: Failed to extract ${MODEL_ZIP}"; exit 1; }
echo "OK"
else
echo "${MODEL} already downloaded."
fi
|
PyTorch/SpeechSynthesis/FastPitch/common/text | text | symbols | """ from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or for text that has been run through Unidecode. For other data, you can modify the character sets defined in get_symbols below. See TRAINING_DATA.md for details. '''
from .cmudict import valid_symbols
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in valid_symbols]
def get_symbols(symbol_set='english_basic'):
if symbol_set == 'english_basic':
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet
elif symbol_set == 'english_basic_lowercase':
_pad = '_'
_punctuation = '!\'"(),.:;? '
_special = '-'
_letters = 'abcdefghijklmnopqrstuvwxyz'
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet
elif symbol_set == 'english_expanded':
_punctuation = '!\'",.:;? '
_math = '#%&*+-/[]()'
_special = '_@©°½—₩€$'
_accented = 'áçéêëñöøćž'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
symbols = list(_punctuation + _math + _special + _accented + _letters) + _arpabet
elif symbol_set == 'english_mandarin_basic':
from .zh.chinese import chinese_punctuations, valid_symbols as mandarin_valid_symbols
# Prepend "#" to mandarin phonemes to ensure uniqueness (some are the same as uppercase letters):
_mandarin_phonemes = ['#' + s for s in mandarin_valid_symbols]
_pad = '_'
_punctuation = '!\'(),.:;? '
_chinese_punctuation = ["#" + p for p in chinese_punctuations]
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet + _mandarin_phonemes + _chinese_punctuation
else:
raise Exception("{} symbol set does not exist".format(symbol_set))
return symbols
def get_pad_idx(symbol_set='english_basic'):
if symbol_set in {'english_basic', 'english_basic_lowercase', 'english_mandarin_basic'}:
return 0
else:
raise Exception("{} symbol set not used yet".format(symbol_set))
|
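A short usage sketch for the symbol tables defined above: build a symbol-to-id mapping from `get_symbols` and use `get_pad_idx` for padding. The import path is assumed from the FastPitch `common/text` package layout.

```python
# Hypothetical usage of get_symbols / get_pad_idx from the module above; the
# import path assumes FastPitch's common.text package is on the Python path.
from common.text.symbols import get_pad_idx, get_symbols

symbols = get_symbols('english_basic')
pad_idx = get_pad_idx('english_basic')  # 0 -- '_' is the first symbol in the set

symbol_to_id = {s: i for i, s in enumerate(symbols)}

# Encode a short string character by character. ARPAbet entries such as '@AA1'
# would come from a grapheme-to-phoneme step, not from this naive loop.
text = "Hello, world!"
ids = [symbol_to_id[ch] for ch in text if ch in symbol_to_id]
print(ids)
```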
PyTorch/SpeechSynthesis/HiFiGAN/platform | platform | DGX1_HiFi-GAN_FP32_1GPU | #!/bin/bash
set -a
: ${NUM_GPUS:=1}
: ${BATCH_SIZE:=32}
: ${GRAD_ACCUMULATION:=4}
: ${AMP:=false}
bash scripts/train_lj22khz.sh "$@"
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/trtis_client/src/bin | bin | trtis_client | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "TRTISClient.hpp"
#include "WaveFileWriter.hpp"
#include <chrono>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
using highres_clock = std::chrono::high_resolution_clock;
using time_type = std::chrono::high_resolution_clock::time_point;
namespace
{
double timeElapsed(const time_type& start, const time_type& end)
{
return std::chrono::duration_cast<std::chrono::microseconds>(end - start)
.count()
/ 1000000.0;
}
std::vector<std::string> loadInputs(const std::string filename)
{
std::ifstream fin(filename);
if (!fin.good()) {
throw std::runtime_error("Failed to open '" + filename + "'.");
}
fin.exceptions(std::ifstream::badbit);
std::vector<std::string> data;
std::string line;
while (std::getline(fin, line)) {
data.emplace_back(line);
}
return data;
}
} // namespace
int main(int argc, const char** argv)
{
std::string url("localhost:8000");
if (argc < 2 || argc > 3) {
std::cerr << "Invalid number of arguments: " << (argc - 1) << std::endl;
std::cerr << "Usage:" << std::endl;
std::cerr << "\t" << argv[0] << " <input file> <batch size>" << std::endl;
return 1;
}
const std::string inputFile(argv[1]);
int batchSize = 1;
if (argc == 3) {
batchSize = std::stol(argv[2]);
}
TRTISClient client(url);
try {
const std::vector<std::string> inputs = loadInputs(inputFile);
size_t totalChars = 0;
for (const std::string& seq : inputs) {
totalChars += seq.size();
}
time_type start = highres_clock::now();
std::vector<std::vector<float>> outputs
= client.execute(inputs, batchSize, false);
time_type stop = highres_clock::now();
size_t totalSamples = 0;
for (const std::vector<float>& sample : outputs) {
totalSamples += sample.size();
}
const double audioDuration = static_cast<double>(totalSamples) / 22050.0;
const double duration = timeElapsed(start, stop);
std::cout << "Total Processing time: " << duration << " sec" << std::endl;
std::cout << "Processed " << inputs.size() << " sequences for a total of "
<< audioDuration << " seconds of audio:" << std::endl;
std::cout << "\t" << (totalChars / duration) << " symbols / sec."
<< std::endl;
std::cout << "\t" << (totalSamples / duration) << " samples / sec."
<< std::endl;
for (size_t i = 0; i < outputs.size(); ++i) {
WaveFileWriter::write(
"./audio/" + std::to_string(i + 1) + ".wav",
22050,
outputs[i].data(),
outputs[i].size());
}
} catch (const std::exception& e) {
std::cerr << "Exception: " << e.what() << std::endl;
return 1;
}
return 0;
}
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda | cuda | ROIAlign_cuda | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename U, typename T>
__device__ T bilinear_interpolate(const U* bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases where the sampling point is outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename U, typename T>
__device__ void SingleSampleRoIAlignForward(
const U* bottom_data, const T spatial_scale, const int height, const int width, // per level
const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio,
const T* bottom_rois, U* top_data,
size_t index // per loop iteration
)
{
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const U* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
// rois in math type (float). This is because ROIs come in as float.
// TODO: Change other blocks producing ROI to support half type as well
template <typename U, typename T>
__global__ void RoIAlignForward(const int nthreads,
const U* bottom_data, const T spatial_scale, const int height, const int width, // per-level arguments
const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio,
const T* bottom_rois, U* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
SingleSampleRoIAlignForward(
bottom_data, spatial_scale, height, width,
channels, pooled_height, pooled_width, sampling_ratio,
bottom_rois, top_data,
index);
}
}
template <typename U, typename T>
__device__ T bilinear_interpolate_nhwc(const U* bottom_data,
const int height, const int width, const int channels,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases where the sampling point is outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[channels * (y_low * width + x_low)];
T v2 = bottom_data[channels * (y_low * width + x_high)];
T v3 = bottom_data[channels * (y_high * width + x_low)];
T v4 = bottom_data[channels * (y_high * width + x_high)];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename U, typename T>
__device__ void SingleSampleRoIAlignForwardNHWC(
const U* bottom_data, const T spatial_scale, const int height, const int width, // per level
const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio,
const T* bottom_rois, U* top_data,
size_t index // per loop iteration
)
{
// (n, ph, pw, c) is an element in the pooled output
int c = index % channels;
int pw = (index / channels) % pooled_width;
int ph = (index / channels / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const U* offset_bottom_data = bottom_data + (roi_batch_ind * channels * height * width + c);
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate_nhwc(offset_bottom_data, height, width, channels, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
// rois in math type (float). This is because ROIs come in as float.
// TODO: Change other blocks producing ROI to support half type as well
template <typename U, typename T>
__global__ void RoIAlignForwardNHWC(const int nthreads,
const U* bottom_data, const T spatial_scale, const int height, const int width, // per level
const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio,
const T* bottom_rois, U* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
SingleSampleRoIAlignForwardNHWC(
bottom_data, spatial_scale, height, width,
channels, pooled_height, pooled_width, sampling_ratio,
bottom_rois, top_data,
index);
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high,
const int index /* index for debug only*/) {
// deal with cases where the sampling point is outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename U, typename T>
__device__ void SingleSampleRoIAlignBackwardFeature(
const U* top_diff,
const T spatial_scale, const int height, const int width, U* bottom_diff, // per level
const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio,
const T* bottom_rois,
size_t index // per loop iteration
)
{
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
U* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const U* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
{
atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
}
template <typename U, typename T>
__global__ void RoIAlignBackwardFeature(const int nthreads, const U* top_diff,
const T spatial_scale, const int height, const int width, U* bottom_diff, // per level
const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio,
const T* bottom_rois
)
{
CUDA_1D_KERNEL_LOOP(index, nthreads) {
SingleSampleRoIAlignBackwardFeature(top_diff,
spatial_scale, height, width, bottom_diff,
channels, pooled_height, pooled_width, sampling_ratio,
bottom_rois,
index);
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackwardFeature
template <typename U, typename T>
__device__ void SingleSampleRoIAlignBackwardFeatureNHWC(const U* top_diff,
const T spatial_scale, const int height, const int width, U* bottom_diff, // per level
const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio,
const T* bottom_rois,
size_t index // per loop iteration
)
{
// (n, ph, pw, c) is an element in the pooled output
int c = index % channels;
int pw = (index / channels) % pooled_width;
int ph = (index / channels / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
U* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels * height * width + c);
int top_offset = n * channels * pooled_height * pooled_width + c;
const U* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[channels * (ph * pooled_width + pw)];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
{
atomicAdd(offset_bottom_diff + channels * (y_low * width + x_low), static_cast<T>(g1));
atomicAdd(offset_bottom_diff + channels * (y_low * width + x_high), static_cast<T>(g2));
atomicAdd(offset_bottom_diff + channels * (y_high * width + x_low), static_cast<T>(g3));
atomicAdd(offset_bottom_diff + channels * (y_high * width + x_high), static_cast<T>(g4));
} // if
} // ix
} // iy
}
template <typename U, typename T>
__global__ void RoIAlignBackwardFeatureNHWC(const int nthreads, const U* top_diff,
const T spatial_scale, const int height, const int width, U* bottom_diff, // per level
const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio,
const T* bottom_rois
)
{
CUDA_1D_KERNEL_LOOP(index, nthreads) {
SingleSampleRoIAlignBackwardFeatureNHWC(top_diff,
spatial_scale,height,width,bottom_diff,
channels,pooled_height,pooled_width,sampling_ratio,
bottom_rois,
index);
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackwardFeatureNHWC
at::Tensor ROIAlign_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const bool is_nhwc) {
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = is_nhwc ? at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()).contiguous(at::MemoryFormat::ChannelsLast) : at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (output.numel() == 0) {
C10_CUDA_CHECK(cudaGetLastError());
return output;
}
int gridSize;
int blockSize;
cudaOccupancyMaxPotentialBlockSize(&gridSize,
&blockSize,
(void*) RoIAlignForward<float, float>,
0, // dynamic memory
0); // maximum utilized threads
dim3 grid(gridSize);
dim3 block(blockSize);
//TODO: Math type is hard coded to float assuming double is not used, if needed, add a case for double as well.
//In case of double, it should be <double, double>, not <double, float>
//TODO: ROIs come in as float, fix other blocks so they come in as same type as input.
if (!is_nhwc){
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] {
RoIAlignForward<scalar_t, float><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
height,
width,
channels,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<float>(),
output.data_ptr<scalar_t>());
});
}
else{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] {
RoIAlignForwardNHWC<scalar_t, float><<<grid, block, 0, stream>>>(
output_size,
input.contiguous(at::MemoryFormat::ChannelsLast).data_ptr<scalar_t>(),
spatial_scale,
height,
width,
channels,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<float>(),
output.data_ptr<scalar_t>());
});
}
C10_CUDA_CHECK(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
// NHWC + layout transposes are faster than NCHW, so just keep the NHWC implementation for backward pass
at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio,
const bool is_nhwc) {
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = is_nhwc ? at::zeros({batch_size, channels, height, width}, grad.options()).contiguous(at::MemoryFormat::ChannelsLast) : at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// handle possibly empty gradients
if (grad.numel() == 0) {
C10_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
int gridSize;
int blockSize;
cudaOccupancyMaxPotentialBlockSize(&gridSize,
&blockSize,
(void*) RoIAlignBackwardFeature<float, float>,
0, // dynamic memory
0); // maximum utilized threads
dim3 grid(gridSize);
dim3 block(blockSize);
//TODO: Math type is hard coded to float assuming double is not used, if needed, add a case for double as well.
//In case of double, it should be <double, double>, not <double, float>
//TODO: ROIs come in as float, fix other blocks so they come in as same type as input.
if (!is_nhwc){
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] {
RoIAlignBackwardFeature<scalar_t, float><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
spatial_scale,
height,
width,
grad_input.data_ptr<scalar_t>(),
channels,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<float>());
});
}
else{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] {
RoIAlignBackwardFeatureNHWC<scalar_t, float><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous(at::MemoryFormat::ChannelsLast).data_ptr<scalar_t>(),
spatial_scale,
height,
width,
grad_input.data_ptr<scalar_t>(),
channels,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<float>());
});
}
C10_CUDA_CHECK(cudaGetLastError());
return grad_input;
} |
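For reference, a NumPy sketch of the bilinear interpolation performed by `bilinear_interpolate` in the kernels above, with the same clamping of sample coordinates to the feature-map boundary. It illustrates the math only and is not a replacement for the CUDA code.

```python
# NumPy illustration of the bilinear interpolation used by the RoIAlign kernels
# above: clamp the sample point, read the four surrounding pixels and blend them
# by the fractional offsets. Purely illustrative; the CUDA kernel is the reference.
import numpy as np


def bilinear_interpolate(data, y, x):
    height, width = data.shape
    if y < -1.0 or y > height or x < -1.0 or x > width:
        return 0.0  # sample point is outside the feature map
    y = max(y, 0.0)
    x = max(x, 0.0)
    y_low, x_low = int(y), int(x)
    if y_low >= height - 1:
        y_high = y_low = height - 1
        y = float(y_low)
    else:
        y_high = y_low + 1
    if x_low >= width - 1:
        x_high = x_low = width - 1
        x = float(x_low)
    else:
        x_high = x_low + 1
    ly, lx = y - y_low, x - x_low
    hy, hx = 1.0 - ly, 1.0 - lx
    v1, v2 = data[y_low, x_low], data[y_low, x_high]
    v3, v4 = data[y_high, x_low], data[y_high, x_high]
    return hy * hx * v1 + hy * lx * v2 + ly * hx * v3 + ly * lx * v4


feature_map = np.arange(16, dtype=np.float32).reshape(4, 4)
print(bilinear_interpolate(feature_map, 1.5, 2.5))  # 8.5, the blend of 6, 7, 10, 11
```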
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/resnet50 | resnet50 | __init__ | from .conv2d_block import Conv2DBlock
from .bottleneck_block import BottleneckBlock
from .bottleneck_group import BottleneckGroup
from .resnet import ResNet50
|
Tools/PyTorch/TimeSeriesPredictionPlatform/training | training | ema | # Copyright 2021-2022 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 Ross Wightman
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Exponential Moving Average (EMA) of model updates
"""
import logging
from copy import deepcopy
import torch
import torch.nn as nn
_logger = logging.getLogger(__name__)
class ModelEmaV2(nn.Module):
""" Model Exponential Moving Average V2
Keep a moving average of everything in the model state_dict (parameters and buffers).
V2 of this module is simpler; it does not match params/buffers based on name but simply
iterates in order. It works with torchscript (JIT of full model).
"""
def __init__(self, model, decay=0.999, device=None):
super(ModelEmaV2, self).__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if self.device is not None:
self.module.to(device=device)
def update(self, model):
update_fn = lambda ema_v, model_v: self.decay * ema_v + (1.0 - self.decay) * model_v
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(update_fn(ema_v, model_v))
def set(self, model):
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(model_v)
def forward(self, x):
return self.module(x)
|
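A minimal training-loop sketch for `ModelEmaV2` above. The model, data, output path, and import path are placeholders; only the EMA bookkeeping mirrors the class as defined.

```python
# Hypothetical usage of ModelEmaV2 in a training loop; the model, data and the
# import path below are placeholders chosen for illustration.
import torch
import torch.nn as nn

from training.ema import ModelEmaV2  # assumed import path

model = nn.Linear(16, 1)
ema = ModelEmaV2(model, decay=0.999)  # keeps a deep copy of the model in eval mode
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for step in range(100):
    x = torch.randn(32, 16)
    y = torch.randn(32, 1)
    loss = nn.functional.mse_loss(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update(model)  # the EMA copy trails the raw weights with the given decay

# Validate or checkpoint with the smoothed weights.
with torch.no_grad():
    smoothed_pred = ema(torch.randn(4, 16))
torch.save(ema.module.state_dict(), "model_ema.pt")  # example output path
```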
PyTorch/Recommendation/DLRM/preproc | preproc | prepare_dataset | #! /bin/bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Examples:
# to run on GPU with a frequency limit of 3 using NVTabular:
# ./prepare_dataset.sh 3 GPU NVTabular
#
# to run on GPU with a frequency limit of 15 using Spark GPU:
# ./prepare_dataset.sh 15 GPU Spark
#
# to run on CPU with a frequency limit of 15 using Spark CPU:
# ./prepare_dataset.sh 15 CPU
set -e
set -x
ls -ltrash
rm -rf /data/dlrm/spark
rm -rf /data/dlrm/intermediate_binary
rm -rf /data/dlrm/output
rm -rf /data/dlrm/criteo_parquet
rm -rf /data/dlrm/binary_dataset
download_dir=${download_dir:-'/data/dlrm/criteo'}
./verify_criteo_downloaded.sh ${download_dir}
output_path=${output_path:-'/data/dlrm/output'}
if [ "$3" = "NVTabular" ]; then
echo "Performing NVTabular preprocessing"
./run_NVTabular.sh ${download_dir} ${output_path} $1
preprocessing_version=NVTabular
else
if [ -f ${output_path}/train/_SUCCESS ] \
&& [ -f ${output_path}/validation/_SUCCESS ] \
&& [ -f ${output_path}/test/_SUCCESS ]; then
echo "Spark preprocessing already carried out"
else
echo "Performing spark preprocessing"
./run_spark.sh $2 ${download_dir} ${output_path} $1
fi
preprocessing_version=Spark
fi
conversion_intermediate_dir=${conversion_intermediate_dir:-'/data/dlrm/intermediate_binary'}
final_output_dir=${final_output_dir:-'/data/dlrm/binary_dataset'}
source ${DGX_VERSION}_config.sh
if [ -d ${final_output_dir}/train ] \
&& [ -d ${final_output_dir}/validation ] \
&& [ -d ${final_output_dir}/test ] \
&& [ -f ${final_output_dir}/feature_spec.yaml ]; then
echo "Final conversion already done"
else
echo "Performing final conversion to a custom data format"
python parquet_to_binary.py --parallel_jobs ${TOTAL_CORES} --src_dir ${output_path} \
--intermediate_dir ${conversion_intermediate_dir} \
--dst_dir ${final_output_dir}
cp "${output_path}/model_size.json" "${final_output_dir}/model_size.json"
python split_dataset.py --dataset "${final_output_dir}" --output "${final_output_dir}/split"
rm ${final_output_dir}/train_data.bin
rm ${final_output_dir}/validation_data.bin
rm ${final_output_dir}/test_data.bin
rm ${final_output_dir}/model_size.json
mv ${final_output_dir}/split/* ${final_output_dir}
rm -rf ${final_output_dir}/split
fi
echo "Done preprocessing the Criteo Kaggle Dataset"
|
TensorFlow2/LanguageModeling/ELECTRA/data | data | NVIDIAPretrainedWeightDownloader | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class NVIDIAPretrainedWeightDownloader:
def __init__(self, save_path):
self.save_path = save_path + '/nvidia_pretrained_weights'
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
def download(self):
assert False, 'NVIDIAPretrainedWeightDownloader not implemented yet.' |
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/layers | layers | convBatchNormCreator | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "convBatchNormCreator.h"
#include "layerData.h"
#include "trtUtils.h"
#include "NvInfer.h"
#include <cmath>
#include <stdexcept>
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const float EPS = 1e-5f;
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
ILayer* ConvBatchNormCreator::add(INetworkDefinition& network, ITensor* const input, const LayerData& convData,
const LayerData& normData, const std::string& activation, const std::string& name)
{
// base the number of channels based on the output size of the batch norm
const int numChannels = static_cast<int>(normData.get("bias").count);
// CONVOLUTION //////////////////////////////////////////////////////////////
const std::vector<float>& convWeight = newVector(static_cast<const float*>(convData.get("weight").values),
static_cast<const float*>(convData.get("weight").values) + convData.get("weight").count);
const std::vector<float>& convBias = newVector(static_cast<const float*>(convData.get("bias").values),
static_cast<const float*>(convData.get("bias").values) + convData.get("bias").count);
#if NV_TENSORRT_MAJOR < 7
IConvolutionLayer* const convLayer = network.addConvolution(
*input, numChannels, DimsHW(5, 1), TRTUtils::toWeights(convWeight), TRTUtils::toWeights(convBias));
convLayer->setPadding({2, 0});
#else
IConvolutionLayer* const convLayer = network.addConvolutionNd(
*input, numChannels, Dims2(5, 1), TRTUtils::toWeights(convWeight), TRTUtils::toWeights(convBias));
convLayer->setPaddingNd(Dims2(2, 0));
#endif
convLayer->setName((name + ".conv_layer").c_str());
ITensor* const batchInput = convLayer->getOutput(0);
// BATCH NORM ///////////////////////////////////////////////////////////////
// create vectors
std::vector<float>& negativeMeanWeights = newVector(static_cast<const float*>(normData.get("running_mean").values),
static_cast<const float*>(normData.get("running_mean").values) + normData.get("running_mean").count);
std::vector<float>& scaleWeights = newVector(static_cast<const float*>(normData.get("weight").values),
static_cast<const float*>(normData.get("weight").values) + normData.get("weight").count);
const std::vector<float>& normBias = newVector(static_cast<const float*>(normData.get("bias").values),
static_cast<const float*>(normData.get("bias").values) + normData.get("bias").count);
const Weights emptyWeights{DataType::kFLOAT, nullptr, 0};
// check input
if (negativeMeanWeights.size() != scaleWeights.size())
{
throw std::runtime_error("Mismatch between 'running_mean' and 'weight' sizes: "
+ std::to_string(negativeMeanWeights.size()) + " " + std::to_string(scaleWeights.size()) + ".");
}
if (static_cast<size_t>(normData.get("running_var").count) != scaleWeights.size())
{
throw std::runtime_error("Size of 'running_var' does not match 'running_mean':"
+ std::to_string(normData.get("running_var").count) + " vs. " + std::to_string(scaleWeights.size()));
}
// create negative mean values
for (float& val : negativeMeanWeights)
{
val = -val;
}
// compute scaling matrix
// weight / sqrt(var(x) + eps)
const float* varWeights = static_cast<const float*>(normData.get("running_var").values);
for (size_t i = 0; i < scaleWeights.size(); ++i)
{
const float den = std::sqrt(varWeights[i] + EPS);
scaleWeights[i] /= den;
}
// x - mean(x)
ILayer* const shiftedLayer = network.addScale(
*batchInput, ScaleMode::kCHANNEL, TRTUtils::toWeights(negativeMeanWeights), emptyWeights, emptyWeights);
shiftedLayer->setName((name + ".shift").c_str());
// ((x - mean(x)) / sqrt(var(x) + eps)) * weight + bias
ILayer* const scaleLayer = network.addScale(*shiftedLayer->getOutput(0), ScaleMode::kCHANNEL,
TRTUtils::toWeights(normBias), TRTUtils::toWeights(scaleWeights), emptyWeights);
scaleLayer->setName((name + ".scale").c_str());
ITensor* const actInput = scaleLayer->getOutput(0);
// ACTIVATION ///////////////////////////////////////////////////////////////
ILayer* outputLayer;
if (activation == "relu")
{
outputLayer = network.addActivation(*actInput, ActivationType::kRELU);
outputLayer->setName((name + ".relu").c_str());
}
else if (activation == "tanh")
{
outputLayer = network.addActivation(*actInput, ActivationType::kTANH);
outputLayer->setName((name + ".tanh").c_str());
}
else if (activation == "none")
{
outputLayer = scaleLayer;
}
else
{
throw std::runtime_error("Unknown activation '" + activation + "'.");
}
return outputLayer;
}
/******************************************************************************
* PRIVATE METHODS ************************************************************
*****************************************************************************/
std::vector<float>& ConvBatchNormCreator::newVector(const float* const begin, const float* const end)
{
mData.emplace_back(new std::vector<float>(begin, end));
return *mData.back().get();
}
} // namespace tts
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark | maskrcnn_benchmark | __init__ | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
TensorFlow/LanguageModeling/BERT/triton/scripts | scripts | export_model | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
init_checkpoint=${1:-"data/download/nvidia_pretrained/bert_tf_squad11_large_384/model.ckpt"}
batch_size=${2:-"8"}
precision=${3:-"fp16"}
use_xla=${4:-"true"}
seq_length=${5:-"384"}
doc_stride=${6:-"128"}
BERT_DIR=${7:-"data/download/nvidia_pretrained/bert_tf_pretraining_large_lamb"}
triton_model_version=${8:-1}
triton_model_name=${9:-"bert"}
triton_dyn_batching_delay=${10:-0}
triton_engine_count=${11:-1}
triton_model_overwrite=${12:-"False"}
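# Example invocation (script path relative to the BERT directory is assumed;
# the values simply restate the defaults above):
# bash triton/scripts/export_model.sh \
# data/download/nvidia_pretrained/bert_tf_squad11_large_384/model.ckpt \
# 8 fp16 true 384 128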
additional_args="--triton_model_version=$triton_model_version --triton_model_name=$triton_model_name --triton_max_batch_size=$batch_size \
--triton_model_overwrite=$triton_model_overwrite --triton_dyn_batching_delay=$triton_dyn_batching_delay \
--triton_engine_count=$triton_engine_count"
if [ "$precision" = "fp16" ] ; then
echo "fp16 activated!"
additional_args="$additional_args --amp"
else
echo "fp32/tf32 activated!"
additional_args="$additional_args --noamp"
fi
if [ "$use_xla" = "true" ] ; then
echo "XLA activated"
additional_args="$additional_args --use_xla"
else
additional_args="$additional_args --nouse_xla"
fi
echo "Additional args: $additional_args"
bash scripts/docker/launch.sh \
python run_squad.py \
--vocab_file=${BERT_DIR}/vocab.txt \
--bert_config_file=${BERT_DIR}/bert_config.json \
--init_checkpoint=${init_checkpoint} \
--max_seq_length=${seq_length} \
--doc_stride=${doc_stride} \
--predict_batch_size=${batch_size} \
--output_dir=/results \
--export_triton=True \
${additional_args}
|
PyTorch/Translation/GNMT/seq2seq/models | models | gnmt | # Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch.nn as nn
import seq2seq.data.config as config
from seq2seq.models.decoder import ResidualRecurrentDecoder
from seq2seq.models.encoder import ResidualRecurrentEncoder
from seq2seq.models.seq2seq_base import Seq2Seq
class GNMT(Seq2Seq):
"""
GNMT v2 model
"""
def __init__(self, vocab_size, hidden_size=1024, num_layers=4, dropout=0.2,
batch_first=False, share_embedding=True):
"""
Constructor for the GNMT v2 model.
:param vocab_size: size of vocabulary (number of tokens)
:param hidden_size: internal hidden size of the model
:param num_layers: number of layers, applies to both encoder and
decoder
:param dropout: probability of dropout (in encoder and decoder)
        :param batch_first: if True the model uses (batch, seq, feature) tensors,
            if False the model uses (seq, batch, feature)
:param share_embedding: if True embeddings are shared between encoder
and decoder
"""
super(GNMT, self).__init__(batch_first=batch_first)
if share_embedding:
embedder = nn.Embedding(vocab_size, hidden_size,
padding_idx=config.PAD)
nn.init.uniform_(embedder.weight.data, -0.1, 0.1)
else:
embedder = None
self.encoder = ResidualRecurrentEncoder(vocab_size, hidden_size,
num_layers, dropout,
batch_first, embedder)
self.decoder = ResidualRecurrentDecoder(vocab_size, hidden_size,
num_layers, dropout,
batch_first, embedder)
def forward(self, input_encoder, input_enc_len, input_decoder):
context = self.encode(input_encoder, input_enc_len)
context = (context, input_enc_len, None)
output, _, _ = self.decode(input_decoder, context)
return output
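# Usage sketch (shapes and sizes below are illustrative, not taken from the
# training scripts in this repository):
#
#     model = GNMT(vocab_size=32000, hidden_size=1024, num_layers=4,
#                  dropout=0.2, batch_first=False, share_embedding=True)
#     # src: (seq, batch) source token ids, src_len: (batch,) source lengths,
#     # tgt: (seq, batch) target token ids (shifted right for teacher forcing)
#     logits = model(src, src_len, tgt)   # -> (seq, batch, vocab_size)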
|
PyTorch/SpeechRecognition/Jasper/triton/model_repo_configs/fp16/jasper-ts-trace | jasper-ts-trace | config | name: "jasper-ts-trace"
platform: "pytorch_libtorch"
default_model_filename: "model.pt"
max_batch_size: 8#MAX_BATCH
input [
{
name: "input__0"
data_type: TYPE_FP16
dims: [64, -1]
}
]
output [
{
name: "output__0"
data_type: TYPE_FP16
dims: [-1, 29]
}
]
instance_group {
count: 1#NUM_ENGINES
gpus: 0
kind: KIND_GPU
}
#db#dynamic_batching {
#db# preferred_batch_size: 8#MAX_BATCH
#db# max_queue_delay_microseconds: #MAX_QUEUE
#db#}
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton | triton | run_inference_on_triton | #!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model deployed on Triton, you can use `run_inference_on_triton.py` script.
It sends requests with data obtained from the data loader pointed to by `--dataloader` and dumps the received data into dump files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
The client communicates with the Triton server over GRPC, asynchronously by default; pass `--synchronous` to use blocking calls instead.
Example call:
```shell script
python ./triton/run_inference_on_triton.py \
--server-url localhost:8001 \
--model-name ResNet50 \
--model-version 1 \
--dump-labels \
--output-dir /results/dump_triton
```
"""
import argparse
import functools
import logging
import queue
import threading
import time
import traceback
from pathlib import Path
from typing import Optional
from tqdm import tqdm
# pytype: disable=import-error
try:
from tritonclient import utils as client_utils # noqa: F401
from tritonclient.grpc import InferenceServerClient, InferInput, InferRequestedOutput
except ImportError:
from tritongrpcclient import InferenceServerClient, InferInput, InferRequestedOutput
# pytype: enable=import-error
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file
from .deployment_toolkit.dump import JsonDumpWriter
LOGGER = logging.getLogger("run_inference_on_triton")
class SyncGRPCTritonRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
resp_wait_s: Optional[float] = None,
):
self._server_url = server_url
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s
def __iter__(self):
client = InferenceServerClient(self._server_url, verbose=self._verbose)
error = self._verify_triton_state(client)
if error:
raise RuntimeError(f"Could not communicate to Triton Server: {error}")
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
outputs_req = [InferRequestedOutput(name) for name in outputs]
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
infer_input = InferInput(name, data.shape, inputs[name].datatype)
target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
results = client.infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
client_timeout=self._response_wait_t,
)
y_pred = {name: results.as_numpy(name) for name in output_names}
yield ids, x, y_pred, y_real
def _verify_triton_state(self, triton_client):
if not triton_client.is_server_live():
return f"Triton server {self._server_url} is not live"
elif not triton_client.is_server_ready():
return f"Triton server {self._server_url} is not ready"
elif not triton_client.is_model_ready(self._model_name, self._model_version):
return f"Model {self._model_name}:{self._model_version} is not ready"
return None
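# Usage sketch: a runner is an iterable that sends one request per batch from
# the dataloader and yields (ids, inputs, predictions, labels). The dataloader
# must yield (ids, x, y_real) tuples where x is a dict of numpy arrays keyed
# by model input name. Values below are illustrative:
#
#     runner = SyncGRPCTritonRunner(
#         "localhost:8001", "ResNet50", "1", dataloader=dataloader_fn())
#     for ids, x, y_pred, y_real in runner:
#         ...  # post-process y_pred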
class AsyncGRPCTritonRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
DEFAULT_MAX_UNRESP_REQS = 128
DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
resp_wait_s: Optional[float] = None,
max_unresponded_reqs: Optional[int] = None,
):
self._server_url = server_url
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s
self._max_unresp_reqs = self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_reqs is None else max_unresponded_reqs
self._results = queue.Queue()
self._processed_all = False
self._errors = []
self._num_waiting_for = 0
self._sync = threading.Condition()
self._req_thread = threading.Thread(target=self.req_loop, daemon=True)
def __iter__(self):
self._req_thread.start()
timeout_s = 0.050 # check flags processed_all and error flags every 50ms
while True:
try:
ids, x, y_pred, y_real = self._results.get(timeout=timeout_s)
yield ids, x, y_pred, y_real
except queue.Empty:
shall_stop = self._processed_all or self._errors
if shall_stop:
break
LOGGER.debug("Waiting for request thread to stop")
self._req_thread.join()
if self._errors:
error_msg = "\n".join(map(str, self._errors))
raise RuntimeError(error_msg)
def _on_result(self, ids, x, y_real, output_names, result, error):
with self._sync:
request_id = str(ids[0])
NOT_MATCHING_REQUEST_ID_MSG = (
"Error during processing result - request_id doesn't match. This shouldn't have happened."
)
if error:
response_id = error.get_response().id
if response_id != request_id:
raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG)
self._errors.append(error)
else:
response_id = result.get_response().id
if response_id != request_id:
raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG)
y_pred = {name: result.as_numpy(name) for name in output_names}
self._results.put((ids, x, y_pred, y_real))
self._num_waiting_for -= 1
self._sync.notify_all()
def req_loop(self):
client = InferenceServerClient(self._server_url, verbose=self._verbose)
self._errors = self._verify_triton_state(client)
if self._errors:
return
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
self._num_waiting_for = 0
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
infer_input = InferInput(name, data.shape, inputs[name].datatype)
target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
outputs_req = [InferRequestedOutput(name) for name in outputs]
with self._sync:
def _check_can_send():
return self._num_waiting_for < self._max_unresp_reqs
can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t)
if not can_send:
error_msg = f"Runner could not send new requests for {self._response_wait_t}s"
self._errors.append(error_msg)
self._sync.notify_all()
break
request_id = str(ids[0])
callback = functools.partial(AsyncGRPCTritonRunner._on_result, self, ids, x, y_real, output_names)
client.async_infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
callback=callback,
request_id=request_id,
)
self._num_waiting_for += 1
self._sync.notify_all()
# wait till receive all requested data
with self._sync:
def _all_processed():
LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs")
return self._num_waiting_for == 0
self._processed_all = self._sync.wait_for(_all_processed, self.DEFAULT_MAX_FINISH_WAIT_S)
if not self._processed_all:
error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server"
self._errors.append(error_msg)
self._sync.notify_all()
LOGGER.debug("Finished request thread")
def _verify_triton_state(self, triton_client):
errors = []
if not triton_client.is_server_live():
errors.append(f"Triton server {self._server_url} is not live")
elif not triton_client.is_server_ready():
errors.append(f"Triton server {self._server_url} is not ready")
elif not triton_client.is_model_ready(self._model_name, self._model_version):
errors.append(f"Model {self._model_name}:{self._model_version} is not ready")
return errors
def _parse_args():
parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False)
parser.add_argument(
"--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)"
)
parser.add_argument("--model-name", help="The name of the model used for inference.", required=True)
parser.add_argument("--model-version", help="The version of the model used for inference.", required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=True)
parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved")
parser.add_argument(
"--response-wait-time", required=False, help="Maximal time to wait for response", default=120, type=float
)
parser.add_argument(
"--max-unresponded-requests",
required=False,
help="Maximal number of unresponded requests",
default=128,
type=int,
)
parser.add_argument(
"--synchronous", help="Enable synchronous calls to Triton Server", action="store_true", default=False
)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args()
return args
def main():
args = _parse_args()
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
log_level = logging.INFO if not args.verbose else logging.DEBUG
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
try:
if args.synchronous:
runner = SyncGRPCTritonRunner(
args.server_url,
args.model_name,
args.model_version,
dataloader=dataloader_fn(),
verbose=False,
resp_wait_s=args.response_wait_time,
)
else:
runner = AsyncGRPCTritonRunner(
args.server_url,
args.model_name,
args.model_version,
dataloader=dataloader_fn(),
verbose=False,
resp_wait_s=args.response_wait_time,
max_unresponded_reqs=args.max_unresponded_requests,
)
except Exception as e:
message = traceback.format_exc()
LOGGER.error(f"Encountered exception \n{message}")
raise e
with JsonDumpWriter(output_dir=args.output_dir) as writer:
start = time.time()
for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
data = _verify_and_format_dump(args, ids, x, y_pred, y_real)
writer.write(**data)
stop = time.time()
LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
if __name__ == "__main__":
main()
|
PyTorch/SpeechRecognition/wav2vec2/common | common | dataset | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from torch.utils.data import DataLoader
from common.fairseq.data import data_utils
from common.helpers import print_once
from common.sampler import DistributedIndicesSampler
def adjust_max_tokens(train_dataset, world_size, args):
def get_steps_per_epoch(world_size, max_tokens, update_freq):
train_loader, sampler = get_batch_iterator(
train_dataset,
True,
max_tokens=max_tokens,
max_sentences=args.batch_size,
max_positions=(max_tokens, max_tokens),
ignore_invalid_inputs=True,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=world_size,
shard_id=0,
num_workers=args.num_workers)
steps_per_epoch = len(train_loader) // update_freq
return steps_per_epoch
steps_ref = get_steps_per_epoch(args.ref_world_size, args.ref_max_tokens, 1)
min_ = args.ref_max_tokens // 20
max_ = args.ref_max_tokens * 20
prev_max_tokens = 0
align_to = 1000
while min_ < max_:
        max_tokens = (max_ + min_) // 2 // align_to * align_to  # round down to a multiple of align_to
if max_tokens == prev_max_tokens:
break
prev_max_tokens = max_tokens
steps = get_steps_per_epoch(world_size, max_tokens, args.update_freq)
print_once(f"max_tokens={max_tokens} yields {steps} steps "
f"(adjusting for {steps_ref}).")
if steps == steps_ref:
break
elif steps > steps_ref:
min_ = max_tokens
else:
max_ = max_tokens
args.max_tokens = max_tokens
args.max_tokens_valid = max_tokens
def filter_indices_by_size(
indices, dataset, max_positions=None, ignore_invalid_inputs=False
):
"""
Filter examples that are too large
Args:
indices (np.array): original array of sample indices
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
Returns:
np.array: array of filtered sample indices
"""
indices, ignored = dataset.filter_indices_by_size(indices, max_positions)
# TODO: consider removing this function. If `len(ignored) > 0`,
# an error is raised in fairseq dataset code, both in sup and unsup case
if len(ignored) > 0:
if not ignore_invalid_inputs:
raise Exception(
(
"Size of sample #{} is invalid (={}) since max_positions={}, "
"skip this example with --skip-invalid-size-inputs-valid-test"
).format(ignored[0], dataset.size(ignored[0]), max_positions)
)
print(
(
"WARNING: {:,} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), max_positions, ignored[:10])
)
return indices
def get_batch_iterator(
dataset,
training,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
num_concat_batches=1,
):
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
if max_positions is not None:
indices = filter_indices_by_size(
indices, dataset, max_positions, ignore_invalid_inputs)
# create mini-batches with given size constraints
batch_inds, non_grouped_batch_inds = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
num_concat_batches=num_concat_batches,
)
batch_ids = copy.deepcopy(non_grouped_batch_inds)
[bi.fill(i) for i, bi in enumerate(batch_ids)]
inds_ids = zip(np.concatenate(batch_inds), np.concatenate(batch_ids))
dataset.batch_ids = {idx: batch_idx for idx, batch_idx in inds_ids}
# Batches are already specified, now we just need to shuffle them
batch_ind_sampler = DistributedIndicesSampler(batch_inds, shuffle=training,
num_replicas=num_shards,
rank=shard_id, seed=seed,
drop_last=training,
fillvalue=[])
loader = DataLoader(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_ind_sampler,
num_workers=num_workers,
pin_memory=True,
persistent_workers=num_workers > 0,
)
return loader, batch_ind_sampler
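# Usage sketch (argument values are illustrative):
#
#     loader, sampler = get_batch_iterator(
#         train_dataset, training=True,
#         max_tokens=1_400_000, max_sentences=None,
#         max_positions=(1_400_000, 1_400_000),
#         ignore_invalid_inputs=True, seed=1,
#         num_shards=world_size, shard_id=rank, num_workers=4)
#     for batch in loader:
#         ...  # batches are already size-bucketed and sharded across ranks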
|
PyTorch/Detection/SSD/examples | examples | SSD300_inference | # Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from apex.fp16_utils import network_to_half
from dle.inference import prepare_input
from ssd.model import SSD300, ResNet
from ssd.utils import dboxes300_coco, Encoder
def load_checkpoint(model, model_file):
cp = torch.load(model_file)['model']
model.load_state_dict(cp)
def build_predictor(model_file, backbone='resnet50'):
ssd300 = SSD300(backbone=ResNet(backbone=backbone))
load_checkpoint(ssd300, model_file)
return ssd300
def prepare_model(checkpoint_path):
ssd300 = build_predictor(checkpoint_path)
ssd300 = ssd300.cuda()
ssd300 = network_to_half(ssd300)
ssd300 = ssd300.eval()
return ssd300
def prepare_tensor(inputs):
NHWC = np.array(inputs)
NCHW = np.swapaxes(np.swapaxes(NHWC, 2, 3), 1, 2)
tensor = torch.from_numpy(NCHW)
tensor = tensor.cuda()
tensor = tensor.half()
return tensor
def decode_results(predictions):
dboxes = dboxes300_coco()
encoder = Encoder(dboxes)
ploc, plabel = [val.float() for val in predictions]
results = encoder.decode_batch(ploc, plabel, criteria=0.5, max_output=20)
return [ [ pred.detach().cpu().numpy()
for pred in detections
]
for detections in results
]
def pick_best(detections, threshold):
    bboxes, classes, confidences = detections
    best = np.argwhere(confidences > threshold).squeeze(axis=1)
    return [pred[best] for pred in detections]
def main(checkpoint_path, imgs):
inputs = [prepare_input(uri) for uri in imgs]
tensor = prepare_tensor(inputs)
ssd300 = prepare_model(checkpoint_path)
predictions = ssd300(tensor)
results = decode_results(predictions)
    best_results = [pick_best(detections, threshold=0.3) for detections in results]
return best_results
if __name__ == '__main__':
best_results = main(
checkpoint_path='/checkpoints/SSD300v1.1.pt',
imgs=[ 'http://images.cocodataset.org/val2017/000000397133.jpg',
'http://images.cocodataset.org/val2017/000000037777.jpg',
'http://images.cocodataset.org/val2017/000000252219.jpg',
]
)
print(best_results)
|
PyTorch/Translation/Transformer/examples/translation | translation | prepare-wmt14en2de | #!/bin/bash
# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
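# Usage (run from the directory containing this script; outputs are written to
# ./orig and ./wmt14_en_de):
#   bash prepare-wmt14en2de.sh               # default WMT'14 En-De preprocessing
#   bash prepare-wmt14en2de.sh --icml17      # ConvS2S-compatible variant
#   bash prepare-wmt14en2de.sh --scaling18   # "Scaling Neural Machine Translation"-compatible variant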
echo 'Cloning Moses github repository (for tokenization scripts)...'
git clone https://github.com/moses-smt/mosesdecoder.git
echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
git clone https://github.com/rsennrich/subword-nmt.git
SCRIPTS=mosesdecoder/scripts
TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
CLEAN=$SCRIPTS/training/clean-corpus-n.perl
NORM_PUNC=$SCRIPTS/tokenizer/normalize-punctuation.perl
REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl
BPEROOT=subword-nmt
BPE_TOKENS=40000
URLS=(
"http://statmt.org/wmt13/training-parallel-europarl-v7.tgz"
"http://statmt.org/wmt13/training-parallel-commoncrawl.tgz"
"http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz"
"http://data.statmt.org/wmt17/translation-task/dev.tgz"
"http://statmt.org/wmt14/test-full.tgz"
)
FILES=(
"training-parallel-europarl-v7.tgz"
"training-parallel-commoncrawl.tgz"
"training-parallel-nc-v12.tgz"
"dev.tgz"
"test-full.tgz"
)
CORPORA=(
"training/europarl-v7.de-en"
"commoncrawl.de-en"
"training/news-commentary-v12.de-en"
)
# This will make the dataset compatible to the one used in "Convolutional Sequence to Sequence Learning"
# https://arxiv.org/abs/1705.03122
if [ "$1" == "--icml17" ]; then
URLS[2]="http://statmt.org/wmt14/training-parallel-nc-v9.tgz"
FILES[2]="training-parallel-nc-v9.tgz"
CORPORA[2]="training/news-commentary-v9.de-en"
fi
# This will make the dataset comparable to the one used in "Scaling Neural Machine Translation"
# https://arxiv.org/abs/1806.00187
if [ "$1" == "--scaling18" ]; then
BPE_TOKENS=33708
fi
if [ ! -d "$SCRIPTS" ]; then
echo "Please set SCRIPTS variable correctly to point to Moses scripts."
exit
fi
src=en
tgt=de
lang=en-de
prep=wmt14_en_de
tmp=$prep/tmp
orig=orig
dev=dev/newstest2013
mkdir -p $orig $tmp $prep
cd $orig
for ((i=0;i<${#URLS[@]};++i)); do
file=${FILES[i]}
if [ -f $file ]; then
echo "$file already exists, skipping download"
if [ ${file: -4} == ".tgz" ]; then
tar zxvf $file
elif [ ${file: -4} == ".tar" ]; then
tar xvf $file
fi
else
url=${URLS[i]}
wget "$url"
if [ -f $file ]; then
echo "$url successfully downloaded."
else
echo "$url not successfully downloaded."
exit -1
fi
if [ ${file: -4} == ".tgz" ]; then
tar zxvf $file
elif [ ${file: -4} == ".tar" ]; then
tar xvf $file
fi
fi
done
cd ..
echo "pre-processing train data..."
for l in $src $tgt; do
rm -f $tmp/train.tags.$lang.tok.$l
for f in "${CORPORA[@]}"; do
cat $orig/$f.$l | \
perl $NORM_PUNC $l | \
perl $REM_NON_PRINT_CHAR | \
perl $TOKENIZER -threads 8 -a -l $l >> $tmp/train.tags.$lang.tok.$l
done
done
echo "pre-processing test data..."
for l in $src $tgt; do
if [ "$l" == "$src" ]; then
t="src"
else
t="ref"
fi
grep '<seg id' $orig/test-full/newstest2014-deen-$t.$l.sgm | \
sed -e 's/<seg id="[0-9]*">\s*//g' | \
sed -e 's/\s*<\/seg>\s*//g' | \
sed -e "s/\’/\'/g" | \
perl $TOKENIZER -threads 8 -a -l $l > $tmp/test.$l
echo ""
done
if [ "$1" == "--scaling18" ]; then
# apply length filtering before BPE for --scaling18
perl $CLEAN $tmp/train.tags.$lang.tok $src $tgt $tmp/train 1 256
# use newstest2013 for valid
echo "pre-processing valid data..."
for l in $src $tgt; do
rm -f $tmp/valid.$l
cat $orig/$dev.$l | \
perl $NORM_PUNC $l | \
perl $REM_NON_PRINT_CHAR | tee -a $tmp/valid.raw.$l | \
perl $TOKENIZER -threads 8 -a -l $l >> $tmp/valid.$l
done
else
echo "splitting train and valid..."
for l in $src $tgt; do
awk '{if (NR%100 == 0) print $0; }' $tmp/train.tags.$lang.tok.$l > $tmp/valid.$l
awk '{if (NR%100 != 0) print $0; }' $tmp/train.tags.$lang.tok.$l > $tmp/train.$l
done
fi
TRAIN=$tmp/train.de-en
BPE_CODE=$prep/code
rm -f $TRAIN
for l in $src $tgt; do
cat $tmp/train.$l >> $TRAIN
done
echo "learn_bpe.py on ${TRAIN}..."
python $BPEROOT/learn_bpe.py -s $BPE_TOKENS < $TRAIN > $BPE_CODE
for L in $src $tgt; do
for f in train.$L valid.$L test.$L; do
echo "apply_bpe.py to ${f}..."
python $BPEROOT/apply_bpe.py -c $BPE_CODE < $tmp/$f > $tmp/bpe.$f
done
done
if [ "$1" == "--scaling18" ]; then
for L in $src $tgt; do
cp $tmp/bpe.train.$L $prep/train.$L
cp $tmp/bpe.valid.$L $prep/valid.$L
done
else
perl $CLEAN -ratio 1.5 $tmp/bpe.train $src $tgt $prep/train 1 250
perl $CLEAN -ratio 1.5 $tmp/bpe.valid $src $tgt $prep/valid 1 250
fi
for L in $src $tgt; do
cp $tmp/bpe.test.$L $prep/test.$L
done
|
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/inference | inference | inference_AMP | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python main.py \
--cfg config/efficientnet_v2/s_cfg.py \
--mode predict \
--use_amp \
--use_xla \
--predict_batch_size 128 \
--predict_img_size 384 \
--predict_ckpt xxx \
|
PyTorch/Recommendation/DLRM/dlrm/model | model | distributed | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Optional
import torch
from torch import nn
from dlrm.nn.factories import create_interaction
from dlrm.nn.parts import DlrmBottom, DlrmTop
from dlrm.utils import distributed as dist
class BottomToTop(torch.autograd.Function):
"""Switch from model parallel to data parallel
Wrap the communication of doing from bottom model in model parallel fashion to top model in data parallel
"""
@staticmethod
def forward(
ctx,
local_bottom_outputs: torch.Tensor,
batch_sizes_per_gpu: Sequence[int],
vector_dim: int,
vectors_per_gpu: Sequence[int],
feature_order: Optional[torch.Tensor] = None,
device_feature_order: Optional[torch.Tensor] = None
):
"""
Args:
ctx : Pytorch convention
local_bottom_outputs (Tensor): Concatenated output of bottom model
batch_sizes_per_gpu (Sequence[int]):
vector_dim (int):
vectors_per_gpu (Sequence[int]): Note, bottom MLP is considered as 1 vector
device_feature_order:
feature_order:
Returns:
            slice_bottom_outputs (Tensor): Partial output from the bottom model to feed into the data parallel top model
"""
rank = dist.get_rank()
ctx.world_size = torch.distributed.get_world_size()
ctx.batch_sizes_per_gpu = batch_sizes_per_gpu
ctx.vector_dim = vector_dim
ctx.vectors_per_gpu = vectors_per_gpu
ctx.feature_order = feature_order
ctx.device_feature_order = device_feature_order
        # The buffer shouldn't need to be zeroed out. If a non-zeroed buffer affects accuracy, there must be a bug.
bottom_output_buffer = [torch.empty(
batch_sizes_per_gpu[rank], n * vector_dim,
device=local_bottom_outputs.device, dtype=local_bottom_outputs.dtype) for n in vectors_per_gpu]
torch.distributed.all_to_all(bottom_output_buffer, list(local_bottom_outputs.split(batch_sizes_per_gpu, dim=0)))
slice_bottom_outputs = torch.cat(bottom_output_buffer, dim=1).view(batch_sizes_per_gpu[rank], -1, vector_dim)
# feature reordering is just for consistency across different device mapping configurations
if feature_order is not None and device_feature_order is not None:
return slice_bottom_outputs[:, feature_order, :]
return slice_bottom_outputs
@staticmethod
def backward(ctx, grad_slice_bottom_outputs):
rank = dist.get_rank()
if ctx.feature_order is not None and ctx.device_feature_order is not None:
grad_slice_bottom_outputs = grad_slice_bottom_outputs[:, ctx.device_feature_order, :]
grad_local_bottom_outputs = torch.empty(
sum(ctx.batch_sizes_per_gpu), ctx.vectors_per_gpu[rank] * ctx.vector_dim,
device=grad_slice_bottom_outputs.device,
dtype=grad_slice_bottom_outputs.dtype)
# All to all only takes list while split() returns tuple
grad_local_bottom_outputs_split = list(grad_local_bottom_outputs.split(ctx.batch_sizes_per_gpu, dim=0))
split_grads = [t.contiguous() for t in (grad_slice_bottom_outputs.view(ctx.batch_sizes_per_gpu[rank], -1).split(
[ctx.vector_dim * n for n in ctx.vectors_per_gpu], dim=1))]
torch.distributed.all_to_all(grad_local_bottom_outputs_split, split_grads)
return (grad_local_bottom_outputs.view(grad_local_bottom_outputs.shape[0], -1, ctx.vector_dim), None, None,
None, None, None)
bottom_to_top = BottomToTop.apply
class DistributedDlrm(nn.Module):
def __init__(
self,
num_numerical_features: int,
categorical_feature_sizes: Sequence[int],
bottom_mlp_sizes: Sequence[int],
top_mlp_sizes: Sequence[int],
vectors_per_gpu: Sequence[int] = None,
embedding_device_mapping: Sequence[Sequence[int]] = None,
world_num_categorical_features: int = None,
embedding_type: str = "multi_table",
embedding_dim: int = 128,
interaction_op: str = "dot",
hash_indices: bool = False,
use_cpp_mlp: bool = False,
fp16: bool = False,
bottom_features_ordered: bool = False,
device: str = "cuda"
):
super().__init__()
self.distributed = dist.get_world_size() > 1
self._vectors_per_gpu = vectors_per_gpu
self._embedding_dim = embedding_dim
self._interaction_op = interaction_op
self._hash_indices = hash_indices
if self.distributed:
# TODO: take bottom_mlp GPU from device mapping, do not assume it's always first
self._device_feature_order = torch.tensor(
[-1] + [i for bucket in embedding_device_mapping for i in bucket], dtype=torch.long, device=device
) + 1 if bottom_features_ordered else None
self._feature_order = self._device_feature_order.argsort() if bottom_features_ordered else None
else:
world_num_categorical_features = len(categorical_feature_sizes)
interaction = create_interaction(interaction_op, world_num_categorical_features, embedding_dim)
self.bottom_model = DlrmBottom(
num_numerical_features, categorical_feature_sizes, bottom_mlp_sizes,
embedding_type, embedding_dim, hash_indices=hash_indices, use_cpp_mlp=use_cpp_mlp,
fp16=fp16, device=device
)
self.top_model = DlrmTop(top_mlp_sizes, interaction, use_cpp_mlp=use_cpp_mlp).to(device)
def extra_repr(self):
return f"interaction_op={self._interaction_op}, hash_indices={self._hash_indices}"
# pylint:enable=missing-docstring
@classmethod
def from_dict(cls, obj_dict, **kwargs):
"""Create from json str"""
return cls(**obj_dict, **kwargs)
def forward(self, numerical_input, categorical_inputs, batch_sizes_per_gpu: Sequence[int] = None):
"""
Args:
numerical_input (Tensor): with shape [batch_size, num_numerical_features]
categorical_inputs (Tensor): with shape [batch_size, num_categorical_features]
batch_sizes_per_gpu (Sequence[int]):
"""
        # bottom mlp output may not be present before all-to-all communication
from_bottom, bottom_mlp_output = self.bottom_model(numerical_input, categorical_inputs)
# only perform all_to_all in multiGPU mode
if self.distributed:
from_bottom = bottom_to_top(from_bottom, batch_sizes_per_gpu, self._embedding_dim, self._vectors_per_gpu,
self._feature_order, self._device_feature_order)
# TODO: take bottom_mlp GPU from device mapping, do not assume it's always first
bottom_mlp_output = from_bottom[:, 0, :]
return self.top_model(from_bottom, bottom_mlp_output)
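# Construction sketch for a single, non-distributed process (sizes are made up;
# the multi-GPU mapping arguments are omitted because they are only needed when
# dist.get_world_size() > 1):
#
#     model = DistributedDlrm(
#         num_numerical_features=13,
#         categorical_feature_sizes=[100, 200, 300],
#         bottom_mlp_sizes=[512, 256, 128],   # last size typically equals embedding_dim
#         top_mlp_sizes=[1024, 512, 256, 1],
#         embedding_dim=128,
#         interaction_op="dot",
#         device="cuda",
#     )
#     logits = model(numerical_input, categorical_inputs)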
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | componentTiming | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "componentTiming.h"
#include <stdexcept>
namespace tts
{
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
ComponentTiming::ComponentTiming(const std::string& name, const double duration)
: mName(name)
, mDuration(duration)
, mSubTimings()
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void ComponentTiming::addSubTiming(const ComponentTiming& timing)
{
mSubTimings.emplace_back(timing);
}
void ComponentTiming::addSubTiming(const std::string& name, const double duration)
{
mSubTimings.emplace_back(name, duration);
}
void ComponentTiming::print(std::ostream& stream, const int numRuns) const
{
output(0, stream, numRuns);
}
std::string ComponentTiming::getName() const
{
return mName;
}
double ComponentTiming::getDuration() const
{
return mDuration;
}
ComponentTiming ComponentTiming::getSubTiming(const std::string& name) const
{
for (const ComponentTiming& timing : mSubTimings)
{
if (timing.getName() == name)
{
return timing;
}
}
throw std::runtime_error("Unable to find timing named '" + name + "'.");
}
/******************************************************************************
* PRIVATE METHODS ************************************************************
*****************************************************************************/
void ComponentTiming::output(const int level, std::ostream& stream, const int numRuns, const double parentTime) const
{
for (int i = 0; i < level; ++i)
{
stream << " ";
}
if (numRuns == 0)
{
throw std::runtime_error("Cannot compute average time of 0 runs.");
}
stream << mName << ": " << (mDuration / numRuns) << " s";
if (level > 0 && parentTime > 0.0)
{
stream << " (% " << 100.0 * (mDuration / parentTime) << ")";
}
stream << std::endl;
for (const ComponentTiming& t : mSubTimings)
{
if (t.getDuration() > 0)
{
t.output(level + 1, stream, numRuns, mDuration);
}
}
}
} // namespace tts
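// Usage sketch (illustrative values):
//
//   ComponentTiming total("inference", 1.25);
//   total.addSubTiming("encoder", 0.75);
//   total.addSubTiming("decoder", 0.50);
//   total.print(std::cout, /* numRuns = */ 5);
//
// print() reports each component's average duration over the given number of
// runs and, for nested timings, the share of the parent's duration they take.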
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular | tabular | kde_generator | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tqdm import tqdm
from typing import Union, List, Optional
import pickle
import cupy as cp
import numpy as np
import pandas as pd
from sklearn.neighbors import KernelDensity
from cuml.neighbors import KernelDensity as KernelDensityGPU
from sklearn.preprocessing import OrdinalEncoder
from syngen.generator.tabular.chunked_tabular_generator import ChunkedBaseTabularGenerator
import warnings
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
class KDEGenerator(ChunkedBaseTabularGenerator):
def __init__(self, **kwargs):
"""
A tabular generator based on kernel density estimation.
Categorical and continuous columns are modeled
using gaussian KDE
"""
super().__init__(**kwargs)
def ordinal_encoder(self, cat_col):
encoder = OrdinalEncoder()
encoder.fit(cat_col)
return encoder
def fit(
self,
data: pd.DataFrame,
categorical_columns: list = (),
samples: Union[float, int] = 0.1,
columns: Optional[List[str]] = None,
verbose: bool = False,
):
if samples > 0:
num_samples = len(data)
if 0.0 <= samples <= 1.0:
num_samples = samples * num_samples
else:
num_samples = samples
num_samples = min(int(num_samples), 10_000_000)
data = data.sample(n=num_samples)
self.column_order = columns or list(data.columns)
self.cat_fit = {}
self.categorical_columns = set(categorical_columns)
self.continuous_columns = set(self.column_order) - self.categorical_columns
        # - categorical columns: gaussian KDE over ordinal-encoded values
cat_cols = tqdm(self.categorical_columns) if verbose else self.categorical_columns
for column in cat_cols:
col_data = data[column].dropna().values.reshape(-1, 1)
enc = self.ordinal_encoder(col_data)
col_data = enc.transform(col_data).reshape(-1, 1)
kde = KernelDensity(kernel="gaussian")
kde = kde.fit(col_data)
self.cat_fit[column] = {
"encoder": enc,
"n_categories": len(enc.categories_[0]),
"sampler": kde,
'dtype': data[column].dtype,
}
self.cont_fit = {}
        # - continuous columns: gaussian KDE over the raw values
cont_cols = tqdm(self.continuous_columns) if verbose else self.continuous_columns
for column in cont_cols:
col_data = data[column].values.reshape(-1, 1)
kde = KernelDensity(kernel="gaussian")
kde = kde.fit(col_data)
self.cont_fit[column] = {
"sampler": kde,
'dtype': data[column].dtype,
}
self.fits = {**self.cat_fit, **self.cont_fit}
def sample(self, n, gpu=False, memmap_kwargs=None, start_idx=0, end_idx=None, **kwargs):
use_memmap = memmap_kwargs is not None
if use_memmap:
memmap_outfile = np.load(memmap_kwargs['filename'], mmap_mode='r+')
df = pd.DataFrame()
if gpu:
for column_id, column in enumerate(self.column_order):
sampler = self.fits[column]["sampler"]
gpu_sampler = KernelDensityGPU(kernel="gaussian")
gpu_sampler.fit(np.asarray(sampler.tree_.data))
if "encoder" in self.fits[column]:
# - must be categorical
encoder = self.fits[column]["encoder"]
n_categories = self.fits[column]["n_categories"]
sampled_data = gpu_sampler.sample(n)
sampled_data = cp.abs(sampled_data.reshape(-1, 1))
sampled_data = cp.round(sampled_data)
sampled_data = cp.clip(sampled_data, 0, n_categories - 1)
sampled_data = cp.asnumpy(sampled_data)
sampled_data = encoder.inverse_transform(sampled_data).reshape(-1)
else:
sampled_data = gpu_sampler.sample(n)
sampled_data = cp.asnumpy(sampled_data.reshape(-1))
sampled_data = sampled_data.astype(self.fits[column]["dtype"])
if use_memmap:
memmap_outfile[start_idx:end_idx, column_id] = sampled_data
else:
df[column] = sampled_data
else:
for column_id, column in enumerate(self.column_order):
sampler = self.fits[column]["sampler"]
if "encoder" in self.fits[column]:
# - must be categorical
encoder = self.fits[column]["encoder"]
n_categories = self.fits[column]["n_categories"]
sampled_data = sampler.sample(n)
sampled_data = np.abs(sampled_data.reshape(-1, 1))
sampled_data = np.round(sampled_data)
sampled_data = np.clip(sampled_data, 0, n_categories - 1)
sampled_data = encoder.inverse_transform(sampled_data).reshape(-1)
else:
sampled_data = sampler.sample(n).reshape(-1)
sampled_data = sampled_data.astype(self.fits[column]["dtype"])
if use_memmap:
memmap_outfile[start_idx:end_idx, column_id] = sampled_data
else:
df[column] = sampled_data
if use_memmap:
return None
return df
def save(self, path):
with open(path, 'wb') as file_handler:
pickle.dump(self, file_handler, protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
def load(cls, path):
with open(path, 'rb') as file_handler:
model = pickle.load(file_handler)
return model
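if __name__ == "__main__":
    # Minimal smoke-test sketch: fit the generator on a small synthetic frame
    # and print a few sampled rows. Column names and sizes are illustrative;
    # this assumes the chunked base class needs no extra constructor arguments
    # and that the GPU libraries imported above are installed (sampling itself
    # runs on CPU here because gpu=False).
    rng = np.random.default_rng(0)
    example = pd.DataFrame({
        "amount": rng.normal(100.0, 15.0, size=1000),
        "category": rng.choice(["a", "b", "c"], size=1000),
    })
    generator = KDEGenerator()
    generator.fit(example, categorical_columns=["category"], samples=1.0)
    print(generator.sample(5, gpu=False))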
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | faster_rcnn_resnet101_ava_v2.1 | # Faster R-CNN with Resnet-101 (v1), configuration for AVA v2.1.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
model {
faster_rcnn {
num_classes: 80
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_resnet101'
first_stage_features_stride: 16
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SIGMOID
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
second_stage_classification_loss {
weighted_sigmoid {
anchorwise_output: true
}
}
}
}
train_config: {
batch_size: 1
num_steps: 1500000
optimizer {
momentum_optimizer: {
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 0.0003
schedule {
step: 1200000
learning_rate: .00003
}
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
merge_multiple_label_boxes: true
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
data_augmentation_options {
random_horizontal_flip {
}
}
max_number_of_boxes: 100
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/ava_train.record"
}
label_map_path: "PATH_TO_BE_CONFIGURED/ava_label_map_v2.1.pbtxt"
}
eval_config: {
metrics_set: "pascal_voc_detection_metrics"
use_moving_averages: false
num_examples: 57371
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/ava_val.record"
}
label_map_path: "PATH_TO_BE_CONFIGURED/ava_label_map_v2.1.pbtxt"
shuffle: false
num_readers: 1
}
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | exporter | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
from .exceptions import RunnerException
from .stages import Stage
class CommandsExporter:
"""
    Exports stage commands to BASH scripts
"""
def __init__(self, scripts_dir: pathlib.Path):
"""
Args:
scripts_dir: Paths where scripts should be stored
"""
self._scripts_dir = scripts_dir
def export(self, stage: Stage) -> Command:
"""
Export stage commands to script and return new command to execute
Args:
stage: Stage object with commands
Returns:
Command object with script execution command
"""
filename = self._get_filename(stage.label)
file_path = self._scripts_dir / filename
with open(file_path, "w+") as stagefile:
stagefile.write("set -x\n")
stagefile.write("set -e\n")
stagefile.write("export PYTHONUNBUFFERED=1\n")
stagefile.write("export PYTHONPATH=`pwd`\n")
for command in stage.commands:
stagefile.write(str(command))
result = os.system(f'ex +"set syn=sh" +"norm gg=G" -cwq {file_path}')
if result != 0:
raise RunnerException(f"Failed running {filename} script formatting. Exit code {result}")
command = Command(f"bash -xe {file_path.as_posix()}")
return command
def _get_filename(self, label: str):
"""
Generate filename for script based on label
Args:
label: String with stage label
Returns:
String with script filename
"""
filename = label.replace(" ", "_").lower()
filename = f"{filename}.sh"
return filename
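# Usage sketch (paths are illustrative):
#
#     exporter = CommandsExporter(scripts_dir=pathlib.Path("runner_scripts"))
#     command = exporter.export(stage)   # stage: a runner Stage with commands
#     # `command` wraps `bash -xe runner_scripts/<stage_label>.sh`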
|
PyTorch/SpeechRecognition/Jasper/scripts/docker | docker | build | #!/bin/bash
docker build . --rm -t jasper |
TensorFlow2/LanguageModeling/ELECTRA/scripts | scripts | benchmark_pretraining | #!/usr/bin/env bash
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
train_batch_size_p1=${1:-"176"}
learning_rate_p1="6e-7"
precision=${2:-"amp"}
xla=${3:-"xla"}
num_gpus=${4:-8}
warmup_steps_p1="10"
train_steps_p1=10
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients=${5:-"true"}
gradient_accumulation_steps_p1=${6:-48}
seed=42
job_name="electra_lamb_pretraining_benchmark"
train_batch_size_p2=${7:-24}
learning_rate_p2="4e-7"
warmup_steps_p2="10"
train_steps_p2=10
gradient_accumulation_steps_p2=${8:-144}
electra_model=${9:-"base"}
restore_checkpoint=false bash scripts/run_pretraining.sh $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
|
PaddlePaddle/Classification/RN50v1.5/scripts/inference | inference | export_resnet50_TF32 | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CKPT=${1:-"./output/ResNet50/89"}
MODEL_PREFIX=${2:-"resnet_50_paddle"}
python -m paddle.distributed.launch --gpus=0 export_model.py \
--trt-inference-dir ./inference_tf32 \
--from-checkpoint $CKPT \
--model-prefix ${MODEL_PREFIX}
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs/gn_baselines | gn_baselines | scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn | INPUT:
MIN_SIZE_TRAIN: 800
MAX_SIZE_TRAIN: 1333
MIN_SIZE_TEST: 800
MAX_SIZE_TEST: 1333
MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "" # no pretrained model
BACKBONE:
CONV_BODY: "R-50-FPN"
OUT_CHANNELS: 256
FREEZE_CONV_BODY_AT: 0 # finetune all layers
RESNETS: # use GN for backbone
TRANS_FUNC: "BottleneckWithGN"
STEM_FUNC: "StemWithGN"
FPN:
USE_GN: True # use GN for FPN
RPN:
USE_FPN: True
ANCHOR_STRIDE: (4, 8, 16, 32, 64)
PRE_NMS_TOP_N_TRAIN: 2000
PRE_NMS_TOP_N_TEST: 1000
POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TEST: 1000
ROI_HEADS:
USE_FPN: True
BATCH_SIZE_PER_IMAGE: 512
POSITIVE_FRACTION: 0.25
ROI_BOX_HEAD:
USE_GN: True # use GN for bbox head
POOLER_RESOLUTION: 7
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
POOLER_SAMPLING_RATIO: 2
CONV_HEAD_DIM: 256
NUM_STACKED_CONVS: 4
FEATURE_EXTRACTOR: "FPNXconv1fcFeatureExtractor"
PREDICTOR: "FPNPredictor"
ROI_MASK_HEAD:
USE_GN: True # use GN for mask head
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
CONV_LAYERS: (256, 256, 256, 256)
FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor"
PREDICTOR: "MaskRCNNC4Predictor"
POOLER_RESOLUTION: 14
POOLER_SAMPLING_RATIO: 2
RESOLUTION: 28
SHARE_BOX_FEATURE_EXTRACTOR: False
MASK_ON: True
DATASETS:
TRAIN: ("coco_2014_train", "coco_2014_valminusminival")
TEST: ("coco_2014_minival",)
DATALOADER:
SIZE_DIVISIBILITY: 32
SOLVER:
# Assume 8 gpus
BASE_LR: 0.02
WEIGHT_DECAY: 0.0001
STEPS: (210000, 250000)
MAX_ITER: 270000
IMS_PER_BATCH: 16
TEST:
IMS_PER_BATCH: 8 |
PyTorch/SpeechRecognition/Jasper/triton/scripts | scripts | generate_perf_results | #!/bin/bash
#### input arguments
RESULT_DIR=${RESULT_DIR} # used by perf_client to store results
NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:-"0"}
SERVER_HOSTNAME=${SERVER_HOSTNAME:-localhost}
AUDIO_LENGTH=${AUDIO_LENGTH:-80000}
BATCH_SIZE=${BATCH_SIZE:-16}
####
set -e
SCRIPT_DIR=$(cd $(dirname $0); pwd)
TRITON_DIR=${SCRIPT_DIR}/..
PROJECT_DIR=${TRITON_DIR}/..
GPU_TESTS=${GPU_TESTS:-"tensorrt ts-trace onnx"}
ENGINE_COUNT_TESTS=${ENGINE_COUNT_TESTS:-"1"}
# Export the set variables in case they used the default
export NVIDIA_VISIBLE_DEVICES
export SERVER_HOSTNAME
export AUDIO_LENGTH
export MAX_LATENCY=2000 # Set max latency high to prevent errors
TRITON=${TRITON:-jasper-triton-server}
MAX_QUEUE_DELAYS=${MAX_QUEUE_DELAYS:-"10 5 2"} #ms
# Ensure that the server is closed when the script exits
function cleanup_server {
current_time=$(date "+%Y.%m.%d-%H.%M.%S")
logfile="/tmp/${TRITON}-${current_time}.log"
echo "Shutting down ${TRITON} container, log is in ${logfile}"
docker logs ${TRITON} > ${logfile} 2>&1
docker stop ${TRITON} > /dev/null 2>&1
}
trap cleanup_server EXIT
trap "exit" INT
function wait_for_triton {
TIMEOUT=${1:-60}
    timeout ${TIMEOUT} ${SCRIPT_DIR}/wait_for_triton_server.sh || { echo -e '\nServer timeout!!!\n'; exit 1; }
}
function modify_ensemble {
PLAT=$1
REPO=${TRITON_DIR}/deploy/model_repo
INPLACE="--in_place"
CONF=${REPO}/jasper-${PLAT}/config.pbtxt
CONF_E=${REPO}/jasper-${PLAT}-ensemble/config.pbtxt
echo "Modifying ${CONF} : batch size ${BATCH_SIZE} engines=${NUM_ENGINES} ..."
cleanup_server || true
sed -i -e "s/1#NUM_ENGINES/${NUM_ENGINES}/g" -e "s/8#MAX_BATCH/${BATCH_SIZE}/g" ${CONF}
if [ "$MAX_QUEUE" != "" ] ; then
sed -i -e "s/#db#//g" -e "s/#MAX_QUEUE/${MAX_QUEUE}/g" ${CONF}
fi
echo "Modifying ${CONF_E} for size $2, batch size ${BATCH_SIZE} ${TRITON_DYN_BATCH_ARGS}.."
sed -i -e "s/-1#AUDIO_LENGTH/${AUDIO_LENGTH}/g" -e "s/8#MAX_BATCH/${BATCH_SIZE}/g" ${CONF_E}
${SCRIPT_DIR}/run_server.sh
wait_for_triton
echo "done."
}
echo "GPU tests: ${GPU_TESTS}"
echo "PRECISION: ${PRECISION}"
for plat in ${GPU_TESTS}; do
if [ "$plat" == "none" ]; then
continue
else
export MAX_LATENCY=2000
export MEASUREMENT_WINDOW=3000
fi
export BASE_SAVE_NAME="${plat}_${PRECISION}_${AUDIO_LENGTH}_BS${BATCH_SIZE}"
export MODEL_NAME=jasper-${plat}-ensemble
MODELS="jasper-${plat} jasper-${plat}-ensemble" PRECISION=${PRECISION} ${SCRIPT_DIR}/prepare_model_repository.sh
    ############## Engine Count Comparison (static batching) ##############
for num_engines in ${ENGINE_COUNT_TESTS}; do
SAVE_RESULTS_DIR="${BASE_SAVE_NAME}/static/${num_engines}_engines"
NUM_ENGINES=${num_engines} BATCH_SIZE=${BATCH_SIZE} modify_ensemble ${plat} ${AUDIO_LENGTH}
echo "Running engines comparison, ${num_engines} engines..."
        MAX_CONCURRENCY=8 BATCH_SIZE=${BATCH_SIZE} ${SCRIPT_DIR}/run_perf_client.sh ${SAVE_RESULTS_DIR} || echo -e '\nPerf Client Failure!!!\n'
done
############## Dynamic Batching Comparison ##############
for delay in ${MAX_QUEUE_DELAYS}; do
echo "Running dynamic batching comparison, models=${MODELS}, delay ${delay}..."
TRITON_DYN_BATCHING_DELAY=$((delay * 1000))
SAVE_RESULTS_DIR="${BASE_SAVE_NAME}/batching/${TRITON_DYN_BATCHING_DELAY}"
NUM_ENGINES=1 MAX_QUEUE=${TRITON_DYN_BATCHING_DELAY} BATCH_SIZE=${BATCH_SIZE} modify_ensemble ${plat} ${AUDIO_LENGTH}
        BATCH_SIZE=1 MAX_CONCURRENCY=$((BATCH_SIZE*2)) ${SCRIPT_DIR}/run_perf_client.sh ${SAVE_RESULTS_DIR} || echo -e '\nPerf Client Failure!!!\n'
done
done
echo "Complete!"
|
TensorFlow2/LanguageModeling/ELECTRA/scripts/configs | configs | pretrain_config | #!/usr/bin/env bash
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Full pretraining configs for NVIDIA DGX-A100 (8x NVIDIA A100 40GB GPU)
dgxa100_8gpu_amp ()
{
train_batch_size_p1="176"
learning_rate_p1="6e-3"
precision="amp"
xla="xla"
num_gpus=8
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=48
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=24
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=144
electra_model="base"
echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
dgxa100_8gpu_tf32 ()
{
train_batch_size_p1="88"
learning_rate_p1="6e-3"
precision="tf32"
xla="xla"
num_gpus=8
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=96
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=12
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=288
electra_model="base"
echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
# Full pretraining configs for NVIDIA DGX-2H (16x NVIDIA V100 32GB GPU)
dgx2_16gpu_amp ()
{
train_batch_size_p1="176"
learning_rate_p1="6e-3"
precision="amp"
xla="xla"
num_gpus=16
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=24
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=24
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=72
electra_model="base"
echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
dgx2_16gpu_fp32 ()
{
train_batch_size_p1="88"
learning_rate_p1="6e-3"
precision="fp32"
xla="xla"
num_gpus=16
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=48
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=12
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=144
electra_model="base"
echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
# Full pretraining configs for NVIDIA DGX-1 (8x NVIDIA V100 16GB GPU)
dgx1_8gpu_amp ()
{
train_batch_size_p1="88"
learning_rate_p1="6e-3"
precision="amp"
xla="xla"
num_gpus=8
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=96
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=12
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=288
electra_model="base"
echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
dgx1_8gpu_fp32 ()
{
train_batch_size_p1="40"
learning_rate_p1="6e-3"
precision="fp32"
xla="xla"
num_gpus=8
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=211
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=6
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=576
electra_model="base"
    echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
# Full pretraining configs for NVIDIA DGX-A100 (1x NVIDIA A100 40GB GPU)
dgxa100_1gpu_amp ()
{
train_batch_size_p1="176"
learning_rate_p1="6e-3"
precision="amp"
xla="xla"
num_gpus=1
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=384
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=24
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=1152
electra_model="base"
echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
dgxa100_1gpu_tf32 ()
{
train_batch_size_p1="88"
learning_rate_p1="6e-3"
precision="tf32"
xla="xla"
num_gpus=1
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=768
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=12
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=2304
electra_model="base"
echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
# Full pretraining configs for NVIDIA DGX-2H (1x NVIDIA V100 32GB GPU)
dgx2_1gpu_amp ()
{
train_batch_size_p1="176"
learning_rate_p1="6e-3"
precision="amp"
xla="xla"
num_gpus=1
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=384
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=24
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=1152
electra_model="base"
echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
dgx2_1gpu_fp32 ()
{
train_batch_size_p1="88"
learning_rate_p1="6e-3"
precision="fp32"
xla="xla"
num_gpus=1
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=768
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=12
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=2304
electra_model="base"
echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
# Full pretraining configs for NVIDIA DGX-1 (1x NVIDIA V100 16GB GPU)
dgx1_1gpu_amp ()
{
train_batch_size_p1="88"
learning_rate_p1="6e-3"
precision="amp"
xla="xla"
num_gpus=1
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=768
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=12
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=2304
electra_model="base"
echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
dgx1_1gpu_fp32 ()
{
train_batch_size_p1="40"
learning_rate_p1="6e-3"
precision="fp32"
xla="xla"
num_gpus=1
warmup_steps_p1="2000"
train_steps_p1=10000
save_checkpoint_steps=500
resume_training="false"
optimizer="lamb"
accumulate_gradients="true"
gradient_accumulation_steps_p1=1689
seed=42
job_name="electra_lamb_pretraining"
train_batch_size_p2=6
learning_rate_p2="4e-3"
warmup_steps_p2="200"
train_steps_p2=933
gradient_accumulation_steps_p2=4608
electra_model="base"
    echo $train_batch_size_p1 $learning_rate_p1 $precision $num_gpus $xla \
$warmup_steps_p1 $train_steps_p1 $save_checkpoint_steps \
$resume_training $optimizer $accumulate_gradients \
$gradient_accumulation_steps_p1 $seed $job_name \
$train_batch_size_p2 $learning_rate_p2 \
$warmup_steps_p2 $train_steps_p2 $gradient_accumulation_steps_p2 \
$electra_model
}
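# Usage sketch (an assumption about how these helpers are consumed; verify against the
# repo README): each function only echoes its hyperparameters, so the output can be
# expanded into the positional arguments of the pretraining script, e.g.
#   bash scripts/run_pretraining.sh $(source scripts/configs/pretrain_config.sh && dgxa100_8gpu_amp)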
|
PyTorch/LanguageModeling/BERT/scripts | scripts | run_pretraining | #!/bin/bash
# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "Container nvidia build = " $NVIDIA_BUILD_ID
train_batch_size=${1:-8192}
learning_rate=${2:-"6e-3"}
precision=${3:-"fp16"}
num_gpus=${4:-$(nvidia-smi -L | wc -l)}
warmup_proportion=${5:-"0.2843"}
train_steps=${6:-7038}
save_checkpoint_steps=${7:-200}
resume_training=${8:-"false"}
create_logfile=${9:-"true"}
accumulate_gradients=${10:-"true"}
gradient_accumulation_steps=${11:-128}
seed=${12:-12439}
job_name=${13:-"bert_lamb_pretraining"}
allreduce_post_accumulation=${14:-"true"}
allreduce_post_accumulation_fp16=${15:-"true"}
train_batch_size_phase2=${16:-4096}
learning_rate_phase2=${17:-"4e-3"}
warmup_proportion_phase2=${18:-"0.128"}
train_steps_phase2=${19:-1563}
gradient_accumulation_steps_phase2=${20:-512}
#change this for other datasets
DATASET=pretrain/phase1/unbinned/parquet
DATA_DIR_PHASE1=${21:-$BERT_PREP_WORKING_DIR/${DATASET}/}
#change this for other datasets
DATASET2=pretrain/phase2/bin_size_64/parquet
DATA_DIR_PHASE2=${22:-$BERT_PREP_WORKING_DIR/${DATASET2}/}
CODEDIR=${23:-"/workspace/bert"}
init_checkpoint=${24:-"None"}
VOCAB_FILE=vocab/vocab
RESULTS_DIR=$CODEDIR/results
CHECKPOINTS_DIR=$RESULTS_DIR/checkpoints
wikipedia_source=${25:-$BERT_PREP_WORKING_DIR/wikipedia/source/}
num_dask_workers=${26:-$(nproc)}
num_shards_per_worker=${27:-128}
num_workers=${28:-4}
num_nodes=1
sample_ratio=${29:-0.9}
phase2_bin_size=${30:-64}
masking=${31:-static}
BERT_CONFIG=${32:-bert_configs/large.json}
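# Example invocation (illustrative; every positional argument above is optional and
# falls back to its default, so overriding only the leading ones is enough):
#   bash scripts/run_pretraining.sh 8192 6e-3 fp16 8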
# Calculate the total number of shards.
readonly num_blocks=$((num_shards_per_worker * $(( num_workers > 0 ? num_workers : 1 )) * num_nodes * num_gpus))
if [ "${phase2_bin_size}" == "none" ]; then
readonly phase2_bin_size_flag=""
elif [[ "${phase2_bin_size}" =~ ^(32|64|128|256|512)$ ]]; then
readonly phase2_bin_size_flag="--bin-size ${phase2_bin_size}"
else
echo "Error! phase2_bin_size=${phase2_bin_size} not supported!"
   exit 1
fi
if [ "${masking}" == "static" ]; then
readonly masking_flag="--masking"
elif [ "${masking}" == "dynamic" ]; then
readonly masking_flag=""
else
echo "Error! masking=${masking} not supported!"
   exit 1
fi
mkdir -p $CHECKPOINTS_DIR
if [ ! -d "${DATA_DIR_PHASE1}" ] || [ -z "$(ls -A ${DATA_DIR_PHASE1})" ]; then
echo "Warning! ${DATA_DIR_PHASE1} directory missing."
if [ ! -d "${wikipedia_source}" ] || [ -z "$(ls -A ${wikipedia_source})" ]; then
echo "Error! ${wikipedia_source} directory missing. Training cannot start!"
      exit 1
fi
preprocess_cmd=" \
mpirun \
--oversubscribe \
--allow-run-as-root \
-np ${num_dask_workers} \
-x LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so \
preprocess_bert_pretrain \
--schedule mpi \
--vocab-file ${VOCAB_FILE} \
--wikipedia ${wikipedia_source} \
--sink ${DATA_DIR_PHASE1} \
--num-blocks ${num_blocks} \
--sample-ratio ${sample_ratio} \
${masking_flag} \
--seed ${seed}"
echo "Running ${preprocess_cmd} ..."
${preprocess_cmd}
balance_load_cmd=" \
mpirun \
--oversubscribe \
--allow-run-as-root \
-np ${num_dask_workers} \
balance_dask_output \
--indir ${DATA_DIR_PHASE1} \
--num-shards ${num_blocks}"
echo "Running ${balance_load_cmd} ..."
${balance_load_cmd}
fi
if [ ! -d "$RESULTS_DIR" ] ; then
echo "Error! $RESULTS_DIR directory missing."
exit -1
fi
if [ ! -d "$CHECKPOINTS_DIR" ] ; then
echo "Warning! $CHECKPOINTS_DIR directory missing."
echo "Checkpoints will be written to $RESULTS_DIR instead."
CHECKPOINTS_DIR=$RESULTS_DIR
fi
if [ ! -f "$BERT_CONFIG" ] ; then
echo "Error! BERT large configuration file not found at $BERT_CONFIG"
exit -1
fi
PREC=""
if [ "$precision" = "fp16" ] ; then
PREC="--fp16"
elif [ "$precision" = "fp32" ] ; then
PREC=""
elif [ "$precision" = "tf32" ] ; then
PREC=""
else
echo "Unknown <precision> argument"
exit -2
fi
ACCUMULATE_GRADIENTS=""
if [ "$accumulate_gradients" == "true" ] ; then
ACCUMULATE_GRADIENTS="--gradient_accumulation_steps=$gradient_accumulation_steps"
fi
CHECKPOINT=""
if [ "$resume_training" == "true" ] ; then
CHECKPOINT="--resume_from_checkpoint"
fi
ALL_REDUCE_POST_ACCUMULATION=""
if [ "$allreduce_post_accumulation" == "true" ] ; then
ALL_REDUCE_POST_ACCUMULATION="--allreduce_post_accumulation"
fi
ALL_REDUCE_POST_ACCUMULATION_FP16=""
if [ "$allreduce_post_accumulation_fp16" == "true" ] ; then
ALL_REDUCE_POST_ACCUMULATION_FP16="--allreduce_post_accumulation_fp16"
fi
INIT_CHECKPOINT=""
if [ "$init_checkpoint" != "None" ] ; then
INIT_CHECKPOINT="--init_checkpoint=$init_checkpoint"
fi
echo $DATA_DIR_PHASE1
INPUT_DIR=$DATA_DIR_PHASE1
CMD=" $CODEDIR/run_pretraining.py"
CMD+=" --input_dir=$DATA_DIR_PHASE1"
CMD+=" --output_dir=$CHECKPOINTS_DIR"
CMD+=" --config_file=$BERT_CONFIG"
CMD+=" --vocab_file=$VOCAB_FILE"
CMD+=" --train_batch_size=$train_batch_size"
CMD+=" --max_seq_length=128"
CMD+=" --max_predictions_per_seq=20"
CMD+=" --max_steps=$train_steps"
CMD+=" --warmup_proportion=$warmup_proportion"
CMD+=" --num_steps_per_checkpoint=$save_checkpoint_steps"
CMD+=" --learning_rate=$learning_rate"
CMD+=" --seed=$seed"
CMD+=" $PREC"
CMD+=" $ACCUMULATE_GRADIENTS"
CMD+=" $CHECKPOINT"
CMD+=" $ALL_REDUCE_POST_ACCUMULATION"
CMD+=" $ALL_REDUCE_POST_ACCUMULATION_FP16"
CMD+=" $INIT_CHECKPOINT"
CMD+=" --do_train"
CMD+=" --json-summary ${RESULTS_DIR}/dllogger.json "
CMD+=" --disable_progress_bar"
CMD+=" --num_workers=${num_workers}"
CMD="python3 -m torch.distributed.launch --nproc_per_node=$num_gpus $CMD"
if [ "$create_logfile" = "true" ] ; then
export GBS=$(expr $train_batch_size \* $num_gpus)
printf -v TAG "pyt_bert_pretraining_phase1_%s_gbs%d" "$precision" $GBS
DATESTAMP=`date +'%y%m%d%H%M%S'`
LOGFILE=$RESULTS_DIR/$job_name.$TAG.$DATESTAMP.log
printf "Logs written to %s\n" "$LOGFILE"
fi
set -x
if [ -z "$LOGFILE" ] ; then
$CMD
else
(
$CMD
) |& tee $LOGFILE
fi
set +x
echo "finished pretraining"
#Start Phase2
PREC=""
if [ "$precision" = "fp16" ] ; then
PREC="--fp16"
elif [ "$precision" = "fp32" ] ; then
PREC=""
elif [ "$precision" = "tf32" ] ; then
PREC=""
else
echo "Unknown <precision> argument"
exit -2
fi
ACCUMULATE_GRADIENTS=""
if [ "$accumulate_gradients" == "true" ] ; then
ACCUMULATE_GRADIENTS="--gradient_accumulation_steps=$gradient_accumulation_steps_phase2"
fi
ALL_REDUCE_POST_ACCUMULATION=""
if [ "$allreduce_post_accumulation" == "true" ] ; then
ALL_REDUCE_POST_ACCUMULATION="--allreduce_post_accumulation"
fi
ALL_REDUCE_POST_ACCUMULATION_FP16=""
if [ "$allreduce_post_accumulation_fp16" == "true" ] ; then
ALL_REDUCE_POST_ACCUMULATION_FP16="--allreduce_post_accumulation_fp16"
fi
if [ ! -d "${DATA_DIR_PHASE2}" ] || [ -z "$(ls -A ${DATA_DIR_PHASE2})" ]; then
echo "Warning! ${DATA_DIR_PHASE2} directory missing."
if [ ! -d "${wikipedia_source}" ] || [ -z "$(ls -A ${wikipedia_source})" ]; then
echo "Error! ${wikipedia_source} directory missing. Training cannot start!"
      exit 1
fi
preprocess_cmd=" \
mpirun \
--oversubscribe \
--allow-run-as-root \
-np ${num_dask_workers} \
-x LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so \
preprocess_bert_pretrain \
--schedule mpi \
--vocab-file ${VOCAB_FILE} \
--wikipedia ${wikipedia_source} \
--sink ${DATA_DIR_PHASE2} \
--target-seq-length 512 \
--num-blocks ${num_blocks} \
--sample-ratio ${sample_ratio} \
${phase2_bin_size_flag} \
${masking_flag} \
--seed ${seed}"
echo "Running ${preprocess_cmd} ..."
${preprocess_cmd}
balance_load_cmd=" \
mpirun \
--oversubscribe \
--allow-run-as-root \
-np ${num_dask_workers} \
balance_dask_output \
--indir ${DATA_DIR_PHASE2} \
--num-shards ${num_blocks}"
echo "Running ${balance_load_cmd} ..."
${balance_load_cmd}
fi
echo $DATA_DIR_PHASE2
INPUT_DIR=$DATA_DIR_PHASE2
CMD=" $CODEDIR/run_pretraining.py"
CMD+=" --input_dir=$DATA_DIR_PHASE2"
CMD+=" --output_dir=$CHECKPOINTS_DIR"
CMD+=" --config_file=$BERT_CONFIG"
CMD+=" --vocab_file=$VOCAB_FILE"
CMD+=" --train_batch_size=$train_batch_size_phase2"
CMD+=" --max_seq_length=512"
CMD+=" --max_predictions_per_seq=80"
CMD+=" --max_steps=$train_steps_phase2"
CMD+=" --warmup_proportion=$warmup_proportion_phase2"
CMD+=" --num_steps_per_checkpoint=$save_checkpoint_steps"
CMD+=" --learning_rate=$learning_rate_phase2"
CMD+=" --seed=$seed"
CMD+=" $PREC"
CMD+=" $ACCUMULATE_GRADIENTS"
CMD+=" $CHECKPOINT"
CMD+=" $ALL_REDUCE_POST_ACCUMULATION"
CMD+=" $ALL_REDUCE_POST_ACCUMULATION_FP16"
CMD+=" --do_train --phase2 --resume_from_checkpoint --phase1_end_step=$train_steps"
CMD+=" --json-summary ${RESULTS_DIR}/dllogger.json "
CMD+=" --disable_progress_bar"
CMD+=" --num_workers=${num_workers}"
CMD="python3 -m torch.distributed.launch --nproc_per_node=$num_gpus $CMD"
if [ "$create_logfile" = "true" ] ; then
export GBS=$(expr $train_batch_size_phase2 \* $num_gpus)
printf -v TAG "pyt_bert_pretraining_phase2_%s_gbs%d" "$precision" $GBS
DATESTAMP=`date +'%y%m%d%H%M%S'`
LOGFILE=$RESULTS_DIR/$job_name.$TAG.$DATESTAMP.log
printf "Logs written to %s\n" "$LOGFILE"
fi
set -x
if [ -z "$LOGFILE" ] ; then
$CMD
else
(
$CMD
) |& tee $LOGFILE
fi
set +x
echo "finished phase2"
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/losses | losses | __init__ | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Activations package definition. Subject to change."""
from official.nlp.modeling.losses.weighted_sparse_categorical_crossentropy import loss as weighted_sparse_categorical_crossentropy_loss
from official.nlp.modeling.losses.weighted_sparse_categorical_crossentropy import per_example_loss as weighted_sparse_categorical_crossentropy_per_example_loss
|
PyTorch/Classification/GPUNet/triton/runner | runner | runner | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import signal
import sys
from typing import List, Type
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .exceptions import RunnerException
from .executor import Executor
from .finalizer import Finalizer
from .logger import LOGGER, log_format
from .maintainer import Maintainer
from .pipeline import Pipeline
from .preparer import Preparer
from .triton import Triton
class Runner:
"""
    Runner class. Main entry point for preparing and running tasks and experiments.
"""
WORKSPACE = pathlib.Path.cwd()
EXECUTOR_WORKSPACE = WORKSPACE / "runner_workspace"
def __init__(
self,
pipeline: Pipeline,
config: Config,
executor_cls: Type[Executor],
maintainer_cls: Type[Maintainer],
preparer_cls: Type[Preparer],
finalizer_cls: Type[Finalizer],
devices: List[str] = None,
log_level: int = logging.INFO,
):
self._pipeline = pipeline
self._config = config
self._preparer = preparer_cls()
self._finalizer = finalizer_cls()
self._devices = devices or ["0"]
self._log_level = log_level
self._logs_dir = self.EXECUTOR_WORKSPACE / "logs"
self._log_file_path = self._logs_dir / "runner.log"
self._maintainer = maintainer_cls()
self._executor = executor_cls(
workspace=self.EXECUTOR_WORKSPACE,
maintainer=self._maintainer,
pipeline=pipeline,
devices=devices,
)
signal.signal(signal.SIGINT, self._catch)
self._logs_dir.mkdir(parents=True, exist_ok=True)
def start(self) -> None:
"""
Start runner
Returns:
None
"""
self._setup_logger()
task = self._preparer.exec(
workspace=self.EXECUTOR_WORKSPACE,
config=self._config,
pipeline=self._pipeline,
logs_dir=self._logs_dir,
maintainer=self._maintainer,
triton=Triton(),
)
results = []
try:
for result in self._executor.start(task):
results.append(result)
except RunnerException as e:
LOGGER.error(f"Error running task: {str(e)}")
finally:
self._executor.stop()
self._finalizer.exec(workspace=self.EXECUTOR_WORKSPACE, task=task, results=results)
def _catch(self, signum, frame):
"""
        SIGINT handler. Stops the executor when an interrupt signal is received.
Args:
signum: signal id
frame: signal frame
"""
self._executor.stop()
sys.exit(0)
def _setup_logger(self) -> None:
"""
Add file handle for logger
Returns:
None
"""
file = logging.FileHandler(self._log_file_path)
formatter = logging.Formatter(log_format)
file.setFormatter(formatter)
LOGGER.addHandler(file)
LOGGER.setLevel(level=self._log_level)
LOGGER.initialize(file_path=self._log_file_path)
|
PyTorch/Classification/ConvNets | ConvNets | quant_main | # Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import random
from copy import deepcopy
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from image_classification.training import *
from image_classification.utils import *
from image_classification.quantization import *
from image_classification.models import efficientnet_quant_b0, efficientnet_quant_b4
from main import prepare_for_training, add_parser_arguments as parse_training
import dllogger
def available_models():
models = {
m.name: m
for m in [
efficientnet_quant_b0,
efficientnet_quant_b4,
]
}
return models
def parse_quantization(parser):
model_names = available_models().keys()
parser.add_argument(
"--arch",
"-a",
metavar="ARCH",
default="efficientnet-quant-b0",
choices=model_names,
help="model architecture: "
+ " | ".join(model_names)
+ " (default: efficientnet-quant-b0)",
)
parser.add_argument(
"--skip-calibration",
action="store_true",
help="skip calibration before training, (default: false)",
)
def parse_training_args(parser):
from main import add_parser_arguments
return add_parser_arguments(parser)
def main(args, model_args, model_arch):
exp_start_time = time.time()
global best_prec1
best_prec1 = 0
skip_calibration = args.skip_calibration or args.evaluate or args.resume is not None
select_default_calib_method()
(
trainer,
lr_policy,
train_loader,
train_loader_len,
val_loader,
logger,
start_epoch,
) = prepare_for_training(args, model_args, model_arch)
print(f"RUNNING QUANTIZATION")
if not skip_calibration:
calibrate(trainer.model_and_loss.model, train_loader, logger, calib_iter=10)
train_loop(
trainer,
lr_policy,
train_loader,
train_loader_len,
val_loader,
logger,
should_backup_checkpoint(args),
start_epoch=start_epoch,
end_epoch=min((start_epoch + args.run_epochs), args.epochs)
if args.run_epochs != -1
else args.epochs,
best_prec1=best_prec1,
prof=args.prof,
skip_training=args.evaluate,
skip_validation=args.training_only,
save_checkpoints=args.save_checkpoints,
checkpoint_dir=args.workspace,
checkpoint_filename="quantized_" + args.checkpoint_filename,
)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger.end()
print("Experiment ended")
if __name__ == "__main__":
epilog = [
"Based on the architecture picked by --arch flag, you may use the following options:\n"
]
for model, ep in available_models().items():
model_help = "\n".join(ep.parser().format_help().split("\n")[2:])
epilog.append(model_help)
parser = argparse.ArgumentParser(
description="PyTorch ImageNet Training",
epilog="\n".join(epilog),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parse_quantization(parser)
parse_training(parser, skip_arch=True)
args, rest = parser.parse_known_args()
model_arch = available_models()[args.arch]
model_args, rest = model_arch.parser().parse_known_args(rest)
print(model_args)
assert len(rest) == 0, f"Unknown args passed: {rest}"
cudnn.benchmark = True
main(args, model_args, model_arch)
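# Example invocation (an illustrative assumption; run `python quant_main.py --help` for
# the authoritative argument list, which is shared with main.py):
#   python quant_main.py /path/to/imagenet --arch efficientnet-quant-b0 --epochs 10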
|
PyTorch/SpeechSynthesis/HiFiGAN/common/text | text | numerical | """ adapted from https://github.com/keithito/tacotron """
import inflect
import re
_magnitudes = ['trillion', 'billion', 'million', 'thousand', 'hundred', 'm', 'b', 't']
_magnitudes_key = {'m': 'million', 'b': 'billion', 't': 'trillion'}
_measurements = '(f|c|k|d|m)'
_measurements_key = {'f': 'fahrenheit',
'c': 'celsius',
'k': 'thousand',
'm': 'meters'}
_currency_key = {'$': 'dollar', '£': 'pound', '€': 'euro', '₩': 'won'}
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_currency_re = re.compile(r'([\$€£₩])([0-9\.\,]*[0-9]+)(?:[ ]?({})(?=[^a-zA-Z]|$))?'.format("|".join(_magnitudes)), re.IGNORECASE)
_measurement_re = re.compile(r'([0-9\.\,]*[0-9]+(\s)?{}\b)'.format(_measurements), re.IGNORECASE)
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
# _range_re = re.compile(r'(?<=[0-9])+(-)(?=[0-9])+.*?')
_roman_re = re.compile(r'\b(?=[MDCLXVI]+\b)M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{2,3})\b') # avoid I
_multiply_re = re.compile(r'(\b[0-9]+)(x)([0-9]+)')
_number_re = re.compile(r"[0-9]+'s|[0-9]+s|[0-9]+")
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_currency(m):
currency = _currency_key[m.group(1)]
quantity = m.group(2)
magnitude = m.group(3)
# remove commas from quantity to be able to convert to numerical
quantity = quantity.replace(',', '')
# check for million, billion, etc...
if magnitude is not None and magnitude.lower() in _magnitudes:
if len(magnitude) == 1:
magnitude = _magnitudes_key[magnitude.lower()]
return "{} {} {}".format(_expand_hundreds(quantity), magnitude, currency+'s')
parts = quantity.split('.')
if len(parts) > 2:
return quantity + " " + currency + "s" # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = currency if dollars == 1 else currency+'s'
cent_unit = 'cent' if cents == 1 else 'cents'
return "{} {}, {} {}".format(
_expand_hundreds(dollars), dollar_unit,
_inflect.number_to_words(cents), cent_unit)
elif dollars:
dollar_unit = currency if dollars == 1 else currency+'s'
return "{} {}".format(_expand_hundreds(dollars), dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return "{} {}".format(_inflect.number_to_words(cents), cent_unit)
else:
return 'zero' + ' ' + currency + 's'
def _expand_hundreds(text):
number = float(text)
if 1000 < number < 10000 and (number % 100 == 0) and (number % 1000 != 0):
return _inflect.number_to_words(int(number / 100)) + " hundred"
else:
return _inflect.number_to_words(text)
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_measurement(m):
    _, number, measurement = re.split(r'(\d+(?:\.\d+)?)', m.group(0))
number = _inflect.number_to_words(number)
measurement = "".join(measurement.split())
measurement = _measurements_key[measurement.lower()]
return "{} {}".format(number, measurement)
def _expand_range(m):
return ' to '
def _expand_multiply(m):
left = m.group(1)
right = m.group(3)
return "{} by {}".format(left, right)
def _expand_roman(m):
# from https://stackoverflow.com/questions/19308177/converting-roman-numerals-to-integers-in-python
roman_numerals = {'I':1, 'V':5, 'X':10, 'L':50, 'C':100, 'D':500, 'M':1000}
result = 0
num = m.group(0)
for i, c in enumerate(num):
if (i+1) == len(num) or roman_numerals[c] >= roman_numerals[num[i+1]]:
result += roman_numerals[c]
else:
result -= roman_numerals[c]
return str(result)
def _expand_number(m):
_, number, suffix = re.split(r"(\d+(?:'?\d+)?)", m.group(0))
number = int(number)
    if 1000 < number < 10000 and (number % 100 == 0) and (number % 1000 != 0):
text = _inflect.number_to_words(number // 100) + " hundred"
elif number > 1000 and number < 3000:
if number == 2000:
text = 'two thousand'
elif number > 2000 and number < 2010:
text = 'two thousand ' + _inflect.number_to_words(number % 100)
elif number % 100 == 0:
text = _inflect.number_to_words(number // 100) + ' hundred'
else:
number = _inflect.number_to_words(number, andword='', zero='oh', group=2).replace(', ', ' ')
number = re.sub(r'-', ' ', number)
text = number
else:
number = _inflect.number_to_words(number, andword='and')
number = re.sub(r'-', ' ', number)
number = re.sub(r',', '', number)
text = number
if suffix in ("'s", "s"):
if text[-1] == 'y':
text = text[:-1] + 'ies'
else:
text = text + suffix
return text
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_currency_re, _expand_currency, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
# text = re.sub(_range_re, _expand_range, text)
# text = re.sub(_measurement_re, _expand_measurement, text)
text = re.sub(_roman_re, _expand_roman, text)
text = re.sub(_multiply_re, _expand_multiply, text)
text = re.sub(_number_re, _expand_number, text)
return text
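# Minimal usage sketch (illustrative; relies only on the `inflect` dependency already
# imported above):
if __name__ == '__main__':
    for sample in ["I paid $15.50 for the 3rd edition.",
                   "Chapter IV covers the years 1990 to 2008."]:
        print(normalize_numbers(sample))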
|
PyTorch/SpeechSynthesis/HiFiGAN/fastpitch | fastpitch | entrypoints | # *****************************************************************************
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import urllib.request
import torch
import os
import sys
#from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def checkpoint_from_distributed(state_dict):
"""
Checks whether checkpoint was generated by DistributedDataParallel. DDP
wraps model in additional "module.", it needs to be unwrapped for single
GPU inference.
:param state_dict: model's state dict
"""
ret = False
for key, _ in state_dict.items():
if key.find('module.') != -1:
ret = True
break
return ret
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def unwrap_distributed(state_dict):
"""
Unwraps model from DistributedDataParallel.
DDP wraps model in additional "module.", it needs to be removed for single
GPU inference.
:param state_dict: model's state dict
"""
new_state_dict = {}
for key, value in state_dict.items():
new_key = key.replace('module.1.', '')
new_key = new_key.replace('module.', '')
new_state_dict[new_key] = value
return new_state_dict
def _download_checkpoint(checkpoint, force_reload):
model_dir = os.path.join(torch.hub._get_torch_home(), 'checkpoints')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
ckpt_file = os.path.join(model_dir, os.path.basename(checkpoint))
if not os.path.exists(ckpt_file) or force_reload:
sys.stderr.write('Downloading checkpoint from {}\n'.format(checkpoint))
urllib.request.urlretrieve(checkpoint, ckpt_file)
return ckpt_file
def nvidia_fastpitch(pretrained=True, **kwargs):
"""TODO
"""
from fastpitch import model as fastpitch
force_reload = "force_reload" in kwargs and kwargs["force_reload"]
fp16 = "model_math" in kwargs and kwargs["model_math"] == "fp16"
if pretrained:
checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/dle/fastpitch__pyt_ckpt/versions/21.12.1_amp/files/nvidia_fastpitch_210824+cfg.pt'
ckpt_file = _download_checkpoint(checkpoint, force_reload)
ckpt = torch.load(ckpt_file)
state_dict = ckpt['state_dict']
if checkpoint_from_distributed(state_dict):
state_dict = unwrap_distributed(state_dict)
config = ckpt['config']
train_setup = ckpt.get('train_setup', {})
else:
config = {'n_mel_channels': 80, 'n_symbols': 148, 'padding_idx': 0, 'symbols_embedding_dim': 384,
'in_fft_n_layers': 6, 'in_fft_n_heads': 1, 'in_fft_d_head': 64, 'in_fft_conv1d_kernel_size': 3,
'in_fft_conv1d_filter_size': 1536, 'in_fft_output_size': 384, 'p_in_fft_dropout': 0.1,
'p_in_fft_dropatt': 0.1, 'p_in_fft_dropemb': 0.0, 'out_fft_n_layers': 6, 'out_fft_n_heads': 1,
'out_fft_d_head': 64, 'out_fft_conv1d_kernel_size': 3, 'out_fft_conv1d_filter_size': 1536,
'out_fft_output_size': 384, 'p_out_fft_dropout': 0.1, 'p_out_fft_dropatt': 0.1, 'p_out_fft_dropemb': 0.0,
'dur_predictor_kernel_size': 3, 'dur_predictor_filter_size': 256, 'p_dur_predictor_dropout': 0.1,
'dur_predictor_n_layers': 2, 'pitch_predictor_kernel_size': 3, 'pitch_predictor_filter_size': 256,
'p_pitch_predictor_dropout': 0.1, 'pitch_predictor_n_layers': 2, 'pitch_embedding_kernel_size': 3,
'n_speakers': 1, 'speaker_emb_weight': 1.0, 'energy_predictor_kernel_size': 3,
'energy_predictor_filter_size': 256, 'p_energy_predictor_dropout': 0.1, 'energy_predictor_n_layers': 2,
'energy_conditioning': True, 'energy_embedding_kernel_size': 3}
for k,v in kwargs.items():
if k in config.keys():
config[k] = v
train_setup = {}
model = fastpitch.FastPitch(**config)
if pretrained:
model.load_state_dict(state_dict)
if fp16:
model.half()
model.forward = model.infer
return model, train_setup
def nvidia_textprocessing_utils(cmudict_path, heteronyms_path, **kwargs):
from common.text.text_processing import TextProcessing
import numpy as np
from torch.nn.utils.rnn import pad_sequence
from common.text import cmudict
class TextPreProcessing:
@staticmethod
def prepare_input_sequence(texts, batch_size=1, device='cpu'):
cmudict.initialize(cmudict_path, heteronyms_path)
tp = TextProcessing(symbol_set='english_basic', cleaner_names=['english_cleaners_v2'], p_arpabet=1.0)
fields={}
fields['text'] = [torch.LongTensor(tp.encode_text(text))
for text in texts]
order = np.argsort([-t.size(0) for t in fields['text']])
fields['text'] = [fields['text'][i] for i in order]
fields['text_lens'] = torch.LongTensor([t.size(0) for t in fields['text']])
for t in fields['text']:
print(tp.sequence_to_text(t.numpy()))
# cut into batches & pad
batches = []
for b in range(0, len(order), batch_size):
batch = {f: values[b:b+batch_size] for f, values in fields.items()}
for f in batch:
if f == 'text':
batch[f] = pad_sequence(batch[f], batch_first=True)
if type(batch[f]) is torch.Tensor:
batch[f] = batch[f].to(device)
batches.append(batch)
return batches
return TextPreProcessing()
# # from tacotron2.text import text_to_sequence
# @staticmethod
# def pad_sequences(batch):
# # Right zero-pad all one-hot text sequences to max input length
# input_lengths, ids_sorted_decreasing = torch.sort(
# torch.LongTensor([len(x) for x in batch]),
# dim=0, descending=True)
# max_input_len = input_lengths[0]
# text_padded = torch.LongTensor(len(batch), max_input_len)
# text_padded.zero_()
# for i in range(len(ids_sorted_decreasing)):
# text = batch[ids_sorted_decreasing[i]]
# text_padded[i, :text.size(0)] = text
# return text_padded, input_lengths
# @staticmethod
# def prepare_input_sequence(texts, cpu_run=False):
# d = []
# # for i,text in enumerate(texts):
# # d.append(torch.IntTensor(
# # Processing.text_to_sequence(text, ['english_cleaners'])[:]))
# text_padded, input_lengths = Processing.pad_sequences(d)
# if not cpu_run:
# text_padded = text_padded.cuda().long()
# input_lengths = input_lengths.cuda().long()
# else:
# text_padded = text_padded.long()
# input_lengths = input_lengths.long()
# return text_padded, input_lengths
# return Processing()
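# Usage sketch (illustrative; the cmudict/heteronyms paths are placeholders and loading
# pretrained weights requires network access to NGC):
#   model, train_setup = nvidia_fastpitch(pretrained=False)
#   tp = nvidia_textprocessing_utils(cmudict_path='cmudict/cmudict-0.7b',
#                                    heteronyms_path='cmudict/heteronyms')
#   batches = tp.prepare_input_sequence(["Hello world."], batch_size=1)
#   outputs = model(batches[0]['text'])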
|
TensorFlow2/Classification/ConvNets/dataloader | dataloader | augment | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
"""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import math
import tensorflow as tf
from typing import Any, Dict, List, Optional, Text, Tuple
try:
from keras.layers.preprocessing import image_preprocessing as image_ops
except (ImportError, ModuleNotFoundError):
import keras.src.layers.preprocessing.image_preprocessing as image_ops
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def to_4d(image: tf.Tensor) -> tf.Tensor:
"""Converts an input Tensor to 4 dimensions.
4D image => [N, H, W, C] or [N, C, H, W]
3D image => [1, H, W, C] or [1, C, H, W]
2D image => [1, H, W, 1]
Args:
image: The 2/3/4D input tensor.
Returns:
A 4D image tensor.
Raises:
`TypeError` if `image` is not a 2/3/4D tensor.
"""
shape = tf.shape(image)
original_rank = tf.rank(image)
left_pad = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32)
right_pad = tf.cast(tf.equal(original_rank, 2), dtype=tf.int32)
new_shape = tf.concat(
[
tf.ones(shape=left_pad, dtype=tf.int32),
shape,
tf.ones(shape=right_pad, dtype=tf.int32),
],
axis=0,
)
return tf.reshape(image, new_shape)
def from_4d(image: tf.Tensor, ndims: tf.Tensor) -> tf.Tensor:
"""Converts a 4D image back to `ndims` rank."""
shape = tf.shape(image)
begin = tf.cast(tf.less_equal(ndims, 3), dtype=tf.int32)
end = 4 - tf.cast(tf.equal(ndims, 2), dtype=tf.int32)
new_shape = shape[begin:end]
return tf.reshape(image, new_shape)
def _convert_translation_to_transform(translations: tf.Tensor) -> tf.Tensor:
"""Converts translations to a projective transform.
The translation matrix looks like this:
[[1 0 -dx]
[0 1 -dy]
[0 0 1]]
Args:
translations: The 2-element list representing [dx, dy], or a matrix of
2-element lists representing [dx dy] to translate for each image. The
shape must be static.
Returns:
The transformation matrix of shape (num_images, 8).
Raises:
`TypeError` if
- the shape of `translations` is not known or
- the shape of `translations` is not rank 1 or 2.
"""
translations = tf.convert_to_tensor(translations, dtype=tf.float32)
if translations.get_shape().ndims is None:
raise TypeError('translations rank must be statically known')
elif len(translations.get_shape()) == 1:
translations = translations[None]
elif len(translations.get_shape()) != 2:
raise TypeError('translations should have rank 1 or 2.')
num_translations = tf.shape(translations)[0]
return tf.concat(
values=[
tf.ones((num_translations, 1), tf.dtypes.float32),
tf.zeros((num_translations, 1), tf.dtypes.float32),
-translations[:, 0, None],
tf.zeros((num_translations, 1), tf.dtypes.float32),
tf.ones((num_translations, 1), tf.dtypes.float32),
-translations[:, 1, None],
tf.zeros((num_translations, 2), tf.dtypes.float32),
],
axis=1,
)
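# Worked example (illustrative): _convert_translation_to_transform([2., 3.]) returns a
# (1, 8) tensor [[1., 0., -2., 0., 1., -3., 0., 0.]], i.e. the first two rows of the
# 3x3 translation matrix shown in the docstring, flattened for image_ops.transform.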
def _convert_angles_to_transform(
angles: tf.Tensor,
image_width: tf.Tensor,
image_height: tf.Tensor) -> tf.Tensor:
"""Converts an angle or angles to a projective transform.
Args:
angles: A scalar to rotate all images, or a vector to rotate a batch of
      images. The rank must be 0 or 1 and statically known.
image_width: The width of the image(s) to be transformed.
image_height: The height of the image(s) to be transformed.
Returns:
A tensor of shape (num_images, 8).
Raises:
`TypeError` if `angles` is not rank 0 or 1.
"""
angles = tf.convert_to_tensor(angles, dtype=tf.float32)
if len(angles.get_shape()) == 0: # pylint:disable=g-explicit-length-test
angles = angles[None]
elif len(angles.get_shape()) != 1:
raise TypeError('Angles should have a rank 0 or 1.')
x_offset = ((image_width - 1) -
(tf.math.cos(angles) * (image_width - 1) - tf.math.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) -
(tf.math.sin(angles) * (image_width - 1) + tf.math.cos(angles) *
(image_height - 1))) / 2.0
num_angles = tf.shape(angles)[0]
return tf.concat(
values=[
tf.math.cos(angles)[:, None],
-tf.math.sin(angles)[:, None],
x_offset[:, None],
tf.math.sin(angles)[:, None],
tf.math.cos(angles)[:, None],
y_offset[:, None],
tf.zeros((num_angles, 2), tf.dtypes.float32),
],
axis=1,
)
def transform(image: tf.Tensor, transforms) -> tf.Tensor:
"""Prepares input data for `image_ops.transform`."""
original_ndims = tf.rank(image)
transforms = tf.convert_to_tensor(transforms, dtype=tf.float32)
if transforms.shape.rank == 1:
transforms = transforms[None]
image = to_4d(image)
image = image_ops.transform(
images=image,
transforms=transforms,
interpolation='nearest')
return from_4d(image, original_ndims)
def translate(image: tf.Tensor, translations) -> tf.Tensor:
"""Translates image(s) by provided vectors.
Args:
image: An image Tensor of type uint8.
translations: A vector or matrix representing [dx dy].
Returns:
The translated version of the image.
"""
transforms = _convert_translation_to_transform(translations)
return transform(image, transforms=transforms)
def rotate(image: tf.Tensor, degrees: float) -> tf.Tensor:
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = tf.cast(degrees * degrees_to_radians, tf.float32)
original_ndims = tf.rank(image)
image = to_4d(image)
image_height = tf.cast(tf.shape(image)[1], tf.float32)
image_width = tf.cast(tf.shape(image)[2], tf.float32)
transforms = _convert_angles_to_transform(angles=radians,
image_width=image_width,
image_height=image_height)
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = transform(image, transforms=transforms)
return from_4d(image, original_ndims)
def blend(image1: tf.Tensor, image2: tf.Tensor, factor: float) -> tf.Tensor:
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.cast(image1, tf.float32) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
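# Example (illustrative): blend(a, b, 0.5) returns the pixel-wise midpoint of the two
# uint8 images (truncated back to uint8), while a factor such as 1.5 extrapolates past
# image2 and is clipped to the valid [0, 255] range.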
def cutout(image: tf.Tensor, pad_size: int, replace: int = 0) -> tf.Tensor:
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
a random location within `img`. The pixel values filled in will be of the
  value `replace`. The location where the mask will be applied is randomly
chosen uniformly over the whole image.
Args:
image: An image Tensor of type uint8.
pad_size: Specifies how big the zero mask that will be generated is that
is applied to the image. The mask will be of size
(2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random.uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random.uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
def solarize(image: tf.Tensor, threshold: int = 128) -> tf.Tensor:
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image: tf.Tensor,
addition: int = 0,
threshold: int = 128) -> tf.Tensor:
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image: tf.Tensor, bits: int) -> tf.Tensor:
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def wrapped_rotate(image: tf.Tensor, degrees: float, replace: int) -> tf.Tensor:
"""Applies rotation with wrap/unwrap."""
image = rotate(wrap(image), degrees=degrees)
return unwrap(image, replace)
def translate_x(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor:
"""Equivalent of PIL Translate in X dimension."""
image = translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor:
"""Equivalent of PIL Translate in Y dimension."""
image = translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def shear_x(image: tf.Tensor, level: float, replace: int) -> tf.Tensor:
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = transform(image=wrap(image),
transforms=[1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image: tf.Tensor, level: float, replace: int) -> tf.Tensor:
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = transform(image=wrap(image),
transforms=[1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def autocontrast(image: tf.Tensor) -> tf.Tensor:
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image: tf.Tensor) -> tf.Tensor:
"""Scale the 2D image using the autocontrast rule."""
    # A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
    # to compute mins and maxes.
lo = tf.cast(tf.reduce_min(image), tf.float32)
hi = tf.cast(tf.reduce_max(image), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def sharpness(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', dilations=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image: tf.Tensor) -> tf.Tensor:
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image: tf.Tensor) -> tf.Tensor:
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
def wrap(image: tf.Tensor) -> tf.Tensor:
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], axis=2)
return extended
def unwrap(image: tf.Tensor, replace: int) -> tf.Tensor:
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = tf.expand_dims(flattened_image[:, 3], axis=-1)
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level: float):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level: float):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level: float):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level: float):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level: float, translate_const: float):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _mult_to_arg(level: float, multiplier: float = 1.):
return (int((level / _MAX_LEVEL) * multiplier),)
def _apply_func_with_prob(func: Any,
image: tf.Tensor,
args: Any,
prob: float):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(
should_apply_op,
lambda: func(image, *args),
lambda: image)
return augmented_image
def select_and_apply_random_policy(policies: Any, image: tf.Tensor):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random.uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image),
lambda: image)
return image
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': wrapped_rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': cutout,
}
# Functions that have a 'replace' parameter
REPLACE_FUNCS = frozenset({
'Rotate',
'TranslateX',
'ShearX',
'ShearY',
'TranslateY',
'Cutout',
})
def level_to_arg(cutout_const: float, translate_const: float):
"""Creates a dict mapping image operation names to their arguments."""
no_arg = lambda level: ()
posterize_arg = lambda level: _mult_to_arg(level, 4)
solarize_arg = lambda level: _mult_to_arg(level, 256)
solarize_add_arg = lambda level: _mult_to_arg(level, 110)
cutout_arg = lambda level: _mult_to_arg(level, cutout_const)
translate_arg = lambda level: _translate_level_to_arg(level, translate_const)
args = {
'AutoContrast': no_arg,
'Equalize': no_arg,
'Invert': no_arg,
'Rotate': _rotate_level_to_arg,
'Posterize': posterize_arg,
'Solarize': solarize_arg,
'SolarizeAdd': solarize_add_arg,
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
'Cutout': cutout_arg,
'TranslateX': translate_arg,
'TranslateY': translate_arg,
}
return args
def _parse_policy_info(name: Text,
prob: float,
level: float,
replace_value: List[int],
cutout_const: float,
translate_const: float) -> Tuple[Any, float, Any]:
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(cutout_const, translate_const)[name](level)
if name in REPLACE_FUNCS:
# Add in replace arg if it is required for the function that is called.
args = tuple(list(args) + [replace_value])
return func, prob, args
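# Example of how a single (name, prob, level) policy entry is resolved (a sketch,
# values are illustrative): with replace_value=[128, 128, 128], cutout_const=100.
# and translate_const=250.,
#   func, prob, args = _parse_policy_info('Rotate', 0.8, 7., [128] * 3, 100., 250.)
# returns `wrapped_rotate`, prob=0.8 and args=(rotation_angle, [128, 128, 128]),
# which AutoAugment later invokes via _apply_func_with_prob(func, image, args, prob).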
class ImageAugment(object):
"""Image augmentation class for applying image distortions."""
def distort(self, image: tf.Tensor) -> tf.Tensor:
"""Given an image tensor, returns a distorted image with the same shape.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
Returns:
The augmented version of `image`.
"""
raise NotImplementedError()
class AutoAugment(ImageAugment):
"""Applies the AutoAugment policy to images.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
"""
def __init__(self,
augmentation_name: Text = 'v0',
policies: Optional[Dict[Text, Any]] = None,
cutout_const: float = 100,
translate_const: float = 250):
"""Applies the AutoAugment policy to images.
Args:
augmentation_name: The name of the AutoAugment policy to use. The
        available options are `v0`, `simple` and `test`. `v0` is the policy used for all
of the results in the paper and was found to achieve the best results on
the COCO dataset. `v1`, `v2` and `v3` are additional good policies found
on the COCO dataset that have slight variation in what operations were
used during the search procedure along with how many operations are
applied in parallel to a single image (2 vs 3).
policies: list of lists of tuples in the form `(func, prob, level)`,
`func` is a string name of the augmentation function, `prob` is the
probability of applying the `func` operation, `level` is the input
argument for `func`.
cutout_const: multiplier for applying cutout.
translate_const: multiplier for applying translation.
"""
super(AutoAugment, self).__init__()
if policies is None:
self.available_policies = {
'v0': self.policy_v0(),
'test': self.policy_test(),
'simple': self.policy_simple(),
}
if augmentation_name not in self.available_policies:
raise ValueError(
'Invalid augmentation_name: {}'.format(augmentation_name))
self.augmentation_name = augmentation_name
self.policies = self.available_policies[augmentation_name]
self.cutout_const = float(cutout_const)
self.translate_const = float(translate_const)
def distort(self, image: tf.Tensor) -> tf.Tensor:
"""Applies the AutoAugment policy to `image`.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
Returns:
A version of image that now has data augmentation applied to it based on
the `policies` pass into the function.
"""
input_image_type = image.dtype
if input_image_type != tf.uint8:
image = tf.clip_by_value(image, 0.0, 255.0)
image = tf.cast(image, dtype=tf.uint8)
replace_value = [128] * 3
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter
# associated with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in self.policies:
tf_policy = []
# Link string name to the correct python function and make sure the
# correct argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [
replace_value, self.cutout_const, self.translate_const
]
tf_policy.append(_parse_policy_info(*policy_info))
      # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_):
for func, prob, args in tf_policy_:
image_ = _apply_func_with_prob(func, image_, args, prob)
return image_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
image = select_and_apply_random_policy(tf_policies, image)
image = tf.cast(image, dtype=input_image_type)
return image
@staticmethod
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper.
Each tuple is an augmentation operation of the form
(operation, probability, magnitude). Each element in policy is a
sub-policy that will be applied sequentially on the image.
Returns:
the policy.
"""
# TODO(dankondratyuk): tensorflow_addons defines custom ops, which
# for some reason are not included when building/linking
# This results in the error, "Op type not registered
# 'Addons>ImageProjectiveTransformV2' in binary" when running on borg TPUs
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
@staticmethod
def policy_simple():
"""Same as `policy_v0`, except with custom ops removed."""
policy = [
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
]
return policy
@staticmethod
def policy_test():
"""Autoaugment test policy for debugging."""
policy = [
[('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
class RandAugment(ImageAugment):
"""Applies the RandAugment policy to images.
  RandAugment is from the paper https://arxiv.org/abs/1909.13719.
"""
def __init__(self,
num_layers: int = 2,
magnitude: float = 10.,
cutout_const: float = 40.,
translate_const: float = 100.):
"""Applies the RandAugment policy to images.
Args:
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range
[5, 10].
cutout_const: multiplier for applying cutout.
translate_const: multiplier for applying translation.
"""
super(RandAugment, self).__init__()
self.num_layers = num_layers
self.magnitude = float(magnitude)
self.cutout_const = float(cutout_const)
self.translate_const = float(translate_const)
self.available_ops = [
'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize',
'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY',
'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd'
]
def distort(self, image: tf.Tensor) -> tf.Tensor:
"""Applies the RandAugment policy to `image`.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
Returns:
The augmented version of `image`.
"""
input_image_type = image.dtype
if input_image_type != tf.uint8:
image = tf.clip_by_value(image, 0.0, 255.0)
image = tf.cast(image, dtype=tf.uint8)
replace_value = [128] * 3
min_prob, max_prob = 0.2, 0.8
for _ in range(self.num_layers):
op_to_select = tf.random.uniform(
[], maxval=len(self.available_ops) + 1, dtype=tf.int32)
branch_fns = []
for (i, op_name) in enumerate(self.available_ops):
prob = tf.random.uniform([],
minval=min_prob,
maxval=max_prob,
dtype=tf.float32)
func, _, args = _parse_policy_info(op_name,
prob,
self.magnitude,
replace_value,
self.cutout_const,
self.translate_const)
branch_fns.append((
i,
# pylint:disable=g-long-lambda
lambda selected_func=func, selected_args=args: selected_func(
image, *selected_args)))
# pylint:enable=g-long-lambda
image = tf.switch_case(branch_index=op_to_select,
branch_fns=branch_fns,
default=lambda: tf.identity(image))
image = tf.cast(image, dtype=input_image_type)
return image
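# Minimal usage sketch (not part of the original module; assumes eager execution and
# illustrative parameter values):
#   augmenter = RandAugment(num_layers=2, magnitude=10.)
#   image = tf.cast(tf.random.uniform([224, 224, 3], maxval=256, dtype=tf.int32), tf.uint8)
#   image = augmenter.distort(image)
# AutoAugment is used the same way, e.g. AutoAugment(augmentation_name='v0').distort(image).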
|
TensorFlow/Recommendation/NCF | NCF | download_dataset | DATASET_NAME=$1
RAW_DATADIR=$2
function download_20m {
echo "Download ml-20m"
curl -O http://files.grouplens.org/datasets/movielens/ml-20m.zip
mv ml-20m.zip ${RAW_DATADIR}
}
function download_1m {
echo "Downloading ml-1m"
curl -O http://files.grouplens.org/datasets/movielens/ml-1m.zip
mv ml-1m.zip ${RAW_DATADIR}
}
if [[ ${DATASET_NAME} == "ml-1m" ]]
then
download_1m
elif [[ ${DATASET_NAME} == "ml-20m" ]]
then
download_20m
else
echo "Unsupported dataset name: $DATASET_NAME"
exit 1
fi
|
PyTorch/SpeechRecognition/QuartzNet/common/text | text | symbols | # Copyright (c) 2017 Keith Ito
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text
that has been run through Unidecode. For other data, you can modify _characters.
See TRAINING_DATA.md for details.
'''
from . import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
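# Example (hypothetical helper, not part of this module): downstream text processing
# typically builds lookup tables from this list, e.g.
#   _symbol_to_id = {s: i for i, s in enumerate(symbols)}
#   _id_to_symbol = {i: s for i, s in enumerate(symbols)}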
|
TensorFlow/Segmentation/VNet | VNet | requirements | SimpleITK==1.1.0
requests
googledrivedownloader
tf2onnx
git+https://github.com/NVIDIA/dllogger#egg=dllogger |
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit | deployment_toolkit | core | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
def __str__(self):
return self.value
class Accelerator(Parameter):
NONE = "none"
AMP = "amp"
TRT = "trt"
CUDA = NONE # backward compatibility
class Precision(Parameter):
INT8 = "int8"
FP16 = "fp16"
FP32 = "fp32"
TF32 = "tf32" # Deprecated
class Format(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
ONNX = "onnx"
TRT = "trt"
TS_SCRIPT = "ts-script"
TS_TRACE = "ts-trace"
PYT = "pyt"
FASTERTRANSFORMER = "fastertransformer"
class Model(NamedTuple):
handle: object
# TODO: precision should be removed
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
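# Usage sketch (the module path below is hypothetical): fetch the dataloader factory
# exposed by a user-provided script, then call it with script-specific arguments:
#   get_dataloader_fn = load_from_file("triton/dataloader.py", label="dataloader",
#                                      target=DATALOADER_FN_NAME)
#   dataloader_fn = get_dataloader_fn(dataset_path="/data", batch_size=16)  # args depend on the script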
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
@abc.abstractmethod
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
pass
@property
@abc.abstractmethod
def metrics(self) -> Dict[str, Any]:
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
class MeasurementMode(Enum):
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
class PerformanceTool(Enum):
"""
Available performance evaluation tools
"""
MODEL_ANALYZER = "model_analyzer"
PERF_ANALYZER = "perf_analyzer"
class BatchingMode(Enum):
"""
Available batching modes
"""
STATIC = "static"
DYNAMIC = "dynamic"
class EvaluationMode(Enum):
"""
Available evaluation modes
"""
OFFLINE = "offline"
ONLINE = "online"
class OfflineMode(Enum):
SYSTEM = "system"
CUDA = "cuda"
|
PyTorch/SpeechSynthesis/HiFiGAN/common/text/unidecoder | unidecoder | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import warnings
from .homoglyphs import homoglyphs
from .replacements import replacements
_replacements = {uni: asc for uni, asc in replacements}
_homoglyphs = {g: asc for asc, glyphs in homoglyphs.items() for g in glyphs}
def unidecoder(s, homoglyphs=False):
"""Transliterate unicode
Args:
s (str): unicode string
homoglyphs (bool): prioritize translating to homoglyphs
"""
warned = False # Once per utterance
ret = ''
for u in s:
if ord(u) < 127:
a = u
elif homoglyphs:
a = _homoglyphs.get(u, _replacements.get(u, None))
else:
a = _replacements.get(u, _homoglyphs.get(u, None))
if a is None:
if not warned:
warnings.warn(f'Unexpected character {u}: '
'please revise your text cleaning rules.',
stacklevel=10**6)
warned = True
else:
ret += a
return ret
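# Usage sketch (exact output depends on the tables defined in replacements.py and
# homoglyphs.py):
#   unidecoder('naïve café')                  # -> 'naive cafe'
#   unidecoder('naïve café', homoglyphs=True) # prefers look-alike ASCII glyphs first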
|
PyTorch/SpeechSynthesis/HiFiGAN/hifigan | hifigan | entrypoints | # *****************************************************************************
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import urllib.request
import torch
import os
import sys
#from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def checkpoint_from_distributed(state_dict):
"""
Checks whether checkpoint was generated by DistributedDataParallel. DDP
wraps model in additional "module.", it needs to be unwrapped for single
GPU inference.
:param state_dict: model's state dict
"""
ret = False
for key, _ in state_dict.items():
if key.find('module.') != -1:
ret = True
break
return ret
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def unwrap_distributed(state_dict):
"""
Unwraps model from DistributedDataParallel.
DDP wraps model in additional "module.", it needs to be removed for single
GPU inference.
:param state_dict: model's state dict
"""
new_state_dict = {}
for key, value in state_dict.items():
new_key = key.replace('module.1.', '')
new_key = new_key.replace('module.', '')
new_state_dict[new_key] = value
return new_state_dict
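# Example (hypothetical state dict): DDP-prefixed keys such as 'module.conv_pre.weight'
# are mapped back to 'conv_pre.weight':
#   sd = {'module.conv_pre.weight': torch.zeros(1)}
#   sd = unwrap_distributed(sd)  # -> {'conv_pre.weight': tensor([0.])}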
def _download_checkpoint(checkpoint, force_reload):
model_dir = os.path.join(torch.hub._get_torch_home(), 'checkpoints')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
ckpt_file = os.path.join(model_dir, os.path.basename(checkpoint))
if not os.path.exists(ckpt_file) or force_reload:
sys.stderr.write('Downloading checkpoint from {}\n'.format(checkpoint))
urllib.request.urlretrieve(checkpoint, ckpt_file)
return ckpt_file
def nvidia_hifigan(pretrained=True, **kwargs):
"""TODO
"""
from hifigan import models as vocoder
force_reload = "force_reload" in kwargs and kwargs["force_reload"]
fp16 = "model_math" in kwargs and kwargs["model_math"] == "fp16"
if pretrained:
checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/dle/hifigan__pyt_ckpt_mode-finetune_ds-ljs22khz/versions/21.08.0_amp/files/hifigan_gen_checkpoint_10000_ft.pt'
ckpt_file = _download_checkpoint(checkpoint, force_reload)
ckpt = torch.load(ckpt_file)
state_dict = ckpt['generator']
if checkpoint_from_distributed(state_dict):
state_dict = unwrap_distributed(state_dict)
config = ckpt['config']
train_setup = ckpt.get('train_setup', {})
else:
config = {'upsample_rates': [8, 8, 2, 2], 'upsample_kernel_sizes': [16, 16, 4, 4],
'upsample_initial_channel': 512, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11],
'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]]}
for k,v in kwargs.items():
if k in config.keys():
config[k] = v
train_setup = {}
hifigan = vocoder.Generator(config)
denoiser = None
if pretrained:
hifigan.load_state_dict(state_dict)
hifigan.remove_weight_norm()
denoiser = vocoder.Denoiser(hifigan, win_length=1024)
if fp16:
hifigan.half()
denoiser.half()
return hifigan, train_setup, denoiser |
PyTorch/SpeechSynthesis/Tacotron2/platform | platform | DGX1_tacotron2_FP32_1NGPU_train | mkdir -p output
python train.py -m Tacotron2 -o output/ -lr 1e-3 --epochs 1501 -bs 48 --weight-decay 1e-6 --grad-clip-thresh 1.0 --cudnn-enabled --load-mel-from-disk --training-files=filelists/ljs_mel_text_train_filelist.txt --validation-files=filelists/ljs_mel_text_val_filelist.txt --log-file nvlog.json --anneal-steps 500 1000 1500 --anneal-factor 0.1
|
Kaldi/SpeechRecognition/scripts/docker | docker | launch_client | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
docker run --rm -it \
--net=host \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
-v $PWD/data:/data \
--entrypoint /bin/bash \
triton_kaldi_client /workspace/scripts/docker/run_client.sh $@
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/triton | triton | README | # Tacotron 2 and WaveGlow inference on Triton Inference Server
## Setup
### Clone the repository.
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2
```
### Obtain models to be loaded in Triton Inference Server.
We have prepared Tacotron 2 and WaveGlow models that are ready to be loaded in
Triton Inference Server, so you don't need to train and export the models.
Please follow the instructions below to learn how to train,
export --- or simply download the pretrained models.
### Obtain Tacotron 2 and WaveGlow checkpoints.
You can either download the pretrained checkpoints or train the models yourself.
#### (Option 1) Download pretrained checkpoints.
If you want to use a pretrained checkpoints, download them from [NGC](https://ngc.nvidia.com/catalog/models):
- [Tacotron2 checkpoint](https://ngc.nvidia.com/models/nvidia:tacotron2pyt_fp16)
- [WaveGlow checkpoint](https://ngc.nvidia.com/models/nvidia:waveglow256pyt_fp16)
#### (Option 2) Train Tacotron 2 and WaveGlow models.
In order to train the models, follow the QuickStart section in the `Tacotron2/README.md`
file by executing points 1-5. You have to train WaveGlow in a different way than described there. Use
the following command instead of the one given in QuickStart at point 5:
```bash
python -m multiproc train.py -m WaveGlow -o output/ --amp -lr 1e-4 --epochs 2001 --wn-channels 256 -bs 12 --segment-length 16000 --weight-decay 0 --grad-clip-thresh 65504.0 --cudnn-benchmark --cudnn-enabled --log-file output/nvlog.json
```
This will train the WaveGlow model with a smaller number of residual connections
in the coupling layer networks and larger segment length. Training should take
about 100 hours on DGX-1 (8x V100 16G).
### Setup Tacotron 2 TorchScript.
There are two ways to proceed.
#### (Option 1) Download the Tacotron 2 TorchScript model.
Download the Tacotron 2 TorchScript model from:
- [Tacotron2 TorchScript](https://ngc.nvidia.com/models/nvidia:tacotron2pyt_jit_fp16)
Next, save it to `triton_models/tacotron2-ts-script/1/` and rename as `model.pt`:
```bash
wget https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2pyt_jit_fp16/versions/1/files/nvidia_tacotron2pyt_jit_fp16
mkdir -p triton_models/tacotron2-ts-script/1/
mv nvidia_tacotron2pyt_jit_fp16 triton_models/tacotron2-ts-script/1/model.pt
```
Copy the Triton config file for the Tacotron 2 model to the model directory:
```bash
cp notebooks/triton/tacotron2_ts-script_config.pbtxt triton_models/tacotron2-ts-script/config.pbtxt
```
#### (Option 2) Export the Tacotron 2 model using TorchScript.
To export the Tacotron 2 model using TorchScript, type:
```bash
python exports/export_tacotron2.py --triton-model-name tacotron2-ts-script --export ts-script -- --checkpoint <Tacotron 2 checkpoint> --config-file config.json
```
This will create the model as file `model.pt` and save it in folder `triton_models/tacotron2-ts-script/1/`.
The command will also generate the Triton configuration file `config.pbtxt` for the Tacotron 2 model.
You can change the folder names using the flags `--triton-models-dir` (default `triton_models`), `--triton-model-name` (default `""`) and `--triton-model-version` (default `1`).
You can also change model file name with the flag `--export-name <filename>`.
### Setup WaveGlow TensorRT engine.
There are two ways to proceed.
#### (Option 1) Download the WaveGlow TensorRT engine.
Download the WaveGlow TensorRT engine from:
- [WaveGlow TensorRT engine](https://ngc.nvidia.com/models/nvidia:waveglow256pyt_trt_fp16)
Next, save it to `triton_models/waveglow-tensorrt/1/` and rename as `model.plan`:
```bash
wget https://api.ngc.nvidia.com/v2/models/nvidia/waveglow256pyt_trt_fp16/versions/1/files/nvidia_waveglow256pyt_trt_fp16
mkdir -p triton_models/waveglow-tensorrt/1/
mv nvidia_waveglow256pyt_trt_fp16 triton_models/waveglow-tensorrt/1/model.plan
```
Copy the Triton config file for the WaveGlow model to the model directory:
```bash
cp notebooks/triton/waveglow_tensorrt_config.pbtxt triton_models/waveglow-tensorrt/config.pbtxt
```
#### (Option 2) Export the WaveGlow model to TensorRT.
In order to export the model into the TensorRT engine, type:
```bash
python exports/export_waveglow.py --triton-model-name waveglow-tensorrt --export tensorrt --tensorrt-fp16 -- --checkpoint <waveglow_checkpoint> --config-file config.json --wn-channels 256
```
This will create the model as file `model.plan` and save it in folder `triton_models/waveglow-tensorrt/1/`.
The command will also generate the Triton configuration file `config.pbtxt` for the WaveGlow model.
You can change the folder names using the flags `--triton-models-dir` (default `triton_models`), `--triton-model-name` (default `""`) and `--triton-model-version` (default `1`).
You can also change model file name with the flag `--export-name <filename>`.
### Setup the Triton Inference Server.
Download the Triton Inference Server container by typing:
```bash
docker pull nvcr.io/nvidia/tritonserver:20.06-py3
docker tag nvcr.io/nvidia/tritonserver:20.06-py3 tritonserver:20.06
```
### Setup the Triton notebook client.
Now go to the root directory of the Tacotron 2 repo, and type:
```bash
docker build -f Dockerfile_triton_client --network=host -t speech_ai_tts_only:demo .
```
### Run the Triton Inference Server.
To run the server, type in the root directory of the Tacotron 2 repo:
```bash
NV_GPU=1 nvidia-docker run -ti --ipc=host --network=host --rm -p8000:8000 -p8001:8001 -v $PWD/triton_models/:/models tritonserver:20.06 tritonserver --model-store=/models --log-verbose 1
```
The flag `NV_GPU` selects the GPU the server is going to see. If we want it to see all the available GPUs, then run the above command without this flag.
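For example, to expose only the first two GPUs to the server (the device indices below are illustrative):
```bash
NV_GPU=0,1 nvidia-docker run -ti --ipc=host --network=host --rm -p8000:8000 -p8001:8001 -v $PWD/triton_models/:/models tritonserver:20.06 tritonserver --model-store=/models --log-verbose 1
```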
By default, the model repository will be in `triton_models/`.
### Run the Triton notebook client.
Leave the server running. In another terminal, type:
```bash
docker run -it --rm --network=host --device /dev/snd:/dev/snd speech_ai_tts_only:demo bash ./run_this.sh
```
Open the URL in a browser, open `notebook.ipynb`, click play, and enjoy.
|
TensorFlow/Detection/SSD/models/research/object_detection/g3doc | g3doc | tpu_compatibility | # TPU compatible detection pipelines
[TOC]
The Tensorflow Object Detection API supports TPU training for some models. To
make models TPU compatible you need to make a few tweaks to the model config as
mentioned below. We also provide several sample configs that you can use as a
template.
## TPU compatibility
### Static shaped tensors
TPU training currently requires all tensors in the Tensorflow Graph to have
static shapes. However, most of the sample configs in Object Detection API have
a few different tensors that are dynamically shaped. Fortunately, we provide
simple alternatives in the model configuration that modifies these tensors to
have static shape:
* **Image tensors with static shape** - This can be achieved either by using a
`fixed_shape_resizer` that resizes images to a fixed spatial shape or by
setting `pad_to_max_dimension: true` in `keep_aspect_ratio_resizer` which
pads the resized images with zeros to the bottom and right. Padded image
tensors are correctly handled internally within the model.
```
image_resizer {
fixed_shape_resizer {
height: 640
width: 640
}
}
```
or
```
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 640
max_dimension: 640
pad_to_max_dimension: true
}
}
```
* **Groundtruth tensors with static shape** - Images in a typical detection
dataset have variable number of groundtruth boxes and associated classes.
Setting `max_number_of_boxes` to a large enough number in the
`train_input_reader` and `eval_input_reader` pads the groundtruth tensors
with zeros to a static shape. Padded groundtruth tensors are correctly
handled internally within the model.
```
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
max_number_of_boxes: 200
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-0010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
max_number_of_boxes: 200
}
```
### TPU friendly ops
Although TPU supports a vast number of tensorflow ops, a few used in the
Tensorflow Object Detection API are unsupported. We list such ops below and
recommend compatible substitutes.
* **Anchor sampling** - Typically we use hard example mining in standard SSD
    pipelines to balance positive and negative anchors that contribute to the
loss. Hard Example mining uses non max suppression as a subroutine and since
non max suppression is not currently supported on TPUs we cannot use hard
example mining. Fortunately, we provide an implementation of focal loss that
can be used instead of hard example mining. Remove `hard_example_miner` from
the config and substitute `weighted_sigmoid` classification loss with
`weighted_sigmoid_focal` loss.
```
loss {
classification_loss {
weighted_sigmoid_focal {
alpha: 0.25
gamma: 2.0
}
}
localization_loss {
weighted_smooth_l1 {
}
}
classification_weight: 1.0
localization_weight: 1.0
}
```
* **Target Matching** - Object detection API provides two choices for matcher
used in target assignment: `argmax_matcher` and `bipartite_matcher`.
Bipartite matcher is not currently supported on TPU, therefore we must
modify the configs to use `argmax_matcher`. Additionally, set
`use_matmul_gather: true` for efficiency on TPU.
```
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
use_matmul_gather: true
}
}
```
### TPU training hyperparameters
Object Detection training on TPU uses synchronous SGD. On a typical cloud TPU
with 8 cores we recommend batch sizes that are 8x larger when compared to a GPU
config that uses asynchronous SGD. We also use fewer training steps (~ 1/100 x)
due to the large batch size. This necessitates careful tuning of some other
training parameters as listed below.
* **Batch size** - Use the largest batch size that can fit on cloud TPU.
```
train_config {
batch_size: 1024
}
```
* **Training steps** - Typically only 10s of thousands.
```
train_config {
num_steps: 25000
}
```
* **Batch norm decay** - Use smaller decay constants (0.97 or 0.997) since we
take fewer training steps.
```
batch_norm {
scale: true,
decay: 0.97,
epsilon: 0.001,
}
```
* **Learning rate** - Use large learning rate with warmup. Scale learning rate
linearly with batch size. See `cosine_decay_learning_rate` or
`manual_step_learning_rate` for examples.
```
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .04
total_steps: 25000
warmup_learning_rate: .013333
warmup_steps: 2000
}
}
```
or
```
learning_rate: {
manual_step_learning_rate {
warmup: true
initial_learning_rate: .01333
schedule {
step: 2000
learning_rate: 0.04
}
schedule {
step: 15000
learning_rate: 0.004
}
}
}
```
## Example TPU compatible configs
We provide example config files that you can use to train your own models on TPU
* <a href='https://github.com/tensorflow/models/blob/master/research/object_detection/samples/configs/ssd_mobilenet_v1_300x300_coco14_sync.config'>ssd_mobilenet_v1_300x300</a> <br>
* <a href='https://github.com/tensorflow/models/blob/master/research/object_detection/samples/configs/ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco14_sync.config'>ssd_mobilenet_v1_ppn_300x300</a> <br>
* <a href='https://github.com/tensorflow/models/blob/master/research/object_detection/samples/configs/ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync.config'>ssd_mobilenet_v1_fpn_640x640
(mobilenet based retinanet)</a> <br>
* <a href='https://github.com/tensorflow/models/blob/master/research/object_detection/samples/configs/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync.config'>ssd_resnet50_v1_fpn_640x640
(retinanet)</a> <br>
## Supported Meta architectures
Currently, `SSDMetaArch` models are supported on TPUs. `FasterRCNNMetaArch` is
going to be supported soon.
|
PyTorch/Segmentation/nnUNet/utils | utils | utils | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import os
import pickle
from subprocess import run
import numpy as np
import torch
from pytorch_lightning.utilities import rank_zero_only
@rank_zero_only
def print0(text):
print(text)
def get_task_code(args):
return f"{args.task}_{args.dim}d"
def get_config_file(args):
if args.data != "/data":
path = os.path.join(args.data, "config.pkl")
else:
task_code = get_task_code(args)
path = os.path.join(args.data, task_code, "config.pkl")
return pickle.load(open(path, "rb"))
def set_cuda_devices(args):
assert args.gpus <= torch.cuda.device_count(), f"Requested {args.gpus} gpus, available {torch.cuda.device_count()}."
device_list = ",".join([str(i) for i in range(args.gpus)])
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("CUDA_VISIBLE_DEVICES", device_list)
def verify_ckpt_path(args):
if args.resume_training:
resume_path_ckpt = os.path.join(
args.ckpt_path if args.ckpt_path is not None else "", "checkpoints", "last.ckpt"
)
resume_path_results = os.path.join(args.results, "checkpoints", "last.ckpt")
if os.path.exists(resume_path_ckpt):
return resume_path_ckpt
if os.path.exists(resume_path_results):
return resume_path_results
print("[Warning] Checkpoint not found. Starting training from scratch.")
return None
if args.ckpt_path is None or not os.path.isfile(args.ckpt_path):
print(f"Provided checkpoint {args.ckpt_path} is not a file. Starting training from scratch.")
return None
return args.ckpt_path
def make_empty_dir(path):
run(["rm", "-rf", path])
os.makedirs(path)
def get_stats(pred, targ, class_idx):
tp = np.logical_and(pred == class_idx, targ == class_idx).sum()
fn = np.logical_and(pred != class_idx, targ == class_idx).sum()
fp = np.logical_and(pred == class_idx, targ != class_idx).sum()
return tp, fn, fp
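# Example: the counts returned by get_stats can be combined into a Dice score
# (a sketch, not used elsewhere in this module):
#   tp, fn, fp = get_stats(pred, targ, class_idx=1)
#   dice = 2 * tp / (2 * tp + fp + fn) if (2 * tp + fp + fn) > 0 else 0.0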
def set_granularity():
_libcudart = ctypes.CDLL("libcudart.so")
pValue = ctypes.cast((ctypes.c_int * 1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
|
PyTorch/SpeechRecognition/QuartzNet/scripts | scripts | train_benchmark | #!/bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -a
: ${DATA_DIR:=${1:-"/datasets/LibriSpeech"}}
: ${OUTPUT_DIR:=${3:-"/results"}}
: ${TRAIN_MANIFESTS:="$DATA_DIR/librispeech-train-clean-100-wav.json"}
: ${BENCHMARK_EPOCHS:=20}
: ${EPOCHS:=100000}
: ${RESUME:=false}
: ${SAVE_FREQUENCY:=100000}
: ${EVAL_FREQUENCY:=100000}
: ${LEARNING_RATE:=0.0001}
: ${AMP:=false}
: ${EMA:=0}
: ${DALI_DEVICE:="gpu"}
: ${NUM_GPUS_SEQ:="8 4 1"}
: ${ACC_BATCH_SIZE:="144"}
: ${GRAD_ACC_SEQ:="4 2"}
# A range of batch lengths for LibriSpeech
# with continuous speed perturbation (0.85, 1.15) and max duration 16.7s
: ${PRE_ALLOCATE:="1408 1920"}
for NUM_GPUS in $NUM_GPUS_SEQ; do
for GRAD_ACCUMULATION in $GRAD_ACC_SEQ; do
# Scale the number of epochs to the number of GPUs
BMARK=$((BENCHMARK_EPOCHS * NUM_GPUS / 8))
BMARK=$((BMARK < 2 ? 2 : BMARK))
BMARK=$((BMARK > BENCHMARK_EPOCHS ? BENCHMARK_EPOCHS : BMARK))
EPOCHS_THIS_JOB=$((BMARK + 1))
GPU_BATCH_SIZE=$((ACC_BATCH_SIZE / $GRAD_ACCUMULATION * 8 / $NUM_GPUS))
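    # Worked example with the defaults above: NUM_GPUS=4 and GRAD_ACCUMULATION=2 give
    # BMARK=20*4/8=10 benchmark epochs, EPOCHS_THIS_JOB=11 and
    # GPU_BATCH_SIZE=144/2*8/4=144 samples per GPU.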
LOG_FILE="$OUTPUT_DIR/perf-train_dali-${DALI_DEVICE}_amp-${AMP}_"
LOG_FILE+="1x${NUM_GPUS}x${GPU_BATCH_SIZE}x${GRAD_ACCUMULATION}.json"
BENCHMARK_EPOCHS=$BMARK bash ./scripts/train.sh "$@"
done
done
|
TensorFlow2/Recommendation/SIM/sim/models | models | sequential_recommender_model | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
import tensorflow as tf
from sim.data.defaults import (CARDINALITY_SELECTOR, NEGATIVE_HISTORY_CHANNEL, POSITIVE_HISTORY_CHANNEL,
TARGET_ITEM_FEATURES_CHANNEL, USER_FEATURES_CHANNEL)
from sim.layers.ctr_classification_mlp import CTRClassificationMLP
from sim.layers.embedding import Embedding
class SequentialRecommenderModel(tf.keras.Model, ABC):
def __init__(self, feature_spec, embedding_dim, classifier_dense_sizes=(200,)):
super(SequentialRecommenderModel, self).__init__()
self.embedding_dim = embedding_dim
features = feature_spec.feature_spec
channel_spec = feature_spec.channel_spec
embedding_names = []
user_feature_fstring = "user_feat{}"
item_feature_fstring = "item_feat{}"
# Features in the same embedding group will share embedding table
embedding_group_counter = 0
feature_groups_cardinalities = []
self.feature_name_to_embedding_group = {}
for i, user_feature in enumerate(channel_spec[USER_FEATURES_CHANNEL]):
self.feature_name_to_embedding_group[user_feature] = embedding_group_counter
cardinality = features[user_feature][CARDINALITY_SELECTOR]
feature_groups_cardinalities.append(cardinality)
embedding_names.append(user_feature_fstring.format(i))
embedding_group_counter += 1
# Group corresponding item features from different item channels together
zipped_item_features = zip(channel_spec[TARGET_ITEM_FEATURES_CHANNEL],
channel_spec[POSITIVE_HISTORY_CHANNEL], channel_spec[NEGATIVE_HISTORY_CHANNEL])
for i, (feature_target, feature_pos, feature_neg) in enumerate(zipped_item_features):
self.feature_name_to_embedding_group[feature_target] = embedding_group_counter
self.feature_name_to_embedding_group[feature_pos] = embedding_group_counter
self.feature_name_to_embedding_group[feature_neg] = embedding_group_counter
cardinality = features[feature_target][CARDINALITY_SELECTOR]
feature_groups_cardinalities.append(cardinality)
embedding_names.append(item_feature_fstring.format(i))
embedding_group_counter += 1
self.variable_embeddings_groups = []
for embedding_name, cardinality in zip(embedding_names, feature_groups_cardinalities):
self.variable_embeddings_groups.append(
Embedding(
embedding_name=embedding_name,
                    input_dim=cardinality + 1,  # ids in range <1, cardinality> (boundaries included)
output_dim=embedding_dim
)
)
self.classificationMLP = CTRClassificationMLP(
layer_sizes=classifier_dense_sizes
)
def embed(self, features):
embeddings = []
for (variable, id) in features.items():
embedding_group = self.feature_name_to_embedding_group[variable]
embeddings.append(self.variable_embeddings_groups[embedding_group](id))
return tf.concat(embeddings, -1)
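    # Sketch of how `embed` is used (the keys are feature names from the feature spec;
    # the names below are hypothetical):
    #   dense = model.embed({"uid": uid_batch, "item_id": item_batch})
    # Each id tensor is looked up in its shared embedding table and the results are
    # concatenated along the last axis.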
@abstractmethod
def call(self, inputs):
pass
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts | scripts | tacotron2_to_json | #!/usr/bin/env python3
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import torch
import sys
if len(sys.argv) != 3:
print("Must specify statedict to load and json to write")
sys.exit(1)
statedict_path = sys.argv[1]
json_path = sys.argv[2]
print("Reading from '%s' and writing to '%s'." % (statedict_path, json_path))
statedict = dict(torch.load(statedict_path)["state_dict"])
outdict = {}
for k, v in dict(statedict).items():
if k.startswith("module."):
k = k[len("module."):]
print(k)
outdict[k] = v.cpu().numpy().tolist()
with open(json_path, "w") as fout:
json.dump(outdict, fout)
print("Wrote to '%s'" % json_path)
|
PyTorch/SpeechSynthesis/FastPitch/scripts | scripts | download_dataset | #!/usr/bin/env bash
set -e
scripts/download_cmudict.sh
DATA_DIR="LJSpeech-1.1"
LJS_ARCH="LJSpeech-1.1.tar.bz2"
LJS_URL="http://data.keithito.com/data/speech/${LJS_ARCH}"
if [ ! -d ${DATA_DIR} ]; then
echo "Downloading ${LJS_ARCH} ..."
wget -q ${LJS_URL}
echo "Extracting ${LJS_ARCH} ..."
tar jxvf ${LJS_ARCH}
rm -f ${LJS_ARCH}
fi
|
TensorFlow2/Recommendation/WideAndDeep/triton | triton | export_model | #!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseSaver,
ExportFormat,
ModelInputType,
TorchJit,
load_from_file,
)
from .deployment_toolkit.extensions import loaders, savers # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("export_model")
INPUT_MODEL_TYPES = [
ModelInputType.TF_ESTIMATOR,
ModelInputType.TF_KERAS,
ModelInputType.PYT,
]
OUTPUT_MODEL_TYPES = [
ExportFormat.TF_SAVEDMODEL,
ExportFormat.TORCHSCRIPT,
ExportFormat.ONNX,
]
TORCH_JIT_TYPES = [
TorchJit.NONE,
TorchJit.TRACE,
TorchJit.SCRIPT,
]
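# Illustrative invocation (a sketch only: the valid --input-type/--output-type
# strings come from the ModelInputType and ExportFormat enums in
# deployment_toolkit.core, and every path below is hypothetical):
#   python -m triton.export_model \
#       --input-path triton/model.py \
#       --input-type tf-keras \
#       --output-path model.savedmodel \
#       --output-type tf-savedmodel \
#       --dataloader triton/dataloader.py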
def _get_args():
parser = argparse.ArgumentParser(
description="Script for exporting models from supported frameworks.", allow_abbrev=False
)
parser.add_argument("--input-path", help="Path to input python module", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument(
"--torch-jit",
help="Torch Jit",
choices=[f.value for f in TORCH_JIT_TYPES],
required=False,
default=None,
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
if args.input_type == ModelInputType.PYT.value and args.output_type == ExportFormat.ONNX.value:
saver_type = f"{ModelInputType.PYT.value}--{ExportFormat.ONNX.value}"
else:
saver_type = args.output_type
Saver: BaseSaver = savers.get(saver_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
dataloader_fn = None
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
    LOGGER.info(
        "Loading model from %s (file exists: %s); output type: %s",
        args.input_path,
        os.path.isfile(args.input_path),
        args.output_type,
    )
model = loader.load(
args.input_path,
dataloader_fn=dataloader_fn,
output_type=args.output_type,
torch_jit=args.torch_jit,
)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if args.input_type == ModelInputType.PYT.value and args.output_type == ExportFormat.ONNX.value:
saver_type = f"{ModelInputType.PYT.value}--{ExportFormat.ONNX.value}"
else:
saver_type = args.output_type
Saver: BaseSaver = savers.get(saver_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path, dataloader_fn)
if __name__ == "__main__":
main()
|
TensorFlow2/Recommendation/WideAndDeep/trainer/utils | utils | arguments | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
DEFAULT_DIR = "/outbrain"
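# Illustrative flag combination (values below are the defaults defined in
# parse_args(); the training entry point is assumed to forward them unchanged):
#   --dataset_path /outbrain/data --amp --xla \
#   --global_batch_size 131072 --deep_learning_rate 0.00012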
def parse_args():
parser = argparse.ArgumentParser(
description="Tensorflow2 WideAndDeep Model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=True,
)
locations = parser.add_argument_group("location of datasets")
locations.add_argument(
"--dataset_path",
type=str,
default=f"{DEFAULT_DIR}/data",
help="Dataset base directory, relative to which path to feature_spec and paths in feature_spec are resolved"
)
locations.add_argument(
"--fspec_file",
type=str,
default="feature_spec.yaml",
help="Path to the feature spec file, relative to dataset_path"
)
locations.add_argument(
"--embedding_sizes_file",
type=str,
default="data/outbrain/embedding_sizes.json",
help="Path to the file containing a dictionary of embedding sizes for categorical features"
)
locations.add_argument(
"--use_checkpoint",
default=False,
action="store_true",
help="Use checkpoint stored in model_dir path",
)
locations.add_argument(
"--model_dir",
type=str,
default=f"{DEFAULT_DIR}/checkpoints",
help="Destination where the model checkpoint will be saved",
)
locations.add_argument(
"--results_dir",
type=str,
default="/results",
help="Directory to store training results",
)
locations.add_argument(
"--log_filename",
type=str,
default="log.json",
help="Name of the file to store dlloger output",
)
training_params = parser.add_argument_group("training parameters")
training_params.add_argument(
"--global_batch_size",
type=int,
default=131072,
help="Total (global) size of training batch",
)
training_params.add_argument(
"--eval_batch_size",
type=int,
default=131072,
help="Total (global) size of evaluation batch",
)
training_params.add_argument(
"--num_epochs", type=int, default=20, help="Number of training epochs"
)
training_params.add_argument(
"--cpu", default=False, action="store_true", help="Run computations on the CPU"
)
training_params.add_argument(
"--amp",
default=False,
action="store_true",
help="Enable automatic mixed precision conversion",
)
training_params.add_argument(
"--xla", default=False, action="store_true", help="Enable XLA conversion"
)
training_params.add_argument(
"--linear_learning_rate",
type=float,
default=0.02,
help="Learning rate for linear model",
)
training_params.add_argument(
"--deep_learning_rate",
type=float,
default=0.00012,
help="Learning rate for deep model",
)
training_params.add_argument(
"--deep_warmup_epochs",
type=float,
default=6,
help="Number of learning rate warmup epochs for deep model",
)
model_construction = parser.add_argument_group("model construction")
model_construction.add_argument(
"--deep_hidden_units",
type=int,
default=[1024, 1024, 1024, 1024, 1024],
nargs="+",
help="Hidden units per layer for deep model, separated by spaces",
)
model_construction.add_argument(
"--deep_dropout",
type=float,
default=0.1,
help="Dropout regularization for deep model",
)
model_construction.add_argument(
"--combiner",
type=str,
default="sum",
choices=[
"mean",
"sum",
],
help="Type of aggregation used for multi hot categorical features",
)
run_params = parser.add_argument_group("run mode parameters")
run_params.add_argument(
"--num_auc_thresholds",
type=int,
default=8000,
help="Number of thresholds for the AUC computation",
)
run_params.add_argument(
"--disable_map_calculation",
dest="map_calculation_enabled",
action="store_false",
default=True,
help="Disable calculation of MAP metric. See ReadMe for additional dataset requirements keeping it enabled introduces."
)
run_params.add_argument(
"--evaluate",
default=False,
action="store_true",
help="Only perform an evaluation on the validation dataset, don't train",
)
run_params.add_argument(
"--benchmark",
action="store_true",
default=False,
help="Run training or evaluation benchmark to collect performance metrics",
)
run_params.add_argument(
"--benchmark_warmup_steps",
type=int,
default=500,
help="Number of warmup steps before the start of the benchmark",
)
run_params.add_argument(
"--benchmark_steps",
type=int,
default=1000,
help="Number of steps for performance benchmark",
)
run_params.add_argument(
"--affinity",
type=str,
default="unique_interleaved",
choices=[
"all",
"single",
"single_unique",
"unique_interleaved",
"unique_contiguous",
"disabled",
],
help="Type of CPU affinity",
)
return parser.parse_args()
|
CUDA-Optimized/FastSpeech | FastSpeech | setup | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup, find_packages
def get_requirements(filename='requirements.txt'):
deps = []
with open(filename, 'r') as f:
for pkg in f.readlines():
if pkg.strip():
deps.append(pkg)
return deps
setup(
name='fastspeech',
version='0.2.2',
description='FastSpeech training and inference in PyTorch and TensorRT',
author='Dabi Ahn',
keywords='tts',
packages=find_packages(),
install_requires=get_requirements(),
python_requires='>=3',
include_package_data=True
) |
PyTorch/Segmentation/nnUNet/triton | triton | metrics | from typing import Any, Dict, List, Optional
import numpy as np
from triton.deployment_toolkit.core import BaseMetricsCalculator
class MetricsCalculator(BaseMetricsCalculator):
def calc(
self,
*,
ids: List[Any],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
y_pred: Dict[str, np.ndarray],
) -> Dict[str, float]:
y_pred = y_pred["OUTPUT__0"]
y_true = y_real["OUTPUT__0"]
n_examples = y_pred.shape[0]
nclass = max(np.max(y_pred), np.max(y_true))
dice = np.zeros((nclass,))
for i in range(n_examples):
for c in range(nclass):
if not (y_true[i] == c).any():
# no foreground class
dice[c] += 1 if not (y_pred[i] == c).any() else 0
continue
true_pos, false_neg, false_pos = self.get_stats(y_pred[i], y_true[i], c + 1)
denom = 2 * true_pos + false_neg + false_pos
dice[c] += 2 * true_pos / denom if denom != 0 else 0.0
dice /= n_examples
dice = np.mean(dice)
return {"dice": dice}
@staticmethod
def get_stats(pred, targ, class_idx):
true_pos = np.logical_and(pred == class_idx, targ == class_idx).sum()
false_neg = np.logical_and(pred != class_idx, targ == class_idx).sum()
false_pos = np.logical_and(pred == class_idx, targ != class_idx).sum()
return true_pos, false_neg, false_pos
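# For reference: the per-class quantity accumulated in calc() above is the Dice
# coefficient, dice_c = 2 * TP / (2 * TP + FP + FN), with the convention that a
# class absent from both prediction and ground truth scores 1.0 and a class
# present only in the prediction scores 0.0.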
|
PyTorch/Segmentation/nnUNet/scripts | scripts | benchmark | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from pathlib import Path
parser = ArgumentParser(ArgumentDefaultsHelpFormatter)
parser.add_argument("--mode", type=str, required=True, choices=["train", "predict"], help="Benchmarking mode")
parser.add_argument("--task", type=str, default="01", help="Task code")
parser.add_argument("--gpus", type=int, default=1, help="Number of GPUs to use")
parser.add_argument("--nodes", type=int, default=1, help="Number of nodes to use")
parser.add_argument("--dim", type=int, required=True, help="Dimension of UNet")
parser.add_argument("--batch_size", type=int, required=True, help="Batch size")
parser.add_argument("--amp", action="store_true", help="Enable automatic mixed precision")
parser.add_argument("--bind", action="store_true", help="Bind CPUs for each GPU. Improves throughput for multi-GPU.")
parser.add_argument("--train_batches", type=int, default=200, help="Number of batches for training")
parser.add_argument("--test_batches", type=int, default=200, help="Number of batches for inference")
parser.add_argument("--warmup", type=int, default=100, help="Warmup iterations before collecting statistics")
parser.add_argument("--results", type=str, default="/results", help="Path to results directory")
parser.add_argument("--logname", type=str, default="perf.json", help="Name of dlloger output")
if __name__ == "__main__":
args = parser.parse_args()
path_to_main = Path(__file__).resolve().parent.parent / "main.py"
cmd = ""
if args.bind:
cmd += "bindpcie --cpu=exclusive,nosmt "
cmd += f"python main.py --task {args.task} --benchmark --epochs 2 "
cmd += f"--results {args.results} "
cmd += f"--logname {args.logname} "
cmd += f"--exec_mode {args.mode} "
cmd += f"--dim {args.dim} "
cmd += f"--gpus {args.gpus} "
cmd += f"--nodes {args.nodes} "
cmd += f"--train_batches {args.train_batches} "
cmd += f"--test_batches {args.test_batches} "
cmd += f"--warmup {args.warmup} "
cmd += "--amp " if args.amp else ""
if args.mode == "train":
cmd += f"--batch_size {args.batch_size} "
else:
cmd += f"--val_batch_size {args.batch_size} "
if args.amp and args.dim == 3:
cmd += "--norm instance_nvfuser --layout NDHWC"
subprocess.run(cmd, shell=True)
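    # For example, `--mode train --dim 2 --gpus 8 --batch_size 64 --amp` expands
    # (roughly) to the following command:
    #   python main.py --task 01 --benchmark --epochs 2 --results /results \
    #       --logname perf.json --exec_mode train --dim 2 --gpus 8 --nodes 1 \
    #       --train_batches 200 --test_batches 200 --warmup 100 --amp --batch_size 64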
|
PyTorch/Forecasting/TFT/triton | triton | dataloader | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from data_utils import TFTDataset
def update_argparser(parser):
parser.add_argument("--dataset", type=str, help="Path to dataset to be used", required=True)
parser.add_argument("--checkpoint", type=str, help="Path to checkpoint to be used", required=True)
parser.add_argument("--batch-size", type=int, help="Path to dataset to be used", default=64)
def get_dataloader_fn(dataset, checkpoint, batch_size=64):
state_dict = torch.load(os.path.join(checkpoint, "checkpoint.pt"))
config = state_dict['config']
test_split = TFTDataset(os.path.join(dataset, "test.csv"), config)
data_loader = DataLoader(test_split, batch_size=int(batch_size), num_workers=2)
input_names_dict = {'s_cat': 's_cat__0', 's_cont':'s_cont__1', 'k_cat':'k_cat__2', 'k_cont':'k_cont__3', 'o_cat':'o_cat__4', 'o_cont':'o_cont__5', 'target':'target__6', 'id':'id__7'}
reshaper = [-1] + [1]
def _get_dataloader():
for step, batch in enumerate(data_loader):
bs = batch['target'].shape[0]
x = {input_names_dict[key]: tensor.numpy() if tensor.numel() else np.ones([bs]).reshape(reshaper) for key, tensor in batch.items()}
ids = batch['id'][:,0,:].numpy()
# ids = np.arange(step * batch_size, (step + 1) * batch_size)
y_real = {'target__0':np.tile(batch['target'][:,config.encoder_length:,:].numpy(), (1, 1, len(config.quantiles)))}
yield (ids, x, y_real)
return _get_dataloader |
TensorFlow2/LanguageModeling/ELECTRA/scripts/docker | docker | build | #!/bin/bash
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
docker build --network=host . --rm -t electra
|
PaddlePaddle/LanguageModeling/BERT/scripts | scripts | run_pretraining_p2 | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python3 -m paddle.distributed.launch \
--gpus="0,1,2,3,4,5,6,7" \
./run_pretraining.py \
--input-dir=pretrain/phase2/bin_size_64/parquet \
--vocab-file=vocab/bert-large-uncased-vocab.txt \
--output-dir=./results/checkpoints \
--bert-model=bert-large-uncased \
--from-checkpoint=./results/checkpoints/bert-large-uncased/phase2 \
--last-step-of-checkpoint=auto \
--from-phase1-final-params=./results/checkpoints/bert-large-uncased/phase1/7038 \
--batch-size=32 \
--max-steps=1563 \
--num-steps-per-checkpoint=200 \
--log-freq=1 \
--max-seq-length=512 \
--max-predictions-per-seq=80 \
--gradient-merge-steps=128 \
--amp \
--use-dynamic-loss-scaling \
--optimizer=Lamb \
--fuse-mha \
--phase2 \
--scale-loss=1048576 \
--learning-rate=4e-3 \
--warmup-proportion=0.128 \
--report-file=./results/dllogger_p1.json
|
TensorFlow/Recommendation/VAE-CF | VAE-CF | LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
TensorFlow2/Recommendation/WideAndDeep/trainer/model | model | layers | # Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorflow as tf
from tensorflow.python.feature_column import feature_column_v2 as fc
# pylint has issues with TF array ops, so disable checks until fixed:
# https://github.com/PyCQA/pylint/issues/3613
# pylint: disable=no-value-for-parameter, unexpected-keyword-arg
def _sort_columns(feature_columns):
return sorted(feature_columns, key=lambda col: col.name)
def _validate_numeric_column(feature_column):
if len(feature_column.shape) > 1:
return (
"Matrix numeric features are not allowed, "
"found feature {} with shape {}".format(
feature_column.key, feature_column.shape
)
)
def _validate_categorical_column(feature_column):
if not isinstance(feature_column, fc.IdentityCategoricalColumn):
return (
"Only acceptable categorical columns for feeding "
"embeddings are identity, found column {} of type {}. "
"Consider using NVTabular online preprocessing to perform "
"categorical transformations".format(
feature_column.name, type(feature_column).__name__
)
)
def _validate_dense_feature_columns(feature_columns):
_errors = []
for feature_column in feature_columns:
if isinstance(feature_column, fc.CategoricalColumn):
if not isinstance(feature_column, fc.BucketizedColumn):
_errors.append(
"All feature columns must be dense, found categorical "
"column {} of type {}. Please wrap categorical columns "
"in embedding or indicator columns before passing".format(
feature_column.name, type(feature_column).__name__
)
)
else:
_errors.append(
"Found bucketized column {}. DenseFeatures layer "
"cannot apply bucketization preprocessing. Consider using "
"NVTabular to do preprocessing offline".format(feature_column.name)
)
elif isinstance(feature_column, (fc.EmbeddingColumn, fc.IndicatorColumn)):
_errors.append(
_validate_categorical_column(feature_column.categorical_column)
)
elif isinstance(feature_column, fc.NumericColumn):
_errors.append(_validate_numeric_column(feature_column))
_errors = list(filter(lambda e: e is not None, _errors))
if len(_errors) > 0:
msg = "Found issues with columns passed to DenseFeatures:"
msg += "\n\t".join(_errors)
raise ValueError(_errors)
def _validate_stack_dimensions(feature_columns):
dims = []
for feature_column in feature_columns:
if isinstance(feature_column, fc.EmbeddingColumn):
dimension = feature_column.dimension
elif isinstance(feature_column, fc.IndicatorColumn):
dimension = feature_column.categorical_column.num_buckets
else:
dimension = feature_column.shape[0]
dims.append(dimension)
dim0 = dims[0]
if not all(dim == dim0 for dim in dims[1:]):
dims = ", ".join(map(str, dims))
raise ValueError(
"'stack' aggregation requires all categorical "
"embeddings and continuous features to have same "
"size. Found dimensions {}".format(dims)
)
def _categorical_embedding_lookup(table, inputs, feature_name, combiner):
# Multi-hots
if inputs[feature_name].shape[1] > 1:
# Multi-hot embedding lookup
x = inputs[feature_name]
embeddings = tf.gather(table, x)
# Remove padded values
# This is an inverse of dataloader pad_batch
mask_array = tf.cast(x >= 0, embeddings.dtype)
mask = tf.expand_dims(mask_array, -1)
embeddings = tf.math.multiply(embeddings, mask)
# Sum aggregation
embeddings = tf.reduce_sum(embeddings, axis=1)
# Divide by number of not zeros if mean aggregation
if combiner == "mean":
row_lengths = tf.reduce_sum(mask_array, axis=1)
row_lengths = tf.cast(row_lengths, embeddings.dtype)
row_lengths = tf.expand_dims(row_lengths, -1)
embeddings = tf.math.divide_no_nan(embeddings, row_lengths)
else:
embeddings = tf.gather(table, inputs[feature_name][:, 0])
return embeddings
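# Worked example (illustrative): for a multi-hot row padded with -1, e.g.
# x = [[3, 7, -1]], the mask (x >= 0) zeroes out whatever the lookup produced for
# the padded position before the reduce_sum over axis 1; with combiner="mean" the
# sum is then divided by 2, the number of real (non-padded) categories in the row.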
def _handle_continuous_feature(inputs, feature_column):
if feature_column.shape[0] > 1:
x = inputs[feature_column.name]
if isinstance(x, tuple):
x = x[0]
return tf.reshape(x, (-1, feature_column.shape[0]))
return inputs[feature_column.name]
class DenseFeatures(tf.keras.layers.Layer):
"""
Layer which maps a dictionary of input tensors to a dense, continuous
vector digestible by a neural network. Meant to reproduce the API exposed
by `tf.keras.layers.DenseFeatures` while reducing overhead for the
case of one-hot categorical and scalar numeric features.
Uses TensorFlow `feature_column` to represent inputs to the layer, but
does not perform any preprocessing associated with those columns. As such,
it should only be passed `numeric_column` objects and their subclasses,
`embedding_column` and `indicator_column`. Preprocessing functionality should
be moved to NVTabular.
For multi-hot categorical or vector continuous data, represent the data for
a feature with a dictionary entry `"<feature_name>__values"` corresponding
to the flattened array of all values in the batch. For multi-hot categorical
data, there should be a corresponding `"<feature_name>__nnzs"` entry that
describes how many categories are present in each sample (and so has length
`batch_size`).
Note that categorical columns should be wrapped in embedding or
indicator columns first, consistent with the API used by
`tf.keras.layers.DenseFeatures`.
Example usage::
column_a = tf.feature_column.numeric_column("a", (1,))
column_b = tf.feature_column.categorical_column_with_identity("b", 100)
column_b_embedding = tf.feature_column.embedding_column(column_b, 4)
inputs = {
"a": tf.keras.Input(name="a", shape=(1,), dtype=tf.float32),
"b": tf.keras.Input(name="b", shape=(1,), dtype=tf.int64)
}
x = DenseFeatures([column_a, column_b_embedding])(inputs)
Parameters
----------
feature_columns : list of `tf.feature_column`
feature columns describing the inputs to the layer
aggregation : str in ("concat", "stack")
how to combine the embeddings from multiple features
"""
def __init__(self, feature_columns, aggregation="concat", name=None, **kwargs):
# sort feature columns to make layer independent of column order
feature_columns = _sort_columns(feature_columns)
_validate_dense_feature_columns(feature_columns)
if aggregation == "stack":
_validate_stack_dimensions(feature_columns)
elif aggregation != "concat":
raise ValueError(
"Unrecognized aggregation {}, must be stack or concat".format(
aggregation
)
)
self.feature_columns = feature_columns
self.aggregation = aggregation
super(DenseFeatures, self).__init__(name=name, **kwargs)
def build(self, input_shapes):
self.embedding_tables = {}
for feature_column in self.feature_columns:
if isinstance(feature_column, fc.NumericColumn):
continue
feature_name = feature_column.categorical_column.key
num_buckets = feature_column.categorical_column.num_buckets
if isinstance(feature_column, fc.EmbeddingColumn):
self.embedding_tables[feature_name] = self.add_weight(
name="{}/embedding_weights".format(feature_name),
trainable=True,
initializer="glorot_normal",
shape=(num_buckets, feature_column.dimension),
)
else:
self.embedding_tables[feature_name] = self.add_weight(
name="{}/embedding_weights".format(feature_name),
trainable=False,
initializer=tf.constant_initializer(np.eye(num_buckets)),
shape=(num_buckets, num_buckets),
)
self.built = True
def call(self, inputs):
features = []
for feature_column in self.feature_columns:
if isinstance(feature_column, fc.NumericColumn):
x = _handle_continuous_feature(inputs, feature_column)
features.append(x)
else:
feature_name = feature_column.categorical_column.name
table = self.embedding_tables[feature_name]
combiner = getattr(feature_column, "combiner", "sum")
embeddings = _categorical_embedding_lookup(
table, inputs, feature_name, combiner
)
features.append(embeddings)
if self.aggregation == "stack":
return tf.stack(features, axis=1)
return tf.concat(features, axis=1)
    def compute_output_shape(self, input_shapes):
        input_shape = list(input_shapes.values())[0]
        # Derive per-feature output widths from the feature columns themselves.
        dims = []
        for feature_column in self.feature_columns:
            if isinstance(feature_column, fc.NumericColumn):
                dims.append(feature_column.shape[0])
            elif isinstance(feature_column, fc.EmbeddingColumn):
                dims.append(feature_column.dimension)
            else:
                dims.append(feature_column.categorical_column.num_buckets)
        if self.aggregation == "concat":
            return (input_shape[0], sum(dims))
        else:
            # "stack" requires all feature widths to match (validated in __init__)
            return (input_shape[0], len(dims), dims[0])
def get_config(self):
return {
"feature_columns": self.feature_columns,
"aggregation": self.aggregation,
}
def _validate_linear_feature_columns(feature_columns):
_errors = []
for feature_column in feature_columns:
if isinstance(feature_column, (fc.EmbeddingColumn, fc.IndicatorColumn)):
            _errors.append(
                "Only pass categorical or numeric columns to LinearFeatures "
                "layer, found column {} of type {}".format(
                    feature_column.name, type(feature_column).__name__
                )
            )
elif isinstance(feature_column, fc.NumericColumn):
_errors.append(_validate_numeric_column(feature_column))
else:
_errors.append(_validate_categorical_column(feature_column))
_errors = list(filter(lambda e: e is not None, _errors))
if len(_errors) > 0:
msg = "Found issues with columns passed to ScalarDenseFeatures:"
msg += "\n\t".join(_errors)
raise ValueError(_errors)
# TODO: is there a clean way to combine these two layers
# into one, maybe with a "sum" aggregation? Major differences
# seem to be whether categorical columns are wrapped in
# embeddings and the numeric matmul, both of which seem
# reasonably easy to check. At the very least, we should
# be able to subclass I think?
class LinearFeatures(tf.keras.layers.Layer):
"""
Layer which implements a linear combination of one-hot categorical
and scalar numeric features. Based on the "wide" branch of the Wide & Deep
network architecture.
Uses TensorFlow ``feature_column``s to represent inputs to the layer, but
does not perform any preprocessing associated with those columns. As such,
it should only be passed ``numeric_column`` and
``categorical_column_with_identity``. Preprocessing functionality should
be moved to NVTabular.
    Also note that, unlike DenseFeatures, categorical columns should
NOT be wrapped in embedding or indicator columns first.
Example usage::
column_a = tf.feature_column.numeric_column("a", (1,))
column_b = tf.feature_column.categorical_column_with_identity("b", 100)
inputs = {
"a": tf.keras.Input(name="a", shape=(1,), dtype=tf.float32),
"b": tf.keras.Input(name="b", shape=(1,), dtype=tf.int64)
}
        x = LinearFeatures([column_a, column_b])(inputs)
Parameters
----------
feature_columns : list of tf.feature_column
feature columns describing the inputs to the layer
"""
def __init__(self, feature_columns, name=None, **kwargs):
feature_columns = _sort_columns(feature_columns)
_validate_linear_feature_columns(feature_columns)
self.feature_columns = feature_columns
super(LinearFeatures, self).__init__(name=name, **kwargs)
def build(self, input_shapes):
# TODO: I've tried combining all the categorical tables
# into a single giant lookup op, but it ends up turning
# out the adding the offsets to lookup indices at call
# time ends up being much slower due to kernel overhead
# Still, a better (and probably custom) solutions would
# probably be desirable
numeric_kernel_dim = 0
self.embedding_tables = {}
for feature_column in self.feature_columns:
if isinstance(feature_column, fc.NumericColumn):
numeric_kernel_dim += feature_column.shape[0]
continue
self.embedding_tables[feature_column.key] = self.add_weight(
name="{}/embedding_weights".format(feature_column.key),
initializer="zeros",
trainable=True,
shape=(feature_column.num_buckets, 1),
)
if numeric_kernel_dim > 0:
self.embedding_tables["numeric"] = self.add_weight(
name="numeric/embedding_weights",
initializer="zeros",
trainable=True,
shape=(numeric_kernel_dim, 1),
)
self.bias = self.add_weight(
name="bias", initializer="zeros", trainable=True, shape=(1,)
)
self.built = True
def call(self, inputs):
x = self.bias
numeric_inputs = []
for feature_column in self.feature_columns:
if isinstance(feature_column, fc.NumericColumn):
numeric_inputs.append(
_handle_continuous_feature(inputs, feature_column)
)
else:
table = self.embedding_tables[feature_column.key]
embeddings = _categorical_embedding_lookup(
table, inputs, feature_column.key, "sum"
)
x = x + embeddings
if len(numeric_inputs) > 0:
numerics = tf.concat(numeric_inputs, axis=1)
x = x + tf.matmul(numerics, self.embedding_tables["numeric"])
return x
def compute_output_shape(self, input_shapes):
batch_size = list(input_shapes.values())[0].shape[0]
return (batch_size, 1)
def get_config(self):
return {
"feature_columns": self.feature_columns,
}
|
TensorFlow2/Detection/Efficientdet/visualize | visualize | shape_utils | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
from six.moves import zip
import tensorflow.compat.v1 as tf
from visualize import static_shape
def _is_tensor(t):
"""Returns a boolean indicating whether the input is a tensor.
Args:
t: the input to be tested.
Returns:
a boolean that indicates whether t is a tensor.
"""
return isinstance(t, (tf.Tensor, tf.SparseTensor, tf.Variable))
def _set_dim_0(t, d0):
"""Sets the 0-th dimension of the input tensor.
Args:
t: the input tensor, assuming the rank is at least 1.
d0: an integer indicating the 0-th dimension of the input tensor.
Returns:
the tensor t with the 0-th dimension set.
"""
t_shape = t.get_shape().as_list()
t_shape[0] = d0
t.set_shape(t_shape)
return t
def pad_tensor(t, length):
"""Pads the input tensor with 0s along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after padding, assuming length <= t.shape[0].
Returns:
padded_t: the padded tensor, whose first dimension is length. If the length
is an integer, the first dimension of padded_t is set to length
statically.
"""
t_rank = tf.rank(t)
t_shape = tf.shape(t)
t_d0 = t_shape[0]
pad_d0 = tf.expand_dims(length - t_d0, 0)
pad_shape = tf.cond(
tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
lambda: tf.expand_dims(length - t_d0, 0))
padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
if not _is_tensor(length):
padded_t = _set_dim_0(padded_t, length)
return padded_t
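# Illustrative (hypothetical) usage with a static length:
#   t = tf.constant([[1, 2], [3, 4]])   # shape [2, 2]
#   padded = pad_tensor(t, 4)           # shape [4, 2]; rows 2 and 3 are zeros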
def clip_tensor(t, length):
"""Clips the input tensor along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after clipping, assuming length <= t.shape[0].
Returns:
clipped_t: the clipped tensor, whose first dimension is length. If the
length is an integer, the first dimension of clipped_t is set to length
statically.
"""
clipped_t = tf.gather(t, tf.range(length))
if not _is_tensor(length):
clipped_t = _set_dim_0(clipped_t, length)
return clipped_t
def pad_or_clip_tensor(t, length):
"""Pad or clip the input tensor along the first dimension.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after processing.
Returns:
processed_t: the processed tensor, whose first dimension is length. If the
length is an integer, the first dimension of the processed tensor is set
to length statically.
"""
return pad_or_clip_nd(t, [length] + t.shape.as_list()[1:])
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor,
begin=tf.zeros(len(clip_size), dtype=tf.int32),
size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[
tf.zeros(len(trailing_paddings), dtype=tf.int32),
trailing_paddings
],
axis=1)
padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
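# Illustrative (hypothetical) usage:
#   boxes = tf.zeros([7, 4])
#   padded = pad_or_clip_nd(boxes, [10, 4])   # shape [10, 4]; rows 7-9 zero-padded
#   clipped = pad_or_clip_nd(boxes, [5, 4])   # shape [5, 4]; rows 5 and 6 dropped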
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
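# Illustrative (hypothetical) example: for a placeholder of shape
# [None, 300, 300, 3] this returns [<scalar batch-size Tensor>, 300, 300, 3], so
# later reshapes keep every statically known dimension static.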
def static_or_dynamic_map_fn(fn, elems, dtype=None,
parallel_iterations=32, back_prop=True):
"""Runs map_fn as a (static) for loop when possible.
This function rewrites the map_fn as an explicit unstack input -> for loop
over function calls -> stack result combination. This allows our graphs to
be acyclic when the batch size is static.
For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.
Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
with the default tf.map_fn function as it does not accept nested inputs (only
Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
Tensor or list of Tensors.
TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same structure as elems. Its output must have the
same structure as elems.
elems: A tensor or list of tensors, each of which will
be unpacked along their first dimension. The sequence of the
resulting slices will be applied to fn.
dtype: (optional) The output type(s) of fn. If fn returns a structure of
Tensors differing from the structure of elems, then dtype is not optional
and must have the same structure as the output of fn.
parallel_iterations: (optional) number of batch items to process in
parallel. This flag is only used if the native tf.map_fn is used
and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
back_prop: (optional) True enables support for back propagation.
This flag is only used if the native tf.map_fn is used.
Returns:
A tensor or sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
Raises:
    ValueError: if `elems` is not a Tensor or a list of Tensors.
    ValueError: if `fn` does not return a Tensor or a list of Tensors.
"""
if isinstance(elems, list):
for elem in elems:
if not isinstance(elem, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elem_shapes = [elem.shape.as_list() for elem in elems]
# Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail
# to all be the same size along the batch dimension.
for elem_shape in elem_shapes:
if (not elem_shape or not elem_shape[0]
or elem_shape[0] != elem_shapes[0][0]):
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
else:
if not isinstance(elems, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elems_shape = elems.shape.as_list()
if not elems_shape or not elems_shape[0]:
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
outputs = [fn(arg) for arg in tf.unstack(elems)]
# Stack `outputs`, which is a list of Tensors or list of lists of Tensors
if all([isinstance(output, tf.Tensor) for output in outputs]):
return tf.stack(outputs)
else:
if all([isinstance(output, list) for output in outputs]):
if all([all(
[isinstance(entry, tf.Tensor) for entry in output_list])
for output_list in outputs]):
return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
raise ValueError('`fn` should return a Tensor or a list of Tensors.')
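# Illustrative (hypothetical) usage: with a statically shaped batch the call below
# unrolls into 8 per-image function calls plus a tf.stack instead of the
# tf.while_loop that tf.map_fn would build:
#   images = tf.zeros([8, 300, 300, 3])
#   flipped = static_or_dynamic_map_fn(tf.image.flip_left_right, images)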
def check_min_image_dim(min_dim, image_tensor):
"""Checks that the image width/height are greater than some number.
This function is used to check that the width and height of an image are above
a certain value. If the image shape is static, this function will perform the
check at graph construction time. Otherwise, if the image shape varies, an
Assertion control dependency will be added to the graph.
Args:
min_dim: The minimum number of pixels along the width and height of the
image.
image_tensor: The image tensor to check size for.
Returns:
If `image_tensor` has dynamic size, return `image_tensor` with a Assert
control dependency. Otherwise returns image_tensor.
Raises:
    ValueError: if `image_tensor`'s width or height is smaller than `min_dim`.
"""
image_shape = image_tensor.get_shape()
image_height = static_shape.get_height(image_shape)
image_width = static_shape.get_width(image_shape)
if image_height is None or image_width is None:
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
['image size must be >= {} in both height and width.'.format(min_dim)])
with tf.control_dependencies([shape_assert]):
return tf.identity(image_tensor)
if image_height < min_dim or image_width < min_dim:
raise ValueError(
'image size must be >= %d in both height and width; image dim = %d,%d' %
(min_dim, image_height, image_width))
return image_tensor
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
else: return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
"""Asserts that shape_a and shape_b are the same along the 0th-dimension.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
if shape_a[0] != shape_b[0]:
raise ValueError('Unequal first dimension {}, {}'.format(
shape_a[0], shape_b[0]))
else: return tf.no_op()
else:
return tf.assert_equal(shape_a[0], shape_b[0])
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
"""Asserts the input box tensor is normalized.
Args:
boxes: a tensor of shape [N, 4] where N is the number of boxes.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1.
Returns:
a tf.Assert op which fails when the input box tensor is not normalized.
Raises:
ValueError: When the input box tensor is not normalized.
"""
box_minimum = tf.reduce_min(boxes)
box_maximum = tf.reduce_max(boxes)
return tf.Assert(
tf.logical_and(
tf.less_equal(box_maximum, maximum_normalized_coordinate),
tf.greater_equal(box_minimum, 0)),
[boxes])
def flatten_dimensions(inputs, first, last):
"""Flattens `K-d` tensor along [first, last) dimensions.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[D0, D1, ..., D(first) * D(first+1) * ... * D(last-1), D(last), ..., D(K-1)].
Example:
`inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
new_tensor = flatten_dimensions(inputs, first=1, last=3)
new_tensor.shape -> [10, 100, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
first: first value for the range of dimensions to flatten.
last: last value for the range of dimensions to flatten. Note that the last
dimension itself is excluded.
Returns:
a tensor with shape
[D0, D1, ..., D(first) * D(first + 1) * ... * D(last - 1), D(last), ...,
D(K-1)].
Raises:
ValueError: if first and last arguments are incorrect.
"""
if first >= inputs.shape.ndims or last > inputs.shape.ndims:
raise ValueError('`first` and `last` must be less than inputs.shape.ndims. '
'found {} and {} respectively while ndims is {}'.format(
first, last, inputs.shape.ndims))
shape = combined_static_and_dynamic_shape(inputs)
flattened_dim_prod = tf.reduce_prod(shape[first:last],
keepdims=True)
new_shape = tf.concat([shape[:first], flattened_dim_prod,
shape[last:]], axis=0)
return tf.reshape(inputs, new_shape)
def flatten_first_n_dimensions(inputs, n):
"""Flattens `K-d` tensor along first n dimension to be a `(K-n+1)-d` tensor.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
Example:
`inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
new_tensor = flatten_first_n_dimensions(inputs, 2)
new_tensor.shape -> [50, 20, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
n: The number of dimensions to flatten.
Returns:
a tensor with shape [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
"""
return flatten_dimensions(inputs, first=0, last=n)
def expand_first_dimension(inputs, dims):
"""Expands `K-d` tensor along first dimension to be a `(K+n-1)-d` tensor.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
Example:
`inputs` is a tensor with shape [50, 20, 20, 3].
new_tensor = expand_first_dimension(inputs, [10, 5]).
new_tensor.shape -> [10, 5, 20, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
dims: List with new dimensions to expand first axis into. The length of
`dims` is typically 2 or larger.
Returns:
a tensor with shape [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
"""
inputs_shape = combined_static_and_dynamic_shape(inputs)
expanded_shape = tf.stack(dims + inputs_shape[1:])
# Verify that it is possible to expand the first axis of inputs.
assert_op = tf.assert_equal(
inputs_shape[0], tf.reduce_prod(tf.stack(dims)),
message=('First dimension of `inputs` cannot be expanded into provided '
'`dims`'))
with tf.control_dependencies([assert_op]):
inputs_reshaped = tf.reshape(inputs, expanded_shape)
return inputs_reshaped
def resize_images_and_return_shapes(inputs, image_resizer_fn):
"""Resizes images using the given function and returns their true shapes.
Args:
inputs: a float32 Tensor representing a batch of inputs of shape
[batch_size, height, width, channels].
image_resizer_fn: a function which takes in a single image and outputs
a resized image and its original shape.
Returns:
resized_inputs: The inputs resized according to image_resizer_fn.
    true_image_shapes: An integer tensor of shape [batch_size, 3]
representing the height, width and number of channels in inputs.
"""
if inputs.dtype is not tf.float32:
raise ValueError('`resize_images_and_return_shapes` expects a'
' tf.float32 tensor')
# TODO(jonathanhuang): revisit whether to always use batch size as
# the number of parallel iterations vs allow for dynamic batching.
outputs = static_or_dynamic_map_fn(
image_resizer_fn,
elems=inputs,
dtype=[tf.float32, tf.int32])
resized_inputs = outputs[0]
true_image_shapes = outputs[1]
return resized_inputs, true_image_shapes
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/callbacks/callbacks | callbacks | early_stopping | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: callbacks.ctl_callbacks.EarlyStopping
metric: val_loss
min_delta: 0
patience: 5
|
PyTorch/SpeechSynthesis/HiFiGAN/common | common | layers | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.nn.functional as F
from librosa.filters import mel as librosa_mel_fn
from common.audio_processing import (dynamic_range_compression,
dynamic_range_decompression)
from common.stft import STFT
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear',
batch_norm=False):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
        self.norm = torch.nn.BatchNorm1d(out_channels) if batch_norm else None
torch.nn.init.xavier_uniform_(
self.conv.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
if self.norm is None:
return self.conv(signal)
else:
return self.norm(self.conv(signal))
class ConvReLUNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
super(ConvReLUNorm, self).__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size,
padding=(kernel_size // 2))
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, signal):
out = F.relu(self.conv(signal))
out = self.norm(out.transpose(1, 2)).transpose(1, 2).to(signal.dtype)
return self.dropout(out)
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sr=sampling_rate,
n_fft=filter_length,
n_mels=n_mel_channels,
fmin=mel_fmin,
fmax=mel_fmax
)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
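# Illustrative usage sketch (not part of the original module); the variable names
# and the surrounding pipeline are assumptions:
#
#   stft = TacotronSTFT(filter_length=1024, hop_length=256, win_length=1024,
#                       n_mel_channels=80, sampling_rate=22050)
#   audio = torch.clamp(audio, min=-1.0, max=1.0)   # waveform batch of shape (B, T)
#   mel = stft.mel_spectrogram(audio)               # (B, n_mel_channels, T_frames)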
|
PyTorch/Classification/GPUNet/triton/runner | runner | exceptions | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RunnerException(Exception):
"""
Runner Exception
"""
def __init__(self, message: str):
self._message = message
def __str__(self):
return self._message
@property
def message(self):
"""Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/nn | nn | sparse_model | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tensorflow as tf
import horovod.tensorflow as hvd
import numpy as np
import json
from distributed_embeddings.python.layers import dist_model_parallel as dmp
from utils.checkpointing import get_variable_path
from .embedding import EmbeddingInitializer, DualEmbeddingGroup
sparse_model_parameters = ['use_mde_embeddings', 'embedding_dim', 'column_slice_threshold',
'embedding_zeros_initializer', 'embedding_trainable', 'categorical_cardinalities',
'concat_embedding', 'cpu_offloading_threshold_gb',
'data_parallel_input', 'row_slice_threshold', 'data_parallel_threshold']
def _gigabytes_to_elements(gb, dtype=tf.float32):
if gb is None:
return None
if dtype == tf.float32:
bytes_per_element = 4
else:
raise ValueError(f'Unsupported dtype: {dtype}')
return gb * 10**9 / bytes_per_element
class SparseModel(tf.keras.Model):
def __init__(self, **kwargs):
super(SparseModel, self).__init__()
sparse_model_kwargs = {k:kwargs[k] for k in sparse_model_parameters}
for field in sparse_model_kwargs.keys():
self.__dict__[field] = kwargs[field]
self.num_all_categorical_features = len(self.categorical_cardinalities)
self.use_concat_embedding = self.concat_embedding and (hvd.size() == 1) and \
all(dim == self.embedding_dim[0] for dim in self.embedding_dim)
self._create_embeddings()
def _create_embeddings(self):
self.embedding_layers = []
initializer_cls = tf.keras.initializers.Zeros if self.embedding_zeros_initializer else EmbeddingInitializer
# use a concatenated embedding for singleGPU when all embedding dimensions are equal
if self.use_concat_embedding:
self.embedding = DualEmbeddingGroup(cardinalities=self.categorical_cardinalities,
output_dim=self.embedding_dim[0],
memory_threshold=self.cpu_offloading_threshold_gb,
trainable=self.trainable,
use_mde_embeddings=self.use_mde_embeddings)
return
for table_size, dim in zip(self.categorical_cardinalities, self.embedding_dim):
if hvd.rank() == 0:
print(f'Creating embedding with size: {table_size} {dim}')
e = tf.keras.layers.Embedding(input_dim=table_size, output_dim=dim,
embeddings_initializer=initializer_cls())
self.embedding_layers.append(e)
gpu_size = _gigabytes_to_elements(self.cpu_offloading_threshold_gb)
self.embedding = dmp.DistributedEmbedding(self.embedding_layers,
strategy='memory_balanced',
dp_input=self.data_parallel_input,
column_slice_threshold=self.column_slice_threshold,
row_slice_threshold=self.row_slice_threshold,
data_parallel_threshold=self.data_parallel_threshold,
gpu_embedding_size=gpu_size)
def get_local_table_ids(self, rank):
if self.use_concat_embedding or self.data_parallel_input:
return list(range(self.num_all_categorical_features))
else:
return self.embedding.strategy.input_ids_list[rank]
@tf.function
def call(self, cat_features):
embedding_outputs = self._call_embeddings(cat_features)
return embedding_outputs
def _call_embeddings(self, cat_features):
if self.use_concat_embedding:
x = self.embedding(cat_features)
else:
x = self.embedding(cat_features)
x = tf.concat(x, axis=1)
x = tf.cast(x, dtype=self.compute_dtype)
return x
def force_initialization(self, global_batch_size=64):
categorical_features = [tf.zeros(shape=[global_batch_size, 1], dtype=tf.int32)
for _ in range(len(self.get_local_table_ids(hvd.rank())))]
_ = self(categorical_features)
def save_checkpoint(self, checkpoint_path):
print('Gathering the embedding weights...')
full_embedding_weights = self.embedding.get_weights()
print('Saving the embedding weights...')
for i, weight in enumerate(full_embedding_weights):
filename = get_variable_path(checkpoint_path, f'feature_{i}')
np.save(file=filename, arr=weight)
print('Embedding checkpoint saved.')
def load_checkpoint(self, checkpoint_path):
self.force_initialization()
paths = []
for i in range(self.num_all_categorical_features):
path = get_variable_path(checkpoint_path, f'feature_{i}')
paths.append(path)
self.embedding.set_weights(weights=paths)
def save_config(self, path):
config = {k : self.__dict__[k] for k in sparse_model_parameters}
with open(path, 'w') as f:
json.dump(obj=config, fp=f, indent=4)
@staticmethod
def from_config(path):
with open(path) as f:
config = json.load(fp=f)
if 'data_parallel_input' not in config:
config['data_parallel_input'] = False
if 'row_slice_threshold' not in config:
config['row_slice_threshold'] = None
if 'data_parallel_threshold' not in config:
config['data_parallel_threshold'] = None
return SparseModel(**config)
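# Illustrative usage sketch (not part of the original module); the paths below are
# placeholders and `categorical_features` is assumed to be a list of int32 tensors,
# one per local embedding table:
#
#   sparse_model = SparseModel.from_config('checkpoint_dir/sparse_config.json')
#   sparse_model.load_checkpoint('checkpoint_dir')
#   embeddings = sparse_model(categorical_features)   # embeddings concatenated along the feature axis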
|
PyTorch/Translation/Transformer/fairseq/models | models | __init__ | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_incremental_decoder import FairseqIncrementalDecoder # noqa: F401
MODEL_REGISTRY = {}
ARCH_MODEL_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
def build_model(args):
return ARCH_MODEL_REGISTRY[args.arch].build_model(args)
def register_model(name):
"""Decorator to register a new model (e.g., LSTM)."""
def register_model_cls(cls):
if name in MODEL_REGISTRY:
raise ValueError('Cannot register duplicate model ({})'.format(name))
MODEL_REGISTRY[name] = cls
return cls
return register_model_cls
def register_model_architecture(model_name, arch_name):
"""Decorator to register a new model architecture (e.g., lstm_luong_wmt_en_de)."""
def register_model_arch_fn(fn):
if model_name not in MODEL_REGISTRY:
raise ValueError('Cannot register model architecture for unknown model type ({})'.format(model_name))
if arch_name in ARCH_MODEL_REGISTRY:
raise ValueError('Cannot register duplicate model architecture ({})'.format(arch_name))
if not callable(fn):
raise ValueError('Model architecture must be callable ({})'.format(arch_name))
ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
ARCH_CONFIG_REGISTRY[arch_name] = fn
return fn
return register_model_arch_fn
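# Illustrative usage sketch (not part of the original file); the model and
# architecture names below are placeholders:
#
#   @register_model('my_model')
#   class MyModel(...):          # subclass of the appropriate fairseq model base class
#       ...
#
#   @register_model_architecture('my_model', 'my_model_base')
#   def my_model_base(args):
#       args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)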
# automatically import any Python files in the models/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('fairseq.models.' + module)
|
PyTorch/Detection/SSD/examples | examples | SSD300_A100_FP32_8GPU | # This script launches SSD300 training in FP32 on 8 GPUs using 1024 batch size (128 per GPU)
# Usage ./SSD300_A100_FP32_8GPU.sh <path to this repository> <path to dataset> <additional flags>
torchrun --nproc_per_node=8 $1/main.py --backbone resnet50 --learning-rate 2.7e-3 --warmup 1200 --bs 128 --no-amp --data $2 ${@:3}
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/evaluation/coco | coco | coco_eval | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
import logging
import tempfile
import os
import torch
from collections import OrderedDict
from tqdm import tqdm
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
def do_coco_evaluation(
dataset,
predictions,
box_only,
output_folder,
iou_types,
expected_results,
expected_results_sigma_tol,
):
logger = logging.getLogger("maskrcnn_benchmark.inference")
if box_only:
logger.info("Evaluating bbox proposals")
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
res = COCOResults("box_proposal")
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = evaluate_box_proposals(
predictions, dataset, area=area, limit=limit
)
key = "AR{}@{:d}".format(suffix, limit)
res.results["box_proposal"][key] = stats["ar"].item()
logger.info(res)
check_expected_results(res, expected_results, expected_results_sigma_tol)
if output_folder:
torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
return
logger.info("Preparing results for COCO format")
coco_results = {}
if "bbox" in iou_types:
logger.info("Preparing bbox results")
coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset)
if "segm" in iou_types:
logger.info("Preparing segm results")
coco_results["segm"] = prepare_for_coco_segmentation(predictions, dataset)
results = COCOResults(*iou_types)
logger.info("Evaluating predictions")
dataset.coco.createIndex(use_ext=True)
for iou_type in iou_types:
with tempfile.NamedTemporaryFile() as f:
file_path = f.name
if output_folder:
file_path = os.path.join(output_folder, iou_type + ".json")
res = evaluate_predictions_on_coco(
dataset.coco, coco_results[iou_type], file_path, iou_type
)
results.update(res)
logger.info(results)
check_expected_results(results, expected_results, expected_results_sigma_tol)
if output_folder:
torch.save(results, os.path.join(output_folder, "coco_results.pth"))
return results, coco_results
def prepare_for_coco_detection(predictions, dataset):
# assert isinstance(dataset, COCODataset)
coco_results = []
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]["width"]
image_height = dataset.coco.imgs[original_id]["height"]
prediction = prediction.resize((image_width, image_height))
prediction = prediction.convert("xywh")
boxes = prediction.bbox.tolist()
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(predictions, dataset):
import pycocotools.mask as mask_util
import numpy as np
masker = Masker(threshold=0.5, padding=1)
# assert isinstance(dataset, COCODataset)
coco_results = []
for image_id, prediction in tqdm(enumerate(predictions)):
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]["width"]
image_height = dataset.coco.imgs[original_id]["height"]
prediction = prediction.resize((image_width, image_height))
masks = prediction.get_field("mask")
# t = time.time()
# Masker is necessary only if masks haven't been already resized.
if list(masks.shape[-2:]) != [image_height, image_width]:
masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)
masks = masks[0]
# logger.info('Time mask: {}'.format(time.time() - t))
# prediction = prediction.convert('xywh')
# boxes = prediction.bbox.tolist()
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
# rles = prediction.get_field('mask')
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
# inspired from Detectron
def evaluate_box_proposals(
predictions, dataset, thresholds=None, area="all", limit=None
):
"""Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2],
] # 512-inf
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for image_id, prediction in enumerate(predictions):
original_id = dataset.id_to_img_map[image_id]
# TODO replace with get_img_info?
image_width = dataset.coco.imgs[original_id]["width"]
image_height = dataset.coco.imgs[original_id]["height"]
prediction = prediction.resize((image_width, image_height))
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = prediction.get_field("objectness").sort(descending=True)[1]
prediction = prediction[inds]
ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
anno = dataset.coco.loadAnns(ann_ids)
gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
"xyxy"
)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if len(prediction) == 0:
continue
if limit is not None and len(prediction) > limit:
prediction = prediction[:limit]
overlaps = boxlist_iou(prediction, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(prediction), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = torch.cat(gt_overlaps, dim=0)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
def evaluate_predictions_on_coco(
coco_gt, coco_results, json_result_file, iou_type="bbox"
):
import json
with open(json_result_file, "w") as f:
json.dump(coco_results, f)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
coco_dt = coco_gt.loadRes(str(json_result_file), use_ext=True) if coco_results else COCO()
# coco_dt = coco_gt.loadRes(coco_results)
coco_eval = COCOeval(coco_gt, coco_dt, iou_type, use_ext=True)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
class COCOResults(object):
METRICS = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"box_proposal": [
"AR@100",
"ARs@100",
"ARm@100",
"ARl@100",
"AR@1000",
"ARs@1000",
"ARm@1000",
"ARl@1000",
],
"keypoint": ["AP", "AP50", "AP75", "APm", "APl"],
}
def __init__(self, *iou_types):
allowed_types = ("box_proposal", "bbox", "segm")
assert all(iou_type in allowed_types for iou_type in iou_types)
results = OrderedDict()
for iou_type in iou_types:
results[iou_type] = OrderedDict(
[(metric, -1) for metric in COCOResults.METRICS[iou_type]]
)
self.results = results
def update(self, coco_eval):
if coco_eval is None:
return
from pycocotools.cocoeval import COCOeval
assert isinstance(coco_eval, COCOeval)
s = coco_eval.stats
iou_type = coco_eval.params.iouType
res = self.results[iou_type]
metrics = COCOResults.METRICS[iou_type]
for idx, metric in enumerate(metrics):
res[metric] = s[idx]
def __repr__(self):
# TODO make it pretty
return repr(self.results)
def check_expected_results(results, expected_results, sigma_tol):
if not expected_results:
return
logger = logging.getLogger("maskrcnn_benchmark.inference")
for task, metric, (mean, std) in expected_results:
actual_val = results.results[task][metric]
lo = mean - sigma_tol * std
hi = mean + sigma_tol * std
ok = (lo < actual_val) and (actual_val < hi)
msg = (
"{} > {} sanity check (actual vs. expected): "
"{:.3f} vs. mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})"
).format(task, metric, actual_val, mean, std, lo, hi)
if not ok:
msg = "FAIL: " + msg
logger.error(msg)
else:
msg = "PASS: " + msg
logger.info(msg)
|
TensorFlow2/LanguageModeling/ELECTRA/data/squad | squad | squad_download | #!/usr/bin/env bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "Downloading dataset for squad..."
# Download SQuAD
v1="v1.1"
mkdir $v1
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json -O $v1/train-v1.1.json
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json -O $v1/dev-v1.1.json
wget https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/ -O $v1/evaluate-v1.1.py
EXP_TRAIN_v1='981b29407e0affa3b1b156f72073b945 -'
EXP_DEV_v1='3e85deb501d4e538b6bc56f786231552 -'
EXP_EVAL_v1='afb04912d18ff20696f7f88eed49bea9 -'
CALC_TRAIN_v1=`cat ${v1}/train-v1.1.json |md5sum`
CALC_DEV_v1=`cat ${v1}/dev-v1.1.json |md5sum`
CALC_EVAL_v1=`cat ${v1}/evaluate-v1.1.py |md5sum`
v2="v2.0"
mkdir $v2
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json -O $v2/train-v2.0.json
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json -O $v2/dev-v2.0.json
wget https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/ -O $v2/evaluate-v2.0.py
EXP_TRAIN_v2='62108c273c268d70893182d5cf8df740 -'
EXP_DEV_v2='246adae8b7002f8679c027697b0b7cf8 -'
EXP_EVAL_v2='ff23213bed5516ea4a6d9edb6cd7d627 -'
CALC_TRAIN_v2=`cat ${v2}/train-v2.0.json |md5sum`
CALC_DEV_v2=`cat ${v2}/dev-v2.0.json |md5sum`
CALC_EVAL_v2=`cat ${v2}/evaluate-v2.0.py |md5sum`
echo "Squad data download done!"
echo "Verifying Dataset...."
if [ "$EXP_TRAIN_v1" != "$CALC_TRAIN_v1" ]; then
echo "train-v1.1.json is corrupted! md5sum doesn't match"
fi
if [ "$EXP_DEV_v1" != "$CALC_DEV_v1" ]; then
echo "dev-v1.1.json is corrupted! md5sum doesn't match"
fi
if [ "$EXP_EVAL_v1" != "$CALC_EVAL_v1" ]; then
echo "evaluate-v1.1.py is corrupted! md5sum doesn't match"
fi
if [ "$EXP_TRAIN_v2" != "$CALC_TRAIN_v2" ]; then
echo "train-v2.0.json is corrupted! md5sum doesn't match"
fi
if [ "$EXP_DEV_v2" != "$CALC_DEV_v2" ]; then
echo "dev-v2.0.json is corrupted! md5sum doesn't match"
fi
if [ "$EXP_EVAL_v2" != "$CALC_EVAL_v2" ]; then
echo "evaluate-v2.0.py is corrupted! md5sum doesn't match"
fi
echo "Complete!"
|
DGLPyTorch/DrugDiscovery/SE3Transformer | SE3Transformer | README | # SE(3)-Transformers For PyTorch
This repository provides a script and recipe to train the SE(3)-Transformer model to achieve state-of-the-art accuracy. The content of this repository is tested and maintained by NVIDIA.
## Table Of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Glossary](#glossary)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Inference process](#inference-process)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Training stability test](#training-stability-test)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-1x-a100-80gb)
* [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
The **SE(3)-Transformer** is a Graph Neural Network using a variant of [self-attention](https://arxiv.org/abs/1706.03762v5) for 3D points and graphs processing.
This model is [equivariant](https://en.wikipedia.org/wiki/Equivariant_map) under [continuous 3D roto-translations](https://en.wikipedia.org/wiki/Euclidean_group), meaning that when the inputs (graphs or sets of points) rotate in 3D space (or more generally experience a [proper rigid transformation](https://en.wikipedia.org/wiki/Rigid_transformation)), the model outputs either stay invariant or transform with the input.
A mathematical guarantee of equivariance is important to ensure stable and predictable performance in the presence of nuisance transformations of the data input and when the problem has some inherent symmetries we want to exploit.
The model is based on the following publications:
- [SE(3)-Transformers: 3D Roto-Translation Equivariant Attention Networks](https://arxiv.org/abs/2006.10503) (NeurIPS 2020) by Fabian B. Fuchs, Daniel E. Worrall, et al.
- [Tensor field networks: Rotation- and translation-equivariant neural networks for 3D point clouds](https://arxiv.org/abs/1802.08219) by Nathaniel Thomas, Tess Smidt, et al.
A follow-up paper explains how this model can be used iteratively, for example, to predict or refine protein structures:
- [Iterative SE(3)-Transformers](https://arxiv.org/abs/2102.13419) by Fabian B. Fuchs, Daniel E. Worrall, et al.
Just like [the official implementation](https://github.com/FabianFuchsML/se3-transformer-public), this implementation uses [PyTorch](https://pytorch.org/) and the [Deep Graph Library (DGL)](https://www.dgl.ai/).
The main differences between this implementation of SE(3)-Transformers and the official one are the following:
- Training and inference support for multiple GPUs
- Training and inference support for [Mixed Precision](https://arxiv.org/abs/1710.03740)
- The [QM9 dataset from DGL](https://docs.dgl.ai/en/latest/api/python/dgl.data.html#qm9edge-dataset) is used and automatically downloaded
- Significantly increased throughput
- Significantly reduced memory consumption
- The use of layer normalization in the fully connected radial profile layers is an option (`--use_layer_norm`), off by default
- The use of equivariant normalization between attention layers is an option (`--norm`), off by default
- The [spherical harmonics](https://en.wikipedia.org/wiki/Spherical_harmonic) and [Clebsch–Gordan coefficients](https://en.wikipedia.org/wiki/Clebsch%E2%80%93Gordan_coefficients), used to compute bases matrices, are computed with the [e3nn library](https://e3nn.org/)
This model enables you to predict quantum chemical properties of small organic molecules in the [QM9 dataset](https://www.nature.com/articles/sdata201422).
In this case, the exploited symmetry is that these properties do not depend on the orientation or position of the molecules in space.
This model is trained with mixed precision using Tensor Cores on NVIDIA Volta, NVIDIA Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results up to 1.5x faster than training without Tensor Cores while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
The model consists of stacked layers of equivariant graph self-attention and equivariant normalization.
Lastly, a Tensor Field Network convolution is applied to obtain invariant features. Graph pooling (mean or max over the nodes) is applied to these features, and the result is fed to a final MLP to get scalar predictions.
In this setup, the model is a graph-to-scalar network. The pooling can be removed to obtain a graph-to-graph network, and the final TFN can be modified to output features of any type (invariant scalars, 3D vectors, ...).

### Default configuration
SE(3)-Transformers introduce a self-attention layer for graphs that is equivariant to 3D roto-translations. It achieves this by leveraging Tensor Field Networks to build attention weights that are invariant and attention values that are equivariant.
Combining the equivariant values with the invariant weights gives rise to an equivariant output. This output is normalized while preserving equivariance thanks to equivariant normalization layers operating on feature norms.
The following features were implemented in this model:
- Support for edge features of any degree (1D, 3D, 5D, ...), whereas the official implementation only supports scalar invariant edge features (degree 0). Edge features with a degree greater than one are
concatenated to node features of the same degree. This is required in order to reproduce published results on point cloud processing.
- Data-parallel multi-GPU training (DDP)
- Mixed precision training (autocast, gradient scaling)
- Gradient accumulation
- Model checkpointing
The following performance optimizations were implemented in this model:
**General optimizations**
- The option is provided to precompute bases at the beginning of the training instead of computing them at the beginning of each forward pass (`--precompute_bases`)
- The bases computation is just-in-time (JIT) compiled with `torch.jit.script`
- The Clebsch-Gordon coefficients are cached in RAM
**Tensor Field Network optimizations**
- The last layer of each radial profile network does not add any bias in order to avoid large broadcasting operations
- The layout (order of dimensions) of the bases tensors is optimized to avoid copies to contiguous memory in the downstream TFN layers
- When Tensor Cores are available, and the output feature dimension of computed bases is odd, then it is padded with zeros to make more effective use of Tensor Cores (AMP and TF32 precisions)
- Multiple levels of fusion for TFN convolutions (and radial profiles) are provided and automatically used when conditions are met
- A low-memory mode is provided that will trade throughput for less memory use (`--low_memory`). Overview of memory savings over the official implementation (batch size 100), depending on the precision and the low memory mode:
| | FP32 | AMP |
|---|---|---|
| `--low_memory false` (default) | 4.7x | 7.1x |
| `--low_memory true` | 29.4x | 43.6x |
**Self-attention optimizations**
- Attention keys and values are computed by a single partial TFN graph convolution in each attention layer instead of two
- Graph operations for different output degrees may be fused together if conditions are met
**Normalization optimizations**
- The equivariant normalization layer is optimized from multiple layer normalizations to a group normalization on fused norms when certain conditions are met
Competitive training results and analysis are provided for the following hyperparameters (identical to the ones in the original publication):
- Number of layers: 7
- Number of degrees: 4
- Number of channels: 32
- Number of attention heads: 8
- Channels division: 2
- Use of equivariant normalization: true
- Use of layer normalization: true
- Pooling: max
### Feature support matrix
This model supports the following features:
| Feature | SE(3)-Transformer |
|---------------------------------|-------------------|
| Automatic mixed precision (AMP) | Yes |
| Distributed data parallel (DDP) | Yes |
#### Features
**Distributed data parallel (DDP)**
[DistributedDataParallel (DDP)](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel) implements data parallelism at the module level that can run across multiple GPUs or machines.
**Automatic Mixed Precision (AMP)**
This implementation uses the native PyTorch AMP implementation of mixed precision training. It allows us to use FP16 training with FP32 master weights by modifying just a few lines of code. A detailed explanation of mixed precision can be found in the next section.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in NVIDIA Volta, and following with both the NVIDIA Turing and NVIDIA Ampere Architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
AMP enables mixed precision training on NVIDIA Volta, NVIDIA Turing, and NVIDIA Ampere GPU architectures automatically. The PyTorch framework code makes all necessary model changes internally.
For information about:
- How to train using mixed precision, refer to the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, refer to the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- APEX tools for mixed precision training, refer to the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
Mixed precision is enabled in PyTorch by using the native [Automatic Mixed Precision package](https://pytorch.org/docs/stable/amp.html), which casts variables to half-precision upon retrieval while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In PyTorch, loss scaling can be applied automatically using a `GradScaler`.
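A minimal sketch of this pattern in plain PyTorch is shown below; the training loop in this repository already wires this up for you when `--amp` is passed, and `model`, `optimizer`, `loss_fn`, and `dataloader` are placeholders:
```
import torch
scaler = torch.cuda.amp.GradScaler()
for inputs, target in dataloader:
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():        # forward pass runs in mixed precision
        loss = loss_fn(model(inputs), target)
    scaler.scale(loss).backward()          # scale the loss to preserve small gradient values
    scaler.step(optimizer)                 # unscales gradients, then calls optimizer.step()
    scaler.update()                        # adjust the loss scale for the next iteration
```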
Automatic Mixed Precision makes all the adjustments internally in PyTorch, providing two benefits over manual operations. First, programmers need not modify network model code, reducing development and maintenance effort. Second, using AMP maintains forward and backward compatibility with all the APIs for defining and running PyTorch models.
To enable mixed precision, you can simply use the `--amp` flag when running the training or inference scripts.
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on NVIDIA Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models that require a high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
### Glossary
**Degree (type)**
In the model, every feature (input, output and hidden) transforms in an equivariant way in relation to the input graph. When we define a feature, we need to choose, in addition to the number of channels, which transformation rule it obeys.
The degree or type of a feature is a positive integer that describes how this feature transforms when the input rotates in 3D.
This is related to [irreducible representations](https://en.wikipedia.org/wiki/Irreducible_representation) of different rotation orders.
The degree of a feature determines its dimensionality. A type-d feature has a dimensionality of 2d+1.
Some common examples include:
- Degree 0: 1D scalars invariant to rotation
- Degree 1: 3D vectors that rotate according to 3D rotation matrices
- Degree 2: 5D vectors that rotate according to 5D [Wigner-D matrices](https://en.wikipedia.org/wiki/Wigner_D-matrix). These can represent symmetric traceless 3x3 matrices.
**Fiber**
A fiber can be viewed as a representation of a set of features of different types or degrees (positive integers), where each feature type transforms according to its rule.
In this repository, a fiber can be seen as a dictionary with degrees as keys and numbers of channels as values.
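For example, with the default hyperparameters (`--num_degrees 4 --num_channels 32`), the hidden fiber could be written as the following dictionary (degrees as keys, channel counts as values):
```
hidden_fiber = {0: 32, 1: 32, 2: 32, 3: 32}   # 32 channels for each of the degrees 0, 1, 2, 3
```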
**Multiplicity**
The multiplicity of a feature of a given type is the number of channels of this feature.
**Tensor Field Network**
A [Tensor Field Network](https://arxiv.org/abs/1802.08219) is a kind of equivariant graph convolution that can combine features of different degrees and produce new ones while preserving equivariance thanks to [tensor products](https://en.wikipedia.org/wiki/Tensor_product).
**Equivariance**
[Equivariance](https://en.wikipedia.org/wiki/Equivariant_map) is a property of a function or model stating that applying a symmetry transformation to the input and then computing the function produces the same result as computing the function and then applying the transformation to the output.
In the case of SE(3)-Transformer, the symmetry group is the group of continuous roto-translations (SE(3)).
## Setup
The following section lists the requirements that you need to meet in order to start training the SE(3)-Transformer model.
### Requirements
This repository contains a Dockerfile which extends the PyTorch 23.01 NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- PyTorch 23.01+ NGC container
- Supported GPUs:
- [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing architecture](https://www.nvidia.com/en-us/design-visualization/technologies/turing-architecture/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, refer to the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
- [Running PyTorch](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/running.html#running)
For those unable to use the PyTorch NGC container to set up the required environment or create your own container, refer to the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or FP32, perform the following steps using the default parameters of the SE(3)-Transformer model on the QM9 dataset. For the specifics concerning training and inference, refer to the [Advanced](#advanced) section.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/DGLPyTorch/DrugDiscovery/SE3Transformer
```
2. Build the `se3-transformer` PyTorch NGC container.
```
docker build -t se3-transformer .
```
3. Start an interactive session in the NGC container to run training/inference.
```
mkdir -p results
docker run -it --runtime=nvidia --shm-size=8g --ulimit memlock=-1 --ulimit stack=67108864 --rm -v ${PWD}/results:/workspace/se3-transformer/results se3-transformer:latest
```
4. Start training.
```
bash scripts/train.sh # or scripts/train_multi_gpu.sh
```
5. Start inference/predictions.
```
bash scripts/predict.sh
```
Now that you have your model trained and evaluated, you can compare your training results with our [Training accuracy results](#training-accuracy-results). You can also benchmark your performance against the [Training performance results](#training-performance-results) or [Inference performance results](#inference-performance-results). Following the steps in these sections will ensure that you achieve the same accuracy and performance results as stated in the [Results](#results) section.
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
In the root directory, the most important files are:
- `Dockerfile`: container with the basic set of dependencies to run SE(3)-Transformers
- `requirements.txt`: set of extra requirements to run SE(3)-Transformers
- `se3_transformer/data_loading/qm9.py`: QM9 data loading and preprocessing, as well as bases precomputation
- `se3_transformer/model/layers/`: directory containing model architecture layers
- `se3_transformer/model/transformer.py`: main Transformer module
- `se3_transformer/model/basis.py`: logic for computing bases matrices
- `se3_transformer/runtime/training.py`: training script, to be run as a python module
- `se3_transformer/runtime/inference.py`: inference script, to be run as a python module
- `se3_transformer/runtime/metrics.py`: MAE metric with support for multi-GPU synchronization
- `se3_transformer/runtime/loggers.py`: [DLLogger](https://github.com/NVIDIA/dllogger) and [W&B](wandb.ai/) loggers
### Parameters
The complete list of the available parameters for the `training.py` script contains:
**General**
- `--epochs`: Number of training epochs (default: `100` for single-GPU)
- `--batch_size`: Batch size (default: `240`)
- `--seed`: Set a seed globally (default: `None`)
- `--num_workers`: Number of dataloading workers (default: `8`)
- `--amp`: Use Automatic Mixed Precision (default `false`)
- `--gradient_clip`: Clipping of the gradient norms (default: `None`)
- `--accumulate_grad_batches`: Gradient accumulation (default: `1`)
- `--ckpt_interval`: Save a checkpoint every N epochs (default: `-1`)
- `--eval_interval`: Do an evaluation round every N epochs (default: `20`)
- `--silent`: Minimize stdout output (default: `false`)
**Paths**
- `--data_dir`: Directory where the data is located or should be downloaded (default: `./data`)
- `--log_dir`: Directory where the results logs should be saved (default: `/results`)
- `--save_ckpt_path`: File where the checkpoint should be saved (default: `None`)
- `--load_ckpt_path`: File of the checkpoint to be loaded (default: `None`)
**Optimizer**
- `--optimizer`: Optimizer to use (default: `adam`)
- `--learning_rate`: Learning rate to use (default: `0.002` for single-GPU)
- `--momentum`: Momentum to use (default: `0.9`)
- `--weight_decay`: Weight decay to use (default: `0.1`)
**QM9 dataset**
- `--task`: Regression task to train on (default: `homo`)
- `--precompute_bases`: Precompute bases at the beginning of the script during dataset initialization, instead of computing them at the beginning of each forward pass (default: `false`)
**Model architecture**
- `--num_layers`: Number of stacked Transformer layers (default: `7`)
- `--num_heads`: Number of heads in self-attention (default: `8`)
- `--channels_div`: Channels division before feeding to attention layer (default: `2`)
- `--pooling`: Type of graph pooling (default: `max`)
- `--norm`: Apply a normalization layer after each attention block (default: `false`)
- `--use_layer_norm`: Apply layer normalization between MLP layers (default: `false`)
- `--low_memory`: If true, will use ops that are slower but use less memory (default: `false`)
- `--num_degrees`: Number of degrees to use. Hidden features will have types [0, ..., num_degrees - 1] (default: `4`)
- `--num_channels`: Number of channels for the hidden features (default: `32`)
### Command-line options
To show the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example: `python -m se3_transformer.runtime.training --help`.
### Dataset guidelines
#### Demo dataset
The SE(3)-Transformer was trained on the QM9 dataset.
The QM9 dataset is hosted on DGL servers and downloaded (38MB) automatically when needed. By default, it is stored in the `./data` directory, but this location can be changed with the `--data_dir` argument.
The dataset is saved as a `qm9_edge.npz` file and converted to DGL graphs at runtime.
As input features, we use:
- Node features (6D):
- One-hot-encoded atom type (5D) (atom types: H, C, N, O, F)
- Number of protons of each atom (1D)
- Edge features: one-hot-encoded bond type (4D) (bond types: single, double, triple, aromatic)
- The relative positions between adjacent nodes (atoms)
#### Custom datasets
To use this network on a new dataset, you can extend the `DataModule` class present in `se3_transformer/data_loading/data_module.py`.
Your custom collate function should return a tuple with:
- A (batched) DGLGraph object
- A dictionary of node features ({'{degree}': tensor})
- A dictionary of edge features ({'{degree}': tensor})
- (Optional) Precomputed bases as a dictionary
- Labels as a tensor
You can then modify the `training.py` and `inference.py` scripts to use your new data module.
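A minimal sketch of such a collate function, returning the tuple described above, might look as follows; the way individual samples are stored, the feature shapes, and the use of degree-0 features only are assumptions that will depend on your dataset:
```
import dgl
import torch

def collate(samples):
    # each sample is assumed to be a (graph, node_feats, edge_feats, label) tuple
    graphs, node_feats, edge_feats, labels = map(list, zip(*samples))
    batched_graph = dgl.batch(graphs)
    # dictionaries keyed by feature degree; degree-0 features have shape (N, channels, 1)
    node_feats = {'0': torch.cat([f['0'] for f in node_feats], dim=0)}
    edge_feats = {'0': torch.cat([f['0'] for f in edge_feats], dim=0)}
    labels = torch.stack(labels)
    # precomputed bases are optional and omitted here
    return batched_graph, node_feats, edge_feats, labels
```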
### Training process
The training script is `se3_transformer/runtime/training.py`, to be run as a module: `python -m se3_transformer.runtime.training`.
**Logs**
By default, the resulting logs are stored in `/results/`. This can be changed with `--log_dir`.
You can connect your existing Weights & Biases account by setting the WANDB_API_KEY environment variable, and enabling the `--wandb` flag.
If no API key is set, `--wandb` will log the run anonymously to Weights & Biases.
**Checkpoints**
The argument `--save_ckpt_path` can be set to the path of the file where the checkpoints should be saved.
`--ckpt_interval` can also be set to the interval (in the number of epochs) between checkpoints.
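For example, to save a checkpoint every 10 epochs (the path below is only a placeholder):
```
python -m se3_transformer.runtime.training --save_ckpt_path /results/se3_model.pth --ckpt_interval 10
```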
**Evaluation**
The evaluation metric is the Mean Absolute Error (MAE).
`--eval_interval` can be set to the interval (in the number of epochs) between evaluation rounds. By default, an evaluation round is performed after each epoch.
**Automatic Mixed Precision**
To enable Mixed Precision training, add the `--amp` flag.
**Multi-GPU and multi-node**
The training script supports the PyTorch elastic launcher to run on multiple GPUs or nodes. Refer to the [official documentation](https://pytorch.org/docs/1.9.0/elastic/run.html).
For example, to train on all available GPUs with AMP:
```
python -m torch.distributed.run --nnodes=1 --nproc_per_node=gpu --module se3_transformer.runtime.training --amp
```
### Inference process
Inference can be run by using the `se3_transformer.runtime.inference` python module.
The inference script is `se3_transformer/runtime/inference.py`, to be run as a module: `python -m se3_transformer.runtime.inference`. It requires a pre-trained model checkpoint (to be passed as `--load_ckpt_path`).
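For example, assuming a checkpoint was previously saved to `/results/se3_model.pth`, a minimal invocation could look like this (add any model hyperparameters that differ from the defaults):
```
python -m se3_transformer.runtime.inference --load_ckpt_path /results/se3_model.pth
```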
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark the training performance on a specific batch size, run `bash scripts/benchmark_train.sh {BATCH_SIZE}` for single GPU, and `bash scripts/benchmark_train_multi_gpu.sh {BATCH_SIZE}` for multi-GPU.
#### Inference performance benchmark
To benchmark the inference performance on a specific batch size, run `bash scripts/benchmark_inference.sh {BATCH_SIZE}`.
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `scripts/train.sh` and `scripts/train_multi_gpu.sh` training scripts in the PyTorch 23.01 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs.
| GPUs | Batch size / GPU | Absolute error - TF32 | Absolute error - mixed precision | Time to train - TF32 | Time to train - mixed precision | Time to train speedup (mixed precision to TF32) |
|:----:|:----------------:|:---------------------:|:--------------------------------:|:--------------------:|:-------------------------------:|:-----------------------------------------------:|
| 1 | 240 | 0.03038 | 0.02987 | 1h02min | 50min | 1.24x |
| 8 | 240 | 0.03466 | 0.03436 | 13min | 10min | 1.27x |
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `scripts/train.sh` and `scripts/train_multi_gpu.sh` training scripts in the PyTorch 23.01 NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs.
| GPUs | Batch size / GPU | Absolute error - FP32 | Absolute error - mixed precision | Time to train - FP32 | Time to train - mixed precision | Time to train speedup (mixed precision to FP32) |
|:----:|:----------------:|:---------------------:|:--------------------------------:|:--------------------:|:-------------------------------:|:-----------------------------------------------:|
| 1 | 240 | 0.03044 | 0.03076 | 2h07min | 1h22min | 1.55x |
| 8 | 240 | 0.03435 | 0.03495 | 27min | 19min | 1.42x |
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `scripts/benchmark_train.sh` and `scripts/benchmark_train_multi_gpu.sh` benchmarking scripts in the PyTorch 23.01 NGC container on NVIDIA DGX A100 with 8x A100 80GB GPUs. Performance numbers (in molecules per millisecond) were averaged over five entire training epochs after a warmup epoch.
| GPUs | Batch size / GPU | Throughput - TF32 [mol/ms] | Throughput - mixed precision [mol/ms] | Throughput speedup (mixed precision - TF32) | Weak scaling - TF32 | Weak scaling - mixed precision |
|:----------------:|:-------------------:|:--------------------------:|:-------------------------------------:|:-------------------------------------------:|:-------------------:|:------------------------------:|
| 1 | 240 | 2.59 | 3.23 | 1.25x | | |
| 1 | 120 | 1.89 | 1.89 | 1.00x | | |
| 8 | 240 | 18.38 | 21.42 | 1.17x | 7.09 | 6.63 |
| 8 | 120 | 13.23 | 13.23 | 1.00x | 7.00 | 7.00 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training performance: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `scripts/benchmark_train.sh` and `scripts/benchmark_train_multi_gpu.sh` benchmarking scripts in the PyTorch 23.01 NGC container on NVIDIA DGX-1 with 8x V100 16GB GPUs. Performance numbers (in molecules per millisecond) were averaged over five entire training epochs after a warmup epoch.
| GPUs | Batch size / GPU | Throughput - FP32 [mol/ms] | Throughput - mixed precision [mol/ms] | Throughput speedup (mixed precision - FP32) | Weak scaling - FP32 | Weak scaling - mixed precision |
|:----------------:|:--------------------:|:--------------------------:|:--------------------------------------:|:-------------------------------------------:|:-------------------:|:------------------------------:|
| 1 | 240 | 1.23 | 1.91 | 1.55x | | |
| 1 | 120 | 1.01 | 1.23 | 1.22x | | |
| 8 | 240 | 8.44 | 11.28 | 1.34x | 6.8 | 5.90 |
| 8 | 120 | 6.06 | 7.36 | 1.21x | 6.00 | 5.98 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
#### Inference performance results
##### Inference performance: NVIDIA DGX A100 (1x A100 80GB)
Our results were obtained by running the `scripts/benchmark_inference.sh` inferencing benchmarking script in the PyTorch 23.01 NGC container on NVIDIA DGX A100 with 1x A100 80GB GPU.
AMP
| Batch size | Throughput Avg [mol/ms] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:-----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1600 | 9.71 | 175.2 | 190.2 | 191.8 | 432.4 |
| 800 | 7.90 | 114.5 | 134.3 | 135.8 | 140.2 |
| 400 | 7.18 | 75.49 | 108.6 | 109.6 | 113.2 |
TF32
| Batch size | Throughput Avg [mol/ms] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:-----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1600 | 8.19 | 198.2 | 206.8 | 208.5 | 377.0 |
| 800 | 7.56 | 107.5 | 119.6 | 120.5 | 125.7 |
| 400 | 6.97 | 59.8 | 75.1 | 75.7 | 81.3 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
Our results were obtained by running the `scripts/benchmark_inference.sh` inferencing benchmarking script in the PyTorch 23.01 NGC container on NVIDIA DGX-1 with 1x V100 16GB GPU.
AMP
| Batch size | Throughput Avg [mol/ms] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:-----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1600 | 5.39 | 306.6 | 321.2 | 324.9 | 819.1 |
| 800 | 4.67 | 179.8 | 201.5 | 203.8 | 213.3 |
| 400 | 4.25 | 108.2 | 142.0 | 143.0 | 149.0 |
FP32
| Batch size | Throughput Avg [mol/ms] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:-----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1600 | 3.14 | 510.9 | 518.83 | 521.1 | 808.0 |
| 800 | 3.10 | 258.7 | 269.4 | 271.1 | 278.9 |
| 400 | 2.93 | 137.3 | 147.5 | 148.8 | 151.7 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
## Release notes
### Changelog
February 2023:
- Upgraded base container
- Fixed benchmarking code
August 2022:
- Slight performance improvements
- Upgraded base container
November 2021:
- Improved low memory mode to give further 6x memory savings
- Disabled W&B logging by default
- Fixed persistent workers when using one data loading process
October 2021:
- Updated README performance tables
- Fixed shape mismatch when using partially fused TFNs per output degree
- Fixed shape mismatch when using partially fused TFNs per input degree with edge degrees > 0
September 2021:
- Moved to new location (from `PyTorch/DrugDiscovery` to `DGLPyTorch/DrugDiscovery`)
- Fixed multi-GPUs training script
August 2021:
- Initial release
### Known issues
If you encounter `OSError: [Errno 12] Cannot allocate memory` during the DataLoader iterator creation (more precisely, during the `fork()` call), this is most likely due to the use of the `--precompute_bases` flag. If you cannot add more RAM or swap to your machine, it is recommended to turn off bases precomputation by removing the `--precompute_bases` flag or using `--precompute_bases false`.
|
PyTorch/Segmentation/nnUNet/notebooks | notebooks | BraTS21 | #!/usr/bin/env python
# coding: utf-8
# # nnU-Net for BraTS21
#
# # Table of contents
# - [Introduction](#introduction)
# - [Dataset](#dataset)
# - [Data pre-processing](#preprocessing)
# - [Data augmentations](#augmentations)
# - [Loss function](#loss)
# - [Model](#model)
# - [Training](#training)
# - [Inference](#inference)
# - [Post-processing](#postprocessing)
#
# # Introduction <a name="introduction"></a>
#
# The goal of the [BraTS 2021 challenge](https://www.med.upenn.edu/cbica/brats2021) was to create a model for segmenting the brain glioblastoma subregions in mpMRI scans. By using our nnU-Net implementation, NVIDIA data scientists [won the BraTS21 validation phase](https://developer.nvidia.com/blog/nvidia-data-scientists-take-top-spots-in-miccai-2021-brain-tumor-segmentation-challenge). In this notebook, we share the recipe we used for training our nnU-Net for the BraTS21 challenge, so that you can reproduce our results. In particular, we will walk you through the following steps: data pre-processing, designing the loss function, building and training the model, running inference, and finally post-processing the predictions.
#
# # Dataset <a name="dataset"></a>
#
# The training dataset provided for the BraTS21 challenge consists of 1,251 brain mpMRI scans along with segmentation annotations of tumorous regions. The 3D volumes were skull-stripped and resampled to 1 mm isotropic resolution, with dimensions of (240, 240, 155) voxels. For each example, four modalities were given: Fluid Attenuated Inversion Recovery (FLAIR), native (T1), post-contrast T1-weighted (T1Gd), and T2-weighted (T2). See image below with each modality. Annotations consist of four classes: 1 for necrotic tumor core (NCR), 2 for peritumoral edematous tissue (ED), 4 for enhancing tumor (ET), and 0 for background (voxels that are not part of the tumor).
#
# To download the training and validation dataset, you need to have an account on https://www.synapse.org platform and be registered for BraTS21 challenge. We will assume that after downloading and unzipping, the dataset is organized as follows:
#
# ```
# /data
# │
# ├───BraTS2021_train
# │ ├──BraTS2021_00000
# │ │ └──BraTS2021_00000_flair.nii.gz
# │ │ └──BraTS2021_00000_t1.nii.gz
# │ │ └──BraTS2021_00000_t1ce.nii.gz
# │ │ └──BraTS2021_00000_t2.nii.gz
# │ │ └──BraTS2021_00000_seg.nii.gz
# │ ├──BraTS2021_00002
# │ │ └──BraTS2021_00002_flair.nii.gz
# │ ... └──...
# │
# └────BraTS2021_val
# ├──BraTS2021_00001
# │ └──BraTS2021_00001_flair.nii.gz
# │ └──BraTS2021_00001_t1.nii.gz
# │ └──BraTS2021_00001_t1ce.nii.gz
# │ └──BraTS2021_00001_t2.nii.gz
# ├──BraTS2021_00002
# │ └──BraTS2021_00002_flair.nii.gz
# ... └──...
# ```
#
# Let's visualize a BraTS2021_00000 example from the training dataset. Each plot presents a different modality (from left to right: FLAIR, T1, T1ce, T2), and an annotation.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
from glob import glob
imgs = [nib.load(f"/data/BraTS2021_train/BraTS2021_00000/BraTS2021_00000_{m}.nii.gz").get_fdata().astype(np.float32)[:, :, 75] for m in ["flair", "t1", "t1ce", "t2"]]
lbl = nib.load("/data/BraTS2021_train/BraTS2021_00000/BraTS2021_00000_seg.nii.gz").get_fdata().astype(np.uint8)[:, :, 75]
fig, ax = plt.subplots(nrows=1, ncols=5, figsize=(15, 15))
for i, img in enumerate(imgs):
ax[i].imshow(img, cmap='gray')
ax[i].axis('off')
ax[-1].imshow(lbl, vmin=0, vmax=4)
ax[-1].axis('off')
plt.tight_layout()
plt.show()
# # Data pre-processing <a name="preprocessing"></a>
#
# Each example of the BraTS21 dataset consists of four [NIfTI](https://nifti.nimh.nih.gov/) files with different MRI modalities (filenames with suffixes flair, t1, t1ce, t2). Additionally, examples in the training dataset have a NIfTI file with the annotation (filename with suffix seg). As a first step of data pre-processing, all four modalities are stacked such that each example has shape (4, 240, 240, 155) (the input tensor is in the (C, H, W, D) layout, where C-channels, H-height, W-width and D-depth). Then redundant background voxels (with voxel value zero) on the borders of each volume are [cropped](https://docs.monai.io/en/latest/transforms.html#cropforeground), as they do not provide any useful information and can be ignored by the neural network. Subsequently, for each example, the mean and the standard deviation are computed within the non-zero region for each channel separately. All volumes are [normalized](https://docs.monai.io/en/latest/transforms.html#normalizeintensityd) by first subtracting the mean and then dividing by the standard deviation. The background voxels are not normalized, so their value remains at zero. To distinguish between background voxels and normalized voxels with values close to zero, we add an extra input channel with a one-hot encoding of the foreground voxels and stack it with the input data. As a result, each example has 5 channels.
#
# Let's start by preparing the raw training and validation datasets into stacked NIfTI files.
# In[2]:
import json
import os
from glob import glob
from subprocess import call
import time
import nibabel
import numpy as np
from joblib import Parallel, delayed
def load_nifty(directory, example_id, suffix):
return nibabel.load(os.path.join(directory, example_id + "_" + suffix + ".nii.gz"))
def load_channels(d, example_id):
return [load_nifty(d, example_id, suffix) for suffix in ["flair", "t1", "t1ce", "t2"]]
def get_data(nifty, dtype="int16"):
if dtype == "int16":
data = np.abs(nifty.get_fdata().astype(np.int16))
data[data == -32768] = 0
return data
return nifty.get_fdata().astype(np.uint8)
def prepare_nifty(d):
example_id = d.split("/")[-1]
flair, t1, t1ce, t2 = load_channels(d, example_id)
affine, header = flair.affine, flair.header
vol = np.stack([get_data(flair), get_data(t1), get_data(t1ce), get_data(t2)], axis=-1)
vol = nibabel.nifti1.Nifti1Image(vol, affine, header=header)
nibabel.save(vol, os.path.join(d, example_id + ".nii.gz"))
if os.path.exists(os.path.join(d, example_id + "_seg.nii.gz")):
seg = load_nifty(d, example_id, "seg")
affine, header = seg.affine, seg.header
        vol = get_data(seg, "uint8")
vol[vol == 4] = 3
seg = nibabel.nifti1.Nifti1Image(vol, affine, header=header)
nibabel.save(seg, os.path.join(d, example_id + "_seg.nii.gz"))
def prepare_dirs(data, train):
img_path, lbl_path = os.path.join(data, "images"), os.path.join(data, "labels")
call(f"mkdir {img_path}", shell=True)
if train:
call(f"mkdir {lbl_path}", shell=True)
dirs = glob(os.path.join(data, "BraTS*"))
for d in dirs:
if "_" in d.split("/")[-1]:
files = glob(os.path.join(d, "*.nii.gz"))
for f in files:
if "flair" in f or "t1" in f or "t1ce" in f or "t2" in f:
continue
if "_seg" in f:
call(f"mv {f} {lbl_path}", shell=True)
else:
call(f"mv {f} {img_path}", shell=True)
call(f"rm -rf {d}", shell=True)
def prepare_dataset_json(data, train):
images, labels = glob(os.path.join(data, "images", "*")), glob(os.path.join(data, "labels", "*"))
images = sorted([img.replace(data + "/", "") for img in images])
labels = sorted([lbl.replace(data + "/", "") for lbl in labels])
modality = {"0": "FLAIR", "1": "T1", "2": "T1CE", "3": "T2"}
labels_dict = {"0": "background", "1": "edema", "2": "non-enhancing tumor", "3": "enhancing tumour"}
if train:
key = "training"
data_pairs = [{"image": img, "label": lbl} for (img, lbl) in zip(images, labels)]
else:
key = "test"
data_pairs = [{"image": img} for img in images]
dataset = {
"labels": labels_dict,
"modality": modality,
key: data_pairs,
}
with open(os.path.join(data, "dataset.json"), "w") as outfile:
json.dump(dataset, outfile)
def run_parallel(func, args):
return Parallel(n_jobs=os.cpu_count())(delayed(func)(arg) for arg in args)
def prepare_dataset(data, train):
print(f"Preparing BraTS21 dataset from: {data}")
start = time.time()
run_parallel(prepare_nifty, sorted(glob(os.path.join(data, "BraTS*"))))
prepare_dirs(data, train)
prepare_dataset_json(data, train)
end = time.time()
print(f"Preparing time: {(end - start):.2f}")
prepare_dataset("/data/BraTS2021_train", True)
prepare_dataset("/data/BraTS2021_val", False)
print("Finished!")
# Now, let's preprocess the datasets by cropping and normalizing the volumes. We will store the pre-processed volumes as NumPy arrays.
# In[3]:
get_ipython().system('python3 ../preprocess.py --task 11 --ohe --exec_mode training')
get_ipython().system('python3 ../preprocess.py --task 12 --ohe --exec_mode test')
print("Finished!")
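# For intuition, the cell below sketches the per-channel normalization and the foreground one-hot channel described earlier. It is a simplified stand-in for what `preprocess.py` does internally; the array shapes and the zero-valued-background assumption are for illustration only.
# In[ ]:
import numpy as np
def normalize_with_foreground_channel(volume):
    # volume: float32 array of shape (C, H, W, D) holding the 4 stacked MRI modalities
    foreground = volume.sum(axis=0) != 0  # non-zero (brain) voxels
    normalized = np.zeros_like(volume, dtype=np.float32)
    for c in range(volume.shape[0]):
        channel = volume[c]
        mean, std = channel[foreground].mean(), channel[foreground].std()
        normalized[c][foreground] = (channel[foreground] - mean) / max(std, 1e-8)
    # append a one-hot foreground channel, giving 5 input channels in total
    return np.concatenate([normalized, foreground[None].astype(np.float32)], axis=0)
toy = np.zeros((4, 16, 16, 16), dtype=np.float32)
toy[:, 4:12, 4:12, 4:12] = np.random.rand(4, 8, 8, 8)
print(normalize_with_foreground_channel(toy).shape)  # (5, 16, 16, 16)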
# # Data Augmentations <a name="augmentations"></a>
#
# Data augmentation is a technique that alleviates the overfitting problem by artificially extending the dataset during the training phase. To make our method more robust, the following data augmentations are used during training:
#
# 1. **Biased crop**: From the input volume, a patch of dimensions (5, 128, 128, 128) is randomly cropped. Additionally, with probability of 0.4, the patch selected via random biased crop is guaranteed to contain some foreground voxels (with positive class in the ground truth) in the cropped region.
# 2. **Zoom**: With probability of 0.15, a zoom factor is sampled uniformly from (1.0, 1.4) and the input volume is zoomed by the sampled factor with cubic interpolation, while the label is zoomed with nearest neighbor interpolation.
# 3. **Flips**: With probability of 0.5, for each of the x, y, z axes independently, the volume is flipped along that axis.
# 4. **Gaussian Noise**: With probability of 0.15, random Gaussian noise with mean zero and standard deviation sampled uniformly from (0, 0.33) is sampled for each voxel and added to the input volume.
# 5. **Gaussian Blur**: With probability of 0.15, Gaussian blurring with standard deviation of the Gaussian Kernel sampled uniformly from (0.5, 1.5) is applied to the input volume.
# 6. **Brightness**: With probability of 0.15, a random value is sampled uniformly from (0.7, 1.3) and then input volume voxels are multiplied by it.
# 7. **Contrast**: With probability of 0.15, a random value is sampled uniformly from (0.65, 1.5) and then input volume voxels are multiplied by it and clipped to its original min and max values.
#
# The data loading pipeline is implemented with [NVIDIA Data Loading Library (DALI)](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/index.html), which addresses the problem of the CPU bottleneck by offloading data augmentations to the GPU. We encourage you to check out the implementation details of our [DALI pipeline](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/nnUNet/data_loading/dali_loader.py).
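# The cell below is a minimal NumPy sketch of two of the augmentations listed above (random flips and additive Gaussian noise), just to make the sampling logic concrete. The production pipeline implements all of them on the GPU with DALI; the probabilities and ranges here follow the list above.
# In[ ]:
import numpy as np
def random_flips_and_noise(volume, label, rng=np.random):
    # volume: (C, H, W, D) image, label: (H, W, D) segmentation mask
    for axis in (1, 2, 3):  # spatial axes of the volume
        if rng.rand() < 0.5:
            volume = np.flip(volume, axis=axis)
            label = np.flip(label, axis=axis - 1)
    if rng.rand() < 0.15:
        std = rng.uniform(0.0, 0.33)
        volume = volume + rng.normal(0.0, std, size=volume.shape)
    return np.ascontiguousarray(volume), np.ascontiguousarray(label)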
# # Loss function <a name="loss"></a>
#
# The BraTS leaderboard is computed based on three partially overlapping regions: whole tumor (1, 2, 4), tumor core (1, 4) and enhancing tumor (4), instead of the classes present in the labels. Thus, it is beneficial to construct the loss function based on the classes used for the ranking calculation. Therefore, we optimize each region separately with a sum of binary cross-entropy and the Dice loss.
# In[4]:
import torch.nn as nn
from monai.losses import DiceLoss
class Loss(nn.Module):
def __init__(self):
super(Loss, self).__init__()
self.dice = DiceLoss(sigmoid=True, batch=True)
self.ce = nn.BCEWithLogitsLoss()
def _loss(self, p, y):
return self.dice(p, y) + self.ce(p, y.float())
def forward(self, p, y):
y_wt, y_tc, y_et = y > 0, ((y == 1) + (y == 3)) > 0, y == 3
p_wt, p_tc, p_et = p[:, 0].unsqueeze(1), p[:, 1].unsqueeze(1), p[:, 2].unsqueeze(1)
l_wt, l_tc, l_et = self._loss(p_wt, y_wt), self._loss(p_tc, y_tc), self._loss(p_et, y_et)
return l_wt + l_tc + l_et
# # Model <a name="model"></a>
#
# We have made some modifications to the U-Net architecture for the BraTS challenge with respect to the original nnU-Net template. In particular, the U-Net template in the nnU-Net has an encoder depth of 6, and the convolution channels at each encoder level are: 32, 64, 128, 256, 320, 320. Based on the experiments we ran, increasing the depth of the encoder to 7, modifying the number of channels to: 64, 96, 128, 192, 256, 384, 512, and using deep supervision improves the final score.
#
# For deep supervision, we used two additional output heads at the decoder levels with feature map sizes (64, 64, 64) and (32, 32, 32). To match these additional predictions with the label of shape (128, 128, 128), we downsample the label using nearest neighbor interpolation to the (64, 64, 64) and (32, 32, 32) shapes, so that the loss can also be computed for the additional outputs.
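# As a minimal sketch (not the exact training code), the cell below shows how such a deep supervision loss can be assembled with the `Loss` defined earlier: the label is downsampled with nearest neighbor interpolation to the resolution of each auxiliary head and the per-head losses are summed. Equal weighting of the heads is an assumption made for illustration.
# In[ ]:
import torch
import torch.nn.functional as F
def deep_supervision_loss(loss_fn, preds, label):
    # preds: list of logits, e.g. shapes (B, 3, 128, 128, 128), (B, 3, 64, 64, 64), (B, 3, 32, 32, 32)
    # label: (B, 1, 128, 128, 128) integer ground truth with classes {0, 1, 2, 3}
    total = 0.0
    for pred in preds:
        target = F.interpolate(label.float(), size=pred.shape[2:], mode="nearest").long()
        total = total + loss_fn(pred, target)
    return total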
# In[5]:
from IPython.display import Image
Image(filename="../images/unet-brats.jpg")
# Figure 1: *The final U-Net architecture used for BraTS21 challenge.*
#
# # Training <a name="training"></a>
#
# Now, let's start training the model. For that, we will call the training script from our nnUNet repo with some additional command line arguments for BraTS challenge:
#
# - `--brats` - use loss function with partially overlapping regions (WT, TC, ET) and BraTS specific inference;
# - `--deep_supervision` - use deep supervision loss with two additional output heads;
# - `--more_chn` - create encoder with more channels than regular U-Net;
# - `--min_fmap 2` - create deeper encoder, with feature map size in the bottleneck 2x2x2;
#
# and the regular command line arguments:
#
# - `--scheduler` - use cosine decay learning rate scheduler with 250 warm-up steps;
# - `--learning_rate 0.0003` - initial learning rate after warm up will be set to 0.0003;
# - `--epochs 30` - training will be done for 30 epochs;
# - `--fold 0` - training will be done for fold 0 (by default, 5-fold cross validation is used);
# - `--amp` - training with automatic mixed precision, for faster training and memory reduction;
# - `--gpus 1` - one GPU will be used during training;
# - `--task 11` - task number for BraTS21 training dataset. See file `data_preprocessing/configs.py` for more details;
# - `--save_ckpt` - save the checkpoint with the highest dice score achieved during training.
#
# We will run training on 1x A100 GPU. To train the model with [AMP](https://developer.nvidia.com/automatic-mixed-precision), you will need a GPU with at least 15 GB of memory.
#
# Here, we will train the model on just one fold (fold with index 0) for 30 epochs. For the challenge submission, we trained 5 models, one on each fold, for 150 epochs, and averaged their predictions.
# In[6]:
get_ipython().system('python ../main.py --brats --deep_supervision --depth 6 --filters 64 96 128 192 256 384 512 --min_fmap 2 --scheduler --learning_rate 0.0003 --epochs 30 --fold 0 --amp --gpus 1 --task 11 --save_ckpt')
# # Inference <a name="inference"></a>
#
# During inference, the input volume can have an arbitrary size, instead of the fixed patch size (128, 128, 128) used during the training phase. Thus, we use [sliding window inference](https://docs.monai.io/en/latest/inferers.html) from the [MONAI](https://monai.io/) library, where the window has the same size as the training patch, i.e., (128, 128, 128), and adjacent windows overlap by half the size of a patch. The predictions on the overlapping regions are then averaged with Gaussian importance weighting, such that the center voxels of each window are given higher weight.
#
# One of the known tricks to improve prediction robustness is to apply test time augmentation (TTA). During inference, we create eight versions of the input volume, such that each version corresponds to one of the eight possible combinations of flips along the x, y, z axes. We then run inference for each version of the input volume and transform the predictions back to the original input volume orientation by applying the same flips to the predictions as were used for the input volume. Finally, the probabilities from all predictions are averaged.
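# The cell below sketches this procedure with MONAI's sliding window inference plus flip-based TTA. `model` and `volume` are placeholders; the roi size, overlap and Gaussian blending match the description above, but this is an illustrative sketch rather than the exact inference code used by the script.
# In[ ]:
import itertools
import torch
from monai.inferers import sliding_window_inference
def predict_with_tta(model, volume):
    # volume: (B, 5, H, W, D) pre-processed input tensor; model returns 3-channel logits (WT, TC, ET)
    def infer(x):
        return sliding_window_inference(
            inputs=x, roi_size=(128, 128, 128), sw_batch_size=1,
            predictor=model, overlap=0.5, mode="gaussian",
        )
    probs = []
    for flips in itertools.chain.from_iterable(
        itertools.combinations((2, 3, 4), r) for r in range(4)
    ):  # 8 flip combinations over the spatial axes
        flipped = torch.flip(volume, flips) if flips else volume
        pred = infer(flipped)
        probs.append(torch.flip(pred, flips) if flips else pred)
    return torch.sigmoid(torch.stack(probs)).mean(0)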
#
# Let's run inference with TTA on the challenge validation dataset.
#
# Note: You will have to modify the `--ckpt_path` argument, such that the path to checkpoint is valid.
# In[7]:
get_ipython().system('python ../main.py --gpus 1 --amp --save_preds --exec_mode predict --brats --data /data/12_3d/test --ckpt_path /results/checkpoints/epoch=29-dice_mean=89.69.ckpt --tta')
# # Post-processing <a name="postprocessing"></a>
#
# Since we optimized the three overlapping regions (ET, TC, WT), we need to convert them back to the original classes (NCR, ED, ET). The strategy for transforming the regions back to the original classes is the following: if the WT probability for a given voxel is less than 0.45 then its class is set to 0 (background), otherwise if the probability for TC is less than 0.4 the voxel class is 2 (ED), and finally if the probability for ET is less than 0.4 the voxel has class 1 (NCR), or otherwise 4 (ET).
#
# Furthermore, we applied the following post-processing strategy: find the ET connected components and, for components smaller than 16 voxels with mean probability smaller than 0.9, replace their class with NCR (such that the voxels are still considered part of the tumor core); next, if there are fewer than 73 ET voxels in total and their mean probability is smaller than 0.9, replace all ET voxels with NCR. With such post-processing we avoided the edge case where the model predicted a few voxels with enhancing tumor (ET) but there were none in the ground truth. Such post-processing was beneficial to the final score because, if there are no enhancing tumor voxels in the label, the Dice score is 1 for a prediction with zero false positives, and 0 otherwise.
# In[8]:
import os
from glob import glob
from subprocess import call
import nibabel as nib
import numpy as np
from scipy.ndimage.measurements import label
def to_lbl(pred):
enh = pred[2]
c1, c2, c3 = pred[0] > 0.5, pred[1] > 0.5, pred[2] > 0.5
pred = (c1 > 0).astype(np.uint8)
pred[(c2 == False) * (c1 == True)] = 2
pred[(c3 == True) * (c1 == True)] = 4
components, n = label(pred == 4)
for et_idx in range(1, n + 1):
_, counts = np.unique(pred[components == et_idx], return_counts=True)
if 1 < counts[0] and counts[0] < 8 and np.mean(enh[components == et_idx]) < 0.9:
pred[components == et_idx] = 1
et = pred == 4
if 0 < et.sum() and et.sum() < 73 and np.mean(enh[et]) < 0.9:
pred[et] = 1
pred = np.transpose(pred, (2, 1, 0)).astype(np.uint8)
return pred
def prepare_predictions(e):
fname = e[0].split("/")[-1].split(".")[0]
preds = [np.load(f) for f in e]
p = to_lbl(np.mean(preds, 0))
img = nib.load(f"/data/BraTS2021_val/images/{fname}.nii.gz")
nib.save(
nib.Nifti1Image(p, img.affine, header=img.header),
os.path.join("/results/final_preds", fname + ".nii.gz"),
)
os.makedirs("/results/final_preds")
preds = sorted(glob(f"/results/predictions*"))
examples = list(zip(*[sorted(glob(f"{p}/*.npy")) for p in preds]))
print("Preparing final predictions")
for e in examples:
    prepare_predictions(e)
print("Finished!")
# # Visualization
#
# Let's visualize the final prediction made on the challenge validation dataset.
# In[9]:
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
from glob import glob
n, z = 5, 75
data = sorted(glob("/results/final_preds/*.nii.gz"))
for i in range(n):
fname = data[i].split("/")[-1].split(".")[0]
print(fname)
img = nib.load(f"/data/BraTS2021_val/images/{fname}.nii.gz").get_fdata().astype(np.float32)
pred = nib.load(data[i]).get_fdata().astype(np.uint8)[:, :, z]
imgs = [img[:, :, z, i] for i in [0, 3]] + [pred]
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12, 12))
for i in range(3):
if i < 2:
ax[i].imshow(imgs[i], cmap='gray')
else:
ax[i].imshow(imgs[i]);
ax[i].axis('off')
plt.tight_layout()
plt.show()
|
PyTorch/Forecasting/TFT/triton/runner/maintainer | maintainer | exceptions | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ContainerNotStarted(Exception):
pass
|
TensorFlow/Detection/SSD/models/research/object_detection | object_detection | exporter | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to export object detection inference graph."""
import os
import tempfile
import tensorflow as tf
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.utils import config_util
from object_detection.utils import shape_utils
slim = tf.contrib.slim
freeze_graph_with_def_protos = freeze_graph.freeze_graph_with_def_protos
def rewrite_nn_resize_op(is_quantized=False):
"""Replaces a custom nearest-neighbor resize op with the Tensorflow version.
Some graphs use this custom version for TPU-compatibility.
Args:
is_quantized: True if the default graph is quantized.
"""
input_pattern = graph_matcher.OpTypePattern(
'FakeQuantWithMinMaxVars' if is_quantized else '*')
reshape_1_pattern = graph_matcher.OpTypePattern(
'Reshape', inputs=[input_pattern, 'Const'], ordered_inputs=False)
mul_pattern = graph_matcher.OpTypePattern(
'Mul', inputs=[reshape_1_pattern, 'Const'], ordered_inputs=False)
# The quantization script may or may not insert a fake quant op after the
# Mul. In either case, these min/max vars are not needed once replaced with
# the TF version of NN resize.
fake_quant_pattern = graph_matcher.OpTypePattern(
'FakeQuantWithMinMaxVars',
inputs=[mul_pattern, 'Identity', 'Identity'],
ordered_inputs=False)
reshape_2_pattern = graph_matcher.OpTypePattern(
'Reshape',
inputs=[graph_matcher.OneofPattern([fake_quant_pattern, mul_pattern]),
'Const'],
ordered_inputs=False)
add_pattern = graph_matcher.OpTypePattern(
'Add', inputs=[reshape_2_pattern, '*'], ordered_inputs=False)
matcher = graph_matcher.GraphMatcher(add_pattern)
for match in matcher.match_graph(tf.get_default_graph()):
projection_op = match.get_op(input_pattern)
reshape_2_op = match.get_op(reshape_2_pattern)
add_op = match.get_op(add_pattern)
nn_resize = tf.image.resize_nearest_neighbor(
projection_op.outputs[0],
add_op.outputs[0].shape.dims[1:3],
align_corners=False)
for index, op_input in enumerate(add_op.inputs):
if op_input == reshape_2_op.outputs[0]:
add_op._update_input(index, nn_resize) # pylint: disable=protected-access
break
def replace_variable_values_with_moving_averages(graph,
current_checkpoint_file,
new_checkpoint_file):
"""Replaces variable values in the checkpoint with their moving averages.
If the current checkpoint has shadow variables maintaining moving averages of
the variables defined in the graph, this function generates a new checkpoint
where the variables contain the values of their moving averages.
Args:
graph: a tf.Graph object.
current_checkpoint_file: a checkpoint containing both original variables and
their moving averages.
new_checkpoint_file: file path to write a new checkpoint.
"""
with graph.as_default():
variable_averages = tf.train.ExponentialMovingAverage(0.0)
ema_variables_to_restore = variable_averages.variables_to_restore()
with tf.Session() as sess:
read_saver = tf.train.Saver(ema_variables_to_restore)
read_saver.restore(sess, current_checkpoint_file)
write_saver = tf.train.Saver()
write_saver.save(sess, new_checkpoint_file)
def _image_tensor_input_placeholder(input_shape=None):
"""Returns input placeholder and a 4-D uint8 image tensor."""
if input_shape is None:
input_shape = (None, None, None, 3)
input_tensor = tf.placeholder(
dtype=tf.uint8, shape=input_shape, name='image_tensor')
return input_tensor, input_tensor
def _tf_example_input_placeholder():
"""Returns input that accepts a batch of strings with tf examples.
Returns:
a tuple of input placeholder and the output decoded images.
"""
batch_tf_example_placeholder = tf.placeholder(
tf.string, shape=[None], name='tf_example')
def decode(tf_example_string_tensor):
tensor_dict = tf_example_decoder.TfExampleDecoder().decode(
tf_example_string_tensor)
image_tensor = tensor_dict[fields.InputDataFields.image]
return image_tensor
return (batch_tf_example_placeholder,
shape_utils.static_or_dynamic_map_fn(
decode,
elems=batch_tf_example_placeholder,
dtype=tf.uint8,
parallel_iterations=32,
back_prop=False))
def _encoded_image_string_tensor_input_placeholder():
"""Returns input that accepts a batch of PNG or JPEG strings.
Returns:
a tuple of input placeholder and the output decoded images.
"""
batch_image_str_placeholder = tf.placeholder(
dtype=tf.string,
shape=[None],
name='encoded_image_string_tensor')
def decode(encoded_image_string_tensor):
image_tensor = tf.image.decode_image(encoded_image_string_tensor,
channels=3)
image_tensor.set_shape((None, None, 3))
return image_tensor
return (batch_image_str_placeholder,
tf.map_fn(
decode,
elems=batch_image_str_placeholder,
dtype=tf.uint8,
parallel_iterations=32,
back_prop=False))
input_placeholder_fn_map = {
'image_tensor': _image_tensor_input_placeholder,
'encoded_image_string_tensor':
_encoded_image_string_tensor_input_placeholder,
'tf_example': _tf_example_input_placeholder,
}
def add_output_tensor_nodes(postprocessed_tensors,
output_collection_name='inference_op'):
"""Adds output nodes for detection boxes and scores.
Adds the following nodes for output tensors -
* num_detections: float32 tensor of shape [batch_size].
* detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4]
containing detected boxes.
* detection_scores: float32 tensor of shape [batch_size, num_boxes]
containing scores for the detected boxes.
* detection_classes: float32 tensor of shape [batch_size, num_boxes]
containing class predictions for the detected boxes.
* detection_keypoints: (Optional) float32 tensor of shape
[batch_size, num_boxes, num_keypoints, 2] containing keypoints for each
detection box.
* detection_masks: (Optional) float32 tensor of shape
[batch_size, num_boxes, mask_height, mask_width] containing masks for each
detection box.
Args:
postprocessed_tensors: a dictionary containing the following fields
'detection_boxes': [batch, max_detections, 4]
'detection_scores': [batch, max_detections]
'detection_classes': [batch, max_detections]
'detection_masks': [batch, max_detections, mask_height, mask_width]
(optional).
'detection_keypoints': [batch, max_detections, num_keypoints, 2]
(optional).
'num_detections': [batch]
output_collection_name: Name of collection to add output tensors to.
Returns:
A tensor dict containing the added output tensor nodes.
"""
detection_fields = fields.DetectionResultFields
label_id_offset = 1
boxes = postprocessed_tensors.get(detection_fields.detection_boxes)
scores = postprocessed_tensors.get(detection_fields.detection_scores)
classes = postprocessed_tensors.get(
detection_fields.detection_classes) + label_id_offset
keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints)
masks = postprocessed_tensors.get(detection_fields.detection_masks)
num_detections = postprocessed_tensors.get(detection_fields.num_detections)
outputs = {}
outputs[detection_fields.detection_boxes] = tf.identity(
boxes, name=detection_fields.detection_boxes)
outputs[detection_fields.detection_scores] = tf.identity(
scores, name=detection_fields.detection_scores)
outputs[detection_fields.detection_classes] = tf.identity(
classes, name=detection_fields.detection_classes)
outputs[detection_fields.num_detections] = tf.identity(
num_detections, name=detection_fields.num_detections)
if keypoints is not None:
outputs[detection_fields.detection_keypoints] = tf.identity(
keypoints, name=detection_fields.detection_keypoints)
if masks is not None:
outputs[detection_fields.detection_masks] = tf.identity(
masks, name=detection_fields.detection_masks)
for output_key in outputs:
tf.add_to_collection(output_collection_name, outputs[output_key])
return outputs
def write_saved_model(saved_model_path,
frozen_graph_def,
inputs,
outputs):
"""Writes SavedModel to disk.
If checkpoint_path is not None bakes the weights into the graph thereby
eliminating the need of checkpoint files during inference. If the model
was trained with moving averages, setting use_moving_averages to true
restores the moving averages, otherwise the original set of variables
is restored.
Args:
saved_model_path: Path to write SavedModel.
frozen_graph_def: tf.GraphDef holding frozen graph.
inputs: The input placeholder tensor.
outputs: A tensor dictionary containing the outputs of a DetectionModel.
"""
with tf.Graph().as_default():
with session.Session() as sess:
tf.import_graph_def(frozen_graph_def, name='')
builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path)
tensor_info_inputs = {
'inputs': tf.saved_model.utils.build_tensor_info(inputs)}
tensor_info_outputs = {}
for k, v in outputs.items():
tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)
detection_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs=tensor_info_inputs,
outputs=tensor_info_outputs,
method_name=signature_constants.PREDICT_METHOD_NAME))
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
detection_signature,
},
)
builder.save()
def write_graph_and_checkpoint(inference_graph_def,
model_path,
input_saver_def,
trained_checkpoint_prefix):
"""Writes the graph and the checkpoint into disk."""
for node in inference_graph_def.node:
node.device = ''
with tf.Graph().as_default():
tf.import_graph_def(inference_graph_def, name='')
with session.Session() as sess:
saver = saver_lib.Saver(saver_def=input_saver_def,
save_relative_paths=True)
saver.restore(sess, trained_checkpoint_prefix)
saver.save(sess, model_path)
def _get_outputs_from_inputs(input_tensors, detection_model,
output_collection_name):
inputs = tf.to_float(input_tensors)
preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs)
output_tensors = detection_model.predict(
preprocessed_inputs, true_image_shapes)
postprocessed_tensors = detection_model.postprocess(
output_tensors, true_image_shapes)
return add_output_tensor_nodes(postprocessed_tensors,
output_collection_name)
def _build_detection_graph(input_type, detection_model, input_shape,
output_collection_name, graph_hook_fn):
"""Build the detection graph."""
if input_type not in input_placeholder_fn_map:
raise ValueError('Unknown input type: {}'.format(input_type))
placeholder_args = {}
if input_shape is not None:
if input_type != 'image_tensor':
raise ValueError('Can only specify input shape for `image_tensor` '
'inputs.')
placeholder_args['input_shape'] = input_shape
placeholder_tensor, input_tensors = input_placeholder_fn_map[input_type](
**placeholder_args)
outputs = _get_outputs_from_inputs(
input_tensors=input_tensors,
detection_model=detection_model,
output_collection_name=output_collection_name)
# Add global step to the graph.
slim.get_or_create_global_step()
if graph_hook_fn: graph_hook_fn()
return outputs, placeholder_tensor
def _export_inference_graph(input_type,
detection_model,
use_moving_averages,
trained_checkpoint_prefix,
output_directory,
additional_output_tensor_names=None,
input_shape=None,
output_collection_name='inference_op',
graph_hook_fn=None,
write_inference_graph=False):
"""Export helper."""
tf.gfile.MakeDirs(output_directory)
frozen_graph_path = os.path.join(output_directory,
'frozen_inference_graph.pb')
saved_model_path = os.path.join(output_directory, 'saved_model')
model_path = os.path.join(output_directory, 'model.ckpt')
outputs, placeholder_tensor = _build_detection_graph(
input_type=input_type,
detection_model=detection_model,
input_shape=input_shape,
output_collection_name=output_collection_name,
graph_hook_fn=graph_hook_fn)
profile_inference_graph(tf.get_default_graph())
saver_kwargs = {}
if use_moving_averages:
# This check is to be compatible with both version of SaverDef.
if os.path.isfile(trained_checkpoint_prefix):
saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name
else:
temp_checkpoint_prefix = tempfile.mkdtemp()
replace_variable_values_with_moving_averages(
tf.get_default_graph(), trained_checkpoint_prefix,
temp_checkpoint_prefix)
checkpoint_to_use = temp_checkpoint_prefix
else:
checkpoint_to_use = trained_checkpoint_prefix
saver = tf.train.Saver(**saver_kwargs)
input_saver_def = saver.as_saver_def()
write_graph_and_checkpoint(
inference_graph_def=tf.get_default_graph().as_graph_def(),
model_path=model_path,
input_saver_def=input_saver_def,
trained_checkpoint_prefix=checkpoint_to_use)
if write_inference_graph:
inference_graph_def = tf.get_default_graph().as_graph_def()
inference_graph_path = os.path.join(output_directory,
'inference_graph.pbtxt')
for node in inference_graph_def.node:
node.device = ''
with gfile.GFile(inference_graph_path, 'wb') as f:
f.write(str(inference_graph_def))
if additional_output_tensor_names is not None:
    output_node_names = ','.join(list(outputs.keys()) + additional_output_tensor_names)
else:
output_node_names = ','.join(outputs.keys())
frozen_graph_def = freeze_graph.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_to_use,
output_node_names=output_node_names,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
output_graph=frozen_graph_path,
clear_devices=True,
initializer_nodes='')
write_saved_model(saved_model_path, frozen_graph_def,
placeholder_tensor, outputs)
def export_inference_graph(input_type,
pipeline_config,
trained_checkpoint_prefix,
output_directory,
input_shape=None,
output_collection_name='inference_op',
additional_output_tensor_names=None,
write_inference_graph=False):
"""Exports inference graph for the model specified in the pipeline config.
Args:
input_type: Type of input for the graph. Can be one of ['image_tensor',
'encoded_image_string_tensor', 'tf_example'].
pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
trained_checkpoint_prefix: Path to the trained checkpoint file.
output_directory: Path to write outputs.
input_shape: Sets a fixed shape for an `image_tensor` input. If not
specified, will default to [None, None, None, 3].
output_collection_name: Name of collection to add output tensors to.
If None, does not add output tensors to a collection.
additional_output_tensor_names: list of additional output
tensors to include in the frozen graph.
write_inference_graph: If true, writes inference graph to disk.
"""
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
graph_rewriter_fn = None
if pipeline_config.HasField('graph_rewriter'):
graph_rewriter_config = pipeline_config.graph_rewriter
graph_rewriter_fn = graph_rewriter_builder.build(graph_rewriter_config,
is_training=False)
_export_inference_graph(
input_type,
detection_model,
pipeline_config.eval_config.use_moving_averages,
trained_checkpoint_prefix,
output_directory,
additional_output_tensor_names,
input_shape,
output_collection_name,
graph_hook_fn=graph_rewriter_fn,
write_inference_graph=write_inference_graph)
pipeline_config.eval_config.use_moving_averages = False
config_util.save_pipeline_config(pipeline_config, output_directory)
def profile_inference_graph(graph):
"""Profiles the inference graph.
Prints model parameters and computation FLOPs given an inference graph.
BatchNorms are excluded from the parameter count due to the fact that
BatchNorms are usually folded. BatchNorm, Initializer, Regularizer
and BiasAdd are not considered in FLOP count.
Args:
graph: the inference graph.
"""
tfprof_vars_option = (
tf.contrib.tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
tfprof_flops_option = tf.contrib.tfprof.model_analyzer.FLOAT_OPS_OPTIONS
# Batchnorm is usually folded during inference.
tfprof_vars_option['trim_name_regexes'] = ['.*BatchNorm.*']
# Initializer and Regularizer are only used in training.
tfprof_flops_option['trim_name_regexes'] = [
'.*BatchNorm.*', '.*Initializer.*', '.*Regularizer.*', '.*BiasAdd.*'
]
tf.contrib.tfprof.model_analyzer.print_model_analysis(
graph,
tfprof_options=tfprof_vars_option)
tf.contrib.tfprof.model_analyzer.print_model_analysis(
graph,
tfprof_options=tfprof_flops_option)
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection | object_detection | minibatch_sampler | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base minibatch sampler module.
The job of the minibatch_sampler is to subsample a minibatch based on some
criterion.
The main function call is:
subsample(indicator, batch_size, **params).
Indicator is a 1d boolean tensor where True denotes which examples can be
sampled. It returns a boolean indicator where True denotes an example has been
sampled..
Subclasses should implement the Subsample function and can make use of the
@staticmethod SubsampleIndicator.
This is originally implemented in TensorFlow Object Detection API.
"""
from abc import ABCMeta, abstractmethod
import tensorflow as tf
from mrcnn_tf2.object_detection import ops
class MinibatchSampler:
"""Abstract base class for subsampling minibatches."""
__metaclass__ = ABCMeta
def __init__(self):
"""Constructs a minibatch sampler."""
@abstractmethod
def subsample(self, indicator, batch_size, **params):
"""Returns subsample of entries in indicator.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size.
**params: additional keyword arguments for specific implementations of
the MinibatchSampler.
Returns:
sample_indicator: boolean tensor of shape [N] whose True entries have been
sampled. If sum(indicator) >= batch_size, sum(is_sampled) = batch_size
"""
@staticmethod
def subsample_indicator(indicator, num_samples):
"""Subsample indicator vector.
Given a boolean indicator vector with M elements set to `True`, the function
assigns all but `num_samples` of these previously `True` elements to
`False`. If `num_samples` is greater than M, the original indicator vector
is returned.
Args:
indicator: a 1-dimensional boolean tensor indicating which elements
are allowed to be sampled and which are not.
num_samples: int32 scalar tensor
Returns:
a boolean tensor with the same shape as input (indicator) tensor
"""
indices = tf.where(indicator)
indices = tf.random.shuffle(indices)
indices = tf.reshape(indices, [-1])
num_samples = tf.minimum(tf.size(input=indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
selected_indicator = ops.indices_to_dense_vector(selected_indices,
tf.shape(input=indicator)[0])
return tf.equal(selected_indicator, 1)
|
PyTorch/Classification/GPUNet/triton/08ms-D/runner | runner | pipeline_impl | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...runner.pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit ${TORCH_JIT} \
\
--config /workspace/gpunet/configs/batch1/GV100/0.8ms-D.json \
--checkpoint ${CHECKPOINT_DIR}/0.8ms-D.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet True \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching ${MODEL_BATCHING} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
) |
TensorFlow/Recommendation/VAE-CF/vae/models | models | layers | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras.layers import Dense
class DenseFromSparse(Dense):
def call(self, inputs):
if type(inputs) != tf.sparse.SparseTensor:
raise ValueError("input should be of type " + str(tf.sparse.SparseTensor))
rank = len(inputs.get_shape().as_list())
if rank != 2:
raise NotImplementedError("input should be rank 2")
else:
outputs = tf.sparse.sparse_dense_matmul(inputs, self.kernel)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
|
TensorFlow/Segmentation/UNet_3D_Medical/scripts | scripts | unet3d_infer_benchmark | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches the 3D-UNet FP32 inference benchmark.
# Usage:
# bash scripts/unet3d_infer_benchmark.sh <path/to/dataset> <path/to/results/directory> <batch/size>
python main.py --data_dir $1 --model_dir $2 --exec_mode predict --warmup_steps 20 --fold 0 --batch_size $3 --benchmark --xla |
PyTorch/Forecasting/TFT/triton/runner | runner | pipeline | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Dict, Tuple
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .stages import (
ConversionStage,
DeployStage,
ExportStage,
ResultsType,
TritonPerformanceOfflineStage,
TritonPerformanceOnlineStage,
TritonPreparePerformanceProfilingDataStage,
)
class Pipeline:
"""
Definition of stages that has to be executed before and during experiments
"""
# Stages to execute as part of single experiment
_experiment_stages = [
ExportStage.label,
ConversionStage.label,
DeployStage.label,
TritonPreparePerformanceProfilingDataStage.label,
TritonPerformanceOfflineStage.label,
TritonPerformanceOnlineStage.label,
]
def __init__(self):
"""
Initialize pipeline
"""
self._stages: Dict = dict()
def model_export(self, commands: Tuple[str, ...]) -> None:
"""
Model export stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = ExportStage(commands=commands)
self._stages[stage.label] = stage
def model_conversion(self, commands: Tuple[str, ...]) -> None:
"""
Model conversion stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = ConversionStage(commands=commands)
self._stages[stage.label] = stage
def model_deploy(self, commands: Tuple[str, ...]) -> None:
"""
Model deployment stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = DeployStage(commands=commands)
self._stages[stage.label] = stage
def triton_prepare_performance_profiling_data(self, commands: Tuple[str, ...]) -> None:
"""
Model profiling data creation stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = TritonPreparePerformanceProfilingDataStage(commands=commands)
self._stages[stage.label] = stage
def triton_performance_offline_tests(self, commands: Tuple[str, ...], result_path: str) -> None:
"""
Model performance offline test stage
Args:
commands: Commands to be executed as part of stage
result_path: Path where results file is stored
Returns:
None
"""
stage = TritonPerformanceOfflineStage(
commands=commands,
result_path=result_path,
result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE,
)
self._stages[stage.label] = stage
def triton_performance_online_tests(self, commands: Tuple[str, ...], result_path: str) -> None:
"""
Model performance online test stage
Args:
commands: Commands to be executed as part of stage
result_path: Path where results file is stored
Returns:
None
"""
stage = TritonPerformanceOnlineStage(
commands=commands,
result_path=result_path,
result_type=ResultsType.TRITON_PERFORMANCE_ONLINE,
)
self._stages[stage.label] = stage
def stages(self):
"""
Generate stages which should be run per experiment
Returns:
Generator with stages object
"""
for stage_name in self._experiment_stages:
stage = self._stages.get(stage_name)
if not stage:
continue
yield stage
|
PyTorch/Segmentation/nnUNet/nnunet | nnunet | brats22_model | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn as nn
normalizations = {
"instancenorm3d": nn.InstanceNorm3d,
"instancenorm2d": nn.InstanceNorm2d,
"batchnorm3d": nn.BatchNorm3d,
"batchnorm2d": nn.BatchNorm2d,
}
convolutions = {
"Conv2d": nn.Conv2d,
"Conv3d": nn.Conv3d,
"ConvTranspose2d": nn.ConvTranspose2d,
"ConvTranspose3d": nn.ConvTranspose3d,
}
def get_norm(name, out_channels, groups=32):
if "groupnorm" in name:
return nn.GroupNorm(groups, out_channels, affine=True)
return normalizations[name](out_channels, affine=True)
def get_conv(in_channels, out_channels, kernel_size, stride, dim=3, bias=False):
conv = convolutions[f"Conv{dim}d"]
padding = get_padding(kernel_size, stride)
return conv(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
def get_transp_conv(in_channels, out_channels, kernel_size, stride, dim):
conv = convolutions[f"ConvTranspose{dim}d"]
padding = get_padding(kernel_size, stride)
output_padding = get_output_padding(kernel_size, stride, padding)
return conv(in_channels, out_channels, kernel_size, stride, padding, output_padding, bias=True)
def get_padding(kernel_size, stride):
kernel_size_np = np.atleast_1d(kernel_size)
stride_np = np.atleast_1d(stride)
padding_np = (kernel_size_np - stride_np + 1) / 2
padding = tuple(int(p) for p in padding_np)
return padding if len(padding) > 1 else padding[0]
def get_output_padding(kernel_size, stride, padding):
kernel_size_np = np.atleast_1d(kernel_size)
stride_np = np.atleast_1d(stride)
padding_np = np.atleast_1d(padding)
out_padding_np = 2 * padding_np + stride_np - kernel_size_np
out_padding = tuple(int(p) for p in out_padding_np)
return out_padding if len(out_padding) > 1 else out_padding[0]
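# Worked example (illustrative): for kernel_size=3 and stride=2,
# get_padding(3, 2) = int((3 - 2 + 1) / 2) = 1 and
# get_output_padding(3, 2, 1) = 2 * 1 + 2 - 3 = 1, so the matching transposed
# convolution exactly doubles the spatial size; the same arithmetic is applied
# per axis for tuples, e.g. get_padding((3, 3, 1), (2, 2, 1)) == (1, 1, 0).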
class InputBlock(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(InputBlock, self).__init__()
self.conv1 = get_conv(in_channels, out_channels, 3, 1)
self.conv2 = get_conv(out_channels, out_channels, 3, 1)
self.norm = get_norm(kwargs["norm"], out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.norm(x)
x = self.relu(x)
x = self.conv2(x)
x = self.relu(x)
return x
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, **kwargs):
super(ConvLayer, self).__init__()
self.conv = get_conv(in_channels, out_channels, kernel_size, stride)
self.norm = get_norm(kwargs["norm"], in_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.norm(x)
x = self.conv(x)
x = self.relu(x)
return x
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, **kwargs):
super(ConvBlock, self).__init__()
self.conv1 = ConvLayer(in_channels, out_channels, kernel_size, stride, **kwargs)
self.conv2 = ConvLayer(out_channels, out_channels, kernel_size, 1, **kwargs)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class UpsampleBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, **kwargs):
super(UpsampleBlock, self).__init__()
self.conv_block = ConvBlock(out_channels + in_channels, out_channels, kernel_size, 1, **kwargs)
def forward(self, x, x_skip):
x = nn.functional.interpolate(x, scale_factor=2, mode="trilinear", align_corners=True)
x = torch.cat((x, x_skip), dim=1)
x = self.conv_block(x)
return x
class OutputBlock(nn.Module):
def __init__(self, in_channels, out_channels, dim):
super(OutputBlock, self).__init__()
self.conv = get_conv(in_channels, out_channels, kernel_size=1, stride=1, dim=dim, bias=True)
def forward(self, input_data):
return self.conv(input_data)
class UNet3D(nn.Module):
def __init__(
self,
kernels,
strides,
):
super(UNet3D, self).__init__()
self.dim = 3
self.n_class = 3
self.deep_supervision = True
self.norm = "instancenorm3d"
self.filters = [64, 128, 256, 512, 768, 1024, 2048][: len(strides)]
down_block = ConvBlock
self.input_block = InputBlock(5, self.filters[0], norm=self.norm)
self.downsamples = self.get_module_list(
conv_block=down_block,
in_channels=self.filters[:-1],
out_channels=self.filters[1:],
kernels=kernels[1:-1],
strides=strides[1:-1],
)
self.bottleneck = self.get_conv_block(
conv_block=down_block,
in_channels=self.filters[-2],
out_channels=self.filters[-1],
kernel_size=kernels[-1],
stride=strides[-1],
)
self.upsamples = self.get_module_list(
conv_block=UpsampleBlock,
in_channels=self.filters[1:][::-1],
out_channels=self.filters[:-1][::-1],
kernels=kernels[1:][::-1],
strides=strides[1:][::-1],
)
self.output_block = self.get_output_block(decoder_level=0)
self.deep_supervision_heads = self.get_deep_supervision_heads()
self.apply(self.initialize_weights)
def forward(self, input_data):
out = self.input_block(input_data)
encoder_outputs = [out]
for downsample in self.downsamples:
out = downsample(out)
encoder_outputs.append(out)
out = self.bottleneck(out)
decoder_outputs = []
for upsample, skip in zip(self.upsamples, reversed(encoder_outputs)):
out = upsample(out, skip)
decoder_outputs.append(out)
out = self.output_block(out)
if self.training and self.deep_supervision:
out = [out]
for i, decoder_out in enumerate(decoder_outputs[-3:-1][::-1]):
out.append(self.deep_supervision_heads[i](decoder_out))
return out
def get_conv_block(self, conv_block, in_channels, out_channels, kernel_size, stride, drop_block=False):
return conv_block(
dim=self.dim,
stride=stride,
norm=self.norm,
kernel_size=kernel_size,
in_channels=in_channels,
out_channels=out_channels,
)
def get_output_block(self, decoder_level):
return OutputBlock(in_channels=self.filters[decoder_level], out_channels=self.n_class, dim=self.dim)
def get_deep_supervision_heads(self):
return nn.ModuleList([self.get_output_block(1), self.get_output_block(2)])
def get_module_list(self, in_channels, out_channels, kernels, strides, conv_block):
layers = []
for in_channel, out_channel, kernel, stride in zip(in_channels, out_channels, kernels, strides):
conv_layer = self.get_conv_block(conv_block, in_channel, out_channel, kernel, stride)
layers.append(conv_layer)
return nn.ModuleList(layers)
def initialize_weights(self, module):
name = module.__class__.__name__.lower()
if name in ["conv2d", "conv3d"]:
nn.init.kaiming_normal_(module.weight)
if hasattr(module, "bias") and module.bias is not None:
nn.init.constant_(module.bias, 0)
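if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative only): the kernels, strides and
    # input shape below are assumptions, not the training configuration.
    kernels = [[3, 3, 3]] * 5
    strides = [[1, 1, 1]] + [[2, 2, 2]] * 4
    model = UNet3D(kernels, strides).eval()
    x = torch.rand(1, 5, 64, 64, 64)  # batch of one 5-channel volume
    with torch.no_grad():
        y = model(x)
    print(y.shape)  # expected: torch.Size([1, 3, 64, 64, 64])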
|
TensorFlow2/Recommendation/SIM/sim/utils | utils | gpu_affinity | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
class Device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def get_cpu_affinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, Device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def set_socket_affinity(gpu_id):
dev = Device(gpu_id)
affinity = dev.get_cpu_affinity()
os.sched_setaffinity(0, affinity)
def set_single_affinity(gpu_id):
dev = Device(gpu_id)
affinity = dev.get_cpu_affinity()
os.sched_setaffinity(0, affinity[:1])
def set_single_unique_affinity(gpu_id, nproc_per_node):
devices = [Device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.get_cpu_affinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
affinities = []
assigned = []
for socket_affinity in socket_affinities:
for core in socket_affinity:
if core not in assigned:
affinities.append([core])
assigned.append(core)
break
os.sched_setaffinity(0, affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, mode):
device_ids = [Device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.get_cpu_affinity() for dev in device_ids]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
cores_per_device = len(socket_affinity) // devices_per_group
for group_id, device_id in enumerate(device_ids):
if device_id == gpu_id:
if mode == 'interleaved':
affinity = list(socket_affinity[group_id::devices_per_group])
elif mode == 'continuous':
affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device])
else:
raise RuntimeError('Unknown set_socket_unique_affinity mode')
# reintroduce siblings
affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
os.sched_setaffinity(0, affinity)
def get_thread_siblings_list():
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(map(int, res[0]))
thread_siblings_list.append(pair)
return thread_siblings_list
def set_affinity(gpu_id, nproc_per_node, mode='socket'):
if mode == 'socket':
set_socket_affinity(gpu_id)
elif mode == 'single':
set_single_affinity(gpu_id)
elif mode == 'single_unique':
set_single_unique_affinity(gpu_id, nproc_per_node)
elif mode == 'socket_unique_interleaved':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved')
elif mode == 'socket_unique_continuous':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous')
else:
raise RuntimeError('Unknown affinity mode')
affinity = os.sched_getaffinity(0)
return affinity
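if __name__ == "__main__":
    # Illustrative check (assumes an NVIDIA GPU with NVML available): pin the
    # current process to the CPUs local to GPU 0 and print the resulting set.
    print(set_affinity(gpu_id=0, nproc_per_node=1, mode='socket'))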
|
TensorFlow/Classification/ConvNets/resnet50v1.5/training | training | GPU1_RN50_QAT | #!/bin/bash
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs quantization-aware training (QAT) of ResNet-50 by fine-tuning a pre-trained model on 1 GPU with a batch size of 32.
# Usage: ./GPU1_RN50_QAT.sh <path to the pre-trained model> <path to dataset> <path to results directory>
python main.py --mode=train_and_evaluate --batch_size=32 --lr_warmup_epochs=1 --quantize \
--symmetric --use_qdq --label_smoothing 0.1 --lr_init=0.00005 --momentum=0.875 \
--weight_decay=3.0517578125e-05 --finetune_checkpoint=$1 --data_dir=$2 \
--results_dir=$3 --num_iter 10 --data_format NHWC
|
TensorFlow/LanguageModeling/BERT | BERT | extract_features | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract pre-computed feature vectors from BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import collections
import json
import re
import modeling
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("input_file", None, "")
flags.DEFINE_string("output_file", None, "")
flags.DEFINE_string("layers", "-1,-2,-3,-4", "")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer("batch_size", 32, "Batch size for predictions.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string("master", None,
"If using a TPU, the address of the master.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"use_one_hot_embeddings", False,
"If True, tf.one_hot will be used for embedding lookups, otherwise "
"tf.nn.embedding_lookup will be used. On TPUs, this should be True "
"since it is much faster.")
class InputExample(object):
def __init__(self, unique_id, text_a, text_b):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
def input_fn_builder(features, seq_length):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_unique_ids = []
all_input_ids = []
all_input_mask = []
all_input_type_ids = []
for feature in features:
all_unique_ids.append(feature.unique_id)
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_input_type_ids.append(feature.input_type_ids)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"unique_ids":
tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"input_type_ids":
tf.constant(
all_input_type_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
})
d = d.batch(batch_size=batch_size, drop_remainder=False)
return d
return input_fn
def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
input_type_ids = features["input_type_ids"]
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=input_type_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
if mode != tf.estimator.ModeKeys.PREDICT:
raise ValueError("Only PREDICT modes are supported: %s" % (mode))
tvars = tf.trainable_variables()
scaffold_fn = None
(assignment_map,
initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
all_layers = model.get_all_encoder_layers()
predictions = {
"unique_id": unique_ids,
}
for (i, layer_index) in enumerate(layer_indexes):
predictions["layer_output_%d" % i] = all_layers[layer_index]
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
return output_spec
return model_fn
def convert_examples_to_features(examples, seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("unique_id: %s" % (example.unique_id))
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with tf.io.gfile.GFile(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples
def main(_):
  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
layer_indexes = [int(x) for x in FLAGS.layers.split(",")]
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
master=FLAGS.master,
tpu_config=tf.contrib.tpu.TPUConfig(
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
examples = read_examples(FLAGS.input_file)
features = convert_examples_to_features(
examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer)
unique_id_to_feature = {}
for feature in features:
unique_id_to_feature[feature.unique_id] = feature
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
layer_indexes=layer_indexes,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
predict_batch_size=FLAGS.batch_size)
input_fn = input_fn_builder(
features=features, seq_length=FLAGS.max_seq_length)
  with codecs.getwriter("utf-8")(tf.io.gfile.GFile(FLAGS.output_file, "w")) as writer:
for result in estimator.predict(input_fn, yield_single_examples=True):
unique_id = int(result["unique_id"])
feature = unique_id_to_feature[unique_id]
output_json = collections.OrderedDict()
output_json["linex_index"] = unique_id
all_features = []
for (i, token) in enumerate(feature.tokens):
all_layers = []
for (j, layer_index) in enumerate(layer_indexes):
layer_output = result["layer_output_%d" % j]
layers = collections.OrderedDict()
layers["index"] = layer_index
layers["values"] = [
round(float(x), 6) for x in layer_output[i:(i + 1)].flat
]
all_layers.append(layers)
features = collections.OrderedDict()
features["token"] = token
features["layers"] = all_layers
all_features.append(features)
output_json["features"] = all_features
writer.write(json.dumps(output_json) + "\n")
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("init_checkpoint")
flags.mark_flag_as_required("output_file")
tf.app.run()
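# Example invocation (illustrative; all paths and the checkpoint name are
# placeholders, not files shipped with this repository):
#
#   python extract_features.py \
#     --input_file=input.txt \
#     --vocab_file=vocab.txt \
#     --bert_config_file=bert_config.json \
#     --init_checkpoint=bert_model.ckpt \
#     --output_file=features.jsonl \
#     --layers=-1,-2,-3,-4 \
#     --max_seq_length=128 \
#     --batch_size=8
#
# Each input line is either a single sentence or "text_a ||| text_b"; the
# output contains one JSON object per input line with the activations of the
# requested layers for every token.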
|
PyTorch/Forecasting/TFT/triton/runner | runner | start_NVIDIA-DGX-A100-(1x-A100-80GB) | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Install Docker
. /etc/os-release && \
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \
echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey| apt-key add - && \
curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \
apt-get update && \
apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2
# Install packages
pip install -r triton/runner/requirements.txt
# Evaluate Runner
python3 -m "triton.runner.__main__" \
--config-path "triton/runner/config_NVIDIA-DGX-A100-(1x-A100-80GB).yaml" \
--device 0 |
PyTorch/SpeechRecognition/Jasper/scripts | scripts | train_benchmark | #!/bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -a
# measure on speed-perturbed data, but perturbed so slightly that the fbank length remains the same
# with pad_to_max_duration, this reduces cuDNN benchmark's burn-in period to a single step
: ${DATA_DIR:=${1:-"/datasets/LibriSpeech"}}
: ${OUTPUT_DIR:=${3:-"/results"}}
: ${TRAIN_MANIFESTS:="$DATA_DIR/librispeech-train-clean-100-wav.json"}
# run for a number of epochs, but don't finalize the training
: ${EPOCHS_THIS_JOB:=2}
: ${EPOCHS:=100000}
: ${RESUME:=false}
: ${SAVE_FREQUENCY:=100000}
: ${EVAL_FREQUENCY:=100000}
: ${GRAD_ACCUMULATION_STEPS:=1}
: ${AMP:=false}
: ${EMA:=0}
: ${DALI_DEVICE:="gpu"}
: ${NUM_GPUS_SEQ:="1 4 8"}
: ${BATCH_SIZE_SEQ:="32"}
# A probable range of batch lengths for LibriSpeech
# with BS=64 and continuous speed perturbation (0.85, 1.15)
: ${PRE_ALLOCATE:="1408 1920"}
for NUM_GPUS in $NUM_GPUS_SEQ; do
for BATCH_SIZE in $BATCH_SIZE_SEQ; do
LOG_FILE="$OUTPUT_DIR/perf-train_dali-${DALI_DEVICE}_amp-${AMP}_ngpus${NUM_GPUS}_bs${BATCH_SIZE}.json"
bash ./scripts/train.sh "$@"
done
done
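# Example (illustrative; paths are placeholders): benchmark mixed-precision
# training on 8 GPUs with DALI on GPU and a per-GPU batch size of 32:
#
#   DATA_DIR=/datasets/LibriSpeech OUTPUT_DIR=/results AMP=true \
#   NUM_GPUS_SEQ="8" BATCH_SIZE_SEQ="32" bash scripts/train_benchmark.sh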
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/model_repo/tacotron2 | tacotron2 | config | name: "tacotron2"
platform: "pytorch_libtorch"
default_model_filename: "tacotron2_fp16.pt"
max_batch_size: 8
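# With max_batch_size > 0, Triton prepends the batch dimension implicitly, so
# the dims below describe a single sample; -1 marks a variable-length axis.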
input [
{
name: "sequence__0"
data_type: TYPE_INT64
dims: [-1]
},
{
name: "input_lengths__1"
data_type: TYPE_INT64
dims: [1]
reshape: { shape: [ ] }
}
]
output [
{
name: "mel_outputs_postnet__0"
data_type: TYPE_FP16
dims: [80,-1]
},
{
name: "mel_lengths__1"
data_type: TYPE_INT32
dims: [1]
reshape: { shape: [ ] }
}
]
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling | modeling | utils | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Miscellaneous utility functions
"""
import torch
def cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
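# Example (illustrative): cat([t]) returns t itself without copying, while
# cat([t1, t2], dim=0) falls back to torch.cat and concatenates along dim 0.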
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/cc/kernels/cuda_kernels | cuda_kernels | dot_based_interact_tf32 | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <cuda_fp16.hpp>
#include <math.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
#include "dot_based_interact_shared_utils.cuh"
using namespace nvcuda;
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint TILE_LENGTH,
uint TILE_LENGTH_LOG_2,
uint TILE_WIDTH,
uint TILE_WIDTH_LOG_2,
uint ROW_TILES_PER_STEP>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractTF32FwdKernel(const float *__restrict input,
float *__restrict output,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint smem_elems_per_warp,
uint output_size,
uint num_row_steps,
uint num_col_steps,
uint smem_stride,
uint smem_stride_acc,
uint padding_size) {
  // The only supported sizes for TF32.
const uint kWmmaM = 16;
const uint kWmmaN = 16;
const uint kWmmaK = 8;
uint warp_id = threadIdx.x >> WARP_SIZE_LOG_2;
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (WARP_SIZE - 1);
extern __shared__ float shmem_dynamic_float[];
float *shmem = shmem_dynamic_float + (warp_id * smem_elems_per_warp);
const float *gmem_input = input + num_rows * num_cols * sample_id;
if (lane_id < (num_cols >> 2)) {
for (int i = 0; i < num_rows; ++i, gmem_input += num_cols) {
float4 tmp = ((float4 *)gmem_input)[lane_id];
tmp.x = wmma::__float_to_tf32(tmp.x);
tmp.y = wmma::__float_to_tf32(tmp.y);
tmp.z = wmma::__float_to_tf32(tmp.z);
tmp.w = wmma::__float_to_tf32(tmp.w);
((float4 *)(shmem + i * smem_stride))[lane_id] = tmp;
}
}
float zero = wmma::__float_to_tf32(0.0f);
float4 zero4;
zero4.x = zero;
zero4.y = zero;
zero4.z = zero;
zero4.w = zero;
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (uint i = 0; i < num_rows; ++i) {
(shmem + i * smem_stride)[idx] = zero;
}
}
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((float4 *)(shmem + i * smem_stride))[lane_id] = zero4;
}
}
__syncwarp();
// TODO: MTMD - Copy directly without using shared memory
float *gmem_output = output + output_size * sample_id;
if (lane_id < (num_cols >> 2)) {
((float4 *)gmem_output)[lane_id] = ((float4 *)shmem)[lane_id];
}
wmma::fragment<wmma::accumulator, kWmmaM, kWmmaN, kWmmaK, float> acc[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
for (int i = 0; i < ROW_TILES_PER_STEP; i++) {
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::fill_fragment(acc[i][j], zero);
}
}
// TODO: MTMD - Loop promotion
for (int k_step = 0; k_step < num_col_steps; k_step++) {
wmma::fragment<wmma::matrix_a, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major>
a[ROW_TILES_PER_STEP];
wmma::fragment<wmma::matrix_b, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::col_major>
b[ROW_TILES_PER_STEP];
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
int base_row = (j < ROW_TILES_PER_STEP - 1) ? j * 16 : num_rows_after_padding - 16;
const float *tile_ptr = shmem + (base_row * smem_stride + k_step * kWmmaK);
wmma::load_matrix_sync(a[j], tile_ptr, smem_stride);
wmma::load_matrix_sync(b[j], tile_ptr, smem_stride);
}
for (int i = 0; i < ROW_TILES_PER_STEP; i++) {
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
for (int i = 0; i < ROW_TILES_PER_STEP; i++) {
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
float *tile_ptr = shmem + (i * kWmmaM * smem_stride_acc + j * kWmmaN);
wmma::store_matrix_sync(tile_ptr, acc[i][j], smem_stride_acc, wmma::mem_row_major);
}
}
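  // Per-sample output layout: the num_cols bottom-MLP values copied above,
  // then the num_rows * (num_rows - 1) / 2 lower-triangular interaction terms,
  // then zero padding up to output_size.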
float *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = ROW_TILES_PER_STEP * 16 - num_rows_after_padding;
int src_line = 0;
for (int i = 0; i < num_rows; ++i, ++src_line) {
if (i == ((ROW_TILES_PER_STEP - 1) * 16)) {
src_line += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] = shmem[src_line * smem_stride_acc + lane_id];
}
}
// Add padding to the output vectors
if (lane_id < padding_size) {
    gmem_output[output_size - lane_id - 1] = 0.0f;
}
}
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint FRAG_A_ROWS,
uint FRAG_B_COLS,
uint TILE_LENGTH,
uint TILE_LENGTH_LOG_2,
uint TILE_WIDTH,
uint TILE_WIDTH_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractTF32BwdKernel(const float *__restrict input,
const float *__restrict upstream_grad,
float *__restrict grad,
float *__restrict bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint sample_size,
uint interaction_ugrad_size,
uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems,
uint interaction_ugrad_2D_stride,
uint input_size_elems,
uint input_stride,
uint shared_mem_per_warp_size_elems,
uint num_k_steps,
uint num_n_steps) {
  // The only supported sizes for TF32.
const uint kWmmaM = 16;
const uint kWmmaN = 16;
const uint kWmmaK = 8;
extern __shared__ float shared_mem_float[];
uint warp_id = threadIdx.x >> WARP_SIZE_LOG_2;
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (WARP_SIZE - 1);
uint smem_warp_offset = warp_id * shared_mem_per_warp_size_elems;
float *smem_in = &shared_mem_float[smem_warp_offset];
float *smem_ugrad = &shared_mem_float[smem_warp_offset + input_size_elems];
float *smem_out = &shared_mem_float[smem_warp_offset + input_size_elems + interaction_ugrad_2D_size_elems];
// Global memory pointers for the current sample
// Input
uint gmem_input_sample_offset = sample_id * sample_size;
const float *gmem_input = &input[gmem_input_sample_offset];
// Interaction Gradient
const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
float *gmem_grad = &grad[gmem_grad_sample_offset];
// Bottom MLP gradient
float *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const float *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const float *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
for (uint idx = lane_id; idx < (interaction_ugrad_size >> 2); idx += WARP_SIZE) {
float4 tmp = ((float4 *)gmem_ugrad_interactions)[idx];
tmp.x = wmma::__float_to_tf32(tmp.x);
tmp.y = wmma::__float_to_tf32(tmp.y);
tmp.z = wmma::__float_to_tf32(tmp.z);
tmp.w = wmma::__float_to_tf32(tmp.w);
((float4 *)smem_in)[idx] = tmp;
}
uint offset = (interaction_ugrad_size >> 2) << 2;
for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += WARP_SIZE) {
smem_in[idx] = wmma::__float_to_tf32(gmem_ugrad_interactions[idx]);
}
__syncwarp();
float zero = wmma::__float_to_tf32(0.0f);
float4 zero4;
zero4.x = zero;
zero4.y = zero;
zero4.z = zero;
zero4.w = zero;
// Form the 2D ugrad matrix.
if (lane_id < num_rows_after_padding) {
uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
float ugrad_val = zero;
if (row < lane_id && lane_id < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_ugrad[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= lane_id && lane_id < num_rows_after_padding) {
smem_ugrad[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_ugrad[row * interaction_ugrad_2D_stride + lane_id] = zero;
}
}
__syncwarp();
// Input -> Shared Memory
if (lane_id < (num_cols >> 2)) {
for (uint row = 0; row < num_rows; row++) {
float *smem_row_ptr = &smem_in[row * input_stride];
const float *gmem_row_ptr = &gmem_input[row * num_cols];
float4 tmp = ((float4 *)gmem_row_ptr)[lane_id];
tmp.x = wmma::__float_to_tf32(tmp.x);
tmp.y = wmma::__float_to_tf32(tmp.y);
tmp.z = wmma::__float_to_tf32(tmp.z);
tmp.w = wmma::__float_to_tf32(tmp.w);
((float4 *)smem_row_ptr)[lane_id] = tmp;
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (uint row = 0; row < num_rows; row++) {
float *smem_row_ptr = &smem_in[row * input_stride];
smem_row_ptr[idx] = zero;
}
}
if (lane_id < (num_cols_after_padding >> 2)) {
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
float *smem_row_ptr = &smem_in[row * input_stride];
((float4 *)smem_row_ptr)[lane_id] = zero4;
}
}
__syncwarp();
wmma::fragment<wmma::matrix_a, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major> a[FRAG_A_ROWS];
wmma::fragment<wmma::matrix_b, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major> b[FRAG_B_COLS];
wmma::fragment<wmma::accumulator, kWmmaM, kWmmaN, kWmmaK, float> acc[FRAG_A_ROWS][FRAG_B_COLS];
for (uint n = 0; n < num_n_steps; n++) {
for (uint i = 0; i < FRAG_A_ROWS; i++) {
for (uint j = 0; j < FRAG_B_COLS; j++) {
wmma::fill_fragment(acc[i][j], zero);
}
}
for (uint k = 0; k < num_k_steps; k++) {
for (uint i = 0; i < FRAG_A_ROWS; i++) {
const float *mat_a_tile_ptr =
smem_ugrad + (i << TILE_LENGTH_LOG_2) * interaction_ugrad_2D_stride + (k << TILE_WIDTH_LOG_2);
wmma::load_matrix_sync(a[i], mat_a_tile_ptr, interaction_ugrad_2D_stride);
}
for (uint j = 0; j < FRAG_B_COLS; j++) {
const float *mat_b_tile_ptr =
smem_in + (k << TILE_WIDTH_LOG_2) * input_stride + ((2 * n + j) << TILE_LENGTH_LOG_2);
wmma::load_matrix_sync(b[j], mat_b_tile_ptr, input_stride);
}
for (uint i = 0; i < FRAG_A_ROWS; i++) {
for (uint j = 0; j < FRAG_B_COLS; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
// __syncwarp(); ?
uint out_stride = FRAG_B_COLS << TILE_LENGTH_LOG_2;
for (uint i = 0; i < FRAG_A_ROWS; i++) {
for (uint j = 0; j < FRAG_B_COLS; j++) {
float *out_tile_ptr = smem_out + (i << TILE_LENGTH_LOG_2) * out_stride + (j << TILE_LENGTH_LOG_2);
wmma::store_matrix_sync(out_tile_ptr, acc[i][j], out_stride, wmma::mem_row_major);
}
}
uint gmem_grad_col = n * (FRAG_B_COLS << TILE_LENGTH_LOG_2) + lane_id;
for (uint i = 0; i < num_rows; i++) {
gmem_grad[i * num_cols + gmem_grad_col] = smem_out[i * out_stride + lane_id];
}
}
if (lane_id < (num_cols >> 2)) {
((float4 *)gmem_mlp_grad)[lane_id] = ((float4 *)gmem_ugrad)[lane_id];
}
}
|
PyTorch/SpeechSynthesis/FastPitch/waveglow | waveglow | loss_function | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output, clean_audio):
# clean_audio is unused;
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(
z * z) / (2 * self.sigma * self.sigma) - log_s_total - log_det_W_total # noqa: E501
meta = {}
return loss / (z.size(0) * z.size(1) * z.size(2)), meta
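if __name__ == "__main__":
    # Illustrative sketch only; the tensor shapes below are assumptions and do
    # not correspond to any particular WaveGlow configuration.
    criterion = WaveGlowLoss(sigma=1.0)
    z = torch.randn(2, 8, 1000)
    log_s_list = [torch.randn(2, 4, 1000) for _ in range(2)]
    log_det_W_list = [torch.tensor(0.5) for _ in range(2)]
    loss, _ = criterion((z, log_s_list, log_det_W_list), clean_audio=None)
    print(loss.item())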
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc | csrc | ROIAlign | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#pragma once
#include "cpu/vision.h"
#ifdef WITH_CUDA
#include "cuda/vision.h"
#endif
// Interface for Python
at::Tensor ROIAlign_forward(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const bool is_nhwc) {
if (input.is_cuda()) {
#ifdef WITH_CUDA
return ROIAlign_forward_cuda(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, is_nhwc);
#else
AT_ERROR("Not compiled with GPU support");
#endif
}
return ROIAlign_forward_cpu(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
}
at::Tensor ROIAlign_backward(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio,
const bool is_nhwc) {
if (grad.is_cuda()) {
#ifdef WITH_CUDA
return ROIAlign_backward_cuda(grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, is_nhwc);
#else
AT_ERROR("Not compiled with GPU support");
#endif
}
AT_ERROR("Not implemented on the CPU");
}
|
PyTorch/Classification/GPUNet/triton/deployment_toolkit | deployment_toolkit | core | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
import time
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
def __str__(self):
return self.value
class BackendAccelerator(Parameter):
NONE = "none"
AMP = "amp"
TRT = "trt"
class ExportPrecision(Parameter):
FP16 = "fp16"
FP32 = "fp32"
class Precision(Parameter):
INT8 = "int8"
FP16 = "fp16"
FP32 = "fp32"
class DeviceKind(Parameter):
CPU = "cpu"
GPU = "gpu"
class ModelInputType(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
PYT = "pyt"
class Format(Parameter):
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
ONNX = "onnx"
TORCHSCRIPT = "torchscript"
TRT = "trt"
FASTERTRANSFORMER = "fastertransformer"
# deprecated, backward compatibility only
TS_TRACE = "ts-trace"
TS_SCRIPT = "ts-script"
class ExportFormat(Parameter):
TF_SAVEDMODEL = "tf-savedmodel"
TORCHSCRIPT = "torchscript"
ONNX = "onnx"
# deprecated, backward compatibility only
TS_TRACE = "ts-trace"
TS_SCRIPT = "ts-script"
class TorchJit(Parameter):
NONE = "none"
TRACE = "trace"
SCRIPT = "script"
class Model(NamedTuple):
handle: object
# TODO: precision should be removed
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
self._evaluations = []
self._measurement = False
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def start_measurement(self):
self._measurement = True
self._evaluations = []
def stop_measurement(self, batch_size: int = 1):
LOGGER.info("Removing worst and best results")
evaluations = sorted(self._evaluations)[2:-2]
LOGGER.debug(f"Filtered: {evaluations}")
average_latency_ms = sum(evaluations) / len(evaluations)
LOGGER.debug(f"Average latency: {average_latency_ms:.2f} [ms]")
throughput = (1000.0 / average_latency_ms) * batch_size
LOGGER.debug(f"Throughput: {throughput:.2f} [infer/sec]")
self._measurement = False
return throughput, average_latency_ms
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class TimeMeasurement:
def __init__(self, session: BaseRunnerSession):
self._session = session
self._start = 0
self._end = 0
def __enter__(self):
self._start = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._session._measurement:
return
self._end = time.time()
diff = (self._end - self._start) * 1000.0
LOGGER.debug(f"Iteration time {diff:.2f} [ms]")
self._session._evaluations.append(diff)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
@abc.abstractmethod
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
pass
@property
@abc.abstractmethod
def metrics(self) -> Dict[str, Any]:
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
class MeasurementMode(Enum):
"""
Available measurement stabilization modes
"""
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
class PerformanceTool(Enum):
"""
Available performance evaluation tools
"""
MODEL_ANALYZER = "model_analyzer"
PERF_ANALYZER = "perf_analyzer"
class EvaluationMode(Enum):
"""
Available evaluation modes
"""
OFFLINE = "offline"
ONLINE = "online"
class OfflineMode(Enum):
"""
Available offline mode for memory
"""
SYSTEM = "system"
CUDA = "cuda"
|
TensorFlow/Segmentation/UNet_Medical/examples | examples | unet_TRAIN_BENCHMARK_TF-AMP_8GPU | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches U-Net training with TF-AMP (mixed precision) on 8 GPUs for benchmarking. Usage:
# bash unet_TRAIN_BENCHMARK_TF-AMP_8GPU.sh <path to dataset> <path to results directory> <batch size>
horovodrun -np 8 python main.py --data_dir $1 --model_dir $2 --batch_size $3 --exec_mode train --augment --benchmark --warmup_steps 200 --max_steps 1000 --xla --amp |
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton | triton | run_performance_on_triton | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import logging
import os
import pathlib
import shutil
import sys
from distutils.version import LooseVersion
from enum import Enum
from typing import Any, Dict, List
import yaml
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .deployment_toolkit.core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode, PerformanceTool
from .deployment_toolkit.model_analyzer import ModelAnalyzer, ModelAnalyzerConfig, ModelAnalyzerMode
from .deployment_toolkit.perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.utils import parse_server_url
from .deployment_toolkit.warmup import performance_evaluation_warmup
LOGGER = logging.getLogger("run_performance_on_triton")
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version)
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version)
def _log_dict(title: str, dict_: Dict[str, Any]):
LOGGER.info(title)
for key, value in dict_.items():
LOGGER.info(f"\t{key} = {value}")
def _calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def _update_performance_data(results: List, batch_size: int, performance_partial_file: str):
row: Dict = {"Batch": batch_size}
with open(performance_partial_file) as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = _calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
results.append(row)
def _model_analyzer_evaluation(
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
concurrency_steps: int,
batching_mode: BatchingMode,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
model_repository: str,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
verbose: bool = False,
):
_log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"number_of_triton_instances": number_of_triton_instances,
"number_of_model_instances": number_of_model_instances,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"concurrency_steps": concurrency_steps,
"batching_mode": batching_mode,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"model_repository": model_repository,
"result_path": result_path,
"verbose": verbose,
},
)
perf_analyzer_config = {
"measurement-interval": measurement_interval,
}
if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"):
perf_analyzer_config["input-data"] = [input_data]
else:
perf_analyzer_config["input-data"] = input_data
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
perf_analyzer_config["measurement-mode"] = measurement_mode.value
perf_analyzer_config["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
perf_analyzer_config["shared-memory"] = offline_mode.value
perf_analyzer_config["output-shared-memory-size"] = output_shared_memory_size
if input_shapes:
if TRITON_MODEL_ANALYZER_VERSION > LooseVersion("1.8.0"):
perf_analyzer_config["shape"] = input_shapes
else:
perf_analyzer_config["shape"] = input_shapes[0]
LOGGER.warning("Model Analyzer <= 1.8.0 support only single shape param for Perf Analyzer.")
if batching_mode == BatchingMode.STATIC:
batch_sizes = batch_sizes
concurrency = [number_of_triton_instances]
elif batching_mode == BatchingMode.DYNAMIC:
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances
max_concurrency = min(256, max_total_requests)
step = max(1, max_concurrency // concurrency_steps)
min_concurrency = step
concurrency = {"start": min_concurrency, "stop": max_concurrency, "step": step}
batch_sizes = [max(1, max_total_requests // 256)]
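        # Example (illustrative): with max batch size 64, 1 Triton instance and
        # 2 model instances, max_total_requests = 2 * 64 * 1 * 2 = 256, so the
        # sweep covers concurrency up to 256 in `concurrency_steps` increments
        # and uses a request batch size of max(1, 256 // 256) = 1.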
else:
raise ValueError(f"Unsupported batching mode: {batching_mode}")
protocol, host, port = parse_server_url(server_url)
checkpoints = pathlib.Path("./checkpoints")
if checkpoints.is_dir():
shutil.rmtree(checkpoints.as_posix())
checkpoints.mkdir(parents=True, exist_ok=True)
config = {
"model_repository": model_repository,
"triton_launch_mode": "remote",
"run_config_search_disable": True,
"perf_analyzer_flags": perf_analyzer_config,
"perf_analyzer_timeout": 3600, # Workaround for Perf Analyzer timeout - use 1h
"profile_models": [model_name],
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"verbose": verbose,
"checkpoint_directory": checkpoints.as_posix(),
"override_output_model_repository": True,
"client_protocol": protocol,
f"triton_{protocol}_endpoint": f"{host}:{port}",
}
if verbose:
_log_dict("Model Analyzer profiling configuration", config)
with open("config.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
model_analyzer = ModelAnalyzer(config=config)
model_analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=verbose)
result_path.mkdir(parents=True, exist_ok=True)
for file in checkpoints.iterdir():
if not file.is_file() or file.suffix != ".ckpt":
continue
LOGGER.info(f"Moving checkpoint {file.name} to {result_path}")
shutil.move(file, result_path / file.name)
inference_output_fields = [
"batch_size",
"concurrency",
"perf_throughput",
"perf_latency",
"perf_client_send_recv",
"perf_client_response_wait",
"perf_server_queue",
"perf_server_compute_input",
"perf_server_compute_infer",
"perf_server_compute_output",
]
gpu_output_fields = [
"gpu_uuid",
"batch_size",
"concurrency",
"gpu_used_memory",
"gpu_free_memory",
"gpu_utilization",
"gpu_power_usage",
]
filename_model_inference = "metrics-model-inference.csv"
filename_model_gpu = "metrics-model-gpu.csv"
config = {
"analysis_models": model_name,
"checkpoint_directory": result_path.as_posix(),
"export_path": "/tmp",
"inference_output_fields": inference_output_fields,
"gpu_output_fields": gpu_output_fields,
"filename_model_inference": filename_model_inference,
"filename_model_gpu": filename_model_gpu,
"summarize": False,
}
if verbose:
_log_dict("Model Analyzer analysis configuration", config)
with open("config.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
model_analyzer = ModelAnalyzer(config=config)
model_analyzer.run(mode=ModelAnalyzerMode.ANALYZE, verbose=verbose)
inference_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_inference
gpu_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_gpu
for file in [inference_metrics_file, gpu_metrics_file]:
LOGGER.info(f"Moving metrics {file.name} to {result_path}")
shutil.move(file, result_path / file.name)
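# The function above drives Model Analyzer in two passes: a PROFILE run that writes
# checkpoints under ./checkpoints (moved to result_path afterwards), followed by an
# ANALYZE run that exports the per-model inference and GPU metric CSV files.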
def _perf_analyzer_evaluation(
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
concurrency_steps: int,
batching_mode: BatchingMode,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
verbose: bool = False,
):
protocol, host, port = parse_server_url(server_url)
    if batching_mode == BatchingMode.STATIC:
        max_concurrency = 1
        min_concurrency = 1
        step = 1
elif batching_mode == BatchingMode.DYNAMIC:
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances
max_concurrency = min(256, max_total_requests)
step = max(1, max_concurrency // concurrency_steps)
min_concurrency = step
batch_sizes = [max(1, max_total_requests // 256)]
else:
raise ValueError(f"Unsupported batching mode: {batching_mode}")
_log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"number_of_triton_instances": number_of_triton_instances,
"number_of_model_instances": number_of_model_instances,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"concurrency_steps": concurrency_steps,
"batching_mode": batching_mode,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"result_path": result_path,
"verbose": verbose,
},
)
results: List[Dict] = list()
for batch_size in batch_sizes:
for concurrency in range(min_concurrency, max_concurrency + step, step):
performance_partial_file = f"triton_performance_{evaluation_mode.value.lower()}_{batching_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv"
params = {
"model-name": model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{host}:{port}",
"protocol": protocol,
"input-data": input_data,
"measurement-interval": measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"latency-report-file": performance_partial_file,
}
if verbose:
params["extra-verbose"] = True
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = measurement_mode.value
params["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = offline_mode.value
params["output-shared-memory-size"] = output_shared_memory_size
if verbose:
_log_dict(f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}", params)
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config)
perf_analyzer.run()
_update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=result_path.as_posix(), data=results)
show_results(results=results)
def _run_performance_analysis(
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
concurrency_steps: int,
batching_mode: BatchingMode,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
output_shared_memory_size: int,
performance_tool: PerformanceTool,
model_repository: str,
result_path: pathlib.Path,
warmup: bool,
verbose: bool,
):
log_level = logging.INFO if not verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
if performance_tool == PerformanceTool.MODEL_ANALYZER:
if result_path.suffix:
            raise ValueError(
                "Results path for Model Analyzer is invalid. Please provide a directory name. Example: results"
            )
elif performance_tool == PerformanceTool.PERF_ANALYZER:
if result_path.suffix != ".csv":
            raise ValueError(
                "Results path for Perf Analyzer is invalid. Please provide a CSV file name. Example: results.csv"
            )
else:
raise ValueError(f"Unsupported performance tool {performance_tool}")
if warmup:
LOGGER.info("Running warmup before the main test")
performance_evaluation_warmup(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
number_of_triton_instances=number_of_triton_instances,
number_of_model_instances=number_of_model_instances,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
batching_mode=batching_mode,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
)
if performance_tool == PerformanceTool.MODEL_ANALYZER:
LOGGER.info("Using Model Analyzer for performance evaluation")
_model_analyzer_evaluation(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
number_of_triton_instances=number_of_triton_instances,
number_of_model_instances=number_of_model_instances,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
concurrency_steps=concurrency_steps,
batching_mode=batching_mode,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
model_repository=model_repository,
result_path=result_path,
verbose=verbose,
)
elif performance_tool == PerformanceTool.PERF_ANALYZER:
LOGGER.info("Using Perf Analyzer for performance evaluation")
_perf_analyzer_evaluation(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
number_of_triton_instances=number_of_triton_instances,
number_of_model_instances=number_of_model_instances,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
concurrency_steps=concurrency_steps,
batching_mode=batching_mode,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
result_path=result_path,
verbose=verbose,
)
else:
raise ValueError(f"Unsupported performance tool {performance_tool}")
class MeasurementMode(Enum):
"""
Available measurement stabilization modes
"""
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
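# These values are passed verbatim to perf_analyzer's "measurement-mode" option (see the
# perf_analyzer_config and params dictionaries above), so they must match the modes
# supported by the installed perf_analyzer binary.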
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--server-url",
type=str,
required=False,
default="http://127.0.0.1:8000",
        help="URL to the Triton server (including the protocol prefix)",
)
parser.add_argument(
"--model-name",
type=str,
required=True,
help="Name of the model to test",
)
parser.add_argument(
"--input-data",
type=str,
required=False,
default="random",
help="Input data to perform profiling.",
)
parser.add_argument(
"--input-shapes",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument(
"--batch-sizes",
type=str,
required=True,
        help="List of batch sizes to test. Comma separated.",
)
parser.add_argument(
"--number-of-triton-instances",
type=int,
default=1,
help="Number of Triton Server instances",
)
parser.add_argument(
"--number-of-model-instances",
type=int,
default=1,
        help="Number of model instances on Triton Server",
)
parser.add_argument(
"--measurement-mode",
choices=[item.value for item in MeasurementMode],
default=MeasurementMode.COUNT_WINDOWS.value,
type=str,
        help="Select measurement mode. "
        "'time_windows' stabilizes the measurement over a time window. "
        "'count_windows' stabilizes the measurement over a number of samples.",
)
parser.add_argument(
"--measurement-interval",
required=False,
        help="Time window, in milliseconds, over which perf_analyzer stabilizes the measurement",
default=5000,
type=int,
)
parser.add_argument(
"--measurement-request-count",
required=False,
        help="Number of requests over which perf_analyzer stabilizes the measurement",
default=50,
type=int,
)
parser.add_argument(
"--concurrency-steps",
        help="Number of concurrency steps used for dynamic batching tests",
default=32,
type=int,
)
parser.add_argument(
"--batching-mode",
choices=[item.value for item in BatchingMode],
default=BatchingMode.STATIC.value,
type=str,
        help="Select batching mode. "
        "'static' runs the static batching scenario. "
        "'dynamic' runs the dynamic batching scenario.",
)
parser.add_argument(
"--evaluation-mode",
choices=[item.value for item in EvaluationMode],
default=EvaluationMode.OFFLINE.value,
type=str,
        help="Select evaluation mode. "
        "'offline' runs the offline analysis and passes tensors through shared memory. "
        "'online' runs the online analysis and sends tensors over the network protocol.",
)
parser.add_argument(
"--offline-mode",
choices=[item.value for item in OfflineMode],
default=OfflineMode.SYSTEM.value,
type=str,
        help="Select offline mode. "
        "'system' passes tensors through CPU (system) memory. "
        "'cuda' passes tensors through GPU memory.",
)
parser.add_argument(
"--output-shared-memory-size",
default=100240,
type=int,
        help="Size, in bytes, of the shared-memory buffer allocated for outputs with dynamic shapes. "
        "Has to be large enough to hold the largest output tensor.",
)
parser.add_argument(
"--performance-tool",
choices=[item.value for item in PerformanceTool],
default=PerformanceTool.MODEL_ANALYZER.value,
type=str,
        help="Select the performance tool used for measurements. "
        "'model_analyzer' uses Model Analyzer. "
        "'perf_analyzer' uses Perf Analyzer.",
)
parser.add_argument(
"--model-repository",
default=None,
type=str,
        help="Path to the model repository. Used only with Model Analyzer",
)
    parser.add_argument("--result-path", type=pathlib.Path, required=True, help="Path where the results file is stored.")
parser.add_argument(
"--warmup", help="Enable model warmup before performance test", action="store_true", default=False
)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args = parser.parse_args()
    batch_sizes = [int(batch_size) for batch_size in args.batch_sizes.split(",")]
_run_performance_analysis(
server_url=args.server_url,
model_name=args.model_name,
input_data=args.input_data,
input_shapes=args.input_shapes or [],
batch_sizes=batch_sizes,
number_of_triton_instances=args.number_of_triton_instances,
number_of_model_instances=args.number_of_model_instances,
measurement_mode=MeasurementMode(args.measurement_mode),
measurement_interval=args.measurement_interval,
measurement_request_count=args.measurement_request_count,
concurrency_steps=args.concurrency_steps,
batching_mode=BatchingMode(args.batching_mode),
evaluation_mode=EvaluationMode(args.evaluation_mode),
offline_mode=OfflineMode(args.offline_mode),
output_shared_memory_size=args.output_shared_memory_size,
performance_tool=PerformanceTool(args.performance_tool),
model_repository=args.model_repository,
result_path=args.result_path,
warmup=args.warmup,
verbose=args.verbose,
)
if __name__ == "__main__":
main()
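# Illustrative invocation (the script name is hypothetical; the flags are the ones defined
# by the argument parser above, and the values should be adapted to your deployment):
#
#   python run_performance_on_triton.py \
#       --server-url http://127.0.0.1:8000 \
#       --model-name MyModel \
#       --batch-sizes 1,2,4,8 \
#       --performance-tool perf_analyzer \
#       --result-path results.csv \
#       --warmup \
#       --verbose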
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator | generator | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data | data | add_target_dataset | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from . import data_utils
class BaseWrapperDataset(torch.utils.data.Dataset):
def __init__(self, dataset):
super().__init__()
self.dataset = dataset
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
@property
def sizes(self):
return self.dataset.sizes
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
def ordered_indices(self):
return self.dataset.ordered_indices()
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
num_concat_batches=1,
):
return self.dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
num_concat_batches=num_concat_batches,
)
def filter_indices_by_size(self, indices, max_sizes):
return self.dataset.filter_indices_by_size(indices, max_sizes)
class AddTargetDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
labels,
pad,
eos,
batch_targets,
process_label=None,
add_to_input=False,
):
super().__init__(dataset)
self.labels = labels
self.batch_targets = batch_targets
self.pad = pad
self.eos = eos
self.process_label = process_label
self.add_to_input = add_to_input
def get_label(self, index):
return (
self.labels[index]
if self.process_label is None
else self.process_label(self.labels[index])
)
def __getitem__(self, index):
item = self.dataset[index]
item["label"] = self.get_label(index)
return item
def size(self, index):
sz = self.dataset.size(index)
own_sz = len(self.get_label(index))
return (sz, own_sz)
def collater(self, samples):
collated = self.dataset.collater(samples)
if len(collated) == 0:
return collated
indices = set(collated["id"].tolist())
target = [s["label"] for s in samples if s["id"] in indices]
if self.batch_targets:
collated["target_lengths"] = torch.LongTensor([len(t) for t in target])
target = data_utils.collate_tokens(target, pad_idx=self.pad, left_pad=False)
collated["ntokens"] = collated["target_lengths"].sum().item()
else:
collated["ntokens"] = sum([len(t) for t in target])
collated["target"] = target
if self.add_to_input:
eos = target.new_full((target.size(0), 1), self.eos)
collated["target"] = torch.cat([target, eos], dim=-1).long()
collated["net_input"]["prev_output_tokens"] = torch.cat(
[eos, target], dim=-1
).long()
collated["ntokens"] += target.size(0)
return collated
def __setattr__(self, attr, val):
if attr == "batch_ids":
self.dataset.batch_ids = val
else:
super().__setattr__(attr, val)
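# Minimal usage sketch (illustrative; `base_dataset` and `target_dictionary` are
# assumptions standing in for whatever dataset and dictionary the caller provides,
# not objects defined in this module):
#
#   labels = [target_dictionary.encode_line(line) for line in open("train.ltr")]
#   dataset = AddTargetDataset(
#       base_dataset,
#       labels,
#       pad=target_dictionary.pad(),
#       eos=target_dictionary.eos(),
#       batch_targets=True,
#   )
#   batch = dataset.collater([dataset[i] for i in range(4)])
#   # batch["target"] is a padded LongTensor, batch["target_lengths"] holds the
#   # per-sample label lengths, and batch["ntokens"] is their sum.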
|