| relative_path (stringclasses, 812 values) | section (stringclasses, 339 values) | filename (stringlengths, 2–61) | text (stringlengths, 6–1.76M) |
---|---|---|---|
PyTorch/LanguageModeling/BERT/data | data | __init__ | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | decoderInstancePlugins | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "decoderInstancePlugins.h"
#include "cudaUtils.h"
#include "dataShuffler.h"
#include "dims5.h"
#include "trtUtils.h"
#include <algorithm>
#include <cuda_runtime.h>
#include <numeric>
#include <stdexcept>
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
void setBatchDimension(IExecutionContext& context, const std::string& bindingName, const int batchSize)
{
const ICudaEngine& engine = context.getEngine();
const int bindingIndex = engine.getBindingIndex(bindingName.c_str());
if (bindingIndex < 0)
{
throw std::runtime_error("Failed to get binding " + bindingName);
}
Dims baseDims = context.getBindingDimensions(bindingIndex);
baseDims.d[0] = batchSize;
if (!context.setBindingDimensions(bindingIndex, baseDims))
{
throw std::runtime_error(
"Failed to set dimensions for " + bindingName + " as " + TRTUtils::dimsToString(baseDims));
}
}
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
DecoderInstancePlugins::DecoderInstancePlugins(
TRTPtr<ICudaEngine> engine, const int maxChunkSize) :
DecoderInstance(std::move(engine), maxChunkSize),
mNumEncodingDim(
TRTUtils::getBindingDimension(getEngine(), INPUT_MEMORY_NAME, 2)),
mNumAttentionDim(
TRTUtils::getBindingDimension(getEngine(), INPUT_PROCESSED_NAME, 2)),
mDimsSet(false),
mBinding(),
mInputWeightsDevice(
getMaxBatchSize()
* TRTUtils::getMaxBindingSize(getEngine(), INPUT_WEIGHTS_NAME)),
mOutputWeightsDevice(
getMaxBatchSize()
* TRTUtils::getMaxBindingSize(getEngine(), INPUT_WEIGHTS_NAME)),
mInAttentionHiddenStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_ATTENTIONHIDDEN_NAME)),
mInAttentionCellStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_ATTENTIONCELL_NAME)),
mOutAttentionHiddenStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), OUTPUT_ATTENTIONHIDDEN_NAME)),
mOutAttentionCellStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), OUTPUT_ATTENTIONCELL_NAME)),
mInputAttentionContextDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_CONTEXT_NAME)),
mOutputAttentionContextDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), OUTPUT_CONTEXT_NAME)),
mInDecoderHiddenStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_DECODERHIDDEN_NAME)),
mInDecoderCellStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_DECODERCELL_NAME)),
mOutDecoderHiddenStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), OUTPUT_DECODERHIDDEN_NAME)),
mOutDecoderCellStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), OUTPUT_DECODERCELL_NAME))
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void DecoderInstancePlugins::reset(cudaStream_t stream)
{
DecoderInstance::reset(stream);
mInputWeightsDevice.zeroAsync(stream);
mInAttentionHiddenStatesDevice.zeroAsync(stream);
mInAttentionCellStatesDevice.zeroAsync(stream);
mInputAttentionContextDevice.zeroAsync(stream);
mOutputAttentionContextDevice.zeroAsync(stream);
mInDecoderHiddenStatesDevice.zeroAsync(stream);
mInDecoderCellStatesDevice.zeroAsync(stream);
mDimsSet = false;
}
/******************************************************************************
* PROTECTED METHODS **********************************************************
*****************************************************************************/
void DecoderInstancePlugins::decode(cudaStream_t stream, IExecutionContext& context, const int batchSize,
const float* const inputLastFrameDevice, const float* const inputMemoryDevice,
const float* const inputProcessedMemoryDevice, const float* const /* inputMaskDevice */,
const int32_t* const inputLengthHost, const int32_t* const /*inputLengthDevice*/,
const float* const inputDropoutDevice, float* const outputChannelsDevice)
{
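// Run a single decoder step: on the first call for this batch, fix the dynamic
// input dimensions, then bind all input/output device pointers, enqueue the
// engine on the given stream, and finally swap the state buffers so that this
// step's outputs feed the next step.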
const ICudaEngine& engine = context.getEngine();
if (!mDimsSet)
{
// All input dimensions need to be set, since they all have a dynamic batch dimension.
// The next three bindings also have the input length as a dynamic dimension.
context.setBindingDimensions(
engine.getBindingIndex(INPUT_MEMORY_NAME), Dims3(batchSize, inputLengthHost[0], mNumEncodingDim));
context.setBindingDimensions(
engine.getBindingIndex(INPUT_PROCESSED_NAME), Dims5(batchSize, inputLengthHost[0], mNumAttentionDim, 1, 1));
context.setBindingDimensions(
engine.getBindingIndex(INPUT_WEIGHTS_NAME), Dims4(batchSize, 2, inputLengthHost[0], 1));
// only have batch as dynamic dimension
setBatchDimension(context, INPUT_DROPOUT_NAME, batchSize);
setBatchDimension(context, INPUT_LASTFRAME_NAME, batchSize);
setBatchDimension(context, INPUT_CONTEXT_NAME, batchSize);
setBatchDimension(context, INPUT_ATTENTIONHIDDEN_NAME, batchSize);
setBatchDimension(context, INPUT_ATTENTIONCELL_NAME, batchSize);
setBatchDimension(context, INPUT_DECODERHIDDEN_NAME, batchSize);
setBatchDimension(context, INPUT_DECODERCELL_NAME, batchSize);
mDimsSet = true;
}
mBinding.setBinding(engine, INPUT_DROPOUT_NAME, inputDropoutDevice);
mBinding.setBinding(engine, INPUT_MEMORY_NAME, inputMemoryDevice);
mBinding.setBinding(engine, INPUT_PROCESSED_NAME, inputProcessedMemoryDevice);
mBinding.setBinding(engine, INPUT_WEIGHTS_NAME, mInputWeightsDevice.data());
mBinding.setBinding(engine, INPUT_LASTFRAME_NAME, inputLastFrameDevice);
mBinding.setBinding(engine, INPUT_CONTEXT_NAME, mInputAttentionContextDevice.data());
mBinding.setBinding(engine, INPUT_ATTENTIONHIDDEN_NAME, mInAttentionHiddenStatesDevice.data());
mBinding.setBinding(engine, INPUT_ATTENTIONCELL_NAME, mInAttentionCellStatesDevice.data());
mBinding.setBinding(engine, INPUT_DECODERHIDDEN_NAME, mInDecoderHiddenStatesDevice.data());
mBinding.setBinding(engine, INPUT_DECODERCELL_NAME, mInDecoderCellStatesDevice.data());
mBinding.setBinding(engine, OUTPUT_CONTEXT_NAME, mOutputAttentionContextDevice.data());
mBinding.setBinding(engine, OUTPUT_WEIGHTS_NAME, mOutputWeightsDevice.data());
mBinding.setBinding(engine, OUTPUT_ATTENTIONHIDDEN_NAME, mOutAttentionHiddenStatesDevice.data());
mBinding.setBinding(engine, OUTPUT_ATTENTIONCELL_NAME, mOutAttentionCellStatesDevice.data());
mBinding.setBinding(engine, OUTPUT_DECODERHIDDEN_NAME, mOutDecoderHiddenStatesDevice.data());
mBinding.setBinding(engine, OUTPUT_DECODERCELL_NAME, mOutDecoderCellStatesDevice.data());
mBinding.setBinding(engine, OUTPUT_CHANNELS_NAME, outputChannelsDevice);
if (!context.enqueueV2(mBinding.getBindings(), stream, nullptr))
{
throw std::runtime_error("Failed to run decoder.");
}
// Swap the state buffers so that this step's outputs become the next step's inputs.
std::swap(mInputWeightsDevice, mOutputWeightsDevice);
std::swap(mInputAttentionContextDevice, mOutputAttentionContextDevice);
std::swap(mInAttentionHiddenStatesDevice, mOutAttentionHiddenStatesDevice);
std::swap(mInAttentionCellStatesDevice, mOutAttentionCellStatesDevice);
std::swap(mInDecoderHiddenStatesDevice, mOutDecoderHiddenStatesDevice);
std::swap(mInDecoderCellStatesDevice, mOutDecoderCellStatesDevice);
}
} // namespace tts
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | layerData | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_LAYERDATA_H
#define TT2I_LAYERDATA_H
#include "NvInfer.h"
#include <map>
#include <vector>
namespace tts
{
class LayerData
{
public:
/**
* @brief Allocate an empty LayerData object.
*/
LayerData();
/**
* @brief Move constructor.
*
* @param other The object to move.
*/
LayerData(LayerData&& other) = default;
/**
* @brief Move assignment operator.
*
* @param other The object to move.
*
* @return This object.
*/
LayerData& operator=(LayerData&& other) = default;
// prevent copying
LayerData(const LayerData& other) = delete;
LayerData& operator=(const LayerData& other) = delete;
/**
* @brief Get the weights with the given name (e.g., "weight", "bias").
*
* @param name The name of the weights.
*
* @return The weights.
*/
nvinfer1::Weights get(const std::string& name) const;
/**
* @brief Check if weights with the given name exist in this object.
*
* @param name The name of the weights.
*
 * @return True if weights with the given name exist, false otherwise.
*/
bool has(const std::string& name) const;
/**
* @brief Insert new weights into this object.
*
* @tparam ITER The type of iterator.
* @param name The name of the weights.
* @param start The iterator to the start of the weights.
* @param end The iterator to the end of the weights (exclusive).
*/
template <typename ITER>
void add(const std::string& name, const ITER start, const ITER end)
{
mKeys.emplace(name, mKeys.size());
mData.insert(mData.end(), start, end);
mPrefix.emplace_back(mData.size());
}
/**
* @brief Insert the new weights into this object.
*
* @param name The name of the weights.
* @param vec The vector of weights.
*/
void add(const std::string& name, const std::vector<float>& vec)
{
add(name, vec.begin(), vec.end());
}
friend std::ostream& operator<<(std::ostream& stream, const LayerData& data);
private:
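// mKeys maps each weight name to its insertion index, mData stores all weight
// values back to back in insertion order, and mPrefix[i] holds the end offset
// of entry i within mData (its start offset is mPrefix[i - 1], or 0 for i == 0).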
std::map<std::string, size_t> mKeys;
std::vector<size_t> mPrefix;
std::vector<float> mData;
};
/**
 * @brief Print a LayerData object in human-readable form to the output
 * stream.
*
* @param stream The stream.
* @param data The LayerData to write.
*
* @return The stream.
*/
std::ostream& operator<<(std::ostream& stream, const LayerData& data);
} // namespace tts
#endif
|
Tools/DGLPyTorch/SyntheticGraphGeneration/configurations | configurations | epinions | {
"nodes": [
{
"name": "user",
"count": 49289,
"features": [],
"features_path": null
},
{
"name": "item",
"count": 139738,
"features": [],
"features_path": null
}
],
"edges": [
{
"name": "user-item",
"count": 664824,
"src_node_type": "user",
"dst_node_type": "item",
"directed": false,
"features": [
{
"name": "rating",
"dtype": "int64",
"feature_type": "categorical"
}
],
"features_path": "user-item.parquet",
"structure_path": "user-item_edge_list.parquet",
"[gen]structure_generator": {
"type": "RMAT",
"data_source": {
"type": "cfg",
"path": "/workspace/data/epinions/syngen_preprocessed",
"name": "user-item"
},
"params": {
"seed": 42
}
},
"[gen]tabular_generators": [
{
"type": "kde",
"features_list": ["rating"],
"data_source": {
"type": "dataset",
"path": "/workspace/data/epinions/syngen_preprocessed/user-item.parquet"
},
"params": {
}
}
]
},
{
"name": "user-user",
"count": 487183,
"src_node_type": "user",
"dst_node_type": "user",
"reverse_name": "user-user-rev",
"features": [],
"features_path": null,
"structure_path": "user-user_edge_list.parquet",
"[gen]structure_generator": {
"type": "RMAT",
"data_source": {
"type": "cfg",
"path": "/workspace/data/epinions/syngen_preprocessed",
"name": "user-user"
},
"params": {
"seed": 42,
"has_self_loop": false
}
}
}
],
"[gen]aligners": [
{
"type": "xgboost",
"graphs": ["user-item", "user-user"],
"edges": {
"user-item": ["rating"]
},
"nodes": {},
"params": {
}
}
]
} |
PyTorch/SpeechRecognition/wav2vec2/common | common | filter_warnings | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
# NGC 22.04-py3 container (PyTorch 1.12.0a0+bd13bc6)
warnings.filterwarnings(
"ignore",
message='positional arguments and argument "destination" are deprecated.'
' nn.Module.state_dict will not accept them in the future.')
# NGC ~22.05-py3
warnings.filterwarnings(
"ignore", message="pyprof will be removed by the end of June, 2022")
# 22.08-py3 RC
warnings.filterwarnings(
"ignore",
message="is_namedtuple is deprecated, please use the python checks")
|
TensorFlow2/Recommendation/WideAndDeep | WideAndDeep | requirements | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
git+https://github.com/NVIDIA/[email protected]#egg=dllogger
|
TensorFlow/Classification/ConvNets/triton | triton | README | # Deploying the ResNet-50 v1.5 model on Triton Inference Server
This folder contains instructions for deploying the model to run inference
on Triton Inference Server, as well as a detailed performance analysis.
The purpose of this document is to help you achieve
the best inference performance.
## Table of contents
- [Solution overview](#solution-overview)
- [Introduction](#introduction)
- [Deployment process](#deployment-process)
- [Setup](#setup)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
- [Prepare configuration](#prepare-configuration)
- [Latency explanation](#latency-explanation)
- [Performance](#performance)
- [Offline scenario](#offline-scenario)
- [Offline: NVIDIA A40, TF-TRT with FP16](#offline-nvidia-a40-tf-trt-with-fp16)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), TF-TRT with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-tf-trt-with-fp16)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), TF-TRT with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-tf-trt-with-fp16)
- [Offline: NVIDIA T4, TF-TRT with FP16](#offline-nvidia-t4-tf-trt-with-fp16)
- [Online scenario](#online-scenario)
- [Online: NVIDIA A40, TF-TRT with FP16](#online-nvidia-a40-tf-trt-with-fp16)
- [Online: NVIDIA DGX A100 (1x A100 80GB), TF-TRT with FP16](#online-nvidia-dgx-a100-1x-a100-80gb-tf-trt-with-fp16)
- [Online: NVIDIA DGX-1 (1x V100 32GB), TF-TRT with FP16](#online-nvidia-dgx-1-1x-v100-32gb-tf-trt-with-fp16)
- [Online: NVIDIA T4, TF-TRT with FP16](#online-nvidia-t4-tf-trt-with-fp16)
- [Release Notes](#release-notes)
- [Changelog](#changelog)
- [Known issues](#known-issues)
## Solution overview
### Introduction
The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server)
provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs.
The server provides an inference service via an HTTP or gRPC endpoint,
allowing remote clients to request inferencing for any number of GPU
or CPU models being managed by the server.
This README provides step-by-step deployment instructions for models generated
during training (as described in the [model README](../README.md)).
Additionally, this README provides the corresponding deployment scripts that
ensure optimal GPU utilization during inferencing on Triton Inference Server.
### Deployment process
The deployment process consists of two steps:
1. Conversion. The purpose of conversion is to find the best performing model
format supported by Triton Inference Server.
Triton Inference Server uses a number of runtime backends such as
[TensorRT](https://developer.nvidia.com/tensorrt),
[TensorFlow](https://github.com/triton-inference-server/tensorflow_backend) and
[ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend)
to support various model types. Refer to
[Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton)
for a list of available backends.
2. Configuration. Configuring the model on Triton Inference Server, which generates
the necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md).
To run benchmarks measuring the model performance in inference,
perform the following steps:
1. Start the Triton Inference Server.
The Triton Inference Server is started in one (possibly remote) container,
and the ports for its gRPC and REST APIs are exposed.
2. Run accuracy tests.
Produce results which are tested against given accuracy thresholds.
Refer to step 8 in the [Quick Start Guide](#quick-start-guide).
3. Run performance tests.
Produce latency and throughput results for offline (static batching)
and online (dynamic batching) scenarios.
Refer to step 11 in the [Quick Start Guide](#quick-start-guide).
## Setup
Ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [TensorFlow1 NGC container 20.12](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow)
* [Triton Inference Server NGC container 20.12](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver)
* [NVIDIA CUDA repository](https://docs.nvidia.com/cuda/archive/11.1.1/index.html)
* [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU
## Quick Start Guide
Running the following scripts will build and launch the container with all
required dependencies for native TensorFlow as well as Triton Inference Server.
This is necessary for running inference and can also be used for data download,
processing, and training of the model.
1. Clone the repository.
IMPORTANT: This step is executed on the host computer.
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/TensorFlow/Classification/ConvNets
```
2. Set up the environment on the host PC and start Triton Inference Server.
```
source triton/scripts/setup_environment.sh
bash triton/scripts/docker/triton_inference_server.sh
```
3. Build and run a container that extends the NGC TensorFlow container with
the Triton Inference Server client libraries and dependencies.
```
bash triton/scripts/docker/build.sh
bash triton/scripts/docker/interactive.sh
```
4. Prepare the deployment configuration and create folders in Docker.
IMPORTANT: These and the following commands must be executed in the TensorFlow NGC container.
```
source triton/scripts/setup_environment.sh
```
5. Download and pre-process the dataset.
```
bash triton/scripts/download_data.sh
bash triton/scripts/process_dataset.sh
```
6. Set up the parameters for deployment.
```
source triton/scripts/setup_parameters.sh
```
7. Convert the model from training to inference format (e.g. TensorRT).
```
python3 triton/convert_model.py \
--input-path triton/rn50_model.py \
--input-type tf-estimator \
--output-path ${SHARED_DIR}/model \
--output-type ${FORMAT} \
--onnx-opset 12 \
--onnx-optimized 1 \
--max-batch-size ${MAX_BATCH_SIZE} \
--max-workspace-size 4294967296 \
--ignore-unknown-parameters \
\
--model-dir ${CHECKPOINT_DIR} \
--precision ${PRECISION} \
--dataloader triton/dataloader.py \
--data-dir ${DATASETS_DIR}/imagenet
```
8. Run the model accuracy tests in the framework.
```
python3 triton/run_inference_on_fw.py \
--input-path ${SHARED_DIR}/model \
--input-type ${FORMAT} \
--dataloader triton/dataloader.py \
--data-dir ${DATASETS_DIR}/imagenet \
--images-num 256 \
--batch-size ${MAX_BATCH_SIZE} \
--output-dir ${SHARED_DIR}/correctness_dump \
--dump-labels
python3 triton/calculate_metrics.py \
--dump-dir ${SHARED_DIR}/correctness_dump \
--metrics triton/metrics.py \
--csv ${SHARED_DIR}/correctness_metrics.csv
cat ${SHARED_DIR}/correctness_metrics.csv
```
9. Configure the model on Triton Inference Server.
Generate the configuration from your model repository.
```
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/model \
--model-format ${FORMAT} \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--batching dynamic \
--max-queue-delay-us ${TRITON_MAX_QUEUE_DELAY} \
--preferred-batch-sizes ${TRITON_PREFERRED_BATCH_SIZES} \
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--max-batch-size ${MAX_BATCH_SIZE} \
--engine-count-per-device ${DEVICE_KIND}=${NUMBER_OF_MODEL_INSTANCES}
```
10. Run the Triton Inference Server accuracy tests.
```
python3 triton/run_inference_on_triton.py \
--server-url localhost:8001 \
--model-name ${MODEL_NAME} \
--model-version 1 \
--dataloader triton/dataloader.py \
--data-dir ${DATASETS_DIR}/imagenet \
--batch-size ${MAX_BATCH_SIZE} \
--output-dir ${SHARED_DIR}/accuracy_dump \
--dump-labels
python3 triton/calculate_metrics.py \
--dump-dir ${SHARED_DIR}/accuracy_dump \
--metrics triton/metrics.py \
--csv ${SHARED_DIR}/accuracy_metrics.csv
cat ${SHARED_DIR}/accuracy_metrics.csv
```
11. Run the Triton Inference Server performance online tests.
We want to maximize throughput within latency budget constraints.
Dynamic batching is a feature of Triton Inference Server that allows
inference requests to be combined by the server, so that a batch is
created dynamically, resulting in a reduced average latency.
In the Triton Inference Server model configuration, you can set the dynamic batcher
parameter `max_queue_delay_microseconds` to indicate the maximum amount of time
you are willing to wait for a batch to be formed, and `preferred_batch_size` to
indicate the batch sizes that the dynamic batcher should prefer to create.
The measurements presented below set the maximum queue delay to a near-zero value
to achieve the best latency possible while maintaining good throughput.
A minimal client-side sketch of this online scenario is shown after this list.
```
python triton/run_online_performance_test_on_triton.py \
    --server-url ${TRITON_SERVER_URL} \
    --model-name ${MODEL_NAME} \
    --input-data random \
    --batch-sizes ${BATCH_SIZE} \
    --triton-instances ${TRITON_INSTANCES} \
    --number-of-model-instances ${NUMBER_OF_MODEL_INSTANCES} \
    --result-path ${SHARED_DIR}/triton_performance_online.csv
```
12. Run the Triton Inference Server performance offline tests.
We want to maximize throughput. This scenario assumes that the data is already
available for inference, or that incoming requests fill the maximum batch size quickly.
Triton Inference Server supports offline scenarios with static batching,
in which each request is served with the batch it arrives with.
The largest improvements to throughput come
from increasing the batch size, due to efficiency gains in the GPU with larger
batches.
```
python triton/run_offline_performance_test_on_triton.py \
    --server-url ${TRITON_SERVER_URL} \
    --model-name ${MODEL_NAME} \
    --input-data random \
    --batch-sizes ${BATCH_SIZE} \
    --triton-instances ${TRITON_INSTANCES} \
    --result-path ${SHARED_DIR}/triton_performance_offline.csv
```
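The performance scripts above drive the server for you. As a minimal client-side sketch of the online scenario, the snippet below sends independent single-image requests over gRPC so that the server-side dynamic batcher can group them into larger batches. The model name, the `input`/`classes` tensor names, and the input shape are assumptions for illustration; in practice they should be read from your generated model configuration.
```
import numpy as np
import tritonclient.grpc as grpcclient

client = grpcclient.InferenceServerClient(url="localhost:8001")

def infer_one(image: np.ndarray) -> np.ndarray:
    # One request per image; the server-side dynamic batcher may merge
    # concurrent requests into a larger batch before execution.
    inp = grpcclient.InferInput("input", list(image.shape), "FP32")
    inp.set_data_from_numpy(image)
    result = client.infer(model_name="resnet50-tf-trt", inputs=[inp])
    return result.as_numpy("classes")

# Example: one random 224x224 RGB image with a leading batch dimension of 1.
print(infer_one(np.random.rand(1, 224, 224, 3).astype(np.float32)).shape)
```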
## Advanced
### Prepare configuration
You can use the environment variables to set the parameters of your inference
configuration.
Triton deployment scripts support several inference runtimes listed in the table below:
| Inference runtime | Mnemonic used in scripts |
|--------------------|--------------------------|
| [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) | `tf-savedmodel` |
| [TensorFlow TensorRT](https://docs.nvidia.com/deeplearning/frameworks/tf-trt-user-guide/index.html) | `tf-trt` |
| [ONNX](https://onnx.ai) | `onnx` |
| [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` |
The name of the inference runtime should be put into the `FORMAT` variable.
Example values of some key variables in one configuration:
```
PRECISION="fp16"
FORMAT="tf-trt"
BATCH_SIZE="1, 2, 4, 8, 16, 32, 64, 128"
BACKEND_ACCELERATOR="none"
MAX_BATCH_SIZE="128"
NUMBER_OF_MODEL_INSTANCES="2"
TRITON_MAX_QUEUE_DELAY="1"
TRITON_PREFERRED_BATCH_SIZES="64 128"
DEVICE_KIND="gpu"
```
### Latency explanation
A typical Triton Inference Server pipeline can be broken down into the following steps:
1. The client serializes the inference request into a message and sends it to
the server (Client Send).
2. The message travels over the network from the client to the server (Network).
3. The message arrives at the server and is deserialized (Server Receive).
4. The request is placed on the queue (Server Queue).
5. The request is removed from the queue and computed (Server Compute).
6. The completed request is serialized in a message and sent back to
the client (Server Send).
7. The completed message then travels over the network from the server
to the client (Network).
8. The completed message is deserialized by the client and processed as
a completed inference request (Client Receive).
Generally, for local clients, steps 1-4 and 6-8 only occupy
a small fraction of the total time compared to step 5. As backend deep learning
systems such as this one are rarely exposed directly to end users, and instead
only interface with local front-end servers, we can consider all clients
to be local for the purposes of this analysis.
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Offline scenario
This table lists the common variable parameters for all performance measurements:
| Parameter Name | Parameter Value |
|:-----------------------------|:------------------|
| Max Batch Size | 128 |
| Number of model instances | 2 |
| Triton Max Queue Delay | 1 |
| Triton Preferred Batch Sizes | 64 128 |
#### Offline: NVIDIA A40, TF-TRT with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA A40
* **Backend:** TensorFlow
* **Model binding:** TF-TRT
* **Precision:** FP16
* **Model format:** TensorFlow SavedModel
<details>
<summary>
Full tabular data
</summary>
| Precision | Backend Accelerator | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|:---------------------|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP16 | TensorRT | 1 | 329.5 | 3.23 | 3.43 | 3.973 | 3.031 |
| FP16 | TensorRT | 2 | 513.8 | 4.292 | 4.412 | 4.625 | 3.888 |
| FP16 | TensorRT | 4 | 720.8 | 6.122 | 6.264 | 6.5 | 5.543 |
| FP16 | TensorRT | 8 | 919.2 | 9.145 | 9.664 | 10.3 | 8.701 |
| FP16 | TensorRT | 16 | 1000 | 17.522 | 17.979 | 19.098 | 16.01 |
| FP16 | TensorRT | 32 | 889.6 | 37.49 | 38.481 | 40.316 | 35.946 |
| FP16 | TensorRT | 64 | 992 | 66.837 | 67.923 | 70.324 | 64.645 |
| FP16 | TensorRT | 128 | 896 | 148.461 | 149.854 | 150.05 | 143.684 |
</details>
#### Offline: NVIDIA DGX A100 (1x A100 80GB), TF-TRT with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX A100 (1x A100 80GB)
* **Backend:** TensorFlow
* **Model binding:** TF-TRT
* **Precision:** FP16
* **Model format:** TensorFlow SavedModel
<details>
<summary>
Full tabular data
</summary>
| Precision | Backend Accelerator | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|:---------------------|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP16 | TensorRT | 1 | 387.9 | 2.626 | 2.784 | 2.875 | 2.574 |
| FP16 | TensorRT | 2 | 637.2 | 3.454 | 3.506 | 3.547 | 3.135 |
| FP16 | TensorRT | 4 | 982.4 | 4.328 | 4.454 | 4.627 | 4.07 |
| FP16 | TensorRT | 8 | 1181.6 | 7.012 | 7.074 | 7.133 | 6.765 |
| FP16 | TensorRT | 16 | 1446.4 | 11.162 | 11.431 | 11.941 | 11.061 |
| FP16 | TensorRT | 32 | 1353.6 | 24.392 | 24.914 | 25.178 | 23.603 |
| FP16 | TensorRT | 64 | 1478.4 | 45.539 | 46.096 | 47.546 | 43.401 |
| FP16 | TensorRT | 128 | 1331.2 | 97.504 | 100.611 | 101.896 | 96.198 |
</details>
#### Offline: NVIDIA DGX-1 (1x V100 32GB), TF-TRT with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX-1 (1x V100 32GB)
* **Backend:** TensorFlow
* **Model binding:** TF-TRT
* **Precision:** FP16
* **Model format:** TensorFlow SavedModel
<details>
<summary>
Full tabular data
</summary>
| Precision | Backend Accelerator | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|:---------------------|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP16 | TensorRT | 1 | 255.6 | 4.032 | 4.061 | 4.141 | 3.909 |
| FP16 | TensorRT | 2 | 419.2 | 4.892 | 4.94 | 5.133 | 4.766 |
| FP16 | TensorRT | 4 | 633.6 | 6.603 | 6.912 | 7.18 | 6.306 |
| FP16 | TensorRT | 8 | 865.6 | 9.657 | 9.73 | 9.834 | 9.236 |
| FP16 | TensorRT | 16 | 950.4 | 18.396 | 20.748 | 23.873 | 16.824 |
| FP16 | TensorRT | 32 | 854.4 | 37.965 | 38.599 | 40.34 | 37.432 |
| FP16 | TensorRT | 64 | 825.6 | 80.118 | 80.758 | 87.374 | 77.596 |
| FP16 | TensorRT | 128 | 704 | 189.198 | 189.87 | 191.259 | 183.205 |
</details>
#### Offline: NVIDIA T4, TF-TRT with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA T4
* **Backend:** TensorFlow
* **Model binding:** TF-TRT
* **Precision:** FP16
* **Model format:** TensorFlow SavedModel
<details>
<summary>
Full tabular data
</summary>
| Precision | Backend Accelerator | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|:---------------------|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP16 | TensorRT | 1 | 211.7 | 4.89 | 4.926 | 4.965 | 4.717 |
| FP16 | TensorRT | 2 | 327.8 | 6.258 | 6.309 | 6.436 | 6.094 |
| FP16 | TensorRT | 4 | 468.4 | 8.996 | 9.085 | 9.239 | 8.531 |
| FP16 | TensorRT | 8 | 544.8 | 15.654 | 15.978 | 16.324 | 14.673 |
| FP16 | TensorRT | 16 | 544 | 30.626 | 30.788 | 31.311 | 29.477 |
| FP16 | TensorRT | 32 | 524.8 | 64.527 | 65.35 | 66.13 | 60.943 |
| FP16 | TensorRT | 64 | 556.8 | 115.455 | 115.717 | 116.02 | 113.802 |
| FP16 | TensorRT | 128 | 537.6 | 242.501 | 244.599 | 246.16 | 238.384 |
</details>
### Online scenario
This table lists the common variable parameters for all performance measurements:
| Parameter Name | Parameter Value |
|:-----------------------------|:------------------|
| Max Batch Size | 128 |
| Number of model instances | 2 |
| Triton Max Queue Delay | 1 |
| Triton Preferred Batch Sizes | 64 128 |
#### Online: NVIDIA A40, TF-TRT with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA A40
* **Backend:** TensorFlow
* **Model binding:** TF-TRT
* **Precision:** FP16
* **Model format:** TensorFlow SavedModel

<details>
<summary>
Full tabular data
</summary>
| Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 16 | 1421.3 | 0.109 | 4.875 | 1.126 | 0.895 | 4.188 | 0.053 | 0 | 11.046 | 17.34 | 17.851 | 19.013 | 11.246 |
| 32 | 1920 | 0.118 | 8.402 | 1.47 | 1.323 | 5.277 | 0.09 | 0 | 16.328 | 28.052 | 29.871 | 31.932 | 16.68 |
| 48 | 2270.4 | 0.12 | 11.505 | 1.856 | 1.582 | 5.953 | 0.113 | 0 | 22.172 | 31.87 | 35.395 | 41.256 | 21.129 |
| 64 | 2401.9 | 0.12 | 14.443 | 2.299 | 2.358 | 7.285 | 0.149 | 0 | 26.69 | 37.388 | 40.73 | 47.503 | 26.654 |
| 80 | 2823 | 0.126 | 14.917 | 2.71 | 2.406 | 7.977 | 0.174 | 0 | 29.113 | 39.932 | 43.789 | 51.24 | 28.31 |
| 96 | 2903.8 | 0.133 | 18.824 | 2.929 | 2.595 | 8.364 | 0.18 | 0 | 33.951 | 46.785 | 51.878 | 60.37 | 33.025 |
| 112 | 3096.6 | 0.135 | 20.018 | 3.362 | 2.97 | 9.434 | 0.209 | 0 | 37.927 | 50.587 | 55.169 | 63.141 | 36.128 |
| 128 | 3252 | 0.138 | 21.092 | 3.912 | 3.445 | 10.505 | 0.245 | 0 | 41.241 | 53.912 | 58.961 | 68.864 | 39.337 |
| 144 | 3352.4 | 0.137 | 21.407 | 4.527 | 4.237 | 12.363 | 0.293 | 0 | 44.211 | 59.876 | 65.971 | 79.335 | 42.964 |
| 160 | 3387.4 | 0.137 | 22.947 | 5.179 | 4.847 | 13.805 | 0.326 | 0 | 48.423 | 65.393 | 69.568 | 81.288 | 47.241 |
| 176 | 3409.1 | 0.142 | 24.989 | 5.623 | 5.539 | 14.956 | 0.357 | 0 | 52.714 | 71.332 | 78.478 | 99.086 | 51.606 |
| 192 | 3481.8 | 0.143 | 25.661 | 6.079 | 6.666 | 16.442 | 0.372 | 0 | 55.383 | 79.276 | 95.479 | 122.295 | 55.363 |
| 208 | 3523.8 | 0.147 | 27.042 | 6.376 | 7.526 | 17.413 | 0.4 | 0 | 58.823 | 86.375 | 104.134 | 123.278 | 58.904 |
| 224 | 3587.2 | 0.148 | 29.648 | 6.776 | 7.659 | 17.85 | 0.411 | 0 | 61.973 | 91.804 | 107.987 | 130.413 | 62.492 |
| 240 | 3507.4 | 0.153 | 31.079 | 7.987 | 9.246 | 19.342 | 0.426 | 0 | 65.697 | 106.035 | 121.914 | 137.572 | 68.233 |
| 256 | 3504.4 | 0.16 | 34.664 | 8.252 | 9.886 | 19.567 | 0.461 | 0 | 70.708 | 115.965 | 127.808 | 147.327 | 72.99 |
</details>
#### Online: NVIDIA DGX A100 (1x A100 80GB), TF-TRT with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX A100 (1x A100 80GB)
* **Backend:** TensorFlow
* **Model binding:** TF-TRT
* **Precision:** FP16
* **Model format:** TensorFlow SavedModel

<details>
<summary>
Full tabular data
</summary>
| Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 16 | 1736.5 | 0.11 | 2.754 | 1.272 | 0.954 | 4.08 | 0.036 | 0 | 9.037 | 12.856 | 13.371 | 15.174 | 9.206 |
| 32 | 2418.9 | 0.114 | 5.15 | 1.494 | 1.361 | 5.031 | 0.072 | 0 | 13.234 | 20.638 | 21.717 | 23.352 | 13.222 |
| 48 | 2891.3 | 0.112 | 7.389 | 1.721 | 1.586 | 5.688 | 0.096 | 0 | 17.089 | 25.946 | 27.611 | 29.784 | 16.592 |
| 64 | 3432.6 | 0.11 | 7.866 | 2.11 | 2.126 | 6.301 | 0.131 | 0 | 19.322 | 25.971 | 28.845 | 34.024 | 18.644 |
| 80 | 3644.6 | 0.116 | 9.665 | 2.33 | 2.493 | 7.185 | 0.146 | 0 | 22.834 | 29.061 | 32.281 | 37.224 | 21.935 |
| 96 | 3902.2 | 0.116 | 11.138 | 2.676 | 2.828 | 7.684 | 0.166 | 0 | 25.589 | 32.572 | 35.307 | 40.123 | 24.608 |
| 112 | 3960.6 | 0.124 | 13.321 | 2.964 | 3.209 | 8.438 | 0.186 | 0 | 29.537 | 37.388 | 40.602 | 46.193 | 28.242 |
| 128 | 4137.7 | 0.124 | 14.325 | 3.372 | 3.646 | 9.244 | 0.219 | 0 | 31.587 | 41.968 | 44.993 | 51.38 | 30.93 |
| 144 | 4139.6 | 0.136 | 15.919 | 3.803 | 4.451 | 10.274 | 0.233 | 0 | 35.696 | 48.301 | 51.345 | 57.414 | 34.816 |
| 160 | 4300.5 | 0.134 | 16.453 | 4.341 | 4.934 | 10.979 | 0.274 | 0 | 38.495 | 50.566 | 53.943 | 61.406 | 37.115 |
| 176 | 4166.6 | 0.143 | 18.436 | 4.959 | 6.081 | 12.321 | 0.309 | 0 | 43.451 | 60.739 | 69.51 | 84.959 | 42.249 |
| 192 | 4281.3 | 0.138 | 19.585 | 5.201 | 6.571 | 13.042 | 0.313 | 0 | 46.175 | 62.718 | 69.46 | 83.032 | 44.85 |
| 208 | 4314.8 | 0.15 | 20.046 | 5.805 | 7.752 | 14.062 | 0.335 | 0 | 47.957 | 73.848 | 84.644 | 96.408 | 48.15 |
| 224 | 4388.2 | 0.141 | 21.393 | 6.105 | 8.236 | 14.85 | 0.343 | 0 | 50.449 | 77.534 | 88.553 | 100.727 | 51.068 |
| 240 | 4371.8 | 0.143 | 22.342 | 6.711 | 9.423 | 15.78 | 0.377 | 0 | 53.216 | 85.983 | 97.756 | 112.48 | 54.776 |
| 256 | 4617.3 | 0.144 | 23.392 | 6.595 | 9.466 | 15.568 | 0.367 | 0 | 54.703 | 86.054 | 93.95 | 105.917 | 55.532 |
</details>
#### Online: NVIDIA DGX-1 (1x V100 32GB), TF-TRT with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX-1 (1x V100 32GB)
* **Backend:** TensorFlow
* **Model binding:** TF-TRT
* **Precision:** FP16
* **Model format:** TensorFlow SavedModel

<details>
<summary>
Full tabular data
</summary>
| Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 16 | 1259.7 | 0.121 | 3.735 | 1.999 | 0.803 | 5.998 | 0.034 | 0 | 13.623 | 17.271 | 17.506 | 18.938 | 12.69 |
| 32 | 1686.4 | 0.17 | 6.9 | 2.33 | 2.212 | 7.303 | 0.07 | 0 | 18.836 | 28.302 | 30.423 | 32.916 | 18.985 |
| 48 | 1888.3 | 0.183 | 9.068 | 3.372 | 3.65 | 9.058 | 0.108 | 0.001 | 26.571 | 36.583 | 40.84 | 50.402 | 25.44 |
| 64 | 2103.9 | 0.204 | 12.416 | 3.146 | 4.304 | 10.127 | 0.145 | 0.001 | 32.401 | 37.121 | 41.252 | 49.094 | 30.343 |
| 80 | 2255.2 | 0.211 | 13.753 | 4.074 | 5.455 | 11.776 | 0.192 | 0.001 | 38.298 | 47.082 | 54.476 | 65.412 | 35.462 |
| 96 | 2376.6 | 0.214 | 16.22 | 4.873 | 5.972 | 12.911 | 0.208 | 0.001 | 43.008 | 52.947 | 57.126 | 69.778 | 40.399 |
| 112 | 2445.6 | 0.243 | 18.495 | 5.461 | 7.012 | 14.365 | 0.248 | 0.001 | 48.081 | 62.414 | 68.274 | 85.766 | 45.825 |
| 128 | 2534.2 | 0.261 | 19.294 | 6.486 | 7.925 | 16.312 | 0.282 | 0.001 | 52.894 | 68.475 | 74.852 | 89.979 | 50.561 |
| 144 | 2483.9 | 0.27 | 20.771 | 7.744 | 9.993 | 18.865 | 0.414 | 0.001 | 64.866 | 70.434 | 80.279 | 99.177 | 58.058 |
| 160 | 2512.8 | 0.302 | 24.205 | 7.838 | 11.217 | 19.689 | 0.373 | 0.001 | 69.085 | 85.576 | 95.016 | 109.455 | 63.625 |
| 176 | 2541 | 0.311 | 26.206 | 8.556 | 12.439 | 21.393 | 0.418 | 0.001 | 76.666 | 92.266 | 106.889 | 127.055 | 69.324 |
| 192 | 2623.4 | 0.33 | 27.783 | 9.058 | 13.198 | 22.181 | 0.433 | 0.001 | 79.724 | 97.736 | 111.44 | 142.418 | 72.984 |
| 208 | 2616.2 | 0.353 | 29.667 | 9.759 | 15.693 | 23.567 | 0.444 | 0.001 | 80.571 | 125.202 | 140.527 | 175.331 | 79.484 |
| 224 | 2693.9 | 0.369 | 32.283 | 9.941 | 15.769 | 24.304 | 0.439 | 0.001 | 78.743 | 137.09 | 151.955 | 183.397 | 83.106 |
| 240 | 2700.4 | 0.447 | 32.287 | 11.128 | 18.204 | 26.578 | 0.456 | 0.001 | 82.561 | 155.011 | 177.925 | 191.51 | 89.101 |
| 256 | 2743.8 | 0.481 | 34.688 | 11.834 | 19.087 | 26.597 | 0.459 | 0.001 | 89.387 | 153.866 | 177.805 | 204.319 | 93.147 |
</details>
#### Online: NVIDIA T4, TF-TRT with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA T4
* **Backend:** TensorFlow
* **Model binding:** TF-TRT
* **Precision:** FP16
* **Model format:** TensorFlow SavedModel

<details>
<summary>
Full tabular data
</summary>
| Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 16 | 731.4 | 0.271 | 6.9 | 3.745 | 2.073 | 8.802 | 0.081 | 0.001 | 25.064 | 28.863 | 29.7 | 32.01 | 21.873 |
| 32 | 935 | 0.273 | 12.023 | 3.48 | 4.375 | 13.885 | 0.141 | 0.001 | 31.339 | 50.564 | 52.684 | 55.823 | 34.178 |
| 48 | 1253 | 0.298 | 12.331 | 5.313 | 4.623 | 15.634 | 0.178 | 0.001 | 38.099 | 60.665 | 64.537 | 72.38 | 38.378 |
| 64 | 1368.3 | 0.303 | 15.3 | 6.926 | 4.9 | 19.118 | 0.2 | 0.001 | 48.758 | 66.391 | 73.271 | 81.537 | 46.748 |
| 80 | 1410.7 | 0.296 | 15.525 | 11.06 | 6.934 | 22.476 | 0.286 | 0.001 | 60.346 | 65.664 | 76.055 | 84.643 | 56.578 |
| 96 | 1473.1 | 0.309 | 18.846 | 11.746 | 7.825 | 26.165 | 0.319 | 0.001 | 69.785 | 77.337 | 91.586 | 100.918 | 65.211 |
| 112 | 1475.5 | 0.316 | 23.275 | 12.412 | 8.954 | 30.724 | 0.338 | 0.001 | 79.904 | 106.324 | 111.382 | 126.559 | 76.02 |
| 128 | 1535.9 | 0.328 | 23.486 | 14.64 | 10.057 | 34.534 | 0.352 | 0.001 | 89.451 | 110.789 | 121.814 | 140.139 | 83.398 |
| 144 | 1512.3 | 0.336 | 25.79 | 18.7 | 12.205 | 37.909 | 0.435 | 0.001 | 103.388 | 108.917 | 114.44 | 136.469 | 95.376 |
| 160 | 1533.6 | 0.406 | 29.825 | 17.67 | 13.751 | 42.259 | 0.44 | 0.001 | 111.899 | 140.67 | 154.76 | 191.391 | 104.352 |
| 176 | 1515.1 | 0.438 | 34.286 | 17.867 | 16.42 | 46.792 | 0.461 | 0.001 | 120.503 | 187.317 | 205.71 | 223.391 | 116.265 |
| 192 | 1532.2 | 0.476 | 34.796 | 18.86 | 19.071 | 51.446 | 0.483 | 0.001 | 124.044 | 211.466 | 226.921 | 237.664 | 125.133 |
| 208 | 1616.7 | 0.697 | 32.363 | 21.465 | 18.315 | 55.539 | 0.516 | 0.001 | 127.891 | 200.478 | 221.404 | 250.348 | 128.896 |
| 224 | 1541.5 | 0.702 | 35.932 | 22.786 | 22.138 | 62.657 | 0.527 | 0.001 | 141.32 | 248.069 | 263.661 | 276.579 | 144.743 |
| 240 | 1631.7 | 0.79 | 37.581 | 22.791 | 21.651 | 64.278 | 0.549 | 0.001 | 141.393 | 250.354 | 272.17 | 289.926 | 147.641 |
| 256 | 1607.4 | 0.801 | 39.342 | 29.09 | 23.416 | 66.866 | 0.593 | 0.001 | 157.87 | 262.818 | 280.921 | 310.504 | 160.109 |
</details>
## Release Notes
We’re constantly refining and improving our performance on AI
and HPC workloads even on the same hardware with frequent updates
to our software stack. For our latest performance data please refer
to these pages for
[AI](https://developer.nvidia.com/deep-learning-performance-training-inference)
and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.
### Changelog
July 2020
- Initial release
April 2021
- NVIDIA A100 results added
### Known issues
There are no known issues with this model.
|
TensorFlow2/Segmentation/UNet_Medical/runtime | runtime | run | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from time import time
import numpy as np
from PIL import Image
import horovod.tensorflow as hvd
import tensorflow as tf
from runtime.losses import partial_losses
from runtime.parse_results import process_performance_stats
from model.tf_trt import export_model, TFTRTModel
def train(params, model, dataset, logger):
np.random.seed(params.seed)
tf.random.set_seed(params.seed)
max_steps = params.max_steps // hvd.size()
optimizer = tf.keras.optimizers.Adam(learning_rate=params.learning_rate)
if params.use_amp:
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer, dynamic=True)
ce_loss = tf.keras.metrics.Mean(name='ce_loss')
f1_loss = tf.keras.metrics.Mean(name='dice_loss')
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
if params.resume_training and params.model_dir:
checkpoint.restore(tf.train.latest_checkpoint(params.model_dir))
@tf.function
def train_step(features, labels, warmup_batch=False):
with tf.GradientTape() as tape:
output_map = model(features)
crossentropy_loss, dice_loss = partial_losses(output_map, labels)
added_losses = tf.add(crossentropy_loss, dice_loss, name="total_loss_ref")
loss = added_losses + params.weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in model.trainable_variables
if 'batch_normalization' not in v.name])
if params.use_amp:
loss = optimizer.get_scaled_loss(loss)
tape = hvd.DistributedGradientTape(tape)
gradients = tape.gradient(loss, model.trainable_variables)
if params.use_amp:
gradients = optimizer.get_unscaled_gradients(gradients)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# Note: broadcast should be done after the first gradient step to ensure optimizer
# initialization.
if warmup_batch:
hvd.broadcast_variables(model.variables, root_rank=0)
hvd.broadcast_variables(optimizer.variables(), root_rank=0)
ce_loss(crossentropy_loss)
f1_loss(dice_loss)
return loss
if params.benchmark:
assert max_steps * hvd.size() > params.warmup_steps, \
"max_steps value has to be greater than warmup_steps"
timestamps = []
for iteration, (images, labels) in enumerate(dataset.train_fn(drop_remainder=True)):
loss = train_step(images, labels, warmup_batch=iteration == 0).numpy()
if iteration > params.warmup_steps:
timestamps.append(time())
if iteration >= max_steps * hvd.size():
break
if hvd.rank() == 0:
deltas = np.array([timestamps[i + 1] - timestamps[i] for i in range(len(timestamps) - 1)])
stats = process_performance_stats(deltas, hvd.size() * params.batch_size, mode="train")
logger.log(step=(), data=stats)
else:
for iteration, (images, labels) in enumerate(dataset.train_fn()):
train_step(images, labels, warmup_batch=iteration == 0)
if hvd.rank() == 0:
if iteration % params.log_every == 0:
logger.log(step=(iteration, max_steps),
data={"train_ce_loss": float(ce_loss.result()),
"train_dice_loss": float(f1_loss.result()),
"train_total_loss": float(f1_loss.result() + ce_loss.result())})
if (params.evaluate_every > 0) and (iteration % params.evaluate_every == 0):
evaluate(params, model, dataset, logger, restore_checkpoint=False)
f1_loss.reset_states()
ce_loss.reset_states()
if iteration >= max_steps:
break
if hvd.rank() == 0:
checkpoint.save(file_prefix=os.path.join(params.model_dir, "checkpoint"))
if params.use_savedmodel:
prec = 'amp' if params.use_amp else 'fp32'
model.save(os.path.join(params.model_dir, f'saved_model_{prec}'))
if params.use_tftrt:
export_model(params.model_dir, prec, os.path.join(params.model_dir, f'tf-trt_model_{prec}'))
logger.flush()
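# The benchmark branch above reduces the recorded step times with
# process_performance_stats. A minimal sketch of that kind of reduction is shown
# below; it is an illustrative assumption, not the repository's implementation,
# and the helper name is hypothetical.
def _summarize_step_times(deltas, batch_size, mode="train"):
    """Turn per-step durations (in seconds) into throughput/latency statistics."""
    deltas = np.asarray(deltas)
    return {
        f"throughput_{mode}": float(batch_size / deltas.mean()),  # samples per second
        f"latency_{mode}_mean_ms": float(deltas.mean() * 1000.0),
        f"latency_{mode}_90_ms": float(np.percentile(deltas, 90) * 1000.0),
    }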
def evaluate(params, model, dataset, logger, restore_checkpoint=True):
if params.fold is None:
print("No fold specified for evaluation. Please use --fold [int] to select a fold.")
ce_loss = tf.keras.metrics.Mean(name='ce_loss')
f1_loss = tf.keras.metrics.Mean(name='dice_loss')
if params.model_dir and restore_checkpoint:
prec = 'amp' if params.use_amp else 'fp32'
if params.use_savedmodel:
model = tf.keras.models.load_model(os.path.join(params.model_dir, f'saved_model_{prec}'))
elif params.use_tftrt:
model = TFTRTModel(model_dir=params.model_dir, precision=prec)
else:
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint(params.model_dir)).expect_partial()
def validation_step(features, labels):
output_map = model(features, training=False)
crossentropy_loss, dice_loss = partial_losses(output_map, labels)
ce_loss(crossentropy_loss)
f1_loss(dice_loss)
for iteration, (images, labels) in enumerate(dataset.eval_fn(count=1)):
validation_step(images, labels)
if iteration >= dataset.eval_size // params.batch_size:
break
if dataset.eval_size > 0:
logger.log(step=(),
data={"eval_ce_loss": float(ce_loss.result()),
"eval_dice_loss": float(f1_loss.result()),
"eval_total_loss": float(f1_loss.result() + ce_loss.result()),
"eval_dice_score": 1.0 - float(f1_loss.result())})
logger.flush()
def predict(params, model, dataset, logger):
prec = 'amp' if params.use_amp else 'fp32'
if params.model_dir:
if params.use_savedmodel:
model = tf.keras.models.load_model(os.path.join(params.model_dir, f'saved_model_{prec}'))
elif params.use_tftrt:
model = TFTRTModel(model_dir=params.model_dir, precision=prec)
else:
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint(params.model_dir)).expect_partial()
@tf.function
def prediction_step(features):
return tf.nn.softmax(model(features, training=False), axis=-1)
if params.benchmark:
assert params.max_steps > params.warmup_steps, \
"max_steps value has to be greater than warmup_steps"
timestamps = []
for iteration, images in enumerate(dataset.test_fn(count=None, drop_remainder=True)):
prediction_step(images)
if iteration > params.warmup_steps:
timestamps.append(time())
if iteration >= params.max_steps:
break
deltas = np.array([timestamps[i + 1] - timestamps[i] for i in range(len(timestamps) - 1)])
stats = process_performance_stats(deltas, params.batch_size, mode="test")
logger.log(step=(), data=stats)
else:
predictions = np.concatenate([prediction_step(images).numpy()
for images in dataset.test_fn(count=1)], axis=0)
binary_masks = [np.argmax(p, axis=-1).astype(np.uint8) * 255 for p in predictions]
multipage_tif = [Image.fromarray(mask).resize(size=(512, 512), resample=Image.BILINEAR)
for mask in binary_masks]
output_dir = os.path.join(params.model_dir, 'predictions')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
multipage_tif[0].save(os.path.join(output_dir, 'test-masks.tif'),
compression="tiff_deflate",
save_all=True,
append_images=multipage_tif[1:])
print("Predictions saved at {}".format(output_dir))
logger.flush()
|
TensorFlow/Detection/SSD/models/research/object_detection/data | data | oid_bbox_trainable_label_map | item {
name: "/m/01g317"
id: 1
display_name: "Person"
}
item {
name: "/m/09j2d"
id: 2
display_name: "Clothing"
}
item {
name: "/m/04yx4"
id: 3
display_name: "Man"
}
item {
name: "/m/0dzct"
id: 4
display_name: "Face"
}
item {
name: "/m/07j7r"
id: 5
display_name: "Tree"
}
item {
name: "/m/05s2s"
id: 6
display_name: "Plant"
}
item {
name: "/m/03bt1vf"
id: 7
display_name: "Woman"
}
item {
name: "/m/07yv9"
id: 8
display_name: "Vehicle"
}
item {
name: "/m/0cgh4"
id: 9
display_name: "Building"
}
item {
name: "/m/01prls"
id: 10
display_name: "Land vehicle"
}
item {
name: "/m/09j5n"
id: 11
display_name: "Footwear"
}
item {
name: "/m/05r655"
id: 12
display_name: "Girl"
}
item {
name: "/m/0jbk"
id: 13
display_name: "Animal"
}
item {
name: "/m/0k4j"
id: 14
display_name: "Car"
}
item {
name: "/m/02wbm"
id: 15
display_name: "Food"
}
item {
name: "/m/083wq"
id: 16
display_name: "Wheel"
}
item {
name: "/m/0c9ph5"
id: 17
display_name: "Flower"
}
item {
name: "/m/0c_jw"
id: 18
display_name: "Furniture"
}
item {
name: "/m/0d4v4"
id: 19
display_name: "Window"
}
item {
name: "/m/03jm5"
id: 20
display_name: "House"
}
item {
name: "/m/01bl7v"
id: 21
display_name: "Boy"
}
item {
name: "/m/0463sg"
id: 22
display_name: "Fashion accessory"
}
item {
name: "/m/04bcr3"
id: 23
display_name: "Table"
}
item {
name: "/m/0jyfg"
id: 24
display_name: "Glasses"
}
item {
name: "/m/01xyhv"
id: 25
display_name: "Suit"
}
item {
name: "/m/08dz3q"
id: 26
display_name: "Auto part"
}
item {
name: "/m/015p6"
id: 27
display_name: "Bird"
}
item {
name: "/m/05y5lj"
id: 28
display_name: "Sports equipment"
}
item {
name: "/m/01d40f"
id: 29
display_name: "Dress"
}
item {
name: "/m/0bt9lr"
id: 30
display_name: "Dog"
}
item {
name: "/m/01lrl"
id: 31
display_name: "Carnivore"
}
item {
name: "/m/02p0tk3"
id: 32
display_name: "Human body"
}
item {
name: "/m/0fly7"
id: 33
display_name: "Jeans"
}
item {
name: "/m/04szw"
id: 34
display_name: "Musical instrument"
}
item {
name: "/m/0271t"
id: 35
display_name: "Drink"
}
item {
name: "/m/019jd"
id: 36
display_name: "Boat"
}
item {
name: "/m/03q69"
id: 37
display_name: "Hair"
}
item {
name: "/m/0h9mv"
id: 38
display_name: "Tire"
}
item {
name: "/m/04hgtk"
id: 39
display_name: "Head"
}
item {
name: "/m/01yrx"
id: 40
display_name: "Cat"
}
item {
name: "/m/01rzcn"
id: 41
display_name: "Watercraft"
}
item {
name: "/m/01mzpv"
id: 42
display_name: "Chair"
}
item {
name: "/m/0199g"
id: 43
display_name: "Bike"
}
item {
name: "/m/01fdzj"
id: 44
display_name: "Tower"
}
item {
name: "/m/04rky"
id: 45
display_name: "Mammal"
}
item {
name: "/m/079cl"
id: 46
display_name: "Skyscraper"
}
item {
name: "/m/0dzf4"
id: 47
display_name: "Arm"
}
item {
name: "/m/0138tl"
id: 48
display_name: "Toy"
}
item {
name: "/m/06msq"
id: 49
display_name: "Sculpture"
}
item {
name: "/m/03xxp"
id: 50
display_name: "Invertebrate"
}
item {
name: "/m/0hg7b"
id: 51
display_name: "Microphone"
}
item {
name: "/m/01n5jq"
id: 52
display_name: "Poster"
}
item {
name: "/m/03vt0"
id: 53
display_name: "Insect"
}
item {
name: "/m/0342h"
id: 54
display_name: "Guitar"
}
item {
name: "/m/0k0pj"
id: 55
display_name: "Nose"
}
item {
name: "/m/02dl1y"
id: 56
display_name: "Hat"
}
item {
name: "/m/04brg2"
id: 57
display_name: "Tableware"
}
item {
name: "/m/02dgv"
id: 58
display_name: "Door"
}
item {
name: "/m/01bqk0"
id: 59
display_name: "Bicycle wheel"
}
item {
name: "/m/017ftj"
id: 60
display_name: "Sunglasses"
}
item {
name: "/m/052lwg6"
id: 61
display_name: "Baked goods"
}
item {
name: "/m/014sv8"
id: 62
display_name: "Eye"
}
item {
name: "/m/0270h"
id: 63
display_name: "Dessert"
}
item {
name: "/m/0283dt1"
id: 64
display_name: "Mouth"
}
item {
name: "/m/0k5j"
id: 65
display_name: "Aircraft"
}
item {
name: "/m/0cmf2"
id: 66
display_name: "Airplane"
}
item {
name: "/m/07jdr"
id: 67
display_name: "Train"
}
item {
name: "/m/032b3c"
id: 68
display_name: "Jacket"
}
item {
name: "/m/033rq4"
id: 69
display_name: "Street light"
}
item {
name: "/m/0k65p"
id: 70
display_name: "Hand"
}
item {
name: "/m/01ww8y"
id: 71
display_name: "Snack"
}
item {
name: "/m/0zvk5"
id: 72
display_name: "Helmet"
}
item {
name: "/m/07mhn"
id: 73
display_name: "Trousers"
}
item {
name: "/m/04dr76w"
id: 74
display_name: "Bottle"
}
item {
name: "/m/03fp41"
id: 75
display_name: "Houseplant"
}
item {
name: "/m/03k3r"
id: 76
display_name: "Horse"
}
item {
name: "/m/01y9k5"
id: 77
display_name: "Desk"
}
item {
name: "/m/0cdl1"
id: 78
display_name: "Palm tree"
}
item {
name: "/m/0f4s2w"
id: 79
display_name: "Vegetable"
}
item {
name: "/m/02xwb"
id: 80
display_name: "Fruit"
}
item {
name: "/m/035r7c"
id: 81
display_name: "Leg"
}
item {
name: "/m/0bt_c3"
id: 82
display_name: "Book"
}
item {
name: "/m/01_bhs"
id: 83
display_name: "Fast food"
}
item {
name: "/m/01599"
id: 84
display_name: "Beer"
}
item {
name: "/m/03120"
id: 85
display_name: "Flag"
}
item {
name: "/m/026t6"
id: 86
display_name: "Drum"
}
item {
name: "/m/01bjv"
id: 87
display_name: "Bus"
}
item {
name: "/m/07r04"
id: 88
display_name: "Truck"
}
item {
name: "/m/018xm"
id: 89
display_name: "Ball"
}
item {
name: "/m/01rkbr"
id: 90
display_name: "Tie"
}
item {
name: "/m/0fm3zh"
id: 91
display_name: "Flowerpot"
}
item {
name: "/m/02_n6y"
id: 92
display_name: "Goggles"
}
item {
name: "/m/04_sv"
id: 93
display_name: "Motorcycle"
}
item {
name: "/m/06z37_"
id: 94
display_name: "Picture frame"
}
item {
name: "/m/01bfm9"
id: 95
display_name: "Shorts"
}
item {
name: "/m/0h8mhzd"
id: 96
display_name: "Sports uniform"
}
item {
name: "/m/0d_2m"
id: 97
display_name: "Moths and butterflies"
}
item {
name: "/m/0gjbg72"
id: 98
display_name: "Shelf"
}
item {
name: "/m/01n4qj"
id: 99
display_name: "Shirt"
}
item {
name: "/m/0ch_cf"
id: 100
display_name: "Fish"
}
item {
name: "/m/06m11"
id: 101
display_name: "Rose"
}
item {
name: "/m/01jfm_"
id: 102
display_name: "Licence plate"
}
item {
name: "/m/02crq1"
id: 103
display_name: "Couch"
}
item {
name: "/m/083kb"
id: 104
display_name: "Weapon"
}
item {
name: "/m/01c648"
id: 105
display_name: "Laptop"
}
item {
name: "/m/09tvcd"
id: 106
display_name: "Wine glass"
}
item {
name: "/m/0h2r6"
id: 107
display_name: "Van"
}
item {
name: "/m/081qc"
id: 108
display_name: "Wine"
}
item {
name: "/m/09ddx"
id: 109
display_name: "Duck"
}
item {
name: "/m/03p3bw"
id: 110
display_name: "Bicycle helmet"
}
item {
name: "/m/0cyf8"
id: 111
display_name: "Butterfly"
}
item {
name: "/m/0b_rs"
id: 112
display_name: "Swimming pool"
}
item {
name: "/m/039xj_"
id: 113
display_name: "Ear"
}
item {
name: "/m/021sj1"
id: 114
display_name: "Office"
}
item {
name: "/m/0dv5r"
id: 115
display_name: "Camera"
}
item {
name: "/m/01lynh"
id: 116
display_name: "Stairs"
}
item {
name: "/m/06bt6"
id: 117
display_name: "Reptile"
}
item {
name: "/m/01226z"
id: 118
display_name: "Football"
}
item {
name: "/m/0fszt"
id: 119
display_name: "Cake"
}
item {
name: "/m/050k8"
id: 120
display_name: "Mobile phone"
}
item {
name: "/m/02wbtzl"
id: 121
display_name: "Sun hat"
}
item {
name: "/m/02p5f1q"
id: 122
display_name: "Coffee cup"
}
item {
name: "/m/025nd"
id: 123
display_name: "Christmas tree"
}
item {
name: "/m/02522"
id: 124
display_name: "Computer monitor"
}
item {
name: "/m/09ct_"
id: 125
display_name: "Helicopter"
}
item {
name: "/m/0cvnqh"
id: 126
display_name: "Bench"
}
item {
name: "/m/0d5gx"
id: 127
display_name: "Castle"
}
item {
name: "/m/01xygc"
id: 128
display_name: "Coat"
}
item {
name: "/m/04m6gz"
id: 129
display_name: "Porch"
}
item {
name: "/m/01gkx_"
id: 130
display_name: "Swimwear"
}
item {
name: "/m/01s105"
id: 131
display_name: "Cabinetry"
}
item {
name: "/m/01j61q"
id: 132
display_name: "Tent"
}
item {
name: "/m/0hnnb"
id: 133
display_name: "Umbrella"
}
item {
name: "/m/01j51"
id: 134
display_name: "Balloon"
}
item {
name: "/m/01knjb"
id: 135
display_name: "Billboard"
}
item {
name: "/m/03__z0"
id: 136
display_name: "Bookcase"
}
item {
name: "/m/01m2v"
id: 137
display_name: "Computer keyboard"
}
item {
name: "/m/0167gd"
id: 138
display_name: "Doll"
}
item {
name: "/m/0284d"
id: 139
display_name: "Dairy"
}
item {
name: "/m/03ssj5"
id: 140
display_name: "Bed"
}
item {
name: "/m/02fq_6"
id: 141
display_name: "Fedora"
}
item {
name: "/m/06nwz"
id: 142
display_name: "Seafood"
}
item {
name: "/m/0220r2"
id: 143
display_name: "Fountain"
}
item {
name: "/m/01mqdt"
id: 144
display_name: "Traffic sign"
}
item {
name: "/m/0268lbt"
id: 145
display_name: "Hiking equipment"
}
item {
name: "/m/07c52"
id: 146
display_name: "Television"
}
item {
name: "/m/0grw1"
id: 147
display_name: "Salad"
}
item {
name: "/m/01h3n"
id: 148
display_name: "Bee"
}
item {
name: "/m/078n6m"
id: 149
display_name: "Coffee table"
}
item {
name: "/m/01xq0k1"
id: 150
display_name: "Cattle"
}
item {
name: "/m/0gd2v"
id: 151
display_name: "Marine mammal"
}
item {
name: "/m/0dbvp"
id: 152
display_name: "Goose"
}
item {
name: "/m/03rszm"
id: 153
display_name: "Curtain"
}
item {
name: "/m/0h8n5zk"
id: 154
display_name: "Kitchen & dining room table"
}
item {
name: "/m/019dx1"
id: 155
display_name: "Home appliance"
}
item {
name: "/m/03hl4l9"
id: 156
display_name: "Marine invertebrates"
}
item {
name: "/m/0b3fp9"
id: 157
display_name: "Countertop"
}
item {
name: "/m/02rdsp"
id: 158
display_name: "Office supplies"
}
item {
name: "/m/0hf58v5"
id: 159
display_name: "Luggage and bags"
}
item {
name: "/m/04h7h"
id: 160
display_name: "Lighthouse"
}
item {
name: "/m/024g6"
id: 161
display_name: "Cocktail"
}
item {
name: "/m/0cffdh"
id: 162
display_name: "Maple"
}
item {
name: "/m/03q5c7"
id: 163
display_name: "Saucer"
}
item {
name: "/m/014y4n"
id: 164
display_name: "Paddle"
}
item {
name: "/m/01yx86"
id: 165
display_name: "Bronze sculpture"
}
item {
name: "/m/020jm"
id: 166
display_name: "Beetle"
}
item {
name: "/m/025dyy"
id: 167
display_name: "Box"
}
item {
name: "/m/01llwg"
id: 168
display_name: "Necklace"
}
item {
name: "/m/08pbxl"
id: 169
display_name: "Monkey"
}
item {
name: "/m/02d9qx"
id: 170
display_name: "Whiteboard"
}
item {
name: "/m/02pkr5"
id: 171
display_name: "Plumbing fixture"
}
item {
name: "/m/0h99cwc"
id: 172
display_name: "Kitchen appliance"
}
item {
name: "/m/050gv4"
id: 173
display_name: "Plate"
}
item {
name: "/m/02vqfm"
id: 174
display_name: "Coffee"
}
item {
name: "/m/09kx5"
id: 175
display_name: "Deer"
}
item {
name: "/m/019w40"
id: 176
display_name: "Surfboard"
}
item {
name: "/m/09dzg"
id: 177
display_name: "Turtle"
}
item {
name: "/m/07k1x"
id: 178
display_name: "Tool"
}
item {
name: "/m/080hkjn"
id: 179
display_name: "Handbag"
}
item {
name: "/m/07qxg_"
id: 180
display_name: "Football helmet"
}
item {
name: "/m/0ph39"
id: 181
display_name: "Canoe"
}
item {
name: "/m/018p4k"
id: 182
display_name: "Cart"
}
item {
name: "/m/02h19r"
id: 183
display_name: "Scarf"
}
item {
name: "/m/015h_t"
id: 184
display_name: "Beard"
}
item {
name: "/m/0fqfqc"
id: 185
display_name: "Drawer"
}
item {
name: "/m/025rp__"
id: 186
display_name: "Cowboy hat"
}
item {
name: "/m/01x3z"
id: 187
display_name: "Clock"
}
item {
name: "/m/0crjs"
id: 188
display_name: "Convenience store"
}
item {
name: "/m/0l515"
id: 189
display_name: "Sandwich"
}
item {
name: "/m/015qff"
id: 190
display_name: "Traffic light"
}
item {
name: "/m/09kmb"
id: 191
display_name: "Spider"
}
item {
name: "/m/09728"
id: 192
display_name: "Bread"
}
item {
name: "/m/071qp"
id: 193
display_name: "Squirrel"
}
item {
name: "/m/02s195"
id: 194
display_name: "Vase"
}
item {
name: "/m/06c54"
id: 195
display_name: "Rifle"
}
item {
name: "/m/01xqw"
id: 196
display_name: "Cello"
}
item {
name: "/m/05zsy"
id: 197
display_name: "Pumpkin"
}
item {
name: "/m/0bwd_0j"
id: 198
display_name: "Elephant"
}
item {
name: "/m/04m9y"
id: 199
display_name: "Lizard"
}
item {
name: "/m/052sf"
id: 200
display_name: "Mushroom"
}
item {
name: "/m/03grzl"
id: 201
display_name: "Baseball glove"
}
item {
name: "/m/01z1kdw"
id: 202
display_name: "Juice"
}
item {
name: "/m/02wv6h6"
id: 203
display_name: "Skirt"
}
item {
name: "/m/016m2d"
id: 204
display_name: "Skull"
}
item {
name: "/m/0dtln"
id: 205
display_name: "Lamp"
}
item {
name: "/m/057cc"
id: 206
display_name: "Musical keyboard"
}
item {
name: "/m/06k2mb"
id: 207
display_name: "High heels"
}
item {
name: "/m/0f6wt"
id: 208
display_name: "Falcon"
}
item {
name: "/m/0cxn2"
id: 209
display_name: "Ice cream"
}
item {
name: "/m/02jvh9"
id: 210
display_name: "Mug"
}
item {
name: "/m/0gjkl"
id: 211
display_name: "Watch"
}
item {
name: "/m/01b638"
id: 212
display_name: "Boot"
}
item {
name: "/m/071p9"
id: 213
display_name: "Ski"
}
item {
name: "/m/0pg52"
id: 214
display_name: "Taxi"
}
item {
name: "/m/0ftb8"
id: 215
display_name: "Sunflower"
}
item {
name: "/m/0hnyx"
id: 216
display_name: "Pastry"
}
item {
name: "/m/02jz0l"
id: 217
display_name: "Tap"
}
item {
name: "/m/04kkgm"
id: 218
display_name: "Bowl"
}
item {
name: "/m/0174n1"
id: 219
display_name: "Glove"
}
item {
name: "/m/0gv1x"
id: 220
display_name: "Parrot"
}
item {
name: "/m/09csl"
id: 221
display_name: "Eagle"
}
item {
name: "/m/02jnhm"
id: 222
display_name: "Tin can"
}
item {
name: "/m/099ssp"
id: 223
display_name: "Platter"
}
item {
name: "/m/03nfch"
id: 224
display_name: "Sandal"
}
item {
name: "/m/07y_7"
id: 225
display_name: "Violin"
}
item {
name: "/m/05z6w"
id: 226
display_name: "Penguin"
}
item {
name: "/m/03m3pdh"
id: 227
display_name: "Sofa bed"
}
item {
name: "/m/09ld4"
id: 228
display_name: "Frog"
}
item {
name: "/m/09b5t"
id: 229
display_name: "Chicken"
}
item {
name: "/m/054xkw"
id: 230
display_name: "Lifejacket"
}
item {
name: "/m/0130jx"
id: 231
display_name: "Sink"
}
item {
name: "/m/07fbm7"
id: 232
display_name: "Strawberry"
}
item {
name: "/m/01dws"
id: 233
display_name: "Bear"
}
item {
name: "/m/01tcjp"
id: 234
display_name: "Muffin"
}
item {
name: "/m/0dftk"
id: 235
display_name: "Swan"
}
item {
name: "/m/0c06p"
id: 236
display_name: "Candle"
}
item {
name: "/m/034c16"
id: 237
display_name: "Pillow"
}
item {
name: "/m/09d5_"
id: 238
display_name: "Owl"
}
item {
name: "/m/03hlz0c"
id: 239
display_name: "Kitchen utensil"
}
item {
name: "/m/0ft9s"
id: 240
display_name: "Dragonfly"
}
item {
name: "/m/011k07"
id: 241
display_name: "Tortoise"
}
item {
name: "/m/054_l"
id: 242
display_name: "Mirror"
}
item {
name: "/m/0jqgx"
id: 243
display_name: "Lily"
}
item {
name: "/m/0663v"
id: 244
display_name: "Pizza"
}
item {
name: "/m/0242l"
id: 245
display_name: "Coin"
}
item {
name: "/m/014trl"
id: 246
display_name: "Cosmetics"
}
item {
name: "/m/05r5c"
id: 247
display_name: "Piano"
}
item {
name: "/m/07j87"
id: 248
display_name: "Tomato"
}
item {
name: "/m/05kyg_"
id: 249
display_name: "Chest of drawers"
}
item {
name: "/m/0kmg4"
id: 250
display_name: "Teddy bear"
}
item {
name: "/m/07cmd"
id: 251
display_name: "Tank"
}
item {
name: "/m/0dv77"
id: 252
display_name: "Squash"
}
item {
name: "/m/096mb"
id: 253
display_name: "Lion"
}
item {
name: "/m/01gmv2"
id: 254
display_name: "Brassiere"
}
item {
name: "/m/07bgp"
id: 255
display_name: "Sheep"
}
item {
name: "/m/0cmx8"
id: 256
display_name: "Spoon"
}
item {
name: "/m/029tx"
id: 257
display_name: "Dinosaur"
}
item {
name: "/m/073bxn"
id: 258
display_name: "Tripod"
}
item {
name: "/m/0bh9flk"
id: 259
display_name: "Tablet computer"
}
item {
name: "/m/06mf6"
id: 260
display_name: "Rabbit"
}
item {
name: "/m/06_fw"
id: 261
display_name: "Skateboard"
}
item {
name: "/m/078jl"
id: 262
display_name: "Snake"
}
item {
name: "/m/0fbdv"
id: 263
display_name: "Shellfish"
}
item {
name: "/m/0h23m"
id: 264
display_name: "Sparrow"
}
item {
name: "/m/014j1m"
id: 265
display_name: "Apple"
}
item {
name: "/m/03fwl"
id: 266
display_name: "Goat"
}
item {
name: "/m/02y6n"
id: 267
display_name: "French fries"
}
item {
name: "/m/06c7f7"
id: 268
display_name: "Lipstick"
}
item {
name: "/m/026qbn5"
id: 269
display_name: "studio couch"
}
item {
name: "/m/0cdn1"
id: 270
display_name: "Hamburger"
}
item {
name: "/m/07clx"
id: 271
display_name: "Tea"
}
item {
name: "/m/07cx4"
id: 272
display_name: "Telephone"
}
item {
name: "/m/03g8mr"
id: 273
display_name: "Baseball bat"
}
item {
name: "/m/0cnyhnx"
id: 274
display_name: "Bull"
}
item {
name: "/m/01b7fy"
id: 275
display_name: "Headphones"
}
item {
name: "/m/04gth"
id: 276
display_name: "Lavender"
}
item {
name: "/m/0cyfs"
id: 277
display_name: "Parachute"
}
item {
name: "/m/021mn"
id: 278
display_name: "Cookie"
}
item {
name: "/m/07dm6"
id: 279
display_name: "Tiger"
}
item {
name: "/m/0k1tl"
id: 280
display_name: "Pen"
}
item {
name: "/m/0dv9c"
id: 281
display_name: "Racket"
}
item {
name: "/m/0dt3t"
id: 282
display_name: "Fork"
}
item {
name: "/m/04yqq2"
id: 283
display_name: "Bust"
}
item {
name: "/m/01cmb2"
id: 284
display_name: "Miniskirt"
}
item {
name: "/m/0gd36"
id: 285
display_name: "Sea lion"
}
item {
name: "/m/033cnk"
id: 286
display_name: "Egg"
}
item {
name: "/m/06ncr"
id: 287
display_name: "Saxophone"
}
item {
name: "/m/03bk1"
id: 288
display_name: "Giraffe"
}
item {
name: "/m/0bjyj5"
id: 289
display_name: "Waste container"
}
item {
name: "/m/06__v"
id: 290
display_name: "Snowboard"
}
item {
name: "/m/0qmmr"
id: 291
display_name: "Wheelchair"
}
item {
name: "/m/01xgg_"
id: 292
display_name: "Medical equipment"
}
item {
name: "/m/0czz2"
id: 293
display_name: "Antelope"
}
item {
name: "/m/02l8p9"
id: 294
display_name: "Harbor seal"
}
item {
name: "/m/09g1w"
id: 295
display_name: "Toilet"
}
item {
name: "/m/0ll1f78"
id: 296
display_name: "Shrimp"
}
item {
name: "/m/0cyhj_"
id: 297
display_name: "Orange"
}
item {
name: "/m/0642b4"
id: 298
display_name: "Cupboard"
}
item {
name: "/m/0h8mzrc"
id: 299
display_name: "Wall clock"
}
item {
name: "/m/068zj"
id: 300
display_name: "Pig"
}
item {
name: "/m/02z51p"
id: 301
display_name: "Nightstand"
}
item {
name: "/m/0h8nr_l"
id: 302
display_name: "Bathroom accessory"
}
item {
name: "/m/0388q"
id: 303
display_name: "Grape"
}
item {
name: "/m/02hj4"
id: 304
display_name: "Dolphin"
}
item {
name: "/m/01jfsr"
id: 305
display_name: "Lantern"
}
item {
name: "/m/07gql"
id: 306
display_name: "Trumpet"
}
item {
name: "/m/0h8my_4"
id: 307
display_name: "Tennis racket"
}
item {
name: "/m/0n28_"
id: 308
display_name: "Crab"
}
item {
name: "/m/0120dh"
id: 309
display_name: "Sea turtle"
}
item {
name: "/m/020kz"
id: 310
display_name: "Cannon"
}
item {
name: "/m/0mkg"
id: 311
display_name: "Accordion"
}
item {
name: "/m/03c7gz"
id: 312
display_name: "Door handle"
}
item {
name: "/m/09k_b"
id: 313
display_name: "Lemon"
}
item {
name: "/m/031n1"
id: 314
display_name: "Foot"
}
item {
name: "/m/04rmv"
id: 315
display_name: "Mouse"
}
item {
name: "/m/084rd"
id: 316
display_name: "Wok"
}
item {
name: "/m/02rgn06"
id: 317
display_name: "Volleyball"
}
item {
name: "/m/05z55"
id: 318
display_name: "Pasta"
}
item {
name: "/m/01r546"
id: 319
display_name: "Earrings"
}
item {
name: "/m/09qck"
id: 320
display_name: "Banana"
}
item {
name: "/m/012w5l"
id: 321
display_name: "Ladder"
}
item {
name: "/m/01940j"
id: 322
display_name: "Backpack"
}
item {
name: "/m/09f_2"
id: 323
display_name: "Crocodile"
}
item {
name: "/m/02p3w7d"
id: 324
display_name: "Roller skates"
}
item {
name: "/m/057p5t"
id: 325
display_name: "Scoreboard"
}
item {
name: "/m/0d8zb"
id: 326
display_name: "Jellyfish"
}
item {
name: "/m/01nq26"
id: 327
display_name: "Sock"
}
item {
name: "/m/01x_v"
id: 328
display_name: "Camel"
}
item {
name: "/m/05gqfk"
id: 329
display_name: "Plastic bag"
}
item {
name: "/m/0cydv"
id: 330
display_name: "Caterpillar"
}
item {
name: "/m/07030"
id: 331
display_name: "Sushi"
}
item {
name: "/m/084zz"
id: 332
display_name: "Whale"
}
item {
name: "/m/0c29q"
id: 333
display_name: "Leopard"
}
item {
name: "/m/02zn6n"
id: 334
display_name: "Barrel"
}
item {
name: "/m/03tw93"
id: 335
display_name: "Fireplace"
}
item {
name: "/m/0fqt361"
id: 336
display_name: "Stool"
}
item {
name: "/m/0f9_l"
id: 337
display_name: "Snail"
}
item {
name: "/m/0gm28"
id: 338
display_name: "Candy"
}
item {
name: "/m/09rvcxw"
id: 339
display_name: "Rocket"
}
item {
name: "/m/01nkt"
id: 340
display_name: "Cheese"
}
item {
name: "/m/04p0qw"
id: 341
display_name: "Billiard table"
}
item {
name: "/m/03hj559"
id: 342
display_name: "Mixing bowl"
}
item {
name: "/m/07pj7bq"
id: 343
display_name: "Bowling equipment"
}
item {
name: "/m/04ctx"
id: 344
display_name: "Knife"
}
item {
name: "/m/0703r8"
id: 345
display_name: "Loveseat"
}
item {
name: "/m/03qrc"
id: 346
display_name: "Hamster"
}
item {
name: "/m/020lf"
id: 347
display_name: "Mouse"
}
item {
name: "/m/0by6g"
id: 348
display_name: "Shark"
}
item {
name: "/m/01fh4r"
id: 349
display_name: "Teapot"
}
item {
name: "/m/07c6l"
id: 350
display_name: "Trombone"
}
item {
name: "/m/03bj1"
id: 351
display_name: "Panda"
}
item {
name: "/m/0898b"
id: 352
display_name: "Zebra"
}
item {
name: "/m/02x984l"
id: 353
display_name: "Mechanical fan"
}
item {
name: "/m/0fj52s"
id: 354
display_name: "Carrot"
}
item {
name: "/m/0cd4d"
id: 355
display_name: "Cheetah"
}
item {
name: "/m/02068x"
id: 356
display_name: "Gondola"
}
item {
name: "/m/01vbnl"
id: 357
display_name: "Bidet"
}
item {
name: "/m/0449p"
id: 358
display_name: "Jaguar"
}
item {
name: "/m/0gj37"
id: 359
display_name: "Ladybug"
}
item {
name: "/m/0nl46"
id: 360
display_name: "Crown"
}
item {
name: "/m/0152hh"
id: 361
display_name: "Snowman"
}
item {
name: "/m/03dnzn"
id: 362
display_name: "Bathtub"
}
item {
name: "/m/05_5p_0"
id: 363
display_name: "Table tennis racket"
}
item {
name: "/m/02jfl0"
id: 364
display_name: "Sombrero"
}
item {
name: "/m/01dxs"
id: 365
display_name: "Brown bear"
}
item {
name: "/m/0cjq5"
id: 366
display_name: "Lobster"
}
item {
name: "/m/040b_t"
id: 367
display_name: "Refrigerator"
}
item {
name: "/m/0_cp5"
id: 368
display_name: "Oyster"
}
item {
name: "/m/0gxl3"
id: 369
display_name: "Handgun"
}
item {
name: "/m/029bxz"
id: 370
display_name: "Oven"
}
item {
name: "/m/02zt3"
id: 371
display_name: "Kite"
}
item {
name: "/m/03d443"
id: 372
display_name: "Rhinoceros"
}
item {
name: "/m/0306r"
id: 373
display_name: "Fox"
}
item {
name: "/m/0h8l4fh"
id: 374
display_name: "Light bulb"
}
item {
name: "/m/0633h"
id: 375
display_name: "Polar bear"
}
item {
name: "/m/01s55n"
id: 376
display_name: "Suitcase"
}
item {
name: "/m/0hkxq"
id: 377
display_name: "Broccoli"
}
item {
name: "/m/0cn6p"
id: 378
display_name: "Otter"
}
item {
name: "/m/0dbzx"
id: 379
display_name: "Mule"
}
item {
name: "/m/01dy8n"
id: 380
display_name: "Woodpecker"
}
item {
name: "/m/01h8tj"
id: 381
display_name: "Starfish"
}
item {
name: "/m/03s_tn"
id: 382
display_name: "Kettle"
}
item {
name: "/m/01xs3r"
id: 383
display_name: "Jet ski"
}
item {
name: "/m/031b6r"
id: 384
display_name: "Window blind"
}
item {
name: "/m/06j2d"
id: 385
display_name: "Raven"
}
item {
name: "/m/0hqkz"
id: 386
display_name: "Grapefruit"
}
item {
name: "/m/01_5g"
id: 387
display_name: "Chopsticks"
}
item {
name: "/m/02zvsm"
id: 388
display_name: "Tart"
}
item {
name: "/m/0kpqd"
id: 389
display_name: "Watermelon"
}
item {
name: "/m/015x4r"
id: 390
display_name: "Cucumber"
}
item {
name: "/m/061hd_"
id: 391
display_name: "Infant bed"
}
item {
name: "/m/04ylt"
id: 392
display_name: "Missile"
}
item {
name: "/m/02wv84t"
id: 393
display_name: "Gas stove"
}
item {
name: "/m/04y4h8h"
id: 394
display_name: "Bathroom cabinet"
}
item {
name: "/m/01gllr"
id: 395
display_name: "Beehive"
}
item {
name: "/m/0pcr"
id: 396
display_name: "Alpaca"
}
item {
name: "/m/0jy4k"
id: 397
display_name: "Doughnut"
}
item {
name: "/m/09f20"
id: 398
display_name: "Hippopotamus"
}
item {
name: "/m/0mcx2"
id: 399
display_name: "Ipod"
}
item {
name: "/m/04c0y"
id: 400
display_name: "Kangaroo"
}
item {
name: "/m/0_k2"
id: 401
display_name: "Ant"
}
item {
name: "/m/0jg57"
id: 402
display_name: "Bell pepper"
}
item {
name: "/m/03fj2"
id: 403
display_name: "Goldfish"
}
item {
name: "/m/03ldnb"
id: 404
display_name: "Ceiling fan"
}
item {
name: "/m/06nrc"
id: 405
display_name: "Shotgun"
}
item {
name: "/m/01btn"
id: 406
display_name: "Barge"
}
item {
name: "/m/05vtc"
id: 407
display_name: "Potato"
}
item {
name: "/m/08hvt4"
id: 408
display_name: "Jug"
}
item {
name: "/m/0fx9l"
id: 409
display_name: "Microwave oven"
}
item {
name: "/m/01h44"
id: 410
display_name: "Bat"
}
item {
name: "/m/05n4y"
id: 411
display_name: "Ostrich"
}
item {
name: "/m/0jly1"
id: 412
display_name: "Turkey"
}
item {
name: "/m/06y5r"
id: 413
display_name: "Sword"
}
item {
name: "/m/05ctyq"
id: 414
display_name: "Tennis ball"
}
item {
name: "/m/0fp6w"
id: 415
display_name: "Pineapple"
}
item {
name: "/m/0d4w1"
id: 416
display_name: "Closet"
}
item {
name: "/m/02pv19"
id: 417
display_name: "Stop sign"
}
item {
name: "/m/07crc"
id: 418
display_name: "Taco"
}
item {
name: "/m/01dwwc"
id: 419
display_name: "Pancake"
}
item {
name: "/m/01b9xk"
id: 420
display_name: "Hot dog"
}
item {
name: "/m/013y1f"
id: 421
display_name: "Organ"
}
item {
name: "/m/0m53l"
id: 422
display_name: "Rays and skates"
}
item {
name: "/m/0174k2"
id: 423
display_name: "Washing machine"
}
item {
name: "/m/01dwsz"
id: 424
display_name: "Waffle"
}
item {
name: "/m/04vv5k"
id: 425
display_name: "Snowplow"
}
item {
name: "/m/04cp_"
id: 426
display_name: "Koala"
}
item {
name: "/m/0fz0h"
id: 427
display_name: "Honeycomb"
}
item {
name: "/m/0llzx"
id: 428
display_name: "Sewing machine"
}
item {
name: "/m/0319l"
id: 429
display_name: "Horn"
}
item {
name: "/m/04v6l4"
id: 430
display_name: "Frying pan"
}
item {
name: "/m/0dkzw"
id: 431
display_name: "Seat belt"
}
item {
name: "/m/027pcv"
id: 432
display_name: "Zucchini"
}
item {
name: "/m/0323sq"
id: 433
display_name: "Golf cart"
}
item {
name: "/m/054fyh"
id: 434
display_name: "Pitcher"
}
item {
name: "/m/01pns0"
id: 435
display_name: "Fire hydrant"
}
item {
name: "/m/012n7d"
id: 436
display_name: "Ambulance"
}
item {
name: "/m/044r5d"
id: 437
display_name: "Golf ball"
}
item {
name: "/m/01krhy"
id: 438
display_name: "Tiara"
}
item {
name: "/m/0dq75"
id: 439
display_name: "Raccoon"
}
item {
name: "/m/0176mf"
id: 440
display_name: "Belt"
}
item {
name: "/m/0h8lkj8"
id: 441
display_name: "Corded phone"
}
item {
name: "/m/04tn4x"
id: 442
display_name: "Swim cap"
}
item {
name: "/m/06l9r"
id: 443
display_name: "Red panda"
}
item {
name: "/m/0cjs7"
id: 444
display_name: "Asparagus"
}
item {
name: "/m/01lsmm"
id: 445
display_name: "Scissors"
}
item {
name: "/m/01lcw4"
id: 446
display_name: "Limousine"
}
item {
name: "/m/047j0r"
id: 447
display_name: "Filing cabinet"
}
item {
name: "/m/01fb_0"
id: 448
display_name: "Bagel"
}
item {
name: "/m/04169hn"
id: 449
display_name: "Wood-burning stove"
}
item {
name: "/m/076bq"
id: 450
display_name: "Segway"
}
item {
name: "/m/0hdln"
id: 451
display_name: "Ruler"
}
item {
name: "/m/01g3x7"
id: 452
display_name: "Bow and arrow"
}
item {
name: "/m/0l3ms"
id: 453
display_name: "Balance beam"
}
item {
name: "/m/058qzx"
id: 454
display_name: "Kitchen knife"
}
item {
name: "/m/0h8n6ft"
id: 455
display_name: "Cake stand"
}
item {
name: "/m/018j2"
id: 456
display_name: "Banjo"
}
item {
name: "/m/0l14j_"
id: 457
display_name: "Flute"
}
item {
name: "/m/0wdt60w"
id: 458
display_name: "Rugby ball"
}
item {
name: "/m/02gzp"
id: 459
display_name: "Dagger"
}
item {
name: "/m/0h8n6f9"
id: 460
display_name: "Dog bed"
}
item {
name: "/m/0fbw6"
id: 461
display_name: "Cabbage"
}
item {
name: "/m/07kng9"
id: 462
display_name: "Picnic basket"
}
item {
name: "/m/0dj6p"
id: 463
display_name: "Peach"
}
item {
name: "/m/06pcq"
id: 464
display_name: "Submarine sandwich"
}
item {
name: "/m/061_f"
id: 465
display_name: "Pear"
}
item {
name: "/m/04g2r"
id: 466
display_name: "Lynx"
}
item {
name: "/m/0jwn_"
id: 467
display_name: "Pomegranate"
}
item {
name: "/m/02f9f_"
id: 468
display_name: "Shower"
}
item {
name: "/m/01f8m5"
id: 469
display_name: "Blue jay"
}
item {
name: "/m/01m4t"
id: 470
display_name: "Printer"
}
item {
name: "/m/0cl4p"
id: 471
display_name: "Hedgehog"
}
item {
name: "/m/07xyvk"
id: 472
display_name: "Coffeemaker"
}
item {
name: "/m/084hf"
id: 473
display_name: "Worm"
}
item {
name: "/m/03v5tg"
id: 474
display_name: "Drinking straw"
}
item {
name: "/m/0qjjc"
id: 475
display_name: "Remote control"
}
item {
name: "/m/015x5n"
id: 476
display_name: "Radish"
}
item {
name: "/m/0ccs93"
id: 477
display_name: "Canary"
}
item {
name: "/m/0nybt"
id: 478
display_name: "Seahorse"
}
item {
name: "/m/02vkqh8"
id: 479
display_name: "Wardrobe"
}
item {
name: "/m/09gtd"
id: 480
display_name: "Toilet paper"
}
item {
name: "/m/019h78"
id: 481
display_name: "Centipede"
}
item {
name: "/m/015wgc"
id: 482
display_name: "Croissant"
}
item {
name: "/m/01x3jk"
id: 483
display_name: "Snowmobile"
}
item {
name: "/m/01j3zr"
id: 484
display_name: "Burrito"
}
item {
name: "/m/0c568"
id: 485
display_name: "Porcupine"
}
item {
name: "/m/02pdsw"
id: 486
display_name: "Cutting board"
}
item {
name: "/m/029b3"
id: 487
display_name: "Dice"
}
item {
name: "/m/03q5t"
id: 488
display_name: "Harpsichord"
}
item {
name: "/m/0p833"
id: 489
display_name: "Perfume"
}
item {
name: "/m/01d380"
id: 490
display_name: "Drill"
}
item {
name: "/m/024d2"
id: 491
display_name: "Calculator"
}
item {
name: "/m/0mw_6"
id: 492
display_name: "Willow"
}
item {
name: "/m/01f91_"
id: 493
display_name: "Pretzel"
}
item {
name: "/m/02g30s"
id: 494
display_name: "Guacamole"
}
item {
name: "/m/01hrv5"
id: 495
display_name: "Popcorn"
}
item {
name: "/m/03m5k"
id: 496
display_name: "Harp"
}
item {
name: "/m/0162_1"
id: 497
display_name: "Towel"
}
item {
name: "/m/063rgb"
id: 498
display_name: "Mixer"
}
item {
name: "/m/06_72j"
id: 499
display_name: "Digital clock"
}
item {
name: "/m/046dlr"
id: 500
display_name: "Alarm clock"
}
item {
name: "/m/047v4b"
id: 501
display_name: "Artichoke"
}
item {
name: "/m/04zpv"
id: 502
display_name: "Milk"
}
item {
name: "/m/043nyj"
id: 503
display_name: "Common fig"
}
item {
name: "/m/03bbps"
id: 504
display_name: "Power plugs and sockets"
}
item {
name: "/m/02w3r3"
id: 505
display_name: "Paper towel"
}
item {
name: "/m/02pjr4"
id: 506
display_name: "Blender"
}
item {
name: "/m/0755b"
id: 507
display_name: "Scorpion"
}
item {
name: "/m/02lbcq"
id: 508
display_name: "Stretcher"
}
item {
name: "/m/0fldg"
id: 509
display_name: "Mango"
}
item {
name: "/m/012074"
id: 510
display_name: "Magpie"
}
item {
name: "/m/035vxb"
id: 511
display_name: "Isopod"
}
item {
name: "/m/02w3_ws"
id: 512
display_name: "Personal care"
}
item {
name: "/m/0f6nr"
id: 513
display_name: "Unicycle"
}
item {
name: "/m/0420v5"
id: 514
display_name: "Punching bag"
}
item {
name: "/m/0frqm"
id: 515
display_name: "Envelope"
}
item {
name: "/m/03txqz"
id: 516
display_name: "Scale"
}
item {
name: "/m/0271qf7"
id: 517
display_name: "Wine rack"
}
item {
name: "/m/074d1"
id: 518
display_name: "Submarine"
}
item {
name: "/m/08p92x"
id: 519
display_name: "Cream"
}
item {
name: "/m/01j4z9"
id: 520
display_name: "Chainsaw"
}
item {
name: "/m/0kpt_"
id: 521
display_name: "Cantaloupe"
}
item {
name: "/m/0h8n27j"
id: 522
display_name: "Serving tray"
}
item {
name: "/m/03y6mg"
id: 523
display_name: "Food processor"
}
item {
name: "/m/04h8sr"
id: 524
display_name: "Dumbbell"
}
item {
name: "/m/065h6l"
id: 525
display_name: "Jacuzzi"
}
item {
name: "/m/02tsc9"
id: 526
display_name: "Slow cooker"
}
item {
name: "/m/012ysf"
id: 527
display_name: "Syringe"
}
item {
name: "/m/0ky7b"
id: 528
display_name: "Dishwasher"
}
item {
name: "/m/02wg_p"
id: 529
display_name: "Tree house"
}
item {
name: "/m/0584n8"
id: 530
display_name: "Briefcase"
}
item {
name: "/m/03kt2w"
id: 531
display_name: "Stationary bicycle"
}
item {
name: "/m/05kms"
id: 532
display_name: "Oboe"
}
item {
name: "/m/030610"
id: 533
display_name: "Treadmill"
}
item {
name: "/m/0lt4_"
id: 534
display_name: "Binoculars"
}
item {
name: "/m/076lb9"
id: 535
display_name: "Bench"
}
item {
name: "/m/02ctlc"
id: 536
display_name: "Cricket ball"
}
item {
name: "/m/02x8cch"
id: 537
display_name: "Salt and pepper shakers"
}
item {
name: "/m/09gys"
id: 538
display_name: "Squid"
}
item {
name: "/m/03jbxj"
id: 539
display_name: "Light switch"
}
item {
name: "/m/012xff"
id: 540
display_name: "Toothbrush"
}
item {
name: "/m/0h8kx63"
id: 541
display_name: "Spice rack"
}
item {
name: "/m/073g6"
id: 542
display_name: "Stethoscope"
}
item {
name: "/m/02cvgx"
id: 543
display_name: "Winter melon"
}
item {
name: "/m/027rl48"
id: 544
display_name: "Ladle"
}
item {
name: "/m/01kb5b"
id: 545
display_name: "Flashlight"
}
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/transforms | transforms | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from .one_hot_encoding import OneHotEncoding
|
PyTorch/SpeechSynthesis/Tacotron2/waveglow | waveglow | arg_parser | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
def waveglow_parser(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help)
# misc parameters
parser.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
# glow parameters
parser.add_argument('--flows', default=12, type=int,
help='Number of steps of flow')
parser.add_argument('--groups', default=8, type=int,
help='Number of samples in a group processed by the steps of flow')
parser.add_argument('--early-every', default=4, type=int,
help='Determines how often (i.e., after how many coupling layers) \
a number of channels (defined by --early-size parameter) are output\
to the loss function')
parser.add_argument('--early-size', default=2, type=int,
help='Number of channels output to the loss function')
parser.add_argument('--sigma', default=1.0, type=float,
help='Standard deviation used for sampling from Gaussian')
parser.add_argument('--segment-length', default=4000, type=int,
help='Segment length (audio samples) processed per iteration')
# wavenet parameters
wavenet = parser.add_argument_group('WaveNet parameters')
wavenet.add_argument('--wn-kernel-size', default=3, type=int,
help='Kernel size for dilated convolution in the affine coupling layer (WN)')
wavenet.add_argument('--wn-channels', default=512, type=int,
help='Number of channels in WN')
wavenet.add_argument('--wn-layers', default=8, type=int,
help='Number of layers in WN')
return parser
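# Minimal usage sketch (illustrative only, not part of the original training
# pipeline). The parent parser is assumed to be an empty ArgumentParser created
# with add_help=False, as argparse requires for parsers passed via `parents`.
if __name__ == '__main__':
    _parent = argparse.ArgumentParser(add_help=False)
    _args = waveglow_parser(_parent, add_help=True).parse_args()
    print(_args)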
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers | layers | position_embedding_test | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras-based positional embedding layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.layers import position_embedding
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class PositionEmbeddingLayerTest(keras_parameterized.TestCase):
def test_static_layer_output_shape(self):
test_layer = position_embedding.PositionEmbedding()
# Create a 3-dimensional input (the first dimension is implicit).
sequence_length = 21
width = 30
input_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(input_tensor)
# When using static positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions save batch.
expected_output_shape = [1, sequence_length, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
# The default output dtype for this layer should be tf.float32.
self.assertEqual(tf.float32, output_tensor.dtype)
def test_float16_dtype(self):
test_layer = position_embedding.PositionEmbedding(dtype="float16")
# Create a 3-dimensional input (the first dimension is implicit).
sequence_length = 21
width = 30
input_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(input_tensor)
# When using static positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions save batch.
expected_output_shape = [1, sequence_length, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
# The output dtype for this layer should be tf.float16, matching the dtype the layer was built with.
self.assertEqual(tf.float16, output_tensor.dtype)
def test_dynamic_layer_output_shape(self):
max_sequence_length = 40
test_layer = position_embedding.PositionEmbedding(
use_dynamic_slicing=True, max_sequence_length=max_sequence_length)
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
# When using dynamic positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions - but may be None if
# the input shape is None there.
expected_output_shape = [1, None, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
def test_dynamic_layer_slicing(self):
max_sequence_length = 40
test_layer = position_embedding.PositionEmbedding(
use_dynamic_slicing=True, max_sequence_length=max_sequence_length)
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
# Create input data that is shorter than max_sequence_length, which should
# trigger a down-slice.
input_length = 17
# Note: This test explicitly uses a batch size of 1. This is to get around
# Keras' restriction on Model invocations: inputs are expected to have the
# same batch cardinality as outputs. In practice, this layer should be used
# inside a model, where it can be projected when added to another tensor.
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
if __name__ == "__main__":
tf.test.main()
|
PyTorch/LanguageModeling/BERT/lamb_amp_opt/csrc | csrc | multi_tensor_l2norm_kernel | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
#include <c10/cuda/CUDAGuard.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename T>
__device__ __forceinline__ bool is_aligned(T* p){
return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
template<typename x_t>
struct L2NormFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<1>& tl,
float* output,
float* output_per_tensor,
bool per_tensor,
int max_chunks_per_tensor)
{
if (*noop_gmem) {
return;
}
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
x_t* x = (x_t*)tl.addresses[0][tensor_loc];
x += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
__shared__ float s_vals[512];
float vals[ILP]; // = {0}; // this probably works too but I want to be sure...
x_t r_x[ILP];
for(int i = 0; i < ILP; i++)
{
vals[i] = 0.f;
r_x[i] = 0;
}
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x))
{
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(r_x, x, 0 , i_start);
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
float next = static_cast<float>(r_x[ii]);
vals[ii] += next*next;
}
}
}
else
{
for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
float next = static_cast<float>(x[i]);
vals[ii] += next*next;
}
}
}
}
float val = 0.f;
for(int i = 0; i < ILP; i++)
val += vals[i];
float final = reduce_block_into_lanes(s_vals, val);
if(threadIdx.x == 0)
{
if(!isfinite(final))
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
output[blockIdx.x] += final;
if(per_tensor)
output_per_tensor[(tl.start_tensor_this_launch + tensor_loc)*max_chunks_per_tensor + chunk_idx] = final;
}
}
};
__global__ void cleanup(
float* output,
float* output_per_tensor,
float* ret,
float* ret_per_tensor,
bool per_tensor,
int max_chunks_per_tensor,
volatile int* noop_gmem)
{
if (*noop_gmem) {
return;
}
__shared__ float vals[512];
if(blockIdx.x == 0)
{
float val = 0;
if(threadIdx.x < 320)
val = output[threadIdx.x];
float final = reduce_block_into_lanes(vals, val);
if(threadIdx.x == 0)
*ret = sqrt(final);
}
if(per_tensor)
{
float* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor;
float val = 0;
for(int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x)
val += output_this_tensor[i];
float final = reduce_block_into_lanes(vals, val);
if(threadIdx.x == 0)
ret_per_tensor[blockIdx.x] = sqrt(final);
}
}
std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::optional<bool> per_tensor_python)
{
bool per_tensor = per_tensor_python.has_value() ? per_tensor_python.value() : false;
auto float_options = tensor_lists[0][0].options().dtype(at::kFloat);
auto output = at::zeros({320}, float_options);
at::Tensor output_per_tensor;
at::Tensor ret_per_tensor;
int ntensors = tensor_lists[0].size();
int max_chunks_per_tensor = -1;
if(per_tensor)
{
for(int t = 0; t < ntensors; t++)
{
int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1)/chunk_size;
if(max_chunks_this_tensor > max_chunks_per_tensor)
max_chunks_per_tensor = max_chunks_this_tensor;
}
output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, float_options);
ret_per_tensor = at::empty({ntensors}, float_options);
}
else
{
ret_per_tensor = at::empty({0}, float_options);
}
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda",
multi_tensor_apply<1>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
L2NormFunctor<scalar_t_0>(),
output.data_ptr<float>(),
per_tensor ? output_per_tensor.data_ptr<float>() : nullptr,
per_tensor,
max_chunks_per_tensor);)
AT_CUDA_CHECK(cudaGetLastError());
// AT_CUDA_CHECK(cudaDeviceSynchronize());
// This involves one more small kernel launch, but it will be negligible end to end.
// I could get rid of these by hacking the functor + multi tensor harness with persistence
// logic, but keeping it simple for now
auto ret = at::empty({1}, output.options());
const at::cuda::OptionalCUDAGuard device_guard(device_of(output));
auto stream = at::cuda::getCurrentCUDAStream();
cleanup<<<per_tensor ? ntensors : 1, 512, 0, stream>>>(
output.data_ptr<float>(),
per_tensor ? output_per_tensor.data_ptr<float>() : nullptr,
ret.data_ptr<float>(),
per_tensor ? ret_per_tensor.data_ptr<float>() : nullptr,
per_tensor,
max_chunks_per_tensor, noop_flag.data_ptr<int>());
return std::tuple<at::Tensor, at::Tensor>(ret, ret_per_tensor);
}
|
PyTorch/Classification/ConvNets/image_classification/models | models | efficientnet | import argparse
import random
import math
import warnings
from typing import List, Any, Optional
from collections import namedtuple, OrderedDict
from dataclasses import dataclass, replace
import torch
from torch import nn
from functools import partial
try:
from pytorch_quantization import nn as quant_nn
from ..quantization import switch_on_quantization
except ImportError as e:
warnings.warn(
"pytorch_quantization module not found, quantization will not be available"
)
quant_nn = None
import contextlib
@contextlib.contextmanager
def switch_on_quantization(do_quantization=False):
assert not do_quantization, "quantization is not available"
try:
yield
finally:
pass
from .common import (
SequentialSqueezeAndExcitation,
SequentialSqueezeAndExcitationTRT,
LayerBuilder,
StochasticDepthResidual,
Flatten,
)
from .model import (
Model,
ModelParams,
ModelArch,
OptimizerParams,
create_entrypoint,
EntryPoint,
)
# EffNetArch {{{
@dataclass
class EffNetArch(ModelArch):
block: Any
stem_channels: int
feature_channels: int
kernel: List[int]
stride: List[int]
num_repeat: List[int]
expansion: List[int]
channels: List[int]
default_image_size: int
squeeze_excitation_ratio: float = 0.25
def enumerate(self):
return enumerate(
zip(
self.kernel, self.stride, self.num_repeat, self.expansion, self.channels
)
)
def num_layers(self):
_f = lambda l: len(set(map(len, l)))
l = [self.kernel, self.stride, self.num_repeat, self.expansion, self.channels]
assert _f(l) == 1
return len(self.kernel)
@staticmethod
def _scale_width(width_coeff, divisor=8):
def _sw(num_channels):
num_channels *= width_coeff
# Rounding should not go down by more than 10%
rounded_num_channels = max(
divisor, int(num_channels + divisor / 2) // divisor * divisor
)
if rounded_num_channels < 0.9 * num_channels:
rounded_num_channels += divisor
return rounded_num_channels
return _sw
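# Worked example (illustrative): with width_coeff=1.4 and the default divisor of 8,
# _sw(32) computes 32 * 1.4 = 44.8, rounds to the nearest multiple of 8 (48), and
# since 48 >= 0.9 * 44.8 no divisor correction is needed, so the stem grows from
# 32 to 48 channels -- matching how effnet_b4_layers is derived from effnet_b0_layers below.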
@staticmethod
def _scale_depth(depth_coeff):
def _sd(num_repeat):
return int(math.ceil(num_repeat * depth_coeff))
return _sd
def scale(self, wc, dc, dis, divisor=8) -> "EffNetArch":
sw = EffNetArch._scale_width(wc, divisor=divisor)
sd = EffNetArch._scale_depth(dc)
return EffNetArch(
block=self.block,
stem_channels=sw(self.stem_channels),
feature_channels=sw(self.feature_channels),
kernel=self.kernel,
stride=self.stride,
num_repeat=list(map(sd, self.num_repeat)),
expansion=self.expansion,
channels=list(map(sw, self.channels)),
default_image_size=dis,
squeeze_excitation_ratio=self.squeeze_excitation_ratio,
)
# }}}
# EffNetParams {{{
@dataclass
class EffNetParams(ModelParams):
dropout: float
num_classes: int = 1000
activation: str = "silu"
conv_init: str = "fan_in"
bn_momentum: float = 1 - 0.99
bn_epsilon: float = 1e-3
survival_prob: float = 1
quantized: bool = False
trt: bool = False
def parser(self, name):
p = super().parser(name)
p.add_argument(
"--num_classes",
metavar="N",
default=self.num_classes,
type=int,
help="number of classes",
)
p.add_argument(
"--conv_init",
default=self.conv_init,
choices=["fan_in", "fan_out"],
type=str,
help="initialization mode for convolutional layers, see https://pytorch.org/docs/stable/nn.init.html#torch.nn.init.kaiming_normal_",
)
p.add_argument(
"--bn_momentum",
default=self.bn_momentum,
type=float,
help="Batch Norm momentum",
)
p.add_argument(
"--bn_epsilon",
default=self.bn_epsilon,
type=float,
help="Batch Norm epsilon",
)
p.add_argument(
"--survival_prob",
default=self.survival_prob,
type=float,
help="Survival probability for stochastic depth",
)
p.add_argument(
"--dropout", default=self.dropout, type=float, help="Dropout drop prob"
)
p.add_argument("--trt", metavar="True|False", default=self.trt, type=bool)
return p
# }}}
class EfficientNet(nn.Module):
def __init__(
self,
arch: EffNetArch,
dropout: float,
num_classes: int = 1000,
activation: str = "silu",
conv_init: str = "fan_in",
bn_momentum: float = 1 - 0.99,
bn_epsilon: float = 1e-3,
survival_prob: float = 1,
quantized: bool = False,
trt: bool = False,
):
self.quantized = quantized
with switch_on_quantization(self.quantized):
super(EfficientNet, self).__init__()
self.arch = arch
self.num_layers = arch.num_layers()
self.num_blocks = sum(arch.num_repeat)
self.survival_prob = survival_prob
self.builder = LayerBuilder(
LayerBuilder.Config(
activation=activation,
conv_init=conv_init,
bn_momentum=bn_momentum,
bn_epsilon=bn_epsilon,
)
)
self.stem = self._make_stem(arch.stem_channels)
out_channels = arch.stem_channels
plc = 0
layers = []
for i, (k, s, r, e, c) in arch.enumerate():
layer, out_channels = self._make_layer(
block=arch.block,
kernel_size=k,
stride=s,
num_repeat=r,
expansion=e,
in_channels=out_channels,
out_channels=c,
squeeze_excitation_ratio=arch.squeeze_excitation_ratio,
prev_layer_count=plc,
trt=trt,
)
plc = plc + r
layers.append(layer)
self.layers = nn.Sequential(*layers)
self.features = self._make_features(out_channels, arch.feature_channels)
self.classifier = self._make_classifier(
arch.feature_channels, num_classes, dropout
)
def forward(self, x):
x = self.stem(x)
x = self.layers(x)
x = self.features(x)
x = self.classifier(x)
return x
def extract_features(self, x, layers=None):
if layers is None:
layers = [f"layer{i+1}" for i in range(self.num_layers)] + [
"features",
"classifier",
]
run = [
i
for i in range(self.num_layers)
if "classifier" in layers
or "features" in layers
or any([f"layer{j+1}" in layers for j in range(i, self.num_layers)])
]
output = {}
x = self.stem(x)
for l in run:
fn = self.layers[l]
x = fn(x)
if f"layer{l+1}" in layers:
output[f"layer{l+1}"] = x
if "features" in layers or "classifier" in layers:
x = self.features(x)
if "features" in layers:
output["features"] = x
if "classifier" in layers:
output["classifier"] = self.classifier(x)
return output
# helper functions {{{
def _make_stem(self, stem_width):
return nn.Sequential(
OrderedDict(
[
("conv", self.builder.conv3x3(3, stem_width, stride=2)),
("bn", self.builder.batchnorm(stem_width)),
("activation", self.builder.activation()),
]
)
)
def _get_survival_prob(self, block_id):
drop_rate = 1.0 - self.survival_prob
sp = 1.0 - drop_rate * float(block_id) / self.num_blocks
return sp
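# Illustrative example: with survival_prob=0.8 (as in the B4 configs below) and
# num_blocks=32, block 0 keeps sp = 1.0 while block 16 gets sp = 1 - 0.2 * 16 / 32 = 0.9,
# i.e. the stochastic-depth drop probability grows linearly with the block index.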
def _make_features(self, in_channels, num_features):
return nn.Sequential(
OrderedDict(
[
("conv", self.builder.conv1x1(in_channels, num_features)),
("bn", self.builder.batchnorm(num_features)),
("activation", self.builder.activation()),
]
)
)
def _make_classifier(self, num_features, num_classes, dropout):
return nn.Sequential(
OrderedDict(
[
("pooling", nn.AdaptiveAvgPool2d(1)),
("squeeze", Flatten()),
("dropout", nn.Dropout(dropout)),
("fc", nn.Linear(num_features, num_classes)),
]
)
)
def _make_layer(
self,
block,
kernel_size,
stride,
num_repeat,
expansion,
in_channels,
out_channels,
squeeze_excitation_ratio,
prev_layer_count,
trt,
):
layers = []
idx = 0
survival_prob = self._get_survival_prob(idx + prev_layer_count)
blk = block(
self.builder,
kernel_size,
in_channels,
out_channels,
expansion,
stride,
self.arch.squeeze_excitation_ratio,
survival_prob if stride == 1 and in_channels == out_channels else 1.0,
self.quantized,
trt=trt,
)
layers.append((f"block{idx}", blk))
for idx in range(1, num_repeat):
survival_prob = self._get_survival_prob(idx + prev_layer_count)
blk = block(
self.builder,
kernel_size,
out_channels,
out_channels,
expansion,
1, # stride
squeeze_excitation_ratio,
survival_prob,
self.quantized,
trt=trt,
)
layers.append((f"block{idx}", blk))
return nn.Sequential(OrderedDict(layers)), out_channels
def ngc_checkpoint_remap(self, url=None, version=None):
if version is None:
version = url.split("/")[8]
def to_sequential_remap(s):
splited = s.split(".")
if splited[0].startswith("layer"):
return ".".join(
["layers." + str(int(splited[0][len("layer") :]) - 1)] + splited[1:]
)
else:
return s
def no_remap(s):
return s
return {"20.12.0": to_sequential_remap, "21.03.0": to_sequential_remap}.get(
version, no_remap
)
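# Illustrative example: to_sequential_remap("layer3.block0.depsep.conv.weight")
# returns "layers.2.block0.depsep.conv.weight", mapping the old per-layer attribute
# names onto the nn.Sequential "layers" container used by the current model definition.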
# }}}
# MBConvBlock {{{
class MBConvBlock(nn.Module):
__constants__ = ["quantized"]
def __init__(
self,
builder: LayerBuilder,
depsep_kernel_size: int,
in_channels: int,
out_channels: int,
expand_ratio: int,
stride: int,
squeeze_excitation_ratio: float,
squeeze_hidden=False,
survival_prob: float = 1.0,
quantized: bool = False,
trt: bool = False,
):
super().__init__()
self.quantized = quantized
self.residual = stride == 1 and in_channels == out_channels
hidden_dim = in_channels * expand_ratio
squeeze_base = hidden_dim if squeeze_hidden else in_channels
squeeze_dim = max(1, int(squeeze_base * squeeze_excitation_ratio))
self.expand = (
None
if in_channels == hidden_dim
else builder.conv1x1(in_channels, hidden_dim, bn=True, act=True)
)
self.depsep = builder.convDepSep(
depsep_kernel_size, hidden_dim, hidden_dim, stride, bn=True, act=True
)
if trt or self.quantized:
# Need TRT mode for quantized in order to automatically insert quantization before pooling
self.se: nn.Module = SequentialSqueezeAndExcitationTRT(
hidden_dim, squeeze_dim, builder.activation(), self.quantized
)
else:
self.se: nn.Module = SequentialSqueezeAndExcitation(
hidden_dim, squeeze_dim, builder.activation(), self.quantized
)
self.proj = builder.conv1x1(hidden_dim, out_channels, bn=True)
if survival_prob == 1.0:
self.residual_add = torch.add
else:
self.residual_add = StochasticDepthResidual(survival_prob=survival_prob)
if self.quantized and self.residual:
assert quant_nn is not None, "pytorch_quantization is not available"
self.residual_quantizer = quant_nn.TensorQuantizer(
quant_nn.QuantConv2d.default_quant_desc_input
) # TODO QuantConv2d ?!?
else:
self.residual_quantizer = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if not self.residual:
return self.proj(
self.se(self.depsep(x if self.expand is None else self.expand(x)))
)
b = self.proj(
self.se(self.depsep(x if self.expand is None else self.expand(x)))
)
if self.quantized:
x = self.residual_quantizer(x)
return self.residual_add(x, b)
def original_mbconv(
builder: LayerBuilder,
depsep_kernel_size: int,
in_channels: int,
out_channels: int,
expand_ratio: int,
stride: int,
squeeze_excitation_ratio: int,
survival_prob: float,
quantized: bool,
trt: bool,
):
return MBConvBlock(
builder,
depsep_kernel_size,
in_channels,
out_channels,
expand_ratio,
stride,
squeeze_excitation_ratio,
squeeze_hidden=False,
survival_prob=survival_prob,
quantized=quantized,
trt=trt,
)
def widese_mbconv(
builder: LayerBuilder,
depsep_kernel_size: int,
in_channels: int,
out_channels: int,
expand_ratio: int,
stride: int,
squeeze_excitation_ratio: int,
survival_prob: float,
quantized: bool,
trt: bool,
):
return MBConvBlock(
builder,
depsep_kernel_size,
in_channels,
out_channels,
expand_ratio,
stride,
squeeze_excitation_ratio,
squeeze_hidden=True,
survival_prob=survival_prob,
quantized=quantized,
trt=trt,
)
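# Note: the two constructors above differ only in squeeze_hidden -- original_mbconv
# sizes the squeeze-and-excitation bottleneck from in_channels, while widese_mbconv
# sizes it from the expanded hidden width (hence the "widese" model variants below).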
# }}}
# EffNet configs {{{
# fmt: off
effnet_b0_layers = EffNetArch(
block = original_mbconv,
stem_channels = 32,
feature_channels=1280,
kernel = [ 3, 3, 5, 3, 5, 5, 3],
stride = [ 1, 2, 2, 2, 1, 2, 1],
num_repeat = [ 1, 2, 2, 3, 3, 4, 1],
expansion = [ 1, 6, 6, 6, 6, 6, 6],
channels = [16, 24, 40, 80, 112, 192, 320],
default_image_size=224,
)
effnet_b1_layers=effnet_b0_layers.scale(wc=1, dc=1.1, dis=240)
effnet_b2_layers=effnet_b0_layers.scale(wc=1.1, dc=1.2, dis=260)
effnet_b3_layers=effnet_b0_layers.scale(wc=1.2, dc=1.4, dis=300)
effnet_b4_layers=effnet_b0_layers.scale(wc=1.4, dc=1.8, dis=380)
effnet_b5_layers=effnet_b0_layers.scale(wc=1.6, dc=2.2, dis=456)
effnet_b6_layers=effnet_b0_layers.scale(wc=1.8, dc=2.6, dis=528)
effnet_b7_layers=effnet_b0_layers.scale(wc=2.0, dc=3.1, dis=600)
urls = {
"efficientnet-b0": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b0_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-b0_210412.pth",
"efficientnet-b4": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b4_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-b4_210412.pth",
"efficientnet-widese-b0": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_widese_b0_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-widese-b0_210412.pth",
"efficientnet-widese-b4": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_widese_b4_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-widese-b4_210412.pth",
"efficientnet-quant-b0": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b0_pyt_qat_ckpt_fp32/versions/21.03.0/files/nvidia-efficientnet-quant-b0-130421.pth",
"efficientnet-quant-b4": "https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b4_pyt_qat_ckpt_fp32/versions/21.03.0/files/nvidia-efficientnet-quant-b4-130421.pth",
}
def _m(*args, **kwargs):
return Model(constructor=EfficientNet, *args, **kwargs)
architectures = {
"efficientnet-b0": _m(arch=effnet_b0_layers, params=EffNetParams(dropout=0.2), checkpoint_url=urls["efficientnet-b0"]),
"efficientnet-b1": _m(arch=effnet_b1_layers, params=EffNetParams(dropout=0.2)),
"efficientnet-b2": _m(arch=effnet_b2_layers, params=EffNetParams(dropout=0.3)),
"efficientnet-b3": _m(arch=effnet_b3_layers, params=EffNetParams(dropout=0.3)),
"efficientnet-b4": _m(arch=effnet_b4_layers, params=EffNetParams(dropout=0.4, survival_prob=0.8), checkpoint_url=urls["efficientnet-b4"]),
"efficientnet-b5": _m(arch=effnet_b5_layers, params=EffNetParams(dropout=0.4)),
"efficientnet-b6": _m(arch=effnet_b6_layers, params=EffNetParams(dropout=0.5)),
"efficientnet-b7": _m(arch=effnet_b7_layers, params=EffNetParams(dropout=0.5)),
"efficientnet-widese-b0": _m(arch=replace(effnet_b0_layers, block=widese_mbconv), params=EffNetParams(dropout=0.2), checkpoint_url=urls["efficientnet-widese-b0"]),
"efficientnet-widese-b1": _m(arch=replace(effnet_b1_layers, block=widese_mbconv), params=EffNetParams(dropout=0.2)),
"efficientnet-widese-b2": _m(arch=replace(effnet_b2_layers, block=widese_mbconv), params=EffNetParams(dropout=0.3)),
"efficientnet-widese-b3": _m(arch=replace(effnet_b3_layers, block=widese_mbconv), params=EffNetParams(dropout=0.3)),
"efficientnet-widese-b4": _m(arch=replace(effnet_b4_layers, block=widese_mbconv), params=EffNetParams(dropout=0.4, survival_prob=0.8), checkpoint_url=urls["efficientnet-widese-b4"]),
"efficientnet-widese-b5": _m(arch=replace(effnet_b5_layers, block=widese_mbconv), params=EffNetParams(dropout=0.4)),
"efficientnet-widese-b6": _m(arch=replace(effnet_b6_layers, block=widese_mbconv), params=EffNetParams(dropout=0.5)),
"efficientnet-widese-b7": _m(arch=replace(effnet_b7_layers, block=widese_mbconv), params=EffNetParams(dropout=0.5)),
"efficientnet-quant-b0": _m(arch=effnet_b0_layers, params=EffNetParams(dropout=0.2, quantized=True), checkpoint_url=urls["efficientnet-quant-b0"]),
"efficientnet-quant-b1": _m(arch=effnet_b1_layers, params=EffNetParams(dropout=0.2, quantized=True)),
"efficientnet-quant-b2": _m(arch=effnet_b2_layers, params=EffNetParams(dropout=0.3, quantized=True)),
"efficientnet-quant-b3": _m(arch=effnet_b3_layers, params=EffNetParams(dropout=0.3, quantized=True)),
"efficientnet-quant-b4": _m(arch=effnet_b4_layers, params=EffNetParams(dropout=0.4, survival_prob=0.8, quantized=True), checkpoint_url=urls["efficientnet-quant-b4"]),
"efficientnet-quant-b5": _m(arch=effnet_b5_layers, params=EffNetParams(dropout=0.4, quantized=True)),
"efficientnet-quant-b6": _m(arch=effnet_b6_layers, params=EffNetParams(dropout=0.5, quantized=True)),
"efficientnet-quant-b7": _m(arch=effnet_b7_layers, params=EffNetParams(dropout=0.5, quantized=True)),
}
# fmt: on
# }}}
_ce = lambda n: EntryPoint.create(n, architectures[n])
efficientnet_b0 = _ce("efficientnet-b0")
efficientnet_b4 = _ce("efficientnet-b4")
efficientnet_widese_b0 = _ce("efficientnet-widese-b0")
efficientnet_widese_b4 = _ce("efficientnet-widese-b4")
efficientnet_quant_b0 = _ce("efficientnet-quant-b0")
efficientnet_quant_b4 = _ce("efficientnet-quant-b4")
|
PyTorch/SpeechSynthesis/FastPitch/triton | triton | README | # Deploying the FastPitch model on Triton Inference Server
This folder contains instructions for deployment to run inference
on Triton Inference Server as well as a detailed performance analysis.
The purpose of this document is to help you with achieving
the best inference performance.
## Table of contents
- [Solution overview](#solution-overview)
- [Introduction](#introduction)
- [Deployment process](#deployment-process)
- [Setup](#setup)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
- [Prepare configuration](#prepare-configuration)
- [Latency explanation](#latency-explanation)
- [Performance](#performance)
- [Offline scenario](#offline-scenario)
- [Offline: NVIDIA A40 with FP16](#offline-nvidia-a40-with-fp16)
- [Offline: NVIDIA A40 with FP32](#offline-nvidia-a40-with-fp32)
- [Offline: NVIDIA DGX A100 (1x A100 80GB) with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-with-fp16)
- [Offline: NVIDIA DGX A100 (1x A100 80GB) with FP32](#offline-nvidia-dgx-a100-1x-a100-80gb-with-fp32)
- [Offline: NVIDIA DGX-1 (1x V100 32GB) with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-with-fp16)
- [Offline: NVIDIA DGX-1 (1x V100 32GB) with FP32](#offline-nvidia-dgx-1-1x-v100-32gb-with-fp32)
- [Offline: NVIDIA T4 with FP16](#offline-nvidia-t4-with-fp16)
- [Offline: NVIDIA T4 with FP32](#offline-nvidia-t4-with-fp32)
- [Online scenario](#online-scenario)
- [Online: NVIDIA A40 with FP16](#online-nvidia-a40-with-fp16)
- [Online: NVIDIA A40 with FP32](#online-nvidia-a40-with-fp32)
- [Online: NVIDIA DGX A100 (1x A100 80GB) with FP16](#online-nvidia-dgx-a100-1x-a100-80gb-with-fp16)
- [Online: NVIDIA DGX A100 (1x A100 80GB) with FP32](#online-nvidia-dgx-a100-1x-a100-80gb-with-fp32)
- [Online: NVIDIA DGX-1 (1x V100 32GB) with FP16](#online-nvidia-dgx-1-1x-v100-32gb-with-fp16)
- [Online: NVIDIA DGX-1 (1x V100 32GB) with FP32](#online-nvidia-dgx-1-1x-v100-32gb-with-fp32)
- [Online: NVIDIA T4 with FP16](#online-nvidia-t4-with-fp16)
- [Online: NVIDIA T4 with FP32](#online-nvidia-t4-with-fp32)
- [Release Notes](#release-notes)
- [Changelog](#changelog)
- [Known issues](#known-issues)
## Solution overview
### Introduction
The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server)
provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs.
The server provides an inference service via an HTTP or gRPC endpoint,
allowing remote clients to request inferencing for any number of GPU
or CPU models being managed by the server.
This README provides step-by-step deployment instructions for models generated
during training (as described in the [model README](../README.md)).
Additionally, this README provides the corresponding deployment scripts that
ensure optimal GPU utilization during inferencing on Triton Inference Server.
### Deployment process
The deployment process consists of two steps:
1. Conversion. The purpose of conversion is to find the best performing model
format supported by Triton Inference Server.
Triton Inference Server uses a number of runtime backends such as
[TensorRT](https://developer.nvidia.com/tensorrt),
[LibTorch](https://github.com/triton-inference-server/pytorch_backend) and
[ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend)
to support various model types. Refer to
[Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton)
for the list of available backends.
2. Configuration. Model configuration on Triton Inference Server, which generates
necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md).
To run benchmarks measuring the model performance in inference,
perform the following steps:
1. Start the Triton Inference Server.
The Triton Inference Server is started in a (possibly remote) container,
and its gRPC and HTTP/REST API ports are exposed.
2. Run accuracy tests.
Produce results which are tested against given accuracy thresholds.
Refer to step 9 in the [Quick Start Guide](#quick-start-guide).
3. Run performance tests.
Produce latency and throughput results for offline (static batching)
and online (dynamic batching) scenarios.
Refer to steps 11 and 12 in the [Quick Start Guide](#quick-start-guide).
## Setup
Ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PyTorch NGC container 21.02](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch)
* [Triton Inference Server NGC container 21.02](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver)
* [NVIDIA CUDA repository](https://docs.nvidia.com/cuda/archive/11.2.0/index.html) (use CUDA 11.2 or newer)
* [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU
## Quick Start Guide
Running the following scripts will build and launch the container with all required dependencies for native PyTorch as well as Triton Inference Server. This is necessary for running inference and can also be used for data download, processing, and training of the model.
1. Clone the repository.
IMPORTANT: This step is executed on the host computer.
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/SpeechSynthesis/FastPitch
```
1. Set up the environment on the host PC and start Triton Inference Server.
```
source triton/scripts/setup_environment.sh
bash triton/scripts/docker/triton_inference_server.sh
```
1. Build and run a container that extends the NGC PyTorch container with the Triton Inference Server client libraries and dependencies.
```
bash triton/scripts/docker/build.sh
bash triton/scripts/docker/interactive.sh
```
1. Prepare the deployment configuration and create folders in Docker.
IMPORTANT: These and the following commands must be executed in the PyTorch NGC container.
```
source triton/scripts/setup_environment.sh
```
1. Download and pre-process the dataset.
```
bash triton/scripts/download_data.sh
bash triton/scripts/process_dataset.sh
```
1. Set up parameters for deployment.
```
source triton/scripts/setup_parameters.sh
```
1. Convert the model from training to inference format (e.g. TensorRT).
```
python3 triton/convert_model.py \
--input-path ./triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/model \
--output-type ${FORMAT} \
--checkpoint ${CHECKPOINT_DIR}/nvidia_fastpitch_200518.pt \
--onnx-opset 12 \
--model-path triton/model.py \
--output-format ${FORMAT} \
--dataloader triton/dataloader.py \
--dataset-path ${DATASETS_DIR}/LJSpeech-1.1/LJSpeech-1.1_fastpitch \
--batch-size 1 \
--max-batch-size ${MAX_BATCH_SIZE} \
--max-workspace-size 512 \
--precision ${PRECISION} \
--ignore-unknown-parameters
```
1. Configure the model on Triton Inference Server.
Generate the configuration from your model repository.
```
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/model \
--model-format ${CONFIG_FORMAT} \
--model-control-mode ${TRITON_LOAD_MODEL_METHOD} \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--max-batch-size ${MAX_BATCH_SIZE} \
--preferred-batch-sizes ${TRITON_PREFERRED_BATCH_SIZES} \
--max-queue-delay-us ${TRITON_MAX_QUEUE_DELAY} \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
```
1. Run the Triton Inference Server accuracy tests.
```
python3 triton/run_inference_on_triton.py \
--server-url localhost:8001 \
--model-name ${MODEL_NAME} \
--model-version 1 \
--dataloader triton/dataloader.py \
--dataset-path ${DATASETS_DIR}/LJSpeech-1.1/LJSpeech-1.1_fastpitch \
--batch-size ${MAX_BATCH_SIZE} \
--output-dir ${SHARED_DIR}/accuracy_dump
ls ${SHARED_DIR}/accuracy_dump
python3 triton/calculate_metrics.py \
--dump-dir ${SHARED_DIR}/accuracy_dump \
--metrics triton/metrics.py \
--csv ${SHARED_DIR}/accuracy_metrics.csv \
--output-used-for-metrics OUTPUT__0
cat ${SHARED_DIR}/accuracy_metrics.csv
```
1. Prepare performance input.
```
mkdir -p ${SHARED_DIR}/input_data
python triton/prepare_input_data.py \
--dataloader triton/dataloader.py \
--input-data-dir ${SHARED_DIR}/input_data \
--dataset-path ${DATASETS_DIR}/LJSpeech-1.1/LJSpeech-1.1_fastpitch \
--precision ${PRECISION} \
--length ${SEQUENCE_LENGTH}
```
1. Run the Triton Inference Server performance online tests.
We want to maximize throughput within latency budget constraints.
Dynamic batching is a feature of Triton Inference Server that allows
inference requests to be combined by the server, so that a batch is
created dynamically, resulting in a reduced average latency.
In the Triton Inference Server model configuration, you can set the Dynamic Batcher
parameter `max_queue_delay_microseconds` to indicate the maximum amount of time you
are willing to wait for a batch to fill, and `preferred_batch_size` to indicate the
batch sizes the server should prefer to form. The measurements presented below set
the maximum queue delay to a minimal value to achieve the best latency possible
while maintaining good throughput.
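For illustration only, the dynamic batching section of the generated model configuration
(`config.pbtxt`) corresponding to the `TRITON_PREFERRED_BATCH_SIZES` and
`TRITON_MAX_QUEUE_DELAY` values above might look similar to the following
(the actual file is produced by the `model-navigator triton-config-model` step):
```
dynamic_batching {
  preferred_batch_size: [ 4, 8 ]
  max_queue_delay_microseconds: 1
}
```
The command below then runs the online performance test against the deployed model.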
```
python triton/run_online_performance_test_on_triton.py \
--model-name ${MODEL_NAME} \
--input-data ${SHARED_DIR}/input_data \
--input-shape INPUT__0:${SEQUENCE_LENGTH} \
--batch-sizes ${BATCH_SIZE} \
--triton-instances ${TRITON_INSTANCES} \
--number-of-model-instances ${NUMBER_OF_MODEL_INSTANCES} \
--result-path ${SHARED_DIR}/triton_performance_online.csv
```
1. Run the Triton Inference Server performance offline tests.
We want to maximize throughput. This scenario assumes that the data is already
available for inference, or arrives quickly enough to saturate the maximum batch size.
Triton Inference Server supports offline scenarios with static batching, in which
inference requests are served as they are received. The largest throughput
improvements come from increasing the batch size, due to efficiency gains
in the GPU with larger batches.
```
python triton/run_offline_performance_test_on_triton.py \
--model-name ${MODEL_NAME} \
--input-data ${SHARED_DIR}/input_data \
--input-shape INPUT__0:${SEQUENCE_LENGTH} \
--batch-sizes ${BATCH_SIZE} \
--triton-instances ${TRITON_INSTANCES} \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
```
## Advanced
### Prepare configuration
You can use the environment variables to set the parameters of your inference
configuration.
Triton deployment scripts support several inference runtimes listed in the table below:
| Inference runtime | Mnemonic used in scripts |
|-------------------|--------------------------|
| [TorchScript Tracing](https://pytorch.org/docs/stable/jit.html) | `ts-trace` |
| [TorchScript Scripting](https://pytorch.org/docs/stable/jit.html) | `ts-script` |
| [ONNX](https://onnx.ai) | `onnx` |
| [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` |
Example values of some key variables in one configuration:
```
PRECISION="fp16"
FORMAT="ts-trace"
BATCH_SIZE="1, 2, 4, 8"
BACKEND_ACCELERATOR="cuda"
MAX_BATCH_SIZE="8"
NUMBER_OF_MODEL_INSTANCES="2"
TRITON_MAX_QUEUE_DELAY="1"
TRITON_PREFERRED_BATCH_SIZES="4 8"
SEQUENCE_LENGTH="128"
```
### Latency explanation
A typical Triton Inference Server pipeline can be broken down into the following steps:
1. The client serializes the inference request into a message and sends it to
the server (Client Send).
2. The message travels over the network from the client to the server (Network).
3. The message arrives at the server and is deserialized (Server Receive).
4. The request is placed on the queue (Server Queue).
5. The request is removed from the queue and computed (Server Compute).
6. The completed request is serialized in a message and sent back to
the client (Server Send).
7. The completed message then travels over the network from the server
to the client (Network).
8. The completed message is deserialized by the client and processed as
a completed inference request (Client Receive).
Generally, for local clients, steps 1-4 and 6-8 occupy only a small fraction
of the total time compared to step 5. Because backend deep learning
systems like FastPitch are rarely exposed directly to end users, and instead
interface only with local front-end servers, we can consider
all clients to be local.
## Performance
### Offline scenario
This table lists the common variable parameters for all performance measurements:
| Parameter Name | Parameter Value |
|:-----------------------------|:--------------------|
| Model Format | TorchScript, Trace |
| Backend Accelerator | CUDA |
| Max Batch Size | 8 |
| Number of model instances | 2 |
| Triton Max Queue Delay | 1 us |
| Triton Preferred Batch Sizes | 4 8 |
#### Offline: NVIDIA A40 with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA A40
* **Backend:** PyTorch
* **Precision:** FP16
* **Model format:** TorchScript
* **Conversion variant:** Trace
<details>
<summary>
Full tabular data
</summary>
| Precision | Sequence Length | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|------------------:|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP16 | 128 | 1 | 81.8 | 12.828 | 13.384 | 13.493 | 12.22 |
| FP16 | 128 | 2 | 164 | 12.906 | 13.222 | 13.635 | 12.199 |
| FP16 | 128 | 4 | 315.6 | 13.565 | 13.635 | 13.875 | 12.674 |
| FP16 | 128 | 8 | 592.8 | 13.534 | 15.352 | 15.801 | 13.491 |
</details>
#### Offline: NVIDIA A40 with FP32
Our results were obtained using the following configuration:
* **GPU:** NVIDIA A40
* **Backend:** PyTorch
* **Precision:** FP32
* **Model format:** TorchScript
* **Conversion variant:** Trace
<details>
<summary>
Full tabular data
</summary>
| Precision | Sequence Length | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|------------------:|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP32 | 128 | 1 | 83.3 | 12.387 | 12.59 | 12.814 | 11.994 |
| FP32 | 128 | 2 | 197 | 12.058 | 12.418 | 13.14 | 10.151 |
| FP32 | 128 | 4 | 320.8 | 12.474 | 12.527 | 14.722 | 12.476 |
| FP32 | 128 | 8 | 439.2 | 18.546 | 18.578 | 18.63 | 18.204 |
</details>
#### Offline: NVIDIA DGX A100 (1x A100 80GB) with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX A100 (1x A100 80GB)
* **Backend:** PyTorch
* **Precision:** FP16
* **Model format:** TorchScript
* **Conversion variant:** Trace
<details>
<summary>
Full tabular data
</summary>
| Precision | Sequence Length | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|------------------:|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP16 | 128 | 1 | 152.3 | 6.84 | 6.889 | 7.429 | 6.561 |
| FP16 | 128 | 2 | 298.2 | 6.918 | 7.014 | 7.135 | 6.703 |
| FP16 | 128 | 4 | 537.6 | 7.649 | 7.76 | 7.913 | 7.435 |
| FP16 | 128 | 8 | 844 | 9.723 | 9.809 | 10.027 | 9.482 |
</details>
#### Offline: NVIDIA DGX A100 (1x A100 80GB) with FP32
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX A100 (1x A100 80GB)
* **Backend:** PyTorch
* **Precision:** FP32
* **Model format:** TorchScript
* **Conversion variant:** Trace
<details>
<summary>
Full tabular data
</summary>
| Precision | Sequence Length | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|------------------:|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP32 | 128 | 1 | 149.8 | 6.873 | 6.935 | 7.061 | 6.668 |
| FP32 | 128 | 2 | 272.4 | 7.508 | 7.614 | 8.215 | 7.336 |
| FP32 | 128 | 4 | 465.2 | 8.828 | 8.881 | 9.253 | 8.6 |
| FP32 | 128 | 8 | 749.6 | 10.86 | 10.968 | 11.154 | 10.669 |
</details>
#### Offline: NVIDIA DGX-1 (1x V100 32GB) with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX-1 (1x V100 32GB)
* **Backend:** PyTorch
* **Precision:** FP16
* **Model format:** TorchScript
* **Conversion variant:** Trace
<details>
<summary>
Full tabular data
</summary>
| Precision | Sequence Length | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|------------------:|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP16 | 128 | 1 | 101.3 | 10.039 | 10.14 | 10.333 | 9.866 |
| FP16 | 128 | 2 | 199.2 | 10.191 | 10.359 | 10.911 | 10.034 |
| FP16 | 128 | 4 | 349.2 | 11.541 | 11.629 | 11.807 | 11.45 |
| FP16 | 128 | 8 | 567.2 | 14.266 | 14.307 | 14.426 | 14.107 |
</details>
#### Offline: NVIDIA DGX-1 (1x V100 32GB) with FP32
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX-1 (1x V100 32GB)
* **Backend:** PyTorch
* **Precision:** FP32
* **Model format:** TorchScript
* **Conversion variant:** Trace
<details>
<summary>
Full tabular data
</summary>
| Precision | Sequence Length | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|------------------:|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP32 | 128 | 1 | 107.7 | 9.413 | 9.58 | 10.265 | 9.278 |
| FP32 | 128 | 2 | 159 | 12.71 | 12.889 | 13.228 | 12.565 |
| FP32 | 128 | 4 | 205.6 | 19.874 | 19.995 | 20.156 | 19.456 |
| FP32 | 128 | 8 | 248.8 | 32.237 | 32.273 | 32.347 | 32.091 |
</details>
#### Offline: NVIDIA T4 with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA T4
* **Backend:** PyTorch
* **Precision:** FP16
* **Model format:** TorchScript
* **Conversion variant:** Trace
<details>
<summary>
Full tabular data
</summary>
| Precision | Sequence Length | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|------------------:|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP16 | 128 | 1 | 53.7 | 19.583 | 19.746 | 20.223 | 18.631 |
| FP16 | 128 | 2 | 99.6 | 20.385 | 20.585 | 20.835 | 20.078 |
| FP16 | 128 | 4 | 193.6 | 23.293 | 24.649 | 25.708 | 20.656 |
| FP16 | 128 | 8 | 260 | 31.21 | 31.409 | 33.953 | 30.739 |
</details>
#### Offline: NVIDIA T4 with FP32
Our results were obtained using the following configuration:
* **GPU:** NVIDIA T4
* **Backend:** PyTorch
* **Precision:** FP32
* **Model format:** TorchScript
* **Conversion variant:** Trace
<details>
<summary>
Full tabular data
</summary>
| Precision | Sequence Length | Client Batch Size | Inferences/second | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|:------------|------------------:|--------------------:|--------------------:|--------------:|--------------:|--------------:|--------------:|
| FP32 | 128 | 1 | 53.7 | 19.402 | 19.494 | 19.635 | 18.619 |
| FP32 | 128 | 2 | 86.2 | 25.448 | 25.921 | 26.419 | 23.182 |
| FP32 | 128 | 4 | 98.8 | 41.163 | 41.562 | 41.865 | 40.549 |
| FP32 | 128 | 8 | 111.2 | 73.033 | 73.204 | 73.372 | 72.165 |
</details>
### Online scenario
This table lists the common variable parameters for all performance measurements:
| Parameter Name | Parameter Value |
|:-----------------------------|:--------------------|
| Model Format | TorchScript, Trace |
| Backend Accelerator | CUDA |
| Max Batch Size | 8 |
| Number of model instances | 2 |
| Triton Max Queue Delay | 1 us |
| Triton Preferred Batch Sizes | 4 8 |
#### Online: NVIDIA A40 with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA A40
* **Backend:** PyTorch
* **Precision:** FP16
* **Model format:** TorchScript
* **Conversion variant:** Trace

<details>
<summary>
Full tabular data
</summary>
| Sequence Length | Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|------------------:|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 128 | 1 | 82.1 | 0.061 | 0.38 | 0.033 | 0.036 | 11.501 | 0.122 | 0.032 | 12.166 | 12.884 | 13.175 | 13.541 | 12.165 |
| 128 | 2 | 127.6 | 0.062 | 0.361 | 0.02 | 0.066 | 14.944 | 0.195 | 0.033 | 15.246 | 17.223 | 17.546 | 18.699 | 15.681 |
| 128 | 3 | 134.6 | 0.048 | 0.271 | 7.119 | 0.05 | 14.54 | 0.192 | 0.066 | 22.009 | 28.693 | 29.875 | 31.877 | 22.286 |
| 128 | 4 | 173 | 0.063 | 0.336 | 7.278 | 0.062 | 15.053 | 0.258 | 0.072 | 23.099 | 29.053 | 30.21 | 32.361 | 23.122 |
| 128 | 5 | 212.6 | 0.063 | 0.393 | 7.327 | 0.075 | 15.168 | 0.341 | 0.122 | 23.398 | 29.099 | 30.253 | 32.099 | 23.489 |
| 128 | 6 | 246.1 | 0.054 | 0.353 | 7.716 | 0.087 | 15.496 | 0.436 | 0.247 | 24.086 | 30.768 | 31.833 | 33.181 | 24.389 |
| 128 | 7 | 290.9 | 0.06 | 0.437 | 7.405 | 0.094 | 15.207 | 0.566 | 0.293 | 23.754 | 30.664 | 31.577 | 33.009 | 24.062 |
| 128 | 8 | 320.3 | 0.059 | 0.455 | 7.344 | 0.117 | 15.343 | 1.219 | 0.442 | 24.579 | 31.313 | 32.409 | 34.271 | 24.979 |
| 128 | 9 | 344.5 | 0.058 | 0.396 | 7.703 | 0.134 | 16.035 | 1.34 | 0.467 | 25.812 | 31.951 | 33.019 | 34.873 | 26.133 |
| 128 | 10 | 378.8 | 0.058 | 0.517 | 7.795 | 0.137 | 16.05 | 1.343 | 0.465 | 26.106 | 32.899 | 34.166 | 36.33 | 26.365 |
| 128 | 11 | 413.1 | 0.056 | 0.342 | 7.871 | 0.141 | 16.154 | 1.569 | 0.488 | 26.077 | 33.343 | 34.532 | 36.262 | 26.621 |
| 128 | 12 | 427.2 | 0.055 | 0.857 | 8.059 | 0.158 | 16.668 | 1.785 | 0.523 | 28.44 | 34.58 | 36.211 | 37.894 | 28.105 |
| 128 | 13 | 465.1 | 0.054 | 0.558 | 8.185 | 0.157 | 16.614 | 1.835 | 0.55 | 27.839 | 34.834 | 36.023 | 37.601 | 27.953 |
| 128 | 14 | 537.1 | 0.056 | 0.395 | 7.547 | 0.146 | 15.489 | 1.913 | 0.525 | 25.232 | 32.118 | 33.33 | 35.574 | 26.071 |
| 128 | 15 | 536 | 0.054 | 0.382 | 8.166 | 0.174 | 16.504 | 2.122 | 0.555 | 27.507 | 34.662 | 36.181 | 38.592 | 27.957 |
| 128 | 16 | 560.8 | 0.055 | 0.472 | 8.434 | 0.176 | 16.377 | 2.446 | 0.601 | 28.267 | 35.102 | 36.282 | 38.229 | 28.561 |
</details>
#### Online: NVIDIA A40 with FP32
Our results were obtained using the following configuration:
* **GPU:** NVIDIA A40
* **Backend:** PyTorch
* **Precision:** FP32
* **Model format:** TorchScript
* **Conversion variant:** Trace

<details>
<summary>
Full tabular data
</summary>
| Sequence Length | Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|------------------:|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 128 | 1 | 110.2 | 0.052 | 0.318 | 0.019 | 0.041 | 8.412 | 0.128 | 0.098 | 9.057 | 9.113 | 9.122 | 9.288 | 9.068 |
| 128 | 2 | 154.8 | 0.045 | 0.229 | 0.015 | 0.063 | 12.179 | 0.24 | 0.136 | 12.601 | 14.375 | 14.896 | 15.36 | 12.907 |
| 128 | 3 | 158.3 | 0.046 | 0.235 | 5.947 | 0.058 | 12.271 | 0.244 | 0.139 | 18.654 | 23.975 | 24.778 | 26.432 | 18.94 |
| 128 | 4 | 201.3 | 0.059 | 0.467 | 5.962 | 0.066 | 12.642 | 0.529 | 0.145 | 19.573 | 24.86 | 25.498 | 27.134 | 19.87 |
| 128 | 5 | 229.8 | 0.061 | 0.554 | 6.339 | 0.078 | 13.62 | 0.924 | 0.176 | 21.27 | 26.668 | 27.417 | 29.052 | 21.752 |
| 128 | 6 | 253.2 | 0.057 | 0.441 | 6.63 | 0.095 | 14.46 | 1.579 | 0.449 | 24.231 | 28.977 | 29.719 | 31.173 | 23.711 |
| 128 | 7 | 283.8 | 0.057 | 0.426 | 6.752 | 0.102 | 14.749 | 2.021 | 0.53 | 24.64 | 29.875 | 30.748 | 32.599 | 24.637 |
| 128 | 8 | 300.9 | 0.056 | 0.604 | 7.057 | 0.113 | 15.442 | 2.634 | 0.669 | 26.929 | 32.007 | 32.902 | 34.674 | 26.575 |
| 128 | 9 | 330.7 | 0.054 | 0.434 | 7.248 | 0.121 | 15.833 | 2.796 | 0.707 | 27.338 | 32.766 | 33.935 | 36.28 | 27.193 |
| 128 | 10 | 327.1 | 0.055 | 0.536 | 8.154 | 0.153 | 17.753 | 3.173 | 0.783 | 30.417 | 37.22 | 38.515 | 40.813 | 30.607 |
| 128 | 11 | 342.8 | 0.054 | 0.601 | 8.563 | 0.16 | 18.398 | 3.472 | 0.832 | 32.205 | 38.823 | 40.226 | 42.314 | 32.08 |
| 128 | 12 | 364.3 | 0.054 | 0.299 | 9.32 | 0.164 | 18.918 | 3.371 | 0.799 | 32.326 | 40.15 | 41.456 | 43.995 | 32.925 |
| 128 | 13 | 397.3 | 0.052 | 0.57 | 8.506 | 0.167 | 17.784 | 4.715 | 0.944 | 33.95 | 39.302 | 40.772 | 44.117 | 32.738 |
| 128 | 14 | 413.5 | 0.051 | 0.562 | 9.554 | 0.174 | 18.423 | 4.132 | 0.973 | 34.27 | 40.553 | 42.599 | 45.688 | 33.869 |
| 128 | 15 | 397.6 | 0.048 | 0.606 | 10.659 | 0.212 | 20.533 | 4.608 | 1.111 | 38.44 | 45.484 | 47.037 | 51.264 | 37.777 |
| 128 | 16 | 411.4 | 0.053 | 0.605 | 11.127 | 0.222 | 20.87 | 4.969 | 1.048 | 40.638 | 47.265 | 48.693 | 51.886 | 38.894 |
</details>
#### Online: NVIDIA DGX A100 (1x A100 80GB) with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX A100 (1x A100 80GB)
* **Backend:** PyTorch
* **Precision:** FP16
* **Model format:** TorchScript
* **Conversion variant:** Trace

<details>
<summary>
Full tabular data
</summary>
| Sequence Length | Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|------------------:|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 128 | 1 | 152.4 | 0.02 | 0.109 | 0.014 | 0.031 | 6.254 | 0.09 | 0.042 | 6.471 | 6.832 | 6.881 | 6.983 | 6.56 |
| 128 | 2 | 209.1 | 0.02 | 0.11 | 0.011 | 0.048 | 9.144 | 0.194 | 0.038 | 9.532 | 9.747 | 9.839 | 10.955 | 9.565 |
| 128 | 3 | 209 | 0.021 | 0.069 | 4.669 | 0.038 | 9.316 | 0.203 | 0.036 | 13.806 | 18.261 | 18.589 | 19.265 | 14.352 |
| 128 | 4 | 268.8 | 0.022 | 0.128 | 4.809 | 0.043 | 9.503 | 0.318 | 0.063 | 14.609 | 19.148 | 19.459 | 21.103 | 14.886 |
| 128 | 5 | 329.3 | 0.024 | 0.071 | 4.884 | 0.053 | 9.631 | 0.462 | 0.061 | 14.759 | 19.328 | 19.901 | 20.689 | 15.186 |
| 128 | 6 | 381.2 | 0.027 | 0.094 | 4.866 | 0.064 | 9.793 | 0.767 | 0.129 | 15.497 | 19.599 | 20.151 | 21.114 | 15.74 |
| 128 | 7 | 437.7 | 0.025 | 0.071 | 5.05 | 0.064 | 9.87 | 0.778 | 0.138 | 15.723 | 19.844 | 20.748 | 21.68 | 15.996 |
| 128 | 8 | 480.5 | 0.025 | 0.211 | 5.163 | 0.073 | 10.019 | 1.006 | 0.158 | 16.31 | 21.126 | 21.547 | 22.021 | 16.655 |
| 128 | 9 | 526.9 | 0.024 | 0.134 | 5.266 | 0.083 | 10.145 | 1.217 | 0.199 | 16.933 | 21.398 | 21.97 | 22.583 | 17.068 |
| 128 | 10 | 574.2 | 0.027 | 0.252 | 5.106 | 0.088 | 10.453 | 1.275 | 0.215 | 17.445 | 20.922 | 22.044 | 23.077 | 17.416 |
| 128 | 11 | 607.3 | 0.026 | 0.233 | 5.498 | 0.095 | 10.596 | 1.46 | 0.224 | 18.007 | 22.761 | 23.277 | 24.159 | 18.132 |
| 128 | 12 | 642.4 | 0.029 | 0.258 | 5.654 | 0.101 | 10.808 | 1.587 | 0.24 | 18.578 | 23.363 | 23.816 | 24.722 | 18.677 |
| 128 | 13 | 661.1 | 0.028 | 0.228 | 5.964 | 0.114 | 11.415 | 1.666 | 0.247 | 19.496 | 24.522 | 25.26 | 26.797 | 19.662 |
| 128 | 14 | 709 | 0.029 | 0.21 | 6.113 | 0.116 | 11.203 | 1.822 | 0.249 | 19.76 | 24.659 | 25.474 | 27.112 | 19.742 |
| 128 | 15 | 738.8 | 0.029 | 0.262 | 6.338 | 0.121 | 11.369 | 1.934 | 0.256 | 20.499 | 25.183 | 25.911 | 26.981 | 20.309 |
| 128 | 16 | 775.8 | 0.027 | 0.294 | 6.272 | 0.128 | 11.568 | 2.042 | 0.28 | 20.766 | 25.316 | 25.918 | 27.265 | 20.611 |
</details>
#### Online: NVIDIA DGX A100 (1x A100 80GB) with FP32
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX A100 (1x A100 80GB)
* **Backend:** PyTorch
* **Precision:** FP32
* **Model format:** TorchScript
* **Conversion variant:** Trace

<details>
<summary>
Full tabular data
</summary>
| Sequence Length | Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|------------------:|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 128 | 1 | 148.4 | 0.02 | 0.098 | 0.014 | 0.032 | 6.374 | 0.125 | 0.07 | 6.68 | 6.951 | 7.019 | 7.139 | 6.733 |
| 128 | 2 | 196.1 | 0.018 | 0.082 | 0.011 | 0.052 | 9.703 | 0.26 | 0.074 | 10.196 | 10.462 | 10.602 | 12.079 | 10.2 |
| 128 | 3 | 203.3 | 0.02 | 0.059 | 4.775 | 0.041 | 9.489 | 0.297 | 0.079 | 14.285 | 19.316 | 19.563 | 20.723 | 14.76 |
| 128 | 4 | 249.6 | 0.02 | 0.16 | 5.045 | 0.047 | 10.157 | 0.476 | 0.111 | 15.581 | 20.396 | 21.039 | 21.506 | 16.016 |
| 128 | 5 | 305.7 | 0.022 | 0.109 | 5.011 | 0.06 | 10.245 | 0.729 | 0.178 | 15.9 | 20.525 | 21.236 | 21.943 | 16.354 |
| 128 | 6 | 351.1 | 0.027 | 0.172 | 5.15 | 0.063 | 10.516 | 0.933 | 0.228 | 16.755 | 20.641 | 22.263 | 23.198 | 17.089 |
| 128 | 7 | 390.1 | 0.026 | 0.187 | 5.398 | 0.069 | 10.909 | 1.089 | 0.271 | 17.749 | 22.145 | 22.984 | 23.545 | 17.949 |
| 128 | 8 | 434.2 | 0.024 | 0.24 | 5.23 | 0.08 | 11.082 | 1.414 | 0.337 | 18.15 | 21.854 | 22.955 | 24.232 | 18.407 |
| 128 | 9 | 459.2 | 0.027 | 0.236 | 5.765 | 0.083 | 11.595 | 1.533 | 0.349 | 19.471 | 23.521 | 24.357 | 25.754 | 19.588 |
| 128 | 10 | 494.5 | 0.027 | 0.282 | 6.032 | 0.097 | 11.604 | 1.768 | 0.409 | 20.057 | 25.18 | 25.611 | 26.491 | 20.219 |
| 128 | 11 | 542.4 | 0.024 | 0.237 | 5.399 | 0.103 | 11.858 | 2.153 | 0.495 | 20.149 | 23.651 | 24.332 | 26.042 | 20.269 |
| 128 | 12 | 563 | 0.027 | 0.302 | 6.266 | 0.111 | 11.918 | 2.183 | 0.486 | 21.361 | 26.142 | 26.604 | 28.143 | 21.293 |
| 128 | 13 | 597.9 | 0.028 | 0.152 | 6.492 | 0.118 | 12.156 | 2.274 | 0.512 | 21.719 | 26.516 | 27.27 | 28.705 | 21.732 |
| 128 | 14 | 619.4 | 0.026 | 0.303 | 6.576 | 0.126 | 12.524 | 2.498 | 0.557 | 22.577 | 27.346 | 27.928 | 29.136 | 22.61 |
| 128 | 15 | 657 | 0.024 | 0.19 | 6.529 | 0.132 | 12.703 | 2.66 | 0.602 | 22.774 | 27.187 | 28.158 | 29.452 | 22.84 |
| 128 | 16 | 674.9 | 0.028 | 0.266 | 7.032 | 0.14 | 12.847 | 2.792 | 0.584 | 23.905 | 29.061 | 29.839 | 31.466 | 23.689 |
</details>
#### Online: NVIDIA DGX-1 (1x V100 32GB) with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX-1 (1x V100 32GB)
* **Backend:** PyTorch
* **Precision:** FP16
* **Model format:** TorchScript
* **Conversion variant:** Trace

<details>
<summary>
Full tabular data
</summary>
| Sequence Length | Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|------------------:|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 128 | 1 | 100.5 | 0.043 | 0.271 | 0.043 | 0.039 | 9.408 | 0.108 | 0.03 | 9.879 | 10.247 | 10.329 | 10.592 | 9.942 |
| 128 | 2 | 151.5 | 0.044 | 0.3 | 0.048 | 0.067 | 12.475 | 0.238 | 0.034 | 12.972 | 14.525 | 15.161 | 15.692 | 13.206 |
| 128 | 3 | 158.4 | 0.044 | 0.227 | 6.028 | 0.045 | 12.296 | 0.25 | 0.037 | 18.563 | 24.091 | 24.562 | 25.234 | 18.927 |
| 128 | 4 | 205.4 | 0.044 | 0.249 | 6.129 | 0.055 | 12.41 | 0.516 | 0.067 | 18.767 | 25.126 | 25.524 | 26.199 | 19.47 |
| 128 | 5 | 242.4 | 0.044 | 0.308 | 6.384 | 0.065 | 12.824 | 0.888 | 0.11 | 20.052 | 26.303 | 26.858 | 27.476 | 20.623 |
| 128 | 6 | 279.6 | 0.044 | 0.301 | 6.585 | 0.075 | 13.074 | 1.237 | 0.14 | 20.76 | 27.575 | 28.037 | 28.974 | 21.456 |
| 128 | 7 | 314 | 0.046 | 0.269 | 6.844 | 0.08 | 13.385 | 1.48 | 0.196 | 21.705 | 28.573 | 29.121 | 29.847 | 22.3 |
| 128 | 8 | 342.8 | 0.047 | 0.452 | 6.695 | 0.097 | 13.94 | 1.826 | 0.26 | 23.164 | 29.564 | 30.467 | 31.278 | 23.317 |
| 128 | 9 | 364.6 | 0.047 | 0.375 | 7.022 | 0.103 | 14.39 | 2.373 | 0.347 | 24.599 | 31.093 | 31.868 | 32.917 | 24.657 |
| 128 | 10 | 389.3 | 0.048 | 0.448 | 7.375 | 0.115 | 14.873 | 2.477 | 0.345 | 25.412 | 31.847 | 32.733 | 34.499 | 25.681 |
| 128 | 11 | 411.3 | 0.047 | 0.466 | 7.65 | 0.125 | 15.464 | 2.582 | 0.38 | 26.432 | 33.057 | 34.029 | 36.509 | 26.714 |
| 128 | 12 | 439.7 | 0.047 | 0.546 | 8.002 | 0.125 | 15.342 | 2.873 | 0.363 | 27.282 | 33.765 | 34.579 | 36.181 | 27.298 |
| 128 | 13 | 458.6 | 0.049 | 0.46 | 8.421 | 0.139 | 15.689 | 3.173 | 0.402 | 28.226 | 34.756 | 35.961 | 38.42 | 28.333 |
| 128 | 14 | 479.8 | 0.048 | 0.528 | 8.631 | 0.144 | 16.278 | 3.124 | 0.421 | 28.925 | 35.885 | 37.331 | 39.311 | 29.174 |
| 128 | 15 | 494.2 | 0.048 | 0.488 | 9.049 | 0.147 | 16.642 | 3.558 | 0.441 | 30.541 | 37.113 | 38.568 | 40.605 | 30.373 |
| 128 | 16 | 516.9 | 0.049 | 0.61 | 9.469 | 0.166 | 16.669 | 3.601 | 0.409 | 31.962 | 38.323 | 39.16 | 40.616 | 30.973 |
</details>
#### Online: NVIDIA DGX-1 (1x V100 32GB) with FP32
Our results were obtained using the following configuration:
* **GPU:** NVIDIA DGX-1 (1x V100 32GB)
* **Backend:** PyTorch
* **Precision:** FP32
* **Model format:** TorchScript
* **Conversion variant:** Trace

<details>
<summary>
Full tabular data
</summary>
| Sequence Length | Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|------------------:|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 128 | 1 | 110.6 | 0.038 | 0.203 | 0.017 | 0.033 | 7.407 | 1.227 | 0.109 | 8.989 | 9.095 | 9.201 | 10.374 | 9.034 |
| 128 | 2 | 119.4 | 0.048 | 0.284 | 0.055 | 0.204 | 14.442 | 1.613 | 0.099 | 16.705 | 17.275 | 17.478 | 17.934 | 16.745 |
| 128 | 3 | 118.3 | 0.043 | 0.368 | 8.044 | 0.065 | 15.021 | 1.707 | 0.111 | 26.011 | 31.049 | 31.999 | 33.798 | 25.359 |
| 128 | 4 | 140 | 0.042 | 0.278 | 8.922 | 0.077 | 15.948 | 3.114 | 0.17 | 28.949 | 35.762 | 36.454 | 38.914 | 28.551 |
| 128 | 5 | 159.3 | 0.044 | 0.303 | 9.009 | 0.097 | 17.258 | 4.412 | 0.254 | 31.81 | 37.571 | 38.675 | 41.042 | 31.377 |
| 128 | 6 | 165.4 | 0.044 | 0.378 | 9.866 | 0.113 | 20.096 | 5.443 | 0.345 | 37.16 | 43.107 | 45.435 | 52.102 | 36.285 |
| 128 | 7 | 180.8 | 0.045 | 0.308 | 11.011 | 0.147 | 20.175 | 6.605 | 0.388 | 39.446 | 46.791 | 49.684 | 54.777 | 38.679 |
| 128 | 8 | 192.2 | 0.048 | 0.36 | 11.298 | 0.153 | 21.965 | 7.467 | 0.414 | 42.309 | 51.787 | 55.15 | 58.38 | 41.705 |
| 128 | 9 | 200.5 | 0.048 | 0.357 | 12.823 | 0.158 | 23.488 | 7.594 | 0.474 | 45.72 | 53.947 | 55.908 | 61.154 | 44.942 |
| 128 | 10 | 208.7 | 0.047 | 0.421 | 13.27 | 0.162 | 24.334 | 9.03 | 0.6 | 48.705 | 57.995 | 59.473 | 65.057 | 47.864 |
| 128 | 11 | 214.3 | 0.047 | 0.395 | 15.778 | 0.217 | 24.846 | 9.588 | 0.483 | 52.653 | 63.823 | 66.897 | 69.067 | 51.354 |
| 128 | 12 | 215.7 | 0.048 | 0.616 | 15.895 | 0.24 | 25.579 | 12.456 | 0.648 | 56.333 | 63.09 | 64.429 | 74.218 | 55.482 |
| 128 | 13 | 222.5 | 0.048 | 0.397 | 16.294 | 0.24 | 28.246 | 12.469 | 0.645 | 59.08 | 69.552 | 73.32 | 81.029 | 58.339 |
| 128 | 14 | 228.2 | 0.05 | 0.496 | 18.186 | 0.27 | 29.653 | 12.178 | 0.562 | 62.211 | 72.935 | 77.152 | 83.805 | 61.395 |
| 128 | 15 | 234 | 0.05 | 0.418 | 19.624 | 0.317 | 30.497 | 12.504 | 0.569 | 64.758 | 79.884 | 82.316 | 86.467 | 63.979 |
| 128 | 16 | 236 | 0.048 | 0.379 | 21.46 | 0.352 | 30.808 | 14.245 | 0.566 | 69.054 | 82.334 | 87.213 | 94.892 | 67.858 |
</details>
#### Online: NVIDIA T4 with FP16
Our results were obtained using the following configuration:
* **GPU:** NVIDIA T4
* **Backend:** PyTorch
* **Precision:** FP16
* **Model format:** TorchScript
* **Conversion variant:** Trace

<details>
<summary>
Full tabular data
</summary>
| Sequence Length | Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|------------------:|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 128 | 1 | 53.6 | 0.102 | 0.56 | 0.087 | 0.105 | 17.485 | 0.261 | 0.052 | 18.882 | 19.597 | 19.712 | 19.948 | 18.652 |
| 128 | 2 | 129.9 | 0.097 | 0.494 | 0.017 | 0.291 | 12.386 | 2.059 | 0.054 | 15.273 | 16.187 | 16.906 | 21.99 | 15.398 |
| 128 | 3 | 122.3 | 0.098 | 0.506 | 7.577 | 0.07 | 14.428 | 1.796 | 0.049 | 24.851 | 30.177 | 32.726 | 34.667 | 24.524 |
| 128 | 4 | 141.4 | 0.095 | 0.533 | 8.459 | 0.083 | 16.254 | 2.798 | 0.064 | 28.512 | 34.407 | 36.983 | 40.366 | 28.286 |
| 128 | 5 | 153.1 | 0.097 | 0.613 | 9.277 | 0.095 | 18.608 | 3.878 | 0.114 | 32.559 | 40.931 | 43.966 | 47.479 | 32.682 |
| 128 | 6 | 168.6 | 0.098 | 0.587 | 9.407 | 0.115 | 20.512 | 4.603 | 0.222 | 35.182 | 45.268 | 47.867 | 51.381 | 35.544 |
| 128 | 7 | 184.3 | 0.094 | 0.697 | 9.432 | 0.13 | 21.351 | 6.037 | 0.259 | 36.83 | 50.213 | 54.732 | 62.848 | 38 |
| 128 | 8 | 187 | 0.093 | 0.665 | 11.347 | 0.155 | 23.914 | 6.27 | 0.257 | 41.379 | 57.516 | 62.209 | 66.726 | 42.701 |
| 128 | 9 | 199.5 | 0.094 | 0.775 | 11.261 | 0.163 | 24.54 | 7.938 | 0.385 | 44.016 | 58.752 | 65.017 | 71.694 | 45.156 |
| 128 | 10 | 210.2 | 0.091 | 0.897 | 11.848 | 0.183 | 24.714 | 9.401 | 0.449 | 44.964 | 65.754 | 73.463 | 79.672 | 47.583 |
| 128 | 11 | 217.3 | 0.092 | 0.838 | 12.487 | 0.202 | 25.694 | 10.75 | 0.523 | 47.864 | 69.923 | 77.628 | 85.826 | 50.586 |
| 128 | 12 | 219.6 | 0.09 | 0.771 | 14.799 | 0.206 | 27.126 | 11.095 | 0.495 | 52.728 | 73.813 | 79.036 | 95.389 | 54.582 |
| 128 | 13 | 227.6 | 0.09 | 0.758 | 14.886 | 0.247 | 29.603 | 10.932 | 0.527 | 54.152 | 80.264 | 86.911 | 97.091 | 57.043 |
| 128 | 14 | 235 | 0.093 | 0.64 | 15.942 | 0.26 | 29.521 | 12.755 | 0.519 | 56.969 | 82.85 | 89.545 | 104.486 | 59.73 |
| 128 | 15 | 236.7 | 0.092 | 0.686 | 17.532 | 0.294 | 31.765 | 12.432 | 0.557 | 59.681 | 91.908 | 100.856 | 119.919 | 63.358 |
| 128 | 16 | 242.3 | 0.091 | 0.693 | 16.804 | 0.289 | 32.901 | 14.663 | 0.559 | 63.006 | 96.607 | 99.376 | 108.381 | 66 |
</details>
#### Online: NVIDIA T4 with FP32
Our results were obtained using the following configuration:
* **GPU:** NVIDIA T4
* **Backend:** PyTorch
* **Precision:** FP32
* **Model format:** TorchScript
* **Conversion variant:** Trace

<details>
<summary>
Full tabular data
</summary>
| Sequence Length | Concurrent client requests | Inferences/second | Client Send | Network+server Send/recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | P50 Latency | P90 Latency | P95 Latency | P99 Latency | Avg Latency |
|------------------:|-----------------------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:|
| 128 | 1 | 53.5 | 0.103 | 0.57 | 0.085 | 0.108 | 16.195 | 1.506 | 0.112 | 18.777 | 19.448 | 19.513 | 19.697 | 18.679 |
| 128 | 2 | 78.1 | 0.097 | 0.476 | 0.021 | 0.37 | 19.778 | 4.735 | 0.113 | 19.266 | 48.198 | 50.37 | 51.933 | 25.59 |
| 128 | 3 | 78.9 | 0.092 | 0.511 | 12.039 | 0.126 | 20.597 | 4.568 | 0.104 | 34.628 | 55.275 | 62.943 | 69.63 | 38.037 |
| 128 | 4 | 86.4 | 0.094 | 0.492 | 14.143 | 0.163 | 24.336 | 6.955 | 0.16 | 42.424 | 69.874 | 73.991 | 81.048 | 46.343 |
| 128 | 5 | 87.4 | 0.096 | 0.569 | 16.207 | 0.174 | 28.415 | 11.335 | 0.344 | 52.867 | 85.206 | 92.721 | 106.801 | 57.14 |
| 128 | 6 | 91.5 | 0.094 | 0.644 | 16.815 | 0.207 | 33.454 | 13.923 | 0.471 | 62.079 | 96.925 | 100.852 | 115.651 | 65.608 |
| 128 | 7 | 96.3 | 0.094 | 0.622 | 18.675 | 0.219 | 36.551 | 16.332 | 0.621 | 69.447 | 103.115 | 108.706 | 130.277 | 73.114 |
| 128 | 8 | 95.7 | 0.096 | 0.642 | 18.336 | 0.24 | 41.708 | 21.953 | 0.868 | 79.887 | 113.645 | 117.36 | 145.151 | 83.843 |
| 128 | 9 | 95.2 | 0.095 | 1.01 | 18.682 | 0.249 | 48.823 | 24.68 | 1.059 | 90.799 | 126.669 | 129.592 | 167.038 | 94.598 |
| 128 | 10 | 102.6 | 0.093 | 0.767 | 19.687 | 0.26 | 46.234 | 29.561 | 1.219 | 95.095 | 121.245 | 128.962 | 170.8 | 97.821 |
| 128 | 11 | 104.9 | 0.09 | 0.629 | 23.884 | 0.317 | 49.746 | 29.621 | 1.19 | 101.884 | 133.615 | 141.351 | 186.759 | 105.477 |
| 128 | 12 | 103.8 | 0.093 | 0.427 | 29.107 | 0.375 | 52.974 | 32.07 | 1.145 | 113.659 | 154.182 | 172.429 | 204.619 | 116.191 |
| 128 | 13 | 104 | 0.096 | 0.458 | 30.526 | 0.433 | 58.923 | 33.204 | 1.247 | 120.19 | 174.267 | 189.165 | 216.331 | 124.887 |
| 128 | 14 | 106.1 | 0.091 | 0.401 | 38.587 | 0.443 | 60.805 | 30.968 | 1.081 | 127.547 | 182.202 | 198.122 | 222.625 | 132.376 |
| 128 | 15 | 106.5 | 0.09 | 1.093 | 38.282 | 0.47 | 63.64 | 36.439 | 1.256 | 138.848 | 182.504 | 203.954 | 219.243 | 141.27 |
| 128 | 16 | 104.9 | 0.089 | 0.365 | 41.181 | 0.51 | 68.818 | 39.515 | 1.402 | 148.399 | 223.069 | 230.082 | 257.301 | 151.88 |
</details>
## Release Notes
We’re constantly refining and improving our performance on AI
and HPC workloads even on the same hardware with frequent updates
to our software stack. For our latest performance data refer
to these pages for
[AI](https://developer.nvidia.com/deep-learning-performance-training-inference)
and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.
### Changelog
April 2021
- Initial release
### Known issues
There are no known issues with this model.
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading | dataloading | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
|
PyTorch/SpeechSynthesis/FastPitch/common | common | utils | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# init_weights, get_padding, AttrDict
import ctypes
import glob
import os
import re
import shutil
import warnings
from collections import defaultdict, OrderedDict
from pathlib import Path
from typing import Optional
import librosa
import numpy as np
import torch
import torch.distributed as dist
from scipy.io.wavfile import read
def mask_from_lens(lens, max_len: Optional[int] = None):
if max_len is None:
max_len = lens.max()
ids = torch.arange(0, max_len, device=lens.device, dtype=lens.dtype)
mask = torch.lt(ids, lens.unsqueeze(1))
return mask
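# Illustrative example: mask_from_lens(torch.tensor([2, 3])) returns
# tensor([[True, True, False], [True, True, True]])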
def load_wav(full_path, torch_tensor=False):
import soundfile # flac
data, sampling_rate = soundfile.read(full_path, dtype='int16')
if torch_tensor:
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
else:
return data, sampling_rate
def load_wav_to_torch(full_path, force_sampling_rate=None):
if force_sampling_rate is not None:
data, sampling_rate = librosa.load(full_path, sr=force_sampling_rate)
else:
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def load_filepaths_and_text(dataset_path, fnames, has_speakers=False, split="|"):
def split_line(root, line):
parts = line.strip().split(split)
if has_speakers:
paths, non_paths = parts[:-2], parts[-2:]
else:
paths, non_paths = parts[:-1], parts[-1:]
return tuple(str(Path(root, p)) for p in paths) + tuple(non_paths)
fpaths_and_text = []
for fname in fnames:
with open(fname, encoding='utf-8') as f:
fpaths_and_text += [split_line(dataset_path, line) for line in f]
return fpaths_and_text
def to_gpu(x):
x = x.contiguous()
return x.cuda(non_blocking=True) if torch.cuda.is_available() else x
def l2_promote():
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def prepare_tmp(path):
if path is None:
return
p = Path(path)
if p.is_dir():
warnings.warn(f'{p} exists. Removing...')
shutil.rmtree(p, ignore_errors=True)
p.mkdir(parents=False, exist_ok=False)
def print_once(*msg):
if not dist.is_initialized() or dist.get_rank() == 0:
print(*msg)
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
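# e.g. get_padding(3) == 1 and get_padding(5) == 2, i.e. "same" padding for odd kernel sizes at stride 1.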
def load_pretrained_weights(model, ckpt_fpath):
model = getattr(model, "module", model)
weights = torch.load(ckpt_fpath, map_location="cpu")["state_dict"]
weights = {re.sub("^module.", "", k): v for k, v in weights.items()}
ckpt_emb = weights["encoder.word_emb.weight"]
new_emb = model.state_dict()["encoder.word_emb.weight"]
ckpt_vocab_size = ckpt_emb.size(0)
new_vocab_size = new_emb.size(0)
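# If the vocabulary sizes differ, keep the larger of the two embedding tables and
# overwrite its first min(ckpt, new) rows with the checkpoint values.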
if ckpt_vocab_size != new_vocab_size:
print("WARNING: Resuming from a checkpoint with a different size "
"of embedding table. For best results, extend the vocab "
"and ensure the common symbols' indices match.")
min_len = min(ckpt_vocab_size, new_vocab_size)
weights["encoder.word_emb.weight"] = ckpt_emb if ckpt_vocab_size > new_vocab_size else new_emb
weights["encoder.word_emb.weight"][:min_len] = ckpt_emb[:min_len]
model.load_state_dict(weights)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class DefaultAttrDict(defaultdict):
def __init__(self, *args, **kwargs):
super(DefaultAttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def __getattr__(self, item):
return self[item]
class BenchmarkStats:
""" Tracks statistics used for benchmarking. """
def __init__(self):
self.num_frames = []
self.losses = []
self.mel_losses = []
self.took = []
def update(self, num_frames, losses, mel_losses, took):
self.num_frames.append(num_frames)
self.losses.append(losses)
self.mel_losses.append(mel_losses)
self.took.append(took)
def get(self, n_epochs):
frames_s = sum(self.num_frames[-n_epochs:]) / sum(self.took[-n_epochs:])
return {'frames/s': frames_s,
'loss': np.mean(self.losses[-n_epochs:]),
'mel_loss': np.mean(self.mel_losses[-n_epochs:]),
'took': np.mean(self.took[-n_epochs:]),
'benchmark_epochs_num': n_epochs}
def __len__(self):
return len(self.losses)
class Checkpointer:
def __init__(self, save_dir, keep_milestones=[]):
self.save_dir = save_dir
self.keep_milestones = keep_milestones
find = lambda name: [
(int(re.search(r"_(\d+)\.pt", fn).group(1)), fn)
for fn in glob.glob(f"{save_dir}/{name}_checkpoint_*.pt")]
tracked = sorted(find("FastPitch"), key=lambda t: t[0])
self.tracked = OrderedDict(tracked)
def last_checkpoint(self, output):
def corrupted(fpath):
try:
torch.load(fpath, map_location="cpu")
return False
except:
warnings.warn(f"Cannot load {fpath}")
return True
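# Prefer the newest checkpoint; if it cannot be loaded, fall back to the second newest.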
saved = sorted(
glob.glob(f"{output}/FastPitch_checkpoint_*.pt"),
key=lambda f: int(re.search(r"_(\d+)\.pt", f).group(1)))
if len(saved) >= 1 and not corrupted(saved[-1]):
return saved[-1]
elif len(saved) >= 2:
return saved[-2]
else:
return None
def maybe_load(self, model, optimizer, scaler, train_state, args,
ema_model=None):
assert args.checkpoint_path is None or args.resume is False, (
"Specify a single checkpoint source")
fpath = None
if args.checkpoint_path is not None:
fpath = args.checkpoint_path
self.tracked = OrderedDict() # Do not track/delete prev ckpts
elif args.resume:
fpath = self.last_checkpoint(args.output)
if fpath is None:
return
print_once(f"Loading model and optimizer state from {fpath}")
ckpt = torch.load(fpath, map_location="cpu")
train_state["epoch"] = ckpt["epoch"] + 1
train_state["total_iter"] = ckpt["iteration"]
no_pref = lambda sd: {re.sub("^module.", "", k): v for k, v in sd.items()}
unwrap = lambda m: getattr(m, "module", m)
unwrap(model).load_state_dict(no_pref(ckpt["state_dict"]))
if ema_model is not None:
unwrap(ema_model).load_state_dict(no_pref(ckpt["ema_state_dict"]))
optimizer.load_state_dict(ckpt["optimizer"])
if "scaler" in ckpt:
scaler.load_state_dict(ckpt["scaler"])
else:
warnings.warn("AMP scaler state missing from the checkpoint.")
def maybe_save(self, args, model, ema_model, optimizer, scaler, epoch,
total_iter, config):
intermediate = (args.epochs_per_checkpoint > 0
and epoch % args.epochs_per_checkpoint == 0)
final = epoch == args.epochs
if not intermediate and not final and epoch not in self.keep_milestones:
return
rank = 0
if dist.is_initialized():
dist.barrier()
rank = dist.get_rank()
if rank != 0:
return
unwrap = lambda m: getattr(m, "module", m)
ckpt = {"epoch": epoch,
"iteration": total_iter,
"config": config,
"train_setup": args.__dict__,
"state_dict": unwrap(model).state_dict(),
"optimizer": optimizer.state_dict(),
"scaler": scaler.state_dict()}
if ema_model is not None:
ckpt["ema_state_dict"] = unwrap(ema_model).state_dict()
fpath = Path(args.output, f"FastPitch_checkpoint_{epoch}.pt")
print(f"Saving model and optimizer state at epoch {epoch} to {fpath}")
torch.save(ckpt, fpath)
# Remove old checkpoints; keep milestones and the last two
self.tracked[epoch] = fpath
for epoch in set(list(self.tracked)[:-2]) - set(self.keep_milestones):
try:
os.remove(self.tracked[epoch])
except:
pass
del self.tracked[epoch]
|
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils | utils | proj_adaptive_softmax | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class OptionalParameterList(nn.ParameterList):
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
if p is not None:
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
tie_projs=None, out_layers_weights=None, out_projs=None,
keep_order=False):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
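# The "head" softmax covers the shortlist tokens plus one logit per tail cluster.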
self.tie_projs = tie_projs
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
if not out_layers_weights:
self.out_layers_weights = nn.ParameterList()
else:
self.out_layers_weights = out_layers_weights
self.out_layers_biases = nn.ParameterList()
self.shared_out_projs = out_projs
self.out_projs = OptionalParameterList()
if div_val == 1:
if d_proj != d_embed:
for i in range(len(self.cutoffs)):
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_embed))
)
else:
# self.out_projs = [None] * len(self.cutoffs)
self.out_projs.append(None)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(n_token))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(n_token, d_embed))
)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_emb_i))
)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(r_idx - l_idx))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))
)
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
logit = torch.einsum('bd,de,ev->bv', hidden, proj, weight.t())
if bias is not None:
logit = logit + bias
return logit
def get_out_proj(self, i):
if self.tie_projs[i]:
if len(self.shared_out_projs) == 0:
return None
elif len(self.shared_out_projs) == 1:
return self.shared_out_projs[0]
else:
return self.shared_out_projs[i]
else:
return self.out_projs[i]
def forward(self, hidden, target, keep_order=False):
'''
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
'''
if hidden.size(0) != target.size(0):
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
nll = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero(as_tuple=False).squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob_i[:, -i] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if self.keep_order or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return nll
|
TensorFlow2/Recommendation/SIM/sim/data | data | feature_spec | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict
import numpy as np
import yaml
from sim.data.defaults import (CARDINALITY_SELECTOR, DIMENSIONS_SELECTOR, DTYPE_SELECTOR, LABEL_CHANNEL,
NEGATIVE_HISTORY_CHANNEL, POSITIVE_HISTORY_CHANNEL, TARGET_ITEM_FEATURES_CHANNEL,
TEST_MAPPING, TRAIN_MAPPING, USER_FEATURES_CHANNEL)
class FeatureSpec:
def __init__(self, feature_spec=None, source_spec=None, channel_spec=None, metadata=None, base_directory=None):
self.feature_spec: Dict = feature_spec if feature_spec is not None else {}
self.source_spec: Dict = source_spec if source_spec is not None else {}
self.channel_spec: Dict = channel_spec if channel_spec is not None else {}
self.metadata: Dict = metadata if metadata is not None else {}
self.base_directory: str = base_directory
@classmethod
def from_yaml(cls, path):
with open(path, 'r') as feature_spec_file:
base_directory = os.path.dirname(path)
feature_spec = yaml.safe_load(feature_spec_file)
return cls.from_dict(feature_spec, base_directory=base_directory)
@classmethod
def from_dict(cls, source_dict, base_directory):
return cls(base_directory=base_directory, **source_dict)
def to_dict(self):
attributes_to_dump = ['feature_spec', 'source_spec', 'channel_spec', 'metadata']
return {attr: self.__dict__[attr] for attr in attributes_to_dump}
def to_string(self):
return yaml.dump(self.to_dict())
def to_yaml(self, output_path=None):
if not output_path:
output_path = self.base_directory + '/feature_spec.yaml'
with open(output_path, 'w') as output_file:
print(yaml.dump(self.to_dict()), file=output_file)
@staticmethod
def get_default_features_names(number_of_user_features, number_of_item_features):
user_feature_fstring = 'user_feat_{}'
item_feature_fstring = 'item_feat_{}_{}'
label_feature_name = "label"
item_channels_feature_name_suffixes = ['trgt', 'pos', 'neg']
user_features_names = [user_feature_fstring.format(i) for i in range(number_of_user_features)]
item_features_names = [item_feature_fstring.format(i, channel_suffix)
for channel_suffix in item_channels_feature_name_suffixes
for i in range(number_of_item_features)]
return [label_feature_name] + user_features_names + item_features_names
@staticmethod
def get_default_feature_spec(user_features_cardinalities, item_features_cardinalities, max_seq_len):
number_of_user_features = len(user_features_cardinalities)
number_of_item_features = len(item_features_cardinalities)
all_features_names = FeatureSpec.get_default_features_names(number_of_user_features, number_of_item_features)
user_features = {
f_name: {
DTYPE_SELECTOR: str(np.dtype(np.int64)),
CARDINALITY_SELECTOR: int(cardinality)
} for i, (f_name, cardinality)
in enumerate(zip(all_features_names[1:1+number_of_user_features], user_features_cardinalities))
}
item_channels = [TARGET_ITEM_FEATURES_CHANNEL, POSITIVE_HISTORY_CHANNEL, NEGATIVE_HISTORY_CHANNEL]
item_channels_feature_dicts = [{} for _ in range(len(item_channels))]
item_channels_info = list(zip(item_channels, item_channels_feature_dicts))
for i, cardinality in enumerate(item_features_cardinalities):
for j, (channel, dictionary) in enumerate(item_channels_info):
feature_name = all_features_names[1 + number_of_user_features + i + j * number_of_item_features]
dictionary[feature_name] = {
DTYPE_SELECTOR: str(np.dtype(np.int64)),
CARDINALITY_SELECTOR: int(cardinality)
}
if channel != TARGET_ITEM_FEATURES_CHANNEL:
dictionary[feature_name][DIMENSIONS_SELECTOR] = [max_seq_len]
feature_spec = {
feat_name: feat_spec
for dictionary in [user_features] + item_channels_feature_dicts
for feat_name, feat_spec in dictionary.items()
}
        feature_spec[all_features_names[0]] = {DTYPE_SELECTOR: str(np.dtype(np.bool_))}
channel_spec = {
USER_FEATURES_CHANNEL: list(user_features),
TARGET_ITEM_FEATURES_CHANNEL: list(item_channels_feature_dicts[0]),
POSITIVE_HISTORY_CHANNEL: list(item_channels_feature_dicts[1]),
NEGATIVE_HISTORY_CHANNEL: list(item_channels_feature_dicts[2]),
LABEL_CHANNEL: all_features_names[:1]
}
source_spec = {
split: [
{
'type': 'tfrecord',
'features': all_features_names,
'files': []
}
] for split in [TRAIN_MAPPING, TEST_MAPPING]
}
return FeatureSpec(feature_spec=feature_spec, channel_spec=channel_spec, source_spec=source_spec)
|
PaddlePaddle/Classification/RN50v1.5/utils | utils | cuda_bind | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ctypes
_cuda_home = os.environ.get('CUDA_HOME', '/usr/local/cuda')
_cudart = ctypes.CDLL(os.path.join(_cuda_home, 'lib64/libcudart.so'))
def cuda_profile_start():
_cudart.cudaProfilerStart()
def cuda_profile_stop():
_cudart.cudaProfilerStop()
_nvtx = ctypes.CDLL(os.path.join(_cuda_home, 'lib64/libnvToolsExt.so'))
def cuda_nvtx_range_push(name):
_nvtx.nvtxRangePushW(ctypes.c_wchar_p(name))
def cuda_nvtx_range_pop():
_nvtx.nvtxRangePop()
|
TensorFlow/Detection/SSD/models/research/object_detection/legacy | legacy | trainer_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.trainer."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.legacy import trainer
from object_detection.protos import train_pb2
NUMBER_OF_CLASSES = 2
def get_input_function():
"""A function to get test inputs. Returns an image with one box."""
image = tf.random_uniform([32, 32, 3], dtype=tf.float32)
key = tf.constant('image_000000')
class_label = tf.random_uniform(
[1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32)
box_label = tf.random_uniform(
[1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)
multiclass_scores = tf.random_uniform(
[1, NUMBER_OF_CLASSES], minval=0.4, maxval=0.6, dtype=tf.float32)
return {
fields.InputDataFields.image: image,
fields.InputDataFields.key: key,
fields.InputDataFields.groundtruth_classes: class_label,
fields.InputDataFields.groundtruth_boxes: box_label,
fields.InputDataFields.multiclass_scores: multiclass_scores
}
class FakeDetectionModel(model.DetectionModel):
"""A simple (and poor) DetectionModel for use in test."""
def __init__(self):
super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)
self._classification_loss = losses.WeightedSigmoidClassificationLoss()
self._localization_loss = losses.WeightedSmoothL1LocalizationLoss()
def preprocess(self, inputs):
"""Input preprocessing, resizes images to 28x28.
Args:
inputs: a [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
true_image_shapes = [inputs.shape[:-1].as_list()
for _ in range(inputs.shape[-1])]
return tf.image.resize_images(inputs, [28, 28]), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
"""Prediction tensors from inputs tensor.
Args:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
class_prediction = tf.contrib.layers.fully_connected(
flattened_inputs, self._num_classes)
box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)
return {
'class_predictions_with_background': tf.reshape(
class_prediction, [-1, 1, self._num_classes]),
'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
}
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Convert predicted output tensors to final detections. Unused.
Args:
prediction_dict: a dictionary holding prediction tensors.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**params: Additional keyword arguments for specific implementations of
DetectionModel.
Returns:
detections: a dictionary with empty fields.
"""
return {
'detection_boxes': None,
'detection_scores': None,
'detection_classes': None,
'num_detections': None
}
def loss(self, prediction_dict, true_image_shapes):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding predicted tensors
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
a dictionary mapping strings (loss names) to scalar tensors representing
loss values.
"""
batch_reg_targets = tf.stack(
self.groundtruth_lists(fields.BoxListFields.boxes))
batch_cls_targets = tf.stack(
self.groundtruth_lists(fields.BoxListFields.classes))
weights = tf.constant(
1.0, dtype=tf.float32,
shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])
location_losses = self._localization_loss(
prediction_dict['box_encodings'], batch_reg_targets,
weights=weights)
cls_losses = self._classification_loss(
prediction_dict['class_predictions_with_background'], batch_cls_targets,
weights=weights)
loss_dict = {
'localization_loss': tf.reduce_sum(location_losses),
'classification_loss': tf.reduce_sum(cls_losses),
}
return loss_dict
def regularization_losses(self):
"""Returns a list of regularization losses for this model.
Returns a list of regularization losses for this model that the estimator
needs to use during training/optimization.
Returns:
A list of regularization loss tensors.
"""
pass
def restore_map(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of variables to load from a foreign checkpoint.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
Returns:
A dict mapping variable names to variables.
"""
return {var.op.name: var for var in tf.global_variables()}
def updates(self):
"""Returns a list of update operators for this model.
Returns a list of update operators for this model that must be executed at
each training step. The estimator's train op needs to have a control
dependency on these updates.
Returns:
A list of update operators.
"""
pass
class TrainerTest(tf.test.TestCase):
def test_configure_trainer_and_train_two_steps(self):
train_config_text_proto = """
optimizer {
adam_optimizer {
learning_rate {
constant_learning_rate {
learning_rate: 0.01
}
}
}
}
data_augmentation_options {
random_adjust_brightness {
max_delta: 0.2
}
}
data_augmentation_options {
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
}
num_steps: 2
"""
train_config = train_pb2.TrainConfig()
text_format.Merge(train_config_text_proto, train_config)
train_dir = self.get_temp_dir()
trainer.train(
create_tensor_dict_fn=get_input_function,
create_model_fn=FakeDetectionModel,
train_config=train_config,
master='',
task=0,
num_clones=1,
worker_replicas=1,
clone_on_cpu=True,
ps_tasks=0,
worker_job_name='worker',
is_chief=True,
train_dir=train_dir)
def test_configure_trainer_with_multiclass_scores_and_train_two_steps(self):
train_config_text_proto = """
optimizer {
adam_optimizer {
learning_rate {
constant_learning_rate {
learning_rate: 0.01
}
}
}
}
data_augmentation_options {
random_adjust_brightness {
max_delta: 0.2
}
}
data_augmentation_options {
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
}
num_steps: 2
use_multiclass_scores: true
"""
train_config = train_pb2.TrainConfig()
text_format.Merge(train_config_text_proto, train_config)
train_dir = self.get_temp_dir()
trainer.train(create_tensor_dict_fn=get_input_function,
create_model_fn=FakeDetectionModel,
train_config=train_config,
master='',
task=0,
num_clones=1,
worker_replicas=1,
clone_on_cpu=True,
ps_tasks=0,
worker_job_name='worker',
is_chief=True,
train_dir=train_dir)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Classification/ConvNets/efficientnet/quantization | quantization | DGX1V-32G_efficientnet-quant-b0_FP32 | python ./multiproc.py \
--nproc_per_node 8 \
./quant_main.py /imagenet \
--arch efficientnet-quant-b0 \
--epochs 10 \
-j5 -p 500 \
--data-backend pytorch \
--optimizer sgd \
-b 128 \
--lr 0.0125 \
--momentum 0.89 \
--weight-decay 4.50e-05 \
--lr-schedule cosine \
--pretrained-from-file "${1}" |
PyTorch/SpeechRecognition/Jasper/triton | triton | tensorrt_io_props_fp32 | {"input_shapes": [[-1, 64, -1]], "output_shapes": [[-1, -1, 29]], "input_types": ["TYPE_FP32"], "output_types": ["TYPE_FP32"], "input_names": ["input__0"], "output_names": ["output__0"], "dynamic_axes": {"input__0": [0, 2], "output__0": [0, 1]}, "min_shapes": [[1, 64, 200]], "opt_shapes": [[8, 64, 704]], "max_shapes": [[8, 64, 1680]]}
|
TensorFlow2/Recommendation/SIM/sim/models | models | din_model | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from sim.layers.item_item_interaction import DINActivationUnit, DotItemItemInteraction
from sim.layers.item_sequence_interaction import DINItemSequenceInteractionBlock
from sim.models.sequential_recommender_model import SequentialRecommenderModel
class DINModel(SequentialRecommenderModel):
def __init__(
self,
feature_spec,
mlp_hidden_dims=(200, 80),
embedding_dim=4,
item_item_interaction="dot",
):
super(DINModel, self).__init__(
feature_spec, embedding_dim, mlp_hidden_dims
)
if item_item_interaction == "dot":
item_item_interaction_block = DotItemItemInteraction()
elif item_item_interaction == "activation_unit":
item_item_interaction_block = DINActivationUnit()
self.item_seq_interaction = DINItemSequenceInteractionBlock(
item_item_interaction=item_item_interaction_block
)
@tf.function
def call(
self,
inputs,
training=False
):
user_features = inputs["user_features"]
target_item_features = inputs["target_item_features"]
long_sequence_features = inputs["long_sequence_features"]
short_sequence_features = inputs["short_sequence_features"]
long_sequence_mask = inputs["long_sequence_mask"]
short_sequence_mask = inputs["short_sequence_mask"]
user_embedding = self.embed(user_features)
target_item_embedding = self.embed(target_item_features)
long_sequence_embeddings = self.embed(long_sequence_features)
short_sequence_embeddings = self.embed(short_sequence_features)
# Concat over time axis
sequence_embeddings = tf.concat([long_sequence_embeddings, short_sequence_embeddings], axis=1)
mask = tf.concat([long_sequence_mask, short_sequence_mask], axis=1)
sequence_embeddings = sequence_embeddings * tf.expand_dims(
mask, axis=-1
)
item_sequence_interaction_embedding, _ = self.item_seq_interaction(
(target_item_embedding, sequence_embeddings, mask)
)
combined_embeddings = tf.concat([
target_item_embedding, item_sequence_interaction_embedding, user_embedding
], -1)
logits = self.classificationMLP(combined_embeddings, training=training)
return {"logits": logits}
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | preprocessor_builder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for preprocessor_builder."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2
class PreprocessorBuilderTest(tf.test.TestCase):
def assert_dictionary_close(self, dict1, dict2):
"""Helper to check if two dicts with floatst or integers are close."""
self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys()))
for key in dict1:
value = dict1[key]
if isinstance(value, float):
self.assertAlmostEqual(value, dict2[key])
else:
self.assertEqual(value, dict2[key])
def test_build_normalize_image(self):
preprocessor_text_proto = """
normalize_image {
original_minval: 0.0
original_maxval: 255.0
target_minval: -1.0
target_maxval: 1.0
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.normalize_image)
self.assertEqual(args, {
'original_minval': 0.0,
'original_maxval': 255.0,
'target_minval': -1.0,
'target_maxval': 1.0,
})
def test_build_random_horizontal_flip(self):
preprocessor_text_proto = """
random_horizontal_flip {
keypoint_flip_permutation: 1
keypoint_flip_permutation: 0
keypoint_flip_permutation: 2
keypoint_flip_permutation: 3
keypoint_flip_permutation: 5
keypoint_flip_permutation: 4
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_horizontal_flip)
self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4)})
def test_build_random_vertical_flip(self):
preprocessor_text_proto = """
random_vertical_flip {
keypoint_flip_permutation: 1
keypoint_flip_permutation: 0
keypoint_flip_permutation: 2
keypoint_flip_permutation: 3
keypoint_flip_permutation: 5
keypoint_flip_permutation: 4
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_vertical_flip)
self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4)})
def test_build_random_rotation90(self):
preprocessor_text_proto = """
random_rotation90 {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_rotation90)
self.assertEqual(args, {})
def test_build_random_pixel_value_scale(self):
preprocessor_text_proto = """
random_pixel_value_scale {
minval: 0.8
maxval: 1.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_pixel_value_scale)
self.assert_dictionary_close(args, {'minval': 0.8, 'maxval': 1.2})
def test_build_random_image_scale(self):
preprocessor_text_proto = """
random_image_scale {
min_scale_ratio: 0.8
max_scale_ratio: 2.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_image_scale)
self.assert_dictionary_close(args, {'min_scale_ratio': 0.8,
'max_scale_ratio': 2.2})
def test_build_random_rgb_to_gray(self):
preprocessor_text_proto = """
random_rgb_to_gray {
probability: 0.8
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_rgb_to_gray)
self.assert_dictionary_close(args, {'probability': 0.8})
def test_build_random_adjust_brightness(self):
preprocessor_text_proto = """
random_adjust_brightness {
max_delta: 0.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_brightness)
self.assert_dictionary_close(args, {'max_delta': 0.2})
def test_build_random_adjust_contrast(self):
preprocessor_text_proto = """
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_contrast)
self.assert_dictionary_close(args, {'min_delta': 0.7, 'max_delta': 1.1})
def test_build_random_adjust_hue(self):
preprocessor_text_proto = """
random_adjust_hue {
max_delta: 0.01
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_hue)
self.assert_dictionary_close(args, {'max_delta': 0.01})
def test_build_random_adjust_saturation(self):
preprocessor_text_proto = """
random_adjust_saturation {
min_delta: 0.75
max_delta: 1.15
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_saturation)
self.assert_dictionary_close(args, {'min_delta': 0.75, 'max_delta': 1.15})
def test_build_random_distort_color(self):
preprocessor_text_proto = """
random_distort_color {
color_ordering: 1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_distort_color)
self.assertEqual(args, {'color_ordering': 1})
def test_build_random_jitter_boxes(self):
preprocessor_text_proto = """
random_jitter_boxes {
ratio: 0.1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_jitter_boxes)
self.assert_dictionary_close(args, {'ratio': 0.1})
def test_build_random_crop_image(self):
preprocessor_text_proto = """
random_crop_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
})
def test_build_random_pad_image(self):
preprocessor_text_proto = """
random_pad_image {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_pad_image)
self.assertEqual(args, {
'min_image_size': None,
'max_image_size': None,
'pad_color': None,
})
def test_build_random_crop_pad_image(self):
preprocessor_text_proto = """
random_crop_pad_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_pad_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
})
def test_build_random_crop_pad_image_with_optional_parameters(self):
preprocessor_text_proto = """
random_crop_pad_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
min_padded_size_ratio: 0.5
min_padded_size_ratio: 0.75
max_padded_size_ratio: 0.5
max_padded_size_ratio: 0.75
pad_color: 0.5
pad_color: 0.5
pad_color: 1.0
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_pad_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
'min_padded_size_ratio': (0.5, 0.75),
'max_padded_size_ratio': (0.5, 0.75),
'pad_color': (0.5, 0.5, 1.0)
})
def test_build_random_crop_to_aspect_ratio(self):
preprocessor_text_proto = """
random_crop_to_aspect_ratio {
aspect_ratio: 0.85
overlap_thresh: 0.35
clip_boxes: False
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_to_aspect_ratio)
self.assert_dictionary_close(args, {'aspect_ratio': 0.85,
'overlap_thresh': 0.35,
'clip_boxes': False})
def test_build_random_black_patches(self):
preprocessor_text_proto = """
random_black_patches {
max_black_patches: 20
probability: 0.95
size_to_image_ratio: 0.12
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_black_patches)
self.assert_dictionary_close(args, {'max_black_patches': 20,
'probability': 0.95,
'size_to_image_ratio': 0.12})
def test_build_random_resize_method(self):
preprocessor_text_proto = """
random_resize_method {
target_height: 75
target_width: 100
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_resize_method)
self.assert_dictionary_close(args, {'target_size': [75, 100]})
def test_build_scale_boxes_to_pixel_coordinates(self):
preprocessor_text_proto = """
scale_boxes_to_pixel_coordinates {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.scale_boxes_to_pixel_coordinates)
self.assertEqual(args, {})
def test_build_resize_image(self):
preprocessor_text_proto = """
resize_image {
new_height: 75
new_width: 100
method: BICUBIC
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.resize_image)
self.assertEqual(args, {'new_height': 75,
'new_width': 100,
'method': tf.image.ResizeMethod.BICUBIC})
def test_build_rgb_to_gray(self):
preprocessor_text_proto = """
rgb_to_gray {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.rgb_to_gray)
self.assertEqual(args, {})
def test_build_subtract_channel_mean(self):
preprocessor_text_proto = """
subtract_channel_mean {
means: [1.0, 2.0, 3.0]
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.subtract_channel_mean)
self.assertEqual(args, {'means': [1.0, 2.0, 3.0]})
def test_build_ssd_random_crop(self):
preprocessor_text_proto = """
ssd_random_crop {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375]})
def test_build_ssd_random_crop_empty_operations(self):
preprocessor_text_proto = """
ssd_random_crop {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop)
self.assertEqual(args, {})
def test_build_ssd_random_crop_pad(self):
preprocessor_text_proto = """
ssd_random_crop_pad {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
pad_color_r: 0.5
pad_color_g: 0.5
pad_color_b: 0.5
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
pad_color_r: 0.5
pad_color_g: 0.5
pad_color_b: 0.5
}
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop_pad)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375],
'min_padded_size_ratio': [(1.0, 1.0), (1.0, 1.0)],
'max_padded_size_ratio': [(2.0, 2.0), (2.0, 2.0)],
'pad_color': [(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)]})
def test_build_ssd_random_crop_fixed_aspect_ratio(self):
preprocessor_text_proto = """
ssd_random_crop_fixed_aspect_ratio {
operations {
min_object_covered: 0.0
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
aspect_ratio: 0.875
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop_fixed_aspect_ratio)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio': 0.875,
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375]})
def test_build_ssd_random_crop_pad_fixed_aspect_ratio(self):
preprocessor_text_proto = """
ssd_random_crop_pad_fixed_aspect_ratio {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
aspect_ratio: 0.875
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function,
preprocessor.ssd_random_crop_pad_fixed_aspect_ratio)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio': 0.875,
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375],
'min_padded_size_ratio': (1.0, 1.0),
'max_padded_size_ratio': (2.0, 2.0)})
def test_build_normalize_image_convert_class_logits_to_softmax(self):
preprocessor_text_proto = """
convert_class_logits_to_softmax {
temperature: 2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.convert_class_logits_to_softmax)
self.assertEqual(args, {'temperature': 2})
if __name__ == '__main__':
tf.test.main()
|
JAX/Classification/ViT | ViT | README | # ViT on GPUs
Please refer to [Rosetta ViT](https://github.com/NVIDIA/JAX-Toolbox/tree/main/rosetta/rosetta/projects/vit), NVIDIA's project that enables seamless training of LLMs, CV models and multimodal models in JAX, for information about running Vision Transformer models and experiments on GPUs.
|
TensorFlow2/Recommendation/WideAndDeep | WideAndDeep | transcode | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import os
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from data.feature_spec import FeatureSpec, FEATURES_SELECTOR, TYPE_SELECTOR, FILES_SELECTOR
from data.outbrain.defaults import MULTIHOT_CHANNEL, PARQUET_TYPE
def parse_args():
parser = ArgumentParser()
parser.add_argument('--input', type=str, default='',
help='Path to input data directory')
parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
help='Name of the input feature specification file')
parser.add_argument('--output', type=str, default='/data',
help='Path to output data directory')
parser.add_argument('--feature_spec_out', type=str, default='feature_spec.yaml',
help='Name of the output feature specification file')
parser.add_argument('--chunk_size', type=int, default=65536,
help='Number of rows to write out per partition')
parser.add_argument('--minimum_partition_number', type=int, default=8,
help='throw error if each mapping does not produce at least this many partitions')
return parser.parse_args()
def check_only_one_file_per_chunk(feature_spec):
for mapping in feature_spec.source_spec.values():
for chunk in mapping:
chunk_files = chunk[FILES_SELECTOR]
assert len(chunk_files) == 1
assert chunk[TYPE_SELECTOR] == 'csv'
def main():
args = parse_args()
args_output = args.output
args_input = args.input
args_feature_spec_in = args.feature_spec_in
args_feature_spec_out = args.feature_spec_out
batch_size = args.chunk_size
fspec_in_path = os.path.join(args_input, args_feature_spec_in)
fspec_in = FeatureSpec.from_yaml(fspec_in_path)
os.makedirs(args.output, exist_ok=True)
paths_per_mapping = dict()
check_only_one_file_per_chunk(fspec_in)
for mapping_name, mapping in fspec_in.source_spec.items():
paths_per_mapping[mapping_name]=[]
df_iterators = []
for chunk in mapping:
# We checked earlier it's a single file chunk
path_to_load = os.path.join(fspec_in.base_directory, chunk[FILES_SELECTOR][0])
chunk_iterator = pd.read_csv(path_to_load, header=None, chunksize=batch_size, names=chunk[FEATURES_SELECTOR])
df_iterators.append(chunk_iterator)
zipped = zip(*df_iterators)
# writer = None
for chunk_id, chunks in enumerate(zipped):
# chunks is now a list of the chunk_id-th segment of each dataframe iterator and contains all columns
mapping_df = pd.concat(chunks, axis=1) # This takes care of making sure feature names are unique
#transform multihots from strings to objects # TODO: find a better way to do this
multihot_features = fspec_in.get_names_by_channel(MULTIHOT_CHANNEL)
for feature in multihot_features:
mapping_df[feature] = mapping_df[feature].apply(lambda x: np.fromstring(x[1:-1], sep=' ,'))
# prepare path
partition_path = f"{mapping_name}_{chunk_id}.parquet"
paths_per_mapping[mapping_name].append(partition_path)
partition_path_abs = os.path.join(args.output, partition_path)
#write to parquet
mapping_table = pa.Table.from_pandas(mapping_df)
pq.write_table(mapping_table, partition_path_abs)
# Prepare the new feature spec
new_source_spec = {}
old_source_spec = fspec_in.source_spec
for mapping_name in old_source_spec.keys():
#check if we met the required partitions number
min_partitions = args.minimum_partition_number
got_partitions = len(paths_per_mapping[mapping_name])
        assert got_partitions >= min_partitions, f"Not enough partitions generated for mapping: {mapping_name}. Expected at least {min_partitions}, got {got_partitions}"
all_features = []
for chunk in old_source_spec[mapping_name]:
all_features = all_features + chunk[FEATURES_SELECTOR]
new_source_spec[mapping_name] = []
new_source_spec[mapping_name].append({TYPE_SELECTOR: PARQUET_TYPE,
FEATURES_SELECTOR: all_features,
FILES_SELECTOR: paths_per_mapping[mapping_name]})
fspec_out = FeatureSpec(feature_spec=fspec_in.feature_spec, source_spec=new_source_spec,
channel_spec=fspec_in.channel_spec, metadata=fspec_in.metadata)
fspec_out.base_directory = args.output
feature_spec_save_path = os.path.join(args_output, args_feature_spec_out)
fspec_out.to_yaml(output_path=feature_spec_save_path)
if __name__ == '__main__':
main() |
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks | networks | __init__ | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Networks package definition."""
from official.nlp.modeling.networks.albert_transformer_encoder import AlbertTransformerEncoder
from official.nlp.modeling.networks.classification import Classification
from official.nlp.modeling.networks.encoder_scaffold import EncoderScaffold
from official.nlp.modeling.networks.masked_lm import MaskedLM
from official.nlp.modeling.networks.span_labeling import SpanLabeling
from official.nlp.modeling.networks.transformer_encoder import TransformerEncoder
|
TensorFlow/Translation | Translation | README | # Machine Translation
Machine Translation is the task of translating text from one language to another. Simply replacing each word with its equivalent in another language rarely produces a semantically meaningful translation, because that does not account for phrase-level meaning. A good machine translation system therefore needs to model whole phrases or sentences. Neural networks have enabled end-to-end architectures that do exactly this, mapping input text directly to the corresponding output text. A good model should handle challenges such as morphologically rich languages and very large vocabularies while maintaining reasonable training and inference times. This collection contains state-of-the-art models and containers for Machine Translation.
In this collection, we will cover:
- Challenges in Machine Translation
- Model architecture
- Where to get started
---
## Challenges in Machine Translation
Not long ago, translating text from an unfamiliar language was slow and laborious. Word-for-word translation with a simple bilingual dictionary is difficult for two reasons: 1) the user has to know the grammar rules of both languages, and 2) the user must keep the meaning of the entire sentence in mind while translating it.
Today we no longer need to struggle so much: we can translate phrases, sentences, and even long texts simply by pasting them into Google Translate.
A lookup table of stored translations would not work even for short sentences because of the enormous number of possible variations. A seemingly practical alternative is to teach the machine a set of grammar rules and translate according to them, but this is not as easy as it sounds.
Anyone who has tried to learn a foreign language knows that there are always many exceptions to the rules. When we try to capture all of these rules, limitations, and exceptions in a program, translation quality breaks down.
---
## Model architecture
i) Google’s Neural Machine Translation:
Sequence-to-Sequence (seq2seq) models are used for several Natural Language Processing (NLP) tasks, such as text summarization, speech recognition, and nucleotide sequence modeling. In machine translation, the goal is to translate sentences from one language to another.
Here, both the input and the output are sentences. In other words, a sequence of words flows into the network and a sequence of words flows out of it, which is the essence of sequence-to-sequence modeling. The figure below illustrates this technique.

Source - https://developer.nvidia.com/blog/introduction-neural-machine-translation-with-gpus/
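
To make the sequence-to-sequence idea concrete, here is a minimal, self-contained PyTorch sketch of an encoder-decoder pair. It is illustrative only and is not taken from the GNMT implementation in this repository; all class names, sizes, and tensors are made up for the example.

```python
# Minimal seq2seq sketch (illustrative only, not the GNMT code from this repository).
import torch
import torch.nn as nn

class TinyEncoder(nn.Module):
    def __init__(self, vocab_size, hidden=64):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden)
        self.rnn = nn.LSTM(hidden, hidden, batch_first=True)

    def forward(self, src_tokens):
        # src_tokens: [batch, src_len]; the final LSTM state summarizes the sentence
        embedded = self.embed(src_tokens)
        outputs, state = self.rnn(embedded)
        return outputs, state

class TinyDecoder(nn.Module):
    def __init__(self, vocab_size, hidden=64):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden)
        self.rnn = nn.LSTM(hidden, hidden, batch_first=True)
        self.proj = nn.Linear(hidden, vocab_size)

    def forward(self, tgt_tokens, state):
        # tgt_tokens: [batch, tgt_len]; state is carried over from the encoder
        embedded = self.embed(tgt_tokens)
        outputs, state = self.rnn(embedded, state)
        return self.proj(outputs), state

encoder, decoder = TinyEncoder(100), TinyDecoder(100)
src = torch.randint(0, 100, (2, 7))   # a batch of two "sentences"
tgt = torch.randint(0, 100, (2, 5))
_, state = encoder(src)
logits, _ = decoder(tgt, state)       # [2, 5, 100] next-token scores
```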
The GNMT v2 model is similar to the one described in the [Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation](https://arxiv.org/abs/1609.08144) paper.
The most important difference between the two models is in the attention mechanism. In the v2 model, the output of the first LSTM layer of the decoder goes into the attention module; the re-weighted context is then concatenated with the inputs to all subsequent LSTM layers in the decoder at the current time step.

ii) Transformer-based Neural Machine Translation:
The Transformer model uses the standard NMT encoder-decoder architecture. Unlike other NMT models, it uses no recurrent connections and operates on a fixed-size context window. The encoder stack is made up of N identical layers, and each layer is composed of the following sublayers: 1. a self-attention layer and 2. a feed-forward network (two fully connected layers). Like the encoder stack, the decoder stack is made up of N identical layers, each composed of the following sublayers: 1. a self-attention layer, 2. a multi-headed attention layer that combines encoder outputs with the outputs of the previous self-attention layer, and 3. a feed-forward network (two fully connected layers).
The encoder uses self-attention to compute a representation of the input sequence. The decoder generates the output sequence one token at a time, taking the encoder output and the previously generated tokens as inputs. The model also applies embeddings to the input and output tokens and adds a fixed positional encoding, which carries information about the position of each token.

Source - [Attention is all you Need](https://arxiv.org/abs/1706.03762)
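
As a small illustration of the fixed positional encoding, the sketch below implements the sinusoidal formulation from the paper and adds it to a batch of token embeddings. It is a simplified example, not code from the Transformer implementation in this repository.

```python
# Sinusoidal positional encoding sketch (from "Attention Is All You Need").
import math
import torch

def sinusoidal_positional_encoding(max_len, d_model):
    """Returns a [max_len, d_model] tensor of fixed positional encodings."""
    position = torch.arange(max_len, dtype=torch.float).unsqueeze(1)            # [max_len, 1]
    div_term = torch.exp(torch.arange(0, d_model, 2, dtype=torch.float)
                         * (-math.log(10000.0) / d_model))                      # [d_model / 2]
    pe = torch.zeros(max_len, d_model)
    pe[:, 0::2] = torch.sin(position * div_term)   # even dimensions
    pe[:, 1::2] = torch.cos(position * div_term)   # odd dimensions
    return pe

token_embeddings = torch.randn(2, 10, 512)                        # [batch, seq_len, d_model]
token_embeddings = token_embeddings + sinusoidal_positional_encoding(10, 512)
```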
---
## Where to get started
NVIDIA provides Deep Learning Examples for Machine Translation on its GitHub repository. These examples provide you with easy-to-consume and highly optimized scripts for both training and inference. The quick start guide in each repository will help you set up the environment using NGC Docker images, download pre-trained models from NGC, and adapt the model training and inference for your application/use case.
Here are the examples relevant for machine translation, directly from [Deep Learning Examples](https://github.com/NVIDIA/DeepLearningExamples):
1. Machine translation with GNMT using TensorFlow
- [Git repository](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Translation/GNMT)
- Uses TensorFlow 20.06-tf1-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-tensorflow)
2. Machine translation with Transformers using PyTorch
- [Git repository](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Translation/Transformer)
- Uses PyTorch 20.03-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch) |
PyTorch/SpeechRecognition/wav2vec2/common | common | features | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
import librosa
import torch
import torch.nn as nn
class BaseFeatures(nn.Module):
"""Base class for GPU accelerated audio preprocessing."""
__constants__ = ["pad_align", "pad_to_max_duration", "max_len"]
def __init__(self, pad_align, pad_to_max_duration, max_duration,
sample_rate, window_size, window_stride, spec_augment=None,
cutout_augment=None):
super(BaseFeatures, self).__init__()
self.pad_align = pad_align
self.pad_to_max_duration = pad_to_max_duration
self.win_length = int(sample_rate * window_size) # frame size
self.hop_length = int(sample_rate * window_stride)
# Calculate maximum sequence length (# frames)
if pad_to_max_duration:
self.max_len = 1 + math.ceil(
(max_duration * sample_rate - self.win_length) / self.hop_length
)
if spec_augment is not None:
self.spec_augment = SpecAugment(**spec_augment)
else:
self.spec_augment = None
if cutout_augment is not None:
self.cutout_augment = CutoutAugment(**cutout_augment)
else:
self.cutout_augment = None
@torch.no_grad()
def calculate_features(self, audio, audio_lens):
return audio, audio_lens
def __call__(self, audio, audio_lens):
dtype = audio.dtype
audio = audio.float()
feat, feat_lens = self.calculate_features(audio, audio_lens)
feat = self.apply_padding(feat)
if self.cutout_augment is not None:
feat = self.cutout_augment(feat)
if self.spec_augment is not None:
feat = self.spec_augment(feat)
feat = feat.to(dtype)
return feat, feat_lens
def apply_padding(self, x):
if self.pad_to_max_duration:
x_size = max(x.size(-1), self.max_len)
else:
x_size = x.size(-1)
if self.pad_align > 0:
pad_amt = x_size % self.pad_align
else:
pad_amt = 0
padded_len = x_size + (self.pad_align - pad_amt if pad_amt > 0 else 0)
return nn.functional.pad(x, (0, padded_len - x.size(-1)))
class SpecAugment(nn.Module):
"""Spec augment. refer to https://arxiv.org/abs/1904.08779
"""
def __init__(self, freq_masks=0, min_freq=0, max_freq=10, time_masks=0,
min_time=0, max_time=10):
super(SpecAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.freq_masks = freq_masks
self.min_freq = min_freq
self.max_freq = max_freq
self.time_masks = time_masks
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for _ in range(self.freq_masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
f0 = torch.randint(0, max(1, sh[1] - w), size=(1,))
mask[idx, f0:f0+w] = 1
for _ in range(self.time_masks):
w = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
t0 = torch.randint(0, max(1, sh[2] - w), size=(1,))
mask[idx, :, t0:t0+w] = 1
return x.masked_fill(mask, 0)
class CutoutAugment(nn.Module):
"""Cutout. refer to https://arxiv.org/pdf/1708.04552.pdf
"""
def __init__(self, masks=0, min_freq=20, max_freq=20, min_time=5, max_time=5):
super(CutoutAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.masks = masks
self.min_freq = min_freq
self.max_freq = max_freq
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for i in range(self.masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
h = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
f0 = int(random.uniform(0, sh[1] - w))
t0 = int(random.uniform(0, sh[2] - h))
mask[idx, f0:f0+w, t0:t0+h] = 1
return x.masked_fill(mask, 0)
@torch.jit.script
def normalize_batch(x, seq_len, normalize_type: str):
if normalize_type == "per_feature":
x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
for i in range(x.shape[0]):
x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1)
x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1)
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)
elif normalize_type == "all_features":
x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
for i in range(x.shape[0]):
x_mean[i] = x[i, :, :int(seq_len[i])].mean()
x_std[i] = x[i, :, :int(seq_len[i])].std()
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)
else:
return x
@torch.jit.script
def stack_subsample_frames(x, x_lens, stacking: int = 1, subsampling: int = 1):
""" Stacks frames together across feature dim, and then subsamples
input is batch_size, feature_dim, num_frames
output is batch_size, feature_dim * stacking, num_frames / subsampling
"""
seq = [x]
for n in range(1, stacking):
tmp = torch.zeros_like(x)
tmp[:, :, :-n] = x[:, :, n:]
seq.append(tmp)
x = torch.cat(seq, dim=1)[:, :, ::subsampling]
if subsampling > 1:
x_lens = torch.ceil(x_lens.float() / subsampling).int()
if x.size(2) > x_lens.max().item():
assert abs(x.size(2) - x_lens.max().item()) <= 1
x = x[:,:,:x_lens.max().item()]
return x, x_lens
class FilterbankFeatures(BaseFeatures):
# For JIT, https://pytorch.org/docs/stable/jit.html#python-defined-constants
__constants__ = ["dither", "preemph", "n_fft", "hop_length", "win_length",
"log", "frame_stacking", "frame_subsampling", "normalize"]
# torchscript: "center" removed due to a bug
def __init__(self, spec_augment=None, cutout_augment=None,
sample_rate=16000, window_size=0.02, window_stride=0.01,
window="hann", normalize="per_feature", n_fft=512,
preemph=0.97, n_filt=80, lowfreq=0, highfreq=None, log=True,
dither=1e-5, pad_align=16, pad_to_max_duration=False,
max_duration=float('inf'), frame_stacking=1,
frame_subsampling=1):
super(FilterbankFeatures, self).__init__(
pad_align=pad_align, pad_to_max_duration=pad_to_max_duration,
max_duration=max_duration, sample_rate=sample_rate,
window_size=window_size, window_stride=window_stride,
spec_augment=spec_augment, cutout_augment=cutout_augment)
torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}
self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))
self.normalize = normalize
self.log = log
#TORCHSCRIPT: Check whether or not we need this
self.dither = dither
self.frame_stacking = frame_stacking
self.frame_subsampling = frame_subsampling
self.n_filt = n_filt
self.preemph = preemph
highfreq = highfreq or sample_rate / 2
window_fn = torch_windows.get(window, None)
window_tensor = window_fn(self.win_length,
periodic=False) if window_fn else None
filterbanks = torch.tensor(
librosa.filters.mel(sample_rate, self.n_fft, n_mels=n_filt,
fmin=lowfreq, fmax=highfreq),
dtype=torch.float).unsqueeze(0)
# torchscript
self.register_buffer("fb", filterbanks)
self.register_buffer("window", window_tensor)
def output_dim(self):
return self.n_filt * self.frame_stacking
def get_seq_len(self, seq_len):
return torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to(
dtype=torch.int)
# TORCHSCRIPT: center removed due to bug
def stft(self, x):
spec = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,
win_length=self.win_length,
window=self.window.to(dtype=torch.float),
return_complex=True)
return torch.view_as_real(spec)
@torch.no_grad()
def calculate_features(self, x, x_lens):
dtype = x.dtype
x_lens = self.get_seq_len(x_lens)
# dither
if self.dither > 0:
x += self.dither * torch.randn_like(x)
# do preemphasis
if self.preemph is not None:
            x = torch.cat(
                (x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]),
                dim=1)
x = self.stft(x)
# get power spectrum
x = x.pow(2).sum(-1)
# dot with filterbank energies
x = torch.matmul(self.fb.to(x.dtype), x)
# log features if required
if self.log:
x = torch.log(x + 1e-20)
# normalize if required
x = normalize_batch(x, x_lens, normalize_type=self.normalize)
if self.frame_stacking > 1 or self.frame_subsampling > 1:
x, x_lens = stack_subsample_frames(x, x_lens, self.frame_stacking,
self.frame_subsampling)
# mask to zero any values beyond x_lens in batch,
# pad to multiple of `pad_align` (for efficiency)
max_len = x.size(-1)
mask = torch.arange(max_len, dtype=x_lens.dtype, device=x.device)
mask = mask.expand(x.size(0), max_len) >= x_lens.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1), 0)
# TORCHSCRIPT: Is this del important? It breaks scripting
# del mask
return x.to(dtype), x_lens
|
TensorFlow/Classification/ConvNets/se-resnext101-32x4d | se-resnext101-32x4d | inference_benchmark | #!/bin/bash
DATA_DIR=${1:-"/data/tfrecords"}
DALI_DIR=${2}
BATCH_SIZE_TO_TEST="1 2 4 8 16 32 64 128"
INFERENCE_BENCHMARK=$(mktemp /tmp/inference-benchmark.XXXXXX)
function test_configuration() {
echo "Testing configuration: $1" | tee -a $INFERENCE_BENCHMARK
for BATCH in $BATCH_SIZE_TO_TEST; do
python ./main.py --arch=se-resnext101-32x4d --mode=inference_benchmark --warmup_steps 50 --num_iter 400 --iter_unit batch \
--batch_size $BATCH --data_dir=$DATA_DIR --results_dir=/tmp/results $2 | tail -n2 | head -n1 | sed \
's/^DLL \([0-9]*-\)*[0-9]* \([0-9]*:\)*[0-9]*.[0-9]* - ()/Results for BS='$BATCH'/' | tee -a $INFERENCE_BENCHMARK
if [ ! $? -eq 0 ]; then
echo "Failed test on batch size $BATCH_SIZE"
exit 1
fi
done
}
test_configuration "FP32 nodali noxla"
test_configuration "FP32 nodali xla" "--xla"
test_configuration "FP16 nodali noxla" "--amp"
test_configuration "FP16 nodali xla" "--amp --xla"
if [ ! -z $DALI_DIR ]; then
test_configuration "FP16 dali xla" "--amp --xla --dali --data_idx_dir ${DALI_DIR}"
fi
cat $INFERENCE_BENCHMARK
rm $INFERENCE_BENCHMARK
|
TensorFlow2/Detection/Efficientdet/efficientnet | efficientnet | common_modules | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common modeling utilities."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import numpy as np
import math
import tensorflow as tf
from typing import Text, Optional
__all__ = ['count_params', 'load_weights', 'round_filters', 'round_repeats']
def count_params(model, trainable_only=True):
"""Returns the count of all model parameters, or just trainable ones."""
if not trainable_only:
return model.count_params()
else:
return int(np.sum([tf.keras.backend.count_params(p)
for p in model.trainable_weights]))
def load_weights(model: tf.keras.Model,
model_weights_path: Text,
weights_format: Text = 'saved_model'):
"""Load model weights from the given file path.
Args:
model: the model to load weights into
model_weights_path: the path of the model weights
weights_format: the model weights format. One of 'saved_model', 'h5',
or 'checkpoint'.
"""
if weights_format == 'saved_model':
loaded_model = tf.keras.models.load_model(model_weights_path)
model.set_weights(loaded_model.get_weights())
else:
model.load_weights(model_weights_path).expect_partial()
def round_filters(filters: int,
config: dict) -> int:
"""Round number of filters based on width coefficient."""
width_coefficient = config['width_coefficient']
min_depth = config['min_depth']
divisor = config['depth_divisor']
orig_filters = filters
if not width_coefficient:
return filters
filters *= width_coefficient
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats: int, depth_coefficient: float) -> int:
"""Round number of repeats based on depth coefficient."""
return int(math.ceil(depth_coefficient * repeats))
|
TensorFlow/Recommendation/WideAndDeep/trainer | trainer | task | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import dllogger
import horovod.tensorflow as hvd
import json
import numpy as np
import os
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow.core.protobuf import rewriter_config_pb2
from trainer import features
from utils.dataloader import separate_input_fn
from utils.hooks.benchmark_hooks import BenchmarkLoggingHook
from utils.metrics import map_custom_metric, map_custom_metric_with_leak
from utils.schedulers import learning_rate_scheduler
MODEL_TYPES = ['wide', 'deep', 'wide_n_deep']
WIDE, DEEP, WIDE_N_DEEP = MODEL_TYPES
# Default train dataset size
TRAIN_DATASET_SIZE = 59761827
def create_parser():
"""Initialize command line parser using arparse.
Returns:
An argparse.ArgumentParser.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
help='Model type to train on',
choices=MODEL_TYPES,
default=WIDE_N_DEEP)
parser.add_argument(
'--train_data_pattern',
help='Pattern of training file names. For example if training files are train_000.tfrecord, \
train_001.tfrecord then --train_data_pattern is train_*',
type=str,
default='/outbrain/tfrecords/train/part*',
nargs='+')
parser.add_argument(
'--eval_data_pattern',
help='Pattern of eval file names. For example if eval files are eval_000.tfrecord, \
eval_001.tfrecord then --eval_data_pattern is eval_*',
type=str,
default='/outbrain/tfrecords/eval/part*',
nargs='+')
parser.add_argument(
'--model_dir',
help='Model Checkpoint will be saved here',
type=str,
default='/outbrain/checkpoints')
parser.add_argument(
'--transformed_metadata_path',
help='Path to transformed_metadata.',
type=str,
default='/outbrain/tfrecords')
parser.add_argument(
'--deep_hidden_units',
help='Hidden units per layer, separated by spaces',
default=[1024, 1024, 1024, 1024, 1024],
type=int,
nargs="+")
parser.add_argument(
'--prebatch_size',
help='Size of the pre-batches in the tfrecords',
default=4096,
type=int)
parser.add_argument(
'--global_batch_size',
help='Total training batch size',
default=131072,
type=int)
parser.add_argument(
'--eval_batch_size',
help='Evaluation batch size',
default=32768,
type=int)
parser.add_argument(
'--eval_steps',
help='Number of evaluation steps to perform',
default=8,
type=int)
parser.add_argument(
'--training_set_size',
help='Number of samples in the training set',
default=TRAIN_DATASET_SIZE,
type=int)
parser.add_argument(
'--num_epochs',
help='Number of epochs',
default=120,
type=int)
parser.add_argument(
'--eval_epoch_interval',
help='Perform evaluation during training after this many epochs',
default=2,
type=float)
parser.add_argument(
'--xla',
help='Enable XLA',
default=False,
action='store_true')
parser.add_argument(
'--gpu',
help='Run computations on the GPU',
default=False,
action='store_true')
parser.add_argument(
'--amp',
help='Attempt automatic mixed precision conversion',
default=False,
action='store_true')
parser.add_argument(
'--hvd',
help='Use Horovod',
action='store_true',
default=False)
parser.add_argument(
'--linear_l1_regularization',
help='L1 regularization for linear model',
type=float,
default=0.0)
parser.add_argument(
'--linear_l2_regularization',
help='L2 regularization for linear model',
type=float,
default=0.0)
parser.add_argument(
'--linear_learning_rate',
help='Learning rate for linear model',
type=float,
default=0.2)
parser.add_argument(
'--deep_learning_rate',
help='Learning rate for deep model',
type=float,
default=1.0)
parser.add_argument(
'--deep_dropout',
help='Dropout regularization for deep model',
type=float,
default=0.0)
parser.add_argument(
'--deep_warmup_epochs',
help='Number of epochs for deep LR warmup',
type=float,
default=0)
parser.add_argument(
'--log_device_placement',
help='Ask Tensorflow (via ConfigProto) to print device placement of nodes',
default=False,
action='store_true')
parser.add_argument(
'--predict',
help='Only perform a prediction on the validation dataset, don\'t train',
default=False,
action='store_true')
parser.add_argument(
'--evaluate',
help='Only perform an evaluation on the validation dataset, don\'t train',
default=False,
action='store_true')
parser.add_argument(
'--results_dir',
type=str,
help='Directory to store training results',
default='/results')
parser.add_argument(
'--log_filename',
type=str,
        help='Name of the file to store dllogger output',
default='log.json')
parser.add_argument(
'--shuffle_percentage',
type=float,
default=0.0,
help='Size of the shuffle buffer from 0 to 1. \
1 means that the shuffle buffer size will be equal to the size of the entire batch.')
parser.add_argument(
'--print_display_ids',
help='Print the display ids processed by the input pipeline',
default=False,
action='store_true')
parser.add_argument(
'--reader_num_threads',
default=12,
type=int)
parser.add_argument(
'--parser_num_threads',
default=3,
type=int)
parser.add_argument(
'--prefetch_buffer_size',
default=1,
type=int)
parser.add_argument(
'--submission',
action='store_true',
default=False)
parser.add_argument(
'--benchmark',
help='Collect performance metrics during training',
action='store_true',
default=False)
parser.add_argument(
'--benchmark_warmup_steps',
        help='Warmup steps before the start of benchmarking the training',
type=int,
default=50)
parser.add_argument(
'--benchmark_steps',
help='Number of steps for train performance benchmark',
type=int,
default=100)
return parser
def construct_estimator(model_type, run_config,
wide_columns, wide_optimizer,
deep_columns, deep_hidden_units, deep_dropout, deep_optimizer):
assert model_type in [WIDE, DEEP, WIDE_N_DEEP], 'Canned estimator only supports basic wide, deep, wnd'
if model_type == WIDE:
estimator = tf.estimator.LinearClassifier(
feature_columns=wide_columns,
config=run_config,
optimizer=wide_optimizer)
elif model_type == DEEP:
estimator = tf.estimator.DNNClassifier(
feature_columns=deep_columns,
hidden_units=deep_hidden_units,
dropout=deep_dropout,
config=run_config,
optimizer=deep_optimizer)
elif model_type == WIDE_N_DEEP:
estimator = tf.estimator.DNNLinearCombinedClassifier(
config=run_config,
linear_feature_columns=wide_columns,
linear_optimizer=wide_optimizer,
dnn_feature_columns=deep_columns,
dnn_optimizer=deep_optimizer,
dnn_hidden_units=deep_hidden_units,
dnn_dropout=deep_dropout,
linear_sparse_combiner='sum',
loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
return estimator
def main(FLAGS):
if FLAGS.hvd:
hvd.init()
if hvd.local_rank() == 0:
tf.logging.set_verbosity(tf.logging.INFO)
log_path = os.path.join(FLAGS.results_dir, FLAGS.log_filename)
os.makedirs(FLAGS.results_dir, exist_ok=True)
dllogger.init(backends=[
dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)])
else:
tf.logging.set_verbosity(tf.logging.ERROR)
dllogger.init(backends=[])
num_gpus = hvd.size()
else:
tf.logging.set_verbosity(tf.logging.INFO)
log_path = os.path.join(FLAGS.results_dir, FLAGS.log_filename)
os.makedirs(FLAGS.results_dir, exist_ok=True)
dllogger.init(backends=[
dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)])
num_gpus = 1
dllogger.log(data=vars(FLAGS), step='PARAMETER')
dllogger.metadata('train_throughput', {'unit': 'samples/s'})
dllogger.metadata('infer_throughput', {'unit': 'samples/s'})
dllogger.metadata('map', {'unit': None})
dllogger.metadata('map_infer', {'unit': None})
dllogger.metadata('map_with_leak', {'unit': None})
dllogger.metadata('map_with_leak_infer', {'unit': None})
local_batch_size = FLAGS.global_batch_size // num_gpus
create_batches = local_batch_size // FLAGS.prebatch_size
wide_columns, deep_columns = features.get_feature_columns()
tf_transform_output = tft.TFTransformOutput(FLAGS.transformed_metadata_path)
if not FLAGS.hvd or hvd.local_rank() == 0:
tf.compat.v1.logging.warn('command line arguments: {}'.format(json.dumps(vars(FLAGS))))
if not os.path.exists(FLAGS.results_dir):
os.mkdir(FLAGS.results_dir)
with open('{}/args.json'.format(FLAGS.results_dir), 'w') as f:
json.dump(vars(FLAGS), f, indent=4)
if FLAGS.gpu:
if FLAGS.amp:
rewrite_options = rewriter_config_pb2.RewriterConfig(auto_mixed_precision=True)
session_config = tf.compat.v1.ConfigProto(
graph_options=tf.compat.v1.GraphOptions(rewrite_options=rewrite_options),
log_device_placement=FLAGS.log_device_placement
)
else:
session_config = tf.compat.v1.ConfigProto(
log_device_placement=FLAGS.log_device_placement
)
else:
session_config = tf.compat.v1.ConfigProto(
device_count={'GPU': 0},
log_device_placement=FLAGS.log_device_placement
)
if FLAGS.hvd:
session_config.gpu_options.visible_device_list = str(hvd.local_rank())
if FLAGS.xla:
session_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
if FLAGS.benchmark:
model_dir = None
else:
model_dir = FLAGS.model_dir
steps_per_epoch = FLAGS.training_set_size / FLAGS.global_batch_size
print('Steps per epoch: {}'.format(steps_per_epoch))
max_steps = int(FLAGS.num_epochs * steps_per_epoch)
save_checkpoints_steps = FLAGS.benchmark_steps + 1 if FLAGS.benchmark else \
int(FLAGS.eval_epoch_interval * steps_per_epoch)
count_steps = FLAGS.benchmark_steps + 1 if FLAGS.benchmark else 100
run_config = tf.estimator.RunConfig(model_dir=model_dir) \
.replace(session_config=session_config,
save_checkpoints_steps=save_checkpoints_steps,
save_summary_steps=count_steps,
log_step_count_steps=count_steps,
keep_checkpoint_max=1)
def wide_optimizer():
opt = tf.compat.v1.train.FtrlOptimizer(
learning_rate=FLAGS.linear_learning_rate,
l1_regularization_strength=FLAGS.linear_l1_regularization,
l2_regularization_strength=FLAGS.linear_l2_regularization)
if FLAGS.hvd:
opt = hvd.DistributedOptimizer(opt)
if FLAGS.amp:
loss_scale = tf.train.experimental.DynamicLossScale()
opt = tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer(opt, loss_scale)
return opt
def deep_optimizer():
with tf.device("/cpu:0"):
learning_rate_fn = learning_rate_scheduler(
lr_init=FLAGS.deep_learning_rate,
warmup_steps=int(steps_per_epoch * FLAGS.deep_warmup_epochs),
global_step=tf.compat.v1.train.get_global_step()
)
opt = tf.compat.v1.train.AdagradOptimizer(
learning_rate=learning_rate_fn,
initial_accumulator_value=0.1,
use_locking=False)
if FLAGS.hvd:
opt = hvd.DistributedOptimizer(opt)
if FLAGS.amp:
loss_scale = tf.train.experimental.DynamicLossScale()
opt = tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer(opt, loss_scale)
return opt
# input functions to read data from disk
train_input_fn = lambda: separate_input_fn(
tf_transform_output,
FLAGS.train_data_pattern,
create_batches,
tf.estimator.ModeKeys.TRAIN,
reader_num_threads=FLAGS.reader_num_threads,
parser_num_threads=FLAGS.parser_num_threads,
shuffle_buffer_size=int(FLAGS.shuffle_percentage * create_batches),
prefetch_buffer_size=FLAGS.prefetch_buffer_size,
print_display_ids=FLAGS.print_display_ids)
eval_input_fn = lambda: separate_input_fn(
tf_transform_output,
FLAGS.eval_data_pattern,
(FLAGS.eval_batch_size // FLAGS.prebatch_size),
tf.estimator.ModeKeys.EVAL,
reader_num_threads=1,
parser_num_threads=1,
shuffle_buffer_size=int(FLAGS.shuffle_percentage * create_batches),
prefetch_buffer_size=FLAGS.prefetch_buffer_size,
print_display_ids=FLAGS.print_display_ids)
estimator = construct_estimator(FLAGS.model_type, run_config,
wide_columns, wide_optimizer,
deep_columns, FLAGS.deep_hidden_units, FLAGS.deep_dropout, deep_optimizer)
estimator = tf.estimator.add_metrics(estimator, map_custom_metric)
estimator = tf.estimator.add_metrics(estimator, map_custom_metric_with_leak)
hooks = []
if FLAGS.hvd:
hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if FLAGS.predict or FLAGS.evaluate: # inference
if FLAGS.benchmark:
benchmark_hook = BenchmarkLoggingHook(global_batch_size=FLAGS.eval_batch_size,
warmup_steps=FLAGS.benchmark_warmup_steps)
hooks.append(benchmark_hook)
eval_steps = FLAGS.benchmark_steps
else:
eval_steps = FLAGS.eval_steps
predict_result_iter = estimator.predict(input_fn=eval_input_fn, hooks=hooks, yield_single_examples=False)
results = []
for i, r in enumerate(predict_result_iter):
print('predicting batch: ', i)
results.append(r)
# TODO: use eval_steps
if i >= eval_steps - 1:
break
if FLAGS.benchmark:
infer_throughput = benchmark_hook.mean_throughput.value()
if FLAGS.benchmark:
dllogger.log(data={'infer_throughput': infer_throughput}, step=tuple())
elif FLAGS.evaluate:
print('evaluating using estimator.evaluate with eval_batch_size = ',
FLAGS.eval_batch_size, ' and eval_steps = ', FLAGS.eval_steps)
result = estimator.evaluate(eval_input_fn, hooks=hooks, steps=FLAGS.eval_steps)
dllogger.log(step=(), data={'map_infer': float(result['map']),
'map_with_leak_infer': float(result['map_with_leak'])})
elif FLAGS.predict:
scores = [r['probabilities'][:, 1] for r in results]
scores = np.hstack(scores)
scores_path = os.path.join(FLAGS.model_dir, 'scores.txt')
print('saving the numpy scores array to: ', scores_path)
np.savetxt(scores_path, scores, fmt="%f", delimiter='\n')
else: # training
if FLAGS.benchmark:
benchmark_hook = BenchmarkLoggingHook(global_batch_size=FLAGS.global_batch_size,
warmup_steps=FLAGS.benchmark_warmup_steps)
hooks.append(benchmark_hook)
estimator.train(train_input_fn, hooks=hooks, steps=FLAGS.benchmark_steps)
train_throughput = benchmark_hook.mean_throughput.value()
dllogger.log(data={'train_throughput': train_throughput}, step=tuple())
else:
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
max_steps=max_steps,
hooks=hooks)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
throttle_secs=0,
steps=FLAGS.eval_steps)
result = tf.estimator.train_and_evaluate(estimator=estimator,
train_spec=train_spec,
eval_spec=eval_spec)
if result != (None, None):
dllogger.log(step=(), data={'map': float(result[0]['map']),
'map_with_leak': float(result[0]['map_with_leak'])})
if __name__ == '__main__':
FLAGS = create_parser().parse_args()
main(FLAGS)
|
PyTorch/SpeechSynthesis/FastPitch/filelists | filelists | ljs_audio_pitch_text_val | wavs/LJ016-0288.wav|pitch/LJ016-0288.pt|"Müller, Müller, He's the man," till a diversion was created by the appearance of the gallows, which was received with continuous yells.
wavs/LJ028-0275.wav|pitch/LJ028-0275.pt|At last, in the twentieth month,
wavs/LJ019-0273.wav|pitch/LJ019-0273.pt|which Sir Joshua Jebb told the committee he considered the proper elements of penal discipline.
wavs/LJ021-0145.wav|pitch/LJ021-0145.pt|From those willing to join in establishing this hoped-for period of peace,
wavs/LJ009-0076.wav|pitch/LJ009-0076.pt|We come to the sermon.
wavs/LJ048-0194.wav|pitch/LJ048-0194.pt|during the morning of November twenty-two prior to the motorcade.
wavs/LJ049-0050.wav|pitch/LJ049-0050.pt|Hill had both feet on the car and was climbing aboard to assist President and Mrs. Kennedy.
wavs/LJ022-0023.wav|pitch/LJ022-0023.pt|The overwhelming majority of people in this country know how to sift the wheat from the chaff in what they hear and what they read.
wavs/LJ034-0053.wav|pitch/LJ034-0053.pt|reached the same conclusion as Latona that the prints found on the cartons were those of Lee Harvey Oswald.
wavs/LJ035-0129.wav|pitch/LJ035-0129.pt|and she must have run down the stairs ahead of Oswald and would probably have seen or heard him.
wavs/LJ039-0075.wav|pitch/LJ039-0075.pt|once you know that you must put the crosshairs on the target and that is all that is necessary.
wavs/LJ046-0184.wav|pitch/LJ046-0184.pt|but there is a system for the immediate notification of the Secret Service by the confining institution when a subject is released or escapes.
wavs/LJ003-0111.wav|pitch/LJ003-0111.pt|He was in consequence put out of the protection of their internal law, end quote. Their code was a subject of some curiosity.
wavs/LJ037-0234.wav|pitch/LJ037-0234.pt|Mrs. Mary Brock, the wife of a mechanic who worked at the station, was there at the time and she saw a white male,
wavs/LJ047-0044.wav|pitch/LJ047-0044.pt|Oswald was, however, willing to discuss his contacts with Soviet authorities. He denied having any involvement with Soviet intelligence agencies
wavs/LJ028-0081.wav|pitch/LJ028-0081.pt|Years later, when the archaeologists could readily distinguish the false from the true,
wavs/LJ012-0161.wav|pitch/LJ012-0161.pt|he was reported to have fallen away to a shadow.
wavs/LJ009-0114.wav|pitch/LJ009-0114.pt|Mr. Wakefield winds up his graphic but somewhat sensational account by describing another religious service, which may appropriately be inserted here.
wavs/LJ028-0335.wav|pitch/LJ028-0335.pt|accordingly they committed to him the command of their whole army, and put the keys of their city into his hands.
wavs/LJ005-0014.wav|pitch/LJ005-0014.pt|Speaking on a debate on prison matters, he declared that
wavs/LJ008-0294.wav|pitch/LJ008-0294.pt|nearly indefinitely deferred.
wavs/LJ028-0307.wav|pitch/LJ028-0307.pt|then let twenty days pass, and at the end of that time station near the Chaldasan gates a body of four thousand.
wavs/LJ046-0058.wav|pitch/LJ046-0058.pt|During his Presidency, Franklin D. Roosevelt made almost four hundred journeys and traveled more than three hundred fifty thousand miles.
wavs/LJ046-0146.wav|pitch/LJ046-0146.pt|The criteria in effect prior to November twenty-two, nineteen sixty-three, for determining whether to accept material for the PRS general files
wavs/LJ017-0131.wav|pitch/LJ017-0131.pt|even when the high sheriff had told him there was no possibility of a reprieve, and within a few hours of execution.
wavs/LJ002-0018.wav|pitch/LJ002-0018.pt|The inadequacy of the jail was noticed and reported upon again and again by the grand juries of the city of London,
wavs/LJ019-0257.wav|pitch/LJ019-0257.pt|Here the tread-wheel was in use, there cellular cranks, or hard-labor machines.
wavs/LJ034-0042.wav|pitch/LJ034-0042.pt|that he could only testify with certainty that the print was less than three days old.
wavs/LJ031-0070.wav|pitch/LJ031-0070.pt|Dr. Clark, who most closely observed the head wound,
wavs/LJ012-0035.wav|pitch/LJ012-0035.pt|the number and names on watches, were carefully removed or obliterated after the goods passed out of his hands.
wavs/LJ050-0168.wav|pitch/LJ050-0168.pt|with the particular purposes of the agency involved. The Commission recognizes that this is a controversial area
wavs/LJ036-0103.wav|pitch/LJ036-0103.pt|The police asked him whether he could pick out his passenger from the lineup.
wavs/LJ016-0318.wav|pitch/LJ016-0318.pt|Other officials, great lawyers, governors of prisons, and chaplains supported this view.
wavs/LJ034-0198.wav|pitch/LJ034-0198.pt|Euins, who was on the southwest corner of Elm and Houston Streets testified that he could not describe the man he saw in the window.
wavs/LJ049-0026.wav|pitch/LJ049-0026.pt|On occasion the Secret Service has been permitted to have an agent riding in the passenger compartment with the President.
wavs/LJ011-0096.wav|pitch/LJ011-0096.pt|He married a lady also belonging to the Society of Friends, who brought him a large fortune, which, and his own money, he put into a city firm,
wavs/LJ040-0002.wav|pitch/LJ040-0002.pt|Chapter seven. Lee Harvey Oswald: Background and Possible Motives, Part one.
wavs/LJ014-0030.wav|pitch/LJ014-0030.pt|These were damnatory facts which well supported the prosecution.
wavs/LJ043-0002.wav|pitch/LJ043-0002.pt|The Warren Commission Report. By The President's Commission on the Assassination of President Kennedy. Chapter seven. Lee Harvey Oswald:
wavs/LJ029-0022.wav|pitch/LJ029-0022.pt|The original plan called for the President to spend only one day in the State, making whirlwind visits to Dallas, Fort Worth, San Antonio, and Houston.
wavs/LJ014-0020.wav|pitch/LJ014-0020.pt|He was soon afterwards arrested on suspicion, and a search of his lodgings brought to light several garments saturated with blood;
wavs/LJ040-0027.wav|pitch/LJ040-0027.pt|He was never satisfied with anything.
wavs/LJ028-0093.wav|pitch/LJ028-0093.pt|but his scribe wrote it in the manner customary for the scribes of those days to write of their royal masters.
wavs/LJ004-0152.wav|pitch/LJ004-0152.pt|although at Mr. Buxton's visit a new jail was in process of erection, the first step towards reform since Howard's visitation in seventeen seventy-four.
wavs/LJ008-0111.wav|pitch/LJ008-0111.pt|They entered a "stone cold room," and were presently joined by the prisoner.
wavs/LJ017-0044.wav|pitch/LJ017-0044.pt|and the deepest anxiety was felt that the crime, if crime there had been, should be brought home to its perpetrator.
wavs/LJ033-0047.wav|pitch/LJ033-0047.pt|I noticed when I went out that the light was on, end quote,
wavs/LJ028-0008.wav|pitch/LJ028-0008.pt|you tap gently with your heel upon the shoulder of the dromedary to urge her on.
wavs/LJ016-0179.wav|pitch/LJ016-0179.pt|contracted with sheriffs and conveners to work by the job.
wavs/LJ005-0201.wav|pitch/LJ005-0201.pt|as is shown by the report of the Commissioners to inquire into the state of the municipal corporations in eighteen thirty-five.
wavs/LJ035-0019.wav|pitch/LJ035-0019.pt|drove to the northwest corner of Elm and Houston, and parked approximately ten feet from the traffic signal.
wavs/LJ031-0038.wav|pitch/LJ031-0038.pt|The first physician to see the President at Parkland Hospital was Dr. Charles J. Carrico, a resident in general surgery.
wavs/LJ017-0070.wav|pitch/LJ017-0070.pt|but his sporting operations did not prosper, and he became a needy man, always driven to desperate straits for cash.
wavs/LJ007-0154.wav|pitch/LJ007-0154.pt|These pungent and well-grounded strictures applied with still greater force to the unconvicted prisoner, the man who came to the prison innocent, and still uncontaminated,
wavs/LJ002-0043.wav|pitch/LJ002-0043.pt|long narrow rooms -- one thirty-six feet, six twenty-three feet, and the eighth eighteen,
wavs/LJ004-0096.wav|pitch/LJ004-0096.pt|the fatal consequences whereof might be prevented if the justices of the peace were duly authorized
wavs/LJ018-0081.wav|pitch/LJ018-0081.pt|his defense being that he had intended to commit suicide, but that, on the appearance of this officer who had wronged him,
wavs/LJ042-0129.wav|pitch/LJ042-0129.pt|No night clubs or bowling alleys, no places of recreation except the trade union dances. I have had enough.
wavs/LJ008-0278.wav|pitch/LJ008-0278.pt|or theirs might be one of many, and it might be considered necessary to "make an example."
wavs/LJ015-0203.wav|pitch/LJ015-0203.pt|but were the precautions too minute, the vigilance too close to be eluded or overcome?
wavs/LJ018-0239.wav|pitch/LJ018-0239.pt|His disappearance gave color and substance to evil reports already in circulation that the will and conveyance above referred to
wavs/LJ021-0066.wav|pitch/LJ021-0066.pt|together with a great increase in the payrolls, there has come a substantial rise in the total of industrial profits
wavs/LJ024-0083.wav|pitch/LJ024-0083.pt|This plan of mine is no attack on the Court;
wavs/LJ008-0258.wav|pitch/LJ008-0258.pt|Let me retrace my steps, and speak more in detail of the treatment of the condemned in those bloodthirsty and brutally indifferent days,
wavs/LJ038-0199.wav|pitch/LJ038-0199.pt|eleven. If I am alive and taken prisoner,
wavs/LJ045-0230.wav|pitch/LJ045-0230.pt|when he was finally apprehended in the Texas Theatre. Although it is not fully corroborated by others who were present,
wavs/LJ027-0141.wav|pitch/LJ027-0141.pt|is closely reproduced in the life-history of existing deer. Or, in other words,
wavs/LJ016-0020.wav|pitch/LJ016-0020.pt|He never reached the cistern, but fell back into the yard, injuring his legs severely.
wavs/LJ012-0250.wav|pitch/LJ012-0250.pt|On the seventh July, eighteen thirty-seven,
wavs/LJ001-0110.wav|pitch/LJ001-0110.pt|Even the Caslon type when enlarged shows great shortcomings in this respect:
wavs/LJ047-0148.wav|pitch/LJ047-0148.pt|On October twenty-five,
wavs/LJ031-0134.wav|pitch/LJ031-0134.pt|On one occasion Mrs. Johnson, accompanied by two Secret Service agents, left the room to see Mrs. Kennedy and Mrs. Connally.
wavs/LJ036-0174.wav|pitch/LJ036-0174.pt|This is the approximate time he entered the roominghouse, according to Earlene Roberts, the housekeeper there.
wavs/LJ026-0068.wav|pitch/LJ026-0068.pt|Energy enters the plant, to a small extent,
wavs/LJ034-0160.wav|pitch/LJ034-0160.pt|on Brennan's subsequent certain identification of Lee Harvey Oswald as the man he saw fire the rifle.
wavs/LJ013-0164.wav|pitch/LJ013-0164.pt|who came from his room ready dressed, a suspicious circumstance, as he was always late in the morning.
wavs/LJ014-0263.wav|pitch/LJ014-0263.pt|When other pleasures palled he took a theatre, and posed as a munificent patron of the dramatic art.
wavs/LJ005-0079.wav|pitch/LJ005-0079.pt|and improve the morals of the prisoners, and shall insure the proper measure of punishment to convicted offenders.
wavs/LJ048-0228.wav|pitch/LJ048-0228.pt|and others who were present say that no agent was inebriated or acted improperly.
wavs/LJ027-0052.wav|pitch/LJ027-0052.pt|These principles of homology are essential to a correct interpretation of the facts of morphology.
wavs/LJ004-0045.wav|pitch/LJ004-0045.pt|Mr. Sturges Bourne, Sir James Mackintosh, Sir James Scarlett, and William Wilberforce.
wavs/LJ012-0042.wav|pitch/LJ012-0042.pt|which he kept concealed in a hiding-place with a trap-door just under his bed.
wavs/LJ014-0110.wav|pitch/LJ014-0110.pt|At the first the boxes were impounded, opened, and found to contain many of O'Connor's effects.
wavs/LJ028-0506.wav|pitch/LJ028-0506.pt|A modern artist would have difficulty in doing such accurate work.
wavs/LJ014-0010.wav|pitch/LJ014-0010.pt|yet he could not overcome the strange fascination it had for him, and remained by the side of the corpse till the stretcher came.
wavs/LJ042-0096.wav|pitch/LJ042-0096.pt|(old exchange rate) in addition to his factory salary of approximately equal amount
wavs/LJ031-0202.wav|pitch/LJ031-0202.pt|Mrs. Kennedy chose the hospital in Bethesda for the autopsy because the President had served in the Navy.
wavs/LJ012-0235.wav|pitch/LJ012-0235.pt|While they were in a state of insensibility the murder was committed.
wavs/LJ019-0186.wav|pitch/LJ019-0186.pt|seeing that since the establishment of the Central Criminal Court, Newgate received prisoners for trial from several counties,
wavs/LJ018-0098.wav|pitch/LJ018-0098.pt|and recognized as one of the frequenters of the bogus law-stationers. His arrest led to that of others.
wavs/LJ036-0077.wav|pitch/LJ036-0077.pt|Roger D. Craig, a deputy sheriff of Dallas County,
wavs/LJ045-0140.wav|pitch/LJ045-0140.pt|The arguments he used to justify his use of the alias suggest that Oswald may have come to think that the whole world was becoming involved
wavs/LJ029-0032.wav|pitch/LJ029-0032.pt|According to O'Donnell, quote, we had a motorcade wherever we went, end quote.
wavs/LJ003-0345.wav|pitch/LJ003-0345.pt|All the committee could do in this respect was to throw the responsibility on others.
wavs/LJ008-0307.wav|pitch/LJ008-0307.pt|afterwards express a wish to murder the Recorder for having kept them so long in suspense.
wavs/LJ043-0030.wav|pitch/LJ043-0030.pt|If somebody did that to me, a lousy trick like that, to take my wife away, and all the furniture, I would be mad as hell, too.
wavs/LJ009-0238.wav|pitch/LJ009-0238.pt|After this the sheriffs sent for another rope, but the spectators interfered, and the man was carried back to jail.
wavs/LJ039-0223.wav|pitch/LJ039-0223.pt|Oswald's Marine training in marksmanship, his other rifle experience and his established familiarity with this particular weapon
wavs/LJ014-0076.wav|pitch/LJ014-0076.pt|He was seen afterwards smoking and talking with his hosts in their back parlor, and never seen again alive.
wavs/LJ016-0138.wav|pitch/LJ016-0138.pt|at a distance from the prison.
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/hydra/callbacks | callbacks | merge_logs | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
merge_logs:
_target_: callbacks.hydra_callbacks.MergeLogs
|
TensorFlow2/LanguageModeling | LanguageModeling | README | # Language Modeling
Language modeling (LM) is a natural language processing (NLP) task that determines the probability of a given sequence of words occurring in a sentence.
In an era where computers, smartphones and other electronic devices increasingly need to interact with humans, language modeling has become an indispensable technique for teaching devices how to communicate in natural languages in human-like ways.
But how does language modeling work? And what can you build with it? What are the different approaches, what are its potential benefits and limitations, and how might you use it in your business?
In this guide, you’ll find answers to all of those questions and more. Whether you’re an experienced machine learning engineer considering implementation, a developer wanting to learn more, or a product manager looking to explore what’s possible with natural language processing and language modeling, this guide is for you.
Here’s a look at what we’ll cover:
- Language modeling – the basics
- How does language modeling work?
- Use cases and applications
- Getting started
## Language modeling – the basics
### What is language modeling?
"*Language modeling is the task of assigning a probability to sentences in a language. […]
Besides assigning a probability to each sequence of words, the language models also assign a
probability for the likelihood of a given word (or a sequence of words) to follow a sequence
of words.*" Source: Page 105, [Neural Network Methods in Natural Language Processing](http://amzn.to/2wt1nzv), 2017.
### Types of language models
There are primarily two types of Language Models:
- Statistical Language Models: These models use traditional statistical techniques like N-grams, Hidden Markov Models (HMM), and certain linguistic rules to learn the probability distribution of words.
- Neural Language Models: They use different kinds of Neural Networks to model language, and have surpassed the statistical language models in their effectiveness.
"*We provide ample empirical evidence to suggest that connectionist language models are
superior to standard n-gram techniques, except their high computational (training)
complexity.*" Source: [Recurrent neural network based language model](http://www.fit.vutbr.cz/research/groups/speech/publi/2010/mikolov_interspeech2010_IS100722.pdf), 2010.
Given the superior performance of neural language models, we include in the container two popular state-of-the-art neural language models: BERT and Transformer-XL.
### Why is language modeling important?
Language modeling is fundamental in modern NLP applications. It enables machines to understand qualitative information, and enables people to communicate with machines in the natural languages that humans use to communicate with each other.
Language modeling is used directly in a variety of industries, including tech, finance, healthcare, transportation, legal, military, government, and more -- actually, you probably have just interacted with a language model today, whether it be through Google search, engaging with a voice assistant, or using text autocomplete features.
## How does language modeling work?
The roots of modern language modeling can be traced back to 1948, when Claude Shannon
published a paper titled "A Mathematical Theory of Communication", laying the foundation for information theory and language modeling. In the paper, Shannon detailed the use of a stochastic model called the Markov chain to create a statistical model for the sequences of letters in English text. Markov models, along with n-grams, are still among the most popular statistical language models today.
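To make the n-gram idea concrete, here is a toy bigram model in Python (an illustration only, not part of the implementations provided in this repository); it estimates the probability of the next word purely from counts of adjacent word pairs:

```python
# Toy bigram language model: estimate P(next_word | previous_word) from counts.
from collections import Counter, defaultdict

corpus = "the cat sat on the mat . the dog sat on the rug .".split()

bigram_counts = defaultdict(Counter)
for prev, nxt in zip(corpus, corpus[1:]):
    bigram_counts[prev][nxt] += 1

def next_word_prob(prev, nxt):
    total = sum(bigram_counts[prev].values())
    return bigram_counts[prev][nxt] / total if total else 0.0

print(next_word_prob("the", "cat"))  # 0.25: "the" is followed by "cat" in 1 of its 4 occurrences
```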
However, simple statistical language models have serious drawbacks in scalability and fluency because of their sparse representation of language. Neural language models overcome this problem by representing language units (e.g. words, characters) as non-linear, distributed combinations of weights in continuous space, which lets them generalize to rare or unseen values instead of being misled by them.
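By contrast, a neural next-word predictor learns dense representations instead of relying on counts. The following minimal Keras sketch (hypothetical sizes, unrelated to the BERT and Transformer-XL implementations in this repository) illustrates the idea:

```python
# Minimal neural next-word predictor: embeddings + recurrent encoder + softmax over the vocabulary.
import tensorflow as tf

vocab_size, embed_dim = 10000, 128  # hypothetical sizes for illustration

model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embed_dim),          # dense, continuous word representations
    tf.keras.layers.LSTM(256),                                  # summarize the context
    tf.keras.layers.Dense(vocab_size, activation="softmax"),   # P(next token | context)
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
```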
Therefore, as mentioned above, we introduce two popular state-of-the-art neural language models, BERT and Transformer-XL, in TensorFlow and PyTorch. More details can be found in the [NVIDIA Deep Learning Examples GitHub Repository](https://github.com/NVIDIA/DeepLearningExamples)
## Use cases and applications
### Speech Recognition
Imagine speaking a phrase to the phone, expecting it to convert the speech to text. How does
it know if you said "recognize speech" or "wreck a nice beach"? Language models help figure it out
based on the context, enabling machines to process and make sense of speech audio.
### Spelling Correction
Language-model-enabled spellcheckers can point out spelling errors and suggest alternatives.
### Machine translation
Imagine you are translating the Chinese sentence "我在开车" into English. Your translation system gives you several choices:
- I at open car
- me at open car
- I at drive
- me at drive
- I am driving
- me am driving
A language model tells you which translation sounds the most natural.
## Getting started
NVIDIA provides examples for Language Modeling on [Deep Learning Examples Github Repository](https://github.com/NVIDIA/DeepLearningExamples). These examples provide you with easy to consume and highly optimized scripts for both training and inferencing. The quick start guide at our GitHub repository will help you in setting up the environment using NGC Docker Images, download pre-trained models from NGC and adapt the model training and inference for your application/use-case.
These models are tested and maintained by NVIDIA, leveraging mixed precision using tensor cores on our latest GPUs for faster training times while maintaining accuracy.
|
TensorFlow2/Recommendation/WideAndDeep/trainer/utils | utils | schedulers | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class LearningRateScheduler:
def __init__(self, args, steps_per_epoch, optimizer):
assert args.deep_warmup_epochs <= args.num_epochs, \
"Number of warmup epochs cannot be higher than training epochs"
self.base_lr = args.deep_learning_rate
self.warmup_steps = args.deep_warmup_epochs * steps_per_epoch
bound_epoch = args.deep_warmup_epochs + (args.num_epochs - args.deep_warmup_epochs) / 2
self.boundaries = [bound_epoch * steps_per_epoch]
self.values = [self.base_lr / 4, self.base_lr / 8]
self.optimizer = optimizer
@tf.function
def __call__(self, step):
if step < self.warmup_steps:
warmup_lr = self.base_lr * step / self.warmup_steps
self.optimizer.lr.assign(warmup_lr)
else:
index = tf.reduce_sum(tf.cast(step > self.boundaries, tf.int64))
value = tf.gather(self.values, index)
self.optimizer.lr.assign(value)
|
DGLPyTorch/DrugDiscovery/SE3Transformer/tests | tests | test_equivariance | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import torch
from se3_transformer.model import SE3Transformer
from se3_transformer.model.fiber import Fiber
if __package__ is None or __package__ == '':
from utils import get_random_graph, assign_relative_pos, get_max_diff, rot
else:
from .utils import get_random_graph, assign_relative_pos, get_max_diff, rot
# Tolerances for equivariance error abs( f(x) @ R - f(x @ R) )
TOL = 1e-3
CHANNELS, NODES = 32, 512
def _get_outputs(model, R):
feats0 = torch.randn(NODES, CHANNELS, 1)
feats1 = torch.randn(NODES, CHANNELS, 3)
coords = torch.randn(NODES, 3)
graph = get_random_graph(NODES)
if torch.cuda.is_available():
feats0 = feats0.cuda()
feats1 = feats1.cuda()
R = R.cuda()
coords = coords.cuda()
graph = graph.to('cuda')
model.cuda()
graph1 = assign_relative_pos(graph, coords)
out1 = model(graph1, {'0': feats0, '1': feats1}, {})
graph2 = assign_relative_pos(graph, coords @ R)
out2 = model(graph2, {'0': feats0, '1': feats1 @ R}, {})
return out1, out2
def _get_model(**kwargs):
return SE3Transformer(
num_layers=4,
fiber_in=Fiber.create(2, CHANNELS),
fiber_hidden=Fiber.create(3, CHANNELS),
fiber_out=Fiber.create(2, CHANNELS),
fiber_edge=Fiber({}),
num_heads=8,
channels_div=2,
**kwargs
)
def test_equivariance():
model = _get_model()
R = rot(*torch.rand(3))
if torch.cuda.is_available():
R = R.cuda()
out1, out2 = _get_outputs(model, R)
assert torch.allclose(out2['0'], out1['0'], atol=TOL), \
f'type-0 features should be invariant {get_max_diff(out1["0"], out2["0"])}'
assert torch.allclose(out2['1'], (out1['1'] @ R), atol=TOL), \
f'type-1 features should be equivariant {get_max_diff(out1["1"] @ R, out2["1"])}'
def test_equivariance_pooled():
model = _get_model(pooling='avg', return_type=1)
R = rot(*torch.rand(3))
if torch.cuda.is_available():
R = R.cuda()
out1, out2 = _get_outputs(model, R)
assert torch.allclose(out2, (out1 @ R), atol=TOL), \
f'type-1 features should be equivariant {get_max_diff(out1 @ R, out2)}'
def test_invariance_pooled():
model = _get_model(pooling='avg', return_type=0)
R = rot(*torch.rand(3))
if torch.cuda.is_available():
R = R.cuda()
out1, out2 = _get_outputs(model, R)
assert torch.allclose(out2, out1, atol=TOL), \
f'type-0 features should be invariant {get_max_diff(out1, out2)}'
|
TensorFlow2/LanguageModeling/BERT | BERT | README | # BERT 1.0 for TensorFlow 2
This repository provides a script and recipe to train the BERT
model for TensorFlow 2 to achieve state-of-the-art accuracy and is tested and maintained by NVIDIA.
## Table of Contents
* [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Glossary](#glossary)
* [Setup](#setup)
* [Requirements](#requirements)
* [Quick Start Guide](#quick-start-guide)
* [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Pre-training](#pre-training)
* [Fine tuning](#fine-tuning)
* [Multi-node](#multi-node)
* [Inference process](#inference-process)
* [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Pre-training accuracy](#pre-training-accuracy)
* [Fine-tuning accuracy for SQuAD v1.1: NVIDIA DGX A100 (8x A100 40GB)](#fine-tuning-accuracy-for-squad-v1.1-nvidia-dgx-a100-(8x-a100-40gb))
* [Pre-training SQuAD v1.1 stability test: NVIDIA DGX A100 (256x A100 80GB)](#pre-training-squad-v1.1-stability-test-nvidia-dgx-a100-(256x-a100-80gb))
* [Fine-tuning SQuAD v1.1 stability test: NVIDIA DGX A100 (8x A100 80GB)](#fine-tuning-squad-v1.1-stability-test-nvidia-dgx-a100-(8x-a100-80gb))
* [Training performance results](#training-performance-results)
* [Pre-training training performance: Single-node on NVIDIA DGX-2 V100 (16x V100 32GB)](#pre-training-training-performance-single-node-on-nvidia-dgx-1-v100-(16x-v100-32gb))
* [Pre-training training performance: Multi-node on NVIDIA DGX-2H V100 (16x V100 32GB)](#pre-training-training-performance-multi-node-on-nvidia-dgx-2h-v100-(16x-v100-32gb))
* [Pre-training training performance: Single-node on NVIDIA DGX A100 (8x A100 80GB)](#pre-training-training-performance-single-node-on-nvidia-dgx-a100-(8x-a100-80gb))
* [Pre-training training performance: Multi-node on NVIDIA DGX A100 (8x A100 40GB)](#pre-training-training-performance-multi-node-on-nvidia-dgx-a100-(8x-a100-40gb))
* [Fine-tuning training performance for SQuAD v1.1 on NVIDIA DGX-1 V100 (8x V100 16GB)](#fine-tuning-training-performance-for-squad-v1.1-on-nvidia-dgx-1-v100-(8x-v100-16gb))
* [Fine-tuning training performance for SQuAD v1.1 on NVIDIA DGX-1 V100 (8x V100 32GB)](#fine-tuning-training-performance-for-squad-v1.1-on-nvidia-dgx-1-v100-(8x-v100-32gb))
* [Fine-tuning training performance for SQuAD v1.1 on NVIDIA DGX A100 (8x A100 80GB)](#fine-tuning-training-performance-for-squad-v1.1-on-nvidia-dgx-a100-(8x-a100-80gb))
* [Inference performance results](#inference-performance-results)
* [Fine-tuning inference performance for SQuAD v1.1 on NVIDIA DGX-1 V100 (1x V100 16GB)](#fine-tuning-inference-performance-for-squad-v1.1-on-nvidia-dgx-1-v100-(1x-v100-16gb))
    * [Fine-tuning inference performance for SQuAD v1.1 on NVIDIA DGX-1 V100 (1x V100 32GB)](#fine-tuning-inference-performance-for-squad-v1.1-on-nvidia-dgx-1-v100-(1x-v100-32gb))
* [Fine-tuning inference performance for SQuAD v1.1 on NVIDIA DGX A100 (8x A100 80GB)](#fine-tuning-inference-performance-for-squad-v1.1-on-nvidia-dgx-a100-(1x-a100-80gb))
* [Fine-tuning inference performance for SQuAD v1.1 on NVIDIA Tesla T4 (1x T4 16GB)](#fine-tuning-inference-performance-for-squad-v1.1-on-nvidia-tesla-t4-(1x-t4-16gb))
* [Release Notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
BERT, or Bidirectional Encoder Representations from Transformers, is a new method of pre-training language representations which obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. This model is based on the [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) paper. NVIDIA's BERT is an optimized version of [Google's official implementation](https://github.com/google-research/bert), leveraging mixed precision arithmetic and Tensor Cores on V100 GPUs for faster training times while maintaining target accuracy.
Other publicly available implementations of BERT include:
[NVIDIA PyTorch](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/BERT)
[Hugging Face](https://github.com/huggingface/pytorch-pretrained-BERT)
[codertimo](https://github.com/codertimo/BERT-pytorch)
[gluon-nlp](https://github.com/dmlc/gluon-nlp/tree/master/scripts/bert)
[Google's official implementation](https://github.com/google-research/bert)
This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results up to 4x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
BERT's model architecture is a multi-layer bidirectional transformer encoder. Based on the model size, we have the following two default configurations of BERT:
| **Model** | **Hidden layers** | **Hidden unit size** | **Attention heads** | **Feedforward filter size** | **Max sequence length** | **Parameters** |
|:---------:|:----------:|:----:|:---:|:--------:|:---:|:----:|
|BERTBASE |12 encoder| 768| 12|4 x 768|512|110M|
|BERTLARGE|24 encoder|1024| 16|4 x 1024|512|330M|
BERT training consists of two steps, pre-training the language model in an unsupervised fashion on vast amounts of unannotated datasets, and then using this pre-trained model for fine-tuning for various NLP tasks, such as question and answer, sentence classification, or sentiment analysis. Fine-tuning typically adds an extra layer or two for the specific task and further trains the model using a task-specific annotated dataset, starting from the pre-trained backbone weights. The end-to-end process is depicted in the following image:

Figure 1: BERT Pipeline
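As an illustration of the fine-tuning step (a sketch under assumed shapes, not the actual code in this repository), a small task-specific head can be placed on top of a pretrained encoder:

```python
import tensorflow as tf

def build_classifier(pretrained_encoder, num_classes, seq_len=128):
    """Attach a classification head to a pretrained encoder (illustrative sketch)."""
    input_ids = tf.keras.Input(shape=(seq_len,), dtype=tf.int32)
    # Assumption: the encoder maps token ids to [batch, seq_len, hidden] activations.
    sequence_output = pretrained_encoder(input_ids)
    pooled = sequence_output[:, 0, :]                     # use the first ([CLS]) token
    logits = tf.keras.layers.Dense(num_classes)(pooled)   # new task-specific layer
    return tf.keras.Model(input_ids, logits)
```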
### Default configuration
This repository contains scripts to interactively launch data download, training, benchmarking, and inference routines in a Docker container for fine tuning Question Answering. The major differences between the official implementation of the paper and our version of BERT are as follows:
- Mixed precision support with TensorFlow Automatic Mixed Precision (TF-AMP), which enables mixed precision training without any changes to the code-base by performing automatic graph rewrites and loss scaling controlled by an environmental variable.
- Scripts to download dataset and pretrained checkpoints for:
- Pre-training - [Wikipedia](https://dumps.wikimedia.org/), [BookCorpus](http://yknzhu.wixsite.com/mbweb)
- Fine tuning - [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) (Stanford Question Answering Dataset)
- Pretrained weights from Google
- Custom fused CUDA kernels for faster computations
- Multi-GPU/Multi-node support using Horovod
The following performance optimizations were implemented in this model:
- [XLA](https://www.tensorflow.org/xla) support (experimental).
These techniques and optimizations improve model performance and reduce training time, allowing you to perform various NLP tasks with no additional effort.
### Feature support matrix
The following features are supported by this model.
| **Feature** | **BERT** |
|:-----------------------:|:--------------------------:|
| Horovod Multi-GPU | Yes |
| Horovod Multi-node | Yes |
| Automatic mixed precision (AMP) | Yes |
| XLA | Yes |
| LAMB | Yes |
#### Features
**Multi-GPU training with Horovod**
Our model uses Horovod to implement efficient multi-GPU training with NCCL. For details, see example sources in this repository or see the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage).
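The snippet below is a generic Horovod-with-TensorFlow sketch (illustration only; the scripts in this repository do their own wiring) showing the usual steps: initialize Horovod, pin one GPU per process, wrap the optimizer, and broadcast the initial variables from rank 0.

```python
import horovod.tensorflow.keras as hvd
import tensorflow as tf

hvd.init()  # one process per GPU

# Pin each process to a single local GPU.
gpus = tf.config.list_physical_devices("GPU")
if gpus:
    tf.config.set_visible_devices(gpus[hvd.local_rank()], "GPU")

opt = tf.keras.optimizers.Adam(1e-4 * hvd.size())  # scale the LR with the number of workers
opt = hvd.DistributedOptimizer(opt)                # all-reduce gradients (NCCL)

callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]  # sync initial weights
```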
**XLA support**
XLA is a domain-specific compiler for linear algebra that can accelerate TensorFlow models with potentially no source code changes. The results are improvements in speed and memory usage: most internal benchmarks run ~1.1-1.5x faster after XLA is enabled.
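For illustration, XLA can typically be enabled in TensorFlow either per function or globally (the scripts in this repository expose their own flags for this):

```python
import tensorflow as tf

# Compile a single function with XLA.
@tf.function(jit_compile=True)
def dense_relu(x, w):
    return tf.nn.relu(tf.matmul(x, w))

# Or turn on XLA auto-clustering for the whole program.
tf.config.optimizer.set_jit(True)
```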
**Multi-node Training**
Supported on a Pyxis/Enroot Slurm cluster.
**LAMB**
[LAMB](https://arxiv.org/pdf/1904.00962.pdf) stands for Layerwise Adaptive Moments based optimizer; it is a large batch optimization technique that helps accelerate training of deep neural networks using large minibatches. It allows using a global batch size of 65536 and 32768 on sequence lengths 128 and 512 respectively, compared to a batch size of 256 for Adam. The optimized implementation accumulates 1024 gradient batches in phase 1 and 4096 steps in phase 2 before updating weights once. This results in a 27% training speedup on a single DGX2 node. On multi-node systems, LAMB allows scaling up to 1024 GPUs, resulting in training speedups of up to 17x compared to [Adam](https://arxiv.org/pdf/1412.6980.pdf). Adam has limitations on the learning rate that can be used since it is applied globally to all parameters, whereas LAMB follows a layerwise learning rate strategy.
NVLAMB adds necessary tweaks to [LAMB version 1](https://arxiv.org/abs/1904.00962v1) to ensure correct convergence. A guide to implementing the LAMB optimizer can be found in our [article](https://medium.com/@NvidiaAI/a-guide-to-optimizer-implementation-for-bert-at-scale-8338cc7f45fd) on Medium.com. The algorithm is as follows:

### Mixed precision training
Mixed precision is the combined use of different numerical precision in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta, Turing, and NVIDIA Ampere GPU architectures automatically. The TensorFlow framework code makes all necessary model changes internally.
In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling.
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide.
#### Enabling mixed precision
This implementation exploits the TensorFlow Automatic Mixed Precision feature. To enable AMP, you simply need to supply the `--use_fp16` flag to the `run_pretraining.py` or `run_squad.py` script. For reference, enabling AMP required us to apply the following changes to the code:
1. Set the Keras mixed precision policy:
```python
if FLAGS.use_fp16:
    policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16")
    tf.keras.mixed_precision.experimental.set_policy(policy)
```
2. Use the loss scaling wrapper on the optimizer:
```python
if FLAGS.use_fp16:
    squad_model.optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
        squad_model.optimizer, dynamic=True)
```
3. Use scaled loss to calculate the gradients:
```python
if use_float16:
    scaled_loss = optimizer.get_scaled_loss(loss)
    scaled_grads = tape.gradient(scaled_loss, training_vars)
    grads = optimizer.get_unscaled_gradients(scaled_grads)
```
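Put together, a single AMP fine-tuning step then looks roughly like the following sketch; the function and variable names are illustrative rather than taken verbatim from the repository:
```python
import tensorflow as tf
def amp_train_step(model, optimizer, features, labels, loss_fn, use_float16):
"""One fine-tuning step with optional loss scaling (illustrative sketch)."""
with tf.GradientTape() as tape:
loss = loss_fn(labels, model(features, training=True))
if use_float16:
# Scale the loss so that small FP16 gradients do not underflow to zero.
scaled_loss = optimizer.get_scaled_loss(loss)
training_vars = model.trainable_variables
if use_float16:
scaled_grads = tape.gradient(scaled_loss, training_vars)
grads = optimizer.get_unscaled_gradients(scaled_grads)
else:
grads = tape.gradient(loss, training_vars)
optimizer.apply_gradients(zip(grads, training_vars))
return loss
```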
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
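If bitwise FP32 reproducibility is needed, TF32 can be toggled explicitly; the calls below are a sketch using the TensorFlow 2 experimental API:
```python
import tensorflow as tf
# TF32 is on by default on Ampere GPUs; disable it to force full FP32 math.
tf.config.experimental.enable_tensor_float_32_execution(False)
print(tf.config.experimental.tensor_float_32_execution_enabled())  # False
```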
### Glossary
**Fine-tuning**
Training an already pretrained model further using a task specific dataset for subject-specific refinements, by adding task-specific layers on top if required.
**Language model**
Assigns a probability distribution over a sequence of words. Given a sequence of words, it assigns a probability to the whole sequence.
**Pre-training**
Training a model on vast amounts of data on the same (or different) task to build general understandings.
**Transformer**
The paper [Attention Is All You Need](https://arxiv.org/abs/1706.03762) introduces a novel architecture called Transformer that uses an attention mechanism and transforms one sequence into another.
## Setup
The following section lists the requirements that you need to meet in order to start training the BERT model.
### Requirements
This repository contains `Dockerfile` which extends the TensorFlow NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- [TensorFlow 21.02-py3+](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container
- GPU-based architecture:
- [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
- [Running TensorFlow](https://docs.nvidia.com/deeplearning/frameworks/tensorflow-release-notes/running.html#running)
For those unable to use the TensorFlow NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
For multi-node, the sample provided in this repository requires [Enroot](https://github.com/NVIDIA/enroot) and [Pyxis](https://github.com/NVIDIA/pyxis) set up on a [SLURM](https://slurm.schedmd.com) cluster.
More information on how to set up and launch can be found in the [Multi-node Documentation](https://docs.nvidia.com/ngc/multi-node-bert-user-guide).
## Quick Start Guide
To pretrain or fine tune your model for Question Answering using mixed precision with Tensor Cores or using FP32/TF32, perform the following steps using the default parameters of the BERT model.
1. Clone the repository.
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/TensorFlow2/LanguageModeling/BERT
```
2. Build the BERT TensorFlow NGC container.
```bash
bash scripts/docker/build.sh
```
3. Download and preprocess the dataset.
This repository provides scripts to download, verify, and extract the SQuAD dataset and pretrained weights for fine tuning, as well as the Wikipedia and BookCorpus datasets for pre-training.
To download, verify, and extract the required datasets, run:
```bash
bash scripts/data_download.sh
```
The script launches a Docker container with the current directory mounted and downloads the datasets to a `data/` folder on the host.
- Download datasets for fine tuning and pretraining
`bash scripts/data_download.sh all`
- Download datasets for fine tuning only
`bash scripts/data_download.sh squad`
- Download Wikipedia only for pretraining
The pretraining dataset is 170GB+ and takes 15+ hours to download. The BookCorpus server frequently gets overloaded and the dataset contains broken links, resulting in HTTP 403 and 503 errors. Hence, it is recommended to skip downloading BookCorpus data by running:
`bash scripts/data_download.sh pretrained wiki_only`
Note: Wikipedia dataset is 17GB and takes about one hour to download.
- Download Wikipedia and BookCorpus
Users can download BookCorpus from other sources to match our accuracy, or repeatedly try our script until the required number of files are downloaded by running the following:
`bash scripts/data_download.sh pretrained wiki_books`
Note: Not using BookCorpus can potentially change final accuracy on a few downstream tasks.
4. Download the pretrained models from NGC.
We have uploaded checkpoints for fine tuning with various configurations on the NGC Model Registry. You can download them directly from the [NGC model catalog](https://ngc.nvidia.com/catalog/models). Download them to the `results/models/` directory to easily access them in your scripts.
5. Start an interactive session in the NGC container to run training/inference.
After you build the container image and download the data, you can start an interactive CLI session as follows:
```bash
bash scripts/docker/launch.sh
```
6. Start pre-training.
BERT is designed to pre-train deep bidirectional language representations. The following scripts replicate pre-training on Wikipedia and BookCorpus from the [LAMB paper](https://arxiv.org/pdf/1904.00962.pdf). These scripts are general and can be used for pre-training language representations on any corpus of choice.
From within the container, you can use the following script to run pre-training using LAMB.
```bash
bash scripts/run_pretraining_lamb.sh <train_batch_size_phase1> <train_batch_size_phase2> <eval_batch_size> <learning_rate_phase1> <learning_rate_phase2> <precision> <use_xla> <num_gpus> <warmup_steps_phase1> <warmup_steps_phase2> <train_steps> <save_checkpoint_steps> <num_accumulation_phase1> <num_accumulation_steps_phase2> <bert_model>
```
For BERT-Large FP16 training with XLA using a DGX-2H, run:
```bash
bash scripts/run_pretraining_lamb.sh 60 10 8 7.5e-4 5e-4 fp16 true 8 2000 200 7820 100 64 192 large
```
This repository also contains a number of predefined configurations to run the LAMB pretraining on NVIDIA DGX-1, NVIDIA DGX-2H or NVIDIA DGX A100 nodes in `scripts/configs/pretrain_config.sh`. For example, to use the default DGX A100 8 GPU config, run:
```bash
bash scripts/run_pretraining_lamb.sh $(source scripts/configs/pretrain_config.sh && dgxa100_8gpu_fp16)
```
Alternatively, to run pre-training with Adam as in the original [BERT paper](https://arxiv.org/pdf/1810.04805.pdf) from within the container, run:
```bash
bash scripts/run_pretraining_adam.sh <train_batch_size_per_gpu> <eval_batch_size> <learning_rate_per_gpu> <precision> <use_xla> <num_gpus> <warmup_steps> <train_steps> <save_checkpoint_steps>
```
7. Start fine tuning.
The above pretrained BERT representations can be fine tuned with just one additional output layer for a state-of-the-art Question Answering system. From within the container, you can use the following script to run fine-tuning for SQuAD.
```bash
bash scripts/run_squad.sh <num_gpus> <batch_size_per_gpu> <learning_rate_per_gpu> <precision> <use_xla> <bert_model> <squad_version> <epochs>
```
For SQuAD 1.1 FP16 training with XLA using a DGX-1 V100 32G, run:
```bash
bash scripts/run_squad.sh 8 12 5e-6 fp16 true large 1.1 2
```
For SQuAD 2.0 FP32 training without XLA using a DGX-1 V100 32G, run:
```bash
bash scripts/run_squad.sh 8 8 5e-6 fp32 false large 2.0 2
```
The fine-tuned checkpoint will be saved to `/results/tf_bert_finetuning_squad_xxxxxx/ctl_step_xxx.ckpt-x`.
8. Start validation/evaluation.
The `run_squad_inference.sh` script runs inference on a checkpoint fine tuned for SQuAD and evaluates the validity of predictions on the basis of exact match and F1 score.
```bash
bash scripts/run_squad_inference.sh <init_checkpoint> <batch_size> <precision> <use_xla> <bert_model> <squad_version>
```
The `init_checkpoint` is the path to the fine-tuned checkpoint. For example, take the checkpoint from the previous step, `/results/tf_bert_finetuning_squad_xxxxxx/ctl_step_xxx.ckpt-x`, and rename it to `/results/model.ckpt`. Note that SQuAD 1.1 and SQuAD 2.0 require different checkpoints.
For SQuAD 2.0 FP16 inference with XLA using a DGX-1 V100 32G, run:
```bash
bash scripts/run_squad_inference.sh /results/model.ckpt 8 fp16 true large 2.0
```
For SQuAD 1.1 FP32 inference without XLA using a DGX-1 V100 32G, run:
```bash
bash scripts/run_squad_inference.sh /results/model.ckpt 8 fp32 false large 1.1
```
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
In the root directory, the most important files are:
* `run_pretraining.py` - Serves as entry point for pre-training
* `run_squad.py` - Serves as entry point for SQuAD training
* `Dockerfile` - Container with the basic set of dependencies to run BERT
The `scripts/` folder encapsulates all the one-click scripts required for running various functionalities supported such as:
* `run_squad.sh` - Runs SQuAD training and inference using `run_squad.py` file
* `run_pretraining_adam.sh` - Runs pre-training with Adam optimizer using the `run_pretraining.py` file
* `run_pretraining_lamb.sh` - Runs pre-training with LAMB optimizer using the `run_pretraining.py` file in two phases. Phase 1 does 90% of training with sequence length = 128. In phase 2, the remaining 10% of the training is done with sequence length = 512.
* `data_download.sh` - Downloads datasets using files in the `data/` folder
* `finetune_train_benchmark.sh` - Captures performance metrics of training for multiple configurations
* `finetune_inference_benchmark.sh` - Captures performance metrics of inference for multiple configurations
The `data/` folder contains necessary folders and scripts to download datasets required for fine tuning and pre-training BERT.
After downloading the data, the `launch.sh` script assumes that the datasets are in the following locations by default
- SQuAD v1.1 - `data/download/squad/v1.1`
- SQuAD v2.0 - `data/download/squad/v2.0`
- BERT-Large - `data/download/google_pretrained_weights/uncased_L-24_H-1024_A-16`
- BERT-Base - `data/download/google_pretrained_weights/uncased_L-12_H-768_A-12`
- Wikipedia + BookCorpus TFRecords - `data/tfrecords/books_wiki_en_corpus`
The `official/` folder contains necessary files of building model architecture and training process.
### Parameters
Aside from the options to set hyperparameters, the relevant options to control the behaviour of the `run_pretraining.py` script are:
```
--bert_config_file: Bert configuration file to define core bert layers.
--init_checkpoint: Initial checkpoint (usually from a pre-trained BERT model).
--[no]use_horovod: Whether to use horovod.(default: 'false')
--[no]use_fp16: Whether to use fp32 or fp16 arithmetic on GPU. When false, uses TF32 on A100 and FP32 on V100 GPUS.(default: 'false')
--[no]enable_xla: Whether to enable XLA auto jit compilation.(default: 'false')
--input_files: File path to retrieve training data for pre-training.
--model_dir: The location of the model checkpoint files.
--optimizer_type: Optimizer used for training - LAMB or ADAM
--num_accumulation_steps: Number of accumulation steps before gradient update. Global batch size = num_accumulation_steps * train_batch_size
```
Aside from the options to set hyperparameters, some relevant options to control the behaviour of the `run_squad.py` script are:
```
--bert_config_file: Bert configuration file to define core bert layers.
--model_dir: The location of the model checkpoint files.
--mode: <train_and_predict|train|predict|export_only>: One of {"train_and_predict", "train", "predict", "export_only"}. `train_and_predict`: both train and predict to a json file. `train`: only trains the model. `predict`: predict answers from the squad json file. `export_only`: will take the latest checkpoint inside model_dir and export a `SavedModel`.
--max_answer_length: The maximum length of an answer that can be generated. (default: '30')(an integer)
--input_meta_data_path: Path to file that contains meta data about input to be used for training and evaluation.
--predict_batch_size: Total batch size for predictions.(default: '8')(an integer)
--train_batch_size: Total batch size for training.(default: '8')(an integer)
--[no]use_fp16: Whether to use fp32 or fp16 arithmetic on GPU. When false, uses TF32 on A100 and FP32 on V100 GPUS.(default: 'false')
--[no]enable_xla: Whether to enable XLA auto jit compilation.(default: 'false')
```
### Command-line options
To see the full list of available options and their descriptions, use the `-h` or `--helpfull` command-line option with the Python file, for example:
```bash
python run_pretraining.py --helpfull
python run_squad.py --helpfull
```
### Getting the data
For pre-training BERT, we use the concatenation of Wikipedia (2500M words) and BookCorpus (800M words). For Wikipedia, we extract only the text passages from [here](https://dumps.wikimedia.your.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2) and ignore headers, lists, and tables. It is structured as a document-level corpus rather than a shuffled sentence-level corpus because it is critical to extract long contiguous sentences.
The next step is to run `create_pretraining_data.py` with the document level corpus as input, which generates input data and labels for the masked language modeling and next sentence prediction tasks. Pre-training can also be performed on any corpus of your choice. The collection of data generation scripts are intended to be modular to allow modifications for additional preprocessing steps or to use additional data. They can hence easily be modified for an arbitrary corpus.
The preparation of an individual pre-training dataset is described in the `create_datasets_from_start.sh` script found in the `data/` folder. The component steps to prepare the datasets are as follows:
1. Data download and extract - the dataset is downloaded and extracted.
2. Clean and format - document tags, etc. are removed from the dataset. The end result of this step is a `{dataset_name_one_article_per_line}.txt` file that contains the entire corpus. Each line in the text file contains an entire document from the corpus. One file per dataset is created in the `formatted_one_article_per_line` folder.
3. Sharding - the sentence segmented corpus file is split into a number of smaller text documents. The sharding is configured so that a document will not be split between two shards. Sentence segmentation is performed at this time using Natural Language Toolkit (NLTK).
4. TFRecord file creation - each text file shard is processed by the `create_pretraining_data.py` script to produce a corresponding TFRecord file. The script generates input data and labels for masked language modeling and sentence prediction tasks for the input text shard.
For fine tuning BERT for the task of Question Answering, we use SQuAD. SQuAD v1.1 has 100,000+ question-answer pairs on 500+ articles. SQuAD v2.0 combines v1.1 with 50,000 new unanswerable questions; a model must not only answer questions when possible but also determine when no answer is supported by the paragraph.
#### Dataset guidelines
The procedure to prepare a text corpus for pre-training is described in the previous section. This section provides additional insight into how exactly raw text is processed so that it is ready for pre-training.
First, raw text is tokenized using [WordPiece tokenization](https://arxiv.org/pdf/1609.08144.pdf). A [CLS] token is inserted at the start of every sequence, and the two sentences in the sequence are separated by a [SEP] token.
Note: BERT pre-training looks at pairs of sentences at a time. A sentence embedding token [A] is added to the first sentence and token [B] to the next.
BERT pre-training optimizes for two unsupervised classification tasks. The first is Masked Language Modelling (Masked LM). One training instance of Masked LM is a single modified sentence. Each token in the sentence has a 15% chance of being selected for prediction. A selected token is replaced with [MASK] 80% of the time, with another random token 10% of the time, and left unchanged the remaining 10% of the time. The task is then to predict the original token.
The second task is next sentence prediction. One training instance of BERT pre-training is two sentences (a sentence pair). A sentence pair may be constructed by simply taking two adjacent sentences from a single document, or by pairing up two random sentences with equal probability. The goal of this task is to predict whether or not the second sentence followed the first in the original document.
The `create_pretraining_data.py` script takes in raw text and creates training instances for both pre-training tasks.
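As a rough illustration of the 80/10/10 masking rule described above (a toy sketch, not the exact logic in `create_pretraining_data.py`):
```python
import random
def mask_tokens(tokens, vocab, mask_prob=0.15, mask_token="[MASK]"):
"""Toy Masked LM corruption: each non-special token is selected with probability
mask_prob; a selected token becomes [MASK] 80% of the time, a random vocabulary
token 10% of the time, and stays unchanged 10% of the time."""
masked, labels = [], []
for token in tokens:
if token in ("[CLS]", "[SEP]") or random.random() >= mask_prob:
masked.append(token)
labels.append(None)              # not a prediction target
continue
labels.append(token)                 # the original token is the prediction target
r = random.random()
if r < 0.8:
masked.append(mask_token)        # 80%: replace with [MASK]
elif r < 0.9:
masked.append(random.choice(vocab))  # 10%: replace with a random token
else:
masked.append(token)             # 10%: keep the original token
return masked, labels
print(mask_tokens(["[CLS]", "the", "cat", "sat", "on", "the", "mat", "[SEP]"],
vocab=["dog", "ran", "tree", "blue"]))
```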
#### Multi-dataset
We are able to combine multiple datasets into a single dataset for pre-training on a diverse text corpus. Once TFRecords have been created for each component dataset, you can create a combined dataset by adding the directory to `SOURCES` in `run_pretraining_*.sh`. This will feed all matching files to the input pipeline in `run_pretraining.py`. However, in the training process, only one TFRecord file is consumed at a time; therefore, the training instances of any given training batch will all belong to the same source dataset.
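A minimal `tf.data` sketch of feeding TFRecord shards from several sources through one pipeline is shown below; the paths and pipeline parameters are hypothetical and do not mirror the exact input pipeline in `run_pretraining.py`:
```python
import tensorflow as tf
# Hypothetical shard locations standing in for the directories added to SOURCES.
sources = ["data/tfrecords/books_wiki_en_corpus/*", "data/tfrecords/my_extra_corpus/*"]
files = tf.data.Dataset.list_files(sources, shuffle=True)
# cycle_length=1 reads one TFRecord file at a time, so each batch is drawn
# from a single source file, matching the behaviour described above.
dataset = files.interleave(tf.data.TFRecordDataset, cycle_length=1, block_length=1)
dataset = dataset.shuffle(1000).batch(8).prefetch(tf.data.AUTOTUNE)
```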
### Training process
#### Pre-training
Pre-training is performed using the `run_pretraining.py` script along with parameters defined in the `scripts/run_pretraining_lamb.sh`.
The `run_pretraining_lamb.sh` script runs a job on a single node that trains the BERT-Large model from scratch using the Wikipedia and BookCorpus datasets as training data. By default, the training script:
- Runs on 8 GPUs.
- Has FP16 precision enabled.
- Is XLA enabled.
- Creates a log file containing all the output.
- Saves a checkpoint every 100 iterations (keeps only the latest checkpoint) at the end of training. All checkpoints, evaluation results, and training logs are saved to the `/results` directory (in the container which can be mounted to a local directory).
- Evaluates the model at the end of each phase.
- Phase 1
    - Runs 7506 steps with 2133 warmup steps.
    - Sets maximum sequence length to 128.
    - Sets global batch size to 61K.
- Phase 2
    - Runs 1668 steps with 213 warmup steps.
    - Sets maximum sequence length to 512.
    - Sets global batch size to 30K.
    - Starts from Phase 1's final checkpoint.
These parameters train Wikipedia and BookCorpus with reasonable accuracy on a DGX-1 with 32GB V100 cards.
For example:
```bash
scripts/run_pretraining_lamb.sh <train_batch_size_phase1> <train_batch_size_phase2> <eval_batch_size> <learning_rate_phase1> <learning_rate_phase2> <precision> <use_xla> <num_gpus> <warmup_steps_phase1> <warmup_steps_phase2> <train_steps> <save_checkpoint_steps> <num_accumulation_phase1> <num_accumulation_steps_phase2> <bert_model>
```
Where:
- `<training_batch_size_phase*>` is the per-GPU batch size used for training in the respective phase. Batch size varies with precision; larger batch sizes run more efficiently but require more memory.
- `<eval_batch_size>` is the per-GPU batch size used for evaluation after training.
- `<learning_rate_phase1>` is the phase 1 learning rate; the default of 5e-4 is good for a global batch size of 61K.
- `<learning_rate_phase2>` is the phase 2 learning rate; the default of 7.5e-4 is good for a global batch size of 30K.
- `<precision>` is the type of math in your model; it can be either `fp32` or `fp16`. Specifically:
    - `fp32` is 32-bit IEEE single-precision floating point; it is enabled by default on V100.
    - `fp16` automatically rewrites the TensorFlow compute graph to take advantage of 16-bit arithmetic whenever it is safe.
    - `tf32` uses the same 10-bit mantissa as FP16 and the same 8-bit exponent as FP32; it is enabled by default on A100.
- `<num_gpus>` is the number of GPUs to use for training. It must be equal to or smaller than the number of GPUs attached to your node.
- `<warmup_steps_phase*>` is the number of warm-up steps at the start of training in the respective phase.
- `<training_steps>` is the total number of training steps in both phases combined.
- `<save_checkpoint_steps>` controls how often checkpoints are saved. The default is 100 steps.
- `<num_accumulation_phase*>` is used to mimic a higher batch size in the respective phase by accumulating gradients N times before each weight update.
- `<bert_model>` indicates whether to pretrain the BERT-Large or BERT-Base model.
The following sample code trains BERT-Large from scratch on a single DGX-2 using FP16 arithmetic. This will take around 4.5 days.
```bash
scripts/run_pretraining_lamb.sh 60 10 8 7.5e-4 5e-4 fp16 true 8 2000 200 7820 100 64 192 large
```
#### Fine tuning
Fine tuning is performed using the `run_squad.py` script along with parameters defined in `scripts/run_squad.sh`.
The `run_squad.sh` script trains a model and performs evaluation on the SQuAD dataset. By default, the training script:
- Trains for SQuAD v1.1 dataset.
- Trains on BERT-Large model.
- Uses 8 GPUs and batch size of 12 on each GPU.
- Has FP16 precision enabled.
- Is XLA enabled.
- Runs for 2 epochs.
- Saves a checkpoint every 180 seconds (keeps only the latest checkpoint) at the end of training. All checkpoints, evaluation results, and training logs are saved to the `/results` directory (in the container which can be mounted to a local directory).
- Evaluation is done at the end of training. To skip evaluation, modify `--mode` from `train_and_predict` to `train`.
This script outputs checkpoints to the `/results` directory, by default, inside the container. Mount point of `/results` can be changed in the `scripts/docker/launch.sh` file. The training log contains information about:
- Loss for the final step
- Training and evaluation performance
- F1 and exact match score on the Dev Set of SQuAD after evaluation.
The summary after training is printed in the following format:
```bash
I0415 18:12:49.376930 140671213582144 model_training_utils.py:82] Training Summary:
{'total_training_steps': 1846, 'train_loss': 0.6074678301811218}
I0415 18:12:49.377982 140671213582144 model_training_utils.py:564] -----------------------------
I0415 18:12:49.377468 140671213582144 model_training_utils.py:558] Batch size = 12
...
I0415 18:12:49.379069 140671213582144 model_training_utils.py:568] -----------------------------
```
Multi-GPU training is enabled with the Horovod TensorFlow module. The following example runs training on 8 GPUs:
```bash
BERT_DIR=data/download/google_pretrained_weights/uncased_L-24_H-1024_A-16
mpirun -np 8 \
--allow-run-as-root -bind-to none -map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH -mca pml ob1 -mca btl ^openib \
python run_squad.py --use_horovod --vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--model_dir=/results
```
#### Multi-node
Multi-node runs can be launched on a pyxis/enroot Slurm cluster (see [Requirements](#requirements)) with the `run.sub` script with the following command for a 4-node DGX-1 example for both phase 1 and phase 2:
```
BATCHSIZE=16 LEARNING_RATE='1.875e-4' NUM_ACCUMULATION_STEPS=128 PHASE=1 sbatch -N4 --ntasks-per-node=8 run.sub
BATCHSIZE=2 LEARNING_RATE='1.25e-4' NUM_ACCUMULATION_STEPS=512 PHASE=2 sbatch -N4 --ntasks-per-node=8 run.sub
```
Checkpoint after phase 1 will be saved in `model_dir` specified in `run.sub`. The checkpoint will be automatically picked up to resume training on phase 2. Note that phase 2 should be run after phase 1.
Variables to re-run the [Training performance results](#training-performance-results) are available in the `scripts/configs/configurations.yml` file.
The batch variables `BATCHSIZE`, `LEARNING_RATE`, `NUM_ACCUMULATION_STEPS` refer to the Python arguments `train_batch_size`, `learning_rate`, `num_accumulation_steps` respectively.
The variable `PHASE` refers to phase specific arguments available in `run.sub`.
Note that the `run.sub` script is a starting point that has to be adapted depending on the environment. In particular, variables such as `datadir` handle the location of the files for each phase.
Refer to the files contents to see the full list of variables to adjust for your system.
### Inference process
Inference on a fine tuned Question Answering system is performed using the `run_squad.py` script along with parameters defined in `scripts/run_squad_inference.sh`. Inference is supported on a single GPU.
The `run_squad_inference.sh` script runs inference with a fine-tuned model and performs evaluation on the SQuAD dataset. By default, the inferencing script:
- Uses SQuAD v1.1 dataset
- Has FP16 precision enabled
- Is XLA enabled
- Evaluates the latest checkpoint present in `/results` with a batch size of 8
This script outputs predictions file to `/results/predictions.json` and computes F1 score and exact match score using SQuAD's evaluate file. Mount point of `/results` can be changed in the `scripts/docker/launch.sh` file.
The output log contains information about:
- Inference performance
- Inference accuracy (F1 and exact match scores) on the Dev Set of SQuAD after evaluation.
The summary after inference is printed in the following format:
```bash
I0424 23:59:50.030514 139905798453056 run_squad.py:268] -----------------------------
I0424 23:59:50.030774 139905798453056 run_squad.py:269] Summary Inference Statistics
I0424 23:59:50.030934 139905798453056 run_squad.py:270] Batch size = 8
I0424 23:59:50.031085 139905798453056 run_squad.py:271] Sequence Length = 384
I0424 23:59:50.031238 139905798453056 run_squad.py:272] Precision = fp16
I0424 23:59:50.031387 139905798453056 run_squad.py:274] Total Inference Time = 88.29 for Sentences = 10840
I0424 23:59:50.031537 139905798453056 run_squad.py:302] -----------------------------
{"exact_match": 84.08703878902554, "f1": 90.87995817872932}
```
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
Both of these benchmarking scripts enable you to run a number of epochs, extract performance numbers, and run the BERT model for fine tuning.
#### Training performance benchmark
Training benchmarking can be performed by running the script:
``` bash
scripts/finetune_train_benchmark.sh <bert_model> <num_gpu> <batch_size> <precision> <use_xla>
```
This script runs 800 steps by default on the SQuAD v1.1 dataset and extracts performance numbers for the given configuration. These numbers are saved at `/results/squad_train_benchmark_<bert_model>_gpu<num_gpu>_bs<batch_size>.log`.
#### Inference performance benchmark
Inference benchmarking can be performed by running the script:
``` bash
scripts/finetune_inference_benchmark.sh <bert_model> <batch_size> <precision> <use_xla>
```
This script runs 1000 eval iterations by default on the SQuAD v1.1 dataset and extracts performance and latency numbers for the given configuration. These numbers are saved at `/results/squad_inference_benchmark_<bert_model>_<precision>_bs<batch_size>.log`.
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference for fine tuning Question Answering. All results are on BERT-Large model unless otherwise mentioned. All fine tuning results are on SQuAD v1.1 using a sequence length of 384 unless otherwise mentioned.
#### Training accuracy results
##### Pre-training accuracy
Our results were obtained by running the `scripts/run_pretraining_lamb.sh` training script in the TensorFlow 21.02-py3 NGC container on NVIDIA DGX-2 and NVIDIA DGX A100.
| **DGX System** | **Nodes x GPUs** | **Precision** | **Batch Size/GPU: Phase1, Phase2** | **Accumulation Steps: Phase1, Phase2** | **Time to Train (Hrs)** | **Final Loss** |
|----------------|-----------|---------------|------------------------------------|----------------------------------------|----------------|-------------------------|
| DGX2H | 32 x 16 | FP16 | 56, 10 | 2, 6 | 2.67 | 1.69 |
| DGX2H | 32 x 16 | FP32 | 32, 4 | 4, 16 | 8.02 | 1.71 |
| DGXA100 | 32 x 8 | FP16 | 312, 40 | 1, 3 | 2.02 | 1.68 |
| DGXA100 | 32 x 8 | TF32 | 176, 22 | 2, 6 | 3.57 | 1.67 |
##### Fine-tuning accuracy for SQuAD v1.1: NVIDIA DGX A100 (8x A100 40GB)
Our results were obtained by running the `scripts/run_squad.sh` training script in the TensorFlow 20.12-py3 NGC container on NVIDIA DGX A100 with 8x A100 80GB GPUs.
| **GPUs** | **Batch size / GPU: TF32, FP16 ** | **Accuracy - TF32** | **Accuracy - mixed precision** | **Time to Train - TF32 (Hrs)** | **Time to Train - mixed precision (Hrs)** |
|:---:|:----:|:----:|:---:|:----:|:----:|
| 8 | 38, 76 | 90.88 | 91.12 | 0.16 | 0.11 |
##### Pre-training stability test: NVIDIA DGX A100 (256x A100 80GB)
The following tables compare `Final Loss` scores across 3 different training runs with different seeds, for both FP16 and TF32. The runs showcase consistent convergence on all 3 seeds with very little deviation.
| **FP16, 256x GPUs** | **seed 1** | **seed 2** | **seed 3** | **mean** | **std** |
|:-----------:|:-----:|:-----:|:-----:|:-----:|:-----:|
|Final Loss |1.657 |1.661 |1.683 |1.667 |0.014 |
| **TF32, 256x GPUs** | **seed 1** | **seed 2** | **seed 3** | **mean** | **std** |
|:-----------:|:-----:|:-----:|:-----:|:-----:|:-----:|
|Final Loss |1.67 |1.654 |1.636 |1.653 |0.017 |
##### Fine-tuning SQuAD v1.1 stability test: NVIDIA DGX A100 (8x A100 80GB)
The following tables compare `F1` scores across 5 different training runs with different seeds, for FP16 and TF32 respectively, using the [NVIDIA Pretrained Checkpoint](https://ngc.nvidia.com/catalog/models). The runs showcase consistent convergence on all 5 seeds with very little deviation.
| **FP16, 8x GPUs** | **seed 1** | **seed 2** | **seed 3** | **seed 4** | **seed 5** | **mean** | **std** |
|:-----------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|
|F1 |91.12 |90.80 |90.94 |90.90 |90.94 |90.94 |0.11 |
| **TF32, 8x GPUs** | **seed 1** | **seed 2** | **seed 3** | **seed 4** | **seed 5** | **mean** | **std** |
|:-----------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|
|F1 |90.79 |90.88 |90.80 |90.88 |90.83 |90.84 |0.04 |
#### Training performance results
##### Pre-training training performance: Single-node on NVIDIA DGX-2 V100 (16x V100 32GB)
Our results were obtained by running the `scripts/run_pretraining_lamb.sh` training script in the TensorFlow 21.02-py3 NGC container on NVIDIA DGX-2 with 16x V100 32GB GPUs. Performance (in sequences per second) is the steady state throughput.
| **GPUs** | **Sequence Length** | **Batch size / GPU: mixed precision, FP32** | **Gradient Accumulation: mixed precision, FP32** | **Global Batch Size: mixed precision, FP32** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - FP32** |
|:--------:|:-------------------:|:-------------------------------------------:|--------------------------------------------------|:--------------------------------------------:|:--------------------------------:|:---------------------:|-------------------------------------------------|------------------------------------|-------------------------|
| 1 | 128 | 60 , 32 | 1024 , 2048 | 61440 , 65536 | 206.5 | 49.97 | 4.13 | 1.00 | 1.00 |
| 4 | 128 | 60 , 32 | 256 , 512 | 61440 , 65536 | 789.75 | 194.02 | 4.07 | 3.82 | 3.88 |
| 8 | 128 | 60 , 32 | 128 , 256 | 61440 , 65536 | 1561.77 | 367.9 | 4.25 | 7.56 | 7.36 |
| 16 | 128 | 60 , 32 | 64 , 128 | 61440 , 65536 | 3077.99 | 762.22 | 4.04 | 14.9 | 15.25 |
| 1 | 512 | 10 , 6 | 3072 , 5120 | 30720 , 30720 | 40.95 | 11.06 | 3.70 | 1.00 | 1.00 |
| 4 | 512 | 10 , 6 | 768 , 1280 | 30720 , 30720 | 158.5 | 43.05 | 3.68 | 3.87 | 3.89 |
| 8 | 512 | 10 , 6 | 384 , 640 | 30720 , 30720 | 312.03 | 85.51 | 3.65 | 7.62 | 7.73 |
| 16 | 512 | 10 , 4 | 192 , 512 | 30720 , 32768 | 614.94 | 161.38 | 3.81 | 15.02 | 14.59 |
Note: The respective values for FP32 runs that use a batch size of 60 and 10 in sequence lengths 128 and 512 are not available due to out of memory errors that arise.
##### Pre-training training performance: Multi-node on NVIDIA DGX-2H V100 (16x V100 32GB)
Our results were obtained by running the `run.sub` training script in the TensorFlow 21.02-py3 NGC container using multiple NVIDIA DGX-2 with 16x V100 32GB GPUs. Performance (in sequences per second) is the steady state throughput.
| **Num Nodes** | **Sequence Length** | **Batch size / GPU: mixed precision, FP32** | **Gradient Accumulation: mixed precision, FP32** | **Global Batch Size: mixed precision, FP32** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - FP32** |
|:-------------:|:-------------------:|:-------------------------------------------:|--------------------------------------------------|:--------------------------------------------:|:--------------------------------:|:---------------------:|-------------------------------------------------|------------------------------------|-------------------------|
| 1 | 128 | 60 , 32 | 64 , 128 | 61440 , 65536 | 3528.51 | 841.72 | 4.19 | 1.00 | 1.00 |
| 4 | 128 | 60 , 32 | 16 , 32 | 61440 , 65536 | 13370.21 | 3060.49 | 4.37 | 3.79 | 3.64 |
| 16 | 128 | 60 , 32 | 4 , 8 | 61440 , 65536 | 42697.42 | 10383.57 | 4.11 | 12.1 | 12.34 |
| 32 | 128 | 60 , 32 | 2 , 4 | 61440 , 65536 | 84223.16 | 20094.14 | 4.19 | 23.87 | 23.87 |
| 1 | 512 | 10 , 4 | 192 , 256 | 30720 , 32768 | 678.35 | 180 | 3.77 | 1.00 | 1.00 |
| 4 | 512 | 10 , 4 | 96 , 64 | 30720 , 32768 | 2678.29 | 646.76 | 4.14 | 3.95 | 3.59 |
| 16 | 512 | 10 , 4 | 24 , 32 | 30720 , 32768 | 7834.72 | 2204.72 | 3.55 | 11.55 | 12.25 |
| 32 | 512 | 10 , 4 | 6 , 16 | 30720 , 32768 | 18786.93 | 4196.15 | 4.48 | 27.70 | 23.31 |
Note: The respective values for FP32 runs that use a batch size of 60 and 10 in sequence lengths 128 and 512 are not available due to out of memory errors that arise.
##### Pre-training training performance: Single-node on NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `scripts/run_pretraining_lamb.sh` training script in the TensorFlow 21.02-py3 NGC container on NVIDIA DGX A100 with 8x A100 80GB GPUs. Performance (in sequences per second) is the steady state throughput.
| **GPUs** | **Sequence Length** | **Batch size / GPU: mixed precision, TF32** | **Gradient Accumulation: mixed precision, TF32** | **Global Batch Size: mixed precision, FP32** | **Throughput - mixed precision** | **Throughput - TF32** | **Throughput speedup (TF32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling -TF32** |
|:--------:|:-------------------:|:-------------------------------------------:|--------------------------------------------------|:--------------------------------------------:|:--------------------------------:|:---------------------:|-------------------------------------------------|------------------------------------|------------------------|
| 1 | 128 | 312 , 176 | 256 , 512 | 79872 , 90112 | 485.59 | 282.98 | 1.72 | 1.00 | 1.00 |
| 8 | 128 | 312 , 176 | 32 , 64 | 79872 , 90112 | 3799.24 | 1944.77 | 1.95 | 7.82 | 6.87 |
| 1 | 512 | 40 , 22 | 768 , 1536 | 30720 , 33792 | 96.52 | 54.92 | 1.76 | 1.00 | 1.00 |
| 8 | 512 | 40 , 22 | 96 , 192 | 30720 , 33792 | 649.69 | 427.39 | 1.52 | 6.73 | 7.78 |
Note: The respective values for TF32 runs that use a batch size of 312 and 40 in sequence lengths 128 and 512 are not available due to out of memory errors that arise.
##### Pre-training training performance: Multi-node on NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `scripts/run_pretraining_lamb.sh` training script in the TensorFlow 21.02-py3 NGC container on NVIDIA DGX A100 with 8x A100 40GB GPUs. Performance (in sequences per second) is the steady state throughput.
| **Num Nodes** | **Sequence Length** | **Batch size / GPU: mixed precision, TF32** | **Gradient Accumulation: mixed precision, TF32** | **Global Batch Size: mixed precision, FP32** | **Throughput - mixed precision** | **Throughput - TF32** | **Throughput speedup (TF32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling -TF32** |
|:-------------:|:-------------------:|:-------------------------------------------:|--------------------------------------------------|:--------------------------------------------:|:--------------------------------:|:---------------------:|-------------------------------------------------|------------------------------------|------------------------|
| 1 | 128 | 312 , 176 | 32 , 64 | 79872 , 90112 | 3803.82 | 2062.98 | 1.84 |1.00 | 1.00 |
| 2 | 128 | 312 , 176 | 16 , 32 | 79872 , 90112 | 7551.37 | 4084.76 | 1.85 |1.99 | 1.98 |
| 8 | 128 | 312 , 176 | 4 , 8 | 79872 , 90112 | 29711.11 | 16134.02 | 1.84 |7.81 | 7.82 |
| 32 | 128 | 312 , 176 | 1 , 2 | 79872 , 90112 | 110280.73 | 59569.77 | 1.85 |28.99 | 28.88 |
| 1 | 512 | 40 , 22 | 96 , 192 | 30720 , 33792 | 749.73 | 431.89 | 1.74 |1.00 | 1.00 |
| 2 | 512 | 40 , 22 | 48 , 96 | 30720 , 33792 | 1491.87 | 739.14 | 2.02 |1.99 | 1.71 |
| 8 | 512 | 40 , 22 | 12 , 24 | 30720 , 33792 | 5870.83 | 2926.58 | 2.01 |7.83 | 6.78 |
| 32 | 512 | 40 , 22 | 3 , 6 | 30720 , 33792 | 22506.23 | 11240.5 | 2.00 |30.02 | 26.03 |
Note: The respective values for TF32 runs that use a batch size of 312 and 40 in sequence lengths 128 and 512 are not available due to out of memory errors that arise.
##### Fine-tuning training performance for SQuAD v1.1 on NVIDIA DGX-1 V100 (8x V100 16GB)
Our results were obtained by running the `scripts/run_squad.sh` training script in the TensorFlow 21.02-py3 NGC container on NVIDIA DGX-1 with 8x V100 16GB GPUs. Performance (in sequences per second) is the mean throughput from 2 epochs.
| **GPUs** | **Batch size / GPU: mixed precision, FP32** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 to mixed precision)** | **Weak scaling - FP32** | **Weak scaling - mixed precision** |
|:---:|:---:|:------:|:-----:|:----:|:----:|:----:|
| 1 | 6,3 | 39.10 | 9.85 | 3.97 | 1.00 | 1.00 |
| 4 | 6,3 | 128.48 | 36.52 | 3.52 | 3.29 | 3.71 |
| 8 | 6,3 | 255.36 | 73.03 | 3.5 | 6.53 | 7.41 |
Note: The respective values for FP32 runs that use a batch size of 6 are not available due to out of memory errors that arise. A batch size of 6 is only possible when using FP16.
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
##### Fine-tuning training performance for SQuAD v1.1 on NVIDIA DGX-1 V100 (8x V100 32GB)
Our results were obtained by running the `scripts/run_squad.sh` training script in the TensorFlow 21.02-py3 NGC container on NVIDIA DGX-1 with 8x V100 32GB GPUs. Performance (in sequences per second) is the mean throughput from 2 epochs.
| **GPUs** | **Batch size / GPU: mixed precision, FP32** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 to mixed precision)** | **Weak scaling - FP32** | **Weak scaling - mixed precision** |
|---|---|-----|------|----|----|----|
| 1 | 12,8 | 47.06 | 11.11 | 4.24 | 1.00 | 1.00 |
| 4 | 12,8 | 165.26 | 42.84 | 3.86 | 3.51 | 3.86 |
| 8 | 12,8 | 330.29 | 85.91 | 3.84 | 7.02 | 7.73 |
Note: The respective values for FP32 runs that use a batch size of 12 are not available due to out of memory errors that arise. A batch size of 12 is only possible when using FP16.
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
##### Fine-tuning training performance for SQuAD v1.1 on NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `scripts/run_squad.sh` training script in the TensorFlow 21.02-py3 NGC container on NVIDIA DGX A100 with 8x A100 80GB GPUs. Performance (in sequences per second) is the mean throughput from 2 epochs.
| **GPUs** | **Batch size / GPU: mixed precision, TF32** | **Throughput - mixed precision** | **Throughput - TF32** | **Throughput speedup (TF32 to mixed precision)** | **Weak scaling - TF32** | **Weak scaling - mixed precision** |
|---|---|------|------|----|-----|-----|
| 1 | 76,38 | 134.22 | 43.9 | 3.057 | 1.00 | 1.00 |
| 8 | 76,38 | 1048.23 | 341.31 | 3.071 | 7.81 | 7.77 |
Note: The respective values for TF32 runs that use a batch size of 76 are not available due to out of memory errors that arise. A batch size of 76 is only possible when using FP16.
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
#### Inference performance results
##### Fine-tuning inference performance for SQuAD v1.1 on NVIDIA DGX-1 V100 (1x V100 16GB)
Our results were obtained by running the `scripts/finetune_inference_benchmark.sh` inference benchmark script in the TensorFlow 21.02-py3 NGC container on NVIDIA DGX-1 with 1x V100 16GB GPU. Performance numbers (throughput in sequences per second and latency in milliseconds) were averaged over 1000 iterations. Latency is computed as the time taken to process one batch, with batches fed to the model one after another (that is, no pipelining).
BERT-Large FP16
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Throughput speedup (FP32 to mixed precision) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|----------------------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 105.04 | 1.277237354 | 9.52 | 9.67 | 9.77 | 10.16 |
| 128 | 2 | 184.9 | 1.671487977 | 10.82 | 11.15 | 11.27 | 11.8 |
| 128 | 4 | 301.9 | 2.448102498 | 13.25 | 13.38 | 13.45 | 13.96 |
| 128 | 8 | 421.98 | 3.149809659 | 18.96 | 19.12 | 19.2 | 19.82 |
| 384 | 1 | 74.99 | 2.15055922 | 13.34 | 13.5 | 13.58 | 14.53 |
| 384 | 2 | 109.84 | 2.709422792 | 18.21 | 18.4 | 18.6 | 19.39 |
| 384 | 4 | 142.58 | 3.313502208 | 28.05 | 28.28 | 28.48 | 28.85 |
| 384 | 8 | 168.34 | 3.823302294 | 47.52 | 47.74 | 47.86 | 48.52 |
BERT-Large FP32
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 82.24 | 12.16 | 12.28 | 12.33 | 12.92 |
| 128 | 2 | 110.62 | 18.08 | 18.22 | 18.28 | 18.88 |
| 128 | 4 | 123.32 | 32.44 | 32.72 | 32.82 | 32.98 |
| 128 | 8 | 133.97 | 59.71 | 60.29 | 60.49 | 60.69 |
| 384 | 1 | 34.87 | 28.67 | 28.92 | 29.02 | 29.33 |
| 384 | 2 | 40.54 | 49.34 | 49.74 | 49.86 | 50.05 |
| 384 | 4 | 43.03 | 92.97 | 93.59 | 93.75 | 94.57 |
| 384 | 8 | 44.03 | 181.71 | 182.34 | 182.48 | 183.03 |
BERT-Base FP16
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Throughput speedup (FP32 to mixed precision) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|----------------------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 236.26 | 1.179589595 | 4.23 | 4.37 | 4.49 | 4.59 |
| 128 | 2 | 425.1 | 1.441554478 | 4.7 | 4.84 | 4.97 | 5.26 |
| 128 | 4 | 710.48 | 1.911691107 | 5.63 | 5.78 | 5.93 | 6.4 |
| 128 | 8 | 1081.17 | 2.523032764 | 7.4 | 7.5 | 7.54 | 7.73 |
| 384 | 1 | 190.53 | 1.757170525 | 5.25 | 5.35 | 5.42 | 5.8 |
| 384 | 2 | 289.67 | 2.248292456 | 6.9 | 7.08 | 7.24 | 7.57 |
| 384 | 4 | 404.03 | 2.946328302 | 9.9 | 10 | 10.03 | 10.13 |
| 384 | 8 | 504.24 | 3.450153951 | 15.87 | 15.96 | 16.01 | 16.3 |
BERT-Base FP32
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 200.29 | 4.99 | 5.08 | 5.16 | 5.53 |
| 128 | 2 | 294.89 | 6.78 | 6.89 | 6.93 | 7.37 |
| 128 | 4 | 371.65 | 10.76 | 10.89 | 10.96 | 11.92 |
| 128 | 8 | 428.52 | 18.67 | 18.89 | 18.98 | 19.17 |
| 384 | 1 | 108.43 | 9.22 | 9.26 | 9.31 | 10.24 |
| 384 | 2 | 128.84 | 15.52 | 15.6 | 15.71 | 16.49 |
| 384 | 4 | 137.13 | 29.17 | 29.4 | 29.48 | 29.64 |
| 384 | 8 | 146.15 | 54.74 | 55.19 | 55.3 | 55.54 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
##### Fine-tuning inference performance for SQuAD v1.1 on NVIDIA DGX-1 V100 (1x V100 32GB)
Our results were obtained by running the `scripts/finetune_inference_benchmark.sh` inference benchmark script in the TensorFlow 21.02-py3 NGC container on NVIDIA DGX-1 with 1x V100 32GB GPU. Performance numbers (throughput in sequences per second and latency in milliseconds) were averaged over 1000 iterations. Latency is computed as the time taken to process one batch, with batches fed to the model one after another (that is, no pipelining).
BERT-Large FP16
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Throughput speedup (FP32 to mixed precision) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|----------------------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 101.58 | 1.242112986 | 9.84 | 9.99 | 10.06 | 10.39 |
| 128 | 2 | 181.89 | 1.651593571 | 11 | 11.14 | 11.2 | 11.87 |
| 128 | 4 | 295.86 | 2.348840902 | 13.52 | 13.67 | 13.75 | 14.5 |
| 128 | 8 | 411.29 | 3.010246652 | 19.45 | 19.62 | 19.69 | 20.4 |
| 384 | 1 | 72.95 | 2.083690374 | 13.71 | 13.93 | 14.08 | 14.81 |
| 384 | 2 | 107.02 | 2.583775954 | 18.69 | 18.8 | 18.88 | 19.57 |
| 384 | 4 | 139.8 | 3.14652262 | 28.61 | 28.75 | 28.88 | 29.6 |
| 384 | 8 | 163.68 | 3.595782074 | 48.88 | 49.09 | 49.18 | 49.77 |
BERT-Large FP32
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 81.78 | 12.23 | 12.37 | 12.43 | 13.2 |
| 128 | 2 | 110.13 | 18.16 | 18.29 | 18.37 | 19.27 |
| 128 | 4 | 125.96 | 31.76 | 32.09 | 32.21 | 32.42 |
| 128 | 8 | 136.63 | 58.55 | 58.93 | 59.05 | 59.4 |
| 384 | 1 | 35.01 | 28.56 | 28.81 | 28.94 | 29.16 |
| 384 | 2 | 41.42 | 48.29 | 48.57 | 48.67 | 49.02 |
| 384 | 4 | 44.43 | 90.03 | 90.43 | 90.59 | 90.89 |
| 384 | 8 | 45.52 | 175.76 | 176.66 | 176.89 | 177.33 |
BERT-Base FP16
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Throughput speedup (FP32 to mixed precision) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|----------------------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 234.85 | 1.217533309 | 4.26 | 4.33 | 4.37 | 4.62 |
| 128 | 2 | 415.86 | 1.435782351 | 4.81 | 4.92 | 5.06 | 5.55 |
| 128 | 4 | 680.09 | 1.84912586 | 5.88 | 6.1 | 6.2 | 6.53 |
| 128 | 8 | 1030.03 | 2.264548752 | 7.77 | 7.87 | 7.95 | 8.53 |
| 384 | 1 | 183.18 | 1.700993593 | 5.46 | 5.56 | 5.61 | 5.93 |
| 384 | 2 | 275.77 | 2.175528558 | 7.25 | 7.38 | 7.44 | 7.89 |
| 384 | 4 | 385.61 | 2.778570399 | 10.37 | 10.56 | 10.63 | 11.1 |
| 384 | 8 | 488.45 | 3.292329469 | 16.38 | 16.48 | 16.52 | 16.64 |
BERT-Base FP32
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 192.89 | 5.18 | 5.3 | 5.36 | 5.65 |
| 128 | 2 | 289.64 | 6.91 | 7 | 7.22 | 7.83 |
| 128 | 4 | 367.79 | 10.88 | 10.98 | 11.02 | 11.59 |
| 128 | 8 | 454.85 | 17.59 | 17.76 | 17.81 | 17.92 |
| 384 | 1 | 107.69 | 9.29 | 9.37 | 9.42 | 9.88 |
| 384 | 2 | 126.76 | 15.78 | 15.89 | 15.97 | 16.72 |
| 384 | 4 | 138.78 | 28.82 | 28.98 | 29.06 | 29.88 |
| 384 | 8 | 148.36 | 53.92 | 54.16 | 54.26 | 54.58 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
##### Fine-tuning inference performance for SQuAD v1.1 on NVIDIA DGX A100 (1x A100 80GB)
Our results were obtained by running the `scripts/finetune_inference_benchmark.sh` inference benchmark script in the TensorFlow 21.02-py3 NGC container on NVIDIA DGX A100 with 1x A100 80GB GPU. Performance numbers (throughput in sequences per second and latency in milliseconds) were averaged over 1000 iterations. Latency is computed as the time taken to process one batch, with batches fed to the model one after another (that is, no pipelining).
BERT-Large FP16
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Throughput speedup (FP32 to mixed precision) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|----------------------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 145.21 | 0.9435347628 | 6.89 | 7.14 | 7.4 | 8.35 |
| 128 | 2 | 272.81 | 1.093953003 | 7.33 | 7.61 | 7.77 | 8.35 |
| 128 | 4 | 468.98 | 1.273087573 | 8.53 | 8.71 | 8.83 | 9.85 |
| 128 | 8 | 705.67 | 1.191627687 | 11.34 | 11.64 | 11.9 | 13.1 |
| 384 | 1 | 118.34 | 1.042459479 | 8.45 | 8.82 | 8.99 | 9.52 |
| 384 | 2 | 197.8 | 1.231478023 | 10.11 | 10.48 | 10.62 | 11.4 |
| 384 | 4 | 275.19 | 1.268332027 | 14.54 | 14.73 | 14.8 | 16.8 |
| 384 | 8 | 342.22 | 1.416004634 | 23.38 | 23.64 | 23.75 | 24.1 |
BERT-Large TF32
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 153.9 | 6.5 | 6.76 | 6.86 | 7.4 |
| 128 | 2 | 249.38 | 8.02 | 8.22 | 8.34 | 9.45 |
| 128 | 4 | 368.38 | 10.86 | 11.11 | 11.24 | 12.76 |
| 128 | 8 | 592.19 | 13.51 | 13.64 | 13.77 | 15.85 |
| 384 | 1 | 113.52 | 8.81 | 9.02 | 9.16 | 10.19 |
| 384 | 2 | 160.62 | 12.45 | 12.61 | 12.68 | 14.47 |
| 384 | 4 | 216.97 | 18.44 | 18.6 | 18.7 | 18.84 |
| 384 | 8 | 241.68 | 33.1 | 33.29 | 33.36 | 33.5 |
BERT-Base FP16
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Throughput speedup (FP32 to mixed precision) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|----------------------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 295.01 | 1.014023992 | 3.39 | 3.59 | 3.65 | 3.73 |
| 128 | 2 | 594.81 | 1.048455898 | 3.36 | 3.59 | 3.68 | 4.19 |
| 128 | 4 | 1043.12 | 1.005145599 | 3.83 | 3.97 | 4.2 | 4.44 |
| 128 | 8 | 1786.25 | 1.198278638 | 4.48 | 4.73 | 4.8 | 5.19 |
| 384 | 1 | 278.85 | 1.103395062 | 3.59 | 3.67 | 3.99 | 4.15 |
| 384 | 2 | 464.77 | 1.252006896 | 4.3 | 4.59 | 4.87 | 5.29 |
| 384 | 4 | 675.82 | 1.264822578 | 5.92 | 6.15 | 6.27 | 6.94 |
| 384 | 8 | 846.81 | 1.31109494 | 9.45 | 9.65 | 9.74 | 11.03 |
BERT-Base TF32
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 290.93 | 3.44 | 3.61 | 3.73 | 4.69 |
| 128 | 2 | 567.32 | 3.53 | 3.64 | 3.96 | 5.01 |
| 128 | 4 | 1037.78 | 3.85 | 3.95 | 4.06 | 4.58 |
| 128 | 8 | 1490.68 | 5.37 | 5.61 | 5.66 | 6.19 |
| 384 | 1 | 252.72 | 3.96 | 3.96 | 4.52 | 4.66 |
| 384 | 2 | 371.22 | 5.39 | 5.64 | 5.71 | 6.38 |
| 384 | 4 | 534.32 | 7.49 | 7.69 | 7.76 | 8.56 |
| 384 | 8 | 645.88 | 12.39 | 12.61 | 12.67 | 12.77 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
##### Fine-tuning inference performance for SQuAD v1.1 on NVIDIA Tesla T4 (1x T4 16GB)
Our results were obtained by running the `scripts/finetune_inference_benchmark.sh` inference benchmarking script in the TensorFlow 21.02-py3 NGC container on NVIDIA Tesla T4 with 1x T4 16GB GPUs. Performance numbers (throughput in sequences per second and latency in milliseconds) were averaged over 1000 iterations. Latency is computed as the time taken to process a single batch, with batches fed in one after another, that is, with no pipelining.
BERT-Large FP16
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Throughput speedup (FP32 to mixed precision) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|----------------------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 57.6 | 1.364605544 | 17.36 | 18.16 | 19.02 | 21.67 |
| 128 | 2 | 102.76 | 2.17988969 | 19.46 | 20.68 | 21.27 | 22.2 |
| 128 | 4 | 151.11 | 3.146813828 | 26.47 | 26.9 | 27.06 | 27.45 |
| 128 | 8 | 186.99 | 3.733080455 | 42.78 | 43.87 | 44.18 | 44.78 |
| 384 | 1 | 38.88 | 2.590273151 | 25.72 | 26.06 | 26.16 | 26.38 |
| 384 | 2 | 50.53 | 3.202154626 | 39.58 | 39.93 | 40.35 | 40.95 |
| 384 | 4 | 57.69 | 3.721935484 | 69.34 | 70.5 | 70.77 | 71.09 |
| 384 | 8 | 62.99 | 3.927057357 | 127 | 129.18 | 130.07 | 131.86 |
BERT-Large FP32
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 42.21 | 23.69 | 24.8 | 25.02 | 25.48 |
| 128 | 2 | 47.14 | 42.42 | 43.48 | 43.63 | 44.32 |
| 128 | 4 | 48.02 | 83.29 | 84.37 | 84.68 | 85.14 |
| 128 | 8 | 50.09 | 159.72 | 161.66 | 161.97 | 162.52 |
| 384 | 1 | 15.01 | 66.63 | 67.76 | 68.08 | 68.66 |
| 384 | 2 | 15.78 | 126.78 | 128.21 | 128.58 | 129.08 |
| 384 | 4 | 15.5 | 258.1 | 261.01 | 261.66 | 262.55 |
| 384 | 8 | 16.04 | 498.61 | 504.29 | 504.74 | 505.55 |
BERT-Base FP16
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Throughput speedup (FP32 to mixed precision) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|----------------------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 116.56 | 1.039878669 | 8.58 | 9.53 | 10.84 | 11.74 |
| 128 | 2 | 238.62 | 1.675937632 | 8.38 | 9.09 | 9.27 | 12.33 |
| 128 | 4 | 402.93 | 2.440964439 | 9.93 | 10.07 | 10.13 | 12.17 |
| 128 | 8 | 532.56 | 3.052619512 | 15.02 | 15.43 | 15.6 | 16.52 |
| 384 | 1 | 102.12 | 2.035073735 | 9.79 | 11.06 | 11.18 | 12.07 |
| 384 | 2 | 149.3 | 2.910898811 | 13.4 | 13.54 | 13.62 | 14.36 |
| 384 | 4 | 177.78 | 3.563439567 | 22.5 | 23.11 | 23.27 | 23.59 |
| 384 | 8 | 192.61 | 3.752386519 | 41.53 | 42.67 | 42.81 | 43.31 |
BERT-Base FP32
| Sequence Length | Batch Size | Throughput-Average(seq/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) |
|-----------------|------------|------------------------------|---------------------|-----------------|-----------------|-----------------|
| 128 | 1 | 112.09 | 8.92 | 9.12 | 9.49 | 10.93 |
| 128 | 2 | 142.38 | 14.05 | 14.34 | 14.48 | 15.03 |
| 128 | 4 | 165.07 | 24.23 | 24.86 | 24.92 | 25.05 |
| 128 | 8 | 174.46 | 45.86 | 46.71 | 46.8 | 47.2 |
| 384 | 1 | 50.18 | 19.93 | 20.53 | 21.04 | 21.73 |
| 384 | 2 | 51.29 | 38.99 | 39.68 | 39.93 | 40.2 |
| 384 | 4 | 49.89 | 80.18 | 81.54 | 82 | 82.65 |
| 384 | 8 | 51.33 | 155.85 | 158.11 | 158.5 | 159.17 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above.
## Release notes
### Changelog
April 2021
Initial release
### Known issues
There are no known issues with this model.
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/optimizer | optimizer | Adam | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: apex.optimizers.FusedAdam
lr: 0.001
betas: [0.9, 0.999]
eps: 1e-8
weight_decay: 0.0
amsgrad: False
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B4/evaluation | evaluation | evaluation_AMP_A100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python3 main.py --cfg config/efficientnet_v1/b4_cfg.py \
--mode eval \
--use_amp \
--use_xla \
--model_dir ./output \
--data_dir /data \
--eval_batch_size 128 \
--eval_img_size 380 \
--memory_limit 81000
|
PyTorch/Classification/ConvNets/resnet50v1.5/training/FP32 | FP32 | DGX1V_resnet50_FP32_90E | python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision FP32 --mode convergence --platform DGX1V /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
|
PyTorch/Forecasting/TFT/triton/runner | runner | task | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import platform
import subprocess
from datetime import datetime
from typing import Dict, List, Optional, Union
import cpuinfo
import psutil
import yaml
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import CustomDumper, DataObject
from .experiment import Experiment
from .triton import Triton
class GPU(DataObject):
"""
GPU information data object
"""
name: str
driver_version: str
cuda_version: str
memory: str
tdp: str
def __init__(self, name: str, driver_version: str, cuda_version: str, memory: str, tdp: str):
"""
Args:
name: name of GPU
driver_version: version of driver
cuda_version: version of CUDA
memory: size of memory available on GPU [MB]
tdp: Max TDP of GPU unit
"""
self.name = name
self.driver_version = driver_version
self.cuda_version = cuda_version
self.memory = memory
self.tdp = tdp
@staticmethod
def from_dict(data: Dict):
"""
Create GPU object from dictionary
Args:
data: dictionary with GPU data
Returns:
GPU object
"""
return GPU(
name=data["name"],
driver_version=data["driver_version"],
cuda_version=data["cuda_version"],
memory=data["memory"],
tdp=data["tdp"],
)
@staticmethod
def from_host():
"""
Create GPU object from host data
Returns:
GPU object
"""
data = subprocess.check_output(
["nvidia-smi", "--query-gpu=name,driver_version,memory.total,power.max_limit", "--format=csv"]
).decode()
lines = data.split(sep="\n")
device_details = lines[1].split(",")
name = device_details[0].strip()
driver_version = device_details[1].strip()
memory = device_details[2].strip()
tdp = device_details[3].strip()
cuda_version = None
data = subprocess.check_output(["nvidia-smi", "--query"]).decode()
lines = data.split(sep="\n")
for line in lines:
if line.startswith("CUDA Version"):
cuda_version = line.split(":")[1].strip()
break
return GPU(
name=name,
driver_version=driver_version,
cuda_version=cuda_version,
memory=memory,
tdp=tdp,
)
class CPU(DataObject):
"""
CPU details
"""
name: str
physical_cores: int
logical_cores: int
min_frequency: float
max_frequency: float
def __init__(self, name: str, physical_cores: int, logical_cores: int, min_frequency: float, max_frequency: float):
"""
Args:
name: name of CPU unit
physical_cores: number of physical cores available on CPU
logical_cores: number of logical cores available on CPU
min_frequency: minimal clock frequency
max_frequency: maximal clock frequency
"""
self.name = name
self.physical_cores = physical_cores
self.logical_cores = logical_cores
self.min_frequency = min_frequency
self.max_frequency = max_frequency
@staticmethod
def from_host():
"""
Create CPU object from host data
Returns:
CPU object
"""
return CPU(
name=cpuinfo.get_cpu_info()["brand_raw"],
physical_cores=psutil.cpu_count(logical=False),
logical_cores=psutil.cpu_count(logical=True),
min_frequency=psutil.cpu_freq().min,
max_frequency=psutil.cpu_freq().max,
)
class Memory(DataObject):
"""
Memory data object
"""
size: float
def __init__(self, size: float):
"""
Args:
            size: total RAM memory size in bytes (as reported by psutil)
"""
self.size = size
@staticmethod
def from_host():
"""
Create Memory object from host data
Returns:
Memory object
"""
svm = psutil.virtual_memory()
return Memory(size=svm.total)
class SystemInfo(DataObject):
"""
System Information data object
"""
system: str
cpu: CPU
memory: Memory
gpu: GPU
def __init__(self, system: str, cpu: CPU, memory: Memory, gpu: GPU):
"""
Args:
system: name of operating system
cpu: CPU info
memory: Memory info
gpu: GPU info
"""
self.system = system
self.cpu = cpu
self.memory = memory
self.gpu = gpu
@staticmethod
def from_host():
"""
Create SystemInfo object from host data
Returns:
SystemInfo object
"""
system = platform.platform()
gpu = GPU.from_host()
memory = Memory.from_host()
cpu = CPU.from_host()
return SystemInfo(system=system, cpu=cpu, gpu=gpu, memory=memory)
class Checkpoint(DataObject):
"""
Checkpoint data object
"""
def __init__(self, name: str, url: str, path: Union[str, pathlib.Path]):
"""
Args:
            name: Name of checkpoint
            url: URL from which the checkpoint can be downloaded
            path: Location of checkpoint on local hardware
"""
self.name = name
self.url = url
self.path = pathlib.Path(path)
class Dataset(DataObject):
"""
Dataset data object
"""
def __init__(self, name: str):
"""
Args:
name: Name of dataset
"""
self.name = name
class Task(DataObject):
"""
Task data object to store build information
"""
model_name: str
framework: str
started_at: int
ended_at: Optional[int]
container_version: str
checkpoints: Dict[str, Checkpoint]
datasets: Dict[str, Dataset]
datasets_dir: Optional[Union[str, pathlib.Path]]
experiments: List[Experiment]
system_info: SystemInfo
triton_container_image: Optional[str]
triton_custom_operations: Optional[str]
filename: str = "task.yaml"
results_dir: str = "results"
checkpoints_dir: str = "checkpoints"
def __init__(
self,
model_name: str,
framework: str,
container_version: str,
checkpoints: Dict,
datasets: Dict,
experiments: List,
system_info: SystemInfo,
started_at: int,
logs_dir: pathlib.Path = pathlib.Path("/var/logs"),
datasets_dir: Optional[Union[str, pathlib.Path]] = None,
ended_at: Optional[int] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: str = Triton.LOAD_MODE.EXPLICIT,
):
"""
Args:
model_name: Name of model
framework: Model framework
container_version: Container version used in task
checkpoints: List of checkpoints
datasets: List of datasets
            datasets_dir: Directory where datasets are stored
experiments: List of experiments run as part of task
system_info: information about node on which experiment was executed
started_at: Time when task has started
ended_at: Time when task has ended
triton_container_image: Custom Triton Container Image used for task
triton_custom_operations: Custom operations library path
triton_load_model_method: Method how models are loaded on Triton
"""
self.started_at = started_at
self.ended_at = ended_at
self.model_name = model_name
self.framework = framework
self.container_version = container_version
self.checkpoints = checkpoints
self.datasets = datasets
        self.datasets_dir = pathlib.Path(datasets_dir) if datasets_dir is not None else None
self.experiments = experiments
self.system_info = system_info
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
self.triton_load_model_method = triton_load_model_method
self.logs_dir = logs_dir
def start(self) -> None:
"""
Update stage execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update stage execution info at end
Returns:
None
"""
self.ended_at = int(datetime.utcnow().timestamp())
def to_file(self, file_path: Union[pathlib.Path, str]):
"""
Store task data to YAML file
Args:
file_path: path to file where task data has to be saved
Returns:
None
"""
task_data = self.to_dict()
with open(file_path, "w") as f:
yaml.dump(task_data, f, Dumper=CustomDumper, width=240, sort_keys=False)
|
TensorFlow/Translation/GNMT/examples | examples | DGXA100_AMP_8GPU | python nmt.py --output_dir=results --batch_size=1024 --learning_rate=2e-3 --num_gpus=8 --amp
|
TensorFlow2/LanguageModeling/BERT/scripts | scripts | run_pretraining_adam | #! /bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
echo "Container nvidia build = " $NVIDIA_BUILD_ID
num_gpus=${1:-8}
train_batch_size=${2:-14}
learning_rate=${3:-"1e-4"}
precision=${4:-"fp16"}
use_xla=${5:-"true"}
warmup_steps=${6:-"10000"}
train_steps=${7:-1144000}
bert_model=${8:-"large"}
num_accumulation_steps=${9:-1}
seq_len=${10:-512}
max_pred_per_seq=${11:-80}
DATA_DIR=data/tfrecord/lower_case_1_seq_len_${seq_len}_max_pred_${max_pred_per_seq}_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5_shard_1472_test_split_10/books_wiki_en_corpus
if [ "$bert_model" = "large" ] ; then
export BERT_CONFIG=data/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/bert_config.json
else
export BERT_CONFIG=data/download/google_pretrained_weights/uncased_L-12_H-768_A-12/bert_config.json
fi
PREC=""
if [ "$precision" = "fp16" ] ; then
PREC="--use_fp16"
elif [ "$precision" = "fp32" ] || [ "$precision" = "tf32" ] ; then
PREC=""
else
echo "Unknown <precision> argument"
exit -2
fi
if [ "$use_xla" = "true" ] ; then
PREC="$PREC --enable_xla"
echo "XLA activated"
fi
export GBS=$(expr $train_batch_size \* $num_gpus)
printf -v TAG "tf_bert_pretraining_adam_%s_%s_gbs%d" "$bert_model" "$precision" $GBS
DATESTAMP=`date +'%y%m%d%H%M%S'`
#Edit to save logs & checkpoints in a different directory
RESULTS_DIR=${RESULTS_DIR:-/results/${TAG}_${DATESTAMP}}
LOGFILE=$RESULTS_DIR/$TAG.$DATESTAMP.log
mkdir -m 777 -p $RESULTS_DIR
printf "Saving checkpoints to %s\n" "$RESULTS_DIR"
printf "Logs written to %s\n" "$LOGFILE"
INPUT_FILES="$DATA_DIR/training/*"
EVAL_FILES="$DATA_DIR/test"
CMD="python3 run_pretraining.py"
CMD+=" --input_files=$INPUT_FILES"
CMD+=" --model_dir=$RESULTS_DIR"
CMD+=" --bert_config_file=$BERT_CONFIG"
CMD+=" --train_batch_size=$train_batch_size"
CMD+=" --max_seq_length=$seq_len"
CMD+=" --max_predictions_per_seq=$max_pred_per_seq"
CMD+=" --num_steps_per_epoch=$train_steps --num_train_epochs=1"
CMD+=" --warmup_steps=$warmup_steps"
CMD+=" --num_accumulation_steps=$num_accumulation_steps"
CMD+=" --learning_rate=$learning_rate"
CMD+=" $PREC"
#Check if all necessary files are available before training
for DIR_or_file in $DATA_DIR $BERT_CONFIG $RESULTS_DIR; do
if [ ! -d "$DIR_or_file" ] && [ ! -f "$DIR_or_file" ]; then
echo "Error! $DIR_or_file directory missing. Please mount correctly"
exit -1
fi
done
if [ $num_gpus -gt 1 ] ; then
mpi="mpirun -np $num_gpus \
--allow-run-as-root -bind-to none -map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH -mca pml ob1 -mca btl ^openib"
CMD="$mpi $CMD --use_horovod"
fi
set -x
if [ -z "$LOGFILE" ] ; then
$CMD
else
(
$CMD
) |& tee $LOGFILE
fi
set +x
|
PyTorch/Recommendation/DLRM/dlrm/utils/checkpointing | checkpointing | distributed | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Any, Optional
import torch
from dlrm.utils.checkpointing.model import DlrmCheckpointWriter, DlrmCheckpointLoader
class DistributedCheckpointWriter:
def __init__(
self,
writer: DlrmCheckpointWriter,
device_mapping: Dict[str, Any],
rank: int,
main_process: bool
):
self._device_mapping = device_mapping
self._main_process = main_process
self._has_bottom_mlp = rank == device_mapping["bottom_mlp"]
self._writer = writer
self._distributed = len(device_mapping['embedding']) > 1
def save_checkpoint(
self,
model,
checkpoint_path: str,
epoch: Optional[int] = None,
step: Optional[int] = None
):
self._writer.save_embeddings(checkpoint_path, model)
if self._has_bottom_mlp:
self._writer.save_bottom_mlp(checkpoint_path, model)
if self._main_process:
self._writer.save_top_model(checkpoint_path, model)
self._save_metadata(checkpoint_path, epoch, step)
if self._distributed:
torch.distributed.barrier()
def _save_metadata(self, checkpoint_path, epoch, step):
self._writer.save_metadata(checkpoint_path, {
"device_mapping": self._device_mapping,
"epoch": epoch,
"step": step
})
class DistributedCheckpointLoader:
def __init__(self, loader: DlrmCheckpointLoader, device_mapping: Dict[str, Any], rank: int):
self._has_bottom_mlp = rank == device_mapping["bottom_mlp"]
self._loader = loader
self.distributed = len(device_mapping['embedding']) > 1
def load_checkpoint(self, model, checkpoint_path: str):
self._loader.load_top_model(checkpoint_path, model)
if self._has_bottom_mlp:
self._loader.load_bottom_mlp(checkpoint_path, model)
self._loader.load_embeddings(checkpoint_path, model)
if self.distributed:
torch.distributed.barrier()
def make_distributed_checkpoint_loader(device_mapping, rank: int, device: str = "cpu") -> DistributedCheckpointLoader:
embedding_indices = device_mapping["embedding"][rank]
return DistributedCheckpointLoader(
loader=DlrmCheckpointLoader(
embedding_indices=embedding_indices,
device=device,
),
device_mapping=device_mapping,
rank=rank
)
def make_distributed_checkpoint_writer(
device_mapping,
rank: int,
is_main_process: bool,
config: Dict[str, Any],
) -> DistributedCheckpointWriter:
embedding_indices = device_mapping["embedding"][rank]
return DistributedCheckpointWriter(
writer=DlrmCheckpointWriter(
embedding_indices=embedding_indices,
config=config
),
device_mapping=device_mapping,
rank=rank,
main_process=is_main_process
)
|
PyTorch/SpeechRecognition/Jasper/utils | utils | preprocessing_utils | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import os
import multiprocessing
import functools
import sox
from tqdm import tqdm
def preprocess(data, input_dir, dest_dir, target_sr=None, speed=None,
overwrite=True):
    speed = list(speed) if speed else []  # copy to avoid mutating the caller's list
    speed.append(1)
    speed = list(set(speed))  # Make unique
input_fname = os.path.join(input_dir,
data['input_relpath'],
data['input_fname'])
input_sr = sox.file_info.sample_rate(input_fname)
target_sr = target_sr or input_sr
os.makedirs(os.path.join(dest_dir, data['input_relpath']), exist_ok=True)
output_dict = {}
output_dict['transcript'] = data['transcript'].lower().strip()
output_dict['files'] = []
fname = os.path.splitext(data['input_fname'])[0]
for s in speed:
output_fname = fname + '{}.wav'.format('' if s==1 else '-{}'.format(s))
output_fpath = os.path.join(dest_dir,
data['input_relpath'],
output_fname)
if not os.path.exists(output_fpath) or overwrite:
cbn = sox.Transformer().speed(factor=s).convert(target_sr)
cbn.build(input_fname, output_fpath)
file_info = sox.file_info.info(output_fpath)
file_info['fname'] = os.path.join(os.path.basename(dest_dir),
data['input_relpath'],
output_fname)
file_info['speed'] = s
output_dict['files'].append(file_info)
if s == 1:
file_info = sox.file_info.info(output_fpath)
output_dict['original_duration'] = file_info['duration']
output_dict['original_num_samples'] = file_info['num_samples']
return output_dict
def parallel_preprocess(dataset, input_dir, dest_dir, target_sr, speed, overwrite, parallel):
with multiprocessing.Pool(parallel) as p:
func = functools.partial(preprocess,
input_dir=input_dir, dest_dir=dest_dir,
target_sr=target_sr, speed=speed, overwrite=overwrite)
dataset = list(tqdm(p.imap(func, dataset), total=len(dataset)))
return dataset
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2 | tacotron2 | __init__ | """ from https://github.com/keithito/tacotron """
import re
from utils.tacotron2 import cleaners
from utils.tacotron2.symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
    return s in _symbol_to_id and s != '_' and s != '~'
|
PyTorch/DrugDiscovery/MoFlow | MoFlow | README | # MoFlow For PyTorch
This repository provides a script and recipe to train the MoFlow model to achieve state-of-the-art accuracy. The content of this repository is tested and maintained by NVIDIA.
## Table Of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Glossary](#glossary)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Inference process](#inference-process)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training stability test](#training-stability-test)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-1x-a100-80gb)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
MoFlow is a model for molecule generation that leverages Normalizing Flows.
Normalizing flows are a class of generative neural networks that directly model the probability density of the data. They consist of a sequence of invertible transformations that convert input data following some hard-to-model distribution into a latent code that follows a normal distribution, which can then be easily used for sampling.
MoFlow was first introduced by Chengxi Zang et al. in their paper titled "MoFlow: An Invertible Flow Model for Generating Molecular Graphs" ([link](https://arxiv.org/pdf/2006.10137.pdf)).
The model enables you to generate novel molecules that have similar properties to your training data.
In the case of [ZINC dataset](https://zinc.docking.org/), which is used in this example, it allows you to navigate the chemical space of drug-like molecules and facilitate de-novo drug design.
The differences between this version and the [original implementation](https://github.com/calvin-zcx/moflow) accompanying the paper are as follows:
* Loss calculation was separated from the neural network
* ActNorm layers were refactored and their initialization was moved outside of the forward pass
* Numerical stability of the training was improved by introducing gradient clipping
* Numerically-stable formulas for 1/sigmoid(x) and log(sigmoid(x)) were used in AffineCoupling and GraphAffineCoupling layers
* Network and data configurations were untangled to allow for more flexibility
* Linear transformations for node features were implemented using native Linear layers instead of custom GraphLinear layers
* Rescaled adjacency matrix was removed as it did not provide any benefit for the training
* Data pre-processing and loading were refactored
* Support for data-parallel multi-GPU training was added
* Option to capture CUDA graphs was added
* Execution of the bond and atom models was put in two parallel CUDA streams
* Option to compile model to TorchScript format was added
* Support for Automatic Mixed Precision training and inference was added
* FusedAdam optimizer from [Apex](https://github.com/NVIDIA/apex) was used instead of Adam
* Training parameters were tuned to achieve better generation quality
This model is trained with mixed precision using Tensor Cores on the NVIDIA Ampere GPU architectures. Therefore, researchers can get results up to 1.43x faster than training with full precision while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture

[Chengxi Zang and Fei Wang. 2020. MoFlow: An Invertible Flow Model for Generating Molecular Graphs. In Proceedings of the 26th ACM SIGKDD](https://arxiv.org/pdf/2006.10137.pdf)
The MoFlow model consists of two parts.
The first part, Glow, processes edges to convert an adjacency matrix into a latent vector Z_B.
The second part, Graph Conditional Flow, processes nodes in the context of edges to produce conditional latent vector Z_{A|B}.
Each part is a normalizing flow—a chain of invertible transformations with learnable parameters, which provide the ability to learn the distribution of the data.
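To make the flow mechanics concrete, the sketch below shows the change-of-variables logic that every such invertible transformation follows. It is a deliberately tiny, generic example with a single learnable affine step, not the actual MoFlow code:
```python
import torch
class AffineFlowStep(torch.nn.Module):
    """One invertible transformation z = x * exp(s) + t with a tractable log-determinant."""
    def __init__(self, dim: int):
        super().__init__()
        self.s = torch.nn.Parameter(torch.zeros(dim))  # log-scale
        self.t = torch.nn.Parameter(torch.zeros(dim))  # shift
    def forward(self, x):
        z = x * torch.exp(self.s) + self.t
        log_det = self.s.sum() * x.new_ones(x.shape[0])  # log|det dz/dx| for each sample
        return z, log_det
    def reverse(self, z):
        return (z - self.t) * torch.exp(-self.s)
# Training maximizes log p(x) = log N(z; 0, I) + log|det dz/dx|;
# sampling draws z ~ N(0, I) and applies `reverse` to generate new data.
```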
### Default configuration
The MoFlow model is built out of Normalizing Flows. It consists of two parts: Glow for processing edges and Graph Conditional Flow for processing nodes in the context of edges.
The following features were implemented in this model:
* Data-parallel multi-GPU training (DDP)
* Mixed precision training (autocast, gradient scaling)
* Just-in-time compilation
* Resumable training
* CUDA graphs capture
The following performance optimizations were implemented in this model:
- A series of matrix manipulations in the GraphConv layer was replaced with a single torch.einsum
- Tensors are created on the device with the desired dtype whenever possible
- Channels-last memory format was used for Glow
- Stream concurrency was introduced to allow for executing Glow and Graph Conditional Flow at the same time. The concurrency happens in both forward and backward passes, and it hides the runtime of the smaller sub-model. Performance improvement is the most prominent for small batch sizes. A minimal sketch of this idea is shown after this list.
- Number of nodes in the graph is now independent of the maximum number of atoms in the dataset. This provides more flexibility and allows the use of shapes divisible by eight for better Tensor Cores usage.
- FusedAdam optimizer is used instead of native Adam.
- Normalization of the adjacency matrix was removed, as it did not benefit the training and required additional computation.
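As a rough illustration of the stream concurrency referenced above, two independent sub-model calls can be enqueued on separate CUDA streams so their kernels overlap. `bond_model`, `atom_model`, and their inputs below are placeholders, and this is a simplified sketch rather than the repository's implementation:
```python
import torch
bond_stream = torch.cuda.Stream()
atom_stream = torch.cuda.Stream()
def concurrent_forward(bond_model, atom_model, adj, nodes):
    # Make both side streams wait for any pending work on the default stream.
    bond_stream.wait_stream(torch.cuda.current_stream())
    atom_stream.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(bond_stream):
        z_b = bond_model(adj)          # Glow on the adjacency matrix
    with torch.cuda.stream(atom_stream):
        z_a = atom_model(nodes, adj)   # Graph Conditional Flow on node features
    # The default stream must wait for both branches before the results are consumed.
    torch.cuda.current_stream().wait_stream(bond_stream)
    torch.cuda.current_stream().wait_stream(atom_stream)
    return z_b, z_a
```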
### Feature support matrix
This model supports the following features:
| Feature | MoFlow
|-----------------------|--------------------------
|Automatic mixed precision (AMP) | Yes
|Distributed data parallel (DDP) | Yes
|CUDA Graphs | Yes
#### Features
**Distributed data parallel (DDP)**
[DistributedDataParallel (DDP)](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel) implements data parallelism at the module level that can run across multiple GPUs or machines.
**Automatic Mixed Precision (AMP)**
This implementation uses the native PyTorch AMP implementation of mixed precision training. It allows us to use FP16 training with FP32 master weights by modifying just a few lines of code. A detailed explanation of mixed precision can be found in the next section.
**CUDA Graphs**
This feature allows launching multiple GPU operations through a single CPU operation. The result is a vast reduction in CPU overhead. The benefits are particularly pronounced when training with relatively small batch sizes. The CUDA Graphs feature has been available through a [native PyTorch API](https://pytorch.org/docs/master/notes/cuda.html#cuda-graphs) starting from PyTorch v1.10.
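As a rough sketch of the native API mentioned above (using a toy model and optimizer, not this repository's training loop), a full training step can be captured once and then replayed:
```python
import torch
model = torch.nn.Linear(16, 16).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
static_input = torch.randn(8, 16, device="cuda")
static_target = torch.randn(8, 16, device="cuda")
# Warm up in a side stream before capture, as recommended by the PyTorch docs.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
    for _ in range(3):
        optimizer.zero_grad(set_to_none=True)
        loss = torch.nn.functional.mse_loss(model(static_input), static_target)
        loss.backward()
        optimizer.step()
torch.cuda.current_stream().wait_stream(s)
# Capture one full training step into a graph.
graph = torch.cuda.CUDAGraph()
optimizer.zero_grad(set_to_none=True)
with torch.cuda.graph(graph):
    static_loss = torch.nn.functional.mse_loss(model(static_input), static_target)
    static_loss.backward()
    optimizer.step()
# Replay: copy new data into the static tensors and launch the whole step at once.
static_input.copy_(torch.randn(8, 16, device="cuda"))
static_target.copy_(torch.randn(8, 16, device="cuda"))
graph.replay()
```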
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in NVIDIA Volta, and following with both the NVIDIA Turing and NVIDIA Ampere Architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
AMP enables mixed precision training on NVIDIA Volta, NVIDIA Turing, and NVIDIA Ampere GPU architectures automatically. The PyTorch framework code makes all necessary model changes internally.
For information about:
- How to train using mixed precision, refer to the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, refer to the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- APEX tools for mixed precision training, refer to the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
Mixed precision is enabled in PyTorch by using the native [Automatic Mixed Precision package](https://pytorch.org/docs/stable/amp.html), which casts variables to half-precision upon retrieval while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In PyTorch, loss scaling can be applied automatically using a `GradScaler`.
Automatic Mixed Precision makes all the adjustments internally in PyTorch, providing two benefits over manual operations. First, programmers do not need to modify network model code, reducing development and maintenance efforts. Second, using AMP maintains forward and backward compatibility with all the APIs for defining and running PyTorch models.
To enable mixed precision, you can simply use the `--amp` flag when running the training or inference scripts.
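For reference, the pattern enabled by the `--amp` flag looks roughly like the following; the model, data, and optimizer here are placeholders, so treat it as a generic PyTorch sketch rather than the exact training loop of this repository:
```python
import torch
model = torch.nn.Linear(16, 1).cuda()
optimizer = torch.optim.Adam(model.parameters())
scaler = torch.cuda.amp.GradScaler()            # handles loss scaling automatically
for inputs, targets in [(torch.randn(4, 16, device="cuda"), torch.randn(4, 1, device="cuda"))]:
    optimizer.zero_grad(set_to_none=True)
    with torch.cuda.amp.autocast():             # forward pass runs in mixed precision
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
    scaler.scale(loss).backward()               # scale the loss to preserve small gradients
    scaler.step(optimizer)                      # unscales gradients, then steps the optimizer
    scaler.update()                             # adjusts the scale factor for the next step
```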
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on NVIDIA Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require a high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
### Glossary
**Normalizing flow** - a class of generative neural networks that directly models the probability density of the data.
**Molecular graph** - representation of a molecule, in which nodes correspond to atoms and edges correspond to chemical bonds
**SMILES format** - a format that allows representing a molecule with a string of characters
## Setup
The following section lists the requirements that you need to meet to start training the MoFlow model.
### Requirements
This repository contains a Dockerfile that extends the PyTorch 22.11 NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- PyTorch 22.11+ NGC container
- Supported GPUs:
- [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing architecture](https://www.nvidia.com/en-us/design-visualization/technologies/turing-architecture/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, refer to the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
- Running PyTorch
For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, refer to the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the MoFlow model on the ZINC 250k dataset. For the specifics concerning training and inference, refer to the [Advanced](#advanced) section.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/PyTorch/DrugDiscovery/MoFlow
```
2. Build the MoFlow PyTorch NGC container.
```
docker build . -t moflow_pyt
```
3. Start an interactive session in the NGC container to run training/inference.
Run the following command to launch the Docker container.
```
docker run --rm -it --shm-size=8gb --gpus all -v <path to results>:/results moflow_pyt
```
If you want to reuse the dataset between runs (recommended), use `-v <path to data directory>:/data` to mount your directory inside the container:
```
docker run --rm -it --shm-size=8gb --gpus all -v <path to results>:/results -v <path to data directory>:/data moflow_pyt
```
The contents of /data will be downloaded in the following step.
4. Download and preprocess the dataset.
```
bash scripts/prepare_datasets.sh
```
5. Start training and evaluation.
```
bash scripts/train.sh
```
6. Start inference.
You can train the model yourself (see the previous step) or download the pretrained weights from NGC:
```
wget 'https://api.ngc.nvidia.com/v2/models/nvidia/dle/moflow__pyt_ckpt/versions/22.11.0_amp/files/model_snapshot_epoch_300' -O /results/model_snapshot_epoch_300
```
Then you can run the inference:
```
bash scripts/predict.sh
```
Now that you have your model trained and evaluated, you can choose to compare your training results with our [Training accuracy results](#training-accuracy-results). You can also choose to benchmark your performance against the [Training performance benchmark](#training-performance-results) or [Inference performance benchmark](#inference-performance-results). Following the steps in these sections will ensure that you achieve the same accuracy and performance results as stated in the [Results](#results) section.
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
In the root directory, the most important files are:
- Dockerfile - definition of the Docker image with all dependencies needed to run MoFlow
- setup.py - script that allows installing MoFlow with pip. Note that it does not include dependencies.
The `moflow` directory contains the definition of the network and the tools needed for using it:
- `config.py` - configuration of the dataset and network
- `data` - directory with tools needed to process and load the data
- `model` - directory with the definition of the MoFlow’s building blocks and helper functions
- `runtime` - directory that contains code for running experiments, multi-GPU training, and logging. The most important files in this directory are `train.py` and `generate.py`, which allow running training or inference, respectively.
- `utils.py`- various helper functions
The `scripts` directory contains scripts for running the most typical workflows inside the docker container:
- `benchmark_inference.sh` and `benchmark_training.sh` for measuring the performance of inference or training, respectively
- `data_preprocess.py` for dataset preparation
- `prepare_datasets.sh` for downloading and preprocessing the data (note, that it launches `data_preprocess.py`)
- `train.sh` for launching training
- `predict.sh` for sampling random molecules from the trained model
### Parameters
The complete list of parameters accepted by the runtime scripts (`moflow/runtime/train.py` and `moflow/runtime/generate.py`) consists of:
* --data_dir - Location for the dataset.
* --config_name - The config to choose. This parameter allows one to switch between different datasets and their dedicated configurations of the neural network. By default, a pre-defined “zinc250k” config is used.
* --results_dir - Directory where checkpoints are stored.
* --predictions_path - Path to store generated molecules. If an empty string is provided, predictions will not be saved (useful for benchmarking and debugging).
* --log_path - Path for DLLogger log. This file will contain information about the speed and accuracy of the model during training and inference. Note that if the file already exists, new logs will be added at the end.
* --log_interval - Frequency for writing logs, expressed in steps.
* --warmup_steps - Number of warmup steps. This value is used for benchmarking and for CUDA graph capture.
* --steps - Number of steps used for training/inference. This parameter allows finishing training earlier than the specified number of epochs. If used with inference, it allows generating more molecules (by default only a single batch of molecules is generated).
* --save_epochs - Frequency for saving checkpoints, expressed in epochs. If -1 is provided, checkpoints will not be saved.
* --eval_epochs - Evaluation frequency, expressed in epochs. If -1 is provided, an evaluation will not be performed.
* --learning_rate - Base learning rate.
* --beta1 - beta1 parameter for the Adam optimizer.
* --beta2 - beta2 parameter for the Adam optimizer.
* --clip - Gradient clipping norm.
* --epochs - Number of training epochs. Note that you can finish training mid-epoch by using “--steps” flag.
* --batch_size - Batch size per GPU.
* --num_workers - Number of workers in the data loader.
* --seed - Random seed used to initialize the distributed loaders.
* --local_rank - rank of the GPU, used to launch distributed training. This argument is specified automatically by `torchrun` and does not have to be provided by the user.
* --temperature - Temperature used for sampling.
* --val_batch_size - Number of molecules to generate during the validation step.
* --allow_untrained - Allow sampling molecules from an untrained network. Useful for performance benchmarking or debugging purposes.
* --correct_validity - Apply validity correction after the generation of the molecules.
* --amp - Use Automatic Mixed Precision
* --cuda_graph - Capture GPU kernels with CUDA graphs. This option allows to speed up training.
* --jit - Compile the model with `torch.jit.script`. Can be used to speed up training or inference.
* --verbosity - Verbosity level. Specify the following values: 0, 1, 2, 3, where 0 means minimal verbosity (errors only) and 3 - maximal (debugging).
### Command-line options
To view the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example:
`python moflow/runtime/train.py --help`
The following example output is printed when running the model:
```
usage: train.py [-h] [--data_dir DATA_DIR] [--config_name {zinc250k}] [--results_dir RESULTS_DIR] [--predictions_path PREDICTIONS_PATH] [--log_path LOG_PATH] [--log_interval LOG_INTERVAL]
[--warmup_steps WARMUP_STEPS] [--steps STEPS] [--save_epochs SAVE_EPOCHS] [--eval_epochs EVAL_EPOCHS] [--learning_rate LEARNING_RATE] [--beta1 BETA1] [--beta2 BETA2] [--clip CLIP]
[--epochs EPOCHS] [--batch_size BATCH_SIZE] [--num_workers NUM_WORKERS] [--seed SEED] [--local_rank LOCAL_RANK] [--temperature TEMPERATURE] [--val_batch_size VAL_BATCH_SIZE]
[--allow_untrained] [--correct_validity] [--amp] [--cuda_graph] [--jit] [--verbosity {0,1,2,3}]
optional arguments:
-h, --help show this help message and exit
--data_dir DATA_DIR Location for the dataset.
--config_name {zinc250k}
The config to choose. This parameter allows one to switch between different datasets and their dedicated configurations of the neural network. By default, a pre-defined
"zinc250k" config is used.
--results_dir RESULTS_DIR
Directory where checkpoints are stored.
--predictions_path PREDICTIONS_PATH
Path to store generated molecules. If an empty string is provided, predictions will not be saved (useful for benchmarking and debugging).
--log_path LOG_PATH Path for DLLogger log. This file will contain information about the speed and accuracy of the model during training and inference. Note that if the file already exists, new logs
will be added at the end.
--log_interval LOG_INTERVAL
Frequency for writing logs, expressed in steps.
--warmup_steps WARMUP_STEPS
Number of warmup steps. This value is used for benchmarking and for CUDA graph capture.
--steps STEPS Number of steps used for training/inference. This parameter allows finishing training earlier than the specified number of epochs. If used with inference, it allows generating
more molecules (by default only a single batch of molecules is generated).
--save_epochs SAVE_EPOCHS
Frequency for saving checkpoints, expressed in epochs. If -1 is provided, checkpoints will not be saved.
--eval_epochs EVAL_EPOCHS
Evaluation frequency, expressed in epochs. If -1 is provided, an evaluation will not be performed.
--learning_rate LEARNING_RATE
Base learning rate.
--beta1 BETA1 beta1 parameter for the optimizer.
--beta2 BETA2 beta2 parameter for the optimizer.
--clip CLIP Gradient clipping norm.
--epochs EPOCHS Number of training epochs. Note that you can finish training mid-epoch by using "--steps" flag.
--batch_size BATCH_SIZE
Batch size per GPU.
--num_workers NUM_WORKERS
Number of workers in the data loader.
--seed SEED Random seed used to initialize the distributed loaders.
--local_rank LOCAL_RANK
rank of the GPU, used to launch distributed training. This argument is specified automatically by `torchrun` and does not have to be provided by the user.
--temperature TEMPERATURE
Temperature used for sampling.
--val_batch_size VAL_BATCH_SIZE
Number of molecules to generate during validation step.
--allow_untrained Allow sampling molecules from an untrained network. Useful for performance benchmarking or debugging purposes.
--correct_validity Apply validity correction after the generation of the molecules.
--amp Use Automatic Mixed Precision.
--cuda_graph Capture GPU kernels with CUDA graphs. This option allows to speed up training.
--jit Compile the model with `torch.jit.script`. Can be used to speed up training or inference.
--verbosity {0,1,2,3}
Verbosity level. Specify the following values: 0, 1, 2, 3, where 0 means minimal verbosity (errors only) and 3 - maximal (debugging).
```
### Getting the data
The MoFlow model was trained on the ZINC 250k dataset. The original data split was used, with 224569 molecules in the training set and 24887 molecules in the test set.
This repository contains the `prepare_datasets.sh` script that will automatically download and process the dataset. By default, data will be downloaded to the `/data/` directory.
#### Dataset guidelines
The dataset preparation is implemented in the `scripts/data_preprocess.py` script, and the parameters for the dataset are defined in the `moflow/config.py` file. The config includes information about data location, the structure of the CSV file, types and numbers of atoms in the molecules, and the number of nodes in the output graphs.
Initially, the data is stored in a CSV file that contains the molecules in SMILES format, together with their properties (optional). The data is loaded using the `pandas` library, and the SMILES strings are converted to molecules with RDKit.
Then, the molecules are converted into graphs with features assigned to nodes and edges. The first step is the standardization of molecular structures - each molecule is converted into canonical SMILES, loaded back, and kekulized. Then, two numpy arrays are constructed. The first array is a vector corresponding to graph nodes and contains atomic numbers for all atoms in the molecule. The second array is a 2D square matrix corresponding to graph edges and contains codes for bond orders - 0 if two atoms are not connected, 1 for a single bond, 2 for a double bond, and 3 for a triple bond.
Both arrays are padded to some predefined size larger than the maximum number of atoms in the molecules in the dataset. For ZINC 250k, the maximum number of atoms is 38, and the output size of the numpy arrays is set to 40 for the nodes array and 40x40 for the edges array.
This representation of the data is dumped on the disk using the numpy `savez` function.
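A simplified sketch of the SMILES-to-arrays conversion described above is shown below; it uses RDKit and NumPy with hypothetical helper names, while the authoritative logic lives in `scripts/data_preprocess.py` and `moflow/config.py`:
```python
import numpy as np
from rdkit import Chem
BOND_CODES = {Chem.BondType.SINGLE: 1, Chem.BondType.DOUBLE: 2, Chem.BondType.TRIPLE: 3}
NUM_NODES = 40  # padded graph size used for ZINC 250k (at most 38 atoms per molecule)
def smiles_to_arrays(smiles: str):
    # Standardize the structure: canonical SMILES round-trip, then kekulization.
    mol = Chem.MolFromSmiles(Chem.MolToSmiles(Chem.MolFromSmiles(smiles)))
    Chem.Kekulize(mol, clearAromaticFlags=True)
    # Node array: atomic numbers padded with zeros up to NUM_NODES.
    nodes = np.zeros(NUM_NODES, dtype=np.int8)
    for i, atom in enumerate(mol.GetAtoms()):
        nodes[i] = atom.GetAtomicNum()
    # Edge array: bond-order codes in a padded square adjacency matrix.
    edges = np.zeros((NUM_NODES, NUM_NODES), dtype=np.int8)
    for bond in mol.GetBonds():
        i, j = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
        edges[i, j] = edges[j, i] = BOND_CODES[bond.GetBondType()]
    return nodes, edges
# The arrays for the whole dataset are then stored with np.savez, e.g.:
# np.savez("zinc250k.npz", nodes=all_nodes, edges=all_edges)
```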
During training, the numpy arrays are loaded, and one-hot-encoding is used to represent atomic numbers (node features) and bond orders (edge features). This representation is then used for training the neural network.
### Training process
The training script is located in `moflow/runtime/train.py` and it accepts the parameters listed above.
To make the model easier to use, there is also the `scripts/train.sh` script that runs training with the default configuration and evaluates the trained checkpoint at the end. The script can be run without any arguments - it then launches training on a single GPU with performance optimizations enabled - automatic mixed precision (AMP) and CUDA graph capture.
```
./scripts/train.sh
```
It is also possible to pass the number of GPUs and precision (“amp” or “full”) that should be used for training. For example, to launch training with eight GPUs and AMP, run:
```
./scripts/train.sh 8
```
and to launch four GPU training with full precision, run:
```
./scripts/train.sh 4 full
```
These two arguments can also be followed by extra flags that will be passed to training and evaluation commands. For example, to train on eight GPUs with AMP, batch size of 2048 per GPU and save logs in `/results/dll.json`, run:
```
./scripts/train.sh 8 amp --batch_size 2048 --log_path /results/dll.json
```
Alternatively, you can launch training with `moflow/runtime/train.py`. To run the model with multiple GPUs, run:
```
torchrun --nproc_per_node=<# GPUs> moflow/runtime/train.py <arguments>
```
To enable mixed precision training, add `--amp`. You can also optimize the performance further by adding `--cuda_graph` or `--jit` flags to enable CUDA graph capture or just-in-time compilation, respectively.
#### Logs
By default, logs are printed to the screen and not saved on disk. If you want to store the logs, pass `--log_path` flag to `scripts/train.sh` or `moflow/runtime/train.py`.
#### Checkpoints
By default, the training script saves checkpoints inside `/results` every five epochs. The location of the checkpoints directory can be modified with `--results_dir` flag and saving interval with `--save_epochs` flag (pass -1 if you do not want to save checkpoints). Up to five most recent checkpoints are kept while the older ones are removed.
#### Evaluation
The following metrics are used to evaluate the model:
- Validity - the percentage of predictions corresponding to the correct molecular graph.
- Uniqueness - the percentage of valid molecules that is unique.
- Novelty - the percentage of valid and unique molecules not present in the training set.
- N.U.V - the percentage of valid, unique, and novel molecules.
During training, a single batch of molecules is generated every couple of epochs to assess two metrics: validity and uniqueness, as they are quick to calculate and track the training progress.
By default, the validation batch size is set to 100 molecules per GPU, and evaluation happens every five epochs. This can be changed with `--val_batch_size` and `--eval_epochs` flags, respectively. To disable evaluation, pass `--eval_epochs -1`.
If you use `scripts/train.sh`, there is also a final evaluation of the model done on 100 batches of molecules. This larger sample is evaluated with all metrics described above, and we use N.U.V as the main metric.
Alternatively, you can trigger evaluation manually by running the `moflow/runtime/evaluate.py` script. Make sure that you pass the same value for `--results_dir` to both the training and evaluation scripts.
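For illustration, the metrics above can be computed from lists of SMILES strings roughly as follows; this is a hedged sketch using RDKit with placeholder names, not the repository's evaluation code:
```python
from rdkit import Chem
def evaluate_sample(generated_smiles, training_smiles):
    # Validity: fraction of generated strings that parse into a molecule.
    valid = [s for s in generated_smiles if Chem.MolFromSmiles(s) is not None]
    validity = len(valid) / len(generated_smiles)
    # Uniqueness: fraction of valid molecules that are distinct (compared as canonical SMILES).
    canonical = {Chem.MolToSmiles(Chem.MolFromSmiles(s)) for s in valid}
    uniqueness = len(canonical) / len(valid) if valid else 0.0
    # Novelty: fraction of valid, unique molecules absent from the training set.
    train_set = {Chem.MolToSmiles(Chem.MolFromSmiles(s)) for s in training_smiles}
    novel = canonical - train_set
    novelty = len(novel) / len(canonical) if canonical else 0.0
    # N.U.V.: valid, unique, and novel molecules as a fraction of all generated ones.
    nuv = len(novel) / len(generated_smiles)
    return validity, uniqueness, novelty, nuv
```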
### Inference process
Inference can be run by launching the `moflow/runtime/generate.py` or `scripts/predict.sh` script. The first one provides more flexibility and accepts the arguments listed above. The second script allows you to easily run the default configuration with performance optimization (`--jit` flag) and molecule validity correction (`--correct_validity`). To generate a single batch of molecules with AMP and batch size of 512, run:
```
./scripts/predict.sh
```
You can also provide batch size and precision to use for predictions. For example, to generate 1000 molecules with full precision, run:
```
./scripts/predict.sh 1000 full
```
The script also allows you to pass extra flags to the generation. For example, to generate 10 batches of 1000 molecules each and save the predictions in `/results/predictions.smi`, run:
```
./scripts/predict.sh 1000 amp --steps 10 --predictions_path /results/predictions.smi
```
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark the training performance on a specific number of GPUs, batch size and precision, run:
```
bash scripts/benchmark_training.sh <# GPUs> <batch_size> <precision>
```
For example, running
```
./scripts/benchmark_training.sh 8 2048 amp
```
will measure performance for eight GPUs, a batch size of 2048 per GPU, and mixed precision, while running:
```
./scripts/benchmark_training.sh 1 1024 full
```
will measure performance for a single GPU, a batch size of 1024, and full precision.
#### Inference performance benchmark
To benchmark the inference performance on a specific batch size and precision, run:
```
bash scripts/benchmark_inference.sh <batch size> <precision>
```
For example, running
```
./scripts/benchmark_inference.sh 2048 amp
```
will measure performance for a batch size of 2048 and mixed precision, and running:
```
./scripts/benchmark_inference.sh 1024 full
```
will measure performance for a batch size of 1024 and full precision.
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA A100 (8x A100 80GB)
Our results were obtained by running the `scripts/train.sh` training script in the PyTorch 22.11 NGC container on NVIDIA A100 (8x A100 80GB) GPUs. The values presented below were averaged over 20 experiments.
| GPUs | Batch size / GPU | N.U.V - TF32 | N.U.V - mixed precision | Time to train - TF32 | Time to train - mixed precision | Time to train speedup (TF32 to mixed precision)
|---------|------------------|-----------------|----------------------------|-------------------------|----------------------------------|--------------
| 1 | 512 | 89.63 % | 87.83 % | 5h8min | 4h0min | 1.28x
| 8 | 512 | 87.03 % | 87.90 % | 48min | 40min | 1.20x
##### Training stability test
The MoFlow model was trained for 300 epochs starting from 20 different initial random seeds. Every five training epochs, the model was evaluated by generating a small sample of molecules (100 molecules per GPU), and validity and uniqueness were calculated. The training was performed in the PyTorch 22.11 Docker container on NVIDIA DGX A100 with 8x A100 80GB GPUs with AMP and CUDA graph capture enabled. The following table summarizes the results of the stability test.
The following table displays the validity and uniqueness scores after every 50 epochs for different initial random seeds.
|epoch|validity mean|validity std|validity min|validity max|validity median|uniqueness mean|uniqueness std|uniqueness min|uniqueness max|uniqueness median|
|-----|-------------|------------|------------|------------|---------------|---------------|--------------|--------------|--------------|-----------------|
|50 |68.22 |5.25 |57.38 |74.75 |69.50 |93.64 |8.22 |62.56 |99.82 |95.30 |
|100 |76.91 |4.23 |69.50 |84.38 |77.50 |99.39 |0.92 |96.31 |100.00 |99.83 |
|150 |80.48 |3.80 |73.88 |88.25 |81.75 |99.58 |0.78 |96.64 |100.00 |99.85 |
|200 |83.87 |3.98 |77.00 |90.62 |84.44 |99.76 |0.38 |98.81 |100.00 |100.00 |
|250 |86.08 |4.46 |77.12 |93.12 |86.56 |99.87 |0.21 |99.27 |100.00 |100.00 |
|300 |87.29 |3.70 |77.75 |93.38 |87.69 |99.82 |0.30 |98.70 |100.00 |99.93 |
#### Training performance results
##### Training performance: NVIDIA A100 (8x A100 80GB)
Our results were obtained by running the `scripts/benchmark_training.sh` training script in the PyTorch 22.11 NGC container on NVIDIA A100 (8x A100 80GB) GPUs. Performance numbers (in molecules per second) were averaged over 190 iterations after 10 warm-up steps.
|GPUs|Batch size / GPU|Throughput - TF32|Throughput - mixed precision|Throughput speedup (TF32 - mixed precision)|Weak scaling - TF32|Weak scaling - mixed precision|
|----|----------------|-----------------|----------------------------|-------------------------------------------|-------------------|------------------------------|
|1 |512 |3499.35 |4524.15 |1.29 | | |
|1 |1024 |3883.49 |5392.78 |1.39 | | |
|1 |2048 |4291.29 |6118.46 |1.43 | | |
|8 |512 |24108.04 |29293.41 |1.22 |6.89 |6.47 |
|8 |1024 |28104.62 |37365.05 |1.33 |7.24 |6.93 |
|8 |2048 |30927.04 |42078.31 |1.36 |7.21 |6.88 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
#### Inference performance results
##### Inference performance: NVIDIA A100 (1x A100 80GB)
Our results were obtained by running the `scripts/benchmark_inference.sh` inferencing benchmarking script in the PyTorch 22.11 NGC container on the NVIDIA A100 (1x A100 80GB) GPU.
FP16
|Batch size|Throughput Avg [mol/s]|Latency Avg [ms]|Latency 90% [ms]|Latency 95% [ms]|Latency 99% [ms]|
|----------|--------------|-----------|-----------|-----------|-----------|
|512 |12524.49 |41 |41 |41 |41 |
|1024 |13871.60 |74 |74 |74 |74 |
|2048 |14386.44 |142 |144 |144 |144 |
TF32
|Batch size|Throughput Avg [mol/s]|Latency Avg [ms]|Latency 90% [ms]|Latency 95% [ms]|Latency 99% [ms]|
|----------|--------------|-----------|-----------|-----------|-----------|
|512 |9696.35 |53 |53 |53 |53 |
|1024 |10242.98 |100 |100 |100 |100 |
|2048 |11174.75 |183 |187 |187 |187 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
## Release notes
### Changelog
January 2023
- Initial release
### Known issues
There is a known issue with the selection of sampling temperature. For some runs, the default value (0.3) might be sub-optimal, and better prediction quality can be achieved when lowering or increasing the value of this parameter. To tune the value of this parameter, run `moflow/runtime/evaluate.py` script passing different values for the `--temperature` flag.
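As a sketch of such a sweep, assuming the checkpoints were saved to the default `/results` directory, one could run:
```
for t in 0.1 0.2 0.3 0.4 0.5; do
    python moflow/runtime/evaluate.py --results_dir /results --temperature $t
done
```
and compare the reported metrics across temperatures.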
|
JAX/Classification | Classification | README | # Image Classification
Image classification is the task of categorizing an image into one of several predefined classes, often also giving a probability of the input belonging to a certain class. This task is crucial in understanding and analyzing images, and it comes quite effortlessly to human beings with our complex visual systems. Most powerful image classification models today are built using some form of Convolutional Neural Networks (CNNs), which are also the backbone of many other tasks in Computer Vision.

[Source](https://github.com/NVlabs/stylegan)
In this overview, we will cover
- Types of image classification
- How does it work?
- How is the performance evaluated?
- Use cases and applications
- Where to get started
---
## Types of image classification
Image Classification can be broadly divided into either Binary or Multi-class problems depending on the number of categories. Binary image classification problems entail predicting one of two classes. An example of this would be to predict whether an image is that of a dog or not. A subtly different problem is that of single-class (one-vs-all) classification, where the goal is to recognize data from one class and reject all others. This is beneficial when there is an overabundance of data from one of the classes, also called a class imbalance.

In Multi-class classification problems, models categorize instances into one of three or more categories. Multi-class models often also return confidence scores (or probabilities) of an image belonging to each of the possible classes. This should not be confused with multi-label classification, where a model assigns multiple labels to an instance.
---
## How is the performance evaluated?
Image Classification performance is often reported as Top-1 or Top-5 scores. In top-1 score, classification is considered correct if the top predicted class (with the highest predicted probability) matches the true class for a given instance. In top-5, we check if one of the top 5 predictions matches the true class. The score is just the number of correct predictions divided by the total number of instances evaluated.
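As an illustration (not taken from the referenced examples), top-1 and top-k scores can be computed from model outputs with a few lines of NumPy; the array names here are hypothetical:
```python
import numpy as np

def topk_accuracy(logits, labels, k=1):
    """Fraction of samples whose true class is among the k highest-scoring predictions."""
    topk = np.argsort(logits, axis=1)[:, -k:]        # indices of the k largest scores per sample
    correct = (topk == labels[:, None]).any(axis=1)  # True where the true label is in the top k
    return correct.mean()

logits = np.array([[0.1, 0.7, 0.2],   # predicted class scores for two samples
                   [0.5, 0.3, 0.2]])
labels = np.array([1, 1])             # ground-truth classes
print(topk_accuracy(logits, labels, k=1))  # 0.5 -- only the first sample is correct at top-1
print(topk_accuracy(logits, labels, k=2))  # 1.0 -- both true classes are within the top 2
```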
---
## Use cases and applications
### Categorizing Images in Large Visual Databases
Businesses with visual databases may accumulate large amounts of images with missing tags or metadata. Unless there is an effective way to organize such images, they may not be of much use at all. Worse, they may hog precious storage space. Automated image classification algorithms can classify such untagged images into predefined categories, letting businesses avoid expensive manual labor.
A related task is that of Image Organization in smart devices like mobile phones. With Image Classification techniques, images and videos can be organized for improved accessibility.
### Visual Search
Visual Search, or image-based search, has risen in popularity in recent years. Many prominent search engines already provide this feature, where users can search for visual content similar to a provided image. This has many applications in the e-commerce and retail industry, where users can take a snap and upload an image of a product they are interested in purchasing. This makes the shopping experience much more efficient for customers and can increase sales for businesses.
### Healthcare
Medical Imaging is about creating visual images of internal body parts for clinical purposes. This includes health monitoring, medical diagnosis, treatment, and keeping organized records. Image Classification algorithms can play a crucial role in Medical Imaging by assisting medical professionals in detecting the presence of illness and improving the consistency of clinical diagnosis.
---
## Getting started
NVIDIA provides examples for JAX models on [Rosetta](https://github.com/NVIDIA/JAX-Toolbox/tree/main/rosetta/rosetta/projects). These examples provide you with easy-to-consume, highly optimized scripts for both training and inference. The quick start guide at our GitHub repository will help you set up the environment using NGC Docker images, download pre-trained models from NGC, and adapt the model training and inference for your application/use case.
These models are tested and maintained by NVIDIA, leveraging mixed precision using Tensor Cores on our latest GPUs for faster training times while maintaining accuracy.
|
PyTorch/Recommendation/DLRM/dlrm/scripts | scripts | main | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import sys
from absl import app, flags, logging
from apex import optimizers as apex_optim
from dlrm.data.feature_spec import FeatureSpec
from dlrm.model.distributed import DistributedDlrm
from dlrm.utils import distributed as dist
from dlrm.utils.checkpointing.distributed import make_distributed_checkpoint_writer, make_distributed_checkpoint_loader
from dlrm.utils.distributed import get_gpu_batch_sizes, get_device_mapping, is_main_process, is_distributed
import datetime
from time import time
import dllogger
import numpy as np
import torch
import dlrm.scripts.utils as utils
from dlrm.data.data_loader import get_data_loaders
from dlrm.data.utils import prefetcher, get_embedding_sizes
FLAGS = flags.FLAGS
# Basic run settings
flags.DEFINE_enum("mode", default='train', enum_values=['train', 'test', 'inference_benchmark'],
help="Select task to be performed")
flags.DEFINE_integer("seed", 12345, "Random seed")
# Training flags
flags.DEFINE_integer("batch_size", 65536, "Batch size used for training")
flags.DEFINE_integer("test_batch_size", 65536, "Batch size used for testing/validation")
flags.DEFINE_float("lr", 24, "Base learning rate")
flags.DEFINE_integer("epochs", 1, "Number of epochs to train for")
flags.DEFINE_integer("max_steps", None, "Stop training after doing this many optimization steps")
# Learning rate schedule flags
flags.DEFINE_integer("warmup_factor", 0, "Learning rate warmup factor. Must be a non-negative integer")
flags.DEFINE_integer("warmup_steps", 8000, "Number of warmup optimization steps")
flags.DEFINE_integer("decay_steps", 24000,
"Polynomial learning rate decay steps. If equal to 0 will not do any decaying")
flags.DEFINE_integer("decay_start_step", 48000,
"Optimization step after which to start decaying the learning rate, "
"if None will start decaying right after the warmup phase is completed")
flags.DEFINE_integer("decay_power", 2, "Polynomial learning rate decay power")
flags.DEFINE_float("decay_end_lr", 0, "LR after the decay ends")
# Model configuration
flags.DEFINE_enum("embedding_type", "custom_cuda",
["joint", "custom_cuda", "multi_table", "joint_sparse", "joint_fused"],
help="The type of the embedding operation to use")
flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of embedding space for categorical features")
flags.DEFINE_list("top_mlp_sizes", [1024, 1024, 512, 256, 1], "Linear layer sizes for the top MLP")
flags.DEFINE_list("bottom_mlp_sizes", [512, 256, 128], "Linear layer sizes for the bottom MLP")
flags.DEFINE_enum("interaction_op", default="cuda_dot", enum_values=["cuda_dot", "dot", "cat"],
help="Type of interaction operation to perform.")
# Data configuration
flags.DEFINE_string("dataset", None, "Path to dataset directory")
flags.DEFINE_string("feature_spec", default="feature_spec.yaml",
help="Name of the feature spec file in the dataset directory")
flags.DEFINE_enum("dataset_type", default="parametric", enum_values=['synthetic_gpu', 'parametric'],
help='The type of the dataset to use')
flags.DEFINE_boolean("shuffle_batch_order", False, "Read batch in train dataset by random order", short_name="shuffle")
flags.DEFINE_integer("max_table_size", None,
"Maximum number of rows per embedding table, "
"by default equal to the number of unique values for each categorical variable")
flags.DEFINE_boolean("hash_indices", False,
"If True the model will compute `index := index % table size` "
"to ensure that the indices match table sizes")
# Synthetic data configuration
flags.DEFINE_integer("synthetic_dataset_num_entries", default=int(2 ** 15 * 1024),
help="Number of samples per epoch for the synthetic dataset")
flags.DEFINE_list("synthetic_dataset_table_sizes", default=','.join(26 * [str(10 ** 5)]),
help="Cardinalities of variables to use with the synthetic dataset.")
flags.DEFINE_integer("synthetic_dataset_numerical_features", default='13',
help="Number of numerical features to use with the synthetic dataset")
flags.DEFINE_boolean("synthetic_dataset_use_feature_spec", default=False,
help="Create a temporary synthetic dataset based on a real one. "
"Uses --dataset and --feature_spec"
"Overrides synthetic_dataset_table_sizes and synthetic_dataset_numerical_features."
"--synthetic_dataset_num_entries is still required")
# Checkpointing
flags.DEFINE_string("load_checkpoint_path", None, "Path from which to load a checkpoint")
flags.DEFINE_string("save_checkpoint_path", None, "Path to which to save the training checkpoints")
# Saving and logging flags
flags.DEFINE_string("log_path", "./log.json", "Destination for the log file with various results and statistics")
flags.DEFINE_integer("test_freq", None,
"Number of optimization steps between validations. If None will test after each epoch")
flags.DEFINE_float("test_after", 0, "Don't test the model unless this many epochs has been completed")
flags.DEFINE_integer("print_freq", 200, "Number of optimizations steps between printing training status to stdout")
flags.DEFINE_integer("benchmark_warmup_steps", 0,
"Number of initial iterations to exclude from throughput measurements")
# Machine setting flags
flags.DEFINE_string("base_device", "cuda", "Device to run the majority of the model operations")
flags.DEFINE_boolean("amp", False, "If True the script will use Automatic Mixed Precision")
flags.DEFINE_boolean("cuda_graphs", False, "Use CUDA Graphs")
# inference benchmark
flags.DEFINE_list("inference_benchmark_batch_sizes", default=[1, 64, 4096],
help="Batch sizes for inference throughput and latency measurements")
flags.DEFINE_integer("inference_benchmark_steps", 200,
"Number of steps for measuring inference latency and throughput")
# Miscellaneous
flags.DEFINE_float("auc_threshold", None, "Stop the training after achieving this AUC")
flags.DEFINE_boolean("optimized_mlp", True, "Use an optimized implementation of MLP from apex")
flags.DEFINE_enum("auc_device", default="GPU", enum_values=['GPU', 'CPU'],
help="Specifies where ROC AUC metric is calculated")
flags.DEFINE_string("backend", "nccl", "Backend to use for distributed training. Default nccl")
flags.DEFINE_boolean("bottom_features_ordered", False,
"Sort features from the bottom model, useful when using saved "
"checkpoint in different device configurations")
flags.DEFINE_boolean("freeze_mlps", False,
"For debug and benchmarking. Don't perform the weight update for MLPs.")
flags.DEFINE_boolean("freeze_embeddings", False,
"For debug and benchmarking. Don't perform the weight update for the embeddings.")
flags.DEFINE_boolean("Adam_embedding_optimizer", False, "Swaps embedding optimizer to Adam")
flags.DEFINE_boolean("Adam_MLP_optimizer", False, "Swaps MLP optimizer to Adam")
def validate_flags(cat_feature_count):
if FLAGS.max_table_size is not None and not FLAGS.hash_indices:
raise ValueError('Hash indices must be True when setting a max_table_size')
if FLAGS.base_device == 'cpu':
if FLAGS.embedding_type in ('joint_fused', 'joint_sparse'):
print('WARNING: CUDA joint embeddings are not supported on CPU')
FLAGS.embedding_type = 'joint'
if FLAGS.amp:
print('WARNING: Automatic mixed precision not supported on CPU')
FLAGS.amp = False
if FLAGS.optimized_mlp:
print('WARNING: Optimized MLP is not supported on CPU')
FLAGS.optimized_mlp = False
if FLAGS.embedding_type == 'custom_cuda':
if (not is_distributed()) and FLAGS.embedding_dim == 128 and cat_feature_count == 26:
FLAGS.embedding_type = 'joint_fused'
else:
FLAGS.embedding_type = 'joint_sparse'
if FLAGS.embedding_type == 'joint_fused' and FLAGS.embedding_dim != 128:
print('WARNING: Joint fused can be used only with embedding_dim=128. Changed embedding type to joint_sparse.')
FLAGS.embedding_type = 'joint_sparse'
if FLAGS.dataset is None and (FLAGS.dataset_type != 'synthetic_gpu' or
FLAGS.synthetic_dataset_use_feature_spec):
raise ValueError('Dataset argument has to specify a path to the dataset')
FLAGS.inference_benchmark_batch_sizes = [int(x) for x in FLAGS.inference_benchmark_batch_sizes]
FLAGS.top_mlp_sizes = [int(x) for x in FLAGS.top_mlp_sizes]
FLAGS.bottom_mlp_sizes = [int(x) for x in FLAGS.bottom_mlp_sizes]
# TODO check that bottom_mlp ends in embedding_dim size
def load_feature_spec(flags):
if flags.dataset_type == 'synthetic_gpu' and not flags.synthetic_dataset_use_feature_spec:
num_numerical = flags.synthetic_dataset_numerical_features
categorical_sizes = [int(s) for s in FLAGS.synthetic_dataset_table_sizes]
return FeatureSpec.get_default_feature_spec(number_of_numerical_features=num_numerical,
categorical_feature_cardinalities=categorical_sizes)
fspec_path = os.path.join(flags.dataset, flags.feature_spec)
return FeatureSpec.from_yaml(fspec_path)
class CudaGraphWrapper:
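"""Runs the training step eagerly for the first ``warmup_steps`` iterations and then, when ``cuda_graphs=True``, captures one step into a ``torch.cuda.CUDAGraph`` and replays the captured graph with copied-in inputs on subsequent steps. With ``cuda_graphs=False`` the wrapped step is simply executed directly."""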
def __init__(self, model, train_step, parallelize,
zero_grad, cuda_graphs=False, warmup_steps=20):
self.cuda_graphs = cuda_graphs
self.warmup_iters = warmup_steps
self.graph = None
self.stream = None
self.static_args = None
self.model = model
self._parallelize = parallelize
self._train_step = train_step
self._zero_grad = zero_grad
self.loss = None
self.step = -1
if cuda_graphs:
self.stream = torch.cuda.Stream()
else:
# if not using graphs, parallelize the model immediately
# otherwise do this in the warmup phase under the graph stream
self.model = self._parallelize(self.model)
self.stream = torch.cuda.default_stream()
def _copy_input_data(self, *train_step_args):
if len(train_step_args) != len(self.static_args):
raise ValueError(f'Expected {len(self.static_args)} arguments to train step. '
f'Got: {len(train_step_args)}')
for data, placeholder in zip(train_step_args, self.static_args):
if placeholder is None:
continue
placeholder.copy_(data)
def _cuda_graph_capture(self, *train_step_args):
self._copy_input_data(*train_step_args)
self.graph = torch.cuda.CUDAGraph()
self._zero_grad(self.model)
with torch.cuda.graph(self.graph, stream=self.stream):
self.loss = self._train_step(self.model, *self.static_args)
return self.loss
def _cuda_graph_replay(self, *train_step_args):
self._copy_input_data(*train_step_args)
self.graph.replay()
def _warmup_step(self, *train_step_args):
with torch.cuda.stream(self.stream):
if self.step == 0:
self.model = self._parallelize(self.model)
self.static_args = list(train_step_args)
else:
self._copy_input_data(*train_step_args)
self._zero_grad(self.model)
self.loss = self._train_step(self.model, *self.static_args)
return self.loss
def train_step(self, *train_step_args):
self.step += 1
if not self.cuda_graphs:
self._zero_grad(self.model)
self.loss = self._train_step(self.model, *train_step_args)
return self.loss
if self.step == 0:
self.stream.wait_stream(torch.cuda.current_stream())
if self.step < self.warmup_iters:
return self._warmup_step(*train_step_args)
if self.graph is None:
torch.cuda.synchronize()
self._cuda_graph_capture(*train_step_args)
self._cuda_graph_replay(*train_step_args)
return self.loss
def inference_benchmark(*args, cuda_graphs=False, **kwargs):
if cuda_graphs:
return inference_benchmark_graphed(*args, **kwargs)
else:
return inference_benchmark_nongraphed(*args, **kwargs)
def inference_benchmark_nongraphed(model, data_loader, num_batches=100):
model.eval()
base_device = FLAGS.base_device
latencies = []
y_true = []
y_score = []
with torch.no_grad():
for step, (numerical_features, categorical_features, click) in enumerate(data_loader):
if step > num_batches:
break
step_start_time = time()
numerical_features = numerical_features.to(base_device)
if FLAGS.amp:
numerical_features = numerical_features.half()
categorical_features = categorical_features.to(device=base_device, dtype=torch.int64)
inference_result = model(numerical_features, categorical_features).squeeze()
torch.cuda.synchronize()
step_time = time() - step_start_time
if step >= FLAGS.benchmark_warmup_steps:
latencies.append(step_time)
y_true.append(click)
y_score.append(inference_result.reshape([-1]).clone())
y_true = torch.cat(y_true)
y_score = torch.sigmoid(torch.cat(y_score)).float()
auc = utils.roc_auc_score(y_true, y_score)
print('auc: ', auc)
return latencies
def inference_benchmark_graphed(model, data_loader, num_batches=100):
model.eval()
base_device = FLAGS.base_device
latencies = []
data_iter = iter(data_loader)
numerical, categorical, _ = next(data_iter)
# Warmup before capture
s = torch.cuda.Stream()
static_numerical = numerical.to(base_device)
static_categorical = categorical.to(device=base_device, dtype=torch.int64)
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
for i in range(10):
if FLAGS.amp:
numerical = static_numerical.half()
else:
numerical = static_numerical
inference_result = model(numerical, static_categorical).squeeze()
torch.cuda.synchronize()
# Graph capture
graph = torch.cuda.CUDAGraph()
with torch.cuda.graph(graph):
if FLAGS.amp:
numerical = static_numerical.half()
else:
numerical = static_numerical
inference_result = model(numerical, static_categorical).squeeze()
torch.cuda.synchronize()
# Inference
y_true = []
y_score = []
with torch.no_grad():
for step, (numerical_features, categorical_features, click) in enumerate(data_loader):
if step > num_batches:
break
torch.cuda.synchronize()
step_start_time = time()
numerical_features = numerical_features.to(base_device)
categorical_features = categorical_features.to(device=base_device, dtype=torch.int64)
static_categorical.copy_(categorical_features)
static_numerical.copy_(numerical_features)
graph.replay()
torch.cuda.synchronize()
step_time = time() - step_start_time
if step >= FLAGS.benchmark_warmup_steps:
latencies.append(step_time)
y_true.append(click)
y_score.append(inference_result.reshape([-1]).clone())
y_true = torch.cat(y_true)
y_score = torch.sigmoid(torch.cat(y_score)).float()
auc = utils.roc_auc_score(y_true, y_score)
print('auc: ', auc)
return latencies
def main(argv):
torch.manual_seed(FLAGS.seed)
use_gpu = "cpu" not in FLAGS.base_device.lower()
rank, world_size, gpu = dist.init_distributed_mode(backend=FLAGS.backend, use_gpu=use_gpu)
device = FLAGS.base_device
feature_spec = load_feature_spec(FLAGS)
cat_feature_count = len(get_embedding_sizes(feature_spec, None))
validate_flags(cat_feature_count)
if is_main_process():
utils.init_logging(log_path=FLAGS.log_path)
dllogger.log(data=FLAGS.flag_values_dict(), step='PARAMETER')
FLAGS.set_default("test_batch_size", FLAGS.test_batch_size // world_size * world_size)
feature_spec = load_feature_spec(FLAGS)
world_embedding_sizes = get_embedding_sizes(feature_spec, max_table_size=FLAGS.max_table_size)
world_categorical_feature_sizes = np.asarray(world_embedding_sizes)
device_mapping = get_device_mapping(world_embedding_sizes, num_gpus=world_size)
batch_sizes_per_gpu = get_gpu_batch_sizes(FLAGS.batch_size, num_gpus=world_size)
batch_indices = tuple(np.cumsum([0] + list(batch_sizes_per_gpu)))  # start/end offsets of each GPU's slice of the global batch
# Embedding sizes for each GPU
categorical_feature_sizes = world_categorical_feature_sizes[device_mapping['embedding'][rank]].tolist()
num_numerical_features = feature_spec.get_number_of_numerical_features()
bottom_mlp_sizes = FLAGS.bottom_mlp_sizes if rank == device_mapping['bottom_mlp'] else None
data_loader_train, data_loader_test = get_data_loaders(FLAGS, device_mapping=device_mapping,
feature_spec=feature_spec)
model = DistributedDlrm(
vectors_per_gpu=device_mapping['vectors_per_gpu'],
embedding_device_mapping=device_mapping['embedding'],
embedding_type=FLAGS.embedding_type,
embedding_dim=FLAGS.embedding_dim,
world_num_categorical_features=len(world_categorical_feature_sizes),
categorical_feature_sizes=categorical_feature_sizes,
num_numerical_features=num_numerical_features,
hash_indices=FLAGS.hash_indices,
bottom_mlp_sizes=bottom_mlp_sizes,
top_mlp_sizes=FLAGS.top_mlp_sizes,
interaction_op=FLAGS.interaction_op,
fp16=FLAGS.amp,
use_cpp_mlp=FLAGS.optimized_mlp,
bottom_features_ordered=FLAGS.bottom_features_ordered,
device=device
)
dist.setup_distributed_print(is_main_process())
# DDP introduces a gradient average through allreduce(mean), which doesn't apply to bottom model.
# Compensate it with further scaling lr
if FLAGS.Adam_embedding_optimizer:
embedding_model_parallel_lr = FLAGS.lr
else:
embedding_model_parallel_lr = FLAGS.lr / world_size
if FLAGS.Adam_MLP_optimizer:
MLP_model_parallel_lr = FLAGS.lr
else:
MLP_model_parallel_lr = FLAGS.lr / world_size
data_parallel_lr = FLAGS.lr
if is_main_process():
mlp_params = [
{'params': list(model.top_model.parameters()), 'lr': data_parallel_lr},
{'params': list(model.bottom_model.mlp.parameters()), 'lr': MLP_model_parallel_lr}
]
mlp_lrs = [data_parallel_lr, MLP_model_parallel_lr]
else:
mlp_params = [
{'params': list(model.top_model.parameters()), 'lr': data_parallel_lr}
]
mlp_lrs = [data_parallel_lr]
if FLAGS.Adam_MLP_optimizer:
mlp_optimizer = apex_optim.FusedAdam(mlp_params)
else:
mlp_optimizer = apex_optim.FusedSGD(mlp_params)
embedding_params = [{
'params': list(model.bottom_model.embeddings.parameters()),
'lr': embedding_model_parallel_lr
}]
embedding_lrs = [embedding_model_parallel_lr]
if FLAGS.Adam_embedding_optimizer:
embedding_optimizer = torch.optim.SparseAdam(embedding_params)
else:
embedding_optimizer = torch.optim.SGD(embedding_params)
checkpoint_writer = make_distributed_checkpoint_writer(
device_mapping=device_mapping,
rank=rank,
is_main_process=is_main_process(),
config=FLAGS.flag_values_dict()
)
checkpoint_loader = make_distributed_checkpoint_loader(device_mapping=device_mapping, rank=rank)
if FLAGS.load_checkpoint_path:
checkpoint_loader.load_checkpoint(model, FLAGS.load_checkpoint_path)
model.to(device)
scaler = torch.cuda.amp.GradScaler(enabled=FLAGS.amp, growth_interval=int(1e9))
def parallelize(model):
if world_size <= 1:
return model
model.top_model = torch.nn.parallel.DistributedDataParallel(model.top_model)
return model
if FLAGS.mode == 'test':
model = parallelize(model)
auc, valid_loss = dist_evaluate(model, data_loader_test)
results = {'best_auc': auc, 'best_validation_loss': valid_loss}
if is_main_process():
dllogger.log(data=results, step=tuple())
return
elif FLAGS.mode == 'inference_benchmark':
if world_size > 1:
raise ValueError('Inference benchmark only supports single-GPU mode.')
results = {}
if FLAGS.amp:
# can use pure FP16 for inference
model = model.half()
for batch_size in FLAGS.inference_benchmark_batch_sizes:
FLAGS.test_batch_size = batch_size
_, data_loader_test = get_data_loaders(FLAGS, device_mapping=device_mapping, feature_spec=feature_spec)
latencies = inference_benchmark(model=model, data_loader=data_loader_test,
num_batches=FLAGS.inference_benchmark_steps,
cuda_graphs=FLAGS.cuda_graphs)
# drop the first 10 as a warmup
latencies = latencies[10:]
mean_latency = np.mean(latencies)
mean_inference_throughput = batch_size / mean_latency
subresult = {f'mean_inference_latency_batch_{batch_size}': mean_latency,
f'mean_inference_throughput_batch_{batch_size}': mean_inference_throughput}
results.update(subresult)
if is_main_process():
dllogger.log(data=results, step=tuple())
return
if FLAGS.save_checkpoint_path and not FLAGS.bottom_features_ordered and is_main_process():
logging.warning("Saving checkpoint without --bottom_features_ordered flag will result in "
"a device-order dependent model. Consider using --bottom_features_ordered "
"if you plan to load the checkpoint in different device configurations.")
loss_fn = torch.nn.BCEWithLogitsLoss(reduction="mean")
# Print per 16384 * 2000 samples by default
default_print_freq = 16384 * 2000 // FLAGS.batch_size
print_freq = default_print_freq if FLAGS.print_freq is None else FLAGS.print_freq
# last one will be dropped in the training loop
steps_per_epoch = len(data_loader_train) - 1
test_freq = FLAGS.test_freq if FLAGS.test_freq is not None else steps_per_epoch - 2
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{avg:.8f}'))
metric_logger.add_meter('step_time', utils.SmoothedValue(window_size=1, fmt='{avg:.6f}'))
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
# Accumulating loss on GPU to avoid memcpyD2H every step
moving_loss = torch.zeros(1, device=device)
lr_scheduler = utils.LearningRateScheduler(optimizers=[mlp_optimizer, embedding_optimizer],
base_lrs=[mlp_lrs, embedding_lrs],
warmup_steps=FLAGS.warmup_steps,
warmup_factor=FLAGS.warmup_factor,
decay_start_step=FLAGS.decay_start_step,
decay_steps=FLAGS.decay_steps,
decay_power=FLAGS.decay_power,
end_lr_factor=FLAGS.decay_end_lr / FLAGS.lr)
def zero_grad(model):
if FLAGS.Adam_embedding_optimizer or FLAGS.Adam_MLP_optimizer:
model.zero_grad()
else:
# We don't need to accumulate gradients. Setting grad to None is faster than optimizer.zero_grad()
for param_group in itertools.chain(embedding_optimizer.param_groups, mlp_optimizer.param_groups):
for param in param_group['params']:
param.grad = None
def forward_backward(model, *args):
numerical_features, categorical_features, click = args
with torch.cuda.amp.autocast(enabled=FLAGS.amp):
output = model(numerical_features, categorical_features, batch_sizes_per_gpu).squeeze()
loss = loss_fn(output, click[batch_indices[rank]: batch_indices[rank + 1]])
scaler.scale(loss).backward()
return loss
def weight_update():
if not FLAGS.freeze_mlps:
if FLAGS.Adam_MLP_optimizer:
scale_MLP_gradients(mlp_optimizer, world_size)
scaler.step(mlp_optimizer)
if not FLAGS.freeze_embeddings:
if FLAGS.Adam_embedding_optimizer:
scale_embeddings_gradients(embedding_optimizer, world_size)
scaler.unscale_(embedding_optimizer)
embedding_optimizer.step()
scaler.update()
trainer = CudaGraphWrapper(model, forward_backward, parallelize, zero_grad,
cuda_graphs=FLAGS.cuda_graphs)
data_stream = torch.cuda.Stream()
timer = utils.StepTimer()
best_validation_loss = 1e6
best_auc = 0
best_epoch = 0
start_time = time()
for epoch in range(FLAGS.epochs):
epoch_start_time = time()
batch_iter = prefetcher(iter(data_loader_train), data_stream)
for step in range(len(data_loader_train)):
numerical_features, categorical_features, click = next(batch_iter)
timer.click(synchronize=(device == 'cuda'))
global_step = steps_per_epoch * epoch + step
if FLAGS.max_steps and global_step > FLAGS.max_steps:
print(f"Reached max global steps of {FLAGS.max_steps}. Stopping.")
break
# One of the batches will be smaller because the dataset size
# isn't necessarily a multiple of the batch size. #TODO isn't dropping here a change of behavior
if click.shape[0] != FLAGS.batch_size:
continue
lr_scheduler.step()
loss = trainer.train_step(numerical_features, categorical_features, click)
# need to wait for the gradients before the weight update
torch.cuda.current_stream().wait_stream(trainer.stream)
weight_update()
moving_loss += loss
if timer.measured is None:
# first iteration, no step time etc. to print
continue
if step == 0:
print(f"Started epoch {epoch}...")
elif step % print_freq == 0:
# Averaging across a print_freq period to reduce the error.
# An accurate timing needs synchronize which would slow things down.
# only check for nan every print_freq steps
if torch.any(torch.isnan(loss)):
print('NaN loss encountered.')
break
if global_step < FLAGS.benchmark_warmup_steps:
metric_logger.update(
loss=moving_loss.item() / print_freq,
lr=mlp_optimizer.param_groups[0]["lr"])
else:
metric_logger.update(
step_time=timer.measured,
loss=moving_loss.item() / print_freq,
lr=mlp_optimizer.param_groups[0]["lr"])
eta_str = datetime.timedelta(seconds=int(metric_logger.step_time.global_avg * (steps_per_epoch - step)))
metric_logger.print(header=f"Epoch:[{epoch}/{FLAGS.epochs}] [{step}/{steps_per_epoch}] eta: {eta_str}")
moving_loss = 0.
if global_step % test_freq == 0 and global_step > 0 and global_step / steps_per_epoch >= FLAGS.test_after:
auc, validation_loss = dist_evaluate(trainer.model, data_loader_test)
if auc is None:
continue
print(f"Epoch {epoch} step {step}. auc {auc:.6f}")
stop_time = time()
if auc > best_auc:
best_auc = auc
best_epoch = epoch + ((step + 1) / steps_per_epoch)
if validation_loss < best_validation_loss:
best_validation_loss = validation_loss
if FLAGS.auc_threshold and auc >= FLAGS.auc_threshold:
run_time_s = int(stop_time - start_time)
print(f"Hit target accuracy AUC {FLAGS.auc_threshold} at epoch "
f"{global_step / steps_per_epoch:.2f} in {run_time_s}s. ")
sys.exit()
epoch_stop_time = time()
epoch_time_s = epoch_stop_time - epoch_start_time
print(f"Finished epoch {epoch} in {datetime.timedelta(seconds=int(epoch_time_s))}. ")
avg_throughput = FLAGS.batch_size / metric_logger.step_time.avg
if FLAGS.save_checkpoint_path:
checkpoint_writer.save_checkpoint(model, FLAGS.save_checkpoint_path, epoch, step)
results = {'best_auc': best_auc,
'best_validation_loss': best_validation_loss,
'training_loss' : metric_logger.meters['loss'].avg,
'best_epoch': best_epoch,
'average_train_throughput': avg_throughput}
if is_main_process():
dllogger.log(data=results, step=tuple())
def scale_MLP_gradients(mlp_optimizer: torch.optim.Optimizer, world_size: int):
for param_group in mlp_optimizer.param_groups[1:]: # Omitting top MLP
for param in param_group['params']:
param.grad.div_(world_size)
def scale_embeddings_gradients(embedding_optimizer: torch.optim.Optimizer, world_size: int):
for param_group in embedding_optimizer.param_groups:
for param in param_group['params']:
if param.grad is not None:
param.grad.div_(world_size)
def dist_evaluate(model, data_loader):
"""Test distributed DLRM model
Args:
model (DistDLRM):
data_loader (torch.utils.data.DataLoader):
"""
model.eval()
device = FLAGS.base_device
world_size = dist.get_world_size()
batch_sizes_per_gpu = [FLAGS.test_batch_size // world_size for _ in range(world_size)]
test_batch_size = sum(batch_sizes_per_gpu)
if FLAGS.test_batch_size != test_batch_size:
print(f"Rounded test_batch_size to {test_batch_size}")
# Test batch size could be big, make sure it prints
default_print_freq = max(524288 * 100 // test_batch_size, 1)
print_freq = default_print_freq if FLAGS.print_freq is None else FLAGS.print_freq
steps_per_epoch = len(data_loader)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('step_time', utils.SmoothedValue(window_size=1, fmt='{avg:.4f}'))
with torch.no_grad():
timer = utils.StepTimer()
# ROC could be computed per batch and AUC aggregated globally, but that is not implemented here.
# So pack all the outputs and labels together to compute AUC. y_true and y_score naming follows sklearn
y_true = []
y_score = []
data_stream = torch.cuda.Stream()
batch_iter = prefetcher(iter(data_loader), data_stream)
loss_fn = torch.nn.BCELoss(reduction="mean")
timer.click(synchronize=(device=='cuda'))
for step in range(len(data_loader)):
numerical_features, categorical_features, click = next(batch_iter)
torch.cuda.synchronize()
last_batch_size = None
if click.shape[0] != test_batch_size: # last batch
last_batch_size = click.shape[0]
padding_size = test_batch_size - last_batch_size
if numerical_features is not None:
padding_numerical = torch.empty(
padding_size, numerical_features.shape[1],
device=numerical_features.device, dtype=numerical_features.dtype)
numerical_features = torch.cat((numerical_features, padding_numerical), dim=0)
if categorical_features is not None:
padding_categorical = torch.ones(
padding_size, categorical_features.shape[1],
device=categorical_features.device, dtype=categorical_features.dtype)
categorical_features = torch.cat((categorical_features, padding_categorical), dim=0)
with torch.cuda.amp.autocast(enabled=FLAGS.amp):
output = model(numerical_features, categorical_features, batch_sizes_per_gpu)
output = output.squeeze()
output = output.float()
if world_size > 1:
output_receive_buffer = torch.empty(test_batch_size, device=device)
torch.distributed.all_gather(list(output_receive_buffer.split(batch_sizes_per_gpu)), output)
output = output_receive_buffer
if last_batch_size is not None:
output = output[:last_batch_size]
if FLAGS.auc_device == "CPU":
click = click.cpu()
output = output.cpu()
y_true.append(click)
y_score.append(output)
timer.click(synchronize=(device == 'cuda'))
if timer.measured is not None:
metric_logger.update(step_time=timer.measured)
if step % print_freq == 0 and step > 0:
metric_logger.print(header=f"Test: [{step}/{steps_per_epoch}]")
if is_main_process():
y_true = torch.cat(y_true)
y_score = torch.sigmoid(torch.cat(y_score)).float()
auc = utils.roc_auc_score(y_true, y_score)
loss = loss_fn(y_score, y_true).item()
print(f'test loss: {loss:.8f}', )
else:
auc = None
loss = None
if world_size > 1:
torch.distributed.barrier()
model.train()
return auc, loss
if __name__ == '__main__':
app.run(main)
|
TensorFlow2/LanguageModeling/BERT/data | data | WikiDownloader | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bz2
import os
import urllib.request
import sys
import subprocess
class WikiDownloader:
def __init__(self, language, save_path):
self.save_path = save_path + '/wikicorpus_' + language
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.language = language
self.download_urls = {
'en' : 'https://dumps.wikimedia.your.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2',
'zh' : 'https://dumps.wikimedia.your.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2' }
self.output_files = {
'en' : 'wikicorpus_en.xml.bz2',
'zh' : 'wikicorpus_zh.xml.bz2'
}
def download(self):
if self.language in self.download_urls:
url = self.download_urls[self.language]
filename = self.output_files[self.language]
print('Downloading:', url)
if os.path.isfile(self.save_path + '/' + filename):
print('** Download file already exists, skipping download')
else:
cmd = ['wget', url, '--output-document={}'.format(self.save_path + '/' + filename), '--no-check-certificate']
print('Running:', cmd)
status = subprocess.run(cmd)
if status.returncode != 0:
raise RuntimeError('Wiki download not successful')
# Always unzipping since this is relatively fast and will overwrite
print('Unzipping:', self.output_files[self.language])
subprocess.run('bzip2 -dk ' + self.save_path + '/' + filename, shell=True, check=True)
else:
assert False, 'WikiDownloader not implemented for this language yet.'
|
PyTorch/SpeechSynthesis/FastPitch/common/text | text | datestime | import re
_ampm_re = re.compile(
r'([0-9]|0[0-9]|1[0-9]|2[0-3]):?([0-5][0-9])?\s*([AaPp][Mm]\b)')
def _expand_ampm(m):
matches = list(m.groups(0))
txt = matches[0]
txt = txt if int(matches[1]) == 0 else txt + ' ' + matches[1]
if matches[2][0].lower() == 'a':
txt += ' a.m.'
elif matches[2][0].lower() == 'p':
txt += ' p.m.'
return txt
def normalize_datestime(text):
text = re.sub(_ampm_re, _expand_ampm, text)
#text = re.sub(r"([0-9]|0[0-9]|1[0-9]|2[0-3]):([0-5][0-9])?", r"\1 \2", text)
return text
|
PyTorch/LanguageModeling/BERT/triton/runner | runner | experiment | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import pathlib
from datetime import datetime
from typing import Any, Dict, Optional
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import DataObject
class ExperimentStatus(object):
"""
Experiment status flags object
"""
SUCCEED = "Succeed"
FAILED = "Failed"
class StageStatus:
"""
Stages status flags object
"""
SUCCEED = "Succeed"
FAILED = "Failed"
class Stage(DataObject):
"""
Stage data object
"""
name: str
status: str
started_at: Optional[int]
ended_at: Optional[int]
result_path: Optional[str]
result_type: Optional[str]
def __init__(
self,
name: str,
result_path: Optional[str],
result_type: Optional[str],
status: str = StageStatus.FAILED,
started_at: Optional[int] = None,
ended_at: Optional[int] = None,
):
"""
Args:
name: name of stage
result_path: path where results file is stored
result_type: type of results
status: success/fail status
started_at: time when stage has started
ended_at: time when stage has ended
"""
self.name = name
self.status = status
self.started_at = started_at
self.ended_at = ended_at
self.result_path = result_path
self.result_type = result_type
def start(self) -> None:
"""
Update stage execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update stage execution info at end
Returns:
None
"""
self.status = StageStatus.SUCCEED
self.ended_at = int(datetime.utcnow().timestamp())
class Experiment(DataObject):
"""
Experiment data object
"""
experiment_id: int
parameters: Dict
stages: Dict[str, Stage]
results: Dict[str, str]
status: str
started_at: Optional[int]
ended_at: Optional[int]
def __init__(
self,
experiment_id: int,
parameters: Dict,
stages: Dict[str, Stage],
results: Dict[str, str],
started_at: Optional[int] = None,
ended_at: Optional[int] = None,
status: str = ExperimentStatus.FAILED,
):
"""
Args:
experiment_id: experiment identifier
parameters: dictionary with experiment configuration
stages: dictionary with stages run in experiment
results: mapping between results types and location where are stored
started_at: time when experiment has started
ended_at: time when experiment has ended
status: experiment success/fail information
"""
self.experiment_id = experiment_id
self.started_at = started_at
self.ended_at = ended_at
self.parameters = parameters
self.stages = stages
self.status = status
self.results = results
self.results_dir = f"experiment_{experiment_id}"
def start(self) -> None:
"""
Update experiment execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update experiment execution info at end
Returns:
None
"""
self.status = ExperimentStatus.SUCCEED
self.ended_at = int(datetime.utcnow().timestamp())
@dataclasses.dataclass
class Status:
state: ExperimentStatus
message: str
@dataclasses.dataclass
class ExperimentResult:
"""
Experiment result object
"""
status: Status
experiment: Experiment
results: Dict[str, pathlib.Path]
payload: Dict[str, Any] = dataclasses.field(default_factory=dict)
|
PyTorch/SpeechSynthesis/Tacotron2/tensorrt | tensorrt | convert_waveglow2onnx | # *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import argparse
import os
import sys
sys.path.append('./')
from tacotron2_common.utils import ParseFromConfigFile
from inference import load_and_setup_model
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--waveglow', type=str, required=True,
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory for the exported WaveGlow ONNX model')
parser.add_argument('--fp16', action='store_true',
help='inference with AMP')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
parser.add_argument('--config-file', action=ParseFromConfigFile,
type=str, help='Path to configuration file')
return parser
def export_onnx(parser, args):
waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
fp16_run=args.fp16, cpu_run=False,
forward_is_infer=False)
# 80 mel channels, 620 mel spectrogram frames ~ 7 seconds of speech
mel = torch.randn(1, 80, 620).cuda()
stride = 256 # value from waveglow upsample
n_group = 8
z_size2 = (mel.size(2)*stride)//n_group
z = torch.randn(1, n_group, z_size2).cuda()
if args.fp16:
mel = mel.half()
z = z.half()
with torch.no_grad():
# run inference to force calculation of inverses
waveglow.infer(mel, sigma=args.sigma_infer)
# export to ONNX
if args.fp16:
waveglow = waveglow.half()
waveglow.forward = waveglow.infer_onnx
opset_version = 12
output_path = os.path.join(args.output, "waveglow.onnx")
torch.onnx.export(waveglow, (mel, z), output_path,
opset_version=opset_version,
do_constant_folding=True,
input_names=["mel", "z"],
output_names=["audio"],
dynamic_axes={"mel": {0: "batch_size", 2: "mel_seq"},
"z": {0: "batch_size", 2: "z_seq"},
"audio": {0: "batch_size", 1: "audio_seq"}})
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
export_onnx(parser, args)
if __name__ == '__main__':
main()
|
TensorFlow/Detection/SSD/models/research/slim/scripts | scripts | train_cifarnet_on_cifar10 | #!/bin/bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# This script performs the following operations:
# 1. Downloads the Cifar10 dataset
# 2. Trains a CifarNet model on the Cifar10 training set.
# 3. Evaluates the model on the Cifar10 testing set.
#
# Usage:
# cd slim
# ./scripts/train_cifarnet_on_cifar10.sh
set -e
# Where the checkpoint and logs will be saved to.
TRAIN_DIR=/tmp/cifarnet-model
# Where the dataset is saved to.
DATASET_DIR=/tmp/cifar10
# Download the dataset
python download_and_convert_data.py \
--dataset_name=cifar10 \
--dataset_dir=${DATASET_DIR}
# Run training.
python train_image_classifier.py \
--train_dir=${TRAIN_DIR} \
--dataset_name=cifar10 \
--dataset_split_name=train \
--dataset_dir=${DATASET_DIR} \
--model_name=cifarnet \
--preprocessing_name=cifarnet \
--max_number_of_steps=100000 \
--batch_size=128 \
--save_interval_secs=120 \
--save_summaries_secs=120 \
--log_every_n_steps=100 \
--optimizer=sgd \
--learning_rate=0.1 \
--learning_rate_decay_factor=0.1 \
--num_epochs_per_decay=200 \
--weight_decay=0.004
# Run evaluation.
python eval_image_classifier.py \
--checkpoint_path=${TRAIN_DIR} \
--eval_dir=${TRAIN_DIR} \
--dataset_name=cifar10 \
--dataset_split_name=test \
--dataset_dir=${DATASET_DIR} \
--model_name=cifarnet
|
PyTorch/LanguageModeling/BART/bart/tokenization | tokenization | tokenization_utils | # coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for python tokenizers.
For fast tokenizers (provided by HuggingFace's tokenizers library) see tokenization_utils_fast.py
"""
import itertools
import logging
import re
import unicodedata
from typing import Any, Dict, List, Optional, Tuple, Union, overload
from utils.file_utils import add_end_docstrings
from bart.tokenization.tokenization_utils_base import (
ENCODE_KWARGS_DOCSTRING,
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
INIT_TOKENIZER_DOCSTRING,
AddedToken,
BatchEncoding,
EncodedInput,
EncodedInputPair,
PaddingStrategy,
PreTokenizedInput,
PreTokenizedInputPair,
PreTrainedTokenizerBase,
TensorType,
TextInput,
TextInputPair,
TruncationStrategy,
)
logger = logging.getLogger(__name__)
def _is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `char` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def _is_end_of_word(text):
"""Checks whether the last character in text is one of a punctuation, control or whitespace character."""
last_char = text[-1]
return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char))
def _is_start_of_word(text):
"""Checks whether the first character in text is one of a punctuation, control or whitespace character."""
first_char = text[0]
return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char))
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING, """ .. automethod:: __call__""")
class PreTrainedTokenizer(PreTrainedTokenizerBase):
"""
Base class for all slow tokenizers.
Inherits from :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase`.
Handle all the shared methods for tokenization and special tokens as well as methods
downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contains the added tokens in a unified way on top of all tokenizers so we don't
have to handle the specific vocabulary augmentation methods of the various underlying
dictionary structures (BPE, sentencepiece...).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Added tokens - We store this for both slow and fast tokenizers
# until the serialization of Fast tokenizers is updated
self.added_tokens_encoder: Dict[str, int] = {}
self.added_tokens_decoder: Dict[int, str] = {}
self.unique_no_split_tokens: List[str] = []
@property
def is_fast(self) -> bool:
return False
@property
def vocab_size(self) -> int:
"""
:obj:`int`: Size of the base vocabulary (without the added tokens).
"""
raise NotImplementedError
def get_vocab(self) -> Dict[str, int]:
"""
Returns the vocabulary as a dictionary of token to index.
:obj:`tokenizer.get_vocab()[token]` is equivalent to :obj:`tokenizer.convert_tokens_to_ids(token)` when
:obj:`token` is in the vocab.
Returns:
:obj:`Dict[str, int]`: The vocabulary.
"""
raise NotImplementedError()
def get_added_vocab(self) -> Dict[str, int]:
"""
Returns the added tokens in the vocabulary as a dictionary of token to index.
Returns:
:obj:`Dict[str, int]`: The added tokens.
"""
return self.added_tokens_encoder
def __len__(self):
"""
Size of the full vocabulary with the added tokens.
"""
return self.vocab_size + len(self.added_tokens_encoder)
def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
vocabulary, they are added to it with indices starting from the length of the current vocabulary.
Args:
new_tokens (:obj:`List[str]`or :obj:`List[tokenizers.AddedToken]`):
Token(s) to add to the vocabulary. A token is only added if it's not already in the vocabulary (tested by
checking if the tokenizer assigns the index of the ``unk_token`` to it).
special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the tokens should be added as special tokens.
Returns:
:obj:`int`: The number of tokens actually added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
# Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
"""
new_tokens = [str(tok) for tok in new_tokens]
tokens_to_add = []
for token in new_tokens:
assert isinstance(token, str)
if not special_tokens and self.init_kwargs.get("do_lower_case", False):
token = token.lower()
if (
token != self.unk_token
and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)
and token not in tokens_to_add
):
tokens_to_add.append(token)
if self.verbose:
logger.info("Adding %s to the vocabulary", token)
added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.added_tokens_decoder.update(added_tok_decoder)
# Make sure we don't split on any special tokens (even if they were already in the vocab before, e.g. for Albert)
if special_tokens:
self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(new_tokens)))
else:
# Or on the newly added tokens
self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(tokens_to_add)))
return len(tokens_to_add)
def num_special_tokens_to_add(self, pair: bool = False) -> int:
"""
Returns the number of added tokens when encoding a sequence with special tokens.
.. note::
This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not
put this inside your training loop.
Args:
pair (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence.
Returns:
:obj:`int`: Number of special tokens added to sequences.
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
def tokenize(self, text: TextInput, **kwargs) -> List[str]:
"""
Converts a string into a sequence of tokens, using the tokenizer.
Splits into words for word-based vocabularies or sub-words for sub-word-based vocabularies (BPE/SentencePiece/WordPiece).
Takes care of added tokens.
Args:
text (:obj:`str`):
The sequence to be encoded.
**kwargs (additional keyword arguments):
Passed along to the model-specific ``prepare_for_tokenization`` preprocessing method.
Returns:
:obj:`List[str]`: The list of tokens.
"""
# Simple mapping string => AddedToken for special tokens with specific tokenization behaviors
all_special_tokens_extended = dict(
(str(t), t) for t in self.all_special_tokens_extended if isinstance(t, AddedToken)
)
text, kwargs = self.prepare_for_tokenization(text, **kwargs)
if kwargs:
logger.warning(f"Keyword arguments {kwargs} not recognized.")
# TODO: should this be in the base class?
if self.init_kwargs.get("do_lower_case", False):
# convert non-special tokens to lowercase
escaped_special_toks = [re.escape(s_tok) for s_tok in self.all_special_tokens]
pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text)
def split_on_token(tok, text):
result = []
tok_extended = all_special_tokens_extended.get(tok, None)
split_text = text.split(tok)
full_word = ""
for i, sub_text in enumerate(split_text):
# AddedToken can control whitespace stripping around them.
# We use them for GPT2 and Roberta to have different behavior depending on the special token
# Cf. https://github.com/huggingface/transformers/pull/2778
# and https://github.com/huggingface/transformers/issues/3788
if isinstance(tok_extended, AddedToken):
if tok_extended.single_word:
# Try to avoid splitting on token
if (
i < len(split_text) - 1
and not _is_end_of_word(sub_text)
and not _is_start_of_word(split_text[i + 1])
):
# Don't extract the special token
full_word += sub_text + tok
elif full_word:
full_word += sub_text
result += [full_word]
full_word = ""
continue
# Strip white spaces on the right
if tok_extended.rstrip and i > 0:
# A bit counter-intuitive but we strip the left of the string
# since tok_extended.rstrip means the special token is eating all white spaces on its right
sub_text = sub_text.lstrip()
# Strip white spaces on the left
if tok_extended.lstrip and i < len(split_text) - 1:
sub_text = sub_text.rstrip() # Opposite here
else:
# We strip left and right by default
if i < len(split_text) - 1:
sub_text = sub_text.rstrip()
if i > 0:
sub_text = sub_text.lstrip()
if i == 0 and not sub_text:
result += [tok]
elif i == len(split_text) - 1:
if sub_text:
result += [sub_text]
else:
if sub_text:
result += [sub_text]
result += [tok]
return result
def split_on_tokens(tok_list, text):
if not text.strip():
return []
if not tok_list:
return self._tokenize(text)
tokenized_text = []
text_list = [text]
for tok in tok_list:
tokenized_text = []
for sub_text in text_list:
if sub_text not in self.unique_no_split_tokens:
tokenized_text += split_on_token(tok, sub_text)
else:
tokenized_text += [sub_text]
text_list = tokenized_text
return list(
itertools.chain.from_iterable(
(
self._tokenize(token) if token not in self.unique_no_split_tokens else [token]
for token in tokenized_text
)
)
)
no_split_token = self.unique_no_split_tokens
tokenized_text = split_on_tokens(no_split_token, text)
return tokenized_text
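# Illustrative sketch (hypothetical subclass and checkpoint name) of how `tokenize` treats
# added tokens: strings registered via `add_tokens` are kept whole, and only the remaining
# substrings are passed down to `_tokenize`.
#
#   tokenizer = MyTokenizer.from_pretrained("some-checkpoint")
#   tokenizer.add_tokens(["[NEW]"])
#   tokenizer.tokenize("hello [NEW] world")
#   # -> ["hello", "[NEW]", "world"] for a simple word-level `_tokenize`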
def _tokenize(self, text, **kwargs):
"""
Converts a string into a sequence of tokens (strings), using the tokenizer.
Splits into words for word-based vocabularies or sub-words for sub-word-based vocabularies
(BPE/SentencePiece/WordPiece).
Does NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
"""
Converts a token string (or a sequence of tokens) into a single integer id (or a sequence of ids), using the
vocabulary.
Args:
tokens (:obj:`str` or :obj:`List[str]`): One or several token(s) to convert to token id(s).
Returns:
:obj:`int` or :obj:`List[int]`: The token id or list of token ids.
"""
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
if is_pretokenized:
tokens = list(itertools.chain(*(self.tokenize(t, is_pretokenized=True, **kwargs) for t in text)))
return self.convert_tokens_to_ids(tokens)
else:
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
if is_pretokenized:
raise ValueError(
f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_pretokenized=True`."
)
else:
raise ValueError(
f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers."
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
first_ids = get_input_ids(text)
second_ids = get_input_ids(text_pair) if text_pair is not None else None
return self.prepare_for_model(
first_ids,
pair_ids=second_ids,
add_special_tokens=add_special_tokens,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
prepend_batch_axis=True,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
if is_pretokenized:
tokens = list(itertools.chain(*(self.tokenize(t, is_pretokenized=True, **kwargs) for t in text)))
return self.convert_tokens_to_ids(tokens)
else:
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers."
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
)
input_ids = []
for ids_or_pair_ids in batch_text_or_text_pairs:
if not isinstance(ids_or_pair_ids, (list, tuple)):
ids, pair_ids = ids_or_pair_ids, None
elif is_pretokenized and not isinstance(ids_or_pair_ids[0], (list, tuple)):
ids, pair_ids = ids_or_pair_ids, None
else:
ids, pair_ids = ids_or_pair_ids
first_ids = get_input_ids(ids)
second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
input_ids.append((first_ids, second_ids))
batch_outputs = self._batch_prepare_for_model(
input_ids,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=return_tensors,
verbose=verbose,
)
return BatchEncoding(batch_outputs)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(
self,
batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> BatchEncoding:
"""
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model.
It adds special tokens, truncates sequences if overflowing while taking into account the special tokens, and
manages a moving window (with user-defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
"""
batch_outputs = {}
for first_ids, second_ids in batch_ids_pairs:
outputs = self.prepare_for_model(
first_ids,
second_ids,
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterward
return_attention_mask=False, # we pad in batch afterward
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
def prepare_for_tokenization(
self, text: str, is_pretokenized: bool = False, **kwargs
) -> Tuple[str, Dict[str, Any]]:
"""
Performs any necessary transformations before tokenization.
This method should pop the arguments from kwargs and return the remaining :obj:`kwargs` as well.
We test the :obj:`kwargs` at the end of the encoding process to be sure all the arguments have been used.
Args:
text (:obj:`str`):
The text to prepare.
is_pretokenized (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the text has been pretokenized.
kwargs:
Keyword arguments to use for the tokenization.
Returns:
:obj:`Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
"""
return (text, kwargs)
def get_special_tokens_mask(
self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0 (:obj:`List[int]`):
List of ids of the first sequence.
token_ids_1 (:obj:`List[int]`, `optional`):
List of ids of the second sequence.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
@overload
def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str:
...
@overload
def convert_ids_to_tokens(self, ids: List[int], skip_special_tokens: bool = False) -> List[str]:
...
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[str, List[str]]:
"""
Converts a single index or a sequence of indices into a token or a sequence of tokens, using the vocabulary
and added tokens.
Args:
ids (:obj:`int` or :obj:`List[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to remove special tokens in the decoding.
Returns:
:obj:`str` or :obj:`List[str]`: The decoded token(s).
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index: int) -> str:
raise NotImplementedError
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""
Converts a sequence of tokens into a single string.
The simplest way to do it is ``" ".join(tokens)``, but we often want to remove
sub-word tokenization artifacts at the same time.
Args:
tokens (:obj:`List[str]`): The tokens to join into a string.
Return: The joined tokens.
"""
return " ".join(tokens)
def decode(
self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> str:
"""
Converts a sequence of ids into a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
Args:
token_ids (:obj:`List[int]`):
List of tokenized input ids. Can be obtained using the ``__call__`` method.
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to clean up the tokenization spaces.
Returns:
:obj:`str`: The decoded sentence.
"""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
text = " ".join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
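# Usage sketch (hypothetical tokenizer instance): `decode` is roughly the inverse of `encode`.
#   ids = tokenizer.encode("Hello world", add_special_tokens=True)
#   text = tokenizer.decode(ids, skip_special_tokens=True)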
def save_vocabulary(self, save_directory) -> Tuple[str]:
"""
Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
and special token mappings.
.. warning::
Please use :meth:`~transformers.PreTrainedTokenizer.save_pretrained` to save the full tokenizer state if
you want to reload it using the :meth:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
Args:
save_directory (:obj:`str`): The path to a directory where the tokenizer will be saved.
Returns:
A tuple of :obj:`str`: The files saved.
"""
raise NotImplementedError
def prepare_seq2seq_batch(
self,
src_texts: List[str],
tgt_texts: Optional[List[str]] = None,
max_length: Optional[int] = None,
max_target_length: Optional[int] = None,
padding: str = "longest",
return_tensors: str = "None",
truncation=True,
**kwargs,
) -> BatchEncoding:
r"""
Prepare a batch that can be passed directly to an instance of :class:`~transformers.AutoModelForSeq2SeqLM`.
Args:
src_texts: (:obj:`List[str]`):
List of documents to summarize or source language texts.
tgt_texts: (:obj:`List[str]`, `optional`):
List of summaries or target language texts.
max_length (:obj:`int`, `optional`):
Controls the maximum length for encoder inputs (documents to summarize or source language texts).
If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum
length is required by one of the truncation/padding parameters. If the model has no specific maximum
input length (like XLNet) truncation/padding to a maximum length will be deactivated.
max_target_length (:obj:`int`, `optional`):
Controls the maximum length of decoder inputs (target language texts or summaries).
If left unset or set to :obj:`None`, this will use the max_length value.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`):
Activates and controls padding. Accepts the following values:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
single sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"):
If set, will return tensors instead of list of python integers. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`):
Activates and controls truncation. Accepts the following values:
* :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument
:obj:`max_length` or to the maximum acceptable input length for the model if that argument is not
provided. This will truncate token by token, removing a token from the longest sequence in the pair
if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with
sequence lengths greater than the model maximum admissible input size).
**kwargs:
Additional keyword arguments passed along to :obj:`self.__call__`.
Returns:
:class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields:
- **input_ids** -- List of token ids to be fed to the encoder.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
- **labels** -- List of token ids for tgt_texts
The full set of keys ``[input_ids, attention_mask, labels]``
will only be returned if tgt_texts is passed. Otherwise, input_ids and attention_mask will be the only keys.
"""
raise NotImplementedError(
"If your model requires more than input_ids for a typical forward pass, you should implement this method. "
"Returned keys should be [input_ids, attention_mask, labels]. See MarianTokenizer or T5Tokenizer for a "
"reference implementation."
) |
TensorFlow2/LanguageModeling/BERT/official/utils/accelerator | accelerator | tpu | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions specific to running TensorFlow on TPUs."""
import tensorflow as tf
# "local" is a magic word in the TPU cluster resolver; it informs the resolver
# to use the local CPU as the compute device. This is useful for testing and
# debugging; the code flow is ostensibly identical, but without the need to
# actually have a TPU on the other end.
LOCAL = "local"
def construct_scalar_host_call(metric_dict, model_dir, prefix=""):
"""Construct a host call to log scalars when training on TPU.
Args:
metric_dict: A dict of the tensors to be logged.
model_dir: The location to write the summary.
prefix: The prefix (if any) to prepend to the metric names.
Returns:
A tuple of (function, args_to_be_passed_to_said_function)
"""
# type: (dict, str) -> (function, list)
metric_names = list(metric_dict.keys())
def host_call_fn(global_step, *args):
"""Training host call. Creates scalar summaries for training metrics.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the
model to the `metric_fn`, provide as part of the `host_call`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `host_call`.
Args:
global_step: `Tensor` with shape `[batch]` for the global_step.
*args: Remaining tensors to log.
Returns:
List of summary ops to run on the CPU host.
"""
step = global_step[0]
with tf.contrib.summary.create_file_writer(
logdir=model_dir, filename_suffix=".host_call").as_default():
with tf.contrib.summary.always_record_summaries():
for i, name in enumerate(metric_names):
tf.contrib.summary.scalar(prefix + name, args[i][0], step=step)
return tf.contrib.summary.all_summary_ops()
# To log the current learning rate, and gradient norm for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
global_step_tensor = tf.reshape(
tf.compat.v1.train.get_or_create_global_step(), [1])
other_tensors = [tf.reshape(metric_dict[key], [1]) for key in metric_names]
return host_call_fn, [global_step_tensor] + other_tensors
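# Usage sketch inside a TPU `model_fn` (loss/train_op/lr are placeholders from the caller's code):
#   host_call = construct_scalar_host_call({"loss": loss, "learning_rate": lr}, model_dir)
#   return tf.contrib.tpu.TPUEstimatorSpec(
#       mode=mode, loss=loss, train_op=train_op, host_call=host_call)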
def embedding_matmul(embedding_table, values, mask, name="embedding_matmul"):
"""Performs embedding lookup via a matmul.
The matrix to be multiplied by the embedding table Tensor is constructed
via an implementation of scatter based on broadcasting embedding indices
and performing an equality comparison against a broadcasted
range(num_embedding_table_rows). All masked positions will produce an
embedding vector of zeros.
Args:
embedding_table: Tensor of embedding table.
Rank 2 (table_size x embedding dim)
values: Tensor of embedding indices. Rank 2 (batch x n_indices)
mask: Tensor of mask / weights. Rank 2 (batch x n_indices)
name: Optional name scope for created ops
Returns:
Rank 3 tensor of embedding vectors.
"""
with tf.name_scope(name):
n_embeddings = embedding_table.get_shape().as_list()[0]
batch_size, padded_size = values.shape.as_list()
emb_idcs = tf.tile(
tf.reshape(values, (batch_size, padded_size, 1)), (1, 1, n_embeddings))
emb_weights = tf.tile(
tf.reshape(mask, (batch_size, padded_size, 1)), (1, 1, n_embeddings))
col_idcs = tf.tile(
tf.reshape(tf.range(n_embeddings), (1, 1, n_embeddings)),
(batch_size, padded_size, 1))
one_hot = tf.where(
tf.equal(emb_idcs, col_idcs), emb_weights,
tf.zeros((batch_size, padded_size, n_embeddings)))
return tf.tensordot(one_hot, embedding_table, 1)
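# Shape sketch (illustrative values only):
#   table = tf.random.normal([100, 16])           # 100-row table of 16-dim embeddings
#   values = tf.zeros([8, 32], dtype=tf.int32)    # batch of 8 sequences, 32 indices each
#   mask = tf.ones([8, 32])                       # 1 keeps the embedding, 0 zeroes it out
#   emb = embedding_matmul(table, values, mask)   # -> shape [8, 32, 16]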
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp | trtis_cpp | CMakeLists | ##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)
project(tacotron2_inference LANGUAGES CXX CUDA)
if (DEFINED DEVEL AND NOT DEVEL EQUAL 0)
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
# g++ warnings
set(CPP_DEVEL_FLAGS "${CPP_DEVEL_FLAGS} -Wall")
set(CPP_DEVEL_FLAGS "${CPP_DEVEL_FLAGS} -Werror")
set(CPP_DEVEL_FLAGS "${CPP_DEVEL_FLAGS} -Wpedantic")
set(CPP_DEVEL_FLAGS "${CPP_DEVEL_FLAGS} -Weffc++")
set(CPP_DEVEL_FLAGS "${CPP_DEVEL_FLAGS} -Wextra")
set(CPP_DEVEL_FLAGS "${CPP_DEVEL_FLAGS} -DDEVEL=1")
# nvcc warnings
set(CUDA_DEVEL_FLAGS "${CUDA_DEVEL_FLAGS} -Xcompiler=-Wall")
set(CUDA_DEVEL_FLAGS "${CUDA_DEVEL_FLAGS} -Xcompiler=-Werror")
set(CUDA_DEVEL_FLAGS "${CUDA_DEVEL_FLAGS} -Xcompiler=-Weffc++")
set(CUDA_DEVEL_FLAGS "${CUDA_DEVEL_FLAGS} -Xcompiler=-Wextra")
set(CUDA_DEVEL_FLAGS "${CUDA_DEVEL_FLAGS} -Xcompiler=-DDEVEL=1")
endif()
endif()
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3")
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CUDA_STANDARD 11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CPP_DEVEL_FLAGS} -fPIC")
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} ${CUDA_DEVEL_FLAGS} -Xcompiler=-fPIC")
enable_testing()
add_subdirectory("src")
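# Typical out-of-source build (a sketch; pass -DDEVEL=1 to enable the extra warnings above):
# mkdir build && cd build
# cmake -DCMAKE_BUILD_TYPE=Release ..
# make -j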
|
PyTorch/Classification/ConvNets | ConvNets | checkpoint2model | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
def add_parser_arguments(parser):
parser.add_argument(
"--checkpoint-path", metavar="<path>", help="checkpoint filename"
)
parser.add_argument(
"--weight-path", metavar="<path>", help="name of file in which to store weights"
)
parser.add_argument("--ema", action="store_true", default=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
add_parser_arguments(parser)
args = parser.parse_args()
checkpoint = torch.load(args.checkpoint_path, map_location=torch.device("cpu"))
key = "state_dict" if not args.ema else "ema_state_dict"
model_state_dict = {
k[len("module.") :] if "module." in k else k: v
for k, v in checkpoint[key].items()
}
print(f"Loaded model, acc : {checkpoint['best_prec1']}")
torch.save(model_state_dict, args.weight_path)
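# Example invocation (hypothetical paths):
# python checkpoint2model.py --checkpoint-path model_best.pth.tar --weight-path weights.pth
# Add --ema to export the exponential-moving-average weights instead of the regular ones.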
|
PyTorch/Translation/Transformer/fairseq/data/csrc | csrc | make_batches | // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <pybind11/stl.h>
#include <torch/extension.h>
namespace at { namespace native {
namespace {
bool is_batch_full(int64_t num_tokens, int64_t max_tokens, int64_t max_sentences, int64_t batch_length){
if (batch_length == 0){
return false;
} else if (batch_length == max_sentences || num_tokens > max_tokens){
return true;
} else {
return false;
}
}
}
std::vector<std::vector<int64_t> > make_batches(py::array_t<int64_t> src_lengths, py::array_t<int64_t> tgt_lengths, py::array_t<int64_t> idx_list, int64_t max_tokens, int64_t max_sentences, uint64_t bsz_mult, int64_t max_len){
std::vector<std::vector<int64_t> > batches;
auto src_l = src_lengths.unchecked<1>();
auto tgt_l = tgt_lengths.unchecked<1>();
auto idx_l = idx_list.unchecked<1>();
AT_ASSERTM(src_l.shape(0) == tgt_l.shape(0), "tgt_list and src_list should have the same shape");
AT_ASSERTM(idx_l.shape(0) == tgt_l.shape(0), "idx_list and tgt_list should have the same shape");
ssize_t nelem = src_l.shape(0);
int64_t sample_len =0;
std::vector<int64_t> sample_lens;
std::vector<int64_t> batch;
for (ssize_t i=0; i < nelem; i++){
int64_t idx = idx_l(i);
int64_t sample_num_tokens = std::max(src_l(idx), tgt_l(idx));
if (sample_num_tokens > max_len) continue;
sample_len = std::max(sample_len, sample_num_tokens);
sample_lens.push_back(sample_num_tokens);
int64_t num_tokens = (batch.size() + 1) * sample_len;
if (is_batch_full(num_tokens, max_tokens, max_sentences, batch.size())){
int64_t mode_len = std::max(batch.size() / bsz_mult * bsz_mult, batch.size() % bsz_mult);
std::vector<int64_t> new_batch;
new_batch.reserve(mode_len);
std::copy(batch.begin()+mode_len, batch.end(), std::back_inserter(new_batch));
batch.erase(batch.begin()+mode_len, batch.end());
sample_lens.erase(sample_lens.begin(), sample_lens.begin()+mode_len);
// sample_lens always contains at least one element here
sample_len = *std::max_element(sample_lens.begin(), sample_lens.end());
batches.push_back(batch);
batch = new_batch;
}
batch.push_back(idx);
}
if (batch.size() > 0) batches.push_back(batch);
return batches;
}
}}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m){
m.def("make_batches", &at::native::make_batches);
}
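// Python-side usage sketch (the extension module name depends on how it is built):
//   import batching_ext
//   batches = batching_ext.make_batches(src_lengths, tgt_lengths, indices, 4096, 128, 8, 256)
//   'batches' is a list of lists of sample indices, each respecting the token/sentence budget.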
|
PyTorch/Segmentation/nnUNet/triton | triton | run_offline_performance_test_on_triton | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ],
where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ],
pass `--shape IMAGE:3,224,224`.
"""
import argparse
import csv
import os
import sys
from pathlib import Path
from typing import Dict, List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def update_performance_data(results: List, batch_size: int, performance_partial_file: str):
row: Dict = {"batch_size": batch_size}
with open(performance_partial_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
def offline_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Static batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
results: List[Dict] = list()
for batch_size in batch_sizes:
print(f"Running performance tests for batch size: {batch_size}")
performance_partial_file = f"triton_performance_partial_{batch_size}.csv"
exec_args = f"""-max-threads {triton_instances} \
-m {model_name} \
-x 1 \
-c {triton_instances} \
-t {triton_instances} \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_partial_file} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
print("Performance results for static batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true",
default=False)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
offline_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
if __name__ == "__main__":
main()
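# Example invocation (hypothetical model name and input shape; assumes the repo root is on PYTHONPATH):
# python -m triton.run_offline_performance_test_on_triton \
#     --model-name nnunet --batch-sizes 1,2,4 \
#     --result-path triton_performance_offline.csv \
#     --input-shape INPUT__0:4,128,128,128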
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils | utils | comm | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import time
import torch
import torch.distributed as dist
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.IntTensor([tensor.numel()]).to("cuda")
size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
def synchronized_timestamp():
torch.cuda.synchronize()
return time.time()
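# Usage sketch (inside an already-initialized torch.distributed job):
#   all_predictions = all_gather(per_rank_predictions)  # list with one entry per rank
#   losses_reduced = reduce_dict(loss_dict)             # rank 0 holds the averaged values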
|
PyTorch/SpeechSynthesis/FastPitch/scripts/mandarin_chinese | mandarin_chinese | split_sf | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pathlib import Path
# Define val and test; the remaining ones will be train IDs
val_ids = {
'com_SF_ce227', 'com_SF_ce832', 'com_SF_ce912','com_SF_ce979',
'com_SF_ce998', 'com_SF_ce1045', 'com_SF_ce1282','com_SF_ce1329',
'com_SF_ce1350', 'com_SF_ce1376', 'com_SF_ce1519','com_SF_ce1664',
'com_SF_ce1777', 'com_SF_ce1843', 'com_SF_ce2017','com_SF_ce2042',
'com_SF_ce2100', 'com_SF_ce2251', 'com_SF_ce2443','com_SF_ce2566',
}
test_ids = {
'com_SF_ce161', 'com_SF_ce577', 'com_SF_ce781', 'com_SF_ce814',
'com_SF_ce1042', 'com_SF_ce1089', 'com_SF_ce1123', 'com_SF_ce1425',
'com_SF_ce1514', 'com_SF_ce1577', 'com_SF_ce1780', 'com_SF_ce1857',
'com_SF_ce1940', 'com_SF_ce2051', 'com_SF_ce2181', 'com_SF_ce2258',
'com_SF_ce2406', 'com_SF_ce2512', 'com_SF_ce2564', 'com_SF_ce2657'
}
def generate(fpath, ids_text, pitch=True, text=True):
with open(fpath, 'w') as f:
for id_, txt in ids_text.items():
row = f"wavs/{id_}.wav"
row += "|" + f"pitch/{id_}.pt" if pitch else ""
row += "|" + txt if text else ""
f.write(row + "\n")
def generate_inference_tsv(fpath, ids_text):
with open(fpath, 'w') as f:
f.write("output\ttext\n")
for id_, txt in ids_text.items():
f.write(f"{id_}.wav\t{txt}\n")
def main():
parser = argparse.ArgumentParser(
description='SF bilingual dataset filelists generator')
parser.add_argument('transcripts', type=Path, default='./text_SF.txt',
help='Path to SF bilingual dataset transcripts')
parser.add_argument('output_dir', default='data/filelists', type=Path,
help='Directory to generate filelists to')
args = parser.parse_args()
with open(args.transcripts) as f:
# A dict of ID:transcript pairs
transcripts = dict(line.replace("\ufeff", "").replace("-", "-").strip().split(' ', 1)
for line in f)
transcripts = {id_.replace("com_DL", "com_SF"): text.lower()
for id_, text in transcripts.items()}
val_ids_text = {id_: transcripts[id_] for id_ in val_ids}
test_ids_text = {id_: transcripts[id_] for id_ in test_ids}
train_ids_text = {id_: transcripts[id_] for id_ in transcripts
if id_ not in test_ids and id_ not in val_ids}
prefix = Path(args.output_dir, "sf_audio_pitch_text_")
generate(str(prefix) + "val.txt", val_ids_text)
generate(str(prefix) + "test.txt", test_ids_text)
generate(str(prefix) + "train.txt", train_ids_text)
prefix = Path(args.output_dir, "sf_audio_")
generate(str(prefix) + "val.txt", val_ids_text, False, False)
generate(str(prefix) + "test.txt", test_ids_text, False, False)
generate(str(prefix) + "train.txt", train_ids_text, False, False)
# train + val + test for pre-processing
generate(Path(args.output_dir, "sf_audio_text.txt"),
{**val_ids_text, **test_ids_text, **train_ids_text}, False, True)
generate_inference_tsv(Path(args.output_dir, "sf_test.tsv"), test_ids_text)
if __name__ == '__main__':
main()
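# Example invocation (hypothetical paths):
# python split_sf.py ./text_SF.txt data/filelists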
|
TensorFlow2/LanguageModeling/BERT | BERT | model_saving_utils | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to save models."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import os
from absl import logging
import tensorflow as tf
import typing
def export_bert_model(model_export_path: typing.Text,
model: tf.keras.Model,
checkpoint_dir: typing.Optional[typing.Text] = None,
restore_model_using_load_weights: bool = False) -> None:
"""Export BERT model for serving which does not include the optimizer.
Arguments:
model_export_path: Path to which exported model will be saved.
model: Keras model object to export.
checkpoint_dir: Path from which model weights will be loaded, if
specified.
restore_model_using_load_weights: Whether to use checkpoint.restore() API
for custom checkpoint or to use model.load_weights() API.
There are 2 different ways to save checkpoints. One is using
tf.train.Checkpoint and another is using Keras model.save_weights().
Custom training loop implementation uses tf.train.Checkpoint API
and Keras ModelCheckpoint callback internally uses model.save_weights()
API. Since these two APIs cannot be used together, the model loading logic
must take into account how the model checkpoint was saved.
Raises:
ValueError when either model_export_path or model is not specified.
"""
if not model_export_path:
raise ValueError('model_export_path must be specified.')
if not isinstance(model, tf.keras.Model):
raise ValueError('model must be a tf.keras.Model object.')
if checkpoint_dir:
# Keras compile/fit() was used to save checkpoint using
# model.save_weights().
if restore_model_using_load_weights:
model_weight_path = os.path.join(checkpoint_dir, 'checkpoint')
assert tf.io.gfile.exists(model_weight_path)
model.load_weights(model_weight_path)
# tf.train.Checkpoint API was used via custom training loop logic.
else:
checkpoint = tf.train.Checkpoint(model=model)
# Restores the model from latest checkpoint.
latest_checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
assert latest_checkpoint_file
logging.info('Checkpoint file %s found and restoring from '
'checkpoint', latest_checkpoint_file)
checkpoint.restore(
latest_checkpoint_file).assert_existing_objects_matched()
model.save(model_export_path, include_optimizer=False, save_format='tf')
class BertModelCheckpoint(tf.keras.callbacks.Callback):
"""Keras callback that saves model at the end of every epoch."""
def __init__(self, checkpoint_dir, checkpoint):
"""Initializes BertModelCheckpoint.
Arguments:
checkpoint_dir: Directory of the to be saved checkpoint file.
checkpoint: tf.train.Checkpoint object.
"""
super(BertModelCheckpoint, self).__init__()
self.checkpoint_file_name = os.path.join(
checkpoint_dir, 'bert_training_checkpoint_step_{global_step}.ckpt')
assert isinstance(checkpoint, tf.train.Checkpoint)
self.checkpoint = checkpoint
def on_epoch_end(self, epoch, logs=None):
global_step = tf.keras.backend.get_value(self.model.optimizer.iterations)
formatted_file_name = self.checkpoint_file_name.format(
global_step=global_step)
saved_path = self.checkpoint.save(formatted_file_name)
logging.info('Saving model TF checkpoint to : %s', saved_path)
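# Usage sketch (hypothetical paths; `model` and `optimizer` come from the training script):
#   checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
#   callbacks = [BertModelCheckpoint('/tmp/ckpts', checkpoint)]
#   ...
#   export_bert_model('/tmp/bert_export', model=model, checkpoint_dir='/tmp/ckpts')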
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit | deployment_toolkit | triton_client | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import time
from enum import Enum
from typing import Any, Dict, Optional
# pytype: disable=import-error
from .utils import parse_server_url
try:
import tritonclient.grpc as grpc_client
from tritonclient import utils as client_utils # noqa: F401
except ImportError:
try:
import tritonclientutils as client_utils # noqa: F401
import tritongrpcclient as grpc_client
except ImportError:
client_utils = None
grpc_client = None
try:
import tritonclient.http as http_client
except (ImportError, RuntimeError):
try:
import tritonhttpclient as http_client
except (ImportError, RuntimeError):
http_client = None
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
class TritonServerNotReadyException(Exception):
pass
# TODO: in which state "native" warm-up takes place?
class ModelState(Enum):
"""Describe model state in Triton.
Attributes:
LOADING: Loading of model
UNLOADING: Unloading of model
UNAVAILABLE: Model is missing or could not be loaded
READY: Model is ready for inference
"""
LOADING = "LOADING"
UNLOADING = "UNLOADING"
UNAVAILABLE = "UNAVAILABLE"
READY = "READY"
class TritonClientProtocol(Enum):
"""Describe protocol with which client communicates with Triton"""
GRPC = "grpc"
HTTP = "http"
# TODO: How to obtain models that are available but not loaded yet?
# TODO: encode model_name and model_version as for ex. model_name/model_version (here and in many other places)
# TODO: How to obtain server model loading mode
class TritonClient:
"""Provide high-level API for communicating with Triton.
Usage:
>>> client = TritonClient("grpc://127.0.0.1:8001")
>>> client.load_model("ResNet50")
The above sample loads the model on Triton so that inference requests can then be issued against it.
Args:
server_url: url where Triton is binded in format `<protocol>://<address/hostname>:<port>`
verbose: provide verbose logs from tritonclient library
Attributes:
client: handle to low-level API client obtained from tritonclient python package
Raises:
RuntimeError: in case of missing tritonclient library for selected protocol
or problems with connecting to Triton or its not in ready state yet.
ValueError: in case of errors in parsing provided server_url. Example sources of errors are: a missing protocol or an unknown protocol being requested.
InferenceServerClient: in case of error in processing initial requests on server side
"""
def __init__(self, server_url: str, *, verbose: bool = False):
self.server_url = server_url
self._verbose = verbose
self.client = self._create_client(server_url=server_url, verbose=verbose)
def wait_for_server_ready(self, timeout: int):
"""
Parameters
----------
timeout : int
timeout in seconds to send a ready status
request to the server before raising
an exception
Raises
------
TritonServerNotReadyException
If server readiness could not be
determined within the given timeout
"""
retries = timeout
while retries > 0:
try:
if self.client.is_server_ready() and self.client.is_server_live():
return
else:
time.sleep(1)
retries -= 1
except Exception as e:
time.sleep(1)
retries -= 1
if retries == 0:
raise TritonServerNotReadyException(e)
raise TritonServerNotReadyException(
"Could not determine server readiness. " "Number of retries exceeded."
)
def get_server_metadata(self):
"""Returns `server metadata <https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#server-metadata-response-json-object>`_.
>>> client.get_server_metadata()
{name: "triton", version: "2.5.0", extensions: ["classification", "sequence", "model_repository", "schedule_policy", "model_configuration", "system_shared_memory", "cuda_shared_memory", "binary_tensor_data", "statistics"]}
Returns:
Dictionary with server metadata.
Raises:
InferenceServerClient: in case of error in processing request on server side
"""
server_metadata = self.client.get_server_metadata()
server_metadata = self._format_response(server_metadata)
return server_metadata
def get_model_metadata(self, model_name: str, model_version: Optional[str] = None):
"""Returns `model metadata <https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#model-metadata>`_.
Args:
model_name: name of the model which metadata is requested to obtain.
model_version: version of the model which metadata is requested to obtain.
Returns:
Dictionary with model metadata.
Raises:
InferenceServerClient: in case of error in processing request on server side.
"""
model_metadata = self.client.get_model_metadata(model_name, model_version)
model_metadata = self._format_response(model_metadata)
return model_metadata
def load_model(self, model_name: str) -> None:
"""Requests that a model be loaded into Triton, or reloaded if the model is already loaded.
Args:
model_name: name of the model to load
Raises:
InferenceServerException: in case of error in processing request on server side.
"""
self.client.load_model(model_name)
def wait_for_model(
self,
*,
model_name: str,
model_version: str,
timeout_s: int = 120,
check_interval_s: int = 5,
) -> Dict[str, Any]:
"""Iteratively check for model state until model is ready or unavailable.
Args:
model_name: name of the model to wait for
model_version: version of the model to wait for
timeout_s: how long in seconds to wait until the model is in a ready or unavailable state
check_interval_s: time interval in seconds at which the state of the model should be checked
Returns:
Dictionary with model metadata.
Raises:
RuntimeError: in case model is not ready yet (is marked unavailable or timeout has been reached)
InferenceServerException: in case of error in processing request on server side.
"""
def _shall_wait(model_state: ModelState) -> bool:
return model_state not in [ModelState.UNAVAILABLE, ModelState.READY]
elapsed_time_s = 0
start_time_s = time.time()
state = self.get_model_state(model_name, model_version)
while elapsed_time_s < timeout_s and _shall_wait(state):
LOGGER.info(
f"waiting for model... {elapsed_time_s:.0f}/{timeout_s} state={state}"
)
time.sleep(check_interval_s)
state = self.get_model_state(model_name, model_version)
elapsed_time_s = time.time() - start_time_s
if not self.client.is_model_ready(model_name):
raise RuntimeError(
f"Model {model_name} requested to be loaded, but is not ready"
)
model_metadata = self.client.get_model_metadata(model_name)
model_metadata = self._format_response(model_metadata)
return model_metadata
def get_model_state(self, model_name: str, model_version: str) -> ModelState:
"""Obtains the state of a model on Triton.
Args:
model_name: name of the model whose state is requested.
model_version: version of the model whose state is requested.
Returns:
Requested model state.
Raises:
InferenceServerException: in case of error in processing request on server side.
"""
def handle_http_response(models):
models_states = {}
for model in models:
if not model.get("version"):
continue
model_state = (
ModelState(model["state"])
if model.get("state")
else ModelState.LOADING
)
models_states[(model["name"], model["version"])] = model_state
return models_states
def handle_grpc_response(models):
models_states = {}
for model in models:
if not model.version:
continue
model_state = (
ModelState(model.state) if model.state else ModelState.LOADING
)
models_states[(model.name, model.version)] = model_state
return models_states
repository_index = self.client.get_model_repository_index()
if isinstance(repository_index, list):
models_states = handle_http_response(models=repository_index)
else:
models_states = handle_grpc_response(models=repository_index.models)
return models_states.get((model_name, model_version), ModelState.UNAVAILABLE)
def _format_response(self, response):
if not isinstance(response, dict):
response = json.loads(
grpc_client.MessageToJson(response, preserving_proto_field_name=True)
)
return response
def _create_client(self, server_url: str, verbose: bool):
protocol, host, port = parse_server_url(server_url)
if protocol == TritonClientProtocol.HTTP and http_client is None:
raise RuntimeError(
"Could not obtain Triton HTTP client. Install extras while installing tritonclient wheel. "
"Example installation call: "
"find /workspace/install/python/ -iname triton*manylinux*.whl -exec pip install {}[all] \\;"
)
LOGGER.debug(f"Connecting to {server_url}")
client_lib = {
TritonClientProtocol.HTTP.value: http_client,
TritonClientProtocol.GRPC.value: grpc_client,
}[protocol.value]
server_url = f"{host}:{port}"
# pytype: disable=attribute-error
client = client_lib.InferenceServerClient(url=server_url, verbose=verbose)
# pytype: enable=attribute-error
return client
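# A minimal usage sketch (illustrative only: the URL, model name and version
# below are hypothetical, and the tritonclient package for the chosen protocol
# must be installed):
#
#   client = TritonClient("grpc://127.0.0.1:8001", verbose=False)
#   client.wait_for_server_ready(timeout=60)
#   client.load_model("ResNet50")
#   metadata = client.wait_for_model(
#       model_name="ResNet50", model_version="1", timeout_s=120, check_interval_s=5
#   )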
|
TensorFlow/LanguageModeling/BERT/biobert/scripts | scripts | ner_bc5cdr-disease | #!/bin/bash
echo "Container nvidia build = " $NVIDIA_BUILD_ID
init_checkpoint=${1:-"/results/biobert_tf_uncased_base/model.ckpt"}
train_batch_size=${2:-8}
learning_rate=${3:-3.125e-6}
cased=${4:-false}
precision=${5:-"fp16"}
use_xla=${6:-"true"}
num_gpu=${7:-"16"}
seq_length=${8:-128}
bert_model=${9:-"base"}
eval_batch_size=${10:-8} #Eval and Predict BS is assumed to be same
epochs=${11:-"100.0"}
if [ "$cased" = "true" ] ; then
DO_LOWER_CASE=0
CASING_DIR_PREFIX="cased"
case_flag="--do_lower_case=False"
else
DO_LOWER_CASE=1
CASING_DIR_PREFIX="uncased"
case_flag="--do_lower_case=True"
fi
if [ "$bert_model" = "large" ] ; then
export BERT_DIR=/workspace/bert/data/download/google_pretrained_weights/${CASING_DIR_PREFIX}_L-24_H-1024_A-16
else
export BERT_DIR=/workspace/bert/data/download/google_pretrained_weights/${CASING_DIR_PREFIX}_L-12_H-768_A-12
fi
export GBS=$(expr $train_batch_size \* $num_gpu)
printf -v TAG "tf_bert_biobert_ner_bc5cdr_disease_%s_%s_gbs%d" "$bert_model" "$precision" $GBS
DATESTAMP=`date +'%y%m%d%H%M%S'`
DATASET_DIR=/workspace/bert/data/biobert/BC5CDR/disease
OUTPUT_DIR=/results/${TAG}_${DATESTAMP}
mkdir -p ${OUTPUT_DIR}
use_fp16=""
if [ "$precision" = "fp16" ] ; then
echo "fp16 activated!"
use_fp16="--amp"
else
echo "fp32/tf32 activated!"
use_fp16="--noamp"
fi
if [ "$use_xla" = "true" ] ; then
use_xla_tag="--use_xla"
echo "XLA activated"
else
use_xla_tag="--nouse_xla"
fi
if [ $num_gpu -gt 1 ] ; then
mpi_command="mpirun -np $num_gpu -H localhost:$num_gpu \
--allow-run-as-root -bind-to none -map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH -mca pml ob1 -mca btl ^openib"
use_hvd="--horovod"
else
mpi_command=""
use_hvd=""
fi
$mpi_command python3 /workspace/bert/run_ner.py \
--do_prepare=true \
--do_train=true \
--do_eval=true \
--do_predict=true \
--task_name="bc5cdr" \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint=$init_checkpoint \
--num_train_epochs=$epochs \
--data_dir=$DATASET_DIR \
--output_dir=$OUTPUT_DIR \
--learning_rate=$learning_rate \
--train_batch_size=$train_batch_size \
--eval_batch_size=$eval_batch_size \
--predict_batch_size=$eval_batch_size \
--max_seq_length=$seq_length \
"$use_hvd" "$use_fp16" $use_xla_tag $case_flag
|
TensorFlow2/Classification/ConvNets/scripts/docker | docker | requirements | six
google-api-python-client>=1.6.7
google-cloud-bigquery>=0.31.0
kaggle>=1.3.9
numpy>=1.15.4
oauth2client>=4.1.2
pandas>=0.22.0
psutil>=5.4.3
py-cpuinfo>=3.3.0
scipy>=0.19.1
tensorflow-hub>=0.6.0
tensorflow-model-optimization>=0.2.1
tensorflow-datasets
tensorflow-addons
dataclasses
gin-config
tf_slim>=1.1.0
typing
sentencepiece
Cython
matplotlib
opencv-python-headless
pyyaml
Pillow
-e git+https://github.com/cocodataset/cocoapi#egg=pycocotools&subdirectory=PythonAPI |
PyTorch/Segmentation/MaskRCNN/pytorch/scripts | scripts | eval | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#Predictions will be stored in `FOLDER/inference`
#1x8x4 DGX1V
GPU=8
CONFIG='configs/e2e_mask_rcnn_R_50_FPN_1x.yaml'
#This folder should contain a file called 'last_checkpoint' which contains the path to the actual checkpoint
FOLDER='/results'
#Example
# /results
# ------last_checkpoint
# ------model.pth
#
# last_checkpoint
#-----------------------------
#|/results/model.pth |
#| |
#| |
#| |
#| |
#| |
#-----------------------------
LOGFILE="$FOLDER/joblog.log"
if ! [ -d "$FOLDER" ]; then mkdir $FOLDER; fi
python3 -m torch.distributed.launch --nproc_per_node=$GPU tools/test_net.py \
--config-file $CONFIG \
DATASETS.TEST "(\"coco_2017_val\",)" \
DTYPE "float16" \
OUTPUT_DIR $FOLDER \
| tee $LOGFILE
|
TensorFlow2/Segmentation/UNet_Medical/examples | examples | unet_TRAIN_FULL_TF-AMP | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches U-Net run in TF-AMP and runs 5-fold cross-validation training for 6400 iterations.
# Usage:
# bash unet_TRAIN_FULL_TF-AMP.sh <number of GPUs> <path to dataset> <path to results directory> <batch size>
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_every 100 --max_steps 6400 --batch_size $4 --exec_mode train_and_evaluate --fold 0 --augment --xla --amp > $3/log_TF-AMP_${1}GPU_fold0.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_every 100 --max_steps 6400 --batch_size $4 --exec_mode train_and_evaluate --fold 1 --augment --xla --amp > $3/log_TF-AMP_${1}GPU_fold1.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_every 100 --max_steps 6400 --batch_size $4 --exec_mode train_and_evaluate --fold 2 --augment --xla --amp > $3/log_TF-AMP_${1}GPU_fold2.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_every 100 --max_steps 6400 --batch_size $4 --exec_mode train_and_evaluate --fold 3 --augment --xla --amp > $3/log_TF-AMP_${1}GPU_fold3.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_every 100 --max_steps 6400 --batch_size $4 --exec_mode train_and_evaluate --fold 4 --augment --xla --amp > $3/log_TF-AMP_${1}GPU_fold4.txt
python runtime/parse_results.py --model_dir $3 --exec_mode convergence --env TF-AMP_${1}GPU |
PaddlePaddle/Classification/RN50v1.5/utils | utils | config | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import argparse
import logging
import distutils.util
import dllogger
from utils.mode import RunScope
from utils.utility import get_num_trainers
from utils.save_load import _PDOPT_SUFFIX, _PDPARAMS_SUFFIX
_AUTO_LAST_EPOCH = 'auto'
def _get_full_path_of_ckpt(args):
if args.from_checkpoint is None:
args.last_epoch_of_checkpoint = -1
return
def _check_file_exist(path_with_prefix):
pdopt_path = path_with_prefix + _PDOPT_SUFFIX
pdparams_path = path_with_prefix + _PDPARAMS_SUFFIX
found = False
if os.path.exists(pdopt_path) and os.path.exists(pdparams_path):
found = True
return found, pdopt_path, pdparams_path
target_from_checkpoint = os.path.join(args.from_checkpoint,
args.model_prefix)
if args.last_epoch_of_checkpoint is None:
args.last_epoch_of_checkpoint = -1
elif args.last_epoch_of_checkpoint == _AUTO_LAST_EPOCH:
folders = os.listdir(args.from_checkpoint)
args.last_epoch_of_checkpoint = -1
for folder in folders:
tmp_ckpt_path = os.path.join(args.from_checkpoint, folder,
args.model_prefix)
try:
folder = int(folder)
except ValueError:
logging.warning(
f"Skip folder '{folder}' since its name is not integer-convertable."
)
continue
if folder > args.last_epoch_of_checkpoint and \
_check_file_exist(tmp_ckpt_path)[0]:
args.last_epoch_of_checkpoint = folder
epoch_with_prefix = os.path.join(str(args.last_epoch_of_checkpoint), args.model_prefix) \
if args.last_epoch_of_checkpoint > -1 else args.model_prefix
target_from_checkpoint = os.path.join(args.from_checkpoint,
epoch_with_prefix)
else:
try:
args.last_epoch_of_checkpoint = int(args.last_epoch_of_checkpoint)
except ValueError:
raise ValueError(f"The value of --last-epoch-of-checkpoint should be None, {_AUTO_LAST_EPOCH}" \
f" or integer >= 0, but receive {args.last_epoch_of_checkpoint}")
args.from_checkpoint = target_from_checkpoint
found, pdopt_path, pdparams_path = _check_file_exist(args.from_checkpoint)
if not found:
args.from_checkpoint = None
args.last_epoch_of_checkpoint = -1
logging.warning(
f"Cannot find {pdopt_path} and {pdparams_path}, disable --from-checkpoint."
)
def _get_full_path_of_pretrained_params(args):
if args.from_pretrained_params is None:
args.last_epoch_of_checkpoint = -1
return
args.from_pretrained_params = os.path.join(args.from_pretrained_params,
args.model_prefix)
pdparams_path = args.from_pretrained_params + _PDPARAMS_SUFFIX
if not os.path.exists(pdparams_path):
args.from_pretrained_params = None
logging.warning(
f"Cannot find {pdparams_path}, disable --from-pretrained-params.")
args.last_epoch_of_checkpoint = -1
def print_args(args):
args_for_log = copy.deepcopy(args)
# Due to dllogger cannot serialize Enum into JSON.
args_for_log.run_scope = args_for_log.run_scope.value
dllogger.log(step='PARAMETER', data=vars(args_for_log))
def check_and_process_args(args):
# Process the run scope
run_scope = None
for scope in RunScope:
if args.run_scope == scope.value:
run_scope = scope
break
assert run_scope is not None, \
f"only support {[scope.value for scope in RunScope]} as run_scope"
args.run_scope = run_scope
# Process image layout and channel
args.image_channel = args.image_shape[0]
if args.data_layout == "NHWC":
args.image_shape = [
args.image_shape[1], args.image_shape[2], args.image_shape[0]
]
# Process learning rate
args.lr = get_num_trainers() * args.lr
# Process model loading
assert not (args.from_checkpoint is not None and \
args.from_pretrained_params is not None), \
"--from-pretrained-params and --from-checkpoint should " \
"not be set simultaneously."
_get_full_path_of_pretrained_params(args)
_get_full_path_of_ckpt(args)
args.start_epoch = args.last_epoch_of_checkpoint + 1
# Process benchmark
if args.benchmark:
assert args.run_scope in [
RunScope.TRAIN_ONLY, RunScope.EVAL_ONLY
], "If benchmark enabled, run_scope must be `train_only` or `eval_only`"
# Only run one epoch when benchmark or eval_only.
if args.benchmark or \
(args.run_scope == RunScope.EVAL_ONLY):
args.epochs = args.start_epoch + 1
if args.run_scope == RunScope.EVAL_ONLY:
args.eval_interval = 1
def add_global_args(parser):
group = parser.add_argument_group('Global')
group.add_argument(
'--output-dir',
type=str,
default='./output/',
help='A path to store trained models.')
group.add_argument(
'--run-scope',
default='train_eval',
choices=('train_eval', 'train_only', 'eval_only'),
help='Running scope. It should be one of {train_eval, train_only, eval_only}.'
)
group.add_argument(
'--epochs',
type=int,
default=90,
help='The number of epochs for training.')
group.add_argument(
'--save-interval',
type=int,
default=1,
help='The iteration interval to save checkpoints.')
group.add_argument(
'--eval-interval',
type=int,
default=1,
help='The iteration interval to test trained models on a given validation dataset. ' \
'Ignored when --run-scope is train_only.'
)
group.add_argument(
'--print-interval',
type=int,
default=10,
help='The iteration interval to show training/evaluation message.')
group.add_argument(
'--report-file',
type=str,
default='./report.json',
help='A file in which to store JSON experiment report.')
group.add_argument(
'--data-layout',
default='NCHW',
choices=('NCHW', 'NHWC'),
help='Data format. It should be one of {NCHW, NHWC}.')
group.add_argument(
'--benchmark', action='store_true', help='To enable benchmark mode.')
group.add_argument(
'--benchmark-steps',
type=int,
default=100,
help='Steps for benchmark run, only be applied when --benchmark is set.'
)
group.add_argument(
'--benchmark-warmup-steps',
type=int,
default=100,
help='Warmup steps for benchmark run, only be applied when --benchmark is set.'
)
group.add_argument(
'--model-prefix',
type=str,
default="resnet_50_paddle",
help='The prefix name of model files to save/load.')
group.add_argument(
'--from-pretrained-params',
type=str,
default=None,
help='A folder path which contains pretrained parameters, that is a file in name' \
' --model-prefix + .pdparams. It should not be set with --from-checkpoint' \
' at the same time.'
)
group.add_argument(
'--from-checkpoint',
type=str,
default=None,
help='A checkpoint path to resume training. It should not be set ' \
'with --from-pretrained-params at the same time. The path provided ' \
'could be a folder contains < epoch_id/ckpt_files > or < ckpt_files >.'
)
group.add_argument(
'--last-epoch-of-checkpoint',
type=str,
default=None,
help='The epoch id of the checkpoint given by --from-checkpoint. ' \
'It should be None, auto or integer >= 0. If it is set as ' \
'None, then training will start from 0-th epoch. If it is set as ' \
'auto, then it will search for the largest integer-named folder under ' \
'--from-checkpoint which contains the required checkpoint. ' \
'Default is None.'
)
group.add_argument(
'--show-config',
type=distutils.util.strtobool,
default=True,
help='To show arguments.')
group.add_argument(
'--enable-cpu-affinity',
type=distutils.util.strtobool,
default=True,
help='To enable in-built GPU-CPU affinity.')
return parser
def add_advance_args(parser):
group = parser.add_argument_group('Advanced Training')
# AMP
group.add_argument(
'--amp',
action='store_true',
help='Enable automatic mixed precision training (AMP).')
group.add_argument(
'--scale-loss',
type=float,
default=1.0,
help='The loss scalar for AMP training, only be applied when --amp is set.'
)
group.add_argument(
'--use-dynamic-loss-scaling',
action='store_true',
help='Enable dynamic loss scaling in AMP training, only be applied when --amp is set.'
)
group.add_argument(
'--use-pure-fp16',
action='store_true',
help='Enable pure FP16 training, only be applied when --amp is set.')
group.add_argument(
'--fuse-resunit',
action='store_true',
help='Enable CUDNNv8 ResUnit fusion, only be applied when --amp is set.')
# ASP
group.add_argument(
'--asp',
action='store_true',
help='Enable automatic sparse training (ASP).')
group.add_argument(
'--prune-model',
action='store_true',
help='Prune model to 2:4 sparse pattern, only be applied when --asp is set.'
)
group.add_argument(
'--mask-algo',
default='mask_1d',
choices=('mask_1d', 'mask_2d_greedy', 'mask_2d_best'),
help='The algorithm to generate sparse masks. It should be one of ' \
'{mask_1d, mask_2d_greedy, mask_2d_best}. This only be applied ' \
'when --asp and --prune-model is set.'
)
return parser
def add_dataset_args(parser):
def float_list(x):
return list(map(float, x.split(',')))
def int_list(x):
return list(map(int, x.split(',')))
dataset_group = parser.add_argument_group('Dataset')
dataset_group.add_argument(
'--image-root',
type=str,
default='/imagenet',
help='A root folder of train/val images. It should contain train and val folders, ' \
'which store corresponding images.'
)
dataset_group.add_argument(
'--image-shape',
type=int_list,
default=[4, 224, 224],
help='The image shape. Its shape should be [channel, height, width].')
# Data Loader
dataset_group.add_argument(
'--batch-size',
type=int,
default=256,
help='The batch size for both training and evaluation.')
dataset_group.add_argument(
'--dali-random-seed',
type=int,
default=42,
help='The random seed for DALI data loader.')
dataset_group.add_argument(
'--dali-num-threads',
type=int,
default=4,
help='The number of threads applied to DALI data loader.')
dataset_group.add_argument(
'--dali-output-fp16',
action='store_true',
help='Output FP16 data from DALI data loader.')
# Augmentation
augmentation_group = parser.add_argument_group('Data Augmentation')
augmentation_group.add_argument(
'--crop-size',
type=int,
default=224,
help='The size to crop input images.')
augmentation_group.add_argument(
'--rand-crop-scale',
type=float_list,
default=[0.08, 1.],
help='Range from which to choose a random area fraction.')
augmentation_group.add_argument(
'--rand-crop-ratio',
type=float_list,
default=[3.0 / 4, 4.0 / 3],
help='Range from which to choose a random aspect ratio (width/height).')
augmentation_group.add_argument(
'--normalize-scale',
type=float,
default=1.0 / 255.0,
help='A scalar to normalize images.')
augmentation_group.add_argument(
'--normalize-mean',
type=float_list,
default=[0.485, 0.456, 0.406],
help='The mean values to normalize RGB images.')
augmentation_group.add_argument(
'--normalize-std',
type=float_list,
default=[0.229, 0.224, 0.225],
help='The std values to normalize RGB images.')
augmentation_group.add_argument(
'--resize-short',
type=int,
default=256,
help='The length of the shorter dimension of the resized image.')
return parser
def add_model_args(parser):
group = parser.add_argument_group('Model')
group.add_argument(
'--model-arch-name',
type=str,
default='ResNet50',
help='The model architecture name. It should be one of {ResNet50}.')
group.add_argument(
'--num-of-class',
type=int,
default=1000,
help='The number of image classes.')
group.add_argument(
'--bn-weight-decay',
action='store_true',
help='Apply weight decay to BatchNorm shift and scale.')
return parser
def add_training_args(parser):
group = parser.add_argument_group('Training')
group.add_argument(
'--label-smoothing',
type=float,
default=0.1,
help='The ratio of label smoothing.')
group.add_argument(
'--optimizer',
default='Momentum',
metavar="OPTIMIZER",
choices=('Momentum',),
help='The name of optimizer. It should be one of {Momentum}.')
group.add_argument(
'--momentum',
type=float,
default=0.875,
help='The momentum value of optimizer.')
group.add_argument(
'--weight-decay',
type=float,
default=3.0517578125e-05,
help='The coefficient of weight decay.')
group.add_argument(
'--lr-scheduler',
default='Cosine',
metavar="LR_SCHEDULER",
choices=('Cosine',),
help='The name of learning rate scheduler. It should be one of {Cosine}.'
)
group.add_argument(
'--lr', type=float, default=0.256, help='The initial learning rate.')
group.add_argument(
'--warmup-epochs',
type=int,
default=5,
help='The number of epochs for learning rate warmup.')
group.add_argument(
'--warmup-start-lr',
type=float,
default=0.0,
help='The initial learning rate for warmup.')
return parser
def add_trt_args(parser):
group = parser.add_argument_group('Paddle-TRT')
group.add_argument(
'--device',
type=int,
default=0,
help='The GPU device id for Paddle-TRT inference.'
)
group.add_argument(
'--trt-inference-dir',
type=str,
default='./inference',
help='A path to store/load inference models. ' \
'export_model.py would export models to this folder, ' \
'then inference.py would load from here.'
)
group.add_argument(
'--trt-precision',
default='FP32',
choices=('FP32', 'FP16', 'INT8'),
help='The precision of TensorRT. It should be one of {FP32, FP16, INT8}.'
)
group.add_argument(
'--trt-workspace-size',
type=int,
default=(1 << 30),
help='The memory workspace size of TensorRT in bytes.')
group.add_argument(
'--trt-min-subgraph-size',
type=int,
default=3,
help='The minimal subgraph size to enable PaddleTRT.')
group.add_argument(
'--trt-use-static',
type=distutils.util.strtobool,
default=False,
help='Fix TensorRT engine at first running.')
group.add_argument(
'--trt-use-calib-mode',
type=distutils.util.strtobool,
default=False,
help='Use the PTQ calibration of PaddleTRT int8.')
group.add_argument(
'--trt-export-log-path',
type=str,
default='./export.json',
help='A file in which to store JSON model exporting report.')
group.add_argument(
'--trt-log-path',
type=str,
default='./inference.json',
help='A file in which to store JSON inference report.')
group.add_argument(
'--trt-use-synthetic',
type=distutils.util.strtobool,
default=False,
help='Apply synthetic data for benchmark.')
return parser
def parse_args(including_trt=False):
parser = argparse.ArgumentParser(
description="PaddlePaddle RN50v1.5 training script",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = add_global_args(parser)
parser = add_dataset_args(parser)
parser = add_model_args(parser)
parser = add_training_args(parser)
parser = add_advance_args(parser)
if including_trt:
parser = add_trt_args(parser)
args = parser.parse_args()
check_and_process_args(args)
return args
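# Minimal usage sketch (assumes this module is imported as utils.config from a
# training entry point and that dllogger has already been initialized):
#
#   from utils.config import parse_args, print_args
#
#   args = parse_args(including_trt=False)
#   if args.show_config:
#       print_args(args)
#   # After parsing, args.run_scope is a RunScope enum member, args.lr has been
#   # scaled by the number of trainers, and args.start_epoch follows from
#   # --last-epoch-of-checkpoint.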
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/trtis_client/src | src | TRTISClient | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "TRTISClient.hpp"
#include "request_http.h"
#include <sstream>
#include <stdexcept>
#include <string>
#include <unistd.h>
namespace ni = nvidia::inferenceserver;
namespace nic = nvidia::inferenceserver::client;
#define checkOperation(X) checkOperation_((X), #X " failed.")
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
void checkOperation_(nic::Error err, const std::string& msg)
{
if (!err.IsOk()) {
std::ostringstream oss;
oss << msg << " : " << err;
throw std::runtime_error(oss.str());
}
}
std::unique_ptr<nic::InferContext>
createInferContext(const std::string& url, const bool verbose)
{
const std::string modelName = "tacotron2waveglow";
std::map<std::string, std::string> http_headers;
// Create a health context and get the ready and live state of the
// server.
std::unique_ptr<nic::ServerHealthContext> health_ctx;
checkOperation(nic::ServerHealthHttpContext::Create(
&health_ctx, url, http_headers, verbose));
bool live, ready;
checkOperation(health_ctx->GetLive(&live));
checkOperation(health_ctx->GetReady(&ready));
if (verbose) {
std::cout << "Health for model " << modelName << ":" << std::endl;
std::cout << "Live: " << live << std::endl;
std::cout << "Ready: " << ready << std::endl;
}
// Create a status context and get the status of the model.
std::unique_ptr<nic::ServerStatusContext> status_ctx;
checkOperation(nic::ServerStatusHttpContext::Create(
&status_ctx, url, http_headers, modelName, verbose));
ni::ServerStatus server_status;
checkOperation(status_ctx->GetServerStatus(&server_status));
std::unique_ptr<nic::InferContext> inferContext;
checkOperation(nic::InferHttpContext::Create(
&inferContext,
url,
http_headers,
modelName,
-1 /* model_version */,
verbose));
return inferContext;
}
void setBatchSize(nic::InferContext& inferContext, const int batchSize)
{
// Set the context options to the requested batch size. Also request
// that all output tensors be returned.
std::unique_ptr<nic::InferContext::Options> options;
checkOperation(nic::InferContext::Options::Create(&options));
options->SetBatchSize(batchSize);
for (const auto& output : inferContext.Outputs()) {
options->AddRawResult(output);
}
checkOperation(inferContext.SetRunOptions(*options));
}
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
TRTISClient::TRTISClient(const std::string& url) : m_url(url)
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
std::vector<std::vector<float>> TRTISClient::execute(
const std::vector<std::string>& input,
int targetBatchSize,
const bool verbose)
{
size_t inputIdx = 0;
nic::Error err;
// Create the inference context for the model.
std::unique_ptr<nic::InferContext> inferContext
= createInferContext(m_url, verbose);
const int maxBatchSize = inferContext->MaxBatchSize();
if (targetBatchSize == 0) {
targetBatchSize = maxBatchSize;
} else if (maxBatchSize < targetBatchSize) {
throw std::runtime_error(
"Request batch size is greater than context can "
"handle:"
+ std::to_string(targetBatchSize) + " / "
+ std::to_string(maxBatchSize));
}
// allocate vectors
std::map<std::string, std::unique_ptr<nic::InferContext::Result>> results;
std::vector<std::vector<float>> output;
// loop until we've handle all input
while (inputIdx < input.size()) {
const int batchSize
= std::min(static_cast<int>(input.size() - inputIdx), targetBatchSize);
setBatchSize(*inferContext, batchSize);
// create input tensors
std::shared_ptr<nic::InferContext::Input> inputDataTensor;
checkOperation(inferContext->GetInput("INPUT", &inputDataTensor));
checkOperation(inputDataTensor->Reset());
checkOperation(inputDataTensor->SetShape(std::vector<int64_t>{batchSize}));
// queue up batch items
checkOperation(inputDataTensor->SetFromString(std::vector<std::string>(
input.begin() + inputIdx, input.begin() + inputIdx + batchSize)));
// execute synchronously
checkOperation(inferContext->Run(&results));
if (results.size() != 2) {
throw std::runtime_error(
"Got invalid number of tensor back: " + std::to_string(results.size())
+ ", but expected 2.");
}
const uint8_t* resultBytes;
size_t resultSize;
for (int batchIndex = 0; batchIndex < batchSize; ++batchIndex) {
checkOperation(
results["OUTPUT"]->GetRaw(batchIndex, &resultBytes, &resultSize));
const float* const wavData = reinterpret_cast<const float*>(resultBytes);
const size_t wavSize = resultSize / sizeof(*wavData);
checkOperation(results["OUTPUT_LENGTH"]->GetRaw(
batchIndex, &resultBytes, &resultSize));
const int32_t* const wavLength
= reinterpret_cast<const int32_t*>(resultBytes);
const size_t numLengths = resultSize / sizeof(*wavLength);
if (numLengths != 1) {
throw std::runtime_error(
"Got back output with multiple lengths: "
+ std::to_string(numLengths));
} else if (*wavLength > wavSize) {
throw std::runtime_error(
"Got sample length greater than tensor size: "
+ std::to_string(*wavLength) + "/" + std::to_string(wavSize));
}
output.emplace_back(std::vector<float>(wavData, wavData + *wavLength));
}
inputIdx += batchSize;
}
return output;
}
int TRTISClient::getMaxBatchSize() const
{
std::unique_ptr<nic::InferContext> inferContext
= createInferContext(m_url, false);
return inferContext->MaxBatchSize();
}
|
TensorFlow2/Detection/Efficientdet/visualize | visualize | __init__ | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Visualization library is mostly based on TensorFlow object detection API:
# https://github.com/tensorflow/models/tree/master/research/object_detection
|
TensorFlow/Detection/SSD/models/research/object_detection/matchers | matchers | bipartite_matcher_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.bipartite_matcher."""
import tensorflow as tf
from object_detection.matchers import bipartite_matcher
class GreedyBipartiteMatcherTest(tf.test.TestCase):
def test_get_expected_matches_when_all_rows_are_valid(self):
similarity_matrix = tf.constant([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]])
valid_rows = tf.ones([2], dtype=tf.bool)
expected_match_results = [-1, 1, 0]
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows=valid_rows)
with self.test_session() as sess:
match_results_out = sess.run(match._match_results)
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_expected_matches_with_all_rows_be_default(self):
similarity_matrix = tf.constant([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]])
expected_match_results = [-1, 1, 0]
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix)
with self.test_session() as sess:
match_results_out = sess.run(match._match_results)
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_no_matches_with_zero_valid_rows(self):
similarity_matrix = tf.constant([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]])
valid_rows = tf.zeros([2], dtype=tf.bool)
expected_match_results = [-1, -1, -1]
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows)
with self.test_session() as sess:
match_results_out = sess.run(match._match_results)
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_expected_matches_with_only_one_valid_row(self):
similarity_matrix = tf.constant([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]])
valid_rows = tf.constant([True, False], dtype=tf.bool)
expected_match_results = [-1, -1, 0]
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows)
with self.test_session() as sess:
match_results_out = sess.run(match._match_results)
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_expected_matches_with_only_one_valid_row_at_bottom(self):
similarity_matrix = tf.constant([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8]])
valid_rows = tf.constant([False, True], dtype=tf.bool)
expected_match_results = [-1, -1, 0]
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows)
with self.test_session() as sess:
match_results_out = sess.run(match._match_results)
self.assertAllEqual(match_results_out, expected_match_results)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda | cuda | nms | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = at::ceil_div(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = at::ceil_div(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data_ptr<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//C10_CUDA_CHECK(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) c10::cuda::CUDACachingAllocator::raw_alloc(boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(at::ceil_div(boxes_num, threadsPerBlock),
at::ceil_div(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
C10_CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
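// Greedy suppression on the host: boxes are visited in descending score order;
// a box is kept only if no previously kept box has set its bit in remv, and a
// kept box then ORs its per-block overlap bitmask into remv so that later
// boxes overlapping it above the threshold are discarded.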
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
c10::cuda::CUDACachingAllocator::raw_delete(mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
|
TensorFlow/LanguageModeling/BERT/data | data | create_datasets_from_start | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export BERT_PREP_WORKING_DIR="${BERT_PREP_WORKING_DIR}"
to_download=${1:-"wiki_only"} # By default, we don't download BooksCorpus dataset due to recent issues with the host server
#Download
if [ "$to_download" = "wiki_books" ] ; then
python3 /workspace/bert/data/bertPrep.py --action download --dataset bookscorpus
fi
python3 /workspace/bert/data/bertPrep.py --action download --dataset wikicorpus_en
python3 /workspace/bert/data/bertPrep.py --action download --dataset squad
python3 /workspace/bert/data/bertPrep.py --action download --dataset mrpc
python3 /workspace/bert/data/bertPrep.py --action download --dataset sst-2
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action download --dataset google_pretrained_weights
mkdir -p /workspace/bert/data/download/nvidia_pretrained
#SQuAD Large Checkpoint
echo "Downloading SQuAD Large Checkpoint"
cd /workspace/bert/data/download/nvidia_pretrained && \
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/bert_tf_ckpt_large_qa_squad11_amp_384/versions/19.03.1/zip -O bert_tf_ckpt_large_qa_squad11_amp_384_19.03.1.zip \
&& unzip bert_tf_ckpt_large_qa_squad11_amp_384_19.03.1.zip -d bert_tf_squad11_large_384 && rm bert_tf_ckpt_large_qa_squad11_amp_384_19.03.1.zip
#SQuAD Base Checkpoint
cd /workspace/bert/data/download/nvidia_pretrained && \
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/bert_tf_ckpt_base_qa_squad11_amp_128/versions/19.03.1/zip -O bert_tf_ckpt_base_qa_squad11_amp_128_19.03.1.zip \
&& unzip bert_tf_ckpt_base_qa_squad11_amp_128_19.03.1.zip -d bert_tf_squad11_base_128 && rm bert_tf_ckpt_base_qa_squad11_amp_128_19.03.1.zip
#Pretraining Large checkpoint
cd /workspace/bert/data/download/nvidia_pretrained && \
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/bert_tf_ckpt_large_pretraining_amp_lamb/versions/19.03.1/zip -O bert_tf_ckpt_large_pretraining_amp_lamb_19.03.1.zip \
&& unzip bert_tf_ckpt_large_pretraining_amp_lamb_19.03.1.zip -d bert_tf_pretraining_large_lamb && rm bert_tf_ckpt_large_pretraining_amp_lamb_19.03.1.zip
python3 /workspace/bert/data/bertPrep.py --action download --dataset google_pretrained_weights # Redundant, to verify and remove
DATASET="wikicorpus_en"
# Properly format the text files
if [ "$to_download" = "wiki_books" ] ; then
python3 /workspace/bert/data/bertPrep.py --action text_formatting --dataset bookscorpus
DATASET="books_wiki_en_corpus"
fi
python3 /workspace/bert/data/bertPrep.py --action text_formatting --dataset wikicorpus_en
# Shard the text files
python3 /workspace/bert/data/bertPrep.py --action sharding --dataset $DATASET
# Create TFRecord files Phase 1
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action create_tfrecord_files --dataset ${DATASET} --max_seq_length 128 \
--max_predictions_per_seq 20 --vocab_file ${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt
# Create TFRecord files Phase 2
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action create_tfrecord_files --dataset ${DATASET} --max_seq_length 512 \
--max_predictions_per_seq 80 --vocab_file ${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt
|
PyTorch/Forecasting/TFT/triton | triton | model | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
def update_argparser(parser):
parser.add_argument("--checkpoint", type=str, help="Path to checkpoint to be used", required=True)
parser.add_argument("--precision", type=str, choices=['fp16', 'fp32'], required=True)
class TFTWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, s_cat, s_cont, k_cat, k_cont, o_cat, o_cont, target, id):
# wrapped_input = torch.jit.annotate(Dict[str, Optional[Tensor]], {})
wrapped_input = {}
input_names = ['s_cat', 's_cont', 'k_cat', 'k_cont', 'o_cat', 'o_cont', 'target', 'id']
wrapped_input['s_cat'] = s_cat if s_cat.shape[1] != 1 else None
wrapped_input['s_cont'] = s_cont if s_cont.shape[1] != 1 else None
wrapped_input['k_cat'] = k_cat if k_cat.shape[1] != 1 else None
wrapped_input['k_cont'] = k_cont if k_cont.shape[1] != 1 else None
wrapped_input['o_cat'] = o_cat if o_cat.shape[1] != 1 else None
wrapped_input['o_cont'] = o_cont if o_cont.shape[1] != 1 else None
wrapped_input['target'] = target
wrapped_input['id'] = id if id.numel() else None
return self.model(wrapped_input)
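# Illustrative note on the wrapper above (shapes are hypothetical): Triton
# always supplies all eight positional tensors, so a feature group that is
# absent for a given dataset arrives as a tensor with a size-1 second dimension
# and is mapped to None here, e.g.
#
#   s_cat = torch.empty(batch_size, 1)                  # -> wrapped_input['s_cat'] is None
#   k_cont = torch.rand(batch_size, window, n_k_cont)   # -> passed through unchanged
#   out = wrapper(s_cat, s_cont, k_cat, k_cont, o_cat, o_cont, target, id)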
def get_model(**args):
#get model config
os.environ["TFT_SCRIPTING"] = "True"
from modeling import TemporalFusionTransformer
state_dict = torch.load(os.path.join(args['checkpoint'], "checkpoint.pt"))
config = state_dict['config']
#create model
model = TemporalFusionTransformer(config)
#load model
model.load_state_dict(state_dict['model'])
model.eval()
model.cuda()
model = TFTWrapper(model).cuda()
tensor_names = {
"inputs": ['s_cat__0', 's_cont__1', 'k_cat__2', 'k_cont__3', 'o_cat__4', 'o_cont__5', 'target__6', 'id__7'],
"outputs": ["target__0"]
}
return model, tensor_names |
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/perf_analyzer | perf_analyzer | perf_analyzer | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from subprocess import PIPE, CalledProcessError, Popen
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .exceptions import PerfAnalyzerException
MAX_INTERVAL_CHANGES = 10
COUNT_INTERVAL_DELTA = 50
TIME_INTERVAL_DELTA = 2000
LOGGER = logging.getLogger(__name__)
class PerfAnalyzer:
"""
This class provides an interface for running workloads
with perf_analyzer.
"""
def __init__(self, config):
"""
Parameters
----------
config : PerfAnalyzerConfig
keys are names of arguments to perf_analyzer,
values are their values.
"""
self.bin_path = "perf_analyzer"
self._config = config
self._output = str()
def run(self):
"""
Runs the perf analyzer with the
initialized configuration
Returns
-------
List of Records
List of the metrics obtained from this
run of perf_analyzer
Raises
------
PerfAnalyzerException
If subprocess throws CalledProcessError
"""
for _ in range(MAX_INTERVAL_CHANGES):
command = [self.bin_path]
command += self._config.to_cli_string().replace("=", " ").split()
LOGGER.debug(f"Perf Analyze command: {command}")
try:
process = Popen(command, start_new_session=True, stdout=PIPE, encoding="utf-8")
streamed_output = ""
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
streamed_output += output
print(output.rstrip())
self._output += streamed_output
result = process.poll()
if result != 0:
raise CalledProcessError(returncode=result, cmd=command, output=streamed_output)
return
except CalledProcessError as e:
if self._failed_with_measurement_interval(e.output):
if self._config["measurement-mode"] is None or self._config["measurement-mode"] == "count_windows":
self._increase_request_count()
else:
self._increase_time_interval()
else:
raise PerfAnalyzerException(
f"Running perf_analyzer with {e.cmd} failed with" f" exit status {e.returncode} : {e.output}"
)
raise PerfAnalyzerException(f"Ran perf_analyzer {MAX_INTERVAL_CHANGES} times, but no valid requests recorded.")
def output(self):
"""
Returns
-------
The stdout output of the
last perf_analyzer run
"""
if self._output:
return self._output
raise PerfAnalyzerException("Attempted to get perf_analyzer output" "without calling run first.")
def _failed_with_measurement_interval(self, output: str):
return (
output.find("Failed to obtain stable measurement") != -1
or output.find("Please use a larger time window") != -1
)
def _increase_request_count(self):
self._config["measurement-request-count"] += COUNT_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement request count is too small, "
f"increased to {self._config['measurement-request-count']}."
)
def _increase_time_interval(self):
self._config["measurement-interval"] += TIME_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement window is too small, "
f"increased to {self._config['measurement-interval']} ms."
)
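# Minimal usage sketch (the "model-name" key below is hypothetical and depends
# on the PerfAnalyzerConfig implementation shipped with this toolkit; the
# measurement keys are the ones this class adjusts on retries):
#
#   config = PerfAnalyzerConfig()
#   config["model-name"] = "my_model"
#   config["measurement-mode"] = "count_windows"
#   config["measurement-request-count"] = 50
#   analyzer = PerfAnalyzer(config)
#   analyzer.run()              # retries with a larger window if measurements are unstable
#   report = analyzer.output()  # raw perf_analyzer stdout from the last run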
|
PyTorch/Classification/ConvNets/efficientnet/training/TF32 | TF32 | DGXA100_efficientnet-widese-b0_TF32 | python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision TF32 --mode convergence --platform DGXA100 /imagenet --workspace ${1:-./} --raport-file raport.json
|
TensorFlow/LanguageModeling/Transformer-XL/tf/scripts/docker | docker | interactive | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
nvidia-docker run --init -it --rm --network=host --ipc=host -v $PWD:/workspace/transformer-xl transformer-xl bash
|
PyTorch/SpeechSynthesis/HiFiGAN/fastpitch | fastpitch | attention | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class Invertible1x1ConvLUS(torch.nn.Module):
def __init__(self, c):
super(Invertible1x1ConvLUS, self).__init__()
# Sample a random orthonormal matrix to initialize weights
W, _ = torch.linalg.qr(torch.randn(c, c))
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1*W[:, 0]
p, lower, upper = torch.lu_unpack(*torch.lu(W))
self.register_buffer('p', p)
# diagonals of lower will always be 1s anyway
lower = torch.tril(lower, -1)
lower_diag = torch.diag(torch.eye(c, c))
self.register_buffer('lower_diag', lower_diag)
self.lower = nn.Parameter(lower)
self.upper_diag = nn.Parameter(torch.diag(upper))
self.upper = nn.Parameter(torch.triu(upper, 1))
def forward(self, z, reverse=False):
U = torch.triu(self.upper, 1) + torch.diag(self.upper_diag)
L = torch.tril(self.lower, -1) + torch.diag(self.lower_diag)
W = torch.mm(self.p, torch.mm(L, U))
if reverse:
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse[..., None]
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
W = W[..., None]
z = F.conv1d(z, W, bias=None, stride=1, padding=0)
log_det_W = torch.sum(torch.log(torch.abs(self.upper_diag)))
return z, log_det_W
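# Illustrative round-trip sketch for the invertible convolution above (shapes
# are hypothetical): the forward pass returns the transformed tensor together
# with log|det W|, while reverse=True applies the cached inverse so the input
# is recovered.
#
#   conv = Invertible1x1ConvLUS(80)
#   z = torch.randn(4, 80, 100)        # (batch, channels, time)
#   y, log_det_W = conv(z)
#   z_rec = conv(y, reverse=True)      # approximately equal to z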
class ConvAttention(torch.nn.Module):
def __init__(self, n_mel_channels=80, n_speaker_dim=128,
n_text_channels=512, n_att_channels=80, temperature=1.0,
n_mel_convs=2, align_query_enc_type='3xconv',
use_query_proj=True):
super(ConvAttention, self).__init__()
self.temperature = temperature
self.att_scaling_factor = np.sqrt(n_att_channels)
self.softmax = torch.nn.Softmax(dim=3)
self.log_softmax = torch.nn.LogSoftmax(dim=3)
self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
self.attn_proj = torch.nn.Conv2d(n_att_channels, 1, kernel_size=1)
self.align_query_enc_type = align_query_enc_type
self.use_query_proj = bool(use_query_proj)
self.key_proj = nn.Sequential(
ConvNorm(n_text_channels,
n_text_channels * 2,
kernel_size=3,
bias=True,
w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_text_channels * 2,
n_att_channels,
kernel_size=1,
bias=True))
self.align_query_enc_type = align_query_enc_type
if align_query_enc_type == "inv_conv":
self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
elif align_query_enc_type == "3xconv":
self.query_proj = nn.Sequential(
ConvNorm(n_mel_channels,
n_mel_channels * 2,
kernel_size=3,
bias=True,
w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_mel_channels * 2,
n_mel_channels,
kernel_size=1,
bias=True),
torch.nn.ReLU(),
ConvNorm(n_mel_channels,
n_att_channels,
kernel_size=1,
bias=True))
else:
raise ValueError("Unknown query encoder type specified")
def run_padded_sequence(self, sorted_idx, unsort_idx, lens, padded_data,
recurrent_model):
"""Sorts input data by previded ordering (and un-ordering) and runs the
packed data through the recurrent model
Args:
sorted_idx (torch.tensor): 1D sorting index
unsort_idx (torch.tensor): 1D unsorting index (inverse of sorted_idx)
lens: lengths of input data (sorted in descending order)
padded_data (torch.tensor): input sequences (padded)
recurrent_model (nn.Module): recurrent model to run data through
Returns:
hidden_vectors (torch.tensor): outputs of the RNN, in the original,
unsorted, ordering
"""
# sort the data by decreasing length using provided index
# we assume batch index is in dim=1
padded_data = padded_data[:, sorted_idx]
padded_data = nn.utils.rnn.pack_padded_sequence(padded_data, lens)
hidden_vectors = recurrent_model(padded_data)[0]
hidden_vectors, _ = nn.utils.rnn.pad_packed_sequence(hidden_vectors)
# unsort the results at dim=1 and return
hidden_vectors = hidden_vectors[:, unsort_idx]
return hidden_vectors
def encode_query(self, query, query_lens):
query = query.permute(2, 0, 1) # seq_len, batch, feature dim
lens, ids = torch.sort(query_lens, descending=True)
original_ids = [0] * lens.size(0)
for i in range(len(ids)):
original_ids[ids[i]] = i
query_encoded = self.run_padded_sequence(ids, original_ids, lens,
query, self.query_lstm)
query_encoded = query_encoded.permute(1, 2, 0)
return query_encoded
def forward(self, queries, keys, query_lens, mask=None, key_lens=None,
keys_encoded=None, attn_prior=None):
"""Attention mechanism for flowtron parallel
Unlike in Flowtron, we have no restrictions such as causality etc,
since we only need this during training.
Args:
queries (torch.tensor): B x C x T1 tensor
(probably going to be mel data)
keys (torch.tensor): B x C2 x T2 tensor (text data)
query_lens: lengths for sorting the queries in descending order
mask (torch.tensor): uint8 binary mask for variable length entries
(should be in the T2 domain)
Output:
attn (torch.tensor): B x 1 x T1 x T2 attention mask.
Final dim T2 should sum to 1
"""
keys_enc = self.key_proj(keys) # B x n_attn_dims x T2
# Beware can only do this since query_dim = attn_dim = n_mel_channels
if self.use_query_proj:
if self.align_query_enc_type == "inv_conv":
queries_enc, log_det_W = self.query_proj(queries)
elif self.align_query_enc_type == "3xconv":
queries_enc = self.query_proj(queries)
log_det_W = 0.0
else:
queries_enc, log_det_W = self.query_proj(queries)
else:
queries_enc, log_det_W = queries, 0.0
        # Different ways of computing attn are possible;
        # here we use a simplistic isotropic Gaussian per phoneme.
# B x n_attn_dims x T1 x T2
attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2
        # compute the (unnormalized) Gaussian log-likelihood; the 0.0005 factor plays the role of 1/(2*sigma^2) for a fixed variance
attn = -0.0005 * attn.sum(1, keepdim=True)
if attn_prior is not None:
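            # combine with the alignment prior in log-space (the 1e-8 guards against log(0)); the softmax below renormalizes over T2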
attn = self.log_softmax(attn) + torch.log(attn_prior[:, None]+1e-8)
attn_logprob = attn.clone()
if mask is not None:
attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2),
-float("inf"))
attn = self.softmax(attn) # Softmax along T2
return attn, attn_logprob
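# A minimal shape sketch (hypothetical values, assuming this module's imports are available):
#   attn_layer = ConvAttention(n_mel_channels=80, n_text_channels=512)
#   queries = torch.randn(2, 80, 100)   # B x n_mel_channels x T1 (mel frames)
#   keys = torch.randn(2, 512, 30)      # B x n_text_channels x T2 (text tokens)
#   attn, attn_logprob = attn_layer(queries, keys, query_lens=None)
#   # attn has shape 2 x 1 x 100 x 30 and is normalized (softmax) over the last (T2) dimension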
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B4/training/FP32 | FP32 | convergence_8xV100-32G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v1/b4_cfg.py \
--mode train_and_eval \
--use_xla \
--model_dir ./output \
--data_dir /data \
--log_steps 100 \
--max_epochs 500 \
--save_checkpoint_freq 5 \
--train_batch_size 32 \
--eval_batch_size 32 \
--train_img_size 380 \
--eval_img_size 380 \
--augmenter_name autoaugment \
--lr_decay cosine \
--mixup_alpha 0.2 \
--defer_img_mixing \
--moving_average_decay 0.9999 \
--lr_init 0.005
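# Note: with 8 Horovod workers and --train_batch_size 32, the effective global batch size is 8 x 32 = 256 images per step (assuming the batch size flag is per worker).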
|
PyTorch/Classification/GPUNet/triton/deployment_toolkit | deployment_toolkit | __init__ | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
TensorFlow/Classification/ConvNets/triton | triton | run_online_performance_test_on_triton | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ],
where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ],
use `--shape IMAGE:3,224,224`.
"""
import argparse
import csv
import os
import sys
from pathlib import Path
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def update_performance_data(results: List, performance_file: str):
with open(performance_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row["avg latency"] = calculate_average_latency(row)
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
def online_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
triton_gpu_engine_count: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Dynamic batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
print(f"Running performance tests for dynamic batching")
performance_file = f"triton_performance_dynamic_partial.csv"
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
max_concurrency = min(256, max_total_requests)
batch_size = max(1, max_total_requests // 256)
step = max(1, max_concurrency // 32)
min_concurrency = step
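    # sweep request concurrency from `step` up to max_concurrency in ~32 increments; passed to perf_client below as --concurrency-range min:max:step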
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_file} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
results = list()
update_performance_data(results=results, performance_file=performance_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
os.remove(performance_file)
print("Performance results for dynamic batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument(
"--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server"
)
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true",
default=False)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
online_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
if __name__ == "__main__":
main()
|
PyTorch/LanguageModeling/BERT/triton/dist6l/runner | runner | start_NVIDIA-A30 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Install Docker
. /etc/os-release && \
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \
echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey| apt-key add - && \
curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \
apt-get update && \
apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2
# Install packages
pip install -r triton/runner/requirements.txt
# Evaluate Runner
python3 -m "triton.dist6l.runner.__main__" \
--config-path "triton/dist6l/runner/config_NVIDIA-A30.yaml" \
--device 0 |
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/config | config | paths_catalog_dlfw_ci | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
DATA_DIR = "/data/coco/coco-2014"
DATASETS = {
"coco_2014_train": {
"img_dir": "coco_train2014",
"ann_file": "annotations/instances_train2014.json"
},
"coco_2014_val": {
"img_dir": "coco_val2014",
"ann_file": "annotations/instances_val2014.json"
},
"coco_2014_minival": {
"img_dir": "coco_val2014",
"ann_file": "annotations/instances_minival2014.json"
},
"coco_2014_valminusminival": {
"img_dir": "coco_val2014",
"ann_file": "annotations/instances_valminusminival2014.json"
},
}
@staticmethod
def get(name):
if "coco" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
root=os.path.join(data_dir, attrs["img_dir"]),
ann_file=os.path.join(data_dir, attrs["ann_file"]),
)
return dict(
factory="COCODataset",
args=args,
)
elif "voc" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
data_dir=os.path.join(data_dir, attrs["data_dir"]),
split=attrs["split"],
)
return dict(
factory="PascalVOCDataset",
args=args,
)
raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
C2_IMAGENET_MODELS = {
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
"FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
}
C2_DETECTRON_SUFFIX = "output/train/coco_2014_train%3Acoco_2014_valminusminival/generalized_rcnn/model_final.pkl"
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
"35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
if name.startswith("ImageNetPretrained"):
return ModelCatalog.get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog {}".format(name))
@staticmethod
def get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_URL
name = name[len("ImageNetPretrained/"):]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def get_c2_detectron_12_2017_baselines(name):
# Detectron C2 models are stored following the structure
        # prefix/<model_id>/12_2017_baselines/<model_name>.yaml.<signature>/suffix
# we use as identifiers in the catalog Caffe2Detectron/COCO/<model_id>/<model_name>
prefix = ModelCatalog.S3_C2_DETECTRON_URL
suffix = ModelCatalog.C2_DETECTRON_SUFFIX
# remove identification prefix
name = name[len("Caffe2Detectron/COCO/"):]
# split in <model_id> and <model_name>
model_id, model_name = name.split("/")
# parsing to make it match the url address from the Caffe2 models
model_name = "{}.yaml".format(model_name)
signature = ModelCatalog.C2_DETECTRON_MODELS[name]
unique_name = ".".join([model_name, signature])
url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix])
return url
|
PyTorch/SpeechRecognition/Jasper/scripts | scripts | download_librispeech | #!/usr/bin/env bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DATA_SET="LibriSpeech"
DATA_ROOT_DIR="/datasets"
DATA_DIR="${DATA_ROOT_DIR}/${DATA_SET}"
if [ ! -d "$DATA_DIR" ]
then
mkdir --mode 755 $DATA_DIR
python utils/download_librispeech.py \
utils/librispeech.csv \
$DATA_DIR \
-e ${DATA_ROOT_DIR}/
else
echo "Directory $DATA_DIR already exists."
fi
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_feature_extractor_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base test class SSDFeatureExtractors."""
from abc import abstractmethod
import itertools
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
class SsdFeatureExtractorTestBase(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
scale: false
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def conv_hyperparams_fn(self):
with tf.contrib.slim.arg_scope([]) as sc:
return sc
@abstractmethod
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor or an
ssd_meta_arch.SSDKerasFeatureExtractor object.
"""
pass
def _extract_features(self, image_tensor, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, use_keras=False):
try:
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_explicit_padding,
use_keras=use_keras)
# If the unit test does not support a use_keras arg, it raises an error:
except TypeError:
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_explicit_padding)
if use_keras:
feature_maps = feature_extractor(image_tensor)
else:
feature_maps = feature_extractor.extract_features(image_tensor)
return feature_maps
def check_extract_features_returns_correct_shape(
self, batch_size, image_height, image_width, depth_multiplier,
pad_to_multiple, expected_feature_map_shapes, use_explicit_padding=False,
use_keras=False):
def graph_fn(image_tensor):
return self._extract_features(image_tensor,
depth_multiplier,
pad_to_multiple,
use_explicit_padding,
use_keras=use_keras)
image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor])
    for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_extract_features_returns_correct_shapes_with_dynamic_inputs(
self, batch_size, image_height, image_width, depth_multiplier,
pad_to_multiple, expected_feature_map_shapes, use_explicit_padding=False,
use_keras=False):
def graph_fn(image_height, image_width):
image_tensor = tf.random_uniform([batch_size, image_height, image_width,
3], dtype=tf.float32)
return self._extract_features(image_tensor,
depth_multiplier,
pad_to_multiple,
use_explicit_padding,
use_keras=use_keras)
feature_maps = self.execute_cpu(graph_fn, [
np.array(image_height, dtype=np.int32),
np.array(image_width, dtype=np.int32)
])
    for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_extract_features_raises_error_with_invalid_image_size(
self, image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=False):
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
feature_maps = self._extract_features(preprocessed_inputs,
depth_multiplier,
pad_to_multiple,
use_keras=use_keras)
test_preprocessed_image = np.random.rand(4, image_height, image_width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(feature_maps,
feed_dict={preprocessed_inputs: test_preprocessed_image})
def check_feature_extractor_variables_under_scope(
self, depth_multiplier, pad_to_multiple, scope_name, use_keras=False):
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras)
for variable in variables:
self.assertTrue(variable.name.startswith(scope_name))
def get_feature_extractor_variables(
self, depth_multiplier, pad_to_multiple, use_keras=False):
g = tf.Graph()
with g.as_default():
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
self._extract_features(preprocessed_inputs,
depth_multiplier,
pad_to_multiple,
use_keras=use_keras)
return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
|
TensorFlow/Detection/SSD/models/research/object_detection/anchor_generators | anchor_generators | grid_anchor_generator_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.grid_anchor_generator."""
import numpy as np
import tensorflow as tf
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.utils import test_case
class GridAnchorGeneratorTest(test_case.TestCase):
def test_construct_single_anchor(self):
"""Builds a 1x1 anchor grid to test the size of the output boxes."""
def graph_fn():
scales = [0.5, 1.0, 2.0]
aspect_ratios = [0.25, 1.0, 4.0]
anchor_offset = [7, -3]
anchor_generator = grid_anchor_generator.GridAnchorGenerator(
scales, aspect_ratios, anchor_offset=anchor_offset)
anchors_list = anchor_generator.generate(feature_map_shape_list=[(1, 1)])
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61],
[-505, -131, 519, 125], [-57, -67, 71, 61],
[-121, -131, 135, 125], [-249, -259, 263, 253],
[-25, -131, 39, 125], [-57, -259, 71, 253],
[-121, -515, 135, 509]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid(self):
def graph_fn():
base_anchor_size = [10, 10]
anchor_stride = [19, 19]
anchor_offset = [0, 0]
scales = [0.5, 1.0, 2.0]
aspect_ratios = [1.0]
anchor_generator = grid_anchor_generator.GridAnchorGenerator(
scales,
aspect_ratios,
base_anchor_size=base_anchor_size,
anchor_stride=anchor_stride,
anchor_offset=anchor_offset)
anchors_list = anchor_generator.generate(feature_map_shape_list=[(2, 2)])
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.],
[-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5],
[-5., 14., 5, 24], [-10., 9., 10, 29],
[16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5],
[9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5],
[14., 14., 24, 24], [9., 9., 29, 29]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid_with_dynamic_feature_map_shapes(self):
def graph_fn(feature_map_height, feature_map_width):
base_anchor_size = [10, 10]
anchor_stride = [19, 19]
anchor_offset = [0, 0]
scales = [0.5, 1.0, 2.0]
aspect_ratios = [1.0]
anchor_generator = grid_anchor_generator.GridAnchorGenerator(
scales,
aspect_ratios,
base_anchor_size=base_anchor_size,
anchor_stride=anchor_stride,
anchor_offset=anchor_offset)
anchors_list = anchor_generator.generate(
feature_map_shape_list=[(feature_map_height, feature_map_width)])
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.],
[-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5],
[-5., 14., 5, 24], [-10., 9., 10, 29],
[16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5],
[9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5],
[14., 14., 24, 24], [9., 9., 29, 29]]
anchor_corners_out = self.execute_cpu(graph_fn,
[np.array(2, dtype=np.int32),
np.array(2, dtype=np.int32)])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/SpeechRecognition/Jasper | Jasper | train | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import os
import random
import time
import torch
import numpy as np
import torch.distributed as dist
from contextlib import suppress as empty_context
from common import helpers
from common.dali.data_loader import DaliDataLoader
from common.dataset import AudioDataset, get_data_loader
from common.features import BaseFeatures, FilterbankFeatures
from common.helpers import (Checkpointer, greedy_wer, num_weights, print_once,
process_evaluation_epoch)
from common.optimizers import AdamW, lr_policy, Novograd
from common.tb_dllogger import flush_log, init_log, log
from common.utils import BenchmarkStats
from jasper import config
from jasper.model import CTCLossNM, GreedyCTCDecoder, Jasper
def parse_args():
parser = argparse.ArgumentParser(description='Jasper')
training = parser.add_argument_group('training setup')
training.add_argument('--epochs', default=400, type=int,
help='Number of epochs for the entire training; influences the lr schedule')
training.add_argument("--warmup_epochs", default=0, type=int,
help='Initial epochs of increasing learning rate')
training.add_argument("--hold_epochs", default=0, type=int,
help='Constant max learning rate epochs after warmup')
training.add_argument('--epochs_this_job', default=0, type=int,
                          help=('Run for a number of epochs with no effect on the lr schedule. '
'Useful for re-starting the training.'))
training.add_argument('--cudnn_benchmark', action='store_true', default=True,
help='Enable cudnn benchmark')
training.add_argument('--amp', '--fp16', action='store_true', default=False,
help='Use pytorch native mixed precision training')
training.add_argument('--seed', default=42, type=int, help='Random seed')
training.add_argument('--local_rank', '--local-rank', default=os.getenv('LOCAL_RANK', 0),
type=int, help='GPU id used for distributed training')
training.add_argument('--pre_allocate_range', default=None, type=int, nargs=2,
help='Warmup with batches of length [min, max] before training')
optim = parser.add_argument_group('optimization setup')
optim.add_argument('--batch_size', default=32, type=int,
help='Global batch size')
optim.add_argument('--lr', default=1e-3, type=float,
help='Peak learning rate')
optim.add_argument("--min_lr", default=1e-5, type=float,
help='minimum learning rate')
optim.add_argument("--lr_policy", default='exponential', type=str,
choices=['exponential', 'legacy'], help='lr scheduler')
optim.add_argument("--lr_exp_gamma", default=0.99, type=float,
help='gamma factor for exponential lr scheduler')
optim.add_argument('--weight_decay', default=1e-3, type=float,
help='Weight decay for the optimizer')
optim.add_argument('--grad_accumulation_steps', default=1, type=int,
help='Number of accumulation steps')
optim.add_argument('--optimizer', default='novograd', type=str,
choices=['novograd', 'adamw'], help='Optimization algorithm')
optim.add_argument('--ema', type=float, default=0.0,
help='Discount factor for exp averaging of model weights')
io = parser.add_argument_group('feature and checkpointing setup')
io.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'],
default='gpu', help='Use DALI pipeline for fast data processing')
io.add_argument('--resume', action='store_true',
help='Try to resume from last saved checkpoint.')
io.add_argument('--ckpt', default=None, type=str,
help='Path to a checkpoint for resuming training')
io.add_argument('--save_frequency', default=10, type=int,
help='Checkpoint saving frequency in epochs')
io.add_argument('--keep_milestones', default=[100, 200, 300], type=int, nargs='+',
help='Milestone checkpoints to keep from removing')
io.add_argument('--save_best_from', default=380, type=int,
help='Epoch on which to begin tracking best checkpoint (dev WER)')
io.add_argument('--eval_frequency', default=200, type=int,
help='Number of steps between evaluations on dev set')
io.add_argument('--log_frequency', default=25, type=int,
help='Number of steps between printing training stats')
io.add_argument('--prediction_frequency', default=100, type=int,
help='Number of steps between printing sample decodings')
io.add_argument('--model_config', type=str, required=True,
help='Path of the model configuration file')
io.add_argument('--train_manifests', type=str, required=True, nargs='+',
help='Paths of the training dataset manifest file')
io.add_argument('--val_manifests', type=str, required=True, nargs='+',
help='Paths of the evaluation datasets manifest files')
io.add_argument('--dataset_dir', required=True, type=str,
help='Root dir of dataset')
io.add_argument('--output_dir', type=str, required=True,
help='Directory for logs and checkpoints')
io.add_argument('--log_file', type=str, default=None,
help='Path to save the training logfile.')
io.add_argument('--benchmark_epochs_num', type=int, default=1,
help='Number of epochs accounted in final average throughput.')
io.add_argument('--override_config', type=str, action='append',
help='Overrides a value from a config .yaml.'
' Syntax: `--override_config nested.config.key=val`.')
return parser.parse_args()
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt.true_divide(num_gpus)
def apply_ema(model, ema_model, decay):
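    # exponential moving average of weights: ema_param <- decay * ema_param + (1 - decay) * param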
if not decay:
return
sd = getattr(model, 'module', model).state_dict()
for k, v in ema_model.state_dict().items():
v.copy_(decay * v + (1 - decay) * sd[k])
@torch.no_grad()
def evaluate(epoch, step, val_loader, val_feat_proc, labels, model,
ema_model, ctc_loss, greedy_decoder, use_amp, use_dali=False):
for model, subset in [(model, 'dev'), (ema_model, 'dev_ema')]:
if model is None:
continue
model.eval()
torch.cuda.synchronize()
start_time = time.time()
agg = {'losses': [], 'preds': [], 'txts': []}
for batch in val_loader:
if use_dali:
# with DALI, the data is already on GPU
feat, feat_lens, txt, txt_lens = batch
if val_feat_proc is not None:
feat, feat_lens = val_feat_proc(feat, feat_lens)
else:
batch = [t.cuda(non_blocking=True) for t in batch]
audio, audio_lens, txt, txt_lens = batch
feat, feat_lens = val_feat_proc(audio, audio_lens)
with torch.cuda.amp.autocast(enabled=use_amp):
log_probs, enc_lens = model(feat, feat_lens)
loss = ctc_loss(log_probs, txt, enc_lens, txt_lens)
pred = greedy_decoder(log_probs)
agg['losses'] += helpers.gather_losses([loss])
agg['preds'] += helpers.gather_predictions([pred], labels)
agg['txts'] += helpers.gather_transcripts([txt], [txt_lens], labels)
wer, loss = process_evaluation_epoch(agg)
torch.cuda.synchronize()
log(() if epoch is None else (epoch,),
step, subset, {'loss': loss, 'wer': 100.0 * wer,
'took': time.time() - start_time})
model.train()
return wer
def main():
args = parse_args()
assert(torch.cuda.is_available())
assert args.prediction_frequency % args.log_frequency == 0
torch.backends.cudnn.benchmark = args.cudnn_benchmark
# set up distributed training
multi_gpu = int(os.environ.get('WORLD_SIZE', 1)) > 1
if multi_gpu:
torch.cuda.set_device(args.local_rank)
dist.init_process_group(backend='nccl', init_method='env://')
world_size = dist.get_world_size()
print_once(f'Distributed training with {world_size} GPUs\n')
else:
world_size = 1
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
init_log(args)
cfg = config.load(args.model_config)
config.apply_config_overrides(cfg, args)
symbols = helpers.add_ctc_blank(cfg['labels'])
assert args.grad_accumulation_steps >= 1
assert args.batch_size % args.grad_accumulation_steps == 0
batch_size = args.batch_size // args.grad_accumulation_steps
print_once('Setting up datasets...')
train_dataset_kw, train_features_kw = config.input(cfg, 'train')
val_dataset_kw, val_features_kw = config.input(cfg, 'val')
use_dali = args.dali_device in ('cpu', 'gpu')
if use_dali:
assert train_dataset_kw['ignore_offline_speed_perturbation'], \
"DALI doesn't support offline speed perturbation"
# pad_to_max_duration is not supported by DALI - have simple padders
if train_features_kw['pad_to_max_duration']:
train_feat_proc = BaseFeatures(
pad_align=train_features_kw['pad_align'],
pad_to_max_duration=True,
max_duration=train_features_kw['max_duration'],
sample_rate=train_features_kw['sample_rate'],
window_size=train_features_kw['window_size'],
window_stride=train_features_kw['window_stride'])
train_features_kw['pad_to_max_duration'] = False
else:
train_feat_proc = None
if val_features_kw['pad_to_max_duration']:
val_feat_proc = BaseFeatures(
pad_align=val_features_kw['pad_align'],
pad_to_max_duration=True,
max_duration=val_features_kw['max_duration'],
sample_rate=val_features_kw['sample_rate'],
window_size=val_features_kw['window_size'],
window_stride=val_features_kw['window_stride'])
val_features_kw['pad_to_max_duration'] = False
else:
val_feat_proc = None
train_loader = DaliDataLoader(gpu_id=args.local_rank,
dataset_path=args.dataset_dir,
config_data=train_dataset_kw,
config_features=train_features_kw,
json_names=args.train_manifests,
batch_size=batch_size,
grad_accumulation_steps=args.grad_accumulation_steps,
pipeline_type="train",
device_type=args.dali_device,
symbols=symbols)
val_loader = DaliDataLoader(gpu_id=args.local_rank,
dataset_path=args.dataset_dir,
config_data=val_dataset_kw,
config_features=val_features_kw,
json_names=args.val_manifests,
batch_size=batch_size,
pipeline_type="val",
device_type=args.dali_device,
symbols=symbols)
else:
train_dataset_kw, train_features_kw = config.input(cfg, 'train')
train_dataset = AudioDataset(args.dataset_dir,
args.train_manifests,
symbols,
**train_dataset_kw)
train_loader = get_data_loader(train_dataset,
batch_size,
multi_gpu=multi_gpu,
shuffle=True,
num_workers=4)
train_feat_proc = FilterbankFeatures(**train_features_kw)
val_dataset_kw, val_features_kw = config.input(cfg, 'val')
val_dataset = AudioDataset(args.dataset_dir,
args.val_manifests,
symbols,
**val_dataset_kw)
val_loader = get_data_loader(val_dataset,
batch_size,
multi_gpu=multi_gpu,
shuffle=False,
num_workers=4,
drop_last=False)
val_feat_proc = FilterbankFeatures(**val_features_kw)
dur = train_dataset.duration / 3600
dur_f = train_dataset.duration_filtered / 3600
nsampl = len(train_dataset)
print_once(f'Training samples: {nsampl} ({dur:.1f}h, '
f'filtered {dur_f:.1f}h)')
if train_feat_proc is not None:
train_feat_proc.cuda()
if val_feat_proc is not None:
val_feat_proc.cuda()
steps_per_epoch = len(train_loader) // args.grad_accumulation_steps
# set up the model
model = Jasper(encoder_kw=config.encoder(cfg),
decoder_kw=config.decoder(cfg, n_classes=len(symbols)))
model.cuda()
ctc_loss = CTCLossNM(n_classes=len(symbols))
greedy_decoder = GreedyCTCDecoder()
print_once(f'Model size: {num_weights(model) / 10**6:.1f}M params\n')
# optimization
kw = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == "novograd":
optimizer = Novograd(model.parameters(), **kw)
elif args.optimizer == "adamw":
optimizer = AdamW(model.parameters(), **kw)
else:
raise ValueError(f'Invalid optimizer "{args.optimizer}"')
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
adjust_lr = lambda step, epoch, optimizer: lr_policy(
step, epoch, args.lr, optimizer, steps_per_epoch=steps_per_epoch,
warmup_epochs=args.warmup_epochs, hold_epochs=args.hold_epochs,
num_epochs=args.epochs, policy=args.lr_policy, min_lr=args.min_lr,
exp_gamma=args.lr_exp_gamma)
if args.ema > 0:
ema_model = copy.deepcopy(model)
else:
ema_model = None
if multi_gpu:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank)
# load checkpoint
meta = {'best_wer': 10**6, 'start_epoch': 0}
checkpointer = Checkpointer(args.output_dir, 'Jasper',
args.keep_milestones)
if args.resume:
args.ckpt = checkpointer.last_checkpoint() or args.ckpt
if args.ckpt is not None:
checkpointer.load(args.ckpt, model, ema_model, optimizer, scaler, meta)
start_epoch = meta['start_epoch']
best_wer = meta['best_wer']
epoch = 1
step = start_epoch * steps_per_epoch + 1
# training loop
model.train()
# pre-allocate
if args.pre_allocate_range is not None:
n_feats = train_features_kw['n_filt']
pad_align = train_features_kw['pad_align']
a, b = args.pre_allocate_range
for n_frames in range(a, b + pad_align, pad_align):
print_once(f'Pre-allocation ({batch_size}x{n_feats}x{n_frames})...')
feat = torch.randn(batch_size, n_feats, n_frames, device='cuda')
feat_lens = torch.ones(batch_size, device='cuda').fill_(n_frames)
txt = torch.randint(high=len(symbols)-1, size=(batch_size, 100),
device='cuda')
txt_lens = torch.ones(batch_size, device='cuda').fill_(100)
with torch.cuda.amp.autocast(enabled=args.amp):
log_probs, enc_lens = model(feat, feat_lens)
del feat
loss = ctc_loss(log_probs, txt, enc_lens, txt_lens)
loss.backward()
model.zero_grad()
torch.cuda.empty_cache()
bmark_stats = BenchmarkStats()
for epoch in range(start_epoch + 1, args.epochs + 1):
if multi_gpu and not use_dali:
train_loader.sampler.set_epoch(epoch)
torch.cuda.synchronize()
epoch_start_time = time.time()
epoch_utts = 0
epoch_loss = 0
accumulated_batches = 0
for batch in train_loader:
if accumulated_batches == 0:
step_loss = 0
step_utts = 0
step_start_time = time.time()
if use_dali:
# with DALI, the data is already on GPU
feat, feat_lens, txt, txt_lens = batch
if train_feat_proc is not None:
feat, feat_lens = train_feat_proc(feat, feat_lens)
else:
batch = [t.cuda(non_blocking=True) for t in batch]
audio, audio_lens, txt, txt_lens = batch
feat, feat_lens = train_feat_proc(audio, audio_lens)
            # Use no_sync() to skip redundant gradient all-reduce during intermediate accumulation steps
if (multi_gpu and accumulated_batches + 1 < args.grad_accumulation_steps):
ctx = model.no_sync()
else:
ctx = empty_context()
with ctx:
with torch.cuda.amp.autocast(enabled=args.amp):
log_probs, enc_lens = model(feat, feat_lens)
loss = ctc_loss(log_probs, txt, enc_lens, txt_lens)
loss /= args.grad_accumulation_steps
if multi_gpu:
reduced_loss = reduce_tensor(loss.data, world_size)
else:
reduced_loss = loss
if torch.isnan(reduced_loss).any():
print_once(f'WARNING: loss is NaN; skipping update')
continue
else:
step_loss += reduced_loss.item()
step_utts += batch[0].size(0) * world_size
epoch_utts += batch[0].size(0) * world_size
accumulated_batches += 1
scaler.scale(loss).backward()
if accumulated_batches % args.grad_accumulation_steps == 0:
epoch_loss += step_loss
scaler.step(optimizer)
scaler.update()
adjust_lr(step, epoch, optimizer)
optimizer.zero_grad()
apply_ema(model, ema_model, args.ema)
if step % args.log_frequency == 0:
preds = greedy_decoder(log_probs)
wer, pred_utt, ref = greedy_wer(preds, txt, txt_lens, symbols)
if step % args.prediction_frequency == 0:
print_once(f' Decoded: {pred_utt[:90]}')
print_once(f' Reference: {ref[:90]}')
step_time = time.time() - step_start_time
log((epoch, step % steps_per_epoch or steps_per_epoch, steps_per_epoch),
step, 'train',
{'loss': step_loss,
'wer': 100.0 * wer,
'throughput': step_utts / step_time,
'took': step_time,
'lrate': optimizer.param_groups[0]['lr']})
step_start_time = time.time()
if step % args.eval_frequency == 0:
wer = evaluate(epoch, step, val_loader, val_feat_proc,
symbols, model, ema_model, ctc_loss,
greedy_decoder, args.amp, use_dali)
if wer < best_wer and epoch >= args.save_best_from:
checkpointer.save(model, ema_model, optimizer, scaler,
epoch, step, best_wer, is_best=True)
best_wer = wer
step += 1
accumulated_batches = 0
# end of step
            # The DALI iterator needs to be exhausted;
# if not using DALI, simulate drop_last=True with grad accumulation
if not use_dali and step > steps_per_epoch * epoch:
break
torch.cuda.synchronize()
epoch_time = time.time() - epoch_start_time
epoch_loss /= steps_per_epoch
log((epoch,), None, 'train_avg', {'throughput': epoch_utts / epoch_time,
'took': epoch_time,
'loss': epoch_loss})
bmark_stats.update(epoch_utts, epoch_time, epoch_loss)
if epoch % args.save_frequency == 0 or epoch in args.keep_milestones:
checkpointer.save(model, ema_model, optimizer, scaler, epoch, step,
best_wer)
if 0 < args.epochs_this_job <= epoch - start_epoch:
print_once(f'Finished after {args.epochs_this_job} epochs.')
break
# end of epoch
log((), None, 'train_avg', bmark_stats.get(args.benchmark_epochs_num))
evaluate(None, step, val_loader, val_feat_proc, symbols, model,
ema_model, ctc_loss, greedy_decoder, args.amp, use_dali)
if epoch == args.epochs:
checkpointer.save(model, ema_model, optimizer, scaler, epoch, step,
best_wer)
flush_log()
if __name__ == "__main__":
main()
|
TensorFlow/Segmentation/UNet_Industrial/model/layers | layers | activation | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['crelu', 'elu', 'leaky_relu', 'prelu', 'relu', 'relu6', 'selu', 'sigmoid', 'softmax', 'tanh']
def crelu(features, name='crelu', axis=-1):
net = tf.nn.crelu(features, name=name, axis=axis)
_log_hparams(classname='CReLU', layername=net.name, axis=axis, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def elu(features, name='elu'):
net = tf.nn.elu(features, name=name)
_log_hparams(classname='ELU', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def leaky_relu(features, alpha=0.2, name='leaky_relu'):
net = tf.nn.leaky_relu(features, alpha=alpha, name=name)
_log_hparams(
classname='LeakyReLU', layername=net.name, alpha=alpha, out_shape=str(net.get_shape()), out_dtype=net.dtype
)
return net
def prelu(inputs, channel_shared=False, trainable=True, name='prelu'):
def parametric_relu(_x):
if channel_shared:
w_shape = (1, )
else:
w_shape = int(_x.get_shape()[-1])
alphas = tf.get_variable(
'alpha', w_shape, trainable=trainable, initializer=tf.initializers.truncated_normal(mean=-1.0, stddev=0.2)
)
alphas = tf.nn.sigmoid(alphas, name="constraining_alpha_var_in_0_1")
return tf.maximum(_x, _x * alphas)
with tf.variable_scope(name):
net = parametric_relu(inputs)
_log_hparams(
classname='PReLU',
layername=net.name,
channel_shared=channel_shared,
trainable=trainable,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net
def relu(inputs, name='relu'):
net = tf.nn.relu(inputs, name=name)
_log_hparams(classname='ReLU', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def relu6(inputs, name='relu6'):
net = tf.nn.relu6(inputs, name=name)
_log_hparams(classname='ReLU6', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def selu(features, name='selu'):
net = tf.nn.selu(features, name=name)
_log_hparams(classname='SELU', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def sigmoid(x, name='sigmoid'):
net = tf.math.sigmoid(x, name=name)
_log_hparams(classname='Sigmoid', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
def softmax(inputs, axis=None, name="softmax"):
net = tf.nn.softmax(
inputs,
axis=axis,
name=name,
)
_log_hparams(
classname='Softmax', layername=net.name, axis=axis, out_shape=str(net.get_shape()), out_dtype=net.dtype
)
return net
def tanh(inputs, name='tanh'):
net = tf.math.tanh(inputs, name=name)
_log_hparams(classname='TanH', layername=net.name, out_shape=str(net.get_shape()), out_dtype=net.dtype)
return net
|
TensorFlow2/Classification/ConvNets/dataloader | dataloader | preprocessing | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing functions for images."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from typing import List, Optional, Text, Tuple
from dataloader import augment
# Calculated from the ImageNet training set
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
IMAGE_SIZE = 224
CROP_PADDING = 32
def mean_image_subtraction(
image_bytes: tf.Tensor,
means: Tuple[float, ...],
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
) -> tf.Tensor:
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image_bytes = mean_image_subtraction(image_bytes, means)
Note that the rank of `image` must be known.
Args:
image_bytes: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
num_channels: number of color channels in the image that will be distorted.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image_bytes.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
# We have a 1-D tensor of means; convert to 3-D.
# Note(b/130245863): we explicitly call `broadcast` instead of simply
# expanding dimensions for better performance.
means = tf.broadcast_to(means, tf.shape(image_bytes))
if dtype is not None:
means = tf.cast(means, dtype=dtype)
return image_bytes - means
def standardize_image(
image_bytes: tf.Tensor,
stddev: Tuple[float, ...],
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
) -> tf.Tensor:
"""Divides the given stddev from each image channel.
For example:
stddev = [123.68, 116.779, 103.939]
image_bytes = standardize_image(image_bytes, stddev)
Note that the rank of `image` must be known.
Args:
image_bytes: a tensor of size [height, width, C].
stddev: a C-vector of values to divide from each channel.
num_channels: number of color channels in the image that will be distorted.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `stddev`.
"""
if image_bytes.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(stddev) != num_channels:
raise ValueError('len(stddev) must match the number of channels')
# We have a 1-D tensor of stddev; convert to 3-D.
# Note(b/130245863): we explicitly call `broadcast` instead of simply
# expanding dimensions for better performance.
stddev = tf.broadcast_to(stddev, tf.shape(image_bytes))
if dtype is not None:
stddev = tf.cast(stddev, dtype=dtype)
return image_bytes / stddev
def normalize_images(features: tf.Tensor,
mean_rgb: Tuple[float, ...] = MEAN_RGB,
stddev_rgb: Tuple[float, ...] = STDDEV_RGB,
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
data_format: Text = 'channels_last') -> tf.Tensor:
"""Normalizes the input image channels with the given mean and stddev.
Args:
features: `Tensor` representing decoded images in float format.
mean_rgb: the mean of the channels to subtract.
stddev_rgb: the stddev of the channels to divide.
num_channels: the number of channels in the input image tensor.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
data_format: the format of the input image tensor
['channels_first', 'channels_last'].
Returns:
A normalized image `Tensor`.
"""
# TODO(allencwang) - figure out how to use mean_image_subtraction and
# standardize_image on batches of images and replace the following.
if data_format == 'channels_first':
stats_shape = [num_channels, 1, 1]
else:
stats_shape = [1, 1, num_channels]
if dtype is not None:
features = tf.image.convert_image_dtype(features, dtype=dtype)
if mean_rgb is not None:
mean_rgb = tf.constant(mean_rgb,
shape=stats_shape,
dtype=features.dtype)
mean_rgb = tf.broadcast_to(mean_rgb, tf.shape(features))
features = features - mean_rgb
if stddev_rgb is not None:
stddev_rgb = tf.constant(stddev_rgb,
shape=stats_shape,
dtype=features.dtype)
stddev_rgb = tf.broadcast_to(stddev_rgb, tf.shape(features))
features = features / stddev_rgb
return features
def decode_and_center_crop(image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
crop_padding: int = CROP_PADDING) -> tf.Tensor:
"""Crops to center of image with padding then scales image_size.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image height/width dimension.
crop_padding: the padding size to use when centering the crop.
Returns:
A decoded and cropped image `Tensor`.
"""
decoded = image_bytes.dtype != tf.string
shape = (tf.shape(image_bytes) if decoded
else tf.image.extract_jpeg_shape(image_bytes))
image_height = shape[0]
image_width = shape[1]
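  # Take a centered square crop of size image_size / (image_size + crop_padding) * min(H, W); with the defaults this is the standard 224/256 = 87.5% center crop.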
padded_center_crop_size = tf.cast(
((image_size / (image_size + crop_padding)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
if decoded:
image = tf.image.crop_to_bounding_box(
image_bytes,
offset_height=offset_height,
offset_width=offset_width,
target_height=padded_center_crop_size,
target_width=padded_center_crop_size)
else:
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = resize_image(image_bytes=image,
height=image_size,
width=image_size)
return image
def decode_crop_and_flip(image_bytes: tf.Tensor) -> tf.Tensor:
"""Crops an image to a random part of the image, then randomly flips.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
Returns:
A decoded and cropped image `Tensor`.
"""
decoded = image_bytes.dtype != tf.string
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
shape = (tf.shape(image_bytes) if decoded
else tf.image.extract_jpeg_shape(image_bytes))
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Reassemble the bounding box in the format the crop op requires.
offset_height, offset_width, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_height, offset_width,
target_height, target_width])
if decoded:
cropped = tf.image.crop_to_bounding_box(
image_bytes,
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)
else:
cropped = tf.image.decode_and_crop_jpeg(image_bytes,
crop_window,
channels=3)
# Flip to add a little more random distortion in.
cropped = tf.image.random_flip_left_right(cropped)
return cropped
def resize_image(image_bytes: tf.Tensor,
height: int = IMAGE_SIZE,
width: int = IMAGE_SIZE) -> tf.Tensor:
"""Resizes an image to a given height and width.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
height: image height dimension.
width: image width dimension.
Returns:
A tensor containing the resized image.
"""
return tf.compat.v1.image.resize(
image_bytes, [height, width], method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
def preprocess_for_predict(
images: tf.Tensor,
image_size: int = IMAGE_SIZE,
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32
) -> tf.Tensor:
images = tf.reshape(images, [image_size, image_size, num_channels])
if dtype is not None:
images = tf.image.convert_image_dtype(images, dtype=dtype)
return images
def preprocess_for_eval(
image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
num_channels: int = 3,
mean_subtract: bool = False,
standardize: bool = False,
dtype: tf.dtypes.DType = tf.float32
) -> tf.Tensor:
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image height/width dimension.
num_channels: number of image input channels.
mean_subtract: whether or not to apply mean subtraction.
standardize: whether or not to apply standardization.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
A preprocessed and normalized image `Tensor`.
"""
images = decode_and_center_crop(image_bytes, image_size)
images = tf.reshape(images, [image_size, image_size, num_channels])
if mean_subtract:
images = mean_image_subtraction(image_bytes=images, means=MEAN_RGB)
if standardize:
images = standardize_image(image_bytes=images, stddev=STDDEV_RGB)
if dtype is not None:
images = tf.image.convert_image_dtype(images, dtype=dtype)
return images
def load_eval_image(filename: Text, image_size: int = IMAGE_SIZE) -> tf.Tensor:
"""Reads an image from the filesystem and applies image preprocessing.
Args:
filename: a filename path of an image.
image_size: image height/width dimension.
Returns:
A preprocessed and normalized image `Tensor`.
"""
image_bytes = tf.io.read_file(filename)
image = preprocess_for_eval(image_bytes, image_size)
return image
def build_eval_dataset(filenames: List[Text],
labels: List[int] = None,
image_size: int = IMAGE_SIZE,
batch_size: int = 1) -> tf.Tensor:
"""Builds a tf.data.Dataset from a list of filenames and labels.
Args:
filenames: a list of filename paths of images.
labels: a list of labels corresponding to each image.
image_size: image height/width dimension.
batch_size: the batch size used by the dataset
Returns:
A preprocessed and normalized image `Tensor`.
"""
if labels is None:
labels = [0] * len(filenames)
filenames = tf.constant(filenames)
labels = tf.constant(labels)
dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
dataset = dataset.map(
lambda filename, label: (load_eval_image(filename, image_size), label))
dataset = dataset.batch(batch_size)
return dataset
def preprocess_for_train(image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
augmenter: Optional[augment.ImageAugment] = None,
mean_subtract: bool = False,
standardize: bool = False,
dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
"""Preprocesses the given image for training.
Args:
image_bytes: `Tensor` representing an image binary of
arbitrary size of dtype tf.uint8.
image_size: image height/width dimension.
augmenter: the image augmenter to apply.
mean_subtract: whether or not to apply mean subtraction.
standardize: whether or not to apply standardization.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
A preprocessed and normalized image `Tensor`.
"""
images = decode_crop_and_flip(image_bytes=image_bytes)
images = resize_image(images, height=image_size, width=image_size)
if mean_subtract:
images = mean_image_subtraction(image_bytes=images, means=MEAN_RGB)
if standardize:
images = standardize_image(image_bytes=images, stddev=STDDEV_RGB)
if augmenter is not None:
images = augmenter.distort(images)
if dtype is not None:
images = tf.image.convert_image_dtype(images, dtype)
return images
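# --- Minimal usage sketch (added for illustration; not part of the original module).
# It feeds a synthetic in-memory JPEG through the train and eval paths, so no files
# on disk are assumed. mean_subtract/standardize can additionally be enabled to apply
# the MEAN_RGB/STDDEV_RGB normalization defined above.
if __name__ == "__main__":
    _fake_jpeg = tf.io.encode_jpeg(tf.zeros([256, 256, 3], dtype=tf.uint8))
    _train_image = preprocess_for_train(_fake_jpeg, image_size=IMAGE_SIZE)
    _eval_image = preprocess_for_eval(_fake_jpeg, image_size=IMAGE_SIZE)
    print("train:", _train_image.shape, _train_image.dtype)
    print("eval: ", _eval_image.shape, _eval_image.dtype)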
|
TensorFlow2/LanguageModeling/BERT/official/utils/accelerator | accelerator | tpu_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test TPU optimized matmul embedding."""
import numpy as np
import tensorflow as tf
from official.utils.accelerator import tpu as tpu_utils
TEST_CASES = [
dict(embedding_dim=256, vocab_size=1000, sequence_length=64,
batch_size=32, seed=54131),
dict(embedding_dim=8, vocab_size=15, sequence_length=12,
batch_size=256, seed=536413),
dict(embedding_dim=2048, vocab_size=512, sequence_length=50,
batch_size=8, seed=35124)
]
class TPUBaseTester(tf.test.TestCase):
def construct_embedding_and_values(self, embedding_dim, vocab_size,
sequence_length, batch_size, seed):
np.random.seed(seed)
embeddings = np.random.random(size=(vocab_size, embedding_dim))
embedding_table = tf.convert_to_tensor(value=embeddings, dtype=tf.float32)
tokens = np.random.randint(low=1, high=vocab_size-1,
size=(batch_size, sequence_length))
for i in range(batch_size):
tokens[i, np.random.randint(low=0, high=sequence_length-1):] = 0
values = tf.convert_to_tensor(value=tokens, dtype=tf.int32)
mask = tf.cast(tf.not_equal(values, 0), dtype=tf.float32)
return embedding_table, values, mask
def _test_embedding(self, embedding_dim, vocab_size,
sequence_length, batch_size, seed):
"""Test that matmul embedding matches embedding lookup (gather)."""
with self.test_session():
embedding_table, values, mask = self.construct_embedding_and_values(
embedding_dim=embedding_dim,
vocab_size=vocab_size,
sequence_length=sequence_length,
batch_size=batch_size,
seed=seed
)
embedding = (tf.nn.embedding_lookup(params=embedding_table, ids=values) *
tf.expand_dims(mask, -1))
matmul_embedding = tpu_utils.embedding_matmul(
embedding_table=embedding_table, values=values, mask=mask)
self.assertAllClose(embedding, matmul_embedding)
def _test_masking(self, embedding_dim, vocab_size,
sequence_length, batch_size, seed):
"""Test that matmul embedding properly zeros masked positions."""
with self.test_session():
embedding_table, values, mask = self.construct_embedding_and_values(
embedding_dim=embedding_dim,
vocab_size=vocab_size,
sequence_length=sequence_length,
batch_size=batch_size,
seed=seed
)
matmul_embedding = tpu_utils.embedding_matmul(
embedding_table=embedding_table, values=values, mask=mask)
self.assertAllClose(matmul_embedding,
matmul_embedding * tf.expand_dims(mask, -1))
def test_embedding_0(self):
self._test_embedding(**TEST_CASES[0])
def test_embedding_1(self):
self._test_embedding(**TEST_CASES[1])
def test_embedding_2(self):
self._test_embedding(**TEST_CASES[2])
def test_masking_0(self):
self._test_masking(**TEST_CASES[0])
def test_masking_1(self):
self._test_masking(**TEST_CASES[1])
def test_masking_2(self):
self._test_masking(**TEST_CASES[2])
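# Illustrative reference (added; not part of the original tests): the matmul-based
# embedding these cases exercise, written out with the standard one-hot trick. It
# sketches the behaviour the assertions check, under the assumption that
# tpu_utils.embedding_matmul is equivalent to it -- it is not a copy of that code.
def _reference_embedding_matmul(embedding_table, values, mask):
    vocab_size = tf.shape(embedding_table)[0]
    one_hot = tf.one_hot(values, depth=vocab_size, dtype=embedding_table.dtype)
    # (batch, seq, vocab) @ (vocab, dim) -> (batch, seq, dim), zeroed where mask == 0.
    return tf.einsum("bsv,vd->bsd", one_hot, embedding_table) * tf.expand_dims(mask, -1)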
if __name__ == "__main__":
tf.test.main()
|
PyTorch/SpeechSynthesis/FastPitch/platform | platform | DGXA100_FastPitch_TF32_4GPU | #!/bin/bash
set -a
: ${NUM_GPUS:=4}
: ${BATCH_SIZE:=32}
: ${GRAD_ACCUMULATION:=2}
: ${AMP:=false}
bash scripts/train.sh "$@"
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/hydra/job_logging | job_logging | secondary | # @package _group_
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: 1
formatters:
simple:
format: '%(message)s'
handlers:
console:
class: logging.StreamHandler
formatter: simple
stream: ext://sys.stdout
root:
handlers: [console]
level: ERROR
disable_existing_loggers: false
|
PyTorch/SpeechSynthesis/FastPitch/fastpitch | fastpitch | loss_function | # *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.nn.functional as F
from torch import nn
from common.utils import mask_from_lens
from fastpitch.attn_loss_function import AttentionCTCLoss
class FastPitchLoss(nn.Module):
def __init__(self, dur_predictor_loss_scale=1.0,
pitch_predictor_loss_scale=1.0, attn_loss_scale=1.0,
energy_predictor_loss_scale=0.1):
super(FastPitchLoss, self).__init__()
self.dur_predictor_loss_scale = dur_predictor_loss_scale
self.pitch_predictor_loss_scale = pitch_predictor_loss_scale
self.energy_predictor_loss_scale = energy_predictor_loss_scale
self.attn_loss_scale = attn_loss_scale
self.attn_ctc_loss = AttentionCTCLoss()
def forward(self, model_out, targets, is_training=True, meta_agg='mean'):
(mel_out, dec_mask, dur_pred, log_dur_pred, pitch_pred, pitch_tgt,
energy_pred, energy_tgt, attn_soft, attn_hard, attn_dur,
attn_logprob) = model_out
(mel_tgt, in_lens, out_lens) = targets
dur_tgt = attn_dur
dur_lens = in_lens
mel_tgt.requires_grad = False
# (B,H,T) => (B,T,H)
mel_tgt = mel_tgt.transpose(1, 2)
dur_mask = mask_from_lens(dur_lens, max_len=dur_tgt.size(1))
log_dur_tgt = torch.log(dur_tgt.float() + 1)
loss_fn = F.mse_loss
dur_pred_loss = loss_fn(log_dur_pred, log_dur_tgt, reduction='none')
dur_pred_loss = (dur_pred_loss * dur_mask).sum() / dur_mask.sum()
ldiff = mel_tgt.size(1) - mel_out.size(1)
mel_out = F.pad(mel_out, (0, 0, 0, ldiff, 0, 0), value=0.0)
mel_mask = mel_tgt.ne(0).float()
loss_fn = F.mse_loss
mel_loss = loss_fn(mel_out, mel_tgt, reduction='none')
mel_loss = (mel_loss * mel_mask).sum() / mel_mask.sum()
ldiff = pitch_tgt.size(2) - pitch_pred.size(2)
pitch_pred = F.pad(pitch_pred, (0, ldiff, 0, 0, 0, 0), value=0.0)
pitch_loss = F.mse_loss(pitch_tgt, pitch_pred, reduction='none')
pitch_loss = (pitch_loss * dur_mask.unsqueeze(1)).sum() / dur_mask.sum()
if energy_pred is not None:
energy_pred = F.pad(energy_pred, (0, ldiff, 0, 0), value=0.0)
energy_loss = F.mse_loss(energy_tgt, energy_pred, reduction='none')
energy_loss = (energy_loss * dur_mask).sum() / dur_mask.sum()
else:
energy_loss = 0
# Attention loss
attn_loss = self.attn_ctc_loss(attn_logprob, in_lens, out_lens)
loss = (mel_loss
+ dur_pred_loss * self.dur_predictor_loss_scale
+ pitch_loss * self.pitch_predictor_loss_scale
+ energy_loss * self.energy_predictor_loss_scale
+ attn_loss * self.attn_loss_scale)
meta = {
'loss': loss.clone().detach(),
'mel_loss': mel_loss.clone().detach(),
'duration_predictor_loss': dur_pred_loss.clone().detach(),
'pitch_loss': pitch_loss.clone().detach(),
'attn_loss': attn_loss.clone().detach(),
'dur_error': (torch.abs(dur_pred - dur_tgt).sum()
/ dur_mask.sum()).detach(),
}
if energy_pred is not None:
meta['energy_loss'] = energy_loss.clone().detach()
assert meta_agg in ('sum', 'mean')
if meta_agg == 'sum':
bsz = mel_out.size(0)
meta = {k: v * bsz for k, v in meta.items()}
return loss, meta
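# Usage sketch (added for illustration): how this loss is typically driven from a
# training step. `model`, `batch_to_gpu` and `batch` are placeholders for objects in
# the training script, not definitions from this file.
#
#   criterion = FastPitchLoss(dur_predictor_loss_scale=0.1,
#                             pitch_predictor_loss_scale=0.1,
#                             attn_loss_scale=1.0)
#   x, y, _ = batch_to_gpu(batch)    # y == (mel_tgt, in_lens, out_lens)
#   y_pred = model(x)                # the 12-tuple unpacked in forward() above
#   loss, meta = criterion(y_pred, y)
#   loss.backward()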
|
TensorFlow2/Classification/ConvNets/scripts | scripts | bind | #! /bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -euo pipefail
print_usage() {
cat << EOF
${0} [options] [--] COMMAND [ARG...]
Control binding policy for each task. Assumes one rank will be launched for each GPU.
Options:
--cpu=MODE
* exclusive -- bind each rank to an exclusive set of cores near its GPU
* exclusive,nosmt -- bind each rank to an exclusive set of cores near its GPU, without hyperthreading
* node -- bind each rank to all cores in the NUMA node nearest its GPU [default]
* *.sh -- bind each rank using the bash associative array bind_cpu_cores or bind_cpu_nodes from a file
* off -- don't bind
--mem=MODE
* node -- bind each rank to the nearest NUMA node [default]
* *.sh -- bind each rank using the bash associative array bind_mem from a file
* off -- don't bind
--ib=MODE
* single -- bind each rank to a single IB device near its GPU
* off -- don't bind [default]
--cluster=CLUSTER
Select which cluster is being used. May be required if system params cannot be detected.
EOF
}
################################################################################
# Argument parsing
################################################################################
cpu_mode='node'
mem_mode='node'
ib_mode='off'
cluster=''
while [ $# -gt 0 ]; do
case "$1" in
-h|--help) print_usage ; exit 0 ;;
--cpu=*) cpu_mode="${1/*=/}"; shift ;;
--cpu) cpu_mode="$2"; shift 2 ;;
--mem=*) mem_mode="${1/*=/}"; shift ;;
--mem) mem_mode="$2"; shift 2 ;;
--ib=*) ib_mode="${1/*=/}"; shift ;;
--ib) ib_mode="$2"; shift 2 ;;
--cluster=*) cluster="${1/*=/}"; shift ;;
--cluster) cluster="$2"; shift 2 ;;
--) shift; break ;;
*) break ;;
esac
done
if [ $# -lt 1 ]; then
    echo 'ERROR: no command given' >&2
print_usage
exit 1
fi
################################################################################
# Get system params
################################################################################
# LOCAL_RANK is set with an enroot hook for Pytorch containers
# SLURM_LOCALID is set by Slurm
# OMPI_COMM_WORLD_LOCAL_RANK is set by mpirun
readonly local_rank="${LOCAL_RANK:=${SLURM_LOCALID:=${OMPI_COMM_WORLD_LOCAL_RANK:-}}}"
if [ -z "${local_rank}" ]; then
echo 'ERROR: cannot read LOCAL_RANK from env' >&2
exit 1
fi
num_gpus=$(nvidia-smi -i 0 --query-gpu=count --format=csv,noheader,nounits)
if [ "${local_rank}" -ge "${num_gpus}" ]; then
echo "ERROR: local rank is ${local_rank}, but there are only ${num_gpus} gpus available" >&2
exit 1
fi
get_lscpu_value() {
awk -F: "(\$1 == \"${1}\"){gsub(/ /, \"\", \$2); print \$2; found=1} END{exit found!=1}"
}
lscpu_out=$(lscpu)
num_sockets=$(get_lscpu_value 'Socket(s)' <<< "${lscpu_out}")
num_nodes=$(get_lscpu_value 'NUMA node(s)' <<< "${lscpu_out}")
cores_per_socket=$(get_lscpu_value 'Core(s) per socket' <<< "${lscpu_out}")
echo "num_sockets = ${num_sockets} num_nodes=${num_nodes} cores_per_socket=${cores_per_socket}"
readonly cores_per_node=$(( (num_sockets * cores_per_socket) / num_nodes ))
if [ ${num_gpus} -gt 1 ]; then
readonly gpus_per_node=$(( num_gpus / num_nodes ))
else
readonly gpus_per_node=1
fi
readonly cores_per_gpu=$(( cores_per_node / gpus_per_node ))
readonly local_node=$(( local_rank / gpus_per_node ))
declare -a ibdevs=()
case "${cluster}" in
circe)
# Need to specialize for circe because IB detection is hard
ibdevs=(mlx5_1 mlx5_2 mlx5_3 mlx5_4 mlx5_7 mlx5_8 mlx5_9 mlx5_10)
;;
selene)
# Need to specialize for selene because IB detection is hard
ibdevs=(mlx5_0 mlx5_1 mlx5_2 mlx5_3 mlx5_6 mlx5_7 mlx5_8 mlx5_9)
;;
'')
if ibstat_out="$(ibstat -l 2>/dev/null | sort -V)" ; then
mapfile -t ibdevs <<< "${ibstat_out}"
fi
;;
*)
echo "ERROR: Unknown cluster '${cluster}'" >&2
exit 1
;;
esac
readonly num_ibdevs="${#ibdevs[@]}"
################################################################################
# Setup for exec
################################################################################
declare -a numactl_args=()
case "${cpu_mode}" in
exclusive)
numactl_args+=( "$(printf -- "--physcpubind=%u-%u,%u-%u" \
$(( local_rank * cores_per_gpu )) \
$(( (local_rank + 1) * cores_per_gpu - 1 )) \
$(( local_rank * cores_per_gpu + (cores_per_gpu * gpus_per_node * num_nodes) )) \
$(( (local_rank + 1) * cores_per_gpu + (cores_per_gpu * gpus_per_node * num_nodes) - 1 )) \
)" )
;;
exclusive,nosmt)
numactl_args+=( "$(printf -- "--physcpubind=%u-%u" \
$(( local_rank * cores_per_gpu )) \
$(( (local_rank + 1) * cores_per_gpu - 1 )) \
)" )
;;
node)
numactl_args+=( "--cpunodebind=${local_node}" )
;;
*.sh)
source "${cpu_mode}"
if [ -n "${bind_cpu_cores:-}" ]; then
numactl_args+=( "--physcpubind=${bind_cpu_cores[${local_rank}]}" )
elif [ -n "${bind_cpu_nodes:-}" ]; then
numactl_args+=( "--cpunodebind=${bind_cpu_nodes[${local_rank}]}" )
else
echo "ERROR: invalid CPU affinity file ${cpu_mode}." >&2
exit 1
fi
;;
off|'')
;;
*)
echo "ERROR: invalid cpu mode '${cpu_mode}'" 2>&1
print_usage
exit 1
;;
esac
case "${mem_mode}" in
node)
numactl_args+=( "--membind=${local_node}" )
;;
*.sh)
source "${mem_mode}"
if [ -z "${bind_mem:-}" ]; then
echo "ERROR: invalid memory affinity file ${mem_mode}." >&2
exit 1
fi
numactl_args+=( "--membind=${bind_mem[${local_rank}]}" )
;;
off|'')
;;
*)
echo "ERROR: invalid mem mode '${mem_mode}'" 2>&1
print_usage
exit 1
;;
esac
case "${ib_mode}" in
single)
if [ "${num_ibdevs}" -eq 0 ]; then
echo "WARNING: used '$0 --ib=single', but there are 0 IB devices available; skipping IB binding." 2>&1
else
readonly ibdev="${ibdevs[$(( local_rank * num_ibdevs / num_gpus ))]}"
export OMPI_MCA_btl_openib_if_include="${OMPI_MCA_btl_openib_if_include-$ibdev}"
export UCX_NET_DEVICES="${UCX_NET_DEVICES-$ibdev:1}"
fi
;;
off|'')
;;
*)
echo "ERROR: invalid ib mode '${ib_mode}'" 2>&1
print_usage
exit 1
;;
esac
################################################################################
# Exec
################################################################################
if [ "${#numactl_args[@]}" -gt 0 ] ; then
set -x
exec numactl "${numactl_args[@]}" -- "${@}"
else
exec "${@}"
fi
|
PyTorch/Recommendation/NCF | NCF | test_dataset | # Copyright (c) 2018, deepakn94, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
set -e
set -x
DATASET_NAME=${1:-'ml-20m'}
RAW_DATADIR=${2:-"/data/${DATASET_NAME}"}
CACHED_DATADIR=${3:-"/data/cache/${DATASET_NAME}"}
# you can add another option to this case in order to support other datasets
case ${DATASET_NAME} in
'ml-20m')
ZIP_PATH=${RAW_DATADIR}/'ml-20m.zip'
RATINGS_PATH=${RAW_DATADIR}'/ml-20m/ratings.csv'
;;
'ml-1m')
ZIP_PATH=${RAW_DATADIR}/'ml-1m.zip'
RATINGS_PATH=${RAW_DATADIR}'/ml-1m/ratings.dat'
;;
*)
echo "Unsupported dataset name: $DATASET_NAME"
exit 1
esac
if [ ! -d ${RAW_DATADIR} ]; then
mkdir -p ${RAW_DATADIR}
fi
if [ ! -d ${CACHED_DATADIR} ]; then
mkdir -p ${CACHED_DATADIR}
fi
if [ -f log ]; then
rm -f log
fi
if [ ! -f ${ZIP_PATH} ]; then
echo "Dataset not found. Please download it from: https://grouplens.org/datasets/movielens/20m/ and put it in ${ZIP_PATH}"
exit 1
fi
if [ ! -f ${RATINGS_PATH} ]; then
unzip -u ${ZIP_PATH} -d ${RAW_DATADIR}
fi
for test_name in more_pos less_pos less_user less_item more_user more_item other_names;
do
NEW_DIR=${CACHED_DATADIR}/${test_name}
if [ ! -d ${NEW_DIR} ]; then
mkdir -p ${NEW_DIR}
fi
python convert_test.py --path ${RATINGS_PATH} --output $NEW_DIR --test ${test_name}
echo "Generated testing for $test_name"
done
for test_sample in '0' '10' '200';
do
  NEW_DIR=${CACHED_DATADIR}/sample_${test_sample}
if [ ! -d ${NEW_DIR} ]; then
mkdir -p ${NEW_DIR}
fi
python convert_test.py --path ${RATINGS_PATH} --output $NEW_DIR --valid_negative $test_sample
echo "Generated testing for $test_name"
done
echo "Dataset $DATASET_NAME successfully prepared at: $CACHED_DATADIR"
echo "You can now run the training with: python -m torch.distributed.launch --nproc_per_node=<number_of_GPUs> --use_env ncf.py --data ${CACHED_DATADIR}"
|
PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/bermuda | bermuda | onnx2trt_conv | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, Iterable, Optional
# pytype: disable=import-error
import onnx
import tensorrt as trt
from ..core import BaseConverter, Format, Model, Precision, ShapeSpec
from ..extensions import converters
from .utils import get_input_shapes
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
class Onnx2TRTConverter(BaseConverter):
def __init__(self, *, max_batch_size: int, max_workspace_size: int, precision: str):
self._max_batch_size = max_batch_size
self._max_workspace_size = max_workspace_size
self._precision = Precision(precision)
def convert(self, model: Model, dataloader_fn) -> Model:
input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size)
cuda_engine = onnx2trt(
model.handle,
shapes=input_shapes,
max_workspace_size=self._max_workspace_size,
max_batch_size=self._max_batch_size,
model_precision=self._precision.value,
)
return model._replace(handle=cuda_engine)
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
# TensorRT requires source models to be in FP32 precision
return Precision.FP32
def onnx2trt(
onnx_model: onnx.ModelProto,
*,
shapes: Dict[str, ShapeSpec],
max_workspace_size: int,
max_batch_size: int,
model_precision: str,
) -> "trt.ICudaEngine":
"""
Converts onnx model to TensorRT ICudaEngine
Args:
onnx_model: onnx.Model to convert
shapes: dictionary containing min shape, max shape, opt shape for each input name
max_workspace_size: The maximum GPU temporary memory which the CudaEngine can use at execution time.
max_batch_size: The maximum batch size which can be used at execution time,
and also the batch size for which the CudaEngine will be optimized.
model_precision: precision of kernels (possible values: fp16, fp32)
Returns: TensorRT ICudaEngine
"""
# Whether or not 16-bit kernels are permitted.
# During :class:`ICudaEngine` build fp16 kernels will also be tried when this mode is enabled.
fp16_mode = "16" in model_precision
builder = trt.Builder(TRT_LOGGER)
builder.fp16_mode = fp16_mode
builder.max_batch_size = max_batch_size
builder.max_workspace_size = max_workspace_size
# In TensorRT 7.0, the ONNX parser only supports full-dimensions mode,
# meaning that your network definition must be created with the explicitBatch flag set.
# For more information, see
# https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#work_dynamic_shapes
flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(flags)
with trt.OnnxParser(network, TRT_LOGGER) as parser:
# onnx model parsing
if not parser.parse(onnx_model.SerializeToString()):
for i in range(parser.num_errors):
LOGGER.error(f"OnnxParser error {i}/{parser.num_errors}: {parser.get_error(i)}")
raise RuntimeError("Error during parsing ONNX model (see logs for details)")
# optimization
config = builder.create_builder_config()
config.flags |= bool(fp16_mode) << int(trt.BuilderFlag.FP16)
config.max_workspace_size = max_workspace_size
profile = builder.create_optimization_profile()
for name, spec in shapes.items():
profile.set_shape(name, **spec._asdict())
config.add_optimization_profile(profile)
engine = builder.build_engine(network, config=config)
return engine
converters.register_extension(f"{Format.ONNX.value}--{Format.TRT.value}", Onnx2TRTConverter)
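# Usage sketch (added for illustration): converting a serialized ONNX model directly
# with the helper above. The file name, the input name and the ShapeSpec(min/opt/max)
# field names are assumptions made for this example, not values from this module.
#
#   model_proto = onnx.load("model.onnx")
#   engine = onnx2trt(
#       model_proto,
#       shapes={"INPUT__0": ShapeSpec(min=(1, 80), opt=(8, 80), max=(16, 80))},
#       max_workspace_size=4 * 2 ** 30,
#       max_batch_size=16,
#       model_precision="fp16",
#   )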
|
TensorFlow2/Segmentation/nnUNet/runtime | runtime | utils | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import pickle
import shutil
import sys
from functools import wraps
from pathlib import Path
import horovod.tensorflow as hvd
import numpy as np
import tensorflow as tf
from tqdm import tqdm
def hvd_init():
hvd.init()
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], "GPU")
def set_tf_flags(args):
os.environ["CUDA_CACHE_DISABLE"] = "0"
os.environ["HOROVOD_GPU_ALLREDUCE"] = "NCCL"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_GPU_THREAD_MODE"] = "gpu_private"
os.environ["TF_GPU_THREAD_COUNT"] = str(hvd.size())
os.environ["TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT"] = "1"
os.environ["TF_ADJUST_HUE_FUSED"] = "1"
os.environ["TF_ADJUST_SATURATION_FUSED"] = "1"
os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "1"
os.environ["TF_SYNC_ON_FINISH"] = "0"
os.environ["TF_AUTOTUNE_THRESHOLD"] = "2"
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION"] = "0"
os.environ["TF_ENABLE_LAYOUT_NHWC"] = "1"
os.environ["TF_CPP_VMODULE"] = "4"
if args.xla:
os.environ["TF_XLA_ENABLE_GPU_GRAPH_CAPTURE"] = "1"
if args.amp:
os.environ["XLA_FLAGS"] = "--xla_gpu_force_conv_nhwc"
tf.config.optimizer.set_jit(True)
if hvd.size() > 1:
tf.config.threading.set_inter_op_parallelism_threads(max(2, (multiprocessing.cpu_count() // hvd.size()) - 2))
else:
tf.config.threading.set_inter_op_parallelism_threads(8)
if args.amp:
tf.keras.mixed_precision.set_global_policy("mixed_float16")
def is_main_process():
return hvd.rank() == 0
def progress_bar(iterable, *args, quiet, **kwargs):
if quiet or not is_main_process():
return iterable
return tqdm(iterable, *args, **kwargs)
def rank_zero_only(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
if is_main_process():
return fn(*args, **kwargs)
return wrapped_fn
def set_seed(seed):
np.random.seed(seed)
tf.random.set_seed(seed)
def get_task_code(args):
return f"{args.task}_{args.dim}d_tf2"
def get_config_file(args):
task_code = get_task_code(args)
path = os.path.join(args.data, "config.pkl")
if not os.path.exists(path):
path = os.path.join(args.data, task_code, "config.pkl")
return pickle.load(open(path, "rb"))
def get_tta_flips(dim):
if dim == 2:
return [[1], [2], [1, 2]]
return [[1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
def make_empty_dir(path, force=False):
path = Path(path)
if path.exists():
if not path.is_dir():
print(f"Output path {path} exists and is not a directory." "Please remove it and try again.")
sys.exit(1)
else:
if not force:
decision = input(f"Output path {path} exists. Continue and replace it? [Y/n]: ")
if decision.strip().lower() not in ["", "y"]:
sys.exit(1)
shutil.rmtree(path, ignore_errors=True)
path.mkdir(parents=True)
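# Usage sketch (added for illustration; assumes a working Horovod + GPU setup, which
# is not checked here).
#
#   hvd_init()
#   set_seed(1234)
#
#   @rank_zero_only
#   def log_metrics(metrics):
#       print(metrics)   # runs on rank 0 only
#
#   log_metrics({"dice": 0.91})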
|